1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
4 // met:
5 //
6 //     * Redistributions of source code must retain the above copyright
7 //       notice, this list of conditions and the following disclaimer.
8 //     * Redistributions in binary form must reproduce the above
9 //       copyright notice, this list of conditions and the following
10 //       disclaimer in the documentation and/or other materials provided
11 //       with the distribution.
12 //     * Neither the name of Google Inc. nor the names of its
13 //       contributors may be used to endorse or promote products derived
14 //       from this software without specific prior written permission.
15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
28 #include "v8.h"
29
30 #include "accessors.h"
31 #include "api.h"
32 #include "bootstrapper.h"
33 #include "codegen.h"
34 #include "compilation-cache.h"
35 #include "debug.h"
36 #include "deoptimizer.h"
37 #include "global-handles.h"
38 #include "heap-profiler.h"
39 #include "incremental-marking.h"
40 #include "mark-compact.h"
41 #include "natives.h"
42 #include "objects-visiting.h"
43 #include "objects-visiting-inl.h"
44 #include "once.h"
45 #include "runtime-profiler.h"
46 #include "scopeinfo.h"
47 #include "snapshot.h"
48 #include "store-buffer.h"
49 #include "v8threads.h"
50 #include "v8utils.h"
51 #include "vm-state-inl.h"
52 #if V8_TARGET_ARCH_ARM && !V8_INTERPRETED_REGEXP
53 #include "regexp-macro-assembler.h"
54 #include "arm/regexp-macro-assembler-arm.h"
55 #endif
56 #if V8_TARGET_ARCH_MIPS && !V8_INTERPRETED_REGEXP
57 #include "regexp-macro-assembler.h"
58 #include "mips/regexp-macro-assembler-mips.h"
59 #endif
60
61 namespace v8 {
62 namespace internal {
63
64
65 Heap::Heap()
66     : isolate_(NULL),
67 // semispace_size_ should be a power of 2 and old_generation_size_ should be
68 // a multiple of Page::kPageSize.
69 #if defined(V8_TARGET_ARCH_X64)
70 #define LUMP_OF_MEMORY (2 * MB)
71       code_range_size_(512*MB),
72 #else
73 #define LUMP_OF_MEMORY MB
74       code_range_size_(0),
75 #endif
76 #if defined(ANDROID)
77       reserved_semispace_size_(4 * Max(LUMP_OF_MEMORY, Page::kPageSize)),
78       max_semispace_size_(4 * Max(LUMP_OF_MEMORY, Page::kPageSize)),
79       initial_semispace_size_(Page::kPageSize),
80       max_old_generation_size_(192*MB),
81       max_executable_size_(max_old_generation_size_),
82 #else
83       reserved_semispace_size_(8 * Max(LUMP_OF_MEMORY, Page::kPageSize)),
84       max_semispace_size_(8 * Max(LUMP_OF_MEMORY, Page::kPageSize)),
85       initial_semispace_size_(Page::kPageSize),
86       max_old_generation_size_(700ul * LUMP_OF_MEMORY),
87       max_executable_size_(256l * LUMP_OF_MEMORY),
88 #endif
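// For illustration (non-Android, and assuming Page::kPageSize does not exceed
// LUMP_OF_MEMORY): x64 gets 16 MB semispaces, a 1400 MB old generation limit
// and a 512 MB executable limit; 32-bit targets get 8 MB semispaces, 700 MB
// and 256 MB respectively.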
89
90 // Variables set based on semispace_size_ and old_generation_size_ in
91 // ConfigureHeap (survived_since_last_expansion_, external_allocation_limit_).
92 // The young generation reservation will be 4 * reserved_semispace_size_ to
93 // ensure that the young generation can be aligned to its size.
94       survived_since_last_expansion_(0),
95       sweep_generation_(0),
96       always_allocate_scope_depth_(0),
97       linear_allocation_scope_depth_(0),
98       contexts_disposed_(0),
99       global_ic_age_(0),
100       flush_monomorphic_ics_(false),
101       scan_on_scavenge_pages_(0),
102       new_space_(this),
103       old_pointer_space_(NULL),
104       old_data_space_(NULL),
105       code_space_(NULL),
106       map_space_(NULL),
107       cell_space_(NULL),
108       lo_space_(NULL),
109       gc_state_(NOT_IN_GC),
110       gc_post_processing_depth_(0),
111       ms_count_(0),
112       gc_count_(0),
113       remembered_unmapped_pages_index_(0),
114       unflattened_strings_length_(0),
115 #ifdef DEBUG
116       allocation_allowed_(true),
117       allocation_timeout_(0),
118       disallow_allocation_failure_(false),
119 #endif  // DEBUG
120       new_space_high_promotion_mode_active_(false),
121       old_gen_promotion_limit_(kMinimumPromotionLimit),
122       old_gen_allocation_limit_(kMinimumAllocationLimit),
123       size_of_old_gen_at_last_old_space_gc_(0),
124       external_allocation_limit_(0),
125       amount_of_external_allocated_memory_(0),
126       amount_of_external_allocated_memory_at_last_global_gc_(0),
127       old_gen_exhausted_(false),
128       store_buffer_rebuilder_(store_buffer()),
129       hidden_string_(NULL),
130       global_gc_prologue_callback_(NULL),
131       global_gc_epilogue_callback_(NULL),
132       gc_safe_size_of_old_object_(NULL),
133       total_regexp_code_generated_(0),
134       tracer_(NULL),
135       young_survivors_after_last_gc_(0),
136       high_survival_rate_period_length_(0),
137       low_survival_rate_period_length_(0),
138       survival_rate_(0),
139       previous_survival_rate_trend_(Heap::STABLE),
140       survival_rate_trend_(Heap::STABLE),
141       max_gc_pause_(0.0),
142       total_gc_time_ms_(0.0),
143       max_alive_after_gc_(0),
144       min_in_mutator_(kMaxInt),
145       alive_after_last_gc_(0),
146       last_gc_end_timestamp_(0.0),
147       marking_time_(0.0),
148       sweeping_time_(0.0),
149       store_buffer_(this),
150       marking_(this),
151       incremental_marking_(this),
152       number_idle_notifications_(0),
153       last_idle_notification_gc_count_(0),
154       last_idle_notification_gc_count_init_(false),
155       mark_sweeps_since_idle_round_started_(0),
156       ms_count_at_last_idle_notification_(0),
157       gc_count_at_last_idle_gc_(0),
158       scavenges_since_last_idle_round_(kIdleScavengeThreshold),
159       gcs_since_last_deopt_(0),
160 #ifdef VERIFY_HEAP
161       no_weak_embedded_maps_verification_scope_depth_(0),
162 #endif
163       promotion_queue_(this),
164       configured_(false),
165       chunks_queued_for_free_(NULL),
166       relocation_mutex_(NULL) {
167   // Allow build-time customization of the max semispace size. Building
168   // V8 with snapshots and a non-default max semispace size is much
169   // easier if you can define it as part of the build environment.
170 #if defined(V8_MAX_SEMISPACE_SIZE)
171   max_semispace_size_ = reserved_semispace_size_ = V8_MAX_SEMISPACE_SIZE;
172 #endif
173
174   intptr_t max_virtual = OS::MaxVirtualMemory();
175
176   if (max_virtual > 0) {
177     if (code_range_size_ > 0) {
178       // Reserve no more than 1/8 of the memory for the code range.
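      // For example, with 2 GB of addressable virtual memory the 512 MB
      // default on x64 is capped to 256 MB.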
179       code_range_size_ = Min(code_range_size_, max_virtual >> 3);
180     }
181   }
182
183   memset(roots_, 0, sizeof(roots_[0]) * kRootListLength);
184   native_contexts_list_ = NULL;
185   mark_compact_collector_.heap_ = this;
186   external_string_table_.heap_ = this;
187   // Put a dummy entry in the remembered pages so we can find the list in
188   // the minidump even if there are no real unmapped pages.
189   RememberUnmappedPage(NULL, false);
190
191   ClearObjectStats(true);
192 }
193
194
195 intptr_t Heap::Capacity() {
196   if (!HasBeenSetUp()) return 0;
197
198   return new_space_.Capacity() +
199       old_pointer_space_->Capacity() +
200       old_data_space_->Capacity() +
201       code_space_->Capacity() +
202       map_space_->Capacity() +
203       cell_space_->Capacity();
204 }
205
206
207 intptr_t Heap::CommittedMemory() {
208   if (!HasBeenSetUp()) return 0;
209
210   return new_space_.CommittedMemory() +
211       old_pointer_space_->CommittedMemory() +
212       old_data_space_->CommittedMemory() +
213       code_space_->CommittedMemory() +
214       map_space_->CommittedMemory() +
215       cell_space_->CommittedMemory() +
216       lo_space_->Size();
217 }
218
219
220 size_t Heap::CommittedPhysicalMemory() {
221   if (!HasBeenSetUp()) return 0;
222
223   return new_space_.CommittedPhysicalMemory() +
224       old_pointer_space_->CommittedPhysicalMemory() +
225       old_data_space_->CommittedPhysicalMemory() +
226       code_space_->CommittedPhysicalMemory() +
227       map_space_->CommittedPhysicalMemory() +
228       cell_space_->CommittedPhysicalMemory() +
229       lo_space_->CommittedPhysicalMemory();
230 }
231
232
233 intptr_t Heap::CommittedMemoryExecutable() {
234   if (!HasBeenSetUp()) return 0;
235
236   return isolate()->memory_allocator()->SizeExecutable();
237 }
238
239
240 intptr_t Heap::Available() {
241   if (!HasBeenSetUp()) return 0;
242
243   return new_space_.Available() +
244       old_pointer_space_->Available() +
245       old_data_space_->Available() +
246       code_space_->Available() +
247       map_space_->Available() +
248       cell_space_->Available();
249 }
250
251
252 bool Heap::HasBeenSetUp() {
253   return old_pointer_space_ != NULL &&
254          old_data_space_ != NULL &&
255          code_space_ != NULL &&
256          map_space_ != NULL &&
257          cell_space_ != NULL &&
258          lo_space_ != NULL;
259 }
260
261
262 int Heap::GcSafeSizeOfOldObject(HeapObject* object) {
263   if (IntrusiveMarking::IsMarked(object)) {
264     return IntrusiveMarking::SizeOfMarkedObject(object);
265   }
266   return object->SizeFromMap(object->map());
267 }
268
269
270 GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space,
271                                               const char** reason) {
272   // Is global GC requested?
273   if (space != NEW_SPACE) {
274     isolate_->counters()->gc_compactor_caused_by_request()->Increment();
275     *reason = "GC in old space requested";
276     return MARK_COMPACTOR;
277   }
278
279   if (FLAG_gc_global || (FLAG_stress_compaction && (gc_count_ & 1) != 0)) {
280     *reason = "GC in old space forced by flags";
281     return MARK_COMPACTOR;
282   }
283
284   // Is enough data promoted to justify a global GC?
285   if (OldGenerationPromotionLimitReached()) {
286     isolate_->counters()->gc_compactor_caused_by_promoted_data()->Increment();
287     *reason = "promotion limit reached";
288     return MARK_COMPACTOR;
289   }
290
291   // Have allocation in OLD and LO failed?
292   if (old_gen_exhausted_) {
293     isolate_->counters()->
294         gc_compactor_caused_by_oldspace_exhaustion()->Increment();
295     *reason = "old generations exhausted";
296     return MARK_COMPACTOR;
297   }
298
299   // Is there enough space left in OLD to guarantee that a scavenge can
300   // succeed?
301   //
302   // Note that MemoryAllocator->MaxAvailable() undercounts the memory available
303   // for object promotion. It counts only the bytes that the memory
304   // allocator has not yet allocated from the OS and assigned to any space,
305   // and does not count available bytes already in the old space or code
306   // space.  Undercounting is safe---we may get an unrequested full GC when
307   // a scavenge would have succeeded.
308   if (isolate_->memory_allocator()->MaxAvailable() <= new_space_.Size()) {
309     isolate_->counters()->
310         gc_compactor_caused_by_oldspace_exhaustion()->Increment();
311     *reason = "scavenge might not succeed";
312     return MARK_COMPACTOR;
313   }
314
315   // Default
316   *reason = NULL;
317   return SCAVENGER;
318 }
319
320
321 // TODO(1238405): Combine the infrastructure for --heap-stats and
322 // --log-gc to avoid the complicated preprocessor and flag testing.
323 void Heap::ReportStatisticsBeforeGC() {
324   // Heap::ReportHeapStatistics will also log NewSpace statistics when
325   // --log-gc is set.  The following logic is used to avoid
326   // double logging.
327 #ifdef DEBUG
328   if (FLAG_heap_stats || FLAG_log_gc) new_space_.CollectStatistics();
329   if (FLAG_heap_stats) {
330     ReportHeapStatistics("Before GC");
331   } else if (FLAG_log_gc) {
332     new_space_.ReportStatistics();
333   }
334   if (FLAG_heap_stats || FLAG_log_gc) new_space_.ClearHistograms();
335 #else
336   if (FLAG_log_gc) {
337     new_space_.CollectStatistics();
338     new_space_.ReportStatistics();
339     new_space_.ClearHistograms();
340   }
341 #endif  // DEBUG
342 }
343
344
345 void Heap::PrintShortHeapStatistics() {
346   if (!FLAG_trace_gc_verbose) return;
347   PrintPID("Memory allocator,   used: %6" V8_PTR_PREFIX "d KB"
348                ", available: %6" V8_PTR_PREFIX "d KB\n",
349            isolate_->memory_allocator()->Size() / KB,
350            isolate_->memory_allocator()->Available() / KB);
351   PrintPID("New space,          used: %6" V8_PTR_PREFIX "d KB"
352                ", available: %6" V8_PTR_PREFIX "d KB"
353                ", committed: %6" V8_PTR_PREFIX "d KB\n",
354            new_space_.Size() / KB,
355            new_space_.Available() / KB,
356            new_space_.CommittedMemory() / KB);
357   PrintPID("Old pointers,       used: %6" V8_PTR_PREFIX "d KB"
358                ", available: %6" V8_PTR_PREFIX "d KB"
359                ", committed: %6" V8_PTR_PREFIX "d KB\n",
360            old_pointer_space_->SizeOfObjects() / KB,
361            old_pointer_space_->Available() / KB,
362            old_pointer_space_->CommittedMemory() / KB);
363   PrintPID("Old data space,     used: %6" V8_PTR_PREFIX "d KB"
364                ", available: %6" V8_PTR_PREFIX "d KB"
365                ", committed: %6" V8_PTR_PREFIX "d KB\n",
366            old_data_space_->SizeOfObjects() / KB,
367            old_data_space_->Available() / KB,
368            old_data_space_->CommittedMemory() / KB);
369   PrintPID("Code space,         used: %6" V8_PTR_PREFIX "d KB"
370                ", available: %6" V8_PTR_PREFIX "d KB"
371                ", committed: %6" V8_PTR_PREFIX "d KB\n",
372            code_space_->SizeOfObjects() / KB,
373            code_space_->Available() / KB,
374            code_space_->CommittedMemory() / KB);
375   PrintPID("Map space,          used: %6" V8_PTR_PREFIX "d KB"
376                ", available: %6" V8_PTR_PREFIX "d KB"
377                ", committed: %6" V8_PTR_PREFIX "d KB\n",
378            map_space_->SizeOfObjects() / KB,
379            map_space_->Available() / KB,
380            map_space_->CommittedMemory() / KB);
381   PrintPID("Cell space,         used: %6" V8_PTR_PREFIX "d KB"
382                ", available: %6" V8_PTR_PREFIX "d KB"
383                ", committed: %6" V8_PTR_PREFIX "d KB\n",
384            cell_space_->SizeOfObjects() / KB,
385            cell_space_->Available() / KB,
386            cell_space_->CommittedMemory() / KB);
387   PrintPID("Large object space, used: %6" V8_PTR_PREFIX "d KB"
388                ", available: %6" V8_PTR_PREFIX "d KB"
389                ", committed: %6" V8_PTR_PREFIX "d KB\n",
390            lo_space_->SizeOfObjects() / KB,
391            lo_space_->Available() / KB,
392            lo_space_->CommittedMemory() / KB);
393   PrintPID("All spaces,         used: %6" V8_PTR_PREFIX "d KB"
394                ", available: %6" V8_PTR_PREFIX "d KB"
395                ", committed: %6" V8_PTR_PREFIX "d KB\n",
396            this->SizeOfObjects() / KB,
397            this->Available() / KB,
398            this->CommittedMemory() / KB);
399   PrintPID("Total time spent in GC  : %.1f ms\n", total_gc_time_ms_);
400 }
401
402
403 // TODO(1238405): Combine the infrastructure for --heap-stats and
404 // --log-gc to avoid the complicated preprocessor and flag testing.
405 void Heap::ReportStatisticsAfterGC() {
406   // As in ReportStatisticsBeforeGC, we use some complicated logic to ensure that
407   // NewSpace statistics are logged exactly once when --log-gc is turned on.
408 #if defined(DEBUG)
409   if (FLAG_heap_stats) {
410     new_space_.CollectStatistics();
411     ReportHeapStatistics("After GC");
412   } else if (FLAG_log_gc) {
413     new_space_.ReportStatistics();
414   }
415 #else
416   if (FLAG_log_gc) new_space_.ReportStatistics();
417 #endif  // DEBUG
418 }
419
420
421 void Heap::GarbageCollectionPrologue() {
422   isolate_->transcendental_cache()->Clear();
423   ClearJSFunctionResultCaches();
424   gc_count_++;
425   unflattened_strings_length_ = 0;
426
427   if (FLAG_flush_code && FLAG_flush_code_incrementally) {
428     mark_compact_collector()->EnableCodeFlushing(true);
429   }
430
431 #ifdef VERIFY_HEAP
432   if (FLAG_verify_heap) {
433     Verify();
434   }
435 #endif
436
437 #ifdef DEBUG
438   ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
439   allow_allocation(false);
440
441   if (FLAG_gc_verbose) Print();
442
443   ReportStatisticsBeforeGC();
444 #endif  // DEBUG
445
446   store_buffer()->GCPrologue();
447 }
448
449
450 intptr_t Heap::SizeOfObjects() {
451   intptr_t total = 0;
452   AllSpaces spaces(this);
453   for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
454     total += space->SizeOfObjects();
455   }
456   return total;
457 }
458
459
460 void Heap::RepairFreeListsAfterBoot() {
461   PagedSpaces spaces(this);
462   for (PagedSpace* space = spaces.next();
463        space != NULL;
464        space = spaces.next()) {
465     space->RepairFreeListsAfterBoot();
466   }
467 }
468
469
470 void Heap::GarbageCollectionEpilogue() {
471   store_buffer()->GCEpilogue();
472
473   // In release mode, we only zap the from space under heap verification.
474   if (Heap::ShouldZapGarbage()) {
475     ZapFromSpace();
476   }
477
478 #ifdef VERIFY_HEAP
479   if (FLAG_verify_heap) {
480     Verify();
481   }
482 #endif
483
484 #ifdef DEBUG
485   allow_allocation(true);
486   if (FLAG_print_global_handles) isolate_->global_handles()->Print();
487   if (FLAG_print_handles) PrintHandles();
488   if (FLAG_gc_verbose) Print();
489   if (FLAG_code_stats) ReportCodeStatistics("After GC");
490 #endif
491   if (FLAG_deopt_every_n_garbage_collections > 0) {
492     if (++gcs_since_last_deopt_ == FLAG_deopt_every_n_garbage_collections) {
493       Deoptimizer::DeoptimizeAll(isolate());
494       gcs_since_last_deopt_ = 0;
495     }
496   }
497
498   isolate_->counters()->alive_after_last_gc()->Set(
499       static_cast<int>(SizeOfObjects()));
500
501   isolate_->counters()->string_table_capacity()->Set(
502       string_table()->Capacity());
503   isolate_->counters()->number_of_symbols()->Set(
504       string_table()->NumberOfElements());
505
506   if (CommittedMemory() > 0) {
507     isolate_->counters()->external_fragmentation_total()->AddSample(
508         static_cast<int>(100 - (SizeOfObjects() * 100.0) / CommittedMemory()));
509
510     isolate_->counters()->heap_fraction_map_space()->AddSample(
511         static_cast<int>(
512             (map_space()->CommittedMemory() * 100.0) / CommittedMemory()));
513     isolate_->counters()->heap_fraction_cell_space()->AddSample(
514         static_cast<int>(
515             (cell_space()->CommittedMemory() * 100.0) / CommittedMemory()));
516
517     isolate_->counters()->heap_sample_total_committed()->AddSample(
518         static_cast<int>(CommittedMemory() / KB));
519     isolate_->counters()->heap_sample_total_used()->AddSample(
520         static_cast<int>(SizeOfObjects() / KB));
521     isolate_->counters()->heap_sample_map_space_committed()->AddSample(
522         static_cast<int>(map_space()->CommittedMemory() / KB));
523     isolate_->counters()->heap_sample_cell_space_committed()->AddSample(
524         static_cast<int>(cell_space()->CommittedMemory() / KB));
525   }
526
527 #define UPDATE_COUNTERS_FOR_SPACE(space)                                       \
528   isolate_->counters()->space##_bytes_available()->Set(                        \
529       static_cast<int>(space()->Available()));                                 \
530   isolate_->counters()->space##_bytes_committed()->Set(                        \
531       static_cast<int>(space()->CommittedMemory()));                           \
532   isolate_->counters()->space##_bytes_used()->Set(                             \
533       static_cast<int>(space()->SizeOfObjects()));
534 #define UPDATE_FRAGMENTATION_FOR_SPACE(space)                                  \
535   if (space()->CommittedMemory() > 0) {                                        \
536     isolate_->counters()->external_fragmentation_##space()->AddSample(         \
537         static_cast<int>(100 -                                                 \
538             (space()->SizeOfObjects() * 100.0) / space()->CommittedMemory())); \
539   }
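// For example, a space holding 60 MB of objects in 80 MB of committed memory
// records an external fragmentation sample of 100 - 60 * 100 / 80 = 25.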
540 #define UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(space)                     \
541   UPDATE_COUNTERS_FOR_SPACE(space)                                             \
542   UPDATE_FRAGMENTATION_FOR_SPACE(space)
543
544   UPDATE_COUNTERS_FOR_SPACE(new_space)
545   UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(old_pointer_space)
546   UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(old_data_space)
547   UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(code_space)
548   UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(map_space)
549   UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(cell_space)
550   UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(lo_space)
551 #undef UPDATE_COUNTERS_FOR_SPACE
552 #undef UPDATE_FRAGMENTATION_FOR_SPACE
553 #undef UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE
554
555 #if defined(DEBUG)
556   ReportStatisticsAfterGC();
557 #endif  // DEBUG
558 #ifdef ENABLE_DEBUGGER_SUPPORT
559   isolate_->debug()->AfterGarbageCollection();
560 #endif  // ENABLE_DEBUGGER_SUPPORT
561
562   error_object_list_.DeferredFormatStackTrace(isolate());
563 }
564
565
566 void Heap::CollectAllGarbage(int flags, const char* gc_reason) {
567   // Since we are ignoring the return value, the exact choice of space does
568   // not matter, so long as we do not specify NEW_SPACE, which would not
569   // cause a full GC.
570   mark_compact_collector_.SetFlags(flags);
571   CollectGarbage(OLD_POINTER_SPACE, gc_reason);
572   mark_compact_collector_.SetFlags(kNoGCFlags);
573 }
574
575
576 void Heap::CollectAllAvailableGarbage(const char* gc_reason) {
577   // Since we are ignoring the return value, the exact choice of space does
578   // not matter, so long as we do not specify NEW_SPACE, which would not
579   // cause a full GC.
580   // Major GC would invoke weak handle callbacks on weakly reachable
581   // handles, but won't collect weakly reachable objects until next
582   // major GC.  Therefore if we collect aggressively and weak handle callback
583   // has been invoked, we rerun major GC to release objects which become
584   // garbage.
585   // Note: as weak callbacks can execute arbitrary code, we cannot
586   // hope that eventually there will be no weak callbacks invocations.
587   // Therefore stop recollecting after several attempts.
588   mark_compact_collector()->SetFlags(kMakeHeapIterableMask |
589                                      kReduceMemoryFootprintMask);
590   isolate_->compilation_cache()->Clear();
591   const int kMaxNumberOfAttempts = 7;
592   for (int attempt = 0; attempt < kMaxNumberOfAttempts; attempt++) {
593     if (!CollectGarbage(OLD_POINTER_SPACE, MARK_COMPACTOR, gc_reason, NULL)) {
594       break;
595     }
596   }
597   mark_compact_collector()->SetFlags(kNoGCFlags);
598   new_space_.Shrink();
599   UncommitFromSpace();
600   incremental_marking()->UncommitMarkingDeque();
601 }
602
603
604 bool Heap::CollectGarbage(AllocationSpace space,
605                           GarbageCollector collector,
606                           const char* gc_reason,
607                           const char* collector_reason) {
608   // The VM is in the GC state until exiting this function.
609   VMState<GC> state(isolate_);
610
611 #ifdef DEBUG
612   // Reset the allocation timeout to the GC interval, but make sure to
613   // allow at least a few allocations after a collection. The reason
614   // for this is that we have a lot of allocation sequences and we
615   // assume that a garbage collection will allow the subsequent
616   // allocation attempts to go through.
617   allocation_timeout_ = Max(6, FLAG_gc_interval);
618 #endif
619
620   if (collector == SCAVENGER && !incremental_marking()->IsStopped()) {
621     if (FLAG_trace_incremental_marking) {
622       PrintF("[IncrementalMarking] Scavenge during marking.\n");
623     }
624   }
625
626   if (collector == MARK_COMPACTOR &&
627       !mark_compact_collector()->abort_incremental_marking() &&
628       !incremental_marking()->IsStopped() &&
629       !incremental_marking()->should_hurry() &&
630       FLAG_incremental_marking_steps) {
631     // Make progress in incremental marking.
632     const intptr_t kStepSizeWhenDelayedByScavenge = 1 * MB;
633     incremental_marking()->Step(kStepSizeWhenDelayedByScavenge,
634                                 IncrementalMarking::NO_GC_VIA_STACK_GUARD);
635     if (!incremental_marking()->IsComplete()) {
636       if (FLAG_trace_incremental_marking) {
637         PrintF("[IncrementalMarking] Delaying MarkSweep.\n");
638       }
639       collector = SCAVENGER;
640       collector_reason = "incremental marking delaying mark-sweep";
641     }
642   }
643
644   bool next_gc_likely_to_collect_more = false;
645
646   { GCTracer tracer(this, gc_reason, collector_reason);
647     GarbageCollectionPrologue();
648     // The GC count was incremented in the prologue.  Tell the tracer about
649     // it.
650     tracer.set_gc_count(gc_count_);
651
652     // Tell the tracer which collector we've selected.
653     tracer.set_collector(collector);
654
655     {
656       HistogramTimerScope histogram_timer_scope(
657           (collector == SCAVENGER) ? isolate_->counters()->gc_scavenger()
658                                    : isolate_->counters()->gc_compactor());
659       next_gc_likely_to_collect_more =
660           PerformGarbageCollection(collector, &tracer);
661     }
662
663     GarbageCollectionEpilogue();
664   }
665
666   // Start incremental marking for the next cycle. The heap snapshot
667   // generator needs incremental marking to stay off after it aborted.
668   if (!mark_compact_collector()->abort_incremental_marking() &&
669       incremental_marking()->IsStopped() &&
670       incremental_marking()->WorthActivating() &&
671       NextGCIsLikelyToBeFull()) {
672     incremental_marking()->Start();
673   }
674
675   return next_gc_likely_to_collect_more;
676 }
677
678
679 void Heap::PerformScavenge() {
680   GCTracer tracer(this, NULL, NULL);
681   if (incremental_marking()->IsStopped()) {
682     PerformGarbageCollection(SCAVENGER, &tracer);
683   } else {
684     PerformGarbageCollection(MARK_COMPACTOR, &tracer);
685   }
686 }
687
688
689 void Heap::MoveElements(FixedArray* array,
690                         int dst_index,
691                         int src_index,
692                         int len) {
693   if (len == 0) return;
694
695   ASSERT(array->map() != HEAP->fixed_cow_array_map());
696   Object** dst_objects = array->data_start() + dst_index;
697   OS::MemMove(dst_objects,
698               array->data_start() + src_index,
699               len * kPointerSize);
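  // OS::MemMove copies the raw words without a write barrier, so old-to-new
  // pointers that were moved must be recorded in the store buffer below, and
  // incremental marking must be notified of the rewritten slots.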
700   if (!InNewSpace(array)) {
701     for (int i = 0; i < len; i++) {
702       // TODO(hpayer): check store buffer for entries
703       if (InNewSpace(dst_objects[i])) {
704         RecordWrite(array->address(), array->OffsetOfElementAt(dst_index + i));
705       }
706     }
707   }
708   incremental_marking()->RecordWrites(array);
709 }
710
711
712 #ifdef VERIFY_HEAP
713 // Helper class for verifying the string table.
714 class StringTableVerifier : public ObjectVisitor {
715  public:
716   void VisitPointers(Object** start, Object** end) {
717     // Visit all HeapObject pointers in [start, end).
718     for (Object** p = start; p < end; p++) {
719       if ((*p)->IsHeapObject()) {
720         // Check that the string is actually internalized.
721         CHECK((*p)->IsTheHole() || (*p)->IsUndefined() ||
722               (*p)->IsInternalizedString());
723       }
724     }
725   }
726 };
727
728
729 static void VerifyStringTable() {
730   StringTableVerifier verifier;
731   HEAP->string_table()->IterateElements(&verifier);
732 }
733 #endif  // VERIFY_HEAP
734
735
736 static bool AbortIncrementalMarkingAndCollectGarbage(
737     Heap* heap,
738     AllocationSpace space,
739     const char* gc_reason = NULL) {
740   heap->mark_compact_collector()->SetFlags(Heap::kAbortIncrementalMarkingMask);
741   bool result = heap->CollectGarbage(space, gc_reason);
742   heap->mark_compact_collector()->SetFlags(Heap::kNoGCFlags);
743   return result;
744 }
745
746
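// Reserves sizes[space] bytes in each space (used, e.g., when deserializing a
// snapshot) and returns the reserved addresses in locations_out.  Each
// reservation is marked with a free list node so the heap stays parseable if
// a GC happens before deserialization; on failure the offending space is
// collected and the reservations are retried, up to kThreshold times.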
747 void Heap::ReserveSpace(
748     int *sizes,
749     Address *locations_out) {
750   bool gc_performed = true;
751   int counter = 0;
752   static const int kThreshold = 20;
753   while (gc_performed && counter++ < kThreshold) {
754     gc_performed = false;
755     ASSERT(NEW_SPACE == FIRST_PAGED_SPACE - 1);
756     for (int space = NEW_SPACE; space <= LAST_PAGED_SPACE; space++) {
757       if (sizes[space] != 0) {
758         MaybeObject* allocation;
759         if (space == NEW_SPACE) {
760           allocation = new_space()->AllocateRaw(sizes[space]);
761         } else {
762           allocation = paged_space(space)->AllocateRaw(sizes[space]);
763         }
764         FreeListNode* node;
765         if (!allocation->To<FreeListNode>(&node)) {
766           if (space == NEW_SPACE) {
767             Heap::CollectGarbage(NEW_SPACE,
768                                  "failed to reserve space in the new space");
769           } else {
770             AbortIncrementalMarkingAndCollectGarbage(
771                 this,
772                 static_cast<AllocationSpace>(space),
773                 "failed to reserve space in paged space");
774           }
775           gc_performed = true;
776           break;
777         } else {
778           // Mark with a free list node, in case we have a GC before
779           // deserializing.
780           node->set_size(this, sizes[space]);
781           locations_out[space] = node->address();
782         }
783       }
784     }
785   }
786
787   if (gc_performed) {
788     // Failed to reserve the space after several attempts.
789     V8::FatalProcessOutOfMemory("Heap::ReserveSpace");
790   }
791 }
792
793
794 void Heap::EnsureFromSpaceIsCommitted() {
795   if (new_space_.CommitFromSpaceIfNeeded()) return;
796
797   // Committing memory to from space failed.
798   // Memory is exhausted and we will die.
799   V8::FatalProcessOutOfMemory("Committing semi space failed.");
800 }
801
802
803 void Heap::ClearJSFunctionResultCaches() {
804   if (isolate_->bootstrapper()->IsActive()) return;
805
806   Object* context = native_contexts_list_;
807   while (!context->IsUndefined()) {
808     // Get the caches for this context. GC can happen when the context
809     // is not fully initialized, so the caches can be undefined.
810     Object* caches_or_undefined =
811         Context::cast(context)->get(Context::JSFUNCTION_RESULT_CACHES_INDEX);
812     if (!caches_or_undefined->IsUndefined()) {
813       FixedArray* caches = FixedArray::cast(caches_or_undefined);
814       // Clear the caches:
815       int length = caches->length();
816       for (int i = 0; i < length; i++) {
817         JSFunctionResultCache::cast(caches->get(i))->Clear();
818       }
819     }
820     // Get the next context:
821     context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
822   }
823 }
824
825
826 void Heap::ClearNormalizedMapCaches() {
827   if (isolate_->bootstrapper()->IsActive() &&
828       !incremental_marking()->IsMarking()) {
829     return;
830   }
831
832   Object* context = native_contexts_list_;
833   while (!context->IsUndefined()) {
834     // GC can happen when the context is not fully initialized,
835     // so the cache can be undefined.
836     Object* cache =
837         Context::cast(context)->get(Context::NORMALIZED_MAP_CACHE_INDEX);
838     if (!cache->IsUndefined()) {
839       NormalizedMapCache::cast(cache)->Clear();
840     }
841     context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
842   }
843 }
844
845
846 void Heap::UpdateSurvivalRateTrend(int start_new_space_size) {
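  // The survival rate is the percentage of the new space that survived the
  // last scavenge; for example, 2 MB of survivors out of a 16 MB new space
  // gives a rate of 12.5.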
847   double survival_rate =
848       (static_cast<double>(young_survivors_after_last_gc_) * 100) /
849       start_new_space_size;
850
851   if (survival_rate > kYoungSurvivalRateHighThreshold) {
852     high_survival_rate_period_length_++;
853   } else {
854     high_survival_rate_period_length_ = 0;
855   }
856
857   if (survival_rate < kYoungSurvivalRateLowThreshold) {
858     low_survival_rate_period_length_++;
859   } else {
860     low_survival_rate_period_length_ = 0;
861   }
862
863   double survival_rate_diff = survival_rate_ - survival_rate;
864
865   if (survival_rate_diff > kYoungSurvivalRateAllowedDeviation) {
866     set_survival_rate_trend(DECREASING);
867   } else if (survival_rate_diff < -kYoungSurvivalRateAllowedDeviation) {
868     set_survival_rate_trend(INCREASING);
869   } else {
870     set_survival_rate_trend(STABLE);
871   }
872
873   survival_rate_ = survival_rate;
874 }
875
876 bool Heap::PerformGarbageCollection(GarbageCollector collector,
877                                     GCTracer* tracer) {
878   bool next_gc_likely_to_collect_more = false;
879
880   if (collector != SCAVENGER) {
881     PROFILE(isolate_, CodeMovingGCEvent());
882   }
883
884 #ifdef VERIFY_HEAP
885   if (FLAG_verify_heap) {
886     VerifyStringTable();
887   }
888 #endif
889
890   GCType gc_type =
891       collector == MARK_COMPACTOR ? kGCTypeMarkSweepCompact : kGCTypeScavenge;
892
893   {
894     GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
895     VMState<EXTERNAL> state(isolate_);
896     CallGCPrologueCallbacks(gc_type, kNoGCCallbackFlags);
897   }
898
899   EnsureFromSpaceIsCommitted();
900
901   int start_new_space_size = Heap::new_space()->SizeAsInt();
902
903   if (IsHighSurvivalRate()) {
904     // We speed up the incremental marker if it is running so that it
905     // does not fall behind the rate of promotion, which would cause a
906     // constantly growing old space.
907     incremental_marking()->NotifyOfHighPromotionRate();
908   }
909
910   if (collector == MARK_COMPACTOR) {
911     // Perform mark-sweep with optional compaction.
912     MarkCompact(tracer);
913     sweep_generation_++;
914
915     UpdateSurvivalRateTrend(start_new_space_size);
916
917     size_of_old_gen_at_last_old_space_gc_ = PromotedSpaceSizeOfObjects();
918
919     old_gen_promotion_limit_ =
920         OldGenPromotionLimit(size_of_old_gen_at_last_old_space_gc_);
921     old_gen_allocation_limit_ =
922         OldGenAllocationLimit(size_of_old_gen_at_last_old_space_gc_);
923
924     old_gen_exhausted_ = false;
925   } else {
926     tracer_ = tracer;
927     Scavenge();
928     tracer_ = NULL;
929
930     UpdateSurvivalRateTrend(start_new_space_size);
931   }
932
933   if (!new_space_high_promotion_mode_active_ &&
934       new_space_.Capacity() == new_space_.MaximumCapacity() &&
935       IsStableOrIncreasingSurvivalTrend() &&
936       IsHighSurvivalRate()) {
937     // Stable high survival rates even though the young generation is at
938     // maximum capacity indicate that most objects will be promoted.
939     // To decrease scavenger pauses and final mark-sweep pauses, we
940     // have to limit the maximal capacity of the young generation.
941     new_space_high_promotion_mode_active_ = true;
942     if (FLAG_trace_gc) {
943       PrintPID("Limited new space size due to high promotion rate: %d MB\n",
944                new_space_.InitialCapacity() / MB);
945     }
946     // Support for global pre-tenuring uses the high promotion mode as a
947     // heuristic indicator of whether to pretenure or not, so we trigger
948     // deoptimization here to take advantage of pre-tenuring as soon as
949     // possible.
950     if (FLAG_pretenure_literals) {
951       isolate_->stack_guard()->FullDeopt();
952     }
953   } else if (new_space_high_promotion_mode_active_ &&
954       IsStableOrDecreasingSurvivalTrend() &&
955       IsLowSurvivalRate()) {
956     // Decreasing low survival rates might indicate that the above high
957     // promotion mode is over and we should allow the young generation
958     // to grow again.
959     new_space_high_promotion_mode_active_ = false;
960     if (FLAG_trace_gc) {
961       PrintPID("Unlimited new space size due to low promotion rate: %d MB\n",
962                new_space_.MaximumCapacity() / MB);
963     }
964     // Trigger deoptimization here to turn off pre-tenuring as soon as
965     // possible.
966     if (FLAG_pretenure_literals) {
967       isolate_->stack_guard()->FullDeopt();
968     }
969   }
970
971   if (new_space_high_promotion_mode_active_ &&
972       new_space_.Capacity() > new_space_.InitialCapacity()) {
973     new_space_.Shrink();
974   }
975
976   isolate_->counters()->objs_since_last_young()->Set(0);
977
978   // Callbacks that fire after this point might trigger nested GCs and
979   // restart incremental marking, so the assertion can't be moved down.
980   ASSERT(collector == SCAVENGER || incremental_marking()->IsStopped());
981
982   gc_post_processing_depth_++;
983   { DisableAssertNoAllocation allow_allocation;
984     GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
985     next_gc_likely_to_collect_more =
986         isolate_->global_handles()->PostGarbageCollectionProcessing(
987             collector, tracer);
988   }
989   gc_post_processing_depth_--;
990
991   // Update relocatables.
992   Relocatable::PostGarbageCollectionProcessing();
993
994   if (collector == MARK_COMPACTOR) {
995     // Register the amount of external allocated memory.
996     amount_of_external_allocated_memory_at_last_global_gc_ =
997         amount_of_external_allocated_memory_;
998   }
999
1000   {
1001     GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
1002     VMState<EXTERNAL> state(isolate_);
1003     CallGCEpilogueCallbacks(gc_type);
1004   }
1005
1006 #ifdef VERIFY_HEAP
1007   if (FLAG_verify_heap) {
1008     VerifyStringTable();
1009   }
1010 #endif
1011
1012   return next_gc_likely_to_collect_more;
1013 }
1014
1015
1016 void Heap::CallGCPrologueCallbacks(GCType gc_type, GCCallbackFlags flags) {
1017   if (gc_type == kGCTypeMarkSweepCompact && global_gc_prologue_callback_) {
1018     global_gc_prologue_callback_();
1019   }
1020   for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
1021     if (gc_type & gc_prologue_callbacks_[i].gc_type) {
1022       gc_prologue_callbacks_[i].callback(gc_type, flags);
1023     }
1024   }
1025 }
1026
1027
1028 void Heap::CallGCEpilogueCallbacks(GCType gc_type) {
1029   for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
1030     if (gc_type & gc_epilogue_callbacks_[i].gc_type) {
1031       gc_epilogue_callbacks_[i].callback(gc_type, kNoGCCallbackFlags);
1032     }
1033   }
1034   if (gc_type == kGCTypeMarkSweepCompact && global_gc_epilogue_callback_) {
1035     global_gc_epilogue_callback_();
1036   }
1037 }
1038
1039
1040 void Heap::MarkCompact(GCTracer* tracer) {
1041   gc_state_ = MARK_COMPACT;
1042   LOG(isolate_, ResourceEvent("markcompact", "begin"));
1043
1044   mark_compact_collector_.Prepare(tracer);
1045
1046   ms_count_++;
1047   tracer->set_full_gc_count(ms_count_);
1048
1049   MarkCompactPrologue();
1050
1051   mark_compact_collector_.CollectGarbage();
1052
1053   LOG(isolate_, ResourceEvent("markcompact", "end"));
1054
1055   gc_state_ = NOT_IN_GC;
1056
1057   isolate_->counters()->objs_since_last_full()->Set(0);
1058
1059   contexts_disposed_ = 0;
1060
1061   flush_monomorphic_ics_ = false;
1062 }
1063
1064
1065 void Heap::MarkCompactPrologue() {
1066   // At any old GC clear the keyed lookup cache to enable collection of unused
1067   // maps.
1068   isolate_->keyed_lookup_cache()->Clear();
1069   isolate_->context_slot_cache()->Clear();
1070   isolate_->descriptor_lookup_cache()->Clear();
1071   RegExpResultsCache::Clear(string_split_cache());
1072   RegExpResultsCache::Clear(regexp_multiple_cache());
1073
1074   isolate_->compilation_cache()->MarkCompactPrologue();
1075
1076   CompletelyClearInstanceofCache();
1077
1078   FlushNumberStringCache();
1079   if (FLAG_cleanup_code_caches_at_gc) {
1080     polymorphic_code_cache()->set_cache(undefined_value());
1081   }
1082
1083   ClearNormalizedMapCaches();
1084 }
1085
1086
1087 Object* Heap::FindCodeObject(Address a) {
1088   return isolate()->inner_pointer_to_code_cache()->
1089       GcSafeFindCodeForInnerPointer(a);
1090 }
1091
1092
1093 // Helper class for copying HeapObjects
1094 class ScavengeVisitor: public ObjectVisitor {
1095  public:
1096   explicit ScavengeVisitor(Heap* heap) : heap_(heap) {}
1097
1098   void VisitPointer(Object** p) { ScavengePointer(p); }
1099
1100   void VisitPointers(Object** start, Object** end) {
1101     // Copy all HeapObject pointers in [start, end)
1102     for (Object** p = start; p < end; p++) ScavengePointer(p);
1103   }
1104
1105  private:
1106   void ScavengePointer(Object** p) {
1107     Object* object = *p;
1108     if (!heap_->InNewSpace(object)) return;
1109     Heap::ScavengeObject(reinterpret_cast<HeapObject**>(p),
1110                          reinterpret_cast<HeapObject*>(object));
1111   }
1112
1113   Heap* heap_;
1114 };
1115
1116
1117 #ifdef VERIFY_HEAP
1118 // Visitor class to verify pointers in code or data space do not point into
1119 // new space.
1120 class VerifyNonPointerSpacePointersVisitor: public ObjectVisitor {
1121  public:
1122   void VisitPointers(Object** start, Object**end) {
1123     for (Object** current = start; current < end; current++) {
1124       if ((*current)->IsHeapObject()) {
1125         CHECK(!HEAP->InNewSpace(HeapObject::cast(*current)));
1126       }
1127     }
1128   }
1129 };
1130
1131
1132 static void VerifyNonPointerSpacePointers() {
1133   // Verify that there are no pointers to new space in spaces where we
1134   // do not expect them.
1135   VerifyNonPointerSpacePointersVisitor v;
1136   HeapObjectIterator code_it(HEAP->code_space());
1137   for (HeapObject* object = code_it.Next();
1138        object != NULL; object = code_it.Next())
1139     object->Iterate(&v);
1140
1141   // The old data space is normally swept conservatively, so the iterator
1142   // does not work there and we normally skip the next check.
1143   if (!HEAP->old_data_space()->was_swept_conservatively()) {
1144     HeapObjectIterator data_it(HEAP->old_data_space());
1145     for (HeapObject* object = data_it.Next();
1146          object != NULL; object = data_it.Next())
1147       object->Iterate(&v);
1148   }
1149 }
1150 #endif  // VERIFY_HEAP
1151
1152
1153 void Heap::CheckNewSpaceExpansionCriteria() {
1154   if (new_space_.Capacity() < new_space_.MaximumCapacity() &&
1155       survived_since_last_expansion_ > new_space_.Capacity() &&
1156       !new_space_high_promotion_mode_active_) {
1157     // Grow the size of new space if there is room to grow, enough data
1158     // has survived scavenge since the last expansion and we are not in
1159     // high promotion mode.
1160     new_space_.Grow();
1161     survived_since_last_expansion_ = 0;
1162   }
1163 }
1164
1165
1166 static bool IsUnscavengedHeapObject(Heap* heap, Object** p) {
1167   return heap->InNewSpace(*p) &&
1168       !HeapObject::cast(*p)->map_word().IsForwardingAddress();
1169 }
1170
1171
1172 void Heap::ScavengeStoreBufferCallback(
1173     Heap* heap,
1174     MemoryChunk* page,
1175     StoreBufferEvent event) {
1176   heap->store_buffer_rebuilder_.Callback(page, event);
1177 }
1178
1179
1180 void StoreBufferRebuilder::Callback(MemoryChunk* page, StoreBufferEvent event) {
1181   if (event == kStoreBufferStartScanningPagesEvent) {
1182     start_of_current_page_ = NULL;
1183     current_page_ = NULL;
1184   } else if (event == kStoreBufferScanningPageEvent) {
1185     if (current_page_ != NULL) {
1186       // If this page already overflowed the store buffer during this iteration.
1187       if (current_page_->scan_on_scavenge()) {
1188         // Then we should wipe out the entries that have been added for it.
1189         store_buffer_->SetTop(start_of_current_page_);
1190       } else if (store_buffer_->Top() - start_of_current_page_ >=
1191                  (store_buffer_->Limit() - store_buffer_->Top()) >> 2) {
1192         // Did we find too many pointers in the previous page?  The heuristic is
1193         // that no page can take more than 1/5 the remaining slots in the store
1194         // buffer.
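        // (E >= R/4, where E is the number of entries added for this page and
        // R the slots still free, is equivalent to E >= (E + R)/5, i.e. the
        // page used at least 1/5 of the slots that were free when its scan
        // started.)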
1195         current_page_->set_scan_on_scavenge(true);
1196         store_buffer_->SetTop(start_of_current_page_);
1197       } else {
1198         // In this case the page we scanned took a reasonable number of slots in
1199         // the store buffer.  It has now been rehabilitated and is no longer
1200         // marked scan_on_scavenge.
1201         ASSERT(!current_page_->scan_on_scavenge());
1202       }
1203     }
1204     start_of_current_page_ = store_buffer_->Top();
1205     current_page_ = page;
1206   } else if (event == kStoreBufferFullEvent) {
1207     // The current page overflowed the store buffer again.  Wipe out its entries
1208     // in the store buffer and mark it scan-on-scavenge again.  This may happen
1209     // several times while scanning.
1210     if (current_page_ == NULL) {
1211       // Store Buffer overflowed while scanning promoted objects.  These are not
1212       // in any particular page, though they are likely to be clustered by the
1213       // allocation routines.
1214       store_buffer_->EnsureSpace(StoreBuffer::kStoreBufferSize);
1215     } else {
1216       // Store Buffer overflowed while scanning a particular old space page for
1217       // pointers to new space.
1218       ASSERT(current_page_ == page);
1219       ASSERT(page != NULL);
1220       current_page_->set_scan_on_scavenge(true);
1221       ASSERT(start_of_current_page_ != store_buffer_->Top());
1222       store_buffer_->SetTop(start_of_current_page_);
1223     }
1224   } else {
1225     UNREACHABLE();
1226   }
1227 }
1228
1229
1230 void PromotionQueue::Initialize() {
1231   // Assumes that a NewSpacePage exactly fits a number of promotion queue
1232   // entries (where each is a pair of intptr_t). This allows us to simplify
1233   // the test for when to switch pages.
1234   ASSERT((Page::kPageSize - MemoryChunk::kBodyOffset) % (2 * kPointerSize)
1235          == 0);
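  // The queue occupies the unused top of to-space: front_ and rear_ start at
  // ToSpaceEnd() and grow downward toward limit_ at ToSpaceStart().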
1236   limit_ = reinterpret_cast<intptr_t*>(heap_->new_space()->ToSpaceStart());
1237   front_ = rear_ =
1238       reinterpret_cast<intptr_t*>(heap_->new_space()->ToSpaceEnd());
1239   emergency_stack_ = NULL;
1240   guard_ = false;
1241 }
1242
1243
1244 void PromotionQueue::RelocateQueueHead() {
1245   ASSERT(emergency_stack_ == NULL);
1246
1247   Page* p = Page::FromAllocationTop(reinterpret_cast<Address>(rear_));
1248   intptr_t* head_start = rear_;
1249   intptr_t* head_end =
1250       Min(front_, reinterpret_cast<intptr_t*>(p->area_end()));
1251
1252   int entries_count =
1253       static_cast<int>(head_end - head_start) / kEntrySizeInWords;
1254
1255   emergency_stack_ = new List<Entry>(2 * entries_count);
1256
1257   while (head_start != head_end) {
1258     int size = static_cast<int>(*(head_start++));
1259     HeapObject* obj = reinterpret_cast<HeapObject*>(*(head_start++));
1260     emergency_stack_->Add(Entry(obj, size));
1261   }
1262   rear_ = head_end;
1263 }
1264
1265
1266 class ScavengeWeakObjectRetainer : public WeakObjectRetainer {
1267  public:
1268   explicit ScavengeWeakObjectRetainer(Heap* heap) : heap_(heap) { }
1269
1270   virtual Object* RetainAs(Object* object) {
1271     if (!heap_->InFromSpace(object)) {
1272       return object;
1273     }
1274
1275     MapWord map_word = HeapObject::cast(object)->map_word();
1276     if (map_word.IsForwardingAddress()) {
1277       return map_word.ToForwardingAddress();
1278     }
1279     return NULL;
1280   }
1281
1282  private:
1283   Heap* heap_;
1284 };
1285
1286
1287 void Heap::Scavenge() {
1288   RelocationLock relocation_lock(this);
1289
1290 #ifdef VERIFY_HEAP
1291   if (FLAG_verify_heap) VerifyNonPointerSpacePointers();
1292 #endif
1293
1294   gc_state_ = SCAVENGE;
1295
1296   // Implements Cheney's copying algorithm
1297   LOG(isolate_, ResourceEvent("scavenge", "begin"));
1298
1299   // Clear descriptor cache.
1300   isolate_->descriptor_lookup_cache()->Clear();
1301
1302   // Used for updating survived_since_last_expansion_ at function end.
1303   intptr_t survived_watermark = PromotedSpaceSizeOfObjects();
1304
1305   CheckNewSpaceExpansionCriteria();
1306
1307   SelectScavengingVisitorsTable();
1308
1309   incremental_marking()->PrepareForScavenge();
1310
1311   paged_space(OLD_DATA_SPACE)->EnsureSweeperProgress(new_space_.Size());
1312   paged_space(OLD_POINTER_SPACE)->EnsureSweeperProgress(new_space_.Size());
1313
1314   // Flip the semispaces.  After flipping, to space is empty, from space has
1315   // live objects.
1316   new_space_.Flip();
1317   new_space_.ResetAllocationInfo();
1318
1319   // We need to sweep newly copied objects which can be either in the
1320   // to space or promoted to the old generation.  For to-space
1321   // objects, we treat the bottom of the to space as a queue.  Newly
1322   // copied and unswept objects lie between a 'front' mark and the
1323   // allocation pointer.
1324   //
1325   // Promoted objects can go into various old-generation spaces, and
1326   // can be allocated internally in the spaces (from the free list).
1327   // We treat the top of the to space as a queue of addresses of
1328   // promoted objects.  The addresses of newly promoted and unswept
1329   // objects lie between a 'front' mark and a 'rear' mark that is
1330   // updated as a side effect of promoting an object.
1331   //
1332   // There is guaranteed to be enough room at the top of the to space
1333   // for the addresses of promoted objects: every object promoted
1334   // frees up its size in bytes from the top of the new space, and
1335   // objects are at least one pointer in size.
1336   Address new_space_front = new_space_.ToSpaceStart();
1337   promotion_queue_.Initialize();
1338
1339 #ifdef DEBUG
1340   store_buffer()->Clean();
1341 #endif
1342
1343   ScavengeVisitor scavenge_visitor(this);
1344   // Copy roots.
1345   IterateRoots(&scavenge_visitor, VISIT_ALL_IN_SCAVENGE);
1346
1347   // Copy objects reachable from the old generation.
1348   {
1349     StoreBufferRebuildScope scope(this,
1350                                   store_buffer(),
1351                                   &ScavengeStoreBufferCallback);
1352     store_buffer()->IteratePointersToNewSpace(&ScavengeObject);
1353   }
1354
1355   // Copy objects reachable from cells by scavenging cell values directly.
1356   HeapObjectIterator cell_iterator(cell_space_);
1357   for (HeapObject* heap_object = cell_iterator.Next();
1358        heap_object != NULL;
1359        heap_object = cell_iterator.Next()) {
1360     if (heap_object->IsJSGlobalPropertyCell()) {
1361       JSGlobalPropertyCell* cell = JSGlobalPropertyCell::cast(heap_object);
1362       Address value_address = cell->ValueAddress();
1363       scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(value_address));
1364     }
1365   }
1366
1367   // Copy objects reachable from the code flushing candidates list.
1368   MarkCompactCollector* collector = mark_compact_collector();
1369   if (collector->is_code_flushing_enabled()) {
1370     collector->code_flusher()->IteratePointersToFromSpace(&scavenge_visitor);
1371   }
1372
1373   // Scavenge object reachable from the native contexts list directly.
1374   scavenge_visitor.VisitPointer(BitCast<Object**>(&native_contexts_list_));
1375
1376   new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
1377
1378   while (isolate()->global_handles()->IterateObjectGroups(
1379       &scavenge_visitor, &IsUnscavengedHeapObject)) {
1380     new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
1381   }
1382   isolate()->global_handles()->RemoveObjectGroups();
1383   isolate()->global_handles()->RemoveImplicitRefGroups();
1384
1385   isolate_->global_handles()->IdentifyNewSpaceWeakIndependentHandles(
1386       &IsUnscavengedHeapObject);
1387   isolate_->global_handles()->IterateNewSpaceWeakIndependentRoots(
1388       &scavenge_visitor);
1389   new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
1390
1391   UpdateNewSpaceReferencesInExternalStringTable(
1392       &UpdateNewSpaceReferenceInExternalStringTableEntry);
1393
1394   error_object_list_.UpdateReferencesInNewSpace(this);
1395
1396   promotion_queue_.Destroy();
1397
1398   if (!FLAG_watch_ic_patching) {
1399     isolate()->runtime_profiler()->UpdateSamplesAfterScavenge();
1400   }
1401   incremental_marking()->UpdateMarkingDequeAfterScavenge();
1402
1403   ScavengeWeakObjectRetainer weak_object_retainer(this);
1404   ProcessWeakReferences(&weak_object_retainer);
1405
1406   ASSERT(new_space_front == new_space_.top());
1407
1408   // Set age mark.
1409   new_space_.set_age_mark(new_space_.top());
1410
1411   new_space_.LowerInlineAllocationLimit(
1412       new_space_.inline_allocation_limit_step());
1413
1414   // Update how much has survived scavenge.
1415   IncrementYoungSurvivorsCounter(static_cast<int>(
1416       (PromotedSpaceSizeOfObjects() - survived_watermark) + new_space_.Size()));
1417
1418   LOG(isolate_, ResourceEvent("scavenge", "end"));
1419
1420   gc_state_ = NOT_IN_GC;
1421
1422   scavenges_since_last_idle_round_++;
1423 }
1424
1425
1426 String* Heap::UpdateNewSpaceReferenceInExternalStringTableEntry(Heap* heap,
1427                                                                 Object** p) {
1428   MapWord first_word = HeapObject::cast(*p)->map_word();
1429
1430   if (!first_word.IsForwardingAddress()) {
1431     // An unreachable external string can be finalized.
1432     heap->FinalizeExternalString(String::cast(*p));
1433     return NULL;
1434   }
1435
1436   // String is still reachable.
1437   return String::cast(first_word.ToForwardingAddress());
1438 }
1439
1440
1441 void Heap::UpdateNewSpaceReferencesInExternalStringTable(
1442     ExternalStringTableUpdaterCallback updater_func) {
1443 #ifdef VERIFY_HEAP
1444   if (FLAG_verify_heap) {
1445     external_string_table_.Verify();
1446   }
1447 #endif
1448
1449   if (external_string_table_.new_space_strings_.is_empty()) return;
1450
1451   Object** start = &external_string_table_.new_space_strings_[0];
1452   Object** end = start + external_string_table_.new_space_strings_.length();
1453   Object** last = start;
1454
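  // Surviving new-space strings are compacted toward the front of the list via
  // |last|; promoted strings are moved to the old-space list and the new-space
  // list is shrunk afterwards.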
1455   for (Object** p = start; p < end; ++p) {
1456     ASSERT(InFromSpace(*p));
1457     String* target = updater_func(this, p);
1458
1459     if (target == NULL) continue;
1460
1461     ASSERT(target->IsExternalString());
1462
1463     if (InNewSpace(target)) {
1464       // String is still in new space.  Update the table entry.
1465       *last = target;
1466       ++last;
1467     } else {
1468       // String got promoted.  Move it to the old string list.
1469       external_string_table_.AddOldString(target);
1470     }
1471   }
1472
1473   ASSERT(last <= end);
1474   external_string_table_.ShrinkNewStrings(static_cast<int>(last - start));
1475 }
1476
1477
1478 void Heap::UpdateReferencesInExternalStringTable(
1479     ExternalStringTableUpdaterCallback updater_func) {
1480
1481   // Update old space string references.
1482   if (external_string_table_.old_space_strings_.length() > 0) {
1483     Object** start = &external_string_table_.old_space_strings_[0];
1484     Object** end = start + external_string_table_.old_space_strings_.length();
1485     for (Object** p = start; p < end; ++p) *p = updater_func(this, p);
1486   }
1487
1488   UpdateNewSpaceReferencesInExternalStringTable(updater_func);
1489 }
1490
1491
1492 static Object* ProcessFunctionWeakReferences(Heap* heap,
1493                                              Object* function,
1494                                              WeakObjectRetainer* retainer,
1495                                              bool record_slots) {
1496   Object* undefined = heap->undefined_value();
1497   Object* head = undefined;
1498   JSFunction* tail = NULL;
1499   Object* candidate = function;
1500   while (candidate != undefined) {
1501     // Check whether to keep the candidate in the list.
1502     JSFunction* candidate_function = reinterpret_cast<JSFunction*>(candidate);
1503     Object* retain = retainer->RetainAs(candidate);
1504     if (retain != NULL) {
1505       if (head == undefined) {
1506         // First element in the list.
1507         head = retain;
1508       } else {
1509         // Subsequent elements in the list.
1510         ASSERT(tail != NULL);
1511         tail->set_next_function_link(retain);
1512         if (record_slots) {
1513           Object** next_function =
1514               HeapObject::RawField(tail, JSFunction::kNextFunctionLinkOffset);
1515           heap->mark_compact_collector()->RecordSlot(
1516               next_function, next_function, retain);
1517         }
1518       }
1519       // Retained function is new tail.
1520       candidate_function = reinterpret_cast<JSFunction*>(retain);
1521       tail = candidate_function;
1522
1523       ASSERT(retain->IsUndefined() || retain->IsJSFunction());
1524
1525       if (retain == undefined) break;
1526     }
1527
1528     // Move to next element in the list.
1529     candidate = candidate_function->next_function_link();
1530   }
1531
1532   // Terminate the list if there are one or more elements.
1533   if (tail != NULL) {
1534     tail->set_next_function_link(undefined);
1535   }
1536
1537   return head;
1538 }
1539
1540
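     // Walks the weak list of native contexts (threaded through
     // NEXT_CONTEXT_LINK), dropping contexts that the retainer does not keep
     // and, for each retained context, processing its weak list of optimized
     // functions as well.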
1541 void Heap::ProcessWeakReferences(WeakObjectRetainer* retainer) {
1542   Object* undefined = undefined_value();
1543   Object* head = undefined;
1544   Context* tail = NULL;
1545   Object* candidate = native_contexts_list_;
1546
1547   // We don't record weak slots during marking or scavenges.
1548   // Instead we do it once when we complete a mark-compact cycle.
1549   // Note that the write barrier has no effect while we are in the middle of a
1550   // compacting mark-sweep cycle, so we have to record slots manually.
1551   bool record_slots =
1552       gc_state() == MARK_COMPACT &&
1553       mark_compact_collector()->is_compacting();
1554
1555   while (candidate != undefined) {
1556     // Check whether to keep the candidate in the list.
1557     Context* candidate_context = reinterpret_cast<Context*>(candidate);
1558     Object* retain = retainer->RetainAs(candidate);
1559     if (retain != NULL) {
1560       if (head == undefined) {
1561         // First element in the list.
1562         head = retain;
1563       } else {
1564         // Subsequent elements in the list.
1565         ASSERT(tail != NULL);
1566         tail->set_unchecked(this,
1567                             Context::NEXT_CONTEXT_LINK,
1568                             retain,
1569                             UPDATE_WRITE_BARRIER);
1570
1571         if (record_slots) {
1572           Object** next_context =
1573               HeapObject::RawField(
1574                   tail, FixedArray::SizeFor(Context::NEXT_CONTEXT_LINK));
1575           mark_compact_collector()->RecordSlot(
1576               next_context, next_context, retain);
1577         }
1578       }
1579       // Retained context is new tail.
1580       candidate_context = reinterpret_cast<Context*>(retain);
1581       tail = candidate_context;
1582
1583       if (retain == undefined) break;
1584
1585       // Process the weak list of optimized functions for the context.
1586       Object* function_list_head =
1587           ProcessFunctionWeakReferences(
1588               this,
1589               candidate_context->get(Context::OPTIMIZED_FUNCTIONS_LIST),
1590               retainer,
1591               record_slots);
1592       candidate_context->set_unchecked(this,
1593                                        Context::OPTIMIZED_FUNCTIONS_LIST,
1594                                        function_list_head,
1595                                        UPDATE_WRITE_BARRIER);
1596       if (record_slots) {
1597         Object** optimized_functions =
1598             HeapObject::RawField(
1599                 tail, FixedArray::SizeFor(Context::OPTIMIZED_FUNCTIONS_LIST));
1600         mark_compact_collector()->RecordSlot(
1601             optimized_functions, optimized_functions, function_list_head);
1602       }
1603     }
1604
1605     // Move to next element in the list.
1606     candidate = candidate_context->get(Context::NEXT_CONTEXT_LINK);
1607   }
1608
1609   // Terminate the list if there are one or more elements.
1610   if (tail != NULL) {
1611     tail->set_unchecked(this,
1612                         Context::NEXT_CONTEXT_LINK,
1613                         Heap::undefined_value(),
1614                         UPDATE_WRITE_BARRIER);
1615   }
1616
1617   // Update the head of the list of contexts.
1618   native_contexts_list_ = head;
1619 }
1620
1621
1622 void Heap::VisitExternalResources(v8::ExternalResourceVisitor* visitor) {
1623   AssertNoAllocation no_allocation;
1624
1625   // Both the external string table and the string table may contain
1626   // external strings, but neither lists them exhaustively, nor is the
1627   // intersection set empty.  Therefore we iterate over the external string
1628   // table first, ignoring internalized strings, and then over the
1629   // internalized string table.
1630
1631   class ExternalStringTableVisitorAdapter : public ObjectVisitor {
1632    public:
1633     explicit ExternalStringTableVisitorAdapter(
1634         v8::ExternalResourceVisitor* visitor) : visitor_(visitor) {}
1635     virtual void VisitPointers(Object** start, Object** end) {
1636       for (Object** p = start; p < end; p++) {
1637         // Visit non-internalized external strings,
1638         // since internalized strings are listed in the string table.
1639         if (!(*p)->IsInternalizedString()) {
1640           ASSERT((*p)->IsExternalString());
1641           visitor_->VisitExternalString(Utils::ToLocal(
1642               Handle<String>(String::cast(*p))));
1643         }
1644       }
1645     }
1646    private:
1647     v8::ExternalResourceVisitor* visitor_;
1648   } external_string_table_visitor(visitor);
1649
1650   external_string_table_.Iterate(&external_string_table_visitor);
1651
1652   class StringTableVisitorAdapter : public ObjectVisitor {
1653    public:
1654     explicit StringTableVisitorAdapter(
1655         v8::ExternalResourceVisitor* visitor) : visitor_(visitor) {}
1656     virtual void VisitPointers(Object** start, Object** end) {
1657       for (Object** p = start; p < end; p++) {
1658         if ((*p)->IsExternalString()) {
1659           ASSERT((*p)->IsInternalizedString());
1660           visitor_->VisitExternalString(Utils::ToLocal(
1661               Handle<String>(String::cast(*p))));
1662         }
1663       }
1664     }
1665    private:
1666     v8::ExternalResourceVisitor* visitor_;
1667   } string_table_visitor(visitor);
1668
1669   string_table()->IterateElements(&string_table_visitor);
1670 }
1671
1672
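     // Static visitor that scavenges every visited pointer which still
     // points into new space.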
1673 class NewSpaceScavenger : public StaticNewSpaceVisitor<NewSpaceScavenger> {
1674  public:
1675   static inline void VisitPointer(Heap* heap, Object** p) {
1676     Object* object = *p;
1677     if (!heap->InNewSpace(object)) return;
1678     Heap::ScavengeObject(reinterpret_cast<HeapObject**>(p),
1679                          reinterpret_cast<HeapObject*>(object));
1680   }
1681 };
1682
1683
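     // Drains the scavenger's work queues until both are empty: the region
     // of newly copied objects between new_space_front and new_space_.top()
     // is scanned Cheney-style, and the promotion queue of objects moved to
     // old space is processed by scanning their bodies for pointers into the
     // from semispace.  Returns the final value of new_space_front.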
1684 Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
1685                          Address new_space_front) {
1686   do {
1687     SemiSpace::AssertValidRange(new_space_front, new_space_.top());
1688     // The addresses new_space_front and new_space_.top() define a
1689     // queue of unprocessed copied objects.  Process them until the
1690     // queue is empty.
1691     while (new_space_front != new_space_.top()) {
1692       if (!NewSpacePage::IsAtEnd(new_space_front)) {
1693         HeapObject* object = HeapObject::FromAddress(new_space_front);
1694         new_space_front +=
1695           NewSpaceScavenger::IterateBody(object->map(), object);
1696       } else {
1697         new_space_front =
1698             NewSpacePage::FromLimit(new_space_front)->next_page()->area_start();
1699       }
1700     }
1701
1702     // Promote and process all the to-be-promoted objects.
1703     {
1704       StoreBufferRebuildScope scope(this,
1705                                     store_buffer(),
1706                                     &ScavengeStoreBufferCallback);
1707       while (!promotion_queue()->is_empty()) {
1708         HeapObject* target;
1709         int size;
1710         promotion_queue()->remove(&target, &size);
1711
1712         // The promoted object might already be partially visited
1713         // during old space pointer iteration. Thus we search specifically
1714         // for pointers to the from semispace instead of looking for
1715         // pointers to new space.
1716         ASSERT(!target->IsMap());
1717         IterateAndMarkPointersToFromSpace(target->address(),
1718                                           target->address() + size,
1719                                           &ScavengeObject);
1720       }
1721     }
1722
1723     // Take another spin if there are now unswept objects in new space
1724     // (there are currently no more unswept promoted objects).
1725   } while (new_space_front != new_space_.top());
1726
1727   return new_space_front;
1728 }
1729
1730
1731 STATIC_ASSERT((FixedDoubleArray::kHeaderSize & kDoubleAlignmentMask) == 0);
1732
1733
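     // The caller allocates one extra pointer-size word (see EvacuateObject
     // below).  If the object is not double aligned, a one word filler is
     // placed at its start and the object is shifted up by kPointerSize;
     // otherwise the filler is placed at the end of the allocation.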
1734 INLINE(static HeapObject* EnsureDoubleAligned(Heap* heap,
1735                                               HeapObject* object,
1736                                               int size));
1737
1738 static HeapObject* EnsureDoubleAligned(Heap* heap,
1739                                        HeapObject* object,
1740                                        int size) {
1741   if ((OffsetFrom(object->address()) & kDoubleAlignmentMask) != 0) {
1742     heap->CreateFillerObjectAt(object->address(), kPointerSize);
1743     return HeapObject::FromAddress(object->address() + kPointerSize);
1744   } else {
1745     heap->CreateFillerObjectAt(object->address() + size - kPointerSize,
1746                                kPointerSize);
1747     return object;
1748   }
1749 }
1750
1751
1752 enum LoggingAndProfiling {
1753   LOGGING_AND_PROFILING_ENABLED,
1754   LOGGING_AND_PROFILING_DISABLED
1755 };
1756
1757
1758 enum MarksHandling { TRANSFER_MARKS, IGNORE_MARKS };
1759
1760
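     // Scavenging visitor specialized on whether mark bits are transferred
     // to the new object location (needed while incremental marking is
     // active) and on whether logging/profiling hooks must be run for moved
     // objects.  Initialize() fills the per-map dispatch table with the
     // appropriate evacuation callbacks.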
1761 template<MarksHandling marks_handling,
1762          LoggingAndProfiling logging_and_profiling_mode>
1763 class ScavengingVisitor : public StaticVisitorBase {
1764  public:
1765   static void Initialize() {
1766     table_.Register(kVisitSeqOneByteString, &EvacuateSeqOneByteString);
1767     table_.Register(kVisitSeqTwoByteString, &EvacuateSeqTwoByteString);
1768     table_.Register(kVisitShortcutCandidate, &EvacuateShortcutCandidate);
1769     table_.Register(kVisitByteArray, &EvacuateByteArray);
1770     table_.Register(kVisitFixedArray, &EvacuateFixedArray);
1771     table_.Register(kVisitFixedDoubleArray, &EvacuateFixedDoubleArray);
1772
1773     table_.Register(kVisitNativeContext,
1774                     &ObjectEvacuationStrategy<POINTER_OBJECT>::
1775                         template VisitSpecialized<Context::kSize>);
1776
1777     table_.Register(kVisitConsString,
1778                     &ObjectEvacuationStrategy<POINTER_OBJECT>::
1779                         template VisitSpecialized<ConsString::kSize>);
1780
1781     table_.Register(kVisitSlicedString,
1782                     &ObjectEvacuationStrategy<POINTER_OBJECT>::
1783                         template VisitSpecialized<SlicedString::kSize>);
1784
1785     table_.Register(kVisitSymbol,
1786                     &ObjectEvacuationStrategy<POINTER_OBJECT>::
1787                         template VisitSpecialized<Symbol::kSize>);
1788
1789     table_.Register(kVisitSharedFunctionInfo,
1790                     &ObjectEvacuationStrategy<POINTER_OBJECT>::
1791                         template VisitSpecialized<SharedFunctionInfo::kSize>);
1792
1793     table_.Register(kVisitJSWeakMap,
1794                     &ObjectEvacuationStrategy<POINTER_OBJECT>::
1795                     Visit);
1796
1797     table_.Register(kVisitJSRegExp,
1798                     &ObjectEvacuationStrategy<POINTER_OBJECT>::
1799                     Visit);
1800
1801     if (marks_handling == IGNORE_MARKS) {
1802       table_.Register(kVisitJSFunction,
1803                       &ObjectEvacuationStrategy<POINTER_OBJECT>::
1804                           template VisitSpecialized<JSFunction::kSize>);
1805     } else {
1806       table_.Register(kVisitJSFunction, &EvacuateJSFunction);
1807     }
1808
1809     table_.RegisterSpecializations<ObjectEvacuationStrategy<DATA_OBJECT>,
1810                                    kVisitDataObject,
1811                                    kVisitDataObjectGeneric>();
1812
1813     table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>,
1814                                    kVisitJSObject,
1815                                    kVisitJSObjectGeneric>();
1816
1817     table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>,
1818                                    kVisitStruct,
1819                                    kVisitStructGeneric>();
1820   }
1821
1822   static VisitorDispatchTable<ScavengingCallback>* GetTable() {
1823     return &table_;
1824   }
1825
1826  private:
1827   enum ObjectContents  { DATA_OBJECT, POINTER_OBJECT };
1828   enum SizeRestriction { SMALL, UNKNOWN_SIZE };
1829
1830   static void RecordCopiedObject(Heap* heap, HeapObject* obj) {
1831     bool should_record = false;
1832 #ifdef DEBUG
1833     should_record = FLAG_heap_stats;
1834 #endif
1835     should_record = should_record || FLAG_log_gc;
1836     if (should_record) {
1837       if (heap->new_space()->Contains(obj)) {
1838         heap->new_space()->RecordAllocation(obj);
1839       } else {
1840         heap->new_space()->RecordPromotion(obj);
1841       }
1842     }
1843   }
1844
1845   // Helper function used by the scavenger to copy a source object to an
1846   // allocated target object and update the forwarding pointer in the
1847   // source object.
1848   INLINE(static void MigrateObject(Heap* heap,
1849                                    HeapObject* source,
1850                                    HeapObject* target,
1851                                    int size)) {
1852     // Copy the content of source to target.
1853     heap->CopyBlock(target->address(), source->address(), size);
1854
1855     // Set the forwarding address.
1856     source->set_map_word(MapWord::FromForwardingAddress(target));
1857
1858     if (logging_and_profiling_mode == LOGGING_AND_PROFILING_ENABLED) {
1859       // Update NewSpace stats if necessary.
1860       RecordCopiedObject(heap, target);
1861       HEAP_PROFILE(heap, ObjectMoveEvent(source->address(), target->address()));
1862       Isolate* isolate = heap->isolate();
1863       if (isolate->logger()->is_logging_code_events() ||
1864           isolate->cpu_profiler()->is_profiling()) {
1865         if (target->IsSharedFunctionInfo()) {
1866           PROFILE(isolate, SharedFunctionInfoMoveEvent(
1867               source->address(), target->address()));
1868         }
1869       }
1870     }
1871
1872     if (marks_handling == TRANSFER_MARKS) {
1873       if (Marking::TransferColor(source, target)) {
1874         MemoryChunk::IncrementLiveBytesFromGC(target->address(), size);
1875       }
1876     }
1877   }
1878
1879
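       // Copies or promotes the object.  Objects selected by
       // ShouldBePromoted are allocated in large object space, old data
       // space or old pointer space, depending on their size and contents;
       // promoted pointer objects are queued for later scanning.  If
       // promotion is not attempted or fails, the object is copied into
       // to-space, where allocation is not expected to fail.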
1880   template<ObjectContents object_contents,
1881            SizeRestriction size_restriction,
1882            int alignment>
1883   static inline void EvacuateObject(Map* map,
1884                                     HeapObject** slot,
1885                                     HeapObject* object,
1886                                     int object_size) {
1887     SLOW_ASSERT((size_restriction != SMALL) ||
1888                 (object_size <= Page::kMaxNonCodeHeapObjectSize));
1889     SLOW_ASSERT(object->Size() == object_size);
1890
1891     int allocation_size = object_size;
1892     if (alignment != kObjectAlignment) {
1893       ASSERT(alignment == kDoubleAlignment);
1894       allocation_size += kPointerSize;
1895     }
1896
1897     Heap* heap = map->GetHeap();
1898     if (heap->ShouldBePromoted(object->address(), object_size)) {
1899       MaybeObject* maybe_result;
1900
1901       if ((size_restriction != SMALL) &&
1902           (allocation_size > Page::kMaxNonCodeHeapObjectSize)) {
1903         maybe_result = heap->lo_space()->AllocateRaw(allocation_size,
1904                                                      NOT_EXECUTABLE);
1905       } else {
1906         if (object_contents == DATA_OBJECT) {
1907           maybe_result = heap->old_data_space()->AllocateRaw(allocation_size);
1908         } else {
1909           maybe_result =
1910               heap->old_pointer_space()->AllocateRaw(allocation_size);
1911         }
1912       }
1913
1914       Object* result = NULL;  // Initialization to please compiler.
1915       if (maybe_result->ToObject(&result)) {
1916         HeapObject* target = HeapObject::cast(result);
1917
1918         if (alignment != kObjectAlignment) {
1919           target = EnsureDoubleAligned(heap, target, allocation_size);
1920         }
1921
1922         // Order is important: slot might be inside of the target if target
1923         // was allocated over a dead object and slot comes from the store
1924         // buffer.
1925         *slot = target;
1926         MigrateObject(heap, object, target, object_size);
1927
1928         if (object_contents == POINTER_OBJECT) {
1929           if (map->instance_type() == JS_FUNCTION_TYPE) {
1930             heap->promotion_queue()->insert(
1931                 target, JSFunction::kNonWeakFieldsEndOffset);
1932           } else {
1933             heap->promotion_queue()->insert(target, object_size);
1934           }
1935         }
1936
1937         heap->tracer()->increment_promoted_objects_size(object_size);
1938         return;
1939       }
1940     }
1941     MaybeObject* allocation = heap->new_space()->AllocateRaw(allocation_size);
1942     heap->promotion_queue()->SetNewLimit(heap->new_space()->top());
1943     Object* result = allocation->ToObjectUnchecked();
1944     HeapObject* target = HeapObject::cast(result);
1945
1946     if (alignment != kObjectAlignment) {
1947       target = EnsureDoubleAligned(heap, target, allocation_size);
1948     }
1949
1950     // Order is important: slot might be inside of the target if target
1951     // was allocated over a dead object and slot comes from the store
1952     // buffer.
1953     *slot = target;
1954     MigrateObject(heap, object, target, object_size);
1955     return;
1956   }
1957
1958
1959   static inline void EvacuateJSFunction(Map* map,
1960                                         HeapObject** slot,
1961                                         HeapObject* object) {
1962     ObjectEvacuationStrategy<POINTER_OBJECT>::
1963         template VisitSpecialized<JSFunction::kSize>(map, slot, object);
1964
1965     HeapObject* target = *slot;
1966     MarkBit mark_bit = Marking::MarkBitFrom(target);
1967     if (Marking::IsBlack(mark_bit)) {
1968       // This object is black and it might not be rescanned by the marker.
1969       // We should explicitly record the code entry slot for compaction because
1970       // promotion queue processing (IterateAndMarkPointersToFromSpace) will
1971       // miss it as it is not HeapObject-tagged.
1972       Address code_entry_slot =
1973           target->address() + JSFunction::kCodeEntryOffset;
1974       Code* code = Code::cast(Code::GetObjectFromEntryAddress(code_entry_slot));
1975       map->GetHeap()->mark_compact_collector()->
1976           RecordCodeEntrySlot(code_entry_slot, code);
1977     }
1978   }
1979
1980
1981   static inline void EvacuateFixedArray(Map* map,
1982                                         HeapObject** slot,
1983                                         HeapObject* object) {
1984     int object_size = FixedArray::BodyDescriptor::SizeOf(map, object);
1985     EvacuateObject<POINTER_OBJECT, UNKNOWN_SIZE, kObjectAlignment>(
1986         map, slot, object, object_size);
1989   }
1990
1991
1992   static inline void EvacuateFixedDoubleArray(Map* map,
1993                                               HeapObject** slot,
1994                                               HeapObject* object) {
1995     int length = reinterpret_cast<FixedDoubleArray*>(object)->length();
1996     int object_size = FixedDoubleArray::SizeFor(length);
1997     EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE, kDoubleAlignment>(
1998         map,
1999         slot,
2000         object,
2001         object_size);
2002   }
2003
2004
2005   static inline void EvacuateByteArray(Map* map,
2006                                        HeapObject** slot,
2007                                        HeapObject* object) {
2008     int object_size = reinterpret_cast<ByteArray*>(object)->ByteArraySize();
2009     EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE, kObjectAlignment>(
2010         map, slot, object, object_size);
2011   }
2012
2013
2014   static inline void EvacuateSeqOneByteString(Map* map,
2015                                             HeapObject** slot,
2016                                             HeapObject* object) {
2017     int object_size = SeqOneByteString::cast(object)->
2018         SeqOneByteStringSize(map->instance_type());
2019     EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE, kObjectAlignment>(
2020         map, slot, object, object_size);
2021   }
2022
2023
2024   static inline void EvacuateSeqTwoByteString(Map* map,
2025                                               HeapObject** slot,
2026                                               HeapObject* object) {
2027     int object_size = SeqTwoByteString::cast(object)->
2028         SeqTwoByteStringSize(map->instance_type());
2029     EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE, kObjectAlignment>(
2030         map, slot, object, object_size);
2031   }
2032
2033
2034   static inline bool IsShortcutCandidate(int type) {
2035     return ((type & kShortcutTypeMask) == kShortcutTypeTag);
2036   }
2037
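       // A cons string whose second component is the empty string is
       // short-circuited: the slot is updated to point at its first
       // component and the cons wrapper itself is never copied.
       // Short-circuiting is skipped when marks have to be transferred
       // (see also SelectScavengingVisitorsTable).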
2038   static inline void EvacuateShortcutCandidate(Map* map,
2039                                                HeapObject** slot,
2040                                                HeapObject* object) {
2041     ASSERT(IsShortcutCandidate(map->instance_type()));
2042
2043     Heap* heap = map->GetHeap();
2044
2045     if (marks_handling == IGNORE_MARKS &&
2046         ConsString::cast(object)->unchecked_second() ==
2047         heap->empty_string()) {
2048       HeapObject* first =
2049           HeapObject::cast(ConsString::cast(object)->unchecked_first());
2050
2051       *slot = first;
2052
2053       if (!heap->InNewSpace(first)) {
2054         object->set_map_word(MapWord::FromForwardingAddress(first));
2055         return;
2056       }
2057
2058       MapWord first_word = first->map_word();
2059       if (first_word.IsForwardingAddress()) {
2060         HeapObject* target = first_word.ToForwardingAddress();
2061
2062         *slot = target;
2063         object->set_map_word(MapWord::FromForwardingAddress(target));
2064         return;
2065       }
2066
2067       heap->DoScavengeObject(first->map(), slot, first);
2068       object->set_map_word(MapWord::FromForwardingAddress(*slot));
2069       return;
2070     }
2071
2072     int object_size = ConsString::kSize;
2073     EvacuateObject<POINTER_OBJECT, SMALL, kObjectAlignment>(
2074         map, slot, object, object_size);
2075   }
2076
2077   template<ObjectContents object_contents>
2078   class ObjectEvacuationStrategy {
2079    public:
2080     template<int object_size>
2081     static inline void VisitSpecialized(Map* map,
2082                                         HeapObject** slot,
2083                                         HeapObject* object) {
2084       EvacuateObject<object_contents, SMALL, kObjectAlignment>(
2085           map, slot, object, object_size);
2086     }
2087
2088     static inline void Visit(Map* map,
2089                              HeapObject** slot,
2090                              HeapObject* object) {
2091       int object_size = map->instance_size();
2092       EvacuateObject<object_contents, SMALL, kObjectAlignment>(
2093           map, slot, object, object_size);
2094     }
2095   };
2096
2097   static VisitorDispatchTable<ScavengingCallback> table_;
2098 };
2099
2100
2101 template<MarksHandling marks_handling,
2102          LoggingAndProfiling logging_and_profiling_mode>
2103 VisitorDispatchTable<ScavengingCallback>
2104     ScavengingVisitor<marks_handling, logging_and_profiling_mode>::table_;
2105
2106
2107 static void InitializeScavengingVisitorsTables() {
2108   ScavengingVisitor<TRANSFER_MARKS,
2109                     LOGGING_AND_PROFILING_DISABLED>::Initialize();
2110   ScavengingVisitor<IGNORE_MARKS, LOGGING_AND_PROFILING_DISABLED>::Initialize();
2111   ScavengingVisitor<TRANSFER_MARKS,
2112                     LOGGING_AND_PROFILING_ENABLED>::Initialize();
2113   ScavengingVisitor<IGNORE_MARKS, LOGGING_AND_PROFILING_ENABLED>::Initialize();
2114 }
2115
2116
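     // Picks one of the four statically initialized dispatch tables,
     // depending on whether incremental marking is active (mark bits must be
     // transferred) and on whether logging or profiling is enabled.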
2117 void Heap::SelectScavengingVisitorsTable() {
2118   bool logging_and_profiling =
2119       isolate()->logger()->is_logging() ||
2120       isolate()->cpu_profiler()->is_profiling() ||
2121       (isolate()->heap_profiler() != NULL &&
2122        isolate()->heap_profiler()->is_profiling());
2123
2124   if (!incremental_marking()->IsMarking()) {
2125     if (!logging_and_profiling) {
2126       scavenging_visitors_table_.CopyFrom(
2127           ScavengingVisitor<IGNORE_MARKS,
2128                             LOGGING_AND_PROFILING_DISABLED>::GetTable());
2129     } else {
2130       scavenging_visitors_table_.CopyFrom(
2131           ScavengingVisitor<IGNORE_MARKS,
2132                             LOGGING_AND_PROFILING_ENABLED>::GetTable());
2133     }
2134   } else {
2135     if (!logging_and_profiling) {
2136       scavenging_visitors_table_.CopyFrom(
2137           ScavengingVisitor<TRANSFER_MARKS,
2138                             LOGGING_AND_PROFILING_DISABLED>::GetTable());
2139     } else {
2140       scavenging_visitors_table_.CopyFrom(
2141           ScavengingVisitor<TRANSFER_MARKS,
2142                             LOGGING_AND_PROFILING_ENABLED>::GetTable());
2143     }
2144
2145     if (incremental_marking()->IsCompacting()) {
2146       // When compacting, forbid short-circuiting of cons strings.
2147       // Scavenging code relies on the fact that a new space object
2148       // can't be evacuated into an evacuation candidate, but
2149       // short-circuiting violates this assumption.
2150       scavenging_visitors_table_.Register(
2151           StaticVisitorBase::kVisitShortcutCandidate,
2152           scavenging_visitors_table_.GetVisitorById(
2153               StaticVisitorBase::kVisitConsString));
2154     }
2155   }
2156 }
2157
2158
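     // Slow path of ScavengeObject: the object has no forwarding address
     // yet, so it is copied or promoted via the visitor registered for its
     // map.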
2159 void Heap::ScavengeObjectSlow(HeapObject** p, HeapObject* object) {
2160   SLOW_ASSERT(HEAP->InFromSpace(object));
2161   MapWord first_word = object->map_word();
2162   SLOW_ASSERT(!first_word.IsForwardingAddress());
2163   Map* map = first_word.ToMap();
2164   map->GetHeap()->DoScavengeObject(map, p, object);
2165 }
2166
2167
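     // Allocates a map during bootstrapping, before the root maps and
     // objects exist.  Only the fields needed by the very first allocations
     // are set here; the remaining fields are patched up in
     // CreateInitialMaps().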
2168 MaybeObject* Heap::AllocatePartialMap(InstanceType instance_type,
2169                                       int instance_size) {
2170   Object* result;
2171   MaybeObject* maybe_result = AllocateRawMap();
2172   if (!maybe_result->ToObject(&result)) return maybe_result;
2173
2174   // Map::cast cannot be used due to uninitialized map field.
2175   reinterpret_cast<Map*>(result)->set_map(raw_unchecked_meta_map());
2176   reinterpret_cast<Map*>(result)->set_instance_type(instance_type);
2177   reinterpret_cast<Map*>(result)->set_instance_size(instance_size);
2178   reinterpret_cast<Map*>(result)->set_visitor_id(
2179         StaticVisitorBase::GetVisitorId(instance_type, instance_size));
2180   reinterpret_cast<Map*>(result)->set_inobject_properties(0);
2181   reinterpret_cast<Map*>(result)->set_pre_allocated_property_fields(0);
2182   reinterpret_cast<Map*>(result)->set_unused_property_fields(0);
2183   reinterpret_cast<Map*>(result)->set_bit_field(0);
2184   reinterpret_cast<Map*>(result)->set_bit_field2(0);
2185   int bit_field3 = Map::EnumLengthBits::encode(Map::kInvalidEnumCache) |
2186                    Map::OwnsDescriptors::encode(true);
2187   reinterpret_cast<Map*>(result)->set_bit_field3(bit_field3);
2188   return result;
2189 }
2190
2191
2192 MaybeObject* Heap::AllocateMap(InstanceType instance_type,
2193                                int instance_size,
2194                                ElementsKind elements_kind) {
2195   Object* result;
2196   MaybeObject* maybe_result = AllocateRawMap();
2197   if (!maybe_result->To(&result)) return maybe_result;
2198
2199   Map* map = reinterpret_cast<Map*>(result);
2200   map->set_map_no_write_barrier(meta_map());
2201   map->set_instance_type(instance_type);
2202   map->set_visitor_id(
2203       StaticVisitorBase::GetVisitorId(instance_type, instance_size));
2204   map->set_prototype(null_value(), SKIP_WRITE_BARRIER);
2205   map->set_constructor(null_value(), SKIP_WRITE_BARRIER);
2206   map->set_instance_size(instance_size);
2207   map->set_inobject_properties(0);
2208   map->set_pre_allocated_property_fields(0);
2209   map->set_code_cache(empty_fixed_array(), SKIP_WRITE_BARRIER);
2210   map->set_dependent_code(DependentCode::cast(empty_fixed_array()),
2211                           SKIP_WRITE_BARRIER);
2212   map->init_back_pointer(undefined_value());
2213   map->set_unused_property_fields(0);
2214   map->set_instance_descriptors(empty_descriptor_array());
2215   map->set_bit_field(0);
2216   map->set_bit_field2(1 << Map::kIsExtensible);
2217   int bit_field3 = Map::EnumLengthBits::encode(Map::kInvalidEnumCache) |
2218                    Map::OwnsDescriptors::encode(true);
2219   map->set_bit_field3(bit_field3);
2220   map->set_elements_kind(elements_kind);
2221
2222   return map;
2223 }
2224
2225
2226 MaybeObject* Heap::AllocateCodeCache() {
2227   CodeCache* code_cache;
2228   { MaybeObject* maybe_code_cache = AllocateStruct(CODE_CACHE_TYPE);
2229     if (!maybe_code_cache->To(&code_cache)) return maybe_code_cache;
2230   }
2231   code_cache->set_default_cache(empty_fixed_array(), SKIP_WRITE_BARRIER);
2232   code_cache->set_normal_type_cache(undefined_value(), SKIP_WRITE_BARRIER);
2233   return code_cache;
2234 }
2235
2236
2237 MaybeObject* Heap::AllocatePolymorphicCodeCache() {
2238   return AllocateStruct(POLYMORPHIC_CODE_CACHE_TYPE);
2239 }
2240
2241
2242 MaybeObject* Heap::AllocateAccessorPair() {
2243   AccessorPair* accessors;
2244   { MaybeObject* maybe_accessors = AllocateStruct(ACCESSOR_PAIR_TYPE);
2245     if (!maybe_accessors->To(&accessors)) return maybe_accessors;
2246   }
2247   accessors->set_getter(the_hole_value(), SKIP_WRITE_BARRIER);
2248   accessors->set_setter(the_hole_value(), SKIP_WRITE_BARRIER);
2249   return accessors;
2250 }
2251
2252
2253 MaybeObject* Heap::AllocateTypeFeedbackInfo() {
2254   TypeFeedbackInfo* info;
2255   { MaybeObject* maybe_info = AllocateStruct(TYPE_FEEDBACK_INFO_TYPE);
2256     if (!maybe_info->To(&info)) return maybe_info;
2257   }
2258   info->initialize_storage();
2259   info->set_type_feedback_cells(TypeFeedbackCells::cast(empty_fixed_array()),
2260                                 SKIP_WRITE_BARRIER);
2261   return info;
2262 }
2263
2264
2265 MaybeObject* Heap::AllocateAliasedArgumentsEntry(int aliased_context_slot) {
2266   AliasedArgumentsEntry* entry;
2267   { MaybeObject* maybe_entry = AllocateStruct(ALIASED_ARGUMENTS_ENTRY_TYPE);
2268     if (!maybe_entry->To(&entry)) return maybe_entry;
2269   }
2270   entry->set_aliased_context_slot(aliased_context_slot);
2271   return entry;
2272 }
2273
2274
2275 const Heap::StringTypeTable Heap::string_type_table[] = {
2276 #define STRING_TYPE_ELEMENT(type, size, name, camel_name)                      \
2277   {type, size, k##camel_name##MapRootIndex},
2278   STRING_TYPE_LIST(STRING_TYPE_ELEMENT)
2279 #undef STRING_TYPE_ELEMENT
2280 };
2281
2282
2283 const Heap::ConstantStringTable Heap::constant_string_table[] = {
2284 #define CONSTANT_STRING_ELEMENT(name, contents)                                \
2285   {contents, k##name##RootIndex},
2286   INTERNALIZED_STRING_LIST(CONSTANT_STRING_ELEMENT)
2287 #undef CONSTANT_STRING_ELEMENT
2288 };
2289
2290
2291 const Heap::StructTable Heap::struct_table[] = {
2292 #define STRUCT_TABLE_ELEMENT(NAME, Name, name)                                 \
2293   { NAME##_TYPE, Name::kSize, k##Name##MapRootIndex },
2294   STRUCT_LIST(STRUCT_TABLE_ELEMENT)
2295 #undef STRUCT_TABLE_ELEMENT
2296 };
2297
2298
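     // Bootstraps the map hierarchy: partial maps for the meta map, fixed
     // arrays and oddballs come first, then the empty fixed array and
     // descriptor array, after which the partial maps are patched up and the
     // remaining root maps are allocated normally.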
2299 bool Heap::CreateInitialMaps() {
2300   Object* obj;
2301   { MaybeObject* maybe_obj = AllocatePartialMap(MAP_TYPE, Map::kSize);
2302     if (!maybe_obj->ToObject(&obj)) return false;
2303   }
2304   // Map::cast cannot be used due to uninitialized map field.
2305   Map* new_meta_map = reinterpret_cast<Map*>(obj);
2306   set_meta_map(new_meta_map);
2307   new_meta_map->set_map(new_meta_map);
2308
2309   { MaybeObject* maybe_obj =
2310         AllocatePartialMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2311     if (!maybe_obj->ToObject(&obj)) return false;
2312   }
2313   set_fixed_array_map(Map::cast(obj));
2314
2315   { MaybeObject* maybe_obj = AllocatePartialMap(ODDBALL_TYPE, Oddball::kSize);
2316     if (!maybe_obj->ToObject(&obj)) return false;
2317   }
2318   set_oddball_map(Map::cast(obj));
2319
2320   // Allocate the empty array.
2321   { MaybeObject* maybe_obj = AllocateEmptyFixedArray();
2322     if (!maybe_obj->ToObject(&obj)) return false;
2323   }
2324   set_empty_fixed_array(FixedArray::cast(obj));
2325
2326   { MaybeObject* maybe_obj = Allocate(oddball_map(), OLD_POINTER_SPACE);
2327     if (!maybe_obj->ToObject(&obj)) return false;
2328   }
2329   set_null_value(Oddball::cast(obj));
2330   Oddball::cast(obj)->set_kind(Oddball::kNull);
2331
2332   { MaybeObject* maybe_obj = Allocate(oddball_map(), OLD_POINTER_SPACE);
2333     if (!maybe_obj->ToObject(&obj)) return false;
2334   }
2335   set_undefined_value(Oddball::cast(obj));
2336   Oddball::cast(obj)->set_kind(Oddball::kUndefined);
2337   ASSERT(!InNewSpace(undefined_value()));
2338
2339   // Allocate the empty descriptor array.
2340   { MaybeObject* maybe_obj = AllocateEmptyFixedArray();
2341     if (!maybe_obj->ToObject(&obj)) return false;
2342   }
2343   set_empty_descriptor_array(DescriptorArray::cast(obj));
2344
2345   // Fix the instance_descriptors for the existing maps.
2346   meta_map()->set_code_cache(empty_fixed_array());
2347   meta_map()->set_dependent_code(DependentCode::cast(empty_fixed_array()));
2348   meta_map()->init_back_pointer(undefined_value());
2349   meta_map()->set_instance_descriptors(empty_descriptor_array());
2350
2351   fixed_array_map()->set_code_cache(empty_fixed_array());
2352   fixed_array_map()->set_dependent_code(
2353       DependentCode::cast(empty_fixed_array()));
2354   fixed_array_map()->init_back_pointer(undefined_value());
2355   fixed_array_map()->set_instance_descriptors(empty_descriptor_array());
2356
2357   oddball_map()->set_code_cache(empty_fixed_array());
2358   oddball_map()->set_dependent_code(DependentCode::cast(empty_fixed_array()));
2359   oddball_map()->init_back_pointer(undefined_value());
2360   oddball_map()->set_instance_descriptors(empty_descriptor_array());
2361
2362   // Fix prototype object for existing maps.
2363   meta_map()->set_prototype(null_value());
2364   meta_map()->set_constructor(null_value());
2365
2366   fixed_array_map()->set_prototype(null_value());
2367   fixed_array_map()->set_constructor(null_value());
2368
2369   oddball_map()->set_prototype(null_value());
2370   oddball_map()->set_constructor(null_value());
2371
2372   { MaybeObject* maybe_obj =
2373         AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2374     if (!maybe_obj->ToObject(&obj)) return false;
2375   }
2376   set_fixed_cow_array_map(Map::cast(obj));
2377   ASSERT(fixed_array_map() != fixed_cow_array_map());
2378
2379   { MaybeObject* maybe_obj =
2380         AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2381     if (!maybe_obj->ToObject(&obj)) return false;
2382   }
2383   set_scope_info_map(Map::cast(obj));
2384
2385   { MaybeObject* maybe_obj = AllocateMap(HEAP_NUMBER_TYPE, HeapNumber::kSize);
2386     if (!maybe_obj->ToObject(&obj)) return false;
2387   }
2388   set_heap_number_map(Map::cast(obj));
2389
2390   { MaybeObject* maybe_obj = AllocateMap(SYMBOL_TYPE, Symbol::kSize);
2391     if (!maybe_obj->ToObject(&obj)) return false;
2392   }
2393   set_symbol_map(Map::cast(obj));
2394
2395   { MaybeObject* maybe_obj = AllocateMap(FOREIGN_TYPE, Foreign::kSize);
2396     if (!maybe_obj->ToObject(&obj)) return false;
2397   }
2398   set_foreign_map(Map::cast(obj));
2399
2400   for (unsigned i = 0; i < ARRAY_SIZE(string_type_table); i++) {
2401     const StringTypeTable& entry = string_type_table[i];
2402     { MaybeObject* maybe_obj = AllocateMap(entry.type, entry.size);
2403       if (!maybe_obj->ToObject(&obj)) return false;
2404     }
2405     roots_[entry.index] = Map::cast(obj);
2406   }
2407
2408   { MaybeObject* maybe_obj = AllocateMap(STRING_TYPE, kVariableSizeSentinel);
2409     if (!maybe_obj->ToObject(&obj)) return false;
2410   }
2411   set_undetectable_string_map(Map::cast(obj));
2412   Map::cast(obj)->set_is_undetectable();
2413
2414   { MaybeObject* maybe_obj =
2415         AllocateMap(ASCII_STRING_TYPE, kVariableSizeSentinel);
2416     if (!maybe_obj->ToObject(&obj)) return false;
2417   }
2418   set_undetectable_ascii_string_map(Map::cast(obj));
2419   Map::cast(obj)->set_is_undetectable();
2420
2421   { MaybeObject* maybe_obj =
2422         AllocateMap(FIXED_DOUBLE_ARRAY_TYPE, kVariableSizeSentinel);
2423     if (!maybe_obj->ToObject(&obj)) return false;
2424   }
2425   set_fixed_double_array_map(Map::cast(obj));
2426
2427   { MaybeObject* maybe_obj =
2428         AllocateMap(BYTE_ARRAY_TYPE, kVariableSizeSentinel);
2429     if (!maybe_obj->ToObject(&obj)) return false;
2430   }
2431   set_byte_array_map(Map::cast(obj));
2432
2433   { MaybeObject* maybe_obj =
2434         AllocateMap(FREE_SPACE_TYPE, kVariableSizeSentinel);
2435     if (!maybe_obj->ToObject(&obj)) return false;
2436   }
2437   set_free_space_map(Map::cast(obj));
2438
2439   { MaybeObject* maybe_obj = AllocateByteArray(0, TENURED);
2440     if (!maybe_obj->ToObject(&obj)) return false;
2441   }
2442   set_empty_byte_array(ByteArray::cast(obj));
2443
2444   { MaybeObject* maybe_obj =
2445         AllocateMap(EXTERNAL_PIXEL_ARRAY_TYPE, ExternalArray::kAlignedSize);
2446     if (!maybe_obj->ToObject(&obj)) return false;
2447   }
2448   set_external_pixel_array_map(Map::cast(obj));
2449
2450   { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_BYTE_ARRAY_TYPE,
2451                                          ExternalArray::kAlignedSize);
2452     if (!maybe_obj->ToObject(&obj)) return false;
2453   }
2454   set_external_byte_array_map(Map::cast(obj));
2455
2456   { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE,
2457                                          ExternalArray::kAlignedSize);
2458     if (!maybe_obj->ToObject(&obj)) return false;
2459   }
2460   set_external_unsigned_byte_array_map(Map::cast(obj));
2461
2462   { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_SHORT_ARRAY_TYPE,
2463                                          ExternalArray::kAlignedSize);
2464     if (!maybe_obj->ToObject(&obj)) return false;
2465   }
2466   set_external_short_array_map(Map::cast(obj));
2467
2468   { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE,
2469                                          ExternalArray::kAlignedSize);
2470     if (!maybe_obj->ToObject(&obj)) return false;
2471   }
2472   set_external_unsigned_short_array_map(Map::cast(obj));
2473
2474   { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_INT_ARRAY_TYPE,
2475                                          ExternalArray::kAlignedSize);
2476     if (!maybe_obj->ToObject(&obj)) return false;
2477   }
2478   set_external_int_array_map(Map::cast(obj));
2479
2480   { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_UNSIGNED_INT_ARRAY_TYPE,
2481                                          ExternalArray::kAlignedSize);
2482     if (!maybe_obj->ToObject(&obj)) return false;
2483   }
2484   set_external_unsigned_int_array_map(Map::cast(obj));
2485
2486   { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_FLOAT_ARRAY_TYPE,
2487                                          ExternalArray::kAlignedSize);
2488     if (!maybe_obj->ToObject(&obj)) return false;
2489   }
2490   set_external_float_array_map(Map::cast(obj));
2491
2492   { MaybeObject* maybe_obj =
2493         AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2494     if (!maybe_obj->ToObject(&obj)) return false;
2495   }
2496   set_non_strict_arguments_elements_map(Map::cast(obj));
2497
2498   { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_DOUBLE_ARRAY_TYPE,
2499                                          ExternalArray::kAlignedSize);
2500     if (!maybe_obj->ToObject(&obj)) return false;
2501   }
2502   set_external_double_array_map(Map::cast(obj));
2503
2504   { MaybeObject* maybe_obj = AllocateEmptyExternalArray(kExternalByteArray);
2505     if (!maybe_obj->ToObject(&obj)) return false;
2506   }
2507   set_empty_external_byte_array(ExternalArray::cast(obj));
2508
2509   { MaybeObject* maybe_obj =
2510         AllocateEmptyExternalArray(kExternalUnsignedByteArray);
2511     if (!maybe_obj->ToObject(&obj)) return false;
2512   }
2513   set_empty_external_unsigned_byte_array(ExternalArray::cast(obj));
2514
2515   { MaybeObject* maybe_obj = AllocateEmptyExternalArray(kExternalShortArray);
2516     if (!maybe_obj->ToObject(&obj)) return false;
2517   }
2518   set_empty_external_short_array(ExternalArray::cast(obj));
2519
2520   { MaybeObject* maybe_obj = AllocateEmptyExternalArray(
2521       kExternalUnsignedShortArray);
2522     if (!maybe_obj->ToObject(&obj)) return false;
2523   }
2524   set_empty_external_unsigned_short_array(ExternalArray::cast(obj));
2525
2526   { MaybeObject* maybe_obj = AllocateEmptyExternalArray(kExternalIntArray);
2527     if (!maybe_obj->ToObject(&obj)) return false;
2528   }
2529   set_empty_external_int_array(ExternalArray::cast(obj));
2530
2531   { MaybeObject* maybe_obj =
2532         AllocateEmptyExternalArray(kExternalUnsignedIntArray);
2533     if (!maybe_obj->ToObject(&obj)) return false;
2534   }
2535   set_empty_external_unsigned_int_array(ExternalArray::cast(obj));
2536
2537   { MaybeObject* maybe_obj = AllocateEmptyExternalArray(kExternalFloatArray);
2538     if (!maybe_obj->ToObject(&obj)) return false;
2539   }
2540   set_empty_external_float_array(ExternalArray::cast(obj));
2541
2542   { MaybeObject* maybe_obj = AllocateEmptyExternalArray(kExternalDoubleArray);
2543     if (!maybe_obj->ToObject(&obj)) return false;
2544   }
2545   set_empty_external_double_array(ExternalArray::cast(obj));
2546
2547   { MaybeObject* maybe_obj = AllocateEmptyExternalArray(kExternalPixelArray);
2548     if (!maybe_obj->ToObject(&obj)) return false;
2549   }
2550   set_empty_external_pixel_array(ExternalArray::cast(obj));
2551
2552   { MaybeObject* maybe_obj = AllocateMap(CODE_TYPE, kVariableSizeSentinel);
2553     if (!maybe_obj->ToObject(&obj)) return false;
2554   }
2555   set_code_map(Map::cast(obj));
2556
2557   { MaybeObject* maybe_obj = AllocateMap(JS_GLOBAL_PROPERTY_CELL_TYPE,
2558                                          JSGlobalPropertyCell::kSize);
2559     if (!maybe_obj->ToObject(&obj)) return false;
2560   }
2561   set_global_property_cell_map(Map::cast(obj));
2562
2563   { MaybeObject* maybe_obj = AllocateMap(FILLER_TYPE, kPointerSize);
2564     if (!maybe_obj->ToObject(&obj)) return false;
2565   }
2566   set_one_pointer_filler_map(Map::cast(obj));
2567
2568   { MaybeObject* maybe_obj = AllocateMap(FILLER_TYPE, 2 * kPointerSize);
2569     if (!maybe_obj->ToObject(&obj)) return false;
2570   }
2571   set_two_pointer_filler_map(Map::cast(obj));
2572
2573   for (unsigned i = 0; i < ARRAY_SIZE(struct_table); i++) {
2574     const StructTable& entry = struct_table[i];
2575     { MaybeObject* maybe_obj = AllocateMap(entry.type, entry.size);
2576       if (!maybe_obj->ToObject(&obj)) return false;
2577     }
2578     roots_[entry.index] = Map::cast(obj);
2579   }
2580
2581   { MaybeObject* maybe_obj =
2582         AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2583     if (!maybe_obj->ToObject(&obj)) return false;
2584   }
2585   set_hash_table_map(Map::cast(obj));
2586
2587   { MaybeObject* maybe_obj =
2588         AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2589     if (!maybe_obj->ToObject(&obj)) return false;
2590   }
2591   set_function_context_map(Map::cast(obj));
2592
2593   { MaybeObject* maybe_obj =
2594         AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2595     if (!maybe_obj->ToObject(&obj)) return false;
2596   }
2597   set_catch_context_map(Map::cast(obj));
2598
2599   { MaybeObject* maybe_obj =
2600         AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2601     if (!maybe_obj->ToObject(&obj)) return false;
2602   }
2603   set_with_context_map(Map::cast(obj));
2604
2605   { MaybeObject* maybe_obj =
2606         AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2607     if (!maybe_obj->ToObject(&obj)) return false;
2608   }
2609   set_block_context_map(Map::cast(obj));
2610
2611   { MaybeObject* maybe_obj =
2612         AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2613     if (!maybe_obj->ToObject(&obj)) return false;
2614   }
2615   set_module_context_map(Map::cast(obj));
2616
2617   { MaybeObject* maybe_obj =
2618         AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2619     if (!maybe_obj->ToObject(&obj)) return false;
2620   }
2621   set_global_context_map(Map::cast(obj));
2622
2623   { MaybeObject* maybe_obj =
2624         AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2625     if (!maybe_obj->ToObject(&obj)) return false;
2626   }
2627   Map* native_context_map = Map::cast(obj);
2628   native_context_map->set_dictionary_map(true);
2629   native_context_map->set_visitor_id(StaticVisitorBase::kVisitNativeContext);
2630   set_native_context_map(native_context_map);
2631
2632   { MaybeObject* maybe_obj = AllocateMap(SHARED_FUNCTION_INFO_TYPE,
2633                                          SharedFunctionInfo::kAlignedSize);
2634     if (!maybe_obj->ToObject(&obj)) return false;
2635   }
2636   set_shared_function_info_map(Map::cast(obj));
2637
2638   { MaybeObject* maybe_obj = AllocateMap(JS_MESSAGE_OBJECT_TYPE,
2639                                          JSMessageObject::kSize);
2640     if (!maybe_obj->ToObject(&obj)) return false;
2641   }
2642   set_message_object_map(Map::cast(obj));
2643
2644   Map* external_map;
2645   { MaybeObject* maybe_obj =
2646         AllocateMap(JS_OBJECT_TYPE, JSObject::kHeaderSize + kPointerSize);
2647     if (!maybe_obj->To(&external_map)) return false;
2648   }
2649   external_map->set_is_extensible(false);
2650   set_external_map(external_map);
2651
2652   ASSERT(!InNewSpace(empty_fixed_array()));
2653   return true;
2654 }
2655
2656
2657 MaybeObject* Heap::AllocateHeapNumber(double value, PretenureFlag pretenure) {
2658   // Statically ensure that it is safe to allocate heap numbers in paged
2659   // spaces.
2660   STATIC_ASSERT(HeapNumber::kSize <= Page::kNonCodeObjectAreaSize);
2661   AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
2662
2663   Object* result;
2664   { MaybeObject* maybe_result =
2665         AllocateRaw(HeapNumber::kSize, space, OLD_DATA_SPACE);
2666     if (!maybe_result->ToObject(&result)) return maybe_result;
2667   }
2668
2669   HeapObject::cast(result)->set_map_no_write_barrier(heap_number_map());
2670   HeapNumber::cast(result)->set_value(value);
2671   return result;
2672 }
2673
2674
2675 MaybeObject* Heap::AllocateHeapNumber(double value) {
2676   // Use general version, if we're forced to always allocate.
2677   if (always_allocate()) return AllocateHeapNumber(value, TENURED);
2678
2679   // This version of AllocateHeapNumber is optimized for
2680   // allocation in new space.
2681   STATIC_ASSERT(HeapNumber::kSize <= Page::kMaxNonCodeHeapObjectSize);
2682   ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
2683   Object* result;
2684   { MaybeObject* maybe_result = new_space_.AllocateRaw(HeapNumber::kSize);
2685     if (!maybe_result->ToObject(&result)) return maybe_result;
2686   }
2687   HeapObject::cast(result)->set_map_no_write_barrier(heap_number_map());
2688   HeapNumber::cast(result)->set_value(value);
2689   return result;
2690 }
2691
2692
2693 MaybeObject* Heap::AllocateJSGlobalPropertyCell(Object* value) {
2694   Object* result;
2695   { MaybeObject* maybe_result = AllocateRawCell();
2696     if (!maybe_result->ToObject(&result)) return maybe_result;
2697   }
2698   HeapObject::cast(result)->set_map_no_write_barrier(
2699       global_property_cell_map());
2700   JSGlobalPropertyCell::cast(result)->set_value(value);
2701   return result;
2702 }
2703
2704
2705 MaybeObject* Heap::CreateOddball(const char* to_string,
2706                                  Object* to_number,
2707                                  byte kind) {
2708   Object* result;
2709   { MaybeObject* maybe_result = Allocate(oddball_map(), OLD_POINTER_SPACE);
2710     if (!maybe_result->ToObject(&result)) return maybe_result;
2711   }
2712   return Oddball::cast(result)->Initialize(to_string, to_number, kind);
2713 }
2714
2715
2716 bool Heap::CreateApiObjects() {
2717   Object* obj;
2718
2719   { MaybeObject* maybe_obj = AllocateMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
2720     if (!maybe_obj->ToObject(&obj)) return false;
2721   }
2722   // Don't use Smi-only elements optimizations for objects with the neander
2723   // map. There are too many cases where element values are set directly,
2724   // with no bottleneck at which to trap the Smi-only -> fast elements
2725   // transition, and there appears to be no benefit to optimizing this case.
2726   Map* new_neander_map = Map::cast(obj);
2727   new_neander_map->set_elements_kind(TERMINAL_FAST_ELEMENTS_KIND);
2728   set_neander_map(new_neander_map);
2729
2730   { MaybeObject* maybe_obj = AllocateJSObjectFromMap(neander_map());
2731     if (!maybe_obj->ToObject(&obj)) return false;
2732   }
2733   Object* elements;
2734   { MaybeObject* maybe_elements = AllocateFixedArray(2);
2735     if (!maybe_elements->ToObject(&elements)) return false;
2736   }
2737   FixedArray::cast(elements)->set(0, Smi::FromInt(0));
2738   JSObject::cast(obj)->set_elements(FixedArray::cast(elements));
2739   set_message_listeners(JSObject::cast(obj));
2740
2741   return true;
2742 }
2743
2744
2745 void Heap::CreateJSEntryStub() {
2746   JSEntryStub stub;
2747   set_js_entry_code(*stub.GetCode(isolate()));
2748 }
2749
2750
2751 void Heap::CreateJSConstructEntryStub() {
2752   JSConstructEntryStub stub;
2753   set_js_construct_entry_code(*stub.GetCode(isolate()));
2754 }
2755
2756
2757 void Heap::CreateFixedStubs() {
2758   // Here we create roots for fixed stubs. They are needed at GC
2759   // for cooking and uncooking (check out frames.cc).
2760   // This eliminates the need for doing a dictionary lookup in the
2761   // stub cache for these stubs.
2762   HandleScope scope(isolate());
2763   // gcc-4.4 has problem generating correct code of following snippet:
2764   // {  JSEntryStub stub;
2765   //    js_entry_code_ = *stub.GetCode();
2766   // }
2767   // {  JSConstructEntryStub stub;
2768   //    js_construct_entry_code_ = *stub.GetCode();
2769   // }
2770   // To work around the problem, make separate functions without inlining.
2771   Heap::CreateJSEntryStub();
2772   Heap::CreateJSConstructEntryStub();
2773
2774   // Create stubs that should be there, so we don't unexpectedly have to
2775   // create them if we need them during the creation of another stub.
2776   // Stub creation mixes raw pointers and handles in an unsafe manner so
2777   // we cannot create stubs while we are creating stubs.
2778   CodeStub::GenerateStubsAheadOfTime(isolate());
2779 }
2780
2781
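     // Allocates the remaining heap roots: canonical heap numbers, the
     // string table, the oddballs, the internalized constant strings, the
     // hidden string and the various bootstrapping caches.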
2782 bool Heap::CreateInitialObjects() {
2783   Object* obj;
2784
2785   // The -0 value must be set before NumberFromDouble works.
2786   { MaybeObject* maybe_obj = AllocateHeapNumber(-0.0, TENURED);
2787     if (!maybe_obj->ToObject(&obj)) return false;
2788   }
2789   set_minus_zero_value(HeapNumber::cast(obj));
2790   ASSERT(std::signbit(minus_zero_value()->Number()) != 0);
2791
2792   { MaybeObject* maybe_obj = AllocateHeapNumber(OS::nan_value(), TENURED);
2793     if (!maybe_obj->ToObject(&obj)) return false;
2794   }
2795   set_nan_value(HeapNumber::cast(obj));
2796
2797   { MaybeObject* maybe_obj = AllocateHeapNumber(V8_INFINITY, TENURED);
2798     if (!maybe_obj->ToObject(&obj)) return false;
2799   }
2800   set_infinity_value(HeapNumber::cast(obj));
2801
2802   // The hole has not been created yet, but we want to put something
2803   // predictable in the gaps in the string table, so let's make that Smi zero.
2804   set_the_hole_value(reinterpret_cast<Oddball*>(Smi::FromInt(0)));
2805
2806   // Allocate initial string table.
2807   { MaybeObject* maybe_obj =
2808         StringTable::Allocate(this, kInitialStringTableSize);
2809     if (!maybe_obj->ToObject(&obj)) return false;
2810   }
2811   // Don't use set_string_table() due to asserts.
2812   roots_[kStringTableRootIndex] = obj;
2813
2814   // Finish initializing oddballs after creating the string table.
2815   { MaybeObject* maybe_obj =
2816         undefined_value()->Initialize("undefined",
2817                                       nan_value(),
2818                                       Oddball::kUndefined);
2819     if (!maybe_obj->ToObject(&obj)) return false;
2820   }
2821
2822   // Initialize the null_value.
2823   { MaybeObject* maybe_obj =
2824         null_value()->Initialize("null", Smi::FromInt(0), Oddball::kNull);
2825     if (!maybe_obj->ToObject(&obj)) return false;
2826   }
2827
2828   { MaybeObject* maybe_obj = CreateOddball("true",
2829                                            Smi::FromInt(1),
2830                                            Oddball::kTrue);
2831     if (!maybe_obj->ToObject(&obj)) return false;
2832   }
2833   set_true_value(Oddball::cast(obj));
2834
2835   { MaybeObject* maybe_obj = CreateOddball("false",
2836                                            Smi::FromInt(0),
2837                                            Oddball::kFalse);
2838     if (!maybe_obj->ToObject(&obj)) return false;
2839   }
2840   set_false_value(Oddball::cast(obj));
2841
2842   { MaybeObject* maybe_obj = CreateOddball("hole",
2843                                            Smi::FromInt(-1),
2844                                            Oddball::kTheHole);
2845     if (!maybe_obj->ToObject(&obj)) return false;
2846   }
2847   set_the_hole_value(Oddball::cast(obj));
2848
2849   { MaybeObject* maybe_obj = CreateOddball("arguments_marker",
2850                                            Smi::FromInt(-4),
2851                                            Oddball::kArgumentMarker);
2852     if (!maybe_obj->ToObject(&obj)) return false;
2853   }
2854   set_arguments_marker(Oddball::cast(obj));
2855
2856   { MaybeObject* maybe_obj = CreateOddball("no_interceptor_result_sentinel",
2857                                            Smi::FromInt(-2),
2858                                            Oddball::kOther);
2859     if (!maybe_obj->ToObject(&obj)) return false;
2860   }
2861   set_no_interceptor_result_sentinel(obj);
2862
2863   { MaybeObject* maybe_obj = CreateOddball("termination_exception",
2864                                            Smi::FromInt(-3),
2865                                            Oddball::kOther);
2866     if (!maybe_obj->ToObject(&obj)) return false;
2867   }
2868   set_termination_exception(obj);
2869
2870   for (unsigned i = 0; i < ARRAY_SIZE(constant_string_table); i++) {
2871     { MaybeObject* maybe_obj =
2872           InternalizeUtf8String(constant_string_table[i].contents);
2873       if (!maybe_obj->ToObject(&obj)) return false;
2874     }
2875     roots_[constant_string_table[i].index] = String::cast(obj);
2876   }
2877
2878   // Allocate the hidden string which is used to identify the hidden properties
2879   // in JSObjects. The hash code has a special value so that it will not match
2880   // the empty string when searching for the property. It cannot be part of the
2881   // loop above because it needs to be allocated manually with the special
2882   // hash code in place. The hash code for the hidden_string is zero to ensure
2883   // that it will always be at the first entry in property descriptors.
2884   { MaybeObject* maybe_obj = AllocateOneByteInternalizedString(
2885       OneByteVector("", 0), String::kEmptyStringHash);
2886     if (!maybe_obj->ToObject(&obj)) return false;
2887   }
2888   hidden_string_ = String::cast(obj);
2889
2890   // Allocate the code_stubs dictionary. The initial size is set to avoid
2891   // expanding the dictionary during bootstrapping.
2892   { MaybeObject* maybe_obj = UnseededNumberDictionary::Allocate(this, 128);
2893     if (!maybe_obj->ToObject(&obj)) return false;
2894   }
2895   set_code_stubs(UnseededNumberDictionary::cast(obj));
2896
2897
2898   // Allocate the non_monomorphic_cache used in stub-cache.cc. The initial size
2899   // is set to avoid expanding the dictionary during bootstrapping.
2900   { MaybeObject* maybe_obj = UnseededNumberDictionary::Allocate(this, 64);
2901     if (!maybe_obj->ToObject(&obj)) return false;
2902   }
2903   set_non_monomorphic_cache(UnseededNumberDictionary::cast(obj));
2904
2905   { MaybeObject* maybe_obj = AllocatePolymorphicCodeCache();
2906     if (!maybe_obj->ToObject(&obj)) return false;
2907   }
2908   set_polymorphic_code_cache(PolymorphicCodeCache::cast(obj));
2909
2910   set_instanceof_cache_function(Smi::FromInt(0));
2911   set_instanceof_cache_map(Smi::FromInt(0));
2912   set_instanceof_cache_answer(Smi::FromInt(0));
2913
2914   CreateFixedStubs();
2915
2916   // Allocate the dictionary of intrinsic function names.
2917   { MaybeObject* maybe_obj =
2918         NameDictionary::Allocate(this, Runtime::kNumFunctions);
2919     if (!maybe_obj->ToObject(&obj)) return false;
2920   }
2921   { MaybeObject* maybe_obj = Runtime::InitializeIntrinsicFunctionNames(this,
2922                                                                        obj);
2923     if (!maybe_obj->ToObject(&obj)) return false;
2924   }
2925   set_intrinsic_function_names(NameDictionary::cast(obj));
2926
2927   { MaybeObject* maybe_obj = AllocateInitialNumberStringCache();
2928     if (!maybe_obj->ToObject(&obj)) return false;
2929   }
2930   set_number_string_cache(FixedArray::cast(obj));
2931
2932   // Allocate cache for single character one byte strings.
2933   { MaybeObject* maybe_obj =
2934         AllocateFixedArray(String::kMaxOneByteCharCode + 1, TENURED);
2935     if (!maybe_obj->ToObject(&obj)) return false;
2936   }
2937   set_single_character_string_cache(FixedArray::cast(obj));
2938
2939   // Allocate cache for string split.
2940   { MaybeObject* maybe_obj = AllocateFixedArray(
2941       RegExpResultsCache::kRegExpResultsCacheSize, TENURED);
2942     if (!maybe_obj->ToObject(&obj)) return false;
2943   }
2944   set_string_split_cache(FixedArray::cast(obj));
2945
2946   { MaybeObject* maybe_obj = AllocateFixedArray(
2947       RegExpResultsCache::kRegExpResultsCacheSize, TENURED);
2948     if (!maybe_obj->ToObject(&obj)) return false;
2949   }
2950   set_regexp_multiple_cache(FixedArray::cast(obj));
2951
2952   // Allocate cache for external strings pointing to native source code.
2953   { MaybeObject* maybe_obj = AllocateFixedArray(Natives::GetBuiltinsCount());
2954     if (!maybe_obj->ToObject(&obj)) return false;
2955   }
2956   set_natives_source_cache(FixedArray::cast(obj));
2957
2958   // Allocate object to hold object observation state.
2959   { MaybeObject* maybe_obj = AllocateMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
2960     if (!maybe_obj->ToObject(&obj)) return false;
2961   }
2962   { MaybeObject* maybe_obj = AllocateJSObjectFromMap(Map::cast(obj));
2963     if (!maybe_obj->ToObject(&obj)) return false;
2964   }
2965   set_observation_state(JSObject::cast(obj));
2966
2967   // Handling of script id generation is in FACTORY->NewScript.
2968   set_last_script_id(undefined_value());
2969
2970   // Initialize keyed lookup cache.
2971   isolate_->keyed_lookup_cache()->Clear();
2972
2973   // Initialize context slot cache.
2974   isolate_->context_slot_cache()->Clear();
2975
2976   // Initialize descriptor cache.
2977   isolate_->descriptor_lookup_cache()->Clear();
2978
2979   // Initialize compilation cache.
2980   isolate_->compilation_cache()->Clear();
2981
2982   return true;
2983 }
2984
2985
2986 bool Heap::RootCanBeWrittenAfterInitialization(Heap::RootListIndex root_index) {
2987   RootListIndex writable_roots[] = {
2988     kStoreBufferTopRootIndex,
2989     kStackLimitRootIndex,
2990     kNumberStringCacheRootIndex,
2991     kInstanceofCacheFunctionRootIndex,
2992     kInstanceofCacheMapRootIndex,
2993     kInstanceofCacheAnswerRootIndex,
2994     kCodeStubsRootIndex,
2995     kNonMonomorphicCacheRootIndex,
2996     kPolymorphicCodeCacheRootIndex,
2997     kLastScriptIdRootIndex,
2998     kEmptyScriptRootIndex,
2999     kRealStackLimitRootIndex,
3000     kArgumentsAdaptorDeoptPCOffsetRootIndex,
3001     kConstructStubDeoptPCOffsetRootIndex,
3002     kGetterStubDeoptPCOffsetRootIndex,
3003     kSetterStubDeoptPCOffsetRootIndex,
3004     kStringTableRootIndex,
3005   };
3006
3007   for (unsigned int i = 0; i < ARRAY_SIZE(writable_roots); i++) {
3008     if (root_index == writable_roots[i])
3009       return true;
3010   }
3011   return false;
3012 }
3013
3014
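// The results cache is a plain FixedArray used as a two-way set-associative
// cache.  Each entry occupies kArrayEntriesPerCacheEntry consecutive slots
// holding the subject string, the pattern and the cached results array.  The
// primary entry for a key starts at
//   (hash & (kRegExpResultsCacheSize - 1)) & ~(kArrayEntriesPerCacheEntry - 1)
// and the secondary entry one cache entry further on, wrapped around the
// cache size.  Smi zero marks both empty slots and cache misses.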
3015 Object* RegExpResultsCache::Lookup(Heap* heap,
3016                                    String* key_string,
3017                                    Object* key_pattern,
3018                                    ResultsCacheType type) {
3019   FixedArray* cache;
3020   if (!key_string->IsInternalizedString()) return Smi::FromInt(0);
3021   if (type == STRING_SPLIT_SUBSTRINGS) {
3022     ASSERT(key_pattern->IsString());
3023     if (!key_pattern->IsInternalizedString()) return Smi::FromInt(0);
3024     cache = heap->string_split_cache();
3025   } else {
3026     ASSERT(type == REGEXP_MULTIPLE_INDICES);
3027     ASSERT(key_pattern->IsFixedArray());
3028     cache = heap->regexp_multiple_cache();
3029   }
3030
3031   uint32_t hash = key_string->Hash();
3032   uint32_t index = ((hash & (kRegExpResultsCacheSize - 1)) &
3033       ~(kArrayEntriesPerCacheEntry - 1));
3034   if (cache->get(index + kStringOffset) == key_string &&
3035       cache->get(index + kPatternOffset) == key_pattern) {
3036     return cache->get(index + kArrayOffset);
3037   }
3038   index =
3039       ((index + kArrayEntriesPerCacheEntry) & (kRegExpResultsCacheSize - 1));
3040   if (cache->get(index + kStringOffset) == key_string &&
3041       cache->get(index + kPatternOffset) == key_pattern) {
3042     return cache->get(index + kArrayOffset);
3043   }
3044   return Smi::FromInt(0);
3045 }
3046
3047
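// Insertion mirrors the lookup scheme above: the new entry goes into the
// primary slot if that is empty, otherwise into the secondary slot if that is
// empty.  If both are taken, the secondary slot is cleared and the primary
// slot is overwritten.  Short substring results are internalized and the
// results array is switched to the copy-on-write map so it can be handed out
// to several callers safely.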
3048 void RegExpResultsCache::Enter(Heap* heap,
3049                                String* key_string,
3050                                Object* key_pattern,
3051                                FixedArray* value_array,
3052                                ResultsCacheType type) {
3053   FixedArray* cache;
3054   if (!key_string->IsInternalizedString()) return;
3055   if (type == STRING_SPLIT_SUBSTRINGS) {
3056     ASSERT(key_pattern->IsString());
3057     if (!key_pattern->IsInternalizedString()) return;
3058     cache = heap->string_split_cache();
3059   } else {
3060     ASSERT(type == REGEXP_MULTIPLE_INDICES);
3061     ASSERT(key_pattern->IsFixedArray());
3062     cache = heap->regexp_multiple_cache();
3063   }
3064
3065   uint32_t hash = key_string->Hash();
3066   uint32_t index = ((hash & (kRegExpResultsCacheSize - 1)) &
3067       ~(kArrayEntriesPerCacheEntry - 1));
3068   if (cache->get(index + kStringOffset) == Smi::FromInt(0)) {
3069     cache->set(index + kStringOffset, key_string);
3070     cache->set(index + kPatternOffset, key_pattern);
3071     cache->set(index + kArrayOffset, value_array);
3072   } else {
3073     uint32_t index2 =
3074         ((index + kArrayEntriesPerCacheEntry) & (kRegExpResultsCacheSize - 1));
3075     if (cache->get(index2 + kStringOffset) == Smi::FromInt(0)) {
3076       cache->set(index2 + kStringOffset, key_string);
3077       cache->set(index2 + kPatternOffset, key_pattern);
3078       cache->set(index2 + kArrayOffset, value_array);
3079     } else {
3080       cache->set(index2 + kStringOffset, Smi::FromInt(0));
3081       cache->set(index2 + kPatternOffset, Smi::FromInt(0));
3082       cache->set(index2 + kArrayOffset, Smi::FromInt(0));
3083       cache->set(index + kStringOffset, key_string);
3084       cache->set(index + kPatternOffset, key_pattern);
3085       cache->set(index + kArrayOffset, value_array);
3086     }
3087   }
3088   // If the array is a reasonably short list of substrings, convert it into a
3089   // list of internalized strings.
3090   if (type == STRING_SPLIT_SUBSTRINGS && value_array->length() < 100) {
3091     for (int i = 0; i < value_array->length(); i++) {
3092       String* str = String::cast(value_array->get(i));
3093       Object* internalized_str;
3094       MaybeObject* maybe_string = heap->InternalizeString(str);
3095       if (maybe_string->ToObject(&internalized_str)) {
3096         value_array->set(i, internalized_str);
3097       }
3098     }
3099   }
3100   // Convert backing store to a copy-on-write array.
3101   value_array->set_map_no_write_barrier(heap->fixed_cow_array_map());
3102 }
3103
3104
3105 void RegExpResultsCache::Clear(FixedArray* cache) {
3106   for (int i = 0; i < kRegExpResultsCacheSize; i++) {
3107     cache->set(i, Smi::FromInt(0));
3108   }
3109 }
3110
3111
3112 MaybeObject* Heap::AllocateInitialNumberStringCache() {
3113   MaybeObject* maybe_obj =
3114       AllocateFixedArray(kInitialNumberStringCacheSize * 2, TENURED);
3115   return maybe_obj;
3116 }
3117
3118
3119 int Heap::FullSizeNumberStringCacheLength() {
3120   // Compute the size of the number string cache based on the max newspace size.
3121   // The number string cache has a minimum size based on twice the initial cache
3122   // size to ensure that it is bigger after being made 'full size'.
3123   int number_string_cache_size = max_semispace_size_ / 512;
3124   number_string_cache_size = Max(kInitialNumberStringCacheSize * 2,
3125                                  Min(0x4000, number_string_cache_size));
3126   // There is a string and a number per entry so the length is twice the number
3127   // of entries.
3128   return number_string_cache_size * 2;
3129 }
3130
3131
3132 void Heap::AllocateFullSizeNumberStringCache() {
3133   // The idea is to have a small number string cache in the snapshot to keep
3134   // boot-time memory usage down.  If we find ourselves expanding the number
3135   // string cache while creating the snapshot, that plan has not worked out.
3136   ASSERT(!Serializer::enabled() || FLAG_extra_code != NULL);
3137   MaybeObject* maybe_obj =
3138       AllocateFixedArray(FullSizeNumberStringCacheLength(), TENURED);
3139   Object* new_cache;
3140   if (maybe_obj->ToObject(&new_cache)) {
3141     // We don't bother to repopulate the cache with entries from the old cache.
3142     // It will be repopulated soon enough with new strings.
3143     set_number_string_cache(FixedArray::cast(new_cache));
3144   }
3145   // If allocation fails then we just return without doing anything.  It is only
3146   // a cache, so best effort is OK here.
3147 }
3148
3149
3150 void Heap::FlushNumberStringCache() {
3151   // Flush the number to string cache.
3152   int len = number_string_cache()->length();
3153   for (int i = 0; i < len; i++) {
3154     number_string_cache()->set_undefined(this, i);
3155   }
3156 }
3157
3158
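// Hash a double by XOR-ing the low and high halves of its 64-bit
// representation; the caller masks the result down to the cache size.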
3159 static inline int double_get_hash(double d) {
3160   DoubleRepresentation rep(d);
3161   return static_cast<int>(rep.bits) ^ static_cast<int>(rep.bits >> 32);
3162 }
3163
3164
3165 static inline int smi_get_hash(Smi* smi) {
3166   return smi->value();
3167 }
3168
3169
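// The number string cache is a flat FixedArray of (number, string) pairs:
// for a hash h, masked to the number of entries, slot 2 * h holds the key and
// slot 2 * h + 1 holds the cached string.  Smis hash to their value, heap
// numbers via double_get_hash() above.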
3170 Object* Heap::GetNumberStringCache(Object* number) {
3171   int hash;
3172   int mask = (number_string_cache()->length() >> 1) - 1;
3173   if (number->IsSmi()) {
3174     hash = smi_get_hash(Smi::cast(number)) & mask;
3175   } else {
3176     hash = double_get_hash(number->Number()) & mask;
3177   }
3178   Object* key = number_string_cache()->get(hash * 2);
3179   if (key == number) {
3180     return String::cast(number_string_cache()->get(hash * 2 + 1));
3181   } else if (key->IsHeapNumber() &&
3182              number->IsHeapNumber() &&
3183              key->Number() == number->Number()) {
3184     return String::cast(number_string_cache()->get(hash * 2 + 1));
3185   }
3186   return undefined_value();
3187 }
3188
3189
3190 void Heap::SetNumberStringCache(Object* number, String* string) {
3191   int hash;
3192   int mask = (number_string_cache()->length() >> 1) - 1;
3193   if (number->IsSmi()) {
3194     hash = smi_get_hash(Smi::cast(number)) & mask;
3195   } else {
3196     hash = double_get_hash(number->Number()) & mask;
3197   }
3198   if (number_string_cache()->get(hash * 2) != undefined_value() &&
3199       number_string_cache()->length() != FullSizeNumberStringCacheLength()) {
3200     // The first time we have a hash collision, we move to the full sized
3201     // number string cache.
3202     AllocateFullSizeNumberStringCache();
3203     return;
3204   }
3205   number_string_cache()->set(hash * 2, number);
3206   number_string_cache()->set(hash * 2 + 1, string);
3207 }
3208
3209
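// Convert a number to a string, consulting and updating the number string
// cache unless the caller opts out.  The digits are produced into a stack
// buffer and then copied into a one-byte string.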
3210 MaybeObject* Heap::NumberToString(Object* number,
3211                                   bool check_number_string_cache,
3212                                   PretenureFlag pretenure) {
3213   isolate_->counters()->number_to_string_runtime()->Increment();
3214   if (check_number_string_cache) {
3215     Object* cached = GetNumberStringCache(number);
3216     if (cached != undefined_value()) {
3217       return cached;
3218     }
3219   }
3220
3221   char arr[100];
3222   Vector<char> buffer(arr, ARRAY_SIZE(arr));
3223   const char* str;
3224   if (number->IsSmi()) {
3225     int num = Smi::cast(number)->value();
3226     str = IntToCString(num, buffer);
3227   } else {
3228     double num = HeapNumber::cast(number)->value();
3229     str = DoubleToCString(num, buffer);
3230   }
3231
3232   Object* js_string;
3233   MaybeObject* maybe_js_string =
3234       AllocateStringFromOneByte(CStrVector(str), pretenure);
3235   if (maybe_js_string->ToObject(&js_string)) {
3236     SetNumberStringCache(number, String::cast(js_string));
3237   }
3238   return maybe_js_string;
3239 }
3240
3241
3242 MaybeObject* Heap::Uint32ToString(uint32_t value,
3243                                   bool check_number_string_cache) {
3244   Object* number;
3245   MaybeObject* maybe = NumberFromUint32(value);
3246   if (!maybe->To<Object>(&number)) return maybe;
3247   return NumberToString(number, check_number_string_cache);
3248 }
3249
3250
3251 Map* Heap::MapForExternalArrayType(ExternalArrayType array_type) {
3252   return Map::cast(roots_[RootIndexForExternalArrayType(array_type)]);
3253 }
3254
3255
3256 Heap::RootListIndex Heap::RootIndexForExternalArrayType(
3257     ExternalArrayType array_type) {
3258   switch (array_type) {
3259     case kExternalByteArray:
3260       return kExternalByteArrayMapRootIndex;
3261     case kExternalUnsignedByteArray:
3262       return kExternalUnsignedByteArrayMapRootIndex;
3263     case kExternalShortArray:
3264       return kExternalShortArrayMapRootIndex;
3265     case kExternalUnsignedShortArray:
3266       return kExternalUnsignedShortArrayMapRootIndex;
3267     case kExternalIntArray:
3268       return kExternalIntArrayMapRootIndex;
3269     case kExternalUnsignedIntArray:
3270       return kExternalUnsignedIntArrayMapRootIndex;
3271     case kExternalFloatArray:
3272       return kExternalFloatArrayMapRootIndex;
3273     case kExternalDoubleArray:
3274       return kExternalDoubleArrayMapRootIndex;
3275     case kExternalPixelArray:
3276       return kExternalPixelArrayMapRootIndex;
3277     default:
3278       UNREACHABLE();
3279       return kUndefinedValueRootIndex;
3280   }
3281 }
3282
3283 Heap::RootListIndex Heap::RootIndexForEmptyExternalArray(
3284     ElementsKind elementsKind) {
3285   switch (elementsKind) {
3286     case EXTERNAL_BYTE_ELEMENTS:
3287       return kEmptyExternalByteArrayRootIndex;
3288     case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
3289       return kEmptyExternalUnsignedByteArrayRootIndex;
3290     case EXTERNAL_SHORT_ELEMENTS:
3291       return kEmptyExternalShortArrayRootIndex;
3292     case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
3293       return kEmptyExternalUnsignedShortArrayRootIndex;
3294     case EXTERNAL_INT_ELEMENTS:
3295       return kEmptyExternalIntArrayRootIndex;
3296     case EXTERNAL_UNSIGNED_INT_ELEMENTS:
3297       return kEmptyExternalUnsignedIntArrayRootIndex;
3298     case EXTERNAL_FLOAT_ELEMENTS:
3299       return kEmptyExternalFloatArrayRootIndex;
3300     case EXTERNAL_DOUBLE_ELEMENTS:
3301       return kEmptyExternalDoubleArrayRootIndex;
3302     case EXTERNAL_PIXEL_ELEMENTS:
3303       return kEmptyExternalPixelArrayRootIndex;
3304     default:
3305       UNREACHABLE();
3306       return kUndefinedValueRootIndex;
3307   }
3308 }
3309
3310 ExternalArray* Heap::EmptyExternalArrayForMap(Map* map) {
3311   return ExternalArray::cast(
3312       roots_[RootIndexForEmptyExternalArray(map->elements_kind())]);
3313 }
3314
3315
3316
3317
3318 MaybeObject* Heap::NumberFromDouble(double value, PretenureFlag pretenure) {
3319   // We need to distinguish the minus zero value and this cannot be
3320   // done after conversion to int. Doing this by comparing bit
3321   // patterns is faster than using fpclassify() et al.
3322   static const DoubleRepresentation minus_zero(-0.0);
3323
3324   DoubleRepresentation rep(value);
3325   if (rep.bits == minus_zero.bits) {
3326     return AllocateHeapNumber(-0.0, pretenure);
3327   }
3328
3329   int int_value = FastD2I(value);
3330   if (value == int_value && Smi::IsValid(int_value)) {
3331     return Smi::FromInt(int_value);
3332   }
3333
3334   // Materialize the value in the heap.
3335   return AllocateHeapNumber(value, pretenure);
3336 }
3337
3338
3339 MaybeObject* Heap::AllocateForeign(Address address, PretenureFlag pretenure) {
3340   // Statically ensure that it is safe to allocate foreigns in paged spaces.
3341   STATIC_ASSERT(Foreign::kSize <= Page::kMaxNonCodeHeapObjectSize);
3342   AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
3343   Foreign* result;
3344   MaybeObject* maybe_result = Allocate(foreign_map(), space);
3345   if (!maybe_result->To(&result)) return maybe_result;
3346   result->set_foreign_address(address);
3347   return result;
3348 }
3349
3350
3351 MaybeObject* Heap::AllocateSharedFunctionInfo(Object* name) {
3352   SharedFunctionInfo* share;
3353   MaybeObject* maybe = Allocate(shared_function_info_map(), OLD_POINTER_SPACE);
3354   if (!maybe->To<SharedFunctionInfo>(&share)) return maybe;
3355
3356   // Set pointer fields.
3357   share->set_name(name);
3358   Code* illegal = isolate_->builtins()->builtin(Builtins::kIllegal);
3359   share->set_code(illegal);
3360   share->set_optimized_code_map(Smi::FromInt(0));
3361   share->set_scope_info(ScopeInfo::Empty(isolate_));
3362   Code* construct_stub =
3363       isolate_->builtins()->builtin(Builtins::kJSConstructStubGeneric);
3364   share->set_construct_stub(construct_stub);
3365   share->set_instance_class_name(Object_string());
3366   share->set_function_data(undefined_value(), SKIP_WRITE_BARRIER);
3367   share->set_script(undefined_value(), SKIP_WRITE_BARRIER);
3368   share->set_debug_info(undefined_value(), SKIP_WRITE_BARRIER);
3369   share->set_inferred_name(empty_string(), SKIP_WRITE_BARRIER);
3370   share->set_initial_map(undefined_value(), SKIP_WRITE_BARRIER);
3371   share->set_this_property_assignments(undefined_value(), SKIP_WRITE_BARRIER);
3372   share->set_ast_node_count(0);
3373   share->set_stress_deopt_counter(FLAG_deopt_every_n_times);
3374   share->set_counters(0);
3375
3376   // Set integer fields (smi or int, depending on the architecture).
3377   share->set_length(0);
3378   share->set_formal_parameter_count(0);
3379   share->set_expected_nof_properties(0);
3380   share->set_num_literals(0);
3381   share->set_start_position_and_type(0);
3382   share->set_end_position(0);
3383   share->set_function_token_position(0);
3384   // All compiler hints default to false or 0.
3385   share->set_compiler_hints(0);
3386   share->set_this_property_assignments_count(0);
3387   share->set_opt_count(0);
3388
3389   return share;
3390 }
3391
3392
3393 MaybeObject* Heap::AllocateJSMessageObject(String* type,
3394                                            JSArray* arguments,
3395                                            int start_position,
3396                                            int end_position,
3397                                            Object* script,
3398                                            Object* stack_trace,
3399                                            Object* stack_frames) {
3400   Object* result;
3401   { MaybeObject* maybe_result = Allocate(message_object_map(), NEW_SPACE);
3402     if (!maybe_result->ToObject(&result)) return maybe_result;
3403   }
3404   JSMessageObject* message = JSMessageObject::cast(result);
3405   message->set_properties(Heap::empty_fixed_array(), SKIP_WRITE_BARRIER);
3406   message->initialize_elements();
3407   message->set_elements(Heap::empty_fixed_array(), SKIP_WRITE_BARRIER);
3408   message->set_type(type);
3409   message->set_arguments(arguments);
3410   message->set_start_position(start_position);
3411   message->set_end_position(end_position);
3412   message->set_script(script);
3413   message->set_stack_trace(stack_trace);
3414   message->set_stack_frames(stack_frames);
3415   return result;
3416 }
3417
3418
3419
3420 // Returns true for a character in a range.  Both limits are inclusive.
3421 static inline bool Between(uint32_t character, uint32_t from, uint32_t to) {
3422   // This makes use of the unsigned wraparound.
3423   return character - from <= to - from;
3424 }
3425
3426
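// Create a two-character string, preferring an existing internalized string
// from the string table.  If the table has no match (or the characters are
// digits, which hash differently), a fresh sequential one-byte or two-byte
// string is allocated instead.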
3427 MUST_USE_RESULT static inline MaybeObject* MakeOrFindTwoCharacterString(
3428     Heap* heap,
3429     uint16_t c1,
3430     uint16_t c2) {
3431   String* result;
3432   // Numeric strings have a different hash algorithm not known by
3433   // LookupTwoCharsStringIfExists, so we skip this step for such strings.
3434   if ((!Between(c1, '0', '9') || !Between(c2, '0', '9')) &&
3435       heap->string_table()->LookupTwoCharsStringIfExists(c1, c2, &result)) {
3436     return result;
3437   // Now that we know the length is 2, we might as well make use of that fact
3438   // when building the new string.
3439   } else if (static_cast<unsigned>(c1 | c2) <= String::kMaxOneByteCharCodeU) {
3440     // Both characters fit into a one-byte string; the single OR-based check
3441     ASSERT(IsPowerOf2(String::kMaxOneByteCharCodeU + 1));  // relies on this.
3442     Object* result;
3443     { MaybeObject* maybe_result = heap->AllocateRawOneByteString(2);
3444       if (!maybe_result->ToObject(&result)) return maybe_result;
3445     }
3446     uint8_t* dest = SeqOneByteString::cast(result)->GetChars();
3447     dest[0] = static_cast<uint8_t>(c1);
3448     dest[1] = static_cast<uint8_t>(c2);
3449     return result;
3450   } else {
3451     Object* result;
3452     { MaybeObject* maybe_result = heap->AllocateRawTwoByteString(2);
3453       if (!maybe_result->ToObject(&result)) return maybe_result;
3454     }
3455     uc16* dest = SeqTwoByteString::cast(result)->GetChars();
3456     dest[0] = c1;
3457     dest[1] = c2;
3458     return result;
3459   }
3460 }
3461
3462
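// Concatenate two strings.  Trivial cases reuse an existing string (empty
// operand, two-character result); results shorter than ConsString::kMinLength
// are copied into a flat sequential string; everything else becomes a
// ConsString pointing at both halves, using the one-byte cons map whenever
// the data is known to fit in one byte.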
3463 MaybeObject* Heap::AllocateConsString(String* first, String* second) {
3464   int first_length = first->length();
3465   if (first_length == 0) {
3466     return second;
3467   }
3468
3469   int second_length = second->length();
3470   if (second_length == 0) {
3471     return first;
3472   }
3473
3474   int length = first_length + second_length;
3475
3476   // Optimization for 2-byte strings often used as keys in a decompression
3477   // dictionary.  Check whether we already have the string in the string
3478   // table to prevent creation of many unnecessary strings.
3479   if (length == 2) {
3480     uint16_t c1 = first->Get(0);
3481     uint16_t c2 = second->Get(0);
3482     return MakeOrFindTwoCharacterString(this, c1, c2);
3483   }
3484
3485   bool first_is_one_byte = first->IsOneByteRepresentation();
3486   bool second_is_one_byte = second->IsOneByteRepresentation();
3487   bool is_one_byte = first_is_one_byte && second_is_one_byte;
3488   // Make sure that an out of memory exception is thrown if the length
3489   // of the new cons string is too large.
3490   if (length > String::kMaxLength || length < 0) {
3491     isolate()->context()->mark_out_of_memory();
3492     return Failure::OutOfMemoryException(0x4);
3493   }
3494
3495   bool is_one_byte_data_in_two_byte_string = false;
3496   if (!is_one_byte) {
3497     // At least one of the strings uses two-byte representation so we
3498     // can't use the fast case code for short ASCII strings below, but
3499     // we can try to save memory if all chars actually fit in ASCII.
3500     is_one_byte_data_in_two_byte_string =
3501         first->HasOnlyOneByteChars() && second->HasOnlyOneByteChars();
3502     if (is_one_byte_data_in_two_byte_string) {
3503       isolate_->counters()->string_add_runtime_ext_to_ascii()->Increment();
3504     }
3505   }
3506
3507   // If the resulting string is small make a flat string.
3508   if (length < ConsString::kMinLength) {
3509     // Note that neither of the two inputs can be a slice because:
3510     STATIC_ASSERT(ConsString::kMinLength <= SlicedString::kMinLength);
3511     ASSERT(first->IsFlat());
3512     ASSERT(second->IsFlat());
3513     if (is_one_byte) {
3514       Object* result;
3515       { MaybeObject* maybe_result = AllocateRawOneByteString(length);
3516         if (!maybe_result->ToObject(&result)) return maybe_result;
3517       }
3518       // Copy the characters into the new object.
3519       uint8_t* dest = SeqOneByteString::cast(result)->GetChars();
3520       // Copy first part.
3521       const uint8_t* src;
3522       if (first->IsExternalString()) {
3523         src = ExternalAsciiString::cast(first)->GetChars();
3524       } else {
3525         src = SeqOneByteString::cast(first)->GetChars();
3526       }
3527       for (int i = 0; i < first_length; i++) *dest++ = src[i];
3528       // Copy second part.
3529       if (second->IsExternalString()) {
3530         src = ExternalAsciiString::cast(second)->GetChars();
3531       } else {
3532         src = SeqOneByteString::cast(second)->GetChars();
3533       }
3534       for (int i = 0; i < second_length; i++) *dest++ = src[i];
3535       return result;
3536     } else {
3537       if (is_one_byte_data_in_two_byte_string) {
3538         Object* result;
3539         { MaybeObject* maybe_result = AllocateRawOneByteString(length);
3540           if (!maybe_result->ToObject(&result)) return maybe_result;
3541         }
3542         // Copy the characters into the new object.
3543         uint8_t* dest = SeqOneByteString::cast(result)->GetChars();
3544         String::WriteToFlat(first, dest, 0, first_length);
3545         String::WriteToFlat(second, dest + first_length, 0, second_length);
3546         isolate_->counters()->string_add_runtime_ext_to_ascii()->Increment();
3547         return result;
3548       }
3549
3550       Object* result;
3551       { MaybeObject* maybe_result = AllocateRawTwoByteString(length);
3552         if (!maybe_result->ToObject(&result)) return maybe_result;
3553       }
3554       // Copy the characters into the new object.
3555       uc16* dest = SeqTwoByteString::cast(result)->GetChars();
3556       String::WriteToFlat(first, dest, 0, first_length);
3557       String::WriteToFlat(second, dest + first_length, 0, second_length);
3558       return result;
3559     }
3560   }
3561
3562   Map* map = (is_one_byte || is_one_byte_data_in_two_byte_string) ?
3563       cons_ascii_string_map() : cons_string_map();
3564
3565   Object* result;
3566   { MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
3567     if (!maybe_result->ToObject(&result)) return maybe_result;
3568   }
3569
3570   AssertNoAllocation no_gc;
3571   ConsString* cons_string = ConsString::cast(result);
3572   WriteBarrierMode mode = cons_string->GetWriteBarrierMode(no_gc);
3573   cons_string->set_length(length);
3574   cons_string->set_hash_field(String::kEmptyHashField);
3575   cons_string->set_first(first, mode);
3576   cons_string->set_second(second, mode);
3577   return result;
3578 }
3579
3580
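// Create a substring of buffer in [start, end).  Lengths 0..2 are served from
// canonical strings and caches.  Longer substrings become SlicedStrings when
// string slices are enabled, the buffer is flat, the result is at least
// SlicedString::kMinLength long and not tenured; otherwise the characters are
// copied into a fresh sequential string.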
3581 MaybeObject* Heap::AllocateSubString(String* buffer,
3582                                      int start,
3583                                      int end,
3584                                      PretenureFlag pretenure) {
3585   int length = end - start;
3586   if (length <= 0) {
3587     return empty_string();
3588   } else if (length == 1) {
3589     return LookupSingleCharacterStringFromCode(buffer->Get(start));
3590   } else if (length == 2) {
3591     // Optimization for 2-byte strings often used as keys in a decompression
3592     // dictionary.  Check whether we already have the string in the string
3593     // table to prevent creation of many unnecessary strings.
3594     uint16_t c1 = buffer->Get(start);
3595     uint16_t c2 = buffer->Get(start + 1);
3596     return MakeOrFindTwoCharacterString(this, c1, c2);
3597   }
3598
3599   // Make an attempt to flatten the buffer to reduce access time.
3600   buffer = buffer->TryFlattenGetString();
3601
3602   if (!FLAG_string_slices ||
3603       !buffer->IsFlat() ||
3604       length < SlicedString::kMinLength ||
3605       pretenure == TENURED) {
3606     Object* result;
3607     // WriteToFlat takes care of the case when an indirect string has a
3608     // different encoding from its underlying string.  These encodings may
3609     // differ because of externalization.
3610     bool is_one_byte = buffer->IsOneByteRepresentation();
3611     { MaybeObject* maybe_result = is_one_byte
3612                                   ? AllocateRawOneByteString(length, pretenure)
3613                                   : AllocateRawTwoByteString(length, pretenure);
3614       if (!maybe_result->ToObject(&result)) return maybe_result;
3615     }
3616     String* string_result = String::cast(result);
3617     // Copy the characters into the new object.
3618     if (is_one_byte) {
3619       ASSERT(string_result->IsOneByteRepresentation());
3620       uint8_t* dest = SeqOneByteString::cast(string_result)->GetChars();
3621       String::WriteToFlat(buffer, dest, start, end);
3622     } else {
3623       ASSERT(string_result->IsTwoByteRepresentation());
3624       uc16* dest = SeqTwoByteString::cast(string_result)->GetChars();
3625       String::WriteToFlat(buffer, dest, start, end);
3626     }
3627     return result;
3628   }
3629
3630   ASSERT(buffer->IsFlat());
3631 #ifdef VERIFY_HEAP
3632   if (FLAG_verify_heap) {
3633     buffer->StringVerify();
3634   }
3635 #endif
3636
3637   Object* result;
3638   // When slicing an indirect string we use its encoding for a newly created
3639   // slice and don't check the encoding of the underlying string.  This is safe
3640   // even if the encodings are different because of externalization.  If an
3641   // indirect ASCII string is pointing to a two-byte string, the two-byte char
3642   // codes of the underlying string must still fit into ASCII (because
3643   // externalization must not change char codes).
3644   { Map* map = buffer->IsOneByteRepresentation()
3645                  ? sliced_ascii_string_map()
3646                  : sliced_string_map();
3647     MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
3648     if (!maybe_result->ToObject(&result)) return maybe_result;
3649   }
3650
3651   AssertNoAllocation no_gc;
3652   SlicedString* sliced_string = SlicedString::cast(result);
3653   sliced_string->set_length(length);
3654   sliced_string->set_hash_field(String::kEmptyHashField);
3655   if (buffer->IsConsString()) {
3656     ConsString* cons = ConsString::cast(buffer);
3657     ASSERT(cons->second()->length() == 0);
3658     sliced_string->set_parent(cons->first());
3659     sliced_string->set_offset(start);
3660   } else if (buffer->IsSlicedString()) {
3661     // Prevent nesting sliced strings.
3662     SlicedString* parent_slice = SlicedString::cast(buffer);
3663     sliced_string->set_parent(parent_slice->parent());
3664     sliced_string->set_offset(start + parent_slice->offset());
3665   } else {
3666     sliced_string->set_parent(buffer);
3667     sliced_string->set_offset(start);
3668   }
3669   ASSERT(sliced_string->parent()->IsSeqString() ||
3670          sliced_string->parent()->IsExternalString());
3671   return result;
3672 }
3673
3674
3675 MaybeObject* Heap::AllocateExternalStringFromAscii(
3676     const ExternalAsciiString::Resource* resource) {
3677   size_t length = resource->length();
3678   if (length > static_cast<size_t>(String::kMaxLength)) {
3679     isolate()->context()->mark_out_of_memory();
3680     return Failure::OutOfMemoryException(0x5);
3681   }
3682
3683   Map* map = external_ascii_string_map();
3684   Object* result;
3685   { MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
3686     if (!maybe_result->ToObject(&result)) return maybe_result;
3687   }
3688
3689   ExternalAsciiString* external_string = ExternalAsciiString::cast(result);
3690   external_string->set_length(static_cast<int>(length));
3691   external_string->set_hash_field(String::kEmptyHashField);
3692   external_string->set_resource(resource);
3693
3694   return result;
3695 }
3696
3697
3698 MaybeObject* Heap::AllocateExternalStringFromTwoByte(
3699     const ExternalTwoByteString::Resource* resource) {
3700   size_t length = resource->length();
3701   if (length > static_cast<size_t>(String::kMaxLength)) {
3702     isolate()->context()->mark_out_of_memory();
3703     return Failure::OutOfMemoryException(0x6);
3704   }
3705
3706   // For small strings we check whether the resource contains only
3707   // one-byte characters.  If so, we use a different string map.
3708   static const size_t kOneByteCheckLengthLimit = 32;
3709   bool is_one_byte = length <= kOneByteCheckLengthLimit &&
3710       String::IsOneByte(resource->data(), static_cast<int>(length));
3711   Map* map = is_one_byte ?
3712       external_string_with_one_byte_data_map() : external_string_map();
3713   Object* result;
3714   { MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
3715     if (!maybe_result->ToObject(&result)) return maybe_result;
3716   }
3717
3718   ExternalTwoByteString* external_string = ExternalTwoByteString::cast(result);
3719   external_string->set_length(static_cast<int>(length));
3720   external_string->set_hash_field(String::kEmptyHashField);
3721   external_string->set_resource(resource);
3722
3723   return result;
3724 }
3725
3726
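// One-byte character codes are served from (and added to) the
// single_character_string_cache as internalized strings; other codes get a
// fresh uncached two-byte string.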
3727 MaybeObject* Heap::LookupSingleCharacterStringFromCode(uint16_t code) {
3728   if (code <= String::kMaxOneByteCharCode) {
3729     Object* value = single_character_string_cache()->get(code);
3730     if (value != undefined_value()) return value;
3731
3732     uint8_t buffer[1];
3733     buffer[0] = static_cast<uint8_t>(code);
3734     Object* result;
3735     MaybeObject* maybe_result =
3736         InternalizeOneByteString(Vector<const uint8_t>(buffer, 1));
3737
3738     if (!maybe_result->ToObject(&result)) return maybe_result;
3739     single_character_string_cache()->set(code, result);
3740     return result;
3741   }
3742
3743   Object* result;
3744   { MaybeObject* maybe_result = AllocateRawTwoByteString(1);
3745     if (!maybe_result->ToObject(&result)) return maybe_result;
3746   }
3747   String* answer = String::cast(result);
3748   answer->Set(0, code);
3749   return answer;
3750 }
3751
3752
3753 MaybeObject* Heap::AllocateByteArray(int length, PretenureFlag pretenure) {
3754   if (length < 0 || length > ByteArray::kMaxLength) {
3755     return Failure::OutOfMemoryException(0x7);
3756   }
3757   if (pretenure == NOT_TENURED) {
3758     return AllocateByteArray(length);
3759   }
3760   int size = ByteArray::SizeFor(length);
3761   Object* result;
3762   { MaybeObject* maybe_result = (size <= Page::kMaxNonCodeHeapObjectSize)
3763                    ? old_data_space_->AllocateRaw(size)
3764                    : lo_space_->AllocateRaw(size, NOT_EXECUTABLE);
3765     if (!maybe_result->ToObject(&result)) return maybe_result;
3766   }
3767
3768   reinterpret_cast<ByteArray*>(result)->set_map_no_write_barrier(
3769       byte_array_map());
3770   reinterpret_cast<ByteArray*>(result)->set_length(length);
3771   return result;
3772 }
3773
3774
3775 MaybeObject* Heap::AllocateByteArray(int length) {
3776   if (length < 0 || length > ByteArray::kMaxLength) {
3777     return Failure::OutOfMemoryException(0x8);
3778   }
3779   int size = ByteArray::SizeFor(length);
3780   AllocationSpace space =
3781       (size > Page::kMaxNonCodeHeapObjectSize) ? LO_SPACE : NEW_SPACE;
3782   Object* result;
3783   { MaybeObject* maybe_result = AllocateRaw(size, space, OLD_DATA_SPACE);
3784     if (!maybe_result->ToObject(&result)) return maybe_result;
3785   }
3786
3787   reinterpret_cast<ByteArray*>(result)->set_map_no_write_barrier(
3788       byte_array_map());
3789   reinterpret_cast<ByteArray*>(result)->set_length(length);
3790   return result;
3791 }
3792
3793
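// Overwrite a dead memory region with a filler object (one- or two-pointer
// filler, or a FreeSpace carrying its size) so that the heap remains
// iterable.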
3794 void Heap::CreateFillerObjectAt(Address addr, int size) {
3795   if (size == 0) return;
3796   HeapObject* filler = HeapObject::FromAddress(addr);
3797   if (size == kPointerSize) {
3798     filler->set_map_no_write_barrier(one_pointer_filler_map());
3799   } else if (size == 2 * kPointerSize) {
3800     filler->set_map_no_write_barrier(two_pointer_filler_map());
3801   } else {
3802     filler->set_map_no_write_barrier(free_space_map());
3803     FreeSpace::cast(filler)->set_size(size);
3804   }
3805 }
3806
3807
3808 MaybeObject* Heap::AllocateExternalArray(int length,
3809                                          ExternalArrayType array_type,
3810                                          void* external_pointer,
3811                                          PretenureFlag pretenure) {
3812   AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
3813   Object* result;
3814   { MaybeObject* maybe_result = AllocateRaw(ExternalArray::kAlignedSize,
3815                                             space,
3816                                             OLD_DATA_SPACE);
3817     if (!maybe_result->ToObject(&result)) return maybe_result;
3818   }
3819
3820   reinterpret_cast<ExternalArray*>(result)->set_map_no_write_barrier(
3821       MapForExternalArrayType(array_type));
3822   reinterpret_cast<ExternalArray*>(result)->set_length(length);
3823   reinterpret_cast<ExternalArray*>(result)->set_external_pointer(
3824       external_pointer);
3825
3826   return result;
3827 }
3828
3829
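// Create a Code object from the assembler output in desc.  The relocation
// info is allocated first so a failed allocation cannot leave a partially
// initialized Code object behind; large or immovable code goes to the large
// object space.  After the header fields are set up, the generated
// instructions are migrated into place via Code::CopyFrom().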
3830 MaybeObject* Heap::CreateCode(const CodeDesc& desc,
3831                               Code::Flags flags,
3832                               Handle<Object> self_reference,
3833                               bool immovable,
3834                               bool crankshafted) {
3835   // Allocate ByteArray before the Code object, so that we do not risk
3836   // leaving an uninitialized Code object (and breaking the heap).
3837   ByteArray* reloc_info;
3838   MaybeObject* maybe_reloc_info = AllocateByteArray(desc.reloc_size, TENURED);
3839   if (!maybe_reloc_info->To(&reloc_info)) return maybe_reloc_info;
3840
3841   // Compute size.
3842   int body_size = RoundUp(desc.instr_size, kObjectAlignment);
3843   int obj_size = Code::SizeFor(body_size);
3844   ASSERT(IsAligned(static_cast<intptr_t>(obj_size), kCodeAlignment));
3845   MaybeObject* maybe_result;
3846   // Large code objects and code objects which should stay at a fixed address
3847   // are allocated in large object space.
3848   HeapObject* result;
3849   bool force_lo_space = obj_size > code_space()->AreaSize();
3850   if (force_lo_space) {
3851     maybe_result = lo_space_->AllocateRaw(obj_size, EXECUTABLE);
3852   } else {
3853     maybe_result = code_space_->AllocateRaw(obj_size);
3854   }
3855   if (!maybe_result->To<HeapObject>(&result)) return maybe_result;
3856
3857   if (immovable && !force_lo_space &&
3858       // Objects on the first page of each space are never moved.
3859       !code_space_->FirstPage()->Contains(result->address())) {
3860     // Discard the first code allocation, which was on a page where it could be
3861     // moved.
3862     CreateFillerObjectAt(result->address(), obj_size);
3863     maybe_result = lo_space_->AllocateRaw(obj_size, EXECUTABLE);
3864     if (!maybe_result->To<HeapObject>(&result)) return maybe_result;
3865   }
3866
3867   // Initialize the object
3868   result->set_map_no_write_barrier(code_map());
3869   Code* code = Code::cast(result);
3870   ASSERT(!isolate_->code_range()->exists() ||
3871       isolate_->code_range()->contains(code->address()));
3872   code->set_instruction_size(desc.instr_size);
3873   code->set_relocation_info(reloc_info);
3874   code->set_flags(flags);
3875   if (code->is_call_stub() || code->is_keyed_call_stub()) {
3876     code->set_check_type(RECEIVER_MAP_CHECK);
3877   }
3878   code->set_is_crankshafted(crankshafted);
3879   code->set_deoptimization_data(empty_fixed_array(), SKIP_WRITE_BARRIER);
3880   code->InitializeTypeFeedbackInfoNoWriteBarrier(undefined_value());
3881   code->set_handler_table(empty_fixed_array(), SKIP_WRITE_BARRIER);
3882   code->set_gc_metadata(Smi::FromInt(0));
3883   code->set_ic_age(global_ic_age_);
3884   code->set_prologue_offset(kPrologueOffsetNotSet);
3885   if (code->kind() == Code::OPTIMIZED_FUNCTION) {
3886     code->set_marked_for_deoptimization(false);
3887   }
3888   // Allow self-references to the created code object by patching the handle to
3889   // point to the newly allocated Code object.
3890   if (!self_reference.is_null()) {
3891     *(self_reference.location()) = code;
3892   }
3893   // Migrate generated code.
3894   // The generated code can contain Object** values (typically from handles)
3895   // that are dereferenced during the copy to point directly to the actual heap
3896   // objects. These pointers can include references to the code object itself,
3897   // through the self_reference parameter.
3898   code->CopyFrom(desc);
3899
3900 #ifdef VERIFY_HEAP
3901   if (FLAG_verify_heap) {
3902     code->Verify();
3903   }
3904 #endif
3905   return code;
3906 }
3907
3908
3909 MaybeObject* Heap::CopyCode(Code* code) {
3910   // Allocate an object the same size as the code object.
3911   int obj_size = code->Size();
3912   MaybeObject* maybe_result;
3913   if (obj_size > code_space()->AreaSize()) {
3914     maybe_result = lo_space_->AllocateRaw(obj_size, EXECUTABLE);
3915   } else {
3916     maybe_result = code_space_->AllocateRaw(obj_size);
3917   }
3918
3919   Object* result;
3920   if (!maybe_result->ToObject(&result)) return maybe_result;
3921
3922   // Copy code object.
3923   Address old_addr = code->address();
3924   Address new_addr = reinterpret_cast<HeapObject*>(result)->address();
3925   CopyBlock(new_addr, old_addr, obj_size);
3926   // Relocate the copy.
3927   Code* new_code = Code::cast(result);
3928   ASSERT(!isolate_->code_range()->exists() ||
3929       isolate_->code_range()->contains(code->address()));
3930   new_code->Relocate(new_addr - old_addr);
3931   return new_code;
3932 }
3933
3934
3935 MaybeObject* Heap::CopyCode(Code* code, Vector<byte> reloc_info) {
3936   // Allocate ByteArray before the Code object, so that we do not risk
3937   // leaving an uninitialized Code object (and breaking the heap).
3938   Object* reloc_info_array;
3939   { MaybeObject* maybe_reloc_info_array =
3940         AllocateByteArray(reloc_info.length(), TENURED);
3941     if (!maybe_reloc_info_array->ToObject(&reloc_info_array)) {
3942       return maybe_reloc_info_array;
3943     }
3944   }
3945
3946   int new_body_size = RoundUp(code->instruction_size(), kObjectAlignment);
3947
3948   int new_obj_size = Code::SizeFor(new_body_size);
3949
3950   Address old_addr = code->address();
3951
3952   size_t relocation_offset =
3953       static_cast<size_t>(code->instruction_end() - old_addr);
3954
3955   MaybeObject* maybe_result;
3956   if (new_obj_size > code_space()->AreaSize()) {
3957     maybe_result = lo_space_->AllocateRaw(new_obj_size, EXECUTABLE);
3958   } else {
3959     maybe_result = code_space_->AllocateRaw(new_obj_size);
3960   }
3961
3962   Object* result;
3963   if (!maybe_result->ToObject(&result)) return maybe_result;
3964
3965   // Copy code object.
3966   Address new_addr = reinterpret_cast<HeapObject*>(result)->address();
3967
3968   // Copy header and instructions.
3969   CopyBytes(new_addr, old_addr, relocation_offset);
3970
3971   Code* new_code = Code::cast(result);
3972   new_code->set_relocation_info(ByteArray::cast(reloc_info_array));
3973
3974   // Copy patched rinfo.
3975   CopyBytes(new_code->relocation_start(),
3976             reloc_info.start(),
3977             static_cast<size_t>(reloc_info.length()));
3978
3979   // Relocate the copy.
3980   ASSERT(!isolate_->code_range()->exists() ||
3981       isolate_->code_range()->contains(code->address()));
3982   new_code->Relocate(new_addr - old_addr);
3983
3984 #ifdef VERIFY_HEAP
3985   if (FLAG_verify_heap) {
3986     code->Verify();
3987   }
3988 #endif
3989   return new_code;
3990 }
3991
3992
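// Like Allocate() below, but reserves AllocationSiteInfo::kSize extra bytes
// directly behind the object and initializes them as an AllocationSiteInfo
// holding the given payload.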
3993 MaybeObject* Heap::AllocateWithAllocationSite(Map* map, AllocationSpace space,
3994     Handle<Object> allocation_site_info_payload) {
3995   ASSERT(gc_state_ == NOT_IN_GC);
3996   ASSERT(map->instance_type() != MAP_TYPE);
3997   // If allocation failures are disallowed, we may allocate in a different
3998   // space when new space is full and the object is not a large object.
3999   AllocationSpace retry_space =
4000       (space != NEW_SPACE) ? space : TargetSpaceId(map->instance_type());
4001   int size = map->instance_size() + AllocationSiteInfo::kSize;
4002   Object* result;
4003   MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
4004   if (!maybe_result->ToObject(&result)) return maybe_result;
4005   // No need for write barrier since object is white and map is in old space.
4006   HeapObject::cast(result)->set_map_no_write_barrier(map);
4007   AllocationSiteInfo* alloc_info = reinterpret_cast<AllocationSiteInfo*>(
4008       reinterpret_cast<Address>(result) + map->instance_size());
4009   alloc_info->set_map_no_write_barrier(allocation_site_info_map());
4010   alloc_info->set_payload(*allocation_site_info_payload, SKIP_WRITE_BARRIER);
4011   return result;
4012 }
4013
4014
4015 MaybeObject* Heap::Allocate(Map* map, AllocationSpace space) {
4016   ASSERT(gc_state_ == NOT_IN_GC);
4017   ASSERT(map->instance_type() != MAP_TYPE);
4018   // If allocation failures are disallowed, we may allocate in a different
4019   // space when new space is full and the object is not a large object.
4020   AllocationSpace retry_space =
4021       (space != NEW_SPACE) ? space : TargetSpaceId(map->instance_type());
4022   int size = map->instance_size();
4023   Object* result;
4024   MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
4025   if (!maybe_result->ToObject(&result)) return maybe_result;
4026   // No need for write barrier since object is white and map is in old space.
4027   HeapObject::cast(result)->set_map_no_write_barrier(map);
4028   return result;
4029 }
4030
4031
4032 void Heap::InitializeFunction(JSFunction* function,
4033                               SharedFunctionInfo* shared,
4034                               Object* prototype) {
4035   ASSERT(!prototype->IsMap());
4036   function->initialize_properties();
4037   function->initialize_elements();
4038   function->set_shared(shared);
4039   function->set_code(shared->code());
4040   function->set_prototype_or_initial_map(prototype);
4041   function->set_context(undefined_value());
4042   function->set_literals_or_bindings(empty_fixed_array());
4043   function->set_next_function_link(undefined_value());
4044 }
4045
4046
4047 MaybeObject* Heap::AllocateFunctionPrototype(JSFunction* function) {
4048   // Make sure to use globals from the function's context, since the function
4049   // can be from a different context.
4050   Context* native_context = function->context()->native_context();
4051   Map* new_map;
4052   if (function->shared()->is_generator()) {
4053     // Generator prototypes can share maps since they don't have "constructor"
4054     // properties.
4055     new_map = native_context->generator_object_prototype_map();
4056   } else {
4057     // Each function prototype gets a fresh map to avoid unwanted sharing of
4058     // maps between prototypes of different constructors.
4059     JSFunction* object_function = native_context->object_function();
4060     ASSERT(object_function->has_initial_map());
4061     MaybeObject* maybe_map = object_function->initial_map()->Copy();
4062     if (!maybe_map->To(&new_map)) return maybe_map;
4063   }
4064
4065   Object* prototype;
4066   MaybeObject* maybe_prototype = AllocateJSObjectFromMap(new_map);
4067   if (!maybe_prototype->ToObject(&prototype)) return maybe_prototype;
4068
4069   if (!function->shared()->is_generator()) {
4070     MaybeObject* maybe_failure =
4071         JSObject::cast(prototype)->SetLocalPropertyIgnoreAttributes(
4072             constructor_string(), function, DONT_ENUM);
4073     if (maybe_failure->IsFailure()) return maybe_failure;
4074   }
4075
4076   return prototype;
4077 }
4078
4079
4080 MaybeObject* Heap::AllocateFunction(Map* function_map,
4081                                     SharedFunctionInfo* shared,
4082                                     Object* prototype,
4083                                     PretenureFlag pretenure) {
4084   AllocationSpace space =
4085       (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
4086   Object* result;
4087   { MaybeObject* maybe_result = Allocate(function_map, space);
4088     if (!maybe_result->ToObject(&result)) return maybe_result;
4089   }
4090   InitializeFunction(JSFunction::cast(result), shared, prototype);
4091   return result;
4092 }
4093
4094
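// Arguments objects are cloned from the (strict or classic mode) arguments
// boilerplate in the current native context; only the length property, and
// for classic mode the callee property, are written afterwards.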
4095 MaybeObject* Heap::AllocateArgumentsObject(Object* callee, int length) {
4096   // To get fast allocation and map sharing for arguments objects we
4097   // allocate them based on an arguments boilerplate.
4098
4099   JSObject* boilerplate;
4100   int arguments_object_size;
4101   bool strict_mode_callee = callee->IsJSFunction() &&
4102       !JSFunction::cast(callee)->shared()->is_classic_mode();
4103   if (strict_mode_callee) {
4104     boilerplate =
4105         isolate()->context()->native_context()->
4106             strict_mode_arguments_boilerplate();
4107     arguments_object_size = kArgumentsObjectSizeStrict;
4108   } else {
4109     boilerplate =
4110         isolate()->context()->native_context()->arguments_boilerplate();
4111     arguments_object_size = kArgumentsObjectSize;
4112   }
4113
4114   // This calls Copy directly rather than using Heap::AllocateRaw so we
4115   // duplicate the check here.
4116   ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
4117
4118   // Check that the size of the boilerplate matches our
4119   // expectations. The ArgumentsAccessStub::GenerateNewObject relies
4120   // on the size being a known constant.
4121   ASSERT(arguments_object_size == boilerplate->map()->instance_size());
4122
4123   // Do the allocation.
4124   Object* result;
4125   { MaybeObject* maybe_result =
4126         AllocateRaw(arguments_object_size, NEW_SPACE, OLD_POINTER_SPACE);
4127     if (!maybe_result->ToObject(&result)) return maybe_result;
4128   }
4129
4130   // Copy the content. The arguments boilerplate doesn't have any
4131   // fields that point to new space so it's safe to skip the write
4132   // barrier here.
4133   CopyBlock(HeapObject::cast(result)->address(),
4134             boilerplate->address(),
4135             JSObject::kHeaderSize);
4136
4137   // Set the length property.
4138   JSObject::cast(result)->InObjectPropertyAtPut(kArgumentsLengthIndex,
4139                                                 Smi::FromInt(length),
4140                                                 SKIP_WRITE_BARRIER);
4141   // Set the callee property for non-strict mode arguments object only.
4142   if (!strict_mode_callee) {
4143     JSObject::cast(result)->InObjectPropertyAtPut(kArgumentsCalleeIndex,
4144                                                   callee);
4145   }
4146
4147   // Check the state of the object
4148   ASSERT(JSObject::cast(result)->HasFastProperties());
4149   ASSERT(JSObject::cast(result)->HasFastObjectElements());
4150
4151   return result;
4152 }
4153
4154
4155 static bool HasDuplicates(DescriptorArray* descriptors) {
4156   int count = descriptors->number_of_descriptors();
4157   if (count > 1) {
4158     Name* prev_key = descriptors->GetKey(0);
4159     for (int i = 1; i != count; i++) {
4160       Name* current_key = descriptors->GetKey(i);
4161       if (prev_key == current_key) return true;
4162       prev_key = current_key;
4163     }
4164   }
4165   return false;
4166 }
4167
4168
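// Build the initial map for objects constructed by fun: compute the instance
// size and in-object property count from the shared function info, attach a
// (possibly freshly allocated) prototype, and, if an inline constructor can
// be generated, pre-populate field descriptors for the function's simple
// this-property assignments.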
4169 MaybeObject* Heap::AllocateInitialMap(JSFunction* fun) {
4170   ASSERT(!fun->has_initial_map());
4171
4172   // First create a new map with the size and number of in-object properties
4173   // suggested by the function.
4174   InstanceType instance_type;
4175   int instance_size;
4176   int in_object_properties;
4177   if (fun->shared()->is_generator()) {
4178     instance_type = JS_GENERATOR_OBJECT_TYPE;
4179     instance_size = JSGeneratorObject::kSize;
4180     in_object_properties = 0;
4181   } else {
4182     instance_type = JS_OBJECT_TYPE;
4183     instance_size = fun->shared()->CalculateInstanceSize();
4184     in_object_properties = fun->shared()->CalculateInObjectProperties();
4185   }
4186   Map* map;
4187   MaybeObject* maybe_map = AllocateMap(instance_type, instance_size);
4188   if (!maybe_map->To(&map)) return maybe_map;
4189
4190   // Fetch or allocate prototype.
4191   Object* prototype;
4192   if (fun->has_instance_prototype()) {
4193     prototype = fun->instance_prototype();
4194   } else {
4195     MaybeObject* maybe_prototype = AllocateFunctionPrototype(fun);
4196     if (!maybe_prototype->To(&prototype)) return maybe_prototype;
4197   }
4198   map->set_inobject_properties(in_object_properties);
4199   map->set_unused_property_fields(in_object_properties);
4200   map->set_prototype(prototype);
4201   ASSERT(map->has_fast_object_elements());
4202
4203   // If the function has only simple this property assignments, add
4204   // field descriptors for these to the initial map as the object
4205   // cannot be constructed without having these properties.  Guard by
4206   // the inline_new flag so we only change the map if we generate a
4207   // specialized construct stub.
4208   ASSERT(in_object_properties <= Map::kMaxPreAllocatedPropertyFields);
4209   if (!fun->shared()->is_generator() &&
4210       fun->shared()->CanGenerateInlineConstructor(prototype)) {
4211     int count = fun->shared()->this_property_assignments_count();
4212     if (count > in_object_properties) {
4213       // Inline constructor can only handle inobject properties.
4214       fun->shared()->ForbidInlineConstructor();
4215     } else {
4216       DescriptorArray* descriptors;
4217       MaybeObject* maybe_descriptors = DescriptorArray::Allocate(count);
4218       if (!maybe_descriptors->To(&descriptors)) return maybe_descriptors;
4219
4220       DescriptorArray::WhitenessWitness witness(descriptors);
4221       for (int i = 0; i < count; i++) {
4222         String* name = fun->shared()->GetThisPropertyAssignmentName(i);
4223         ASSERT(name->IsInternalizedString());
4224         // TODO(verwaest): Since we cannot update the boilerplate's map yet,
4225         // initialize to the worst case.
4226         FieldDescriptor field(name, i, NONE, Representation::Tagged());
4227         descriptors->Set(i, &field, witness);
4228       }
4229       descriptors->Sort();
4230
4231       // The descriptors may contain duplicates because the compiler does not
4232       // guarantee the uniqueness of property names (it would have required
4233       // quadratic time). Once the descriptors are sorted we can check for
4234       // duplicates in linear time.
4235       if (HasDuplicates(descriptors)) {
4236         fun->shared()->ForbidInlineConstructor();
4237       } else {
4238         map->InitializeDescriptors(descriptors);
4239         map->set_pre_allocated_property_fields(count);
4240         map->set_unused_property_fields(in_object_properties - count);
4241       }
4242     }
4243   }
4244
4245   if (!fun->shared()->is_generator()) {
4246     fun->shared()->StartInobjectSlackTracking(map);
4247   }
4248
4249   return map;
4250 }
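// Editor's illustrative sketch (not part of the original source): how the
// inline constructor path above plays out for a hypothetical script function.
//
//   function Point(x, y) { this.x = x; this.y = y; }
//
// With inline_new enabled, CanGenerateInlineConstructor() can succeed,
// this_property_assignments_count() is 2, and the initial map receives two
// in-object field descriptors ("x" and "y"). By contrast, a constructor like
//
//   function C(v) { this.a = v; this.a = v + 1; }
//
// yields the key "a" twice; after Sort(), HasDuplicates() finds the repeated
// key in linear time and ForbidInlineConstructor() is called instead.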
4251
4252
4253 void Heap::InitializeJSObjectFromMap(JSObject* obj,
4254                                      FixedArray* properties,
4255                                      Map* map) {
4256   obj->set_properties(properties);
4257   obj->initialize_elements();
4258   // TODO(1240798): Initialize the object's body using valid initial values
4259   // according to the object's initial map.  For example, if the map's
4260   // instance type is JS_ARRAY_TYPE, the length field should be initialized
4261   // to a number (e.g. Smi::FromInt(0)) and the elements initialized to a
4262   // fixed array (e.g. Heap::empty_fixed_array()).  Currently, the object
4263   // verification code has to cope with (temporarily) invalid objects.  See
4264   // for example, JSArray::JSArrayVerify.
4265   Object* filler;
4266   // We cannot always fill with one_pointer_filler_map because objects
4267   // created from API functions expect their internal fields to be initialized
4268   // with undefined_value.
4269   // Pre-allocated fields need to be initialized with undefined_value as well
4270   // so that object accesses before the constructor completes (e.g. in the
4271   // debugger) will not cause a crash.
4272   if (map->constructor()->IsJSFunction() &&
4273       JSFunction::cast(map->constructor())->shared()->
4274           IsInobjectSlackTrackingInProgress()) {
4275     // We might want to shrink the object later.
4276     ASSERT(obj->GetInternalFieldCount() == 0);
4277     filler = Heap::one_pointer_filler_map();
4278   } else {
4279     filler = Heap::undefined_value();
4280   }
4281   obj->InitializeBody(map, Heap::undefined_value(), filler);
4282 }
4283
4284
4285 MaybeObject* Heap::AllocateJSObjectFromMap(Map* map, PretenureFlag pretenure) {
4286   // JSFunctions should be allocated using AllocateFunction to be
4287   // properly initialized.
4288   ASSERT(map->instance_type() != JS_FUNCTION_TYPE);
4289
4290   // Both types of global objects should be allocated using
4291   // AllocateGlobalObject to be properly initialized.
4292   ASSERT(map->instance_type() != JS_GLOBAL_OBJECT_TYPE);
4293   ASSERT(map->instance_type() != JS_BUILTINS_OBJECT_TYPE);
4294
4295   // Allocate the backing storage for the properties.
4296   int prop_size =
4297       map->pre_allocated_property_fields() +
4298       map->unused_property_fields() -
4299       map->inobject_properties();
4300   ASSERT(prop_size >= 0);
4301   Object* properties;
4302   { MaybeObject* maybe_properties = AllocateFixedArray(prop_size, pretenure);
4303     if (!maybe_properties->ToObject(&properties)) return maybe_properties;
4304   }
4305
4306   // Allocate the JSObject.
4307   AllocationSpace space =
4308       (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
4309   if (map->instance_size() > Page::kMaxNonCodeHeapObjectSize) space = LO_SPACE;
4310   Object* obj;
4311   MaybeObject* maybe_obj = Allocate(map, space);
4312   if (!maybe_obj->To(&obj)) return maybe_obj;
4313
4314   // Initialize the JSObject.
4315   InitializeJSObjectFromMap(JSObject::cast(obj),
4316                             FixedArray::cast(properties),
4317                             map);
4318   ASSERT(JSObject::cast(obj)->HasFastElements() ||
4319          JSObject::cast(obj)->HasExternalArrayElements());
4320   return obj;
4321 }
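// Worked example for the property backing store computed above (illustrative
// numbers only):
//
//   prop_size = pre_allocated_property_fields()
//             + unused_property_fields()
//             - inobject_properties()
//
// For a freshly built initial map, pre-allocated plus unused fields equal the
// in-object slot count, so prop_size is 0 and the allocation degenerates to
// the empty fixed array. A map with, say, 2 pre-allocated fields, 3 unused
// fields and 4 in-object slots would instead request a 1-element array.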
4322
4323
4324 MaybeObject* Heap::AllocateJSObjectFromMapWithAllocationSite(Map* map,
4325     Handle<Object> allocation_site_info_payload) {
4326   // JSFunctions should be allocated using AllocateFunction to be
4327   // properly initialized.
4328   ASSERT(map->instance_type() != JS_FUNCTION_TYPE);
4329
4330   // Both types of global objects should be allocated using
4331   // AllocateGlobalObject to be properly initialized.
4332   ASSERT(map->instance_type() != JS_GLOBAL_OBJECT_TYPE);
4333   ASSERT(map->instance_type() != JS_BUILTINS_OBJECT_TYPE);
4334
4335   // Allocate the backing storage for the properties.
4336   int prop_size =
4337       map->pre_allocated_property_fields() +
4338       map->unused_property_fields() -
4339       map->inobject_properties();
4340   ASSERT(prop_size >= 0);
4341   Object* properties;
4342   { MaybeObject* maybe_properties = AllocateFixedArray(prop_size);
4343     if (!maybe_properties->ToObject(&properties)) return maybe_properties;
4344   }
4345
4346   // Allocate the JSObject.
4347   AllocationSpace space = NEW_SPACE;
4348   if (map->instance_size() > Page::kMaxNonCodeHeapObjectSize) space = LO_SPACE;
4349   Object* obj;
4350   MaybeObject* maybe_obj = AllocateWithAllocationSite(map, space,
4351       allocation_site_info_payload);
4352   if (!maybe_obj->To(&obj)) return maybe_obj;
4353
4354   // Initialize the JSObject.
4355   InitializeJSObjectFromMap(JSObject::cast(obj),
4356                             FixedArray::cast(properties),
4357                             map);
4358   ASSERT(JSObject::cast(obj)->HasFastElements());
4359   return obj;
4360 }
4361
4362
4363 MaybeObject* Heap::AllocateJSObject(JSFunction* constructor,
4364                                     PretenureFlag pretenure) {
4365   // Allocate the initial map if absent.
4366   if (!constructor->has_initial_map()) {
4367     Object* initial_map;
4368     { MaybeObject* maybe_initial_map = AllocateInitialMap(constructor);
4369       if (!maybe_initial_map->ToObject(&initial_map)) return maybe_initial_map;
4370     }
4371     constructor->set_initial_map(Map::cast(initial_map));
4372     Map::cast(initial_map)->set_constructor(constructor);
4373   }
4374   // Allocate the object based on the constructor's initial map.
4375   MaybeObject* result = AllocateJSObjectFromMap(
4376       constructor->initial_map(), pretenure);
4377 #ifdef DEBUG
4378   // Make sure result is NOT a global object if valid.
4379   Object* non_failure;
4380   ASSERT(!result->ToObject(&non_failure) || !non_failure->IsGlobalObject());
4381 #endif
4382   return result;
4383 }
4384
4385
4386 MaybeObject* Heap::AllocateJSObjectWithAllocationSite(JSFunction* constructor,
4387     Handle<Object> allocation_site_info_payload) {
4388   // Allocate the initial map if absent.
4389   if (!constructor->has_initial_map()) {
4390     Object* initial_map;
4391     { MaybeObject* maybe_initial_map = AllocateInitialMap(constructor);
4392       if (!maybe_initial_map->ToObject(&initial_map)) return maybe_initial_map;
4393     }
4394     constructor->set_initial_map(Map::cast(initial_map));
4395     Map::cast(initial_map)->set_constructor(constructor);
4396   }
4397   // Allocate the object based on the constructor's initial map, or on the
4398   // elements kind advice carried in the allocation site payload.
4399   Map* initial_map = constructor->initial_map();
4400
4401   JSGlobalPropertyCell* cell = JSGlobalPropertyCell::cast(
4402       *allocation_site_info_payload);
4403   Smi* smi = Smi::cast(cell->value());
4404   ElementsKind to_kind = static_cast<ElementsKind>(smi->value());
4405   AllocationSiteMode mode = TRACK_ALLOCATION_SITE;
4406   if (to_kind != initial_map->elements_kind()) {
4407     MaybeObject* maybe_new_map = initial_map->AsElementsKind(to_kind);
4408     if (!maybe_new_map->To(&initial_map)) return maybe_new_map;
4409     // Possibly alter the mode, since we found an updated elements kind
4410     // in the type info cell.
4411     mode = AllocationSiteInfo::GetMode(to_kind);
4412   }
4413
4414   MaybeObject* result;
4415   if (mode == TRACK_ALLOCATION_SITE) {
4416     result = AllocateJSObjectFromMapWithAllocationSite(initial_map,
4417         allocation_site_info_payload);
4418   } else {
4419     result = AllocateJSObjectFromMap(initial_map, NOT_TENURED);
4420   }
4421 #ifdef DEBUG
4422   // Make sure result is NOT a global object if valid.
4423   Object* non_failure;
4424   ASSERT(!result->ToObject(&non_failure) || !non_failure->IsGlobalObject());
4425 #endif
4426   return result;
4427 }
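// Sketch of the advice path above, with hypothetical values: if the type info
// cell passed as allocation_site_info_payload holds
// Smi::FromInt(FAST_DOUBLE_ELEMENTS) while the constructor's initial map uses
// FAST_SMI_ELEMENTS, the map is transitioned via AsElementsKind() and
// AllocationSiteInfo::GetMode() then decides whether the allocation still
// needs to track the site.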
4428
4429
4430 MaybeObject* Heap::AllocateJSGeneratorObject(JSFunction* function) {
4431   ASSERT(function->shared()->is_generator());
4432   Map* map;
4433   if (function->has_initial_map()) {
4434     map = function->initial_map();
4435   } else {
4436     // Allocate the initial map if absent.
4437     MaybeObject* maybe_map = AllocateInitialMap(function);
4438     if (!maybe_map->To(&map)) return maybe_map;
4439     function->set_initial_map(map);
4440     map->set_constructor(function);
4441   }
4442   ASSERT(map->instance_type() == JS_GENERATOR_OBJECT_TYPE);
4443   return AllocateJSObjectFromMap(map);
4444 }
4445
4446
4447 MaybeObject* Heap::AllocateJSModule(Context* context, ScopeInfo* scope_info) {
4448   // Allocate a fresh map. Modules do not have a prototype.
4449   Map* map;
4450   MaybeObject* maybe_map = AllocateMap(JS_MODULE_TYPE, JSModule::kSize);
4451   if (!maybe_map->To(&map)) return maybe_map;
4452   // Allocate the object based on the map.
4453   JSModule* module;
4454   MaybeObject* maybe_module = AllocateJSObjectFromMap(map, TENURED);
4455   if (!maybe_module->To(&module)) return maybe_module;
4456   module->set_context(context);
4457   module->set_scope_info(scope_info);
4458   return module;
4459 }
4460
4461
4462 MaybeObject* Heap::AllocateJSArrayAndStorage(
4463     ElementsKind elements_kind,
4464     int length,
4465     int capacity,
4466     ArrayStorageAllocationMode mode,
4467     PretenureFlag pretenure) {
4468   MaybeObject* maybe_array = AllocateJSArray(elements_kind, pretenure);
4469   JSArray* array;
4470   if (!maybe_array->To(&array)) return maybe_array;
4471
4472   // TODO(mvstanton): this body of code is duplicated in AllocateJSArrayStorage
4473   // for performance reasons.
4474   ASSERT(capacity >= length);
4475
4476   if (capacity == 0) {
4477     array->set_length(Smi::FromInt(0));
4478     array->set_elements(empty_fixed_array());
4479     return array;
4480   }
4481
4482   FixedArrayBase* elms;
4483   MaybeObject* maybe_elms = NULL;
4484   if (IsFastDoubleElementsKind(elements_kind)) {
4485     if (mode == DONT_INITIALIZE_ARRAY_ELEMENTS) {
4486       maybe_elms = AllocateUninitializedFixedDoubleArray(capacity);
4487     } else {
4488       ASSERT(mode == INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
4489       maybe_elms = AllocateFixedDoubleArrayWithHoles(capacity);
4490     }
4491   } else {
4492     ASSERT(IsFastSmiOrObjectElementsKind(elements_kind));
4493     if (mode == DONT_INITIALIZE_ARRAY_ELEMENTS) {
4494       maybe_elms = AllocateUninitializedFixedArray(capacity);
4495     } else {
4496       ASSERT(mode == INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
4497       maybe_elms = AllocateFixedArrayWithHoles(capacity);
4498     }
4499   }
4500   if (!maybe_elms->To(&elms)) return maybe_elms;
4501
4502   array->set_elements(elms);
4503   array->set_length(Smi::FromInt(length));
4504   return array;
4505 }
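// Usage sketch (hypothetical call, not taken from this file): allocating an
// empty FAST_ELEMENTS array with room for 16 elements, pre-filled with holes
// so uninitialized reads stay well defined:
//
//   MaybeObject* maybe_array = heap->AllocateJSArrayAndStorage(
//       FAST_ELEMENTS, 0, 16, INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE,
//       NOT_TENURED);
//
// DONT_INITIALIZE_ARRAY_ELEMENTS leaves the backing store uninitialized and is
// only safe when the caller fills every element immediately afterwards.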
4506
4507
4508 MaybeObject* Heap::AllocateJSArrayAndStorageWithAllocationSite(
4509     ElementsKind elements_kind,
4510     int length,
4511     int capacity,
4512     Handle<Object> allocation_site_payload,
4513     ArrayStorageAllocationMode mode) {
4514   MaybeObject* maybe_array = AllocateJSArrayWithAllocationSite(elements_kind,
4515       allocation_site_payload);
4516   JSArray* array;
4517   if (!maybe_array->To(&array)) return maybe_array;
4518   return AllocateJSArrayStorage(array, length, capacity, mode);
4519 }
4520
4521
4522 MaybeObject* Heap::AllocateJSArrayStorage(
4523     JSArray* array,
4524     int length,
4525     int capacity,
4526     ArrayStorageAllocationMode mode) {
4527   ASSERT(capacity >= length);
4528
4529   if (capacity == 0) {
4530     array->set_length(Smi::FromInt(0));
4531     array->set_elements(empty_fixed_array());
4532     return array;
4533   }
4534
4535   FixedArrayBase* elms;
4536   MaybeObject* maybe_elms = NULL;
4537   ElementsKind elements_kind = array->GetElementsKind();
4538   if (IsFastDoubleElementsKind(elements_kind)) {
4539     if (mode == DONT_INITIALIZE_ARRAY_ELEMENTS) {
4540       maybe_elms = AllocateUninitializedFixedDoubleArray(capacity);
4541     } else {
4542       ASSERT(mode == INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
4543       maybe_elms = AllocateFixedDoubleArrayWithHoles(capacity);
4544     }
4545   } else {
4546     ASSERT(IsFastSmiOrObjectElementsKind(elements_kind));
4547     if (mode == DONT_INITIALIZE_ARRAY_ELEMENTS) {
4548       maybe_elms = AllocateUninitializedFixedArray(capacity);
4549     } else {
4550       ASSERT(mode == INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
4551       maybe_elms = AllocateFixedArrayWithHoles(capacity);
4552     }
4553   }
4554   if (!maybe_elms->To(&elms)) return maybe_elms;
4555
4556   array->set_elements(elms);
4557   array->set_length(Smi::FromInt(length));
4558   return array;
4559 }
4560
4561
4562 MaybeObject* Heap::AllocateJSArrayWithElements(
4563     FixedArrayBase* elements,
4564     ElementsKind elements_kind,
4565     int length,
4566     PretenureFlag pretenure) {
4567   MaybeObject* maybe_array = AllocateJSArray(elements_kind, pretenure);
4568   JSArray* array;
4569   if (!maybe_array->To(&array)) return maybe_array;
4570
4571   array->set_elements(elements);
4572   array->set_length(Smi::FromInt(length));
4573   array->ValidateElements();
4574   return array;
4575 }
4576
4577
4578 MaybeObject* Heap::AllocateJSProxy(Object* handler, Object* prototype) {
4579   // Allocate map.
4580   // TODO(rossberg): Once we optimize proxies, think about a scheme to share
4581   // maps. Will probably depend on the identity of the handler object, too.
4582   Map* map;
4583   MaybeObject* maybe_map_obj = AllocateMap(JS_PROXY_TYPE, JSProxy::kSize);
4584   if (!maybe_map_obj->To<Map>(&map)) return maybe_map_obj;
4585   map->set_prototype(prototype);
4586
4587   // Allocate the proxy object.
4588   JSProxy* result;
4589   MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
4590   if (!maybe_result->To<JSProxy>(&result)) return maybe_result;
4591   result->InitializeBody(map->instance_size(), Smi::FromInt(0));
4592   result->set_handler(handler);
4593   result->set_hash(undefined_value(), SKIP_WRITE_BARRIER);
4594   return result;
4595 }
4596
4597
4598 MaybeObject* Heap::AllocateJSFunctionProxy(Object* handler,
4599                                            Object* call_trap,
4600                                            Object* construct_trap,
4601                                            Object* prototype) {
4602   // Allocate map.
4603   // TODO(rossberg): Once we optimize proxies, think about a scheme to share
4604   // maps. Will probably depend on the identity of the handler object, too.
4605   Map* map;
4606   MaybeObject* maybe_map_obj =
4607       AllocateMap(JS_FUNCTION_PROXY_TYPE, JSFunctionProxy::kSize);
4608   if (!maybe_map_obj->To<Map>(&map)) return maybe_map_obj;
4609   map->set_prototype(prototype);
4610
4611   // Allocate the proxy object.
4612   JSFunctionProxy* result;
4613   MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
4614   if (!maybe_result->To<JSFunctionProxy>(&result)) return maybe_result;
4615   result->InitializeBody(map->instance_size(), Smi::FromInt(0));
4616   result->set_handler(handler);
4617   result->set_hash(undefined_value(), SKIP_WRITE_BARRIER);
4618   result->set_call_trap(call_trap);
4619   result->set_construct_trap(construct_trap);
4620   return result;
4621 }
4622
4623
4624 MaybeObject* Heap::AllocateGlobalObject(JSFunction* constructor) {
4625   ASSERT(constructor->has_initial_map());
4626   Map* map = constructor->initial_map();
4627   ASSERT(map->is_dictionary_map());
4628
4629   // Make sure no field properties are described in the initial map.
4630   // This guarantees that normalizing the properties does not
4631   // require us to change property values to JSGlobalPropertyCells.
4632   ASSERT(map->NextFreePropertyIndex() == 0);
4633
4634   // Make sure we don't have a ton of pre-allocated slots in the
4635   // global objects. They will be unused once we normalize the object.
4636   ASSERT(map->unused_property_fields() == 0);
4637   ASSERT(map->inobject_properties() == 0);
4638
4639   // Initial size of the backing store to avoid resizing the storage during
4640   // bootstrapping. The size differs between the JS global object and the
4641   // builtins object.
4642   int initial_size = map->instance_type() == JS_GLOBAL_OBJECT_TYPE ? 64 : 512;
4643
4644   // Allocate a dictionary object for backing storage.
4645   NameDictionary* dictionary;
4646   MaybeObject* maybe_dictionary =
4647       NameDictionary::Allocate(
4648           this,
4649           map->NumberOfOwnDescriptors() * 2 + initial_size);
4650   if (!maybe_dictionary->To(&dictionary)) return maybe_dictionary;
4651
4652   // The global object might be created from an object template with accessors.
4653   // Fill these accessors into the dictionary.
4654   DescriptorArray* descs = map->instance_descriptors();
4655   for (int i = 0; i < map->NumberOfOwnDescriptors(); i++) {
4656     PropertyDetails details = descs->GetDetails(i);
4657     ASSERT(details.type() == CALLBACKS);  // Only accessors are expected.
4658     PropertyDetails d = PropertyDetails(details.attributes(), CALLBACKS, i + 1);
4659     Object* value = descs->GetCallbacksObject(i);
4660     MaybeObject* maybe_value = AllocateJSGlobalPropertyCell(value);
4661     if (!maybe_value->ToObject(&value)) return maybe_value;
4662
4663     MaybeObject* maybe_added = dictionary->Add(descs->GetKey(i), value, d);
4664     if (!maybe_added->To(&dictionary)) return maybe_added;
4665   }
4666
4667   // Allocate the global object and initialize it with the backing store.
4668   JSObject* global;
4669   MaybeObject* maybe_global = Allocate(map, OLD_POINTER_SPACE);
4670   if (!maybe_global->To(&global)) return maybe_global;
4671
4672   InitializeJSObjectFromMap(global, dictionary, map);
4673
4674   // Create a new map for the global object.
4675   Map* new_map;
4676   MaybeObject* maybe_map = map->CopyDropDescriptors();
4677   if (!maybe_map->To(&new_map)) return maybe_map;
4678   new_map->set_dictionary_map(true);
4679
4680   // Set up the global object as a normalized object.
4681   global->set_map(new_map);
4682   global->set_properties(dictionary);
4683
4684   // Make sure result is a global object with properties in dictionary.
4685   ASSERT(global->IsGlobalObject());
4686   ASSERT(!global->HasFastProperties());
4687   return global;
4688 }
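// Sizing illustration for the dictionary allocated above: a builtins object
// whose template defines five accessors requests room for
// 5 * 2 + 512 = 522 entries, while a JS global object with the same template
// would request 5 * 2 + 64 = 74, matching the initial_size chosen above.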
4689
4690
4691 MaybeObject* Heap::CopyJSObject(JSObject* source) {
4692   // Never used to copy functions.  If functions need to be copied, we
4693   // have to be careful to clear the literals array.
4694   SLOW_ASSERT(!source->IsJSFunction());
4695
4696   // Make the clone.
4697   Map* map = source->map();
4698   int object_size = map->instance_size();
4699   Object* clone;
4700
4701   WriteBarrierMode wb_mode = UPDATE_WRITE_BARRIER;
4702
4703   // If we're forced to always allocate, we use the general allocation
4704   // functions which may leave us with an object in old space.
4705   if (always_allocate()) {
4706     { MaybeObject* maybe_clone =
4707           AllocateRaw(object_size, NEW_SPACE, OLD_POINTER_SPACE);
4708       if (!maybe_clone->ToObject(&clone)) return maybe_clone;
4709     }
4710     Address clone_address = HeapObject::cast(clone)->address();
4711     CopyBlock(clone_address,
4712               source->address(),
4713               object_size);
4714     // Update write barrier for all fields that lie beyond the header.
4715     RecordWrites(clone_address,
4716                  JSObject::kHeaderSize,
4717                  (object_size - JSObject::kHeaderSize) / kPointerSize);
4718   } else {
4719     wb_mode = SKIP_WRITE_BARRIER;
4720
4721     { MaybeObject* maybe_clone = new_space_.AllocateRaw(object_size);
4722       if (!maybe_clone->ToObject(&clone)) return maybe_clone;
4723     }
4724     SLOW_ASSERT(InNewSpace(clone));
4725     // Since we know the clone is allocated in new space, we can copy
4726     // the contents without worrying about updating the write barrier.
4727     CopyBlock(HeapObject::cast(clone)->address(),
4728               source->address(),
4729               object_size);
4730   }
4731
4732   SLOW_ASSERT(
4733       JSObject::cast(clone)->GetElementsKind() == source->GetElementsKind());
4734   FixedArrayBase* elements = FixedArrayBase::cast(source->elements());
4735   FixedArray* properties = FixedArray::cast(source->properties());
4736   // Update elements if necessary.
4737   if (elements->length() > 0) {
4738     Object* elem;
4739     { MaybeObject* maybe_elem;
4740       if (elements->map() == fixed_cow_array_map()) {
4741         maybe_elem = FixedArray::cast(elements);
4742       } else if (source->HasFastDoubleElements()) {
4743         maybe_elem = CopyFixedDoubleArray(FixedDoubleArray::cast(elements));
4744       } else {
4745         maybe_elem = CopyFixedArray(FixedArray::cast(elements));
4746       }
4747       if (!maybe_elem->ToObject(&elem)) return maybe_elem;
4748     }
4749     JSObject::cast(clone)->set_elements(FixedArrayBase::cast(elem), wb_mode);
4750   }
4751   // Update properties if necessary.
4752   if (properties->length() > 0) {
4753     Object* prop;
4754     { MaybeObject* maybe_prop = CopyFixedArray(properties);
4755       if (!maybe_prop->ToObject(&prop)) return maybe_prop;
4756     }
4757     JSObject::cast(clone)->set_properties(FixedArray::cast(prop), wb_mode);
4758   }
4759   // Return the new clone.
4760   return clone;
4761 }
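// Write barrier arithmetic for the always_allocate() branch above, as an
// illustration only (assuming a 32-bit layout, i.e. kPointerSize == 4 and a
// three-word JSObject header of 12 bytes): cloning a 32-byte object records
// (32 - 12) / 4 = 5 tagged slots, starting at offset JSObject::kHeaderSize.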
4762
4763
4764 MaybeObject* Heap::CopyJSObjectWithAllocationSite(JSObject* source) {
4765   // Never used to copy functions.  If functions need to be copied, we
4766   // have to be careful to clear the literals array.
4767   SLOW_ASSERT(!source->IsJSFunction());
4768
4769   // Make the clone.
4770   Map* map = source->map();
4771   int object_size = map->instance_size();
4772   Object* clone;
4773
4774   ASSERT(map->CanTrackAllocationSite());
4775   ASSERT(map->instance_type() == JS_ARRAY_TYPE);
4776   WriteBarrierMode wb_mode = UPDATE_WRITE_BARRIER;
4777
4778   // If we're forced to always allocate, we use the general allocation
4779   // functions which may leave us with an object in old space.
4780   int adjusted_object_size = object_size;
4781   if (always_allocate()) {
4782     // We'll only track the origin if we are certain to allocate in new space.
4783     const int kMinFreeNewSpaceAfterGC = InitialSemiSpaceSize() * 3/4;
4784     if ((object_size + AllocationSiteInfo::kSize) < kMinFreeNewSpaceAfterGC) {
4785       adjusted_object_size += AllocationSiteInfo::kSize;
4786     }
4787
4788     { MaybeObject* maybe_clone =
4789           AllocateRaw(adjusted_object_size, NEW_SPACE, OLD_POINTER_SPACE);
4790       if (!maybe_clone->ToObject(&clone)) return maybe_clone;
4791     }
4792     Address clone_address = HeapObject::cast(clone)->address();
4793     CopyBlock(clone_address,
4794               source->address(),
4795               object_size);
4796     // Update write barrier for all fields that lie beyond the header.
4797     int write_barrier_offset = adjusted_object_size > object_size
4798         ? JSArray::kSize + AllocationSiteInfo::kSize
4799         : JSObject::kHeaderSize;
4800     if (((object_size - write_barrier_offset) / kPointerSize) > 0) {
4801       RecordWrites(clone_address,
4802                    write_barrier_offset,
4803                    (object_size - write_barrier_offset) / kPointerSize);
4804     }
4805
4806     // Track allocation site information, if we failed to allocate it inline.
4807     if (InNewSpace(clone) &&
4808         adjusted_object_size == object_size) {
4809       MaybeObject* maybe_alloc_info =
4810           AllocateStruct(ALLOCATION_SITE_INFO_TYPE);
4811       AllocationSiteInfo* alloc_info;
4812       if (maybe_alloc_info->To(&alloc_info)) {
4813         alloc_info->set_map_no_write_barrier(allocation_site_info_map());
4814         alloc_info->set_payload(source, SKIP_WRITE_BARRIER);
4815       }
4816     }
4817   } else {
4818     wb_mode = SKIP_WRITE_BARRIER;
4819     adjusted_object_size += AllocationSiteInfo::kSize;
4820
4821     { MaybeObject* maybe_clone = new_space_.AllocateRaw(adjusted_object_size);
4822       if (!maybe_clone->ToObject(&clone)) return maybe_clone;
4823     }
4824     SLOW_ASSERT(InNewSpace(clone));
4825     // Since we know the clone is allocated in new space, we can copy
4826     // the contents without worrying about updating the write barrier.
4827     CopyBlock(HeapObject::cast(clone)->address(),
4828               source->address(),
4829               object_size);
4830   }
4831
4832   if (adjusted_object_size > object_size) {
4833     AllocationSiteInfo* alloc_info = reinterpret_cast<AllocationSiteInfo*>(
4834         reinterpret_cast<Address>(clone) + object_size);
4835     alloc_info->set_map_no_write_barrier(allocation_site_info_map());
4836     alloc_info->set_payload(source, SKIP_WRITE_BARRIER);
4837   }
4838
4839   SLOW_ASSERT(
4840       JSObject::cast(clone)->GetElementsKind() == source->GetElementsKind());
4841   FixedArrayBase* elements = FixedArrayBase::cast(source->elements());
4842   FixedArray* properties = FixedArray::cast(source->properties());
4843   // Update elements if necessary.
4844   if (elements->length() > 0) {
4845     Object* elem;
4846     { MaybeObject* maybe_elem;
4847       if (elements->map() == fixed_cow_array_map()) {
4848         maybe_elem = FixedArray::cast(elements);
4849       } else if (source->HasFastDoubleElements()) {
4850         maybe_elem = CopyFixedDoubleArray(FixedDoubleArray::cast(elements));
4851       } else {
4852         maybe_elem = CopyFixedArray(FixedArray::cast(elements));
4853       }
4854       if (!maybe_elem->ToObject(&elem)) return maybe_elem;
4855     }
4856     JSObject::cast(clone)->set_elements(FixedArrayBase::cast(elem), wb_mode);
4857   }
4858   // Update properties if necessary.
4859   if (properties->length() > 0) {
4860     Object* prop;
4861     { MaybeObject* maybe_prop = CopyFixedArray(properties);
4862       if (!maybe_prop->ToObject(&prop)) return maybe_prop;
4863     }
4864     JSObject::cast(clone)->set_properties(FixedArray::cast(prop), wb_mode);
4865   }
4866   // Return the new clone.
4867   return clone;
4868 }
4869
4870
4871 MaybeObject* Heap::ReinitializeJSReceiver(
4872     JSReceiver* object, InstanceType type, int size) {
4873   ASSERT(type >= FIRST_JS_OBJECT_TYPE);
4874
4875   // Allocate fresh map.
4876   // TODO(rossberg): Once we optimize proxies, cache these maps.
4877   Map* map;
4878   MaybeObject* maybe = AllocateMap(type, size);
4879   if (!maybe->To<Map>(&map)) return maybe;
4880
4881   // Check that the receiver has at least the size of the fresh object.
4882   int size_difference = object->map()->instance_size() - map->instance_size();
4883   ASSERT(size_difference >= 0);
4884
4885   map->set_prototype(object->map()->prototype());
4886
4887   // Allocate the backing storage for the properties.
4888   int prop_size = map->unused_property_fields() - map->inobject_properties();
4889   Object* properties;
4890   maybe = AllocateFixedArray(prop_size, TENURED);
4891   if (!maybe->ToObject(&properties)) return maybe;
4892
4893   // Functions require some allocation, which might fail here.
4894   SharedFunctionInfo* shared = NULL;
4895   if (type == JS_FUNCTION_TYPE) {
4896     String* name;
4897     maybe =
4898         InternalizeOneByteString(STATIC_ASCII_VECTOR("<freezing call trap>"));
4899     if (!maybe->To<String>(&name)) return maybe;
4900     maybe = AllocateSharedFunctionInfo(name);
4901     if (!maybe->To<SharedFunctionInfo>(&shared)) return maybe;
4902   }
4903
4904   // Because of possible retries of this function after failure,
4905   // we must NOT fail after this point, where we have changed the type!
4906
4907   // Reset the map for the object.
4908   object->set_map(map);
4909   JSObject* jsobj = JSObject::cast(object);
4910
4911   // Reinitialize the object from the constructor map.
4912   InitializeJSObjectFromMap(jsobj, FixedArray::cast(properties), map);
4913
4914   // Functions require some minimal initialization.
4915   if (type == JS_FUNCTION_TYPE) {
4916     map->set_function_with_prototype(true);
4917     InitializeFunction(JSFunction::cast(object), shared, the_hole_value());
4918     JSFunction::cast(object)->set_context(
4919         isolate()->context()->native_context());
4920   }
4921
4922   // Put in filler if the new object is smaller than the old.
4923   if (size_difference > 0) {
4924     CreateFillerObjectAt(
4925         object->address() + map->instance_size(), size_difference);
4926   }
4927
4928   return object;
4929 }
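// Editor's note, inferred from the code above rather than stated in it: this
// in-place reinitialization supports fixing proxies -- e.g. a JSFunctionProxy
// can be turned into a regular JS_FUNCTION_TYPE object (named
// "<freezing call trap>") without moving it, and any leftover space is covered
// with a filler object so the heap remains iterable.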
4930
4931
4932 MaybeObject* Heap::ReinitializeJSGlobalProxy(JSFunction* constructor,
4933                                              JSGlobalProxy* object) {
4934   ASSERT(constructor->has_initial_map());
4935   Map* map = constructor->initial_map();
4936
4937   // Check that the already allocated object has the same size and type as
4938   // objects allocated using the constructor.
4939   ASSERT(map->instance_size() == object->map()->instance_size());
4940   ASSERT(map->instance_type() == object->map()->instance_type());
4941
4942   // Allocate the backing storage for the properties.
4943   int prop_size = map->unused_property_fields() - map->inobject_properties();
4944   Object* properties;
4945   { MaybeObject* maybe_properties = AllocateFixedArray(prop_size, TENURED);
4946     if (!maybe_properties->ToObject(&properties)) return maybe_properties;
4947   }
4948
4949   // Reset the map for the object.
4950   object->set_map(constructor->initial_map());
4951
4952   // Reinitialize the object from the constructor map.
4953   InitializeJSObjectFromMap(object, FixedArray::cast(properties), map);
4954   return object;
4955 }
4956
4957
4958 MaybeObject* Heap::AllocateStringFromOneByte(Vector<const uint8_t> string,
4959                                            PretenureFlag pretenure) {
4960   int length = string.length();
4961   if (length == 1) {
4962     return Heap::LookupSingleCharacterStringFromCode(string[0]);
4963   }
4964   Object* result;
4965   { MaybeObject* maybe_result =
4966         AllocateRawOneByteString(string.length(), pretenure);
4967     if (!maybe_result->ToObject(&result)) return maybe_result;
4968   }
4969
4970   // Copy the characters into the new object.
4971   CopyChars(SeqOneByteString::cast(result)->GetChars(),
4972             string.start(),
4973             length);
4974   return result;
4975 }
4976
4977
4978 MaybeObject* Heap::AllocateStringFromUtf8Slow(Vector<const char> string,
4979                                               int non_ascii_start,
4980                                               PretenureFlag pretenure) {
4981   // Continue counting the number of characters in the UTF-8 string, starting
4982   // from the first non-ascii character or word.
4983   Access<UnicodeCache::Utf8Decoder>
4984       decoder(isolate_->unicode_cache()->utf8_decoder());
4985   decoder->Reset(string.start() + non_ascii_start,
4986                  string.length() - non_ascii_start);
4987   int utf16_length = decoder->Utf16Length();
4988   ASSERT(utf16_length > 0);
4989   // Allocate string.
4990   Object* result;
4991   {
4992     int chars = non_ascii_start + utf16_length;
4993     MaybeObject* maybe_result = AllocateRawTwoByteString(chars, pretenure);
4994     if (!maybe_result->ToObject(&result)) return maybe_result;
4995   }
4996   // Convert and copy the characters into the new object.
4997   SeqTwoByteString* twobyte = SeqTwoByteString::cast(result);
4998   // Copy ascii portion.
4999   uint16_t* data = twobyte->GetChars();
5000   if (non_ascii_start != 0) {
5001     const char* ascii_data = string.start();
5002     for (int i = 0; i < non_ascii_start; i++) {
5003       *data++ = *ascii_data++;
5004     }
5005   }
5006   // Now write the remainder.
5007   decoder->WriteUtf16(data, utf16_length);
5008   return result;
5009 }
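// Length bookkeeping example for the function above (illustrative input): for
// the UTF-8 bytes of "a" + U+00E9 + U+20AC + U+1D11E (1 + 2 + 3 + 4 bytes),
// non_ascii_start is 1, the decoder reports utf16_length == 4 (the last code
// point needs a surrogate pair), so a 5-character two-byte string is
// allocated: one character copied directly, four written by WriteUtf16().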
5010
5011
5012 MaybeObject* Heap::AllocateStringFromTwoByte(Vector<const uc16> string,
5013                                              PretenureFlag pretenure) {
5014   // Check if the string can be stored as a one-byte (ASCII) string.
5015   Object* result;
5016   int length = string.length();
5017   const uc16* start = string.start();
5018
5019   if (String::IsOneByte(start, length)) {
5020     MaybeObject* maybe_result = AllocateRawOneByteString(length, pretenure);
5021     if (!maybe_result->ToObject(&result)) return maybe_result;
5022     CopyChars(SeqOneByteString::cast(result)->GetChars(), start, length);
5023   } else {  // It's not a one byte string.
5024     MaybeObject* maybe_result = AllocateRawTwoByteString(length, pretenure);
5025     if (!maybe_result->ToObject(&result)) return maybe_result;
5026     CopyChars(SeqTwoByteString::cast(result)->GetChars(), start, length);
5027   }
5028   return result;
5029 }
5030
5031
5032 Map* Heap::InternalizedStringMapForString(String* string) {
5033   // If the string is in new space, it cannot be used as an internalized string.
5034   if (InNewSpace(string)) return NULL;
5035
5036   // Find the corresponding internalized string map for strings.
5037   switch (string->map()->instance_type()) {
5038     case STRING_TYPE: return internalized_string_map();
5039     case ASCII_STRING_TYPE: return ascii_internalized_string_map();
5040     case CONS_STRING_TYPE: return cons_internalized_string_map();
5041     case CONS_ASCII_STRING_TYPE: return cons_ascii_internalized_string_map();
5042     case EXTERNAL_STRING_TYPE: return external_internalized_string_map();
5043     case EXTERNAL_ASCII_STRING_TYPE:
5044       return external_ascii_internalized_string_map();
5045     case EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE:
5046       return external_internalized_string_with_one_byte_data_map();
5047     case SHORT_EXTERNAL_STRING_TYPE:
5048       return short_external_internalized_string_map();
5049     case SHORT_EXTERNAL_ASCII_STRING_TYPE:
5050       return short_external_ascii_internalized_string_map();
5051     case SHORT_EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE:
5052       return short_external_internalized_string_with_one_byte_data_map();
5053     default: return NULL;  // No match found.
5054   }
5055 }
5056
5057
5058 static inline void WriteOneByteData(Vector<const char> vector,
5059                                     uint8_t* chars,
5060                                     int len) {
5061   // Only works for ascii.
5062   ASSERT(vector.length() == len);
5063   OS::MemCopy(chars, vector.start(), len);
5064 }
5065
5066 static inline void WriteTwoByteData(Vector<const char> vector,
5067                                     uint16_t* chars,
5068                                     int len) {
5069   const uint8_t* stream = reinterpret_cast<const uint8_t*>(vector.start());
5070   unsigned stream_length = vector.length();
5071   while (stream_length != 0) {
5072     unsigned consumed = 0;
5073     uint32_t c = unibrow::Utf8::ValueOf(stream, stream_length, &consumed);
5074     ASSERT(c != unibrow::Utf8::kBadChar);
5075     ASSERT(consumed <= stream_length);
5076     stream_length -= consumed;
5077     stream += consumed;
5078     if (c > unibrow::Utf16::kMaxNonSurrogateCharCode) {
5079       len -= 2;
5080       if (len < 0) break;
5081       *chars++ = unibrow::Utf16::LeadSurrogate(c);
5082       *chars++ = unibrow::Utf16::TrailSurrogate(c);
5083     } else {
5084       len -= 1;
5085       if (len < 0) break;
5086       *chars++ = c;
5087     }
5088   }
5089   ASSERT(stream_length == 0);
5090   ASSERT(len == 0);
5091 }
5092
5093
5094 static inline void WriteOneByteData(String* s, uint8_t* chars, int len) {
5095   ASSERT(s->length() == len);
5096   String::WriteToFlat(s, chars, 0, len);
5097 }
5098
5099 static inline void WriteTwoByteData(String* s, uint16_t* chars, int len) {
5100   ASSERT(s->length() == len);
5101   String::WriteToFlat(s, chars, 0, len);
5102 }
5103
5104
5105 template<bool is_one_byte, typename T>
5106 MaybeObject* Heap::AllocateInternalizedStringImpl(
5107     T t, int chars, uint32_t hash_field) {
5108   ASSERT(chars >= 0);
5109   // Compute map and object size.
5110   int size;
5111   Map* map;
5112
5113   if (is_one_byte) {
5114     if (chars > SeqOneByteString::kMaxLength) {
5115       return Failure::OutOfMemoryException(0x9);
5116     }
5117     map = ascii_internalized_string_map();
5118     size = SeqOneByteString::SizeFor(chars);
5119   } else {
5120     if (chars > SeqTwoByteString::kMaxLength) {
5121       return Failure::OutOfMemoryException(0xa);
5122     }
5123     map = internalized_string_map();
5124     size = SeqTwoByteString::SizeFor(chars);
5125   }
5126
5127   // Allocate string.
5128   Object* result;
5129   { MaybeObject* maybe_result = (size > Page::kMaxNonCodeHeapObjectSize)
5130                    ? lo_space_->AllocateRaw(size, NOT_EXECUTABLE)
5131                    : old_data_space_->AllocateRaw(size);
5132     if (!maybe_result->ToObject(&result)) return maybe_result;
5133   }
5134
5135   reinterpret_cast<HeapObject*>(result)->set_map_no_write_barrier(map);
5136   // Set length and hash fields of the allocated string.
5137   String* answer = String::cast(result);
5138   answer->set_length(chars);
5139   answer->set_hash_field(hash_field);
5140
5141   ASSERT_EQ(size, answer->Size());
5142
5143   if (is_one_byte) {
5144     WriteOneByteData(t, SeqOneByteString::cast(answer)->GetChars(), chars);
5145   } else {
5146     WriteTwoByteData(t, SeqTwoByteString::cast(answer)->GetChars(), chars);
5147   }
5148   return answer;
5149 }
5150
5151
5152 // Need explicit instantiations.
5153 template
5154 MaybeObject* Heap::AllocateInternalizedStringImpl<true>(String*, int, uint32_t);
5155 template
5156 MaybeObject* Heap::AllocateInternalizedStringImpl<false>(
5157     String*, int, uint32_t);
5158 template
5159 MaybeObject* Heap::AllocateInternalizedStringImpl<false>(
5160     Vector<const char>, int, uint32_t);
5161
5162
5163 MaybeObject* Heap::AllocateRawOneByteString(int length,
5164                                             PretenureFlag pretenure) {
5165   if (length < 0 || length > SeqOneByteString::kMaxLength) {
5166     return Failure::OutOfMemoryException(0xb);
5167   }
5168
5169   int size = SeqOneByteString::SizeFor(length);
5170   ASSERT(size <= SeqOneByteString::kMaxSize);
5171
5172   AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
5173   AllocationSpace retry_space = OLD_DATA_SPACE;
5174
5175   if (space == NEW_SPACE) {
5176     if (size > kMaxObjectSizeInNewSpace) {
5177       // Allocate in large object space; the retry space will be ignored.
5178       space = LO_SPACE;
5179     } else if (size > Page::kMaxNonCodeHeapObjectSize) {
5180       // Allocate in new space, retry in large object space.
5181       retry_space = LO_SPACE;
5182     }
5183   } else if (space == OLD_DATA_SPACE &&
5184              size > Page::kMaxNonCodeHeapObjectSize) {
5185     space = LO_SPACE;
5186   }
5187   Object* result;
5188   { MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
5189     if (!maybe_result->ToObject(&result)) return maybe_result;
5190   }
5191
5192   // Partially initialize the object.
5193   HeapObject::cast(result)->set_map_no_write_barrier(ascii_string_map());
5194   String::cast(result)->set_length(length);
5195   String::cast(result)->set_hash_field(String::kEmptyHashField);
5196   ASSERT_EQ(size, HeapObject::cast(result)->Size());
5197
5198   return result;
5199 }
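// Space selection summary for the two raw string allocators (restating the
// logic above): a NOT_TENURED request that fits in new space goes to
// NEW_SPACE with OLD_DATA_SPACE as the retry space (or LO_SPACE once the
// regular page limit is exceeded); a request above kMaxObjectSizeInNewSpace
// goes straight to LO_SPACE; a TENURED request uses OLD_DATA_SPACE unless it
// exceeds Page::kMaxNonCodeHeapObjectSize, in which case it also uses
// LO_SPACE.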
5200
5201
5202 MaybeObject* Heap::AllocateRawTwoByteString(int length,
5203                                             PretenureFlag pretenure) {
5204   if (length < 0 || length > SeqTwoByteString::kMaxLength) {
5205     return Failure::OutOfMemoryException(0xc);
5206   }
5207   int size = SeqTwoByteString::SizeFor(length);
5208   ASSERT(size <= SeqTwoByteString::kMaxSize);
5209   AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
5210   AllocationSpace retry_space = OLD_DATA_SPACE;
5211
5212   if (space == NEW_SPACE) {
5213     if (size > kMaxObjectSizeInNewSpace) {
5214       // Allocate in large object space; the retry space will be ignored.
5215       space = LO_SPACE;
5216     } else if (size > Page::kMaxNonCodeHeapObjectSize) {
5217       // Allocate in new space, retry in large object space.
5218       retry_space = LO_SPACE;
5219     }
5220   } else if (space == OLD_DATA_SPACE &&
5221              size > Page::kMaxNonCodeHeapObjectSize) {
5222     space = LO_SPACE;
5223   }
5224   Object* result;
5225   { MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
5226     if (!maybe_result->ToObject(&result)) return maybe_result;
5227   }
5228
5229   // Partially initialize the object.
5230   HeapObject::cast(result)->set_map_no_write_barrier(string_map());
5231   String::cast(result)->set_length(length);
5232   String::cast(result)->set_hash_field(String::kEmptyHashField);
5233   ASSERT_EQ(size, HeapObject::cast(result)->Size());
5234   return result;
5235 }
5236
5237
5238 MaybeObject* Heap::AllocateJSArray(
5239     ElementsKind elements_kind,
5240     PretenureFlag pretenure) {
5241   Context* native_context = isolate()->context()->native_context();
5242   JSFunction* array_function = native_context->array_function();
5243   Map* map = array_function->initial_map();
5244   Map* transition_map = isolate()->get_initial_js_array_map(elements_kind);
5245   if (transition_map != NULL) map = transition_map;
5246   return AllocateJSObjectFromMap(map, pretenure);
5247 }
5248
5249
5250 MaybeObject* Heap::AllocateJSArrayWithAllocationSite(
5251     ElementsKind elements_kind,
5252     Handle<Object> allocation_site_info_payload) {
5253   Context* native_context = isolate()->context()->native_context();
5254   JSFunction* array_function = native_context->array_function();
5255   Map* map = array_function->initial_map();
5256   Object* maybe_map_array = native_context->js_array_maps();
5257   if (!maybe_map_array->IsUndefined()) {
5258     Object* maybe_transitioned_map =
5259         FixedArray::cast(maybe_map_array)->get(elements_kind);
5260     if (!maybe_transitioned_map->IsUndefined()) {
5261       map = Map::cast(maybe_transitioned_map);
5262     }
5263   }
5264   return AllocateJSObjectFromMapWithAllocationSite(map,
5265       allocation_site_info_payload);
5266 }
5267
5268
5269 MaybeObject* Heap::AllocateEmptyFixedArray() {
5270   int size = FixedArray::SizeFor(0);
5271   Object* result;
5272   { MaybeObject* maybe_result =
5273         AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE);
5274     if (!maybe_result->ToObject(&result)) return maybe_result;
5275   }
5276   // Initialize the object.
5277   reinterpret_cast<FixedArray*>(result)->set_map_no_write_barrier(
5278       fixed_array_map());
5279   reinterpret_cast<FixedArray*>(result)->set_length(0);
5280   return result;
5281 }
5282
5283 MaybeObject* Heap::AllocateEmptyExternalArray(ExternalArrayType array_type) {
5284   return AllocateExternalArray(0, array_type, NULL, TENURED);
5285 }
5286
5287
5288 MaybeObject* Heap::AllocateRawFixedArray(int length) {
5289   if (length < 0 || length > FixedArray::kMaxLength) {
5290     return Failure::OutOfMemoryException(0xd);
5291   }
5292   ASSERT(length > 0);
5293   // Use the general function if we're forced to always allocate.
5294   if (always_allocate()) return AllocateFixedArray(length, TENURED);
5295   // Allocate the raw data for a fixed array.
5296   int size = FixedArray::SizeFor(length);
5297   return size <= kMaxObjectSizeInNewSpace
5298       ? new_space_.AllocateRaw(size)
5299       : lo_space_->AllocateRaw(size, NOT_EXECUTABLE);
5300 }
5301
5302
5303 MaybeObject* Heap::CopyFixedArrayWithMap(FixedArray* src, Map* map) {
5304   int len = src->length();
5305   Object* obj;
5306   { MaybeObject* maybe_obj = AllocateRawFixedArray(len);
5307     if (!maybe_obj->ToObject(&obj)) return maybe_obj;
5308   }
5309   if (InNewSpace(obj)) {
5310     HeapObject* dst = HeapObject::cast(obj);
5311     dst->set_map_no_write_barrier(map);
5312     CopyBlock(dst->address() + kPointerSize,
5313               src->address() + kPointerSize,
5314               FixedArray::SizeFor(len) - kPointerSize);
5315     return obj;
5316   }
5317   HeapObject::cast(obj)->set_map_no_write_barrier(map);
5318   FixedArray* result = FixedArray::cast(obj);
5319   result->set_length(len);
5320
5321   // Copy the content
5322   AssertNoAllocation no_gc;
5323   WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
5324   for (int i = 0; i < len; i++) result->set(i, src->get(i), mode);
5325   return result;
5326 }
5327
5328
5329 MaybeObject* Heap::CopyFixedDoubleArrayWithMap(FixedDoubleArray* src,
5330                                                Map* map) {
5331   int len = src->length();
5332   Object* obj;
5333   { MaybeObject* maybe_obj = AllocateRawFixedDoubleArray(len, NOT_TENURED);
5334     if (!maybe_obj->ToObject(&obj)) return maybe_obj;
5335   }
5336   HeapObject* dst = HeapObject::cast(obj);
5337   dst->set_map_no_write_barrier(map);
5338   CopyBlock(
5339       dst->address() + FixedDoubleArray::kLengthOffset,
5340       src->address() + FixedDoubleArray::kLengthOffset,
5341       FixedDoubleArray::SizeFor(len) - FixedDoubleArray::kLengthOffset);
5342   return obj;
5343 }
5344
5345
5346 MaybeObject* Heap::AllocateFixedArray(int length) {
5347   ASSERT(length >= 0);
5348   if (length == 0) return empty_fixed_array();
5349   Object* result;
5350   { MaybeObject* maybe_result = AllocateRawFixedArray(length);
5351     if (!maybe_result->ToObject(&result)) return maybe_result;
5352   }
5353   // Initialize header.
5354   FixedArray* array = reinterpret_cast<FixedArray*>(result);
5355   array->set_map_no_write_barrier(fixed_array_map());
5356   array->set_length(length);
5357   // Initialize body.
5358   ASSERT(!InNewSpace(undefined_value()));
5359   MemsetPointer(array->data_start(), undefined_value(), length);
5360   return result;
5361 }
5362
5363
5364 MaybeObject* Heap::AllocateRawFixedArray(int length, PretenureFlag pretenure) {
5365   if (length < 0 || length > FixedArray::kMaxLength) {
5366     return Failure::OutOfMemoryException(0xe);
5367   }
5368
5369   AllocationSpace space =
5370       (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
5371   int size = FixedArray::SizeFor(length);
5372   if (space == NEW_SPACE && size > kMaxObjectSizeInNewSpace) {
5373     // Too big for new space.
5374     space = LO_SPACE;
5375   } else if (space == OLD_POINTER_SPACE &&
5376              size > Page::kMaxNonCodeHeapObjectSize) {
5377     // Too big for old pointer space.
5378     space = LO_SPACE;
5379   }
5380
5381   AllocationSpace retry_space =
5382       (size <= Page::kMaxNonCodeHeapObjectSize) ? OLD_POINTER_SPACE : LO_SPACE;
5383
5384   return AllocateRaw(size, space, retry_space);
5385 }
5386
5387
5388 MUST_USE_RESULT static MaybeObject* AllocateFixedArrayWithFiller(
5389     Heap* heap,
5390     int length,
5391     PretenureFlag pretenure,
5392     Object* filler) {
5393   ASSERT(length >= 0);
5394   ASSERT(heap->empty_fixed_array()->IsFixedArray());
5395   if (length == 0) return heap->empty_fixed_array();
5396
5397   ASSERT(!heap->InNewSpace(filler));
5398   Object* result;
5399   { MaybeObject* maybe_result = heap->AllocateRawFixedArray(length, pretenure);
5400     if (!maybe_result->ToObject(&result)) return maybe_result;
5401   }
5402
5403   HeapObject::cast(result)->set_map_no_write_barrier(heap->fixed_array_map());
5404   FixedArray* array = FixedArray::cast(result);
5405   array->set_length(length);
5406   MemsetPointer(array->data_start(), filler, length);
5407   return array;
5408 }
5409
5410
5411 MaybeObject* Heap::AllocateFixedArray(int length, PretenureFlag pretenure) {
5412   return AllocateFixedArrayWithFiller(this,
5413                                       length,
5414                                       pretenure,
5415                                       undefined_value());
5416 }
5417
5418
5419 MaybeObject* Heap::AllocateFixedArrayWithHoles(int length,
5420                                                PretenureFlag pretenure) {
5421   return AllocateFixedArrayWithFiller(this,
5422                                       length,
5423                                       pretenure,
5424                                       the_hole_value());
5425 }
5426
5427
5428 MaybeObject* Heap::AllocateUninitializedFixedArray(int length) {
5429   if (length == 0) return empty_fixed_array();
5430
5431   Object* obj;
5432   { MaybeObject* maybe_obj = AllocateRawFixedArray(length);
5433     if (!maybe_obj->ToObject(&obj)) return maybe_obj;
5434   }
5435
5436   reinterpret_cast<FixedArray*>(obj)->set_map_no_write_barrier(
5437       fixed_array_map());
5438   FixedArray::cast(obj)->set_length(length);
5439   return obj;
5440 }
5441
5442
5443 MaybeObject* Heap::AllocateEmptyFixedDoubleArray() {
5444   int size = FixedDoubleArray::SizeFor(0);
5445   Object* result;
5446   { MaybeObject* maybe_result =
5447         AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE);
5448     if (!maybe_result->ToObject(&result)) return maybe_result;
5449   }
5450   // Initialize the object.
5451   reinterpret_cast<FixedDoubleArray*>(result)->set_map_no_write_barrier(
5452       fixed_double_array_map());
5453   reinterpret_cast<FixedDoubleArray*>(result)->set_length(0);
5454   return result;
5455 }
5456
5457
5458 MaybeObject* Heap::AllocateUninitializedFixedDoubleArray(
5459     int length,
5460     PretenureFlag pretenure) {
5461   if (length == 0) return empty_fixed_array();
5462
5463   Object* elements_object;
5464   MaybeObject* maybe_obj = AllocateRawFixedDoubleArray(length, pretenure);
5465   if (!maybe_obj->ToObject(&elements_object)) return maybe_obj;
5466   FixedDoubleArray* elements =
5467       reinterpret_cast<FixedDoubleArray*>(elements_object);
5468
5469   elements->set_map_no_write_barrier(fixed_double_array_map());
5470   elements->set_length(length);
5471   return elements;
5472 }
5473
5474
5475 MaybeObject* Heap::AllocateFixedDoubleArrayWithHoles(
5476     int length,
5477     PretenureFlag pretenure) {
5478   if (length == 0) return empty_fixed_array();
5479
5480   Object* elements_object;
5481   MaybeObject* maybe_obj = AllocateRawFixedDoubleArray(length, pretenure);
5482   if (!maybe_obj->ToObject(&elements_object)) return maybe_obj;
5483   FixedDoubleArray* elements =
5484       reinterpret_cast<FixedDoubleArray*>(elements_object);
5485
5486   for (int i = 0; i < length; ++i) {
5487     elements->set_the_hole(i);
5488   }
5489
5490   elements->set_map_no_write_barrier(fixed_double_array_map());
5491   elements->set_length(length);
5492   return elements;
5493 }
5494
5495
5496 MaybeObject* Heap::AllocateRawFixedDoubleArray(int length,
5497                                                PretenureFlag pretenure) {
5498   if (length < 0 || length > FixedDoubleArray::kMaxLength) {
5499     return Failure::OutOfMemoryException(0xf);
5500   }
5501
5502   AllocationSpace space =
5503       (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
5504   int size = FixedDoubleArray::SizeFor(length);
5505
5506 #ifndef V8_HOST_ARCH_64_BIT
5507   size += kPointerSize;
5508 #endif
5509
5510   if (space == NEW_SPACE && size > kMaxObjectSizeInNewSpace) {
5511     // Too big for new space.
5512     space = LO_SPACE;
5513   } else if (space == OLD_DATA_SPACE &&
5514              size > Page::kMaxNonCodeHeapObjectSize) {
5515     // Too big for old data space.
5516     space = LO_SPACE;
5517   }
5518
5519   AllocationSpace retry_space =
5520       (size <= Page::kMaxNonCodeHeapObjectSize) ? OLD_DATA_SPACE : LO_SPACE;
5521
5522   HeapObject* object;
5523   { MaybeObject* maybe_object = AllocateRaw(size, space, retry_space);
5524     if (!maybe_object->To<HeapObject>(&object)) return maybe_object;
5525   }
5526
5527   return EnsureDoubleAligned(this, object, size);
5528 }
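// Alignment note with illustrative numbers (assuming an 8-byte array header of
// map word plus length): on 32-bit hosts one extra pointer slot is requested
// above so that EnsureDoubleAligned() can shift the payload to an 8-byte
// boundary; a 3-element double array of 8 + 3 * 8 = 32 bytes is therefore
// requested as 36 bytes, leaving room for a one-word filler in front.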
5529
5530
5531 MaybeObject* Heap::AllocateHashTable(int length, PretenureFlag pretenure) {
5532   Object* result;
5533   { MaybeObject* maybe_result = AllocateFixedArray(length, pretenure);
5534     if (!maybe_result->ToObject(&result)) return maybe_result;
5535   }
5536   reinterpret_cast<HeapObject*>(result)->set_map_no_write_barrier(
5537       hash_table_map());
5538   ASSERT(result->IsHashTable());
5539   return result;
5540 }
5541
5542
5543 MaybeObject* Heap::AllocateSymbol() {
5544   // Statically ensure that it is safe to allocate symbols in paged spaces.
5545   STATIC_ASSERT(Symbol::kSize <= Page::kNonCodeObjectAreaSize);
5546
5547   Object* result;
5548   MaybeObject* maybe =
5549       AllocateRaw(Symbol::kSize, OLD_POINTER_SPACE, OLD_POINTER_SPACE);
5550   if (!maybe->ToObject(&result)) return maybe;
5551
5552   HeapObject::cast(result)->set_map_no_write_barrier(symbol_map());
5553
5554   // Generate a random hash value.
5555   int hash;
5556   int attempts = 0;
5557   do {
5558     hash = V8::RandomPrivate(isolate()) & Name::kHashBitMask;
5559     attempts++;
5560   } while (hash == 0 && attempts < 30);
5561   if (hash == 0) hash = 1;  // never return 0
5562
5563   Symbol::cast(result)->set_hash_field(
5564       Name::kIsNotArrayIndexMask | (hash << Name::kHashShift));
5565   Symbol::cast(result)->set_name(undefined_value());
5566
5567   ASSERT(result->IsSymbol());
5568   return result;
5569 }
5570
5571
5572 MaybeObject* Heap::AllocateNativeContext() {
5573   Object* result;
5574   { MaybeObject* maybe_result =
5575         AllocateFixedArray(Context::NATIVE_CONTEXT_SLOTS);
5576     if (!maybe_result->ToObject(&result)) return maybe_result;
5577   }
5578   Context* context = reinterpret_cast<Context*>(result);
5579   context->set_map_no_write_barrier(native_context_map());
5580   context->set_js_array_maps(undefined_value());
5581   ASSERT(context->IsNativeContext());
5582   ASSERT(result->IsContext());
5583   return result;
5584 }
5585
5586
5587 MaybeObject* Heap::AllocateGlobalContext(JSFunction* function,
5588                                          ScopeInfo* scope_info) {
5589   Object* result;
5590   { MaybeObject* maybe_result =
5591         AllocateFixedArray(scope_info->ContextLength(), TENURED);
5592     if (!maybe_result->ToObject(&result)) return maybe_result;
5593   }
5594   Context* context = reinterpret_cast<Context*>(result);
5595   context->set_map_no_write_barrier(global_context_map());
5596   context->set_closure(function);
5597   context->set_previous(function->context());
5598   context->set_extension(scope_info);
5599   context->set_global_object(function->context()->global_object());
5600   ASSERT(context->IsGlobalContext());
5601   ASSERT(result->IsContext());
5602   return context;
5603 }
5604
5605
5606 MaybeObject* Heap::AllocateModuleContext(ScopeInfo* scope_info) {
5607   Object* result;
5608   { MaybeObject* maybe_result =
5609         AllocateFixedArray(scope_info->ContextLength(), TENURED);
5610     if (!maybe_result->ToObject(&result)) return maybe_result;
5611   }
5612   Context* context = reinterpret_cast<Context*>(result);
5613   context->set_map_no_write_barrier(module_context_map());
5614   // Instance link will be set later.
5615   context->set_extension(Smi::FromInt(0));
5616   return context;
5617 }
5618
5619
5620 MaybeObject* Heap::AllocateFunctionContext(int length, JSFunction* function) {
5621   ASSERT(length >= Context::MIN_CONTEXT_SLOTS);
5622   Object* result;
5623   { MaybeObject* maybe_result = AllocateFixedArray(length);
5624     if (!maybe_result->ToObject(&result)) return maybe_result;
5625   }
5626   Context* context = reinterpret_cast<Context*>(result);
5627   context->set_map_no_write_barrier(function_context_map());
5628   context->set_closure(function);
5629   context->set_previous(function->context());
5630   context->set_extension(Smi::FromInt(0));
5631   context->set_global_object(function->context()->global_object());
5632   return context;
5633 }
5634
5635
5636 MaybeObject* Heap::AllocateCatchContext(JSFunction* function,
5637                                         Context* previous,
5638                                         String* name,
5639                                         Object* thrown_object) {
5640   STATIC_ASSERT(Context::MIN_CONTEXT_SLOTS == Context::THROWN_OBJECT_INDEX);
5641   Object* result;
5642   { MaybeObject* maybe_result =
5643         AllocateFixedArray(Context::MIN_CONTEXT_SLOTS + 1);
5644     if (!maybe_result->ToObject(&result)) return maybe_result;
5645   }
5646   Context* context = reinterpret_cast<Context*>(result);
5647   context->set_map_no_write_barrier(catch_context_map());
5648   context->set_closure(function);
5649   context->set_previous(previous);
5650   context->set_extension(name);
5651   context->set_global_object(previous->global_object());
5652   context->set(Context::THROWN_OBJECT_INDEX, thrown_object);
5653   return context;
5654 }
5655
5656
5657 MaybeObject* Heap::AllocateWithContext(JSFunction* function,
5658                                        Context* previous,
5659                                        JSObject* extension) {
5660   Object* result;
5661   { MaybeObject* maybe_result = AllocateFixedArray(Context::MIN_CONTEXT_SLOTS);
5662     if (!maybe_result->ToObject(&result)) return maybe_result;
5663   }
5664   Context* context = reinterpret_cast<Context*>(result);
5665   context->set_map_no_write_barrier(with_context_map());
5666   context->set_closure(function);
5667   context->set_previous(previous);
5668   context->set_extension(extension);
5669   context->set_global_object(previous->global_object());
5670   return context;
5671 }
5672
5673
5674 MaybeObject* Heap::AllocateBlockContext(JSFunction* function,
5675                                         Context* previous,
5676                                         ScopeInfo* scope_info) {
5677   Object* result;
5678   { MaybeObject* maybe_result =
5679         AllocateFixedArrayWithHoles(scope_info->ContextLength());
5680     if (!maybe_result->ToObject(&result)) return maybe_result;
5681   }
5682   Context* context = reinterpret_cast<Context*>(result);
5683   context->set_map_no_write_barrier(block_context_map());
5684   context->set_closure(function);
5685   context->set_previous(previous);
5686   context->set_extension(scope_info);
5687   context->set_global_object(previous->global_object());
5688   return context;
5689 }
5690
5691
5692 MaybeObject* Heap::AllocateScopeInfo(int length) {
5693   FixedArray* scope_info;
5694   MaybeObject* maybe_scope_info = AllocateFixedArray(length, TENURED);
5695   if (!maybe_scope_info->To(&scope_info)) return maybe_scope_info;
5696   scope_info->set_map_no_write_barrier(scope_info_map());
5697   return scope_info;
5698 }
5699
5700
5701 MaybeObject* Heap::AllocateExternal(void* value) {
5702   Foreign* foreign;
5703   { MaybeObject* maybe_result = AllocateForeign(static_cast<Address>(value));
5704     if (!maybe_result->To(&foreign)) return maybe_result;
5705   }
5706   JSObject* external;
5707   { MaybeObject* maybe_result = AllocateJSObjectFromMap(external_map());
5708     if (!maybe_result->To(&external)) return maybe_result;
5709   }
5710   external->SetInternalField(0, foreign);
5711   return external;
5712 }
5713
5714
5715 MaybeObject* Heap::AllocateStruct(InstanceType type) {
5716   Map* map;
5717   switch (type) {
5718 #define MAKE_CASE(NAME, Name, name) \
5719     case NAME##_TYPE: map = name##_map(); break;
5720 STRUCT_LIST(MAKE_CASE)
5721 #undef MAKE_CASE
5722     default:
5723       UNREACHABLE();
5724       return Failure::InternalError();
5725   }
5726   int size = map->instance_size();
5727   AllocationSpace space =
5728       (size > Page::kMaxNonCodeHeapObjectSize) ? LO_SPACE : OLD_POINTER_SPACE;
5729   Object* result;
5730   { MaybeObject* maybe_result = Allocate(map, space);
5731     if (!maybe_result->ToObject(&result)) return maybe_result;
5732   }
5733   Struct::cast(result)->InitializeBody(size);
5734   return result;
5735 }
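
// The switch above is generated from STRUCT_LIST by MAKE_CASE. As a rough
// illustration, assuming the SCRIPT entry of STRUCT_LIST, one expanded case
// looks like:
//
//   case SCRIPT_TYPE: map = script_map(); break;
//
// i.e. each struct instance type is paired with its canonical map from the
// root list before the body is initialized.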
5736
5737
5738 bool Heap::IsHeapIterable() {
5739   return (!old_pointer_space()->was_swept_conservatively() &&
5740           !old_data_space()->was_swept_conservatively());
5741 }
5742
5743
5744 void Heap::EnsureHeapIsIterable() {
5745   ASSERT(IsAllocationAllowed());
5746   if (!IsHeapIterable()) {
5747     CollectAllGarbage(kMakeHeapIterableMask, "Heap::EnsureHeapIsIterable");
5748   }
5749   ASSERT(IsHeapIterable());
5750 }
5751
5752
5753 void Heap::AdvanceIdleIncrementalMarking(intptr_t step_size) {
5754   incremental_marking()->Step(step_size,
5755                               IncrementalMarking::NO_GC_VIA_STACK_GUARD);
5756
5757   if (incremental_marking()->IsComplete()) {
5758     bool uncommit = false;
5759     if (gc_count_at_last_idle_gc_ == gc_count_) {
5760       // No GC since the last full GC; the mutator is probably not active.
5761       isolate_->compilation_cache()->Clear();
5762       uncommit = true;
5763     }
5764     CollectAllGarbage(kNoGCFlags, "idle notification: finalize incremental");
5765     gc_count_at_last_idle_gc_ = gc_count_;
5766     if (uncommit) {
5767       new_space_.Shrink();
5768       UncommitFromSpace();
5769     }
5770   }
5771 }
5772
5773
5774 bool Heap::IdleNotification(int hint) {
5775   // Hints greater than this value indicate that
5776   // the embedder is requesting a lot of GC work.
5777   const int kMaxHint = 1000;
5778   // Minimal hint that allows a full GC.
5779   const int kMinHintForFullGC = 100;
5780   intptr_t size_factor = Min(Max(hint, 20), kMaxHint) / 4;
5781   // The size factor is in range [5..250]. The numbers here are chosen from
5782   // experiments. If you change them, make sure to test with
5783   // chrome/performance_ui_tests --gtest_filter="GeneralMixMemoryTest.*".
5784   intptr_t step_size =
5785       size_factor * IncrementalMarking::kAllocatedThreshold;
5786
5787   if (contexts_disposed_ > 0) {
5788     if (hint >= kMaxHint) {
5789       // The embedder is requesting a lot of GC work after context disposal,
5790       // so we age inline caches so that they don't keep objects from
5791       // the old context alive.
5792       AgeInlineCaches();
5793     }
5794     int mark_sweep_time = Min(TimeMarkSweepWouldTakeInMs(), 1000);
5795     if (hint >= mark_sweep_time && !FLAG_expose_gc &&
5796         incremental_marking()->IsStopped()) {
5797       HistogramTimerScope scope(isolate_->counters()->gc_context());
5798       CollectAllGarbage(kReduceMemoryFootprintMask,
5799                         "idle notification: contexts disposed");
5800     } else {
5801       AdvanceIdleIncrementalMarking(step_size);
5802       contexts_disposed_ = 0;
5803     }
5804     // After context disposal there is likely a lot of garbage remaining, so reset
5805     // the idle notification counters in order to trigger more incremental GCs
5806     // on subsequent idle notifications.
5807     StartIdleRound();
5808     return false;
5809   }
5810
5811   if (!FLAG_incremental_marking || FLAG_expose_gc || Serializer::enabled()) {
5812     return IdleGlobalGC();
5813   }
5814
5815   // By doing small chunks of GC work in each IdleNotification,
5816   // we perform a round of incremental GCs and after that wait until
5817   // the mutator creates enough garbage to justify a new round.
5818   // An incremental GC progresses as follows:
5819   // 1. many incremental marking steps,
5820   // 2. one old space mark-sweep-compact,
5821   // 3. many lazy sweep steps.
5822   // Use mark-sweep-compact events to count incremental GCs in a round.
5823
5824   if (incremental_marking()->IsStopped()) {
5825     if (!mark_compact_collector()->AreSweeperThreadsActivated() &&
5826         !IsSweepingComplete() &&
5827         !AdvanceSweepers(static_cast<int>(step_size))) {
5828       return false;
5829     }
5830   }
5831
5832   if (mark_sweeps_since_idle_round_started_ >= kMaxMarkSweepsInIdleRound) {
5833     if (EnoughGarbageSinceLastIdleRound()) {
5834       StartIdleRound();
5835     } else {
5836       return true;
5837     }
5838   }
5839
5840   int new_mark_sweeps = ms_count_ - ms_count_at_last_idle_notification_;
5841   mark_sweeps_since_idle_round_started_ += new_mark_sweeps;
5842   ms_count_at_last_idle_notification_ = ms_count_;
5843
5844   int remaining_mark_sweeps = kMaxMarkSweepsInIdleRound -
5845                               mark_sweeps_since_idle_round_started_;
5846
5847   if (remaining_mark_sweeps <= 0) {
5848     FinishIdleRound();
5849     return true;
5850   }
5851
5852   if (incremental_marking()->IsStopped()) {
5853     // If there are no more than two GCs left in this idle round and we are
5854     // allowed to do a full GC, then make those GCs full in order to compact
5855     // the code space.
5856     // TODO(ulan): Once we enable code compaction for incremental marking,
5857     // we can get rid of this special case and always start incremental marking.
5858     if (remaining_mark_sweeps <= 2 && hint >= kMinHintForFullGC) {
5859       CollectAllGarbage(kReduceMemoryFootprintMask,
5860                         "idle notification: finalize idle round");
5861     } else {
5862       incremental_marking()->Start();
5863     }
5864   }
5865   if (!incremental_marking()->IsStopped()) {
5866     AdvanceIdleIncrementalMarking(step_size);
5867   }
5868   return false;
5869 }
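
// A minimal embedder-side sketch of how this path is driven, assuming the
// v8::V8::IdleNotification(int hint) entry point of this API generation (the
// exact signature should be checked against v8.h before relying on it):
//
//   void OnEmbedderIdle() {
//     // Hand V8 small slices of idle time; a return value of true means no
//     // further GC work is useful until more garbage accumulates.
//     while (!v8::V8::IdleNotification(100)) {
//       // Optionally yield back to the event loop between steps.
//     }
//   }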
5870
5871
5872 bool Heap::IdleGlobalGC() {
5873   static const int kIdlesBeforeScavenge = 4;
5874   static const int kIdlesBeforeMarkSweep = 7;
5875   static const int kIdlesBeforeMarkCompact = 8;
5876   static const int kMaxIdleCount = kIdlesBeforeMarkCompact + 1;
5877   static const unsigned int kGCsBetweenCleanup = 4;
5878
5879   if (!last_idle_notification_gc_count_init_) {
5880     last_idle_notification_gc_count_ = gc_count_;
5881     last_idle_notification_gc_count_init_ = true;
5882   }
5883
5884   bool uncommit = true;
5885   bool finished = false;
5886
5887   // Reset the number of idle notifications received when a number of
5888   // GCs have taken place. This allows another round of cleanup based
5889   // on idle notifications if enough work has been carried out to
5890   // provoke a number of garbage collections.
5891   if (gc_count_ - last_idle_notification_gc_count_ < kGCsBetweenCleanup) {
5892     number_idle_notifications_ =
5893         Min(number_idle_notifications_ + 1, kMaxIdleCount);
5894   } else {
5895     number_idle_notifications_ = 0;
5896     last_idle_notification_gc_count_ = gc_count_;
5897   }
5898
5899   if (number_idle_notifications_ == kIdlesBeforeScavenge) {
5900     CollectGarbage(NEW_SPACE, "idle notification");
5901     new_space_.Shrink();
5902     last_idle_notification_gc_count_ = gc_count_;
5903   } else if (number_idle_notifications_ == kIdlesBeforeMarkSweep) {
5904     // Before doing the mark-sweep collections we clear the
5905     // compilation cache to avoid hanging on to source code and
5906     // generated code for cached functions.
5907     isolate_->compilation_cache()->Clear();
5908
5909     CollectAllGarbage(kReduceMemoryFootprintMask, "idle notification");
5910     new_space_.Shrink();
5911     last_idle_notification_gc_count_ = gc_count_;
5912
5913   } else if (number_idle_notifications_ == kIdlesBeforeMarkCompact) {
5914     CollectAllGarbage(kReduceMemoryFootprintMask, "idle notification");
5915     new_space_.Shrink();
5916     last_idle_notification_gc_count_ = gc_count_;
5917     number_idle_notifications_ = 0;
5918     finished = true;
5919   } else if (number_idle_notifications_ > kIdlesBeforeMarkCompact) {
5920     // If we have received more than kIdlesBeforeMarkCompact idle
5921     // notifications we do not perform any cleanup because we don't
5922     // expect to gain much by doing so.
5923     finished = true;
5924   }
5925
5926   if (uncommit) UncommitFromSpace();
5927
5928   return finished;
5929 }
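
// Worked trace of the schedule above, assuming the mutator stays idle and no
// other GCs happen in between: notifications 1-3 only bump the counter, the
// 4th (kIdlesBeforeScavenge) triggers a new-space scavenge, the 7th
// (kIdlesBeforeMarkSweep) clears the compilation cache and does a full
// mark-sweep, and the 8th (kIdlesBeforeMarkCompact) does one more full GC and
// reports the round as finished. If at least kGCsBetweenCleanup (4) GCs occur
// for other reasons between notifications, the counter resets and the
// sequence starts over.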
5930
5931
5932 #ifdef DEBUG
5933
5934 void Heap::Print() {
5935   if (!HasBeenSetUp()) return;
5936   isolate()->PrintStack(stdout);
5937   AllSpaces spaces(this);
5938   for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
5939     space->Print();
5940   }
5941 }
5942
5943
5944 void Heap::ReportCodeStatistics(const char* title) {
5945   PrintF(">>>>>> Code Stats (%s) >>>>>>\n", title);
5946   PagedSpace::ResetCodeStatistics();
5947   // We do not look for code in new space, map space, or old space.  If code
5948   // somehow ends up in those spaces, we would miss it here.
5949   code_space_->CollectCodeStatistics();
5950   lo_space_->CollectCodeStatistics();
5951   PagedSpace::ReportCodeStatistics();
5952 }
5953
5954
5955 // This function expects that NewSpace's allocated objects histogram is
5956 // populated (via a call to CollectStatistics or else as a side effect of a
5957 // just-completed scavenge collection).
5958 void Heap::ReportHeapStatistics(const char* title) {
5959   USE(title);
5960   PrintF(">>>>>> =============== %s (%d) =============== >>>>>>\n",
5961          title, gc_count_);
5962   PrintF("old_gen_promotion_limit_ %" V8_PTR_PREFIX "d\n",
5963          old_gen_promotion_limit_);
5964   PrintF("old_gen_allocation_limit_ %" V8_PTR_PREFIX "d\n",
5965          old_gen_allocation_limit_);
5966
5967   PrintF("\n");
5968   PrintF("Number of handles : %d\n", HandleScope::NumberOfHandles(isolate_));
5969   isolate_->global_handles()->PrintStats();
5970   PrintF("\n");
5971
5972   PrintF("Heap statistics : ");
5973   isolate_->memory_allocator()->ReportStatistics();
5974   PrintF("To space : ");
5975   new_space_.ReportStatistics();
5976   PrintF("Old pointer space : ");
5977   old_pointer_space_->ReportStatistics();
5978   PrintF("Old data space : ");
5979   old_data_space_->ReportStatistics();
5980   PrintF("Code space : ");
5981   code_space_->ReportStatistics();
5982   PrintF("Map space : ");
5983   map_space_->ReportStatistics();
5984   PrintF("Cell space : ");
5985   cell_space_->ReportStatistics();
5986   PrintF("Large object space : ");
5987   lo_space_->ReportStatistics();
5988   PrintF(">>>>>> ========================================= >>>>>>\n");
5989 }
5990
5991 #endif  // DEBUG
5992
5993 bool Heap::Contains(HeapObject* value) {
5994   return Contains(value->address());
5995 }
5996
5997
5998 bool Heap::Contains(Address addr) {
5999   if (OS::IsOutsideAllocatedSpace(addr)) return false;
6000   return HasBeenSetUp() &&
6001     (new_space_.ToSpaceContains(addr) ||
6002      old_pointer_space_->Contains(addr) ||
6003      old_data_space_->Contains(addr) ||
6004      code_space_->Contains(addr) ||
6005      map_space_->Contains(addr) ||
6006      cell_space_->Contains(addr) ||
6007      lo_space_->SlowContains(addr));
6008 }
6009
6010
6011 bool Heap::InSpace(HeapObject* value, AllocationSpace space) {
6012   return InSpace(value->address(), space);
6013 }
6014
6015
6016 bool Heap::InSpace(Address addr, AllocationSpace space) {
6017   if (OS::IsOutsideAllocatedSpace(addr)) return false;
6018   if (!HasBeenSetUp()) return false;
6019
6020   switch (space) {
6021     case NEW_SPACE:
6022       return new_space_.ToSpaceContains(addr);
6023     case OLD_POINTER_SPACE:
6024       return old_pointer_space_->Contains(addr);
6025     case OLD_DATA_SPACE:
6026       return old_data_space_->Contains(addr);
6027     case CODE_SPACE:
6028       return code_space_->Contains(addr);
6029     case MAP_SPACE:
6030       return map_space_->Contains(addr);
6031     case CELL_SPACE:
6032       return cell_space_->Contains(addr);
6033     case LO_SPACE:
6034       return lo_space_->SlowContains(addr);
6035   }
6036
6037   return false;
6038 }
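
// Small usage sketch (heap and obj are hypothetical pointers): Contains()
// asks whether an address belongs to any space of this heap, while InSpace()
// narrows the question to a single space:
//
//   bool anywhere = heap->Contains(obj);
//   bool in_code_space = heap->InSpace(obj, CODE_SPACE);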
6039
6040
6041 #ifdef VERIFY_HEAP
6042 void Heap::Verify() {
6043   CHECK(HasBeenSetUp());
6044
6045   store_buffer()->Verify();
6046
6047   VerifyPointersVisitor visitor;
6048   IterateRoots(&visitor, VISIT_ONLY_STRONG);
6049
6050   new_space_.Verify();
6051
6052   old_pointer_space_->Verify(&visitor);
6053   map_space_->Verify(&visitor);
6054
6055   VerifyPointersVisitor no_dirty_regions_visitor;
6056   old_data_space_->Verify(&no_dirty_regions_visitor);
6057   code_space_->Verify(&no_dirty_regions_visitor);
6058   cell_space_->Verify(&no_dirty_regions_visitor);
6059
6060   lo_space_->Verify();
6061 }
6062 #endif
6063
6064
6065 MaybeObject* Heap::InternalizeUtf8String(Vector<const char> string) {
6066   Object* result = NULL;
6067   Object* new_table;
6068   { MaybeObject* maybe_new_table =
6069         string_table()->LookupUtf8String(string, &result);
6070     if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
6071   }
6072   // Can't use set_string_table because StringTable::cast knows that
6073   // StringTable is a singleton and checks for identity.
6074   roots_[kStringTableRootIndex] = new_table;
6075   ASSERT(result != NULL);
6076   return result;
6077 }
6078
6079
6080 MaybeObject* Heap::InternalizeOneByteString(Vector<const uint8_t> string) {
6081   Object* result = NULL;
6082   Object* new_table;
6083   { MaybeObject* maybe_new_table =
6084         string_table()->LookupOneByteString(string, &result);
6085     if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
6086   }
6087   // Can't use set_string_table because StringTable::cast knows that
6088   // StringTable is a singleton and checks for identity.
6089   roots_[kStringTableRootIndex] = new_table;
6090   ASSERT(result != NULL);
6091   return result;
6092 }
6093
6094
6095 MaybeObject* Heap::InternalizeOneByteString(Handle<SeqOneByteString> string,
6096                                      int from,
6097                                      int length) {
6098   Object* result = NULL;
6099   Object* new_table;
6100   { MaybeObject* maybe_new_table =
6101         string_table()->LookupSubStringOneByteString(string,
6102                                                    from,
6103                                                    length,
6104                                                    &result);
6105     if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
6106   }
6107   // Can't use set_string_table because StringTable::cast knows that
6108   // StringTable is a singleton and checks for identity.
6109   roots_[kStringTableRootIndex] = new_table;
6110   ASSERT(result != NULL);
6111   return result;
6112 }
6113
6114
6115 MaybeObject* Heap::InternalizeTwoByteString(Vector<const uc16> string) {
6116   Object* result = NULL;
6117   Object* new_table;
6118   { MaybeObject* maybe_new_table =
6119         string_table()->LookupTwoByteString(string, &result);
6120     if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
6121   }
6122   // Can't use set_string_table because StringTable::cast knows that
6123   // StringTable is a singleton and checks for identity.
6124   roots_[kStringTableRootIndex] = new_table;
6125   ASSERT(result != NULL);
6126   return result;
6127 }
6128
6129
6130 MaybeObject* Heap::InternalizeString(String* string) {
6131   if (string->IsInternalizedString()) return string;
6132   Object* result = NULL;
6133   Object* new_table;
6134   { MaybeObject* maybe_new_table =
6135         string_table()->LookupString(string, &result);
6136     if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
6137   }
6138   // Can't use set_string_table because StringTable::cast knows that
6139   // StringTable is a singleton and checks for identity.
6140   roots_[kStringTableRootIndex] = new_table;
6141   ASSERT(result != NULL);
6142   return result;
6143 }
6144
6145
6146 bool Heap::InternalizeStringIfExists(String* string, String** result) {
6147   if (string->IsInternalizedString()) {
6148     *result = string;
6149     return true;
6150   }
6151   return string_table()->LookupStringIfExists(string, result);
6152 }
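
// A caller-side sketch of the usual MaybeObject handling for the helpers
// above (heap and str are hypothetical values):
//
//   String* internalized = NULL;
//   if (!heap->InternalizeStringIfExists(str, &internalized)) {
//     // Not in the string table yet; insert it, propagating allocation
//     // failures so the caller can retry after a GC.
//     MaybeObject* maybe = heap->InternalizeString(str);
//     if (!maybe->To(&internalized)) return maybe;
//   }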
6153
6154
6155 void Heap::ZapFromSpace() {
6156   NewSpacePageIterator it(new_space_.FromSpaceStart(),
6157                           new_space_.FromSpaceEnd());
6158   while (it.has_next()) {
6159     NewSpacePage* page = it.next();
6160     for (Address cursor = page->area_start(), limit = page->area_end();
6161          cursor < limit;
6162          cursor += kPointerSize) {
6163       Memory::Address_at(cursor) = kFromSpaceZapValue;
6164     }
6165   }
6166 }
6167
6168
6169 void Heap::IterateAndMarkPointersToFromSpace(Address start,
6170                                              Address end,
6171                                              ObjectSlotCallback callback) {
6172   Address slot_address = start;
6173
6174   // We are not collecting slots on new space objects during mutation,
6175   // thus we have to scan for pointers to evacuation candidates when we
6176   // promote objects. But we should not record any slots in non-black
6177   // objects. A grey object's slots would be rescanned.
6178   // A white object might not survive until the end of the collection, so
6179   // it would be a violation of the invariant to record its slots.
6180   bool record_slots = false;
6181   if (incremental_marking()->IsCompacting()) {
6182     MarkBit mark_bit = Marking::MarkBitFrom(HeapObject::FromAddress(start));
6183     record_slots = Marking::IsBlack(mark_bit);
6184   }
6185
6186   while (slot_address < end) {
6187     Object** slot = reinterpret_cast<Object**>(slot_address);
6188     Object* object = *slot;
6189     // If the store buffer becomes overfull we mark pages as being exempt from
6190     // the store buffer.  These pages are scanned to find pointers that point
6191     // to the new space.  In that case we may hit newly promoted objects and
6192     // fix the pointers before the promotion queue gets to them.  Thus the 'if'.
6193     if (object->IsHeapObject()) {
6194       if (Heap::InFromSpace(object)) {
6195         callback(reinterpret_cast<HeapObject**>(slot),
6196                  HeapObject::cast(object));
6197         Object* new_object = *slot;
6198         if (InNewSpace(new_object)) {
6199           SLOW_ASSERT(Heap::InToSpace(new_object));
6200           SLOW_ASSERT(new_object->IsHeapObject());
6201           store_buffer_.EnterDirectlyIntoStoreBuffer(
6202               reinterpret_cast<Address>(slot));
6203         }
6204         SLOW_ASSERT(!MarkCompactCollector::IsOnEvacuationCandidate(new_object));
6205       } else if (record_slots &&
6206                  MarkCompactCollector::IsOnEvacuationCandidate(object)) {
6207         mark_compact_collector()->RecordSlot(slot, slot, object);
6208       }
6209     }
6210     slot_address += kPointerSize;
6211   }
6212 }
6213
6214
6215 #ifdef DEBUG
6216 typedef bool (*CheckStoreBufferFilter)(Object** addr);
6217
6218
6219 bool IsAMapPointerAddress(Object** addr) {
6220   uintptr_t a = reinterpret_cast<uintptr_t>(addr);
6221   int mod = a % Map::kSize;
6222   return mod >= Map::kPointerFieldsBeginOffset &&
6223          mod < Map::kPointerFieldsEndOffset;
6224 }
6225
6226
6227 bool EverythingsAPointer(Object** addr) {
6228   return true;
6229 }
6230
6231
6232 static void CheckStoreBuffer(Heap* heap,
6233                              Object** current,
6234                              Object** limit,
6235                              Object**** store_buffer_position,
6236                              Object*** store_buffer_top,
6237                              CheckStoreBufferFilter filter,
6238                              Address special_garbage_start,
6239                              Address special_garbage_end) {
6240   Map* free_space_map = heap->free_space_map();
6241   for ( ; current < limit; current++) {
6242     Object* o = *current;
6243     Address current_address = reinterpret_cast<Address>(current);
6244     // Skip free space.
6245     if (o == free_space_map) {
6247       FreeSpace* free_space =
6248           FreeSpace::cast(HeapObject::FromAddress(current_address));
6249       int skip = free_space->Size();
6250       ASSERT(current_address + skip <= reinterpret_cast<Address>(limit));
6251       ASSERT(skip > 0);
6252       current_address += skip - kPointerSize;
6253       current = reinterpret_cast<Object**>(current_address);
6254       continue;
6255     }
6256     // Skip the current linear allocation space between top and limit which is
6257     // unmarked with the free space map, but can contain junk.
6258     if (current_address == special_garbage_start &&
6259         special_garbage_end != special_garbage_start) {
6260       current_address = special_garbage_end - kPointerSize;
6261       current = reinterpret_cast<Object**>(current_address);
6262       continue;
6263     }
6264     if (!(*filter)(current)) continue;
6265     ASSERT(current_address < special_garbage_start ||
6266            current_address >= special_garbage_end);
6267     ASSERT(reinterpret_cast<uintptr_t>(o) != kFreeListZapValue);
6268     // We have to check that the pointer does not point into new space
6269     // without trying to cast it to a heap object since the hash field of
6270     // a string can contain values like 1 and 3 which are tagged null
6271     // pointers.
6272     if (!heap->InNewSpace(o)) continue;
6273     while (**store_buffer_position < current &&
6274            *store_buffer_position < store_buffer_top) {
6275       (*store_buffer_position)++;
6276     }
6277     if (**store_buffer_position != current ||
6278         *store_buffer_position == store_buffer_top) {
6279       Object** obj_start = current;
6280       while (!(*obj_start)->IsMap()) obj_start--;
6281       UNREACHABLE();
6282     }
6283   }
6284 }
6285
6286
6287 // Check that the store buffer contains all intergenerational pointers by
6288 // scanning a page and ensuring that all pointers to young space are in the
6289 // store buffer.
6290 void Heap::OldPointerSpaceCheckStoreBuffer() {
6291   OldSpace* space = old_pointer_space();
6292   PageIterator pages(space);
6293
6294   store_buffer()->SortUniq();
6295
6296   while (pages.has_next()) {
6297     Page* page = pages.next();
6298     Object** current = reinterpret_cast<Object**>(page->area_start());
6299
6300     Address end = page->area_end();
6301
6302     Object*** store_buffer_position = store_buffer()->Start();
6303     Object*** store_buffer_top = store_buffer()->Top();
6304
6305     Object** limit = reinterpret_cast<Object**>(end);
6306     CheckStoreBuffer(this,
6307                      current,
6308                      limit,
6309                      &store_buffer_position,
6310                      store_buffer_top,
6311                      &EverythingsAPointer,
6312                      space->top(),
6313                      space->limit());
6314   }
6315 }
6316
6317
6318 void Heap::MapSpaceCheckStoreBuffer() {
6319   MapSpace* space = map_space();
6320   PageIterator pages(space);
6321
6322   store_buffer()->SortUniq();
6323
6324   while (pages.has_next()) {
6325     Page* page = pages.next();
6326     Object** current = reinterpret_cast<Object**>(page->area_start());
6327
6328     Address end = page->area_end();
6329
6330     Object*** store_buffer_position = store_buffer()->Start();
6331     Object*** store_buffer_top = store_buffer()->Top();
6332
6333     Object** limit = reinterpret_cast<Object**>(end);
6334     CheckStoreBuffer(this,
6335                      current,
6336                      limit,
6337                      &store_buffer_position,
6338                      store_buffer_top,
6339                      &IsAMapPointerAddress,
6340                      space->top(),
6341                      space->limit());
6342   }
6343 }
6344
6345
6346 void Heap::LargeObjectSpaceCheckStoreBuffer() {
6347   LargeObjectIterator it(lo_space());
6348   for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
6349     // We only have code, sequential strings, or fixed arrays in large
6350     // object space, and only fixed arrays can possibly contain pointers to
6351     // the young generation.
6352     if (object->IsFixedArray()) {
6353       Object*** store_buffer_position = store_buffer()->Start();
6354       Object*** store_buffer_top = store_buffer()->Top();
6355       Object** current = reinterpret_cast<Object**>(object->address());
6356       Object** limit =
6357           reinterpret_cast<Object**>(object->address() + object->Size());
6358       CheckStoreBuffer(this,
6359                        current,
6360                        limit,
6361                        &store_buffer_position,
6362                        store_buffer_top,
6363                        &EverythingsAPointer,
6364                        NULL,
6365                        NULL);
6366     }
6367   }
6368 }
6369 #endif
6370
6371
6372 void Heap::IterateRoots(ObjectVisitor* v, VisitMode mode) {
6373   IterateStrongRoots(v, mode);
6374   IterateWeakRoots(v, mode);
6375 }
6376
6377
6378 void Heap::IterateWeakRoots(ObjectVisitor* v, VisitMode mode) {
6379   v->VisitPointer(reinterpret_cast<Object**>(&roots_[kStringTableRootIndex]));
6380   v->Synchronize(VisitorSynchronization::kStringTable);
6381   if (mode != VISIT_ALL_IN_SCAVENGE &&
6382       mode != VISIT_ALL_IN_SWEEP_NEWSPACE) {
6383     // Scavenge collections have special processing for this.
6384     external_string_table_.Iterate(v);
6385     error_object_list_.Iterate(v);
6386   }
6387   v->Synchronize(VisitorSynchronization::kExternalStringsTable);
6388 }
6389
6390
6391 void Heap::IterateStrongRoots(ObjectVisitor* v, VisitMode mode) {
6392   v->VisitPointers(&roots_[0], &roots_[kStrongRootListLength]);
6393   v->Synchronize(VisitorSynchronization::kStrongRootList);
6394
6395   v->VisitPointer(BitCast<Object**>(&hidden_string_));
6396   v->Synchronize(VisitorSynchronization::kInternalizedString);
6397
6398   isolate_->bootstrapper()->Iterate(v);
6399   v->Synchronize(VisitorSynchronization::kBootstrapper);
6400   isolate_->Iterate(v);
6401   v->Synchronize(VisitorSynchronization::kTop);
6402   Relocatable::Iterate(v);
6403   v->Synchronize(VisitorSynchronization::kRelocatable);
6404
6405 #ifdef ENABLE_DEBUGGER_SUPPORT
6406   isolate_->debug()->Iterate(v);
6407   if (isolate_->deoptimizer_data() != NULL) {
6408     isolate_->deoptimizer_data()->Iterate(v);
6409   }
6410 #endif
6411   v->Synchronize(VisitorSynchronization::kDebug);
6412   isolate_->compilation_cache()->Iterate(v);
6413   v->Synchronize(VisitorSynchronization::kCompilationCache);
6414
6415   // Iterate over local handles in handle scopes.
6416   isolate_->handle_scope_implementer()->Iterate(v);
6417   isolate_->IterateDeferredHandles(v);
6418   v->Synchronize(VisitorSynchronization::kHandleScope);
6419
6420   // Iterate over the builtin code objects and code stubs in the
6421   // heap. Note that it is not necessary to iterate over code objects
6422   // on scavenge collections.
6423   if (mode != VISIT_ALL_IN_SCAVENGE) {
6424     isolate_->builtins()->IterateBuiltins(v);
6425   }
6426   v->Synchronize(VisitorSynchronization::kBuiltins);
6427
6428   // Iterate over global handles.
6429   switch (mode) {
6430     case VISIT_ONLY_STRONG:
6431       isolate_->global_handles()->IterateStrongRoots(v);
6432       break;
6433     case VISIT_ALL_IN_SCAVENGE:
6434       isolate_->global_handles()->IterateNewSpaceStrongAndDependentRoots(v);
6435       break;
6436     case VISIT_ALL_IN_SWEEP_NEWSPACE:
6437     case VISIT_ALL:
6438       isolate_->global_handles()->IterateAllRoots(v);
6439       break;
6440   }
6441   v->Synchronize(VisitorSynchronization::kGlobalHandles);
6442
6443   // Iterate over pointers being held by inactive threads.
6444   isolate_->thread_manager()->Iterate(v);
6445   v->Synchronize(VisitorSynchronization::kThreadManager);
6446
6447   // Iterate over the pointers the Serialization/Deserialization code is
6448   // holding.
6449   // During garbage collection this keeps the partial snapshot cache alive.
6450   // During deserialization of the startup snapshot this creates the partial
6451   // snapshot cache and deserializes the objects it refers to.  During
6452   // serialization this does nothing, since the partial snapshot cache is
6453   // empty.  However the next thing we do is create the partial snapshot,
6454   // filling up the partial snapshot cache with objects it needs as we go.
6455   SerializerDeserializer::Iterate(v);
6456   // We don't do a v->Synchronize call here, because in debug mode that will
6457   // output a flag to the snapshot.  However at this point the serializer and
6458   // deserializer are deliberately a little unsynchronized (see above) so the
6459   // checking of the sync flag in the snapshot would fail.
6460 }
6461
6462
6463 // TODO(1236194): Since the heap size is configurable on the command line
6464 // and through the API, we should gracefully handle the case that the heap
6465 // size is not big enough to fit all the initial objects.
6466 bool Heap::ConfigureHeap(int max_semispace_size,
6467                          intptr_t max_old_gen_size,
6468                          intptr_t max_executable_size) {
6469   if (HasBeenSetUp()) return false;
6470
6471   if (FLAG_stress_compaction) {
6472     // This will cause more frequent GCs when stressing.
6473     max_semispace_size_ = Page::kPageSize;
6474   }
6475
6476   if (max_semispace_size > 0) {
6477     if (max_semispace_size < Page::kPageSize) {
6478       max_semispace_size = Page::kPageSize;
6479       if (FLAG_trace_gc) {
6480         PrintPID("Max semispace size cannot be less than %dkbytes\n",
6481                  Page::kPageSize >> 10);
6482       }
6483     }
6484     max_semispace_size_ = max_semispace_size;
6485   }
6486
6487   if (Snapshot::IsEnabled()) {
6488     // If we are using a snapshot we always reserve the default amount
6489     // of memory for each semispace because code in the snapshot has
6490     // write-barrier code that relies on the size and alignment of new
6491     // space.  We therefore cannot use a larger max semispace size
6492     // than the default reserved semispace size.
6493     if (max_semispace_size_ > reserved_semispace_size_) {
6494       max_semispace_size_ = reserved_semispace_size_;
6495       if (FLAG_trace_gc) {
6496         PrintPID("Max semispace size cannot be more than %dkbytes\n",
6497                  reserved_semispace_size_ >> 10);
6498       }
6499     }
6500   } else {
6501     // If we are not using snapshots we reserve space for the actual
6502     // max semispace size.
6503     reserved_semispace_size_ = max_semispace_size_;
6504   }
6505
6506   if (max_old_gen_size > 0) max_old_generation_size_ = max_old_gen_size;
6507   if (max_executable_size > 0) {
6508     max_executable_size_ = RoundUp(max_executable_size, Page::kPageSize);
6509   }
6510
6511   // The max executable size must be less than or equal to the max old
6512   // generation size.
6513   if (max_executable_size_ > max_old_generation_size_) {
6514     max_executable_size_ = max_old_generation_size_;
6515   }
6516
6517   // The new space size must be a power of two to support single-bit testing
6518   // for containment.
6519   max_semispace_size_ = RoundUpToPowerOf2(max_semispace_size_);
6520   reserved_semispace_size_ = RoundUpToPowerOf2(reserved_semispace_size_);
6521   initial_semispace_size_ = Min(initial_semispace_size_, max_semispace_size_);
6522   external_allocation_limit_ = 16 * max_semispace_size_;
6523
6524   // The old generation is paged and needs at least one page for each space.
6525   int paged_space_count = LAST_PAGED_SPACE - FIRST_PAGED_SPACE + 1;
6526   max_old_generation_size_ = Max(static_cast<intptr_t>(paged_space_count *
6527                                                        Page::kPageSize),
6528                                  RoundUp(max_old_generation_size_,
6529                                          Page::kPageSize));
6530
6531   configured_ = true;
6532   return true;
6533 }
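
// Worked example of the adjustments above, assuming Page::kPageSize is 1 MB
// on this configuration: ConfigureHeap(3 * MB, 700 * MB, 256 * MB) keeps the
// 3 MB semispace request (it is above one page), rounds it up to the next
// power of two (4 MB), derives external_allocation_limit_ = 16 * 4 MB =
// 64 MB, rounds the executable limit to a page multiple (256 MB already is
// one) and leaves it below the old generation limit, and finally rounds the
// old generation limit up to a page multiple with at least one page per
// paged space.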
6534
6535
6536 bool Heap::ConfigureHeapDefault() {
6537   return ConfigureHeap(static_cast<intptr_t>(FLAG_max_new_space_size / 2) * KB,
6538                        static_cast<intptr_t>(FLAG_max_old_space_size) * MB,
6539                        static_cast<intptr_t>(FLAG_max_executable_size) * MB);
6540 }
6541
6542
6543 void Heap::RecordStats(HeapStats* stats, bool take_snapshot) {
6544   *stats->start_marker = HeapStats::kStartMarker;
6545   *stats->end_marker = HeapStats::kEndMarker;
6546   *stats->new_space_size = new_space_.SizeAsInt();
6547   *stats->new_space_capacity = static_cast<int>(new_space_.Capacity());
6548   *stats->old_pointer_space_size = old_pointer_space_->SizeOfObjects();
6549   *stats->old_pointer_space_capacity = old_pointer_space_->Capacity();
6550   *stats->old_data_space_size = old_data_space_->SizeOfObjects();
6551   *stats->old_data_space_capacity = old_data_space_->Capacity();
6552   *stats->code_space_size = code_space_->SizeOfObjects();
6553   *stats->code_space_capacity = code_space_->Capacity();
6554   *stats->map_space_size = map_space_->SizeOfObjects();
6555   *stats->map_space_capacity = map_space_->Capacity();
6556   *stats->cell_space_size = cell_space_->SizeOfObjects();
6557   *stats->cell_space_capacity = cell_space_->Capacity();
6558   *stats->lo_space_size = lo_space_->Size();
6559   isolate_->global_handles()->RecordStats(stats);
6560   *stats->memory_allocator_size = isolate()->memory_allocator()->Size();
6561   *stats->memory_allocator_capacity =
6562       isolate()->memory_allocator()->Size() +
6563       isolate()->memory_allocator()->Available();
6564   *stats->os_error = OS::GetLastError();
6566   if (take_snapshot) {
6567     HeapIterator iterator(this);
6568     for (HeapObject* obj = iterator.next();
6569          obj != NULL;
6570          obj = iterator.next()) {
6571       InstanceType type = obj->map()->instance_type();
6572       ASSERT(0 <= type && type <= LAST_TYPE);
6573       stats->objects_per_type[type]++;
6574       stats->size_per_type[type] += obj->Size();
6575     }
6576   }
6577 }
6578
6579
6580 intptr_t Heap::PromotedSpaceSizeOfObjects() {
6581   return old_pointer_space_->SizeOfObjects()
6582       + old_data_space_->SizeOfObjects()
6583       + code_space_->SizeOfObjects()
6584       + map_space_->SizeOfObjects()
6585       + cell_space_->SizeOfObjects()
6586       + lo_space_->SizeOfObjects();
6587 }
6588
6589
6590 intptr_t Heap::PromotedExternalMemorySize() {
6591   if (amount_of_external_allocated_memory_
6592       <= amount_of_external_allocated_memory_at_last_global_gc_) return 0;
6593   return amount_of_external_allocated_memory_
6594       - amount_of_external_allocated_memory_at_last_global_gc_;
6595 }
6596
6597
6598 V8_DECLARE_ONCE(initialize_gc_once);
6599
6600 static void InitializeGCOnce() {
6601   InitializeScavengingVisitorsTables();
6602   NewSpaceScavenger::Initialize();
6603   MarkCompactCollector::Initialize();
6604 }
6605
6606 bool Heap::SetUp() {
6607 #ifdef DEBUG
6608   allocation_timeout_ = FLAG_gc_interval;
6609 #endif
6610
6611   // Initialize heap spaces and initial maps and objects. Whenever something
6612   // goes wrong, just return false. The caller should check the results and
6613   // call Heap::TearDown() to release allocated memory.
6614   //
6615   // If the heap is not yet configured (e.g. through the API), configure it.
6616   // Configuration is based on the flags new-space-size (really the semispace
6617   // size) and old-space-size if set or the initial values of semispace_size_
6618   // and old_generation_size_ otherwise.
6619   if (!configured_) {
6620     if (!ConfigureHeapDefault()) return false;
6621   }
6622
6623   CallOnce(&initialize_gc_once, &InitializeGCOnce);
6624
6625   MarkMapPointersAsEncoded(false);
6626
6627   // Set up memory allocator.
6628   if (!isolate_->memory_allocator()->SetUp(MaxReserved(), MaxExecutableSize()))
6629       return false;
6630
6631   // Set up new space.
6632   if (!new_space_.SetUp(reserved_semispace_size_, max_semispace_size_)) {
6633     return false;
6634   }
6635
6636   // Initialize old pointer space.
6637   old_pointer_space_ =
6638       new OldSpace(this,
6639                    max_old_generation_size_,
6640                    OLD_POINTER_SPACE,
6641                    NOT_EXECUTABLE);
6642   if (old_pointer_space_ == NULL) return false;
6643   if (!old_pointer_space_->SetUp()) return false;
6644
6645   // Initialize old data space.
6646   old_data_space_ =
6647       new OldSpace(this,
6648                    max_old_generation_size_,
6649                    OLD_DATA_SPACE,
6650                    NOT_EXECUTABLE);
6651   if (old_data_space_ == NULL) return false;
6652   if (!old_data_space_->SetUp()) return false;
6653
6654   // Initialize the code space, set its maximum capacity to the old
6655   // generation size. It needs executable memory.
6656   // On 64-bit platform(s), we put all code objects in a 2 GB range of
6657   // virtual address space, so that they can call each other with near calls.
6658   if (code_range_size_ > 0) {
6659     if (!isolate_->code_range()->SetUp(code_range_size_)) {
6660       return false;
6661     }
6662   }
6663
6664   code_space_ =
6665       new OldSpace(this, max_old_generation_size_, CODE_SPACE, EXECUTABLE);
6666   if (code_space_ == NULL) return false;
6667   if (!code_space_->SetUp()) return false;
6668
6669   // Initialize map space.
6670   map_space_ = new MapSpace(this, max_old_generation_size_, MAP_SPACE);
6671   if (map_space_ == NULL) return false;
6672   if (!map_space_->SetUp()) return false;
6673
6674   // Initialize global property cell space.
6675   cell_space_ = new CellSpace(this, max_old_generation_size_, CELL_SPACE);
6676   if (cell_space_ == NULL) return false;
6677   if (!cell_space_->SetUp()) return false;
6678
6679   // The large object space may contain code or data.  We set the memory
6680   // to be non-executable here for safety, but this means we need to enable it
6681   // explicitly when allocating large code objects.
6682   lo_space_ = new LargeObjectSpace(this, max_old_generation_size_, LO_SPACE);
6683   if (lo_space_ == NULL) return false;
6684   if (!lo_space_->SetUp()) return false;
6685
6686   // Set up the seed that is used to randomize the string hash function.
6687   ASSERT(hash_seed() == 0);
6688   if (FLAG_randomize_hashes) {
6689     if (FLAG_hash_seed == 0) {
6690       set_hash_seed(
6691           Smi::FromInt(V8::RandomPrivate(isolate()) & 0x3fffffff));
6692     } else {
6693       set_hash_seed(Smi::FromInt(FLAG_hash_seed));
6694     }
6695   }
6696
6697   LOG(isolate_, IntPtrTEvent("heap-capacity", Capacity()));
6698   LOG(isolate_, IntPtrTEvent("heap-available", Available()));
6699
6700   store_buffer()->SetUp();
6701
6702   if (FLAG_parallel_recompilation) relocation_mutex_ = OS::CreateMutex();
6703 #ifdef DEBUG
6704   relocation_mutex_locked_by_optimizer_thread_ = false;
6705 #endif  // DEBUG
6706
6707   return true;
6708 }
6709
6710 bool Heap::CreateHeapObjects() {
6711   // Create initial maps.
6712   if (!CreateInitialMaps()) return false;
6713   if (!CreateApiObjects()) return false;
6714
6715   // Create initial objects
6716   if (!CreateInitialObjects()) return false;
6717
6718   native_contexts_list_ = undefined_value();
6719   return true;
6720 }
6721
6722
6723 void Heap::SetStackLimits() {
6724   ASSERT(isolate_ != NULL);
6725   ASSERT(isolate_ == isolate());
6726   // On 64-bit machines, pointers are generally out of range of Smis.  We write
6727   // something that looks like an out-of-range Smi to the GC.
6728
6729   // Set up the special root array entries containing the stack limits.
6730   // These are actually addresses, but the tag makes the GC ignore it.
6731   roots_[kStackLimitRootIndex] =
6732       reinterpret_cast<Object*>(
6733           (isolate_->stack_guard()->jslimit() & ~kSmiTagMask) | kSmiTag);
6734   roots_[kRealStackLimitRootIndex] =
6735       reinterpret_cast<Object*>(
6736           (isolate_->stack_guard()->real_jslimit() & ~kSmiTagMask) | kSmiTag);
6737 }
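
// A small worked example of the masking above, assuming the usual tagging
// scheme (kSmiTag == 0, kSmiTagMask == 1): a jslimit of 0x7fff5fbff8d3 is
// stored as
//
//   (0x7fff5fbff8d3 & ~1) | 0  ==  0x7fff5fbff8d2
//
// so the low bit is cleared, the value passes the Smi tag check, and the GC
// treats it as an integer instead of trying to follow it as a heap pointer.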
6738
6739
6740 void Heap::TearDown() {
6741 #ifdef VERIFY_HEAP
6742   if (FLAG_verify_heap) {
6743     Verify();
6744   }
6745 #endif
6746
6747   if (FLAG_print_cumulative_gc_stat) {
6748     PrintF("\n");
6749     PrintF("gc_count=%d ", gc_count_);
6750     PrintF("mark_sweep_count=%d ", ms_count_);
6751     PrintF("max_gc_pause=%.1f ", get_max_gc_pause());
6752     PrintF("total_gc_time=%.1f ", total_gc_time_ms_);
6753     PrintF("min_in_mutator=%.1f ", get_min_in_mutator());
6754     PrintF("max_alive_after_gc=%" V8_PTR_PREFIX "d ",
6755            get_max_alive_after_gc());
6756     PrintF("total_marking_time=%.1f ", marking_time());
6757     PrintF("total_sweeping_time=%.1f ", sweeping_time());
6758     PrintF("\n\n");
6759   }
6760
6761   isolate_->global_handles()->TearDown();
6762
6763   external_string_table_.TearDown();
6764
6765   error_object_list_.TearDown();
6766
6767   new_space_.TearDown();
6768
6769   if (old_pointer_space_ != NULL) {
6770     old_pointer_space_->TearDown();
6771     delete old_pointer_space_;
6772     old_pointer_space_ = NULL;
6773   }
6774
6775   if (old_data_space_ != NULL) {
6776     old_data_space_->TearDown();
6777     delete old_data_space_;
6778     old_data_space_ = NULL;
6779   }
6780
6781   if (code_space_ != NULL) {
6782     code_space_->TearDown();
6783     delete code_space_;
6784     code_space_ = NULL;
6785   }
6786
6787   if (map_space_ != NULL) {
6788     map_space_->TearDown();
6789     delete map_space_;
6790     map_space_ = NULL;
6791   }
6792
6793   if (cell_space_ != NULL) {
6794     cell_space_->TearDown();
6795     delete cell_space_;
6796     cell_space_ = NULL;
6797   }
6798
6799   if (lo_space_ != NULL) {
6800     lo_space_->TearDown();
6801     delete lo_space_;
6802     lo_space_ = NULL;
6803   }
6804
6805   store_buffer()->TearDown();
6806   incremental_marking()->TearDown();
6807
6808   isolate_->memory_allocator()->TearDown();
6809
6810   delete relocation_mutex_;
6811 }
6812
6813
6814 void Heap::AddGCPrologueCallback(GCPrologueCallback callback, GCType gc_type) {
6815   ASSERT(callback != NULL);
6816   GCPrologueCallbackPair pair(callback, gc_type);
6817   ASSERT(!gc_prologue_callbacks_.Contains(pair));
6818   return gc_prologue_callbacks_.Add(pair);
6819 }
6820
6821
6822 void Heap::RemoveGCPrologueCallback(GCPrologueCallback callback) {
6823   ASSERT(callback != NULL);
6824   for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
6825     if (gc_prologue_callbacks_[i].callback == callback) {
6826       gc_prologue_callbacks_.Remove(i);
6827       return;
6828     }
6829   }
6830   UNREACHABLE();
6831 }
6832
6833
6834 void Heap::AddGCEpilogueCallback(GCEpilogueCallback callback, GCType gc_type) {
6835   ASSERT(callback != NULL);
6836   GCEpilogueCallbackPair pair(callback, gc_type);
6837   ASSERT(!gc_epilogue_callbacks_.Contains(pair));
6838   return gc_epilogue_callbacks_.Add(pair);
6839 }
6840
6841
6842 void Heap::RemoveGCEpilogueCallback(GCEpilogueCallback callback) {
6843   ASSERT(callback != NULL);
6844   for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
6845     if (gc_epilogue_callbacks_[i].callback == callback) {
6846       gc_epilogue_callbacks_.Remove(i);
6847       return;
6848     }
6849   }
6850   UNREACHABLE();
6851 }
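
// Embedder-side sketch of how these lists are typically populated, assuming
// the v8::V8::AddGCPrologueCallback/RemoveGCPrologueCallback API of this
// generation (verify the exact signatures in v8.h):
//
//   static void OnGCStart(v8::GCType type, v8::GCCallbackFlags flags) {
//     // e.g. record a timestamp or pause background work during the GC.
//   }
//
//   v8::V8::AddGCPrologueCallback(OnGCStart, v8::kGCTypeMarkSweepCompact);
//   ...
//   v8::V8::RemoveGCPrologueCallback(OnGCStart);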
6852
6853
6854 #ifdef DEBUG
6855
6856 class PrintHandleVisitor: public ObjectVisitor {
6857  public:
6858   void VisitPointers(Object** start, Object** end) {
6859     for (Object** p = start; p < end; p++)
6860       PrintF("  handle %p to %p\n",
6861              reinterpret_cast<void*>(p),
6862              reinterpret_cast<void*>(*p));
6863   }
6864 };
6865
6866 void Heap::PrintHandles() {
6867   PrintF("Handles:\n");
6868   PrintHandleVisitor v;
6869   isolate_->handle_scope_implementer()->Iterate(&v);
6870 }
6871
6872 #endif
6873
6874
6875 Space* AllSpaces::next() {
6876   switch (counter_++) {
6877     case NEW_SPACE:
6878       return heap_->new_space();
6879     case OLD_POINTER_SPACE:
6880       return heap_->old_pointer_space();
6881     case OLD_DATA_SPACE:
6882       return heap_->old_data_space();
6883     case CODE_SPACE:
6884       return heap_->code_space();
6885     case MAP_SPACE:
6886       return heap_->map_space();
6887     case CELL_SPACE:
6888       return heap_->cell_space();
6889     case LO_SPACE:
6890       return heap_->lo_space();
6891     default:
6892       return NULL;
6893   }
6894 }
6895
6896
6897 PagedSpace* PagedSpaces::next() {
6898   switch (counter_++) {
6899     case OLD_POINTER_SPACE:
6900       return heap_->old_pointer_space();
6901     case OLD_DATA_SPACE:
6902       return heap_->old_data_space();
6903     case CODE_SPACE:
6904       return heap_->code_space();
6905     case MAP_SPACE:
6906       return heap_->map_space();
6907     case CELL_SPACE:
6908       return heap_->cell_space();
6909     default:
6910       return NULL;
6911   }
6912 }
6913
6914
6916 OldSpace* OldSpaces::next() {
6917   switch (counter_++) {
6918     case OLD_POINTER_SPACE:
6919       return heap_->old_pointer_space();
6920     case OLD_DATA_SPACE:
6921       return heap_->old_data_space();
6922     case CODE_SPACE:
6923       return heap_->code_space();
6924     default:
6925       return NULL;
6926   }
6927 }
6928
6929
6930 SpaceIterator::SpaceIterator(Heap* heap)
6931     : heap_(heap),
6932       current_space_(FIRST_SPACE),
6933       iterator_(NULL),
6934       size_func_(NULL) {
6935 }
6936
6937
6938 SpaceIterator::SpaceIterator(Heap* heap, HeapObjectCallback size_func)
6939     : heap_(heap),
6940       current_space_(FIRST_SPACE),
6941       iterator_(NULL),
6942       size_func_(size_func) {
6943 }
6944
6945
6946 SpaceIterator::~SpaceIterator() {
6947   // Delete active iterator if any.
6948   delete iterator_;
6949 }
6950
6951
6952 bool SpaceIterator::has_next() {
6953   // Iterate until no more spaces.
6954   return current_space_ != LAST_SPACE;
6955 }
6956
6957
6958 ObjectIterator* SpaceIterator::next() {
6959   if (iterator_ != NULL) {
6960     delete iterator_;
6961     iterator_ = NULL;
6962     // Move to the next space
6963     current_space_++;
6964     if (current_space_ > LAST_SPACE) {
6965       return NULL;
6966     }
6967   }
6968
6969   // Return iterator for the new current space.
6970   return CreateIterator();
6971 }
6972
6973
6974 // Create an iterator for the space to iterate.
6975 ObjectIterator* SpaceIterator::CreateIterator() {
6976   ASSERT(iterator_ == NULL);
6977
6978   switch (current_space_) {
6979     case NEW_SPACE:
6980       iterator_ = new SemiSpaceIterator(heap_->new_space(), size_func_);
6981       break;
6982     case OLD_POINTER_SPACE:
6983       iterator_ =
6984           new HeapObjectIterator(heap_->old_pointer_space(), size_func_);
6985       break;
6986     case OLD_DATA_SPACE:
6987       iterator_ = new HeapObjectIterator(heap_->old_data_space(), size_func_);
6988       break;
6989     case CODE_SPACE:
6990       iterator_ = new HeapObjectIterator(heap_->code_space(), size_func_);
6991       break;
6992     case MAP_SPACE:
6993       iterator_ = new HeapObjectIterator(heap_->map_space(), size_func_);
6994       break;
6995     case CELL_SPACE:
6996       iterator_ = new HeapObjectIterator(heap_->cell_space(), size_func_);
6997       break;
6998     case LO_SPACE:
6999       iterator_ = new LargeObjectIterator(heap_->lo_space(), size_func_);
7000       break;
7001   }
7002
7003   // Return the newly allocated iterator.
7004   ASSERT(iterator_ != NULL);
7005   return iterator_;
7006 }
7007
7008
7009 class HeapObjectsFilter {
7010  public:
7011   virtual ~HeapObjectsFilter() {}
7012   virtual bool SkipObject(HeapObject* object) = 0;
7013 };
7014
7015
7016 class UnreachableObjectsFilter : public HeapObjectsFilter {
7017  public:
7018   UnreachableObjectsFilter() {
7019     MarkReachableObjects();
7020   }
7021
7022   ~UnreachableObjectsFilter() {
7023     Isolate::Current()->heap()->mark_compact_collector()->ClearMarkbits();
7024   }
7025
7026   bool SkipObject(HeapObject* object) {
7027     MarkBit mark_bit = Marking::MarkBitFrom(object);
7028     return !mark_bit.Get();
7029   }
7030
7031  private:
7032   class MarkingVisitor : public ObjectVisitor {
7033    public:
7034     MarkingVisitor() : marking_stack_(10) {}
7035
7036     void VisitPointers(Object** start, Object** end) {
7037       for (Object** p = start; p < end; p++) {
7038         if (!(*p)->IsHeapObject()) continue;
7039         HeapObject* obj = HeapObject::cast(*p);
7040         MarkBit mark_bit = Marking::MarkBitFrom(obj);
7041         if (!mark_bit.Get()) {
7042           mark_bit.Set();
7043           marking_stack_.Add(obj);
7044         }
7045       }
7046     }
7047
7048     void TransitiveClosure() {
7049       while (!marking_stack_.is_empty()) {
7050         HeapObject* obj = marking_stack_.RemoveLast();
7051         obj->Iterate(this);
7052       }
7053     }
7054
7055    private:
7056     List<HeapObject*> marking_stack_;
7057   };
7058
7059   void MarkReachableObjects() {
7060     Heap* heap = Isolate::Current()->heap();
7061     MarkingVisitor visitor;
7062     heap->IterateRoots(&visitor, VISIT_ALL);
7063     visitor.TransitiveClosure();
7064   }
7065
7066   AssertNoAllocation no_alloc;
7067 };
7068
7069
7070 HeapIterator::HeapIterator(Heap* heap)
7071     : heap_(heap),
7072       filtering_(HeapIterator::kNoFiltering),
7073       filter_(NULL) {
7074   Init();
7075 }
7076
7077
7078 HeapIterator::HeapIterator(Heap* heap,
7079                            HeapIterator::HeapObjectsFiltering filtering)
7080     : heap_(heap),
7081       filtering_(filtering),
7082       filter_(NULL) {
7083   Init();
7084 }
7085
7086
7087 HeapIterator::~HeapIterator() {
7088   Shutdown();
7089 }
7090
7091
7092 void HeapIterator::Init() {
7093   // Start the iteration.
7094   space_iterator_ = new SpaceIterator(heap_);
7095   switch (filtering_) {
7096     case kFilterUnreachable:
7097       filter_ = new UnreachableObjectsFilter;
7098       break;
7099     default:
7100       break;
7101   }
7102   object_iterator_ = space_iterator_->next();
7103 }
7104
7105
7106 void HeapIterator::Shutdown() {
7107 #ifdef DEBUG
7108   // Assert that in filtering mode we have iterated through all
7109   // objects. Otherwise, heap will be left in an inconsistent state.
7110   if (filtering_ != kNoFiltering) {
7111     ASSERT(object_iterator_ == NULL);
7112   }
7113 #endif
7114   // Make sure the last iterator is deallocated.
7115   delete space_iterator_;
7116   space_iterator_ = NULL;
7117   object_iterator_ = NULL;
7118   delete filter_;
7119   filter_ = NULL;
7120 }
7121
7122
7123 HeapObject* HeapIterator::next() {
7124   if (filter_ == NULL) return NextObject();
7125
7126   HeapObject* obj = NextObject();
7127   while (obj != NULL && filter_->SkipObject(obj)) obj = NextObject();
7128   return obj;
7129 }
7130
7131
7132 HeapObject* HeapIterator::NextObject() {
7133   // No iterator means we are done.
7134   if (object_iterator_ == NULL) return NULL;
7135
7136   if (HeapObject* obj = object_iterator_->next_object()) {
7137     // If the current iterator has more objects we are fine.
7138     return obj;
7139   } else {
7140     // Go through the spaces looking for one that has objects.
7141     while (space_iterator_->has_next()) {
7142       object_iterator_ = space_iterator_->next();
7143       if (HeapObject* obj = object_iterator_->next_object()) {
7144         return obj;
7145       }
7146     }
7147   }
7148   // Done with the last space.
7149   object_iterator_ = NULL;
7150   return NULL;
7151 }
7152
7153
7154 void HeapIterator::reset() {
7155   // Restart the iterator.
7156   Shutdown();
7157   Init();
7158 }
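
// Usage sketch (heap is a hypothetical pointer), mirroring the walk done by
// Heap::RecordStats() above; kFilterUnreachable additionally marks from the
// roots first and then skips every object that was not reached:
//
//   HeapIterator iterator(heap, HeapIterator::kFilterUnreachable);
//   for (HeapObject* obj = iterator.next();
//        obj != NULL;
//        obj = iterator.next()) {
//     // Inspect obj here.
//   }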
7159
7160
7161 #ifdef DEBUG
7162
7163 Object* const PathTracer::kAnyGlobalObject = reinterpret_cast<Object*>(NULL);
7164
7165 class PathTracer::MarkVisitor: public ObjectVisitor {
7166  public:
7167   explicit MarkVisitor(PathTracer* tracer) : tracer_(tracer) {}
7168   void VisitPointers(Object** start, Object** end) {
7169     // Scan all HeapObject pointers in [start, end)
7170     for (Object** p = start; !tracer_->found() && (p < end); p++) {
7171       if ((*p)->IsHeapObject())
7172         tracer_->MarkRecursively(p, this);
7173     }
7174   }
7175
7176  private:
7177   PathTracer* tracer_;
7178 };
7179
7180
7181 class PathTracer::UnmarkVisitor: public ObjectVisitor {
7182  public:
7183   explicit UnmarkVisitor(PathTracer* tracer) : tracer_(tracer) {}
7184   void VisitPointers(Object** start, Object** end) {
7185     // Scan all HeapObject pointers in [start, end)
7186     for (Object** p = start; p < end; p++) {
7187       if ((*p)->IsHeapObject())
7188         tracer_->UnmarkRecursively(p, this);
7189     }
7190   }
7191
7192  private:
7193   PathTracer* tracer_;
7194 };
7195
7196
7197 void PathTracer::VisitPointers(Object** start, Object** end) {
7198   bool done = ((what_to_find_ == FIND_FIRST) && found_target_);
7199   // Visit all HeapObject pointers in [start, end)
7200   for (Object** p = start; !done && (p < end); p++) {
7201     if ((*p)->IsHeapObject()) {
7202       TracePathFrom(p);
7203       done = ((what_to_find_ == FIND_FIRST) && found_target_);
7204     }
7205   }
7206 }
7207
7208
7209 void PathTracer::Reset() {
7210   found_target_ = false;
7211   object_stack_.Clear();
7212 }
7213
7214
7215 void PathTracer::TracePathFrom(Object** root) {
7216   ASSERT((search_target_ == kAnyGlobalObject) ||
7217          search_target_->IsHeapObject());
7218   found_target_in_trace_ = false;
7219   Reset();
7220
7221   MarkVisitor mark_visitor(this);
7222   MarkRecursively(root, &mark_visitor);
7223
7224   UnmarkVisitor unmark_visitor(this);
7225   UnmarkRecursively(root, &unmark_visitor);
7226
7227   ProcessResults();
7228 }
7229
7230
7231 static bool SafeIsNativeContext(HeapObject* obj) {
7232   return obj->map() == obj->GetHeap()->raw_unchecked_native_context_map();
7233 }
7234
7235
7236 void PathTracer::MarkRecursively(Object** p, MarkVisitor* mark_visitor) {
7237   if (!(*p)->IsHeapObject()) return;
7238
7239   HeapObject* obj = HeapObject::cast(*p);
7240
7241   Object* map = obj->map();
7242
7243   if (!map->IsHeapObject()) return;  // visited before
7244
7245   if (found_target_in_trace_) return;  // stop if target found
7246   object_stack_.Add(obj);
7247   if (((search_target_ == kAnyGlobalObject) && obj->IsJSGlobalObject()) ||
7248       (obj == search_target_)) {
7249     found_target_in_trace_ = true;
7250     found_target_ = true;
7251     return;
7252   }
7253
7254   bool is_native_context = SafeIsNativeContext(obj);
7255
7256   // not visited yet
7257   Map* map_p = reinterpret_cast<Map*>(HeapObject::cast(map));
7258
7259   Address map_addr = map_p->address();
7260
7261   obj->set_map_no_write_barrier(reinterpret_cast<Map*>(map_addr + kMarkTag));
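  // Adding kMarkTag to the map pointer marks |obj| as visited for this
  // traversal; UnmarkRecursively strips the tag again afterwards so the heap
  // is left exactly as it was found.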
7262
7263   // Scan the object body.
7264   if (is_native_context && (visit_mode_ == VISIT_ONLY_STRONG)) {
7265     // This is specialized to scan Contexts properly.
7266     Object** start = reinterpret_cast<Object**>(obj->address() +
7267                                                 Context::kHeaderSize);
7268     Object** end = reinterpret_cast<Object**>(obj->address() +
7269         Context::kHeaderSize + Context::FIRST_WEAK_SLOT * kPointerSize);
7270     mark_visitor->VisitPointers(start, end);
7271   } else {
7272     obj->IterateBody(map_p->instance_type(),
7273                      obj->SizeFromMap(map_p),
7274                      mark_visitor);
7275   }
7276
7277   // Scan the map after the body because the body is a lot more interesting
7278   // when doing leak detection.
7279   MarkRecursively(&map, mark_visitor);
7280
7281   if (!found_target_in_trace_)  // don't pop if we found the target
7282     object_stack_.RemoveLast();
7283 }
7284
7285
7286 void PathTracer::UnmarkRecursively(Object** p, UnmarkVisitor* unmark_visitor) {
7287   if (!(*p)->IsHeapObject()) return;
7288
7289   HeapObject* obj = HeapObject::cast(*p);
7290
7291   Object* map = obj->map();
7292
7293   if (map->IsHeapObject()) return;  // unmarked already
7294
7295   Address map_addr = reinterpret_cast<Address>(map);
7296
7297   map_addr -= kMarkTag;
7298
7299   ASSERT_TAG_ALIGNED(map_addr);
7300
7301   HeapObject* map_p = HeapObject::FromAddress(map_addr);
7302
7303   obj->set_map_no_write_barrier(reinterpret_cast<Map*>(map_p));
7304
7305   UnmarkRecursively(reinterpret_cast<Object**>(&map_p), unmark_visitor);
7306
7307   obj->IterateBody(Map::cast(map_p)->instance_type(),
7308                    obj->SizeFromMap(Map::cast(map_p)),
7309                    unmark_visitor);
7310 }
7311
7312
7313 void PathTracer::ProcessResults() {
7314   if (found_target_) {
7315     PrintF("=====================================\n");
7316     PrintF("====        Path to object       ====\n");
7317     PrintF("=====================================\n\n");
7318
7319     ASSERT(!object_stack_.is_empty());
7320     for (int i = 0; i < object_stack_.length(); i++) {
7321       if (i > 0) PrintF("\n     |\n     |\n     V\n\n");
7322       Object* obj = object_stack_[i];
7323       obj->Print();
7324     }
7325     PrintF("=====================================\n");
7326   }
7327 }
7328
7329
7330 // Triggers a depth-first traversal of reachable objects from one
7331 // given root object, finds a path to a specific heap object, and
7332 // prints it.
7333 void Heap::TracePathToObjectFrom(Object* target, Object* root) {
7334   PathTracer tracer(target, PathTracer::FIND_ALL, VISIT_ALL);
7335   tracer.VisitPointer(&root);
7336 }
7337
7338
7339 // Triggers a depth-first traversal of reachable objects from roots
7340 // and finds a path to a specific heap object and prints it.
7341 void Heap::TracePathToObject(Object* target) {
7342   PathTracer tracer(target, PathTracer::FIND_ALL, VISIT_ALL);
7343   IterateRoots(&tracer, VISIT_ONLY_STRONG);
7344 }
7345
7346
7347 // Triggers a depth-first traversal of reachable objects from roots
7348 // and finds a path to any global object and prints it. Useful for
7349 // determining the source for leaks of global objects.
7350 void Heap::TracePathToGlobal() {
7351   PathTracer tracer(PathTracer::kAnyGlobalObject,
7352                     PathTracer::FIND_ALL,
7353                     VISIT_ALL);
7354   IterateRoots(&tracer, VISIT_ONLY_STRONG);
7355 }
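// These helpers are intended to be called manually, e.g. from a debugger or
// from temporary instrumentation, while hunting retention leaks. A sketch,
// assuming a reachable Heap*:
//   heap->TracePathToGlobal();
// Every chain that is found is printed under the "Path to object" banner
// emitted by PathTracer::ProcessResults() above.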
7356 #endif
7357
7358
7359 static intptr_t CountTotalHolesSize(Heap* heap) {
7360   intptr_t holes_size = 0;
7361   OldSpaces spaces(heap);
7362   for (OldSpace* space = spaces.next();
7363        space != NULL;
7364        space = spaces.next()) {
7365     holes_size += space->Waste() + space->Available();
7366   }
7367   return holes_size;
7368 }
7369
7370
7371 GCTracer::GCTracer(Heap* heap,
7372                    const char* gc_reason,
7373                    const char* collector_reason)
7374     : start_time_(0.0),
7375       start_object_size_(0),
7376       start_memory_size_(0),
7377       gc_count_(0),
7378       full_gc_count_(0),
7379       allocated_since_last_gc_(0),
7380       spent_in_mutator_(0),
7381       promoted_objects_size_(0),
7382       nodes_died_in_new_space_(0),
7383       nodes_copied_in_new_space_(0),
7384       nodes_promoted_(0),
7385       heap_(heap),
7386       gc_reason_(gc_reason),
7387       collector_reason_(collector_reason) {
7388   if (!FLAG_trace_gc && !FLAG_print_cumulative_gc_stat) return;
7389   start_time_ = OS::TimeCurrentMillis();
7390   start_object_size_ = heap_->SizeOfObjects();
7391   start_memory_size_ = heap_->isolate()->memory_allocator()->Size();
7392
7393   for (int i = 0; i < Scope::kNumberOfScopes; i++) {
7394     scopes_[i] = 0;
7395   }
7396
7397   in_free_list_or_wasted_before_gc_ = CountTotalHolesSize(heap);
7398
7399   allocated_since_last_gc_ =
7400       heap_->SizeOfObjects() - heap_->alive_after_last_gc_;
7401
7402   if (heap_->last_gc_end_timestamp_ > 0) {
7403     spent_in_mutator_ = Max(start_time_ - heap_->last_gc_end_timestamp_, 0.0);
7404   }
7405
7406   steps_count_ = heap_->incremental_marking()->steps_count();
7407   steps_took_ = heap_->incremental_marking()->steps_took();
7408   longest_step_ = heap_->incremental_marking()->longest_step();
7409   steps_count_since_last_gc_ =
7410       heap_->incremental_marking()->steps_count_since_last_gc();
7411   steps_took_since_last_gc_ =
7412       heap_->incremental_marking()->steps_took_since_last_gc();
7413 }
7414
7415
7416 GCTracer::~GCTracer() {
7417   // Printf ONE line iff flag is set.
7418   if (!FLAG_trace_gc && !FLAG_print_cumulative_gc_stat) return;
7419
7420   bool first_gc = (heap_->last_gc_end_timestamp_ == 0);
7421
7422   heap_->alive_after_last_gc_ = heap_->SizeOfObjects();
7423   heap_->last_gc_end_timestamp_ = OS::TimeCurrentMillis();
7424
7425   double time = heap_->last_gc_end_timestamp_ - start_time_;
7426
7427   // Update cumulative GC statistics if required.
7428   if (FLAG_print_cumulative_gc_stat) {
7429     heap_->total_gc_time_ms_ += time;
7430     heap_->max_gc_pause_ = Max(heap_->max_gc_pause_, time);
7431     heap_->max_alive_after_gc_ = Max(heap_->max_alive_after_gc_,
7432                                      heap_->alive_after_last_gc_);
7433     if (!first_gc) {
7434       heap_->min_in_mutator_ = Min(heap_->min_in_mutator_,
7435                                    spent_in_mutator_);
7436     }
7437   } else if (FLAG_trace_gc_verbose) {
7438     heap_->total_gc_time_ms_ += time;
7439   }
7440
7441   if (collector_ == SCAVENGER && FLAG_trace_gc_ignore_scavenger) return;
7442
7443   heap_->AddMarkingTime(scopes_[Scope::MC_MARK]);
7444
7445   if (FLAG_print_cumulative_gc_stat && !FLAG_trace_gc) return;
7446   PrintPID("%8.0f ms: ", heap_->isolate()->time_millis_since_init());
7447
7448   if (!FLAG_trace_gc_nvp) {
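    // This branch prints one human-readable summary line, roughly of the form
    // (values illustrative):
    //   Scavenge 12.3 (24.0) -> 8.1 (24.0) MB, 1.2 ms [allocation failure].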
7449     int external_time = static_cast<int>(scopes_[Scope::EXTERNAL]);
7450
7451     double end_memory_size_mb =
7452         static_cast<double>(heap_->isolate()->memory_allocator()->Size()) / MB;
7453
7454     PrintF("%s %.1f (%.1f) -> %.1f (%.1f) MB, ",
7455            CollectorString(),
7456            static_cast<double>(start_object_size_) / MB,
7457            static_cast<double>(start_memory_size_) / MB,
7458            SizeOfHeapObjects(),
7459            end_memory_size_mb);
7460
7461     if (external_time > 0) PrintF("%d / ", external_time);
7462     PrintF("%.1f ms", time);
7463     if (steps_count_ > 0) {
7464       if (collector_ == SCAVENGER) {
7465         PrintF(" (+ %.1f ms in %d steps since last GC)",
7466                steps_took_since_last_gc_,
7467                steps_count_since_last_gc_);
7468       } else {
7469         PrintF(" (+ %.1f ms in %d steps since start of marking, "
7470                    "biggest step %.1f ms)",
7471                steps_took_,
7472                steps_count_,
7473                longest_step_);
7474       }
7475     }
7476
7477     if (gc_reason_ != NULL) {
7478       PrintF(" [%s]", gc_reason_);
7479     }
7480
7481     if (collector_reason_ != NULL) {
7482       PrintF(" [%s]", collector_reason_);
7483     }
7484
7485     PrintF(".\n");
7486   } else {
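    // With FLAG_trace_gc_nvp the statistics are emitted as machine-readable
    // name=value pairs on a single line, e.g. (values illustrative):
    //   pause=1.2 mutator=8.3 gc=s external=0.0 mark=0.0 sweep=0.0 ...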
7487     PrintF("pause=%.1f ", time);
7488     PrintF("mutator=%.1f ", spent_in_mutator_);
7489     PrintF("gc=");
7490     switch (collector_) {
7491       case SCAVENGER:
7492         PrintF("s");
7493         break;
7494       case MARK_COMPACTOR:
7495         PrintF("ms");
7496         break;
7497       default:
7498         UNREACHABLE();
7499     }
7500     PrintF(" ");
7501
7502     PrintF("external=%.1f ", scopes_[Scope::EXTERNAL]);
7503     PrintF("mark=%.1f ", scopes_[Scope::MC_MARK]);
7504     PrintF("sweep=%.1f ", scopes_[Scope::MC_SWEEP]);
7505     PrintF("sweepns=%.1f ", scopes_[Scope::MC_SWEEP_NEWSPACE]);
7506     PrintF("evacuate=%.1f ", scopes_[Scope::MC_EVACUATE_PAGES]);
7507     PrintF("new_new=%.1f ", scopes_[Scope::MC_UPDATE_NEW_TO_NEW_POINTERS]);
7508     PrintF("root_new=%.1f ", scopes_[Scope::MC_UPDATE_ROOT_TO_NEW_POINTERS]);
7509     PrintF("old_new=%.1f ", scopes_[Scope::MC_UPDATE_OLD_TO_NEW_POINTERS]);
7510     PrintF("compaction_ptrs=%.1f ",
7511         scopes_[Scope::MC_UPDATE_POINTERS_TO_EVACUATED]);
7512     PrintF("intracompaction_ptrs=%.1f ",
7513         scopes_[Scope::MC_UPDATE_POINTERS_BETWEEN_EVACUATED]);
7514     PrintF("misc_compaction=%.1f ", scopes_[Scope::MC_UPDATE_MISC_POINTERS]);
7515
7516     PrintF("total_size_before=%" V8_PTR_PREFIX "d ", start_object_size_);
7517     PrintF("total_size_after=%" V8_PTR_PREFIX "d ", heap_->SizeOfObjects());
7518     PrintF("holes_size_before=%" V8_PTR_PREFIX "d ",
7519            in_free_list_or_wasted_before_gc_);
7520     PrintF("holes_size_after=%" V8_PTR_PREFIX "d ", CountTotalHolesSize(heap_));
7521
7522     PrintF("allocated=%" V8_PTR_PREFIX "d ", allocated_since_last_gc_);
7523     PrintF("promoted=%" V8_PTR_PREFIX "d ", promoted_objects_size_);
7524     PrintF("nodes_died_in_new=%d ", nodes_died_in_new_space_);
7525     PrintF("nodes_copied_in_new=%d ", nodes_copied_in_new_space_);
7526     PrintF("nodes_promoted=%d ", nodes_promoted_);
7527
7528     if (collector_ == SCAVENGER) {
7529       PrintF("stepscount=%d ", steps_count_since_last_gc_);
7530       PrintF("stepstook=%.1f ", steps_took_since_last_gc_);
7531     } else {
7532       PrintF("stepscount=%d ", steps_count_);
7533       PrintF("stepstook=%.1f ", steps_took_);
7534       PrintF("longeststep=%.1f ", longest_step_);
7535     }
7536
7537     PrintF("\n");
7538   }
7539
7540   heap_->PrintShortHeapStatistics();
7541 }
7542
7543
7544 const char* GCTracer::CollectorString() {
7545   switch (collector_) {
7546     case SCAVENGER:
7547       return "Scavenge";
7548     case MARK_COMPACTOR:
7549       return "Mark-sweep";
7550   }
7551   return "Unknown GC";
7552 }
7553
7554
7555 int KeyedLookupCache::Hash(Map* map, Name* name) {
7556   // Uses only lower 32 bits if pointers are larger.
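  // The map pointer is shifted right, presumably so that its low bits (which
  // carry little entropy due to pointer alignment) do not dominate the hash,
  // xor-ed with the name's hash, and then masked down to the cache capacity.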
7557   uintptr_t addr_hash =
7558       static_cast<uint32_t>(reinterpret_cast<uintptr_t>(map)) >> kMapHashShift;
7559   return static_cast<uint32_t>((addr_hash ^ name->Hash()) & kCapacityMask);
7560 }
7561
7562
7563 int KeyedLookupCache::Lookup(Map* map, Name* name) {
7564   int index = (Hash(map, name) & kHashMask);
7565   for (int i = 0; i < kEntriesPerBucket; i++) {
7566     Key& key = keys_[index + i];
7567     if ((key.map == map) && key.name->Equals(name)) {
7568       return field_offsets_[index + i];
7569     }
7570   }
7571   return kNotFound;
7572 }
7573
7574
7575 void KeyedLookupCache::Update(Map* map, Name* name, int field_offset) {
7576   if (!name->IsUniqueName()) {
7577     String* internalized_string;
7578     if (!HEAP->InternalizeStringIfExists(
7579             String::cast(name), &internalized_string)) {
7580       return;
7581     }
7582     name = internalized_string;
7583   }
7584   // This cache is cleared only between mark compact passes, so we expect the
7585   // cache to only contain old space names.
7586   ASSERT(!HEAP->InNewSpace(name));
7587
7588   int index = (Hash(map, name) & kHashMask);
7589   // After a GC there will be free slots, so we use them in order (this may
7590   // help to get the most frequently used one in position 0).
7591   for (int i = 0; i < kEntriesPerBucket; i++) {
7592     Key& key = keys_[index + i];
7593     Object* free_entry_indicator = NULL;
7594     if (key.map == free_entry_indicator) {
7595       key.map = map;
7596       key.name = name;
7597       field_offsets_[index + i] = field_offset;
7598       return;
7599     }
7600   }
7601   // No free entry found in this bucket, so we move them all down one and
7602   // put the new entry at position zero.
7603   for (int i = kEntriesPerBucket - 1; i > 0; i--) {
7604     Key& key = keys_[index + i];
7605     Key& key2 = keys_[index + i - 1];
7606     key = key2;
7607     field_offsets_[index + i] = field_offsets_[index + i - 1];
7608   }
7609
7610   // Write the new first entry.
7611   Key& key = keys_[index];
7612   key.map = map;
7613   key.name = name;
7614   field_offsets_[index] = field_offset;
7615 }
7616
7617
7618 void KeyedLookupCache::Clear() {
7619   for (int index = 0; index < kLength; index++) keys_[index].map = NULL;
7620 }
7621
7622
7623 void DescriptorLookupCache::Clear() {
7624   for (int index = 0; index < kLength; index++) keys_[index].source = NULL;
7625 }
7626
7627
7628 #ifdef DEBUG
7629 void Heap::GarbageCollectionGreedyCheck() {
7630   ASSERT(FLAG_gc_greedy);
7631   if (isolate_->bootstrapper()->IsActive()) return;
7632   if (disallow_allocation_failure()) return;
7633   CollectGarbage(NEW_SPACE);
7634 }
7635 #endif
7636
7637
7638 TranscendentalCache::SubCache::SubCache(Type t)
7639   : type_(t),
7640     isolate_(Isolate::Current()) {
7641   uint32_t in0 = 0xffffffffu;  // Bit-pattern for a NaN that isn't
7642   uint32_t in1 = 0xffffffffu;  // generated by the FPU.
7643   for (int i = 0; i < kCacheSize; i++) {
7644     elements_[i].in[0] = in0;
7645     elements_[i].in[1] = in1;
7646     elements_[i].output = NULL;
7647   }
7648 }
7649
7650
7651 void TranscendentalCache::Clear() {
7652   for (int i = 0; i < kNumberOfCaches; i++) {
7653     if (caches_[i] != NULL) {
7654       delete caches_[i];
7655       caches_[i] = NULL;
7656     }
7657   }
7658 }
7659
7660
7661 void ExternalStringTable::CleanUp() {
7662   int last = 0;
7663   for (int i = 0; i < new_space_strings_.length(); ++i) {
7664     if (new_space_strings_[i] == heap_->the_hole_value()) {
7665       continue;
7666     }
7667     if (heap_->InNewSpace(new_space_strings_[i])) {
7668       new_space_strings_[last++] = new_space_strings_[i];
7669     } else {
7670       old_space_strings_.Add(new_space_strings_[i]);
7671     }
7672   }
7673   new_space_strings_.Rewind(last);
7674   new_space_strings_.Trim();
7675
7676   last = 0;
7677   for (int i = 0; i < old_space_strings_.length(); ++i) {
7678     if (old_space_strings_[i] == heap_->the_hole_value()) {
7679       continue;
7680     }
7681     ASSERT(!heap_->InNewSpace(old_space_strings_[i]));
7682     old_space_strings_[last++] = old_space_strings_[i];
7683   }
7684   old_space_strings_.Rewind(last);
7685   old_space_strings_.Trim();
7686 #ifdef VERIFY_HEAP
7687   if (FLAG_verify_heap) {
7688     Verify();
7689   }
7690 #endif
7691 }
7692
7693
7694 void ExternalStringTable::TearDown() {
7695   new_space_strings_.Free();
7696   old_space_strings_.Free();
7697 }
7698
7699
7700 // Update all references.
7701 void ErrorObjectList::UpdateReferences() {
7702   for (int i = 0; i < list_.length(); i++) {
7703     HeapObject* object = HeapObject::cast(list_[i]);
7704     MapWord first_word = object->map_word();
7705     if (first_word.IsForwardingAddress()) {
7706       list_[i] = first_word.ToForwardingAddress();
7707     }
7708   }
7709 }
7710
7711
7712 // Unforwarded objects in new space are dead and removed from the list.
7713 void ErrorObjectList::UpdateReferencesInNewSpace(Heap* heap) {
7714   if (list_.is_empty()) return;
7715   if (!nested_) {
7716     int write_index = 0;
7717     for (int i = 0; i < list_.length(); i++) {
7718       MapWord first_word = HeapObject::cast(list_[i])->map_word();
7719       if (first_word.IsForwardingAddress()) {
7720         list_[write_index++] = first_word.ToForwardingAddress();
7721       }
7722     }
7723     list_.Rewind(write_index);
7724   } else {
7725     // If a GC is triggered during DeferredFormatStackTrace, we do not move
7726     // objects in the list, just remove dead ones, so as not to confuse the
7727     // loop in DeferredFormatStackTrace.
7728     for (int i = 0; i < list_.length(); i++) {
7729       MapWord first_word = HeapObject::cast(list_[i])->map_word();
7730       list_[i] = first_word.IsForwardingAddress()
7731                      ? first_word.ToForwardingAddress()
7732                      : heap->the_hole_value();
7733     }
7734   }
7735 }
7736
7737
7738 void ErrorObjectList::DeferredFormatStackTrace(Isolate* isolate) {
7739   // If formatting the stack trace causes a GC, this method will be
7740   // recursively called.  In that case, skip the recursive call, since
7741   // the loop modifies the list while iterating over it.
7742   if (nested_ || list_.is_empty() || isolate->has_pending_exception()) return;
7743   nested_ = true;
7744   HandleScope scope(isolate);
7745   Handle<String> stack_key = isolate->factory()->stack_string();
7746   int write_index = 0;
7747   int budget = kBudgetPerGC;
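  // The per-GC budget bounds how many stack traces are formatted in a single
  // pass; entries skipped because the budget ran out simply stay on the list
  // and are retried after the next GC.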
7748   for (int i = 0; i < list_.length(); i++) {
7749     Object* object = list_[i];
7750     JSFunction* getter_fun;
7751
7752     { AssertNoAllocation assert;
7753       // Skip possible holes in the list.
7754       if (object->IsTheHole()) continue;
7755       if (isolate->heap()->InNewSpace(object) || budget == 0) {
7756         list_[write_index++] = object;
7757         continue;
7758       }
7759
7760       // Check whether the stack property is backed by the original getter.
7761       LookupResult lookup(isolate);
7762       JSObject::cast(object)->LocalLookupRealNamedProperty(*stack_key, &lookup);
7763       if (!lookup.IsFound() || lookup.type() != CALLBACKS) continue;
7764       Object* callback = lookup.GetCallbackObject();
7765       if (!callback->IsAccessorPair()) continue;
7766       Object* getter_obj = AccessorPair::cast(callback)->getter();
7767       if (!getter_obj->IsJSFunction()) continue;
7768       getter_fun = JSFunction::cast(getter_obj);
7769       String* key = isolate->heap()->hidden_stack_trace_string();
7770       Object* value = getter_fun->GetHiddenProperty(key);
7771       if (key != value) continue;
7772     }
7773
7774     budget--;
7775     HandleScope scope(isolate);
7776     bool has_exception = false;
7777 #ifdef DEBUG
7778     Handle<Map> map(HeapObject::cast(object)->map(), isolate);
7779 #endif
7780     Handle<Object> object_handle(object, isolate);
7781     Handle<Object> getter_handle(getter_fun, isolate);
7782     Execution::Call(getter_handle, object_handle, 0, NULL, &has_exception);
7783     ASSERT(*map == HeapObject::cast(*object_handle)->map());
7784     if (has_exception) {
7785       // Hit an exception (most likely a stack overflow).
7786       // Wrap up this pass and retry after another GC.
7787       isolate->clear_pending_exception();
7788       // We use the handle since calling the getter might have caused a GC.
7789       list_[write_index++] = *object_handle;
7790       budget = 0;
7791     }
7792   }
7793   list_.Rewind(write_index);
7794   list_.Trim();
7795   nested_ = false;
7796 }
7797
7798
7799 void ErrorObjectList::RemoveUnmarked(Heap* heap) {
7800   for (int i = 0; i < list_.length(); i++) {
7801     HeapObject* object = HeapObject::cast(list_[i]);
7802     if (!Marking::MarkBitFrom(object).Get()) {
7803       list_[i] = heap->the_hole_value();
7804     }
7805   }
7806 }
7807
7808
7809 void ErrorObjectList::TearDown() {
7810   list_.Free();
7811 }
7812
7813
7814 void Heap::QueueMemoryChunkForFree(MemoryChunk* chunk) {
7815   chunk->set_next_chunk(chunks_queued_for_free_);
7816   chunks_queued_for_free_ = chunk;
7817 }
7818
7819
7820 void Heap::FreeQueuedChunks() {
7821   if (chunks_queued_for_free_ == NULL) return;
7822   MemoryChunk* next;
7823   MemoryChunk* chunk;
7824   for (chunk = chunks_queued_for_free_; chunk != NULL; chunk = next) {
7825     next = chunk->next_chunk();
7826     chunk->SetFlag(MemoryChunk::ABOUT_TO_BE_FREED);
7827
7828     if (chunk->owner()->identity() == LO_SPACE) {
7829       // StoreBuffer::Filter relies on MemoryChunk::FromAnyPointerAddress.
7830       // If FromAnyPointerAddress encounters a slot that belongs to a large
7831       // chunk queued for deletion, it will fail to find the chunk because it
7832       // tries to search the list of pages owned by the large object space,
7833       // and the queued chunks have already been detached from that list.
7834       // To work around this we split the large chunk into normal, kPageSize
7835       // aligned pieces and initialize the size, owner and flags field of
7836       // every piece. If FromAnyPointerAddress then encounters a slot in one
7837       // of these smaller pieces, it will treat it as a slot on a normal Page.
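      // For example (sizes illustrative), a large chunk spanning 3.5 pages
      // gets fake page headers at offsets of 1, 2 and 3 pages from its start,
      // with the area end of the last piece clamped to chunk_end below.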
7838       Address chunk_end = chunk->address() + chunk->size();
7839       MemoryChunk* inner = MemoryChunk::FromAddress(
7840           chunk->address() + Page::kPageSize);
7841       MemoryChunk* inner_last = MemoryChunk::FromAddress(chunk_end - 1);
7842       while (inner <= inner_last) {
7843         // The size of a large chunk is always a multiple of
7844         // OS::AllocateAlignment(), so there is always
7845         // enough space for a fake MemoryChunk header.
7846         Address area_end = Min(inner->address() + Page::kPageSize, chunk_end);
7847         // Guard against overflow.
7848         if (area_end < inner->address()) area_end = chunk_end;
7849         inner->SetArea(inner->address(), area_end);
7850         inner->set_size(Page::kPageSize);
7851         inner->set_owner(lo_space());
7852         inner->SetFlag(MemoryChunk::ABOUT_TO_BE_FREED);
7853         inner = MemoryChunk::FromAddress(
7854             inner->address() + Page::kPageSize);
7855       }
7856     }
7857   }
7858   isolate_->heap()->store_buffer()->Compact();
7859   isolate_->heap()->store_buffer()->Filter(MemoryChunk::ABOUT_TO_BE_FREED);
7860   for (chunk = chunks_queued_for_free_; chunk != NULL; chunk = next) {
7861     next = chunk->next_chunk();
7862     isolate_->memory_allocator()->Free(chunk);
7863   }
7864   chunks_queued_for_free_ = NULL;
7865 }
7866
7867
7868 void Heap::RememberUnmappedPage(Address page, bool compacted) {
7869   uintptr_t p = reinterpret_cast<uintptr_t>(page);
7870   // Tag the page pointer to make it findable in the dump file.
7871   if (compacted) {
7872     p ^= 0xc1ead & (Page::kPageSize - 1);  // Cleared.
7873   } else {
7874     p ^= 0x1d1ed & (Page::kPageSize - 1);  // I died.
7875   }
7876   remembered_unmapped_pages_[remembered_unmapped_pages_index_] =
7877       reinterpret_cast<Address>(p);
7878   remembered_unmapped_pages_index_++;
7879   remembered_unmapped_pages_index_ %= kRememberedUnmappedPages;
7880 }
7881
7882
7883 void Heap::ClearObjectStats(bool clear_last_time_stats) {
7884   memset(object_counts_, 0, sizeof(object_counts_));
7885   memset(object_sizes_, 0, sizeof(object_sizes_));
7886   if (clear_last_time_stats) {
7887     memset(object_counts_last_time_, 0, sizeof(object_counts_last_time_));
7888     memset(object_sizes_last_time_, 0, sizeof(object_sizes_last_time_));
7889   }
7890 }
7891
7892
7893 static LazyMutex checkpoint_object_stats_mutex = LAZY_MUTEX_INITIALIZER;
7894
7895
7896 void Heap::CheckpointObjectStats() {
7897   ScopedLock lock(checkpoint_object_stats_mutex.Pointer());
7898   Counters* counters = isolate()->counters();
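  // Each counter below is adjusted by the delta against the previous
  // checkpoint: incremented by the current count/size and decremented by the
  // value recorded last time, so the histograms track per-checkpoint changes
  // rather than absolute totals.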
7899 #define ADJUST_LAST_TIME_OBJECT_COUNT(name)                                    \
7900   counters->count_of_##name()->Increment(                                      \
7901       static_cast<int>(object_counts_[name]));                                 \
7902   counters->count_of_##name()->Decrement(                                      \
7903       static_cast<int>(object_counts_last_time_[name]));                       \
7904   counters->size_of_##name()->Increment(                                       \
7905       static_cast<int>(object_sizes_[name]));                                  \
7906   counters->size_of_##name()->Decrement(                                       \
7907       static_cast<int>(object_sizes_last_time_[name]));
7908   INSTANCE_TYPE_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
7909 #undef ADJUST_LAST_TIME_OBJECT_COUNT
7910   int index;
7911 #define ADJUST_LAST_TIME_OBJECT_COUNT(name)               \
7912   index = FIRST_CODE_KIND_SUB_TYPE + Code::name;          \
7913   counters->count_of_CODE_TYPE_##name()->Increment(       \
7914       static_cast<int>(object_counts_[index]));           \
7915   counters->count_of_CODE_TYPE_##name()->Decrement(       \
7916       static_cast<int>(object_counts_last_time_[index])); \
7917   counters->size_of_CODE_TYPE_##name()->Increment(        \
7918       static_cast<int>(object_sizes_[index]));            \
7919   counters->size_of_CODE_TYPE_##name()->Decrement(        \
7920       static_cast<int>(object_sizes_last_time_[index]));
7921   CODE_KIND_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
7922 #undef ADJUST_LAST_TIME_OBJECT_COUNT
7923 #define ADJUST_LAST_TIME_OBJECT_COUNT(name)               \
7924   index = FIRST_FIXED_ARRAY_SUB_TYPE + name;              \
7925   counters->count_of_FIXED_ARRAY_##name()->Increment(     \
7926       static_cast<int>(object_counts_[index]));           \
7927   counters->count_of_FIXED_ARRAY_##name()->Decrement(     \
7928       static_cast<int>(object_counts_last_time_[index])); \
7929   counters->size_of_FIXED_ARRAY_##name()->Increment(      \
7930       static_cast<int>(object_sizes_[index]));            \
7931   counters->size_of_FIXED_ARRAY_##name()->Decrement(      \
7932       static_cast<int>(object_sizes_last_time_[index]));
7933   FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
7934 #undef ADJUST_LAST_TIME_OBJECT_COUNT
7935
7936   OS::MemCopy(object_counts_last_time_, object_counts_, sizeof(object_counts_));
7937   OS::MemCopy(object_sizes_last_time_, object_sizes_, sizeof(object_sizes_));
7938   ClearObjectStats();
7939 }
7940
7941
7942 Heap::RelocationLock::RelocationLock(Heap* heap) : heap_(heap) {
7943   if (FLAG_parallel_recompilation) {
7944     heap_->relocation_mutex_->Lock();
7945 #ifdef DEBUG
7946     heap_->relocation_mutex_locked_by_optimizer_thread_ =
7947         heap_->isolate()->optimizing_compiler_thread()->IsOptimizerThread();
7948 #endif  // DEBUG
7949   }
7950 }
7951
7952 } }  // namespace v8::internal