deps/v8/src/heap.cc
1 // Copyright 2011 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
4 // met:
5 //
6 //     * Redistributions of source code must retain the above copyright
7 //       notice, this list of conditions and the following disclaimer.
8 //     * Redistributions in binary form must reproduce the above
9 //       copyright notice, this list of conditions and the following
10 //       disclaimer in the documentation and/or other materials provided
11 //       with the distribution.
12 //     * Neither the name of Google Inc. nor the names of its
13 //       contributors may be used to endorse or promote products derived
14 //       from this software without specific prior written permission.
15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
28 #include "v8.h"
29
30 #include "accessors.h"
31 #include "api.h"
32 #include "bootstrapper.h"
33 #include "codegen.h"
34 #include "compilation-cache.h"
35 #include "debug.h"
36 #include "deoptimizer.h"
37 #include "global-handles.h"
38 #include "heap-profiler.h"
39 #include "incremental-marking.h"
40 #include "liveobjectlist-inl.h"
41 #include "mark-compact.h"
42 #include "natives.h"
43 #include "objects-visiting.h"
44 #include "objects-visiting-inl.h"
45 #include "runtime-profiler.h"
46 #include "scopeinfo.h"
47 #include "snapshot.h"
48 #include "store-buffer.h"
49 #include "v8threads.h"
50 #include "vm-state-inl.h"
51 #if V8_TARGET_ARCH_ARM && !V8_INTERPRETED_REGEXP
52 #include "regexp-macro-assembler.h"
53 #include "arm/regexp-macro-assembler-arm.h"
54 #endif
55 #if V8_TARGET_ARCH_MIPS && !V8_INTERPRETED_REGEXP
56 #include "regexp-macro-assembler.h"
57 #include "mips/regexp-macro-assembler-mips.h"
58 #endif
59
60 namespace v8 {
61 namespace internal {
62
63
64 static Mutex* gc_initializer_mutex = OS::CreateMutex();
65
66
67 Heap::Heap()
68     : isolate_(NULL),
69 // semispace_size_ should be a power of 2 and old_generation_size_ should be
70 // a multiple of Page::kPageSize.
71 #if defined(ANDROID)
72 #define LUMP_OF_MEMORY (128 * KB)
73       code_range_size_(0),
74 #elif defined(V8_TARGET_ARCH_X64)
75 #define LUMP_OF_MEMORY (2 * MB)
76       code_range_size_(512*MB),
77 #else
78 #define LUMP_OF_MEMORY MB
79       code_range_size_(0),
80 #endif
81       reserved_semispace_size_(8 * Max(LUMP_OF_MEMORY, Page::kPageSize)),
82       max_semispace_size_(8 * Max(LUMP_OF_MEMORY, Page::kPageSize)),
83       initial_semispace_size_(Page::kPageSize),
84       max_old_generation_size_(700ul * LUMP_OF_MEMORY),
85       max_executable_size_(128l * LUMP_OF_MEMORY),
86
87 // Variables set based on semispace_size_ and old_generation_size_ in
88 // ConfigureHeap (survived_since_last_expansion_, external_allocation_limit_).
89 // Will be 4 * reserved_semispace_size_ to ensure that the young
90 // generation can be aligned to its size.
91       survived_since_last_expansion_(0),
92       sweep_generation_(0),
93       always_allocate_scope_depth_(0),
94       linear_allocation_scope_depth_(0),
95       contexts_disposed_(0),
96       scan_on_scavenge_pages_(0),
97       new_space_(this),
98       old_pointer_space_(NULL),
99       old_data_space_(NULL),
100       code_space_(NULL),
101       map_space_(NULL),
102       cell_space_(NULL),
103       lo_space_(NULL),
104       gc_state_(NOT_IN_GC),
105       gc_post_processing_depth_(0),
106       ms_count_(0),
107       gc_count_(0),
108       unflattened_strings_length_(0),
109 #ifdef DEBUG
110       allocation_allowed_(true),
111       allocation_timeout_(0),
112       disallow_allocation_failure_(false),
113       debug_utils_(NULL),
114 #endif  // DEBUG
115       new_space_high_promotion_mode_active_(false),
116       old_gen_promotion_limit_(kMinimumPromotionLimit),
117       old_gen_allocation_limit_(kMinimumAllocationLimit),
118       old_gen_limit_factor_(1),
119       size_of_old_gen_at_last_old_space_gc_(0),
120       external_allocation_limit_(0),
121       amount_of_external_allocated_memory_(0),
122       amount_of_external_allocated_memory_at_last_global_gc_(0),
123       old_gen_exhausted_(false),
124       store_buffer_rebuilder_(store_buffer()),
125       hidden_symbol_(NULL),
126       global_gc_prologue_callback_(NULL),
127       global_gc_epilogue_callback_(NULL),
128       gc_safe_size_of_old_object_(NULL),
129       total_regexp_code_generated_(0),
130       tracer_(NULL),
131       young_survivors_after_last_gc_(0),
132       high_survival_rate_period_length_(0),
133       survival_rate_(0),
134       previous_survival_rate_trend_(Heap::STABLE),
135       survival_rate_trend_(Heap::STABLE),
136       max_gc_pause_(0),
137       max_alive_after_gc_(0),
138       min_in_mutator_(kMaxInt),
139       alive_after_last_gc_(0),
140       last_gc_end_timestamp_(0.0),
141       store_buffer_(this),
142       marking_(this),
143       incremental_marking_(this),
144       number_idle_notifications_(0),
145       last_idle_notification_gc_count_(0),
146       last_idle_notification_gc_count_init_(false),
147       idle_notification_will_schedule_next_gc_(false),
148       mark_sweeps_since_idle_round_started_(0),
149       ms_count_at_last_idle_notification_(0),
150       gc_count_at_last_idle_gc_(0),
151       scavenges_since_last_idle_round_(kIdleScavengeThreshold),
152       promotion_queue_(this),
153       configured_(false),
154       chunks_queued_for_free_(NULL) {
155   // Allow build-time customization of the max semispace size. Building
156   // V8 with snapshots and a non-default max semispace size is much
157   // easier if you can define it as part of the build environment.
158 #if defined(V8_MAX_SEMISPACE_SIZE)
159   max_semispace_size_ = reserved_semispace_size_ = V8_MAX_SEMISPACE_SIZE;
160 #endif
161
162   intptr_t max_virtual = OS::MaxVirtualMemory();
163
164   if (max_virtual > 0) {
165     if (code_range_size_ > 0) {
166       // Reserve no more than 1/8 of the memory for the code range.
167       code_range_size_ = Min(code_range_size_, max_virtual >> 3);
168     }
169   }
170
171   memset(roots_, 0, sizeof(roots_[0]) * kRootListLength);
172   global_contexts_list_ = NULL;
173   mark_compact_collector_.heap_ = this;
174   external_string_table_.heap_ = this;
175 }
176
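// A worked sizing sketch (not from the original source; it assumes the
// defaults above and that Page::kPageSize does not exceed 2 MB): on x64,
// LUMP_OF_MEMORY is 2 MB, so the constructor arrives at roughly
//
//   reserved_semispace_size_ = 8 * Max(2 MB, Page::kPageSize) =   16 MB
//   max_semispace_size_      = 8 * Max(2 MB, Page::kPageSize) =   16 MB
//   max_old_generation_size_ = 700 * 2 MB                     = 1400 MB
//   max_executable_size_     = 128 * 2 MB                     =  256 MB
//
// With the default LUMP_OF_MEMORY of 1 MB, or 128 KB on Android, these limits
// scale down accordingly (still subject to the Max() against Page::kPageSize
// for the semispace sizes, and to the V8_MAX_SEMISPACE_SIZE override above).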
177
178 intptr_t Heap::Capacity() {
179   if (!HasBeenSetUp()) return 0;
180
181   return new_space_.Capacity() +
182       old_pointer_space_->Capacity() +
183       old_data_space_->Capacity() +
184       code_space_->Capacity() +
185       map_space_->Capacity() +
186       cell_space_->Capacity();
187 }
188
189
190 intptr_t Heap::CommittedMemory() {
191   if (!HasBeenSetUp()) return 0;
192
193   return new_space_.CommittedMemory() +
194       old_pointer_space_->CommittedMemory() +
195       old_data_space_->CommittedMemory() +
196       code_space_->CommittedMemory() +
197       map_space_->CommittedMemory() +
198       cell_space_->CommittedMemory() +
199       lo_space_->Size();
200 }
201
202 intptr_t Heap::CommittedMemoryExecutable() {
203   if (!HasBeenSetUp()) return 0;
204
205   return isolate()->memory_allocator()->SizeExecutable();
206 }
207
208
209 intptr_t Heap::Available() {
210   if (!HasBeenSetUp()) return 0;
211
212   return new_space_.Available() +
213       old_pointer_space_->Available() +
214       old_data_space_->Available() +
215       code_space_->Available() +
216       map_space_->Available() +
217       cell_space_->Available();
218 }
219
220
221 bool Heap::HasBeenSetUp() {
222   return old_pointer_space_ != NULL &&
223          old_data_space_ != NULL &&
224          code_space_ != NULL &&
225          map_space_ != NULL &&
226          cell_space_ != NULL &&
227          lo_space_ != NULL;
228 }
229
230
231 int Heap::GcSafeSizeOfOldObject(HeapObject* object) {
232   if (IntrusiveMarking::IsMarked(object)) {
233     return IntrusiveMarking::SizeOfMarkedObject(object);
234   }
235   return object->SizeFromMap(object->map());
236 }
237
238
239 GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space) {
240   // Is global GC requested?
241   if (space != NEW_SPACE || FLAG_gc_global) {
242     isolate_->counters()->gc_compactor_caused_by_request()->Increment();
243     return MARK_COMPACTOR;
244   }
245
246   // Is enough data promoted to justify a global GC?
247   if (OldGenerationPromotionLimitReached()) {
248     isolate_->counters()->gc_compactor_caused_by_promoted_data()->Increment();
249     return MARK_COMPACTOR;
250   }
251
252   // Has an allocation in OLD or LO space failed?
253   if (old_gen_exhausted_) {
254     isolate_->counters()->
255         gc_compactor_caused_by_oldspace_exhaustion()->Increment();
256     return MARK_COMPACTOR;
257   }
258
259   // Is there enough space left in OLD to guarantee that a scavenge can
260   // succeed?
261   //
262   // Note that MemoryAllocator->MaxAvailable() undercounts the memory available
263   // for object promotion. It counts only the bytes that the memory
264   // allocator has not yet allocated from the OS and assigned to any space,
265   // and does not count available bytes already in the old space or code
266   // space.  Undercounting is safe---we may get an unrequested full GC when
267   // a scavenge would have succeeded.
268   if (isolate_->memory_allocator()->MaxAvailable() <= new_space_.Size()) {
269     isolate_->counters()->
270         gc_compactor_caused_by_oldspace_exhaustion()->Increment();
271     return MARK_COMPACTOR;
272   }
273
274   // Default
275   return SCAVENGER;
276 }
277
278
279 // TODO(1238405): Combine the infrastructure for --heap-stats and
280 // --log-gc to avoid the complicated preprocessor and flag testing.
281 void Heap::ReportStatisticsBeforeGC() {
282   // Heap::ReportHeapStatistics will also log NewSpace statistics when
283   // --log-gc is set.  The following logic is used to avoid double
284   // logging.
285 #ifdef DEBUG
286   if (FLAG_heap_stats || FLAG_log_gc) new_space_.CollectStatistics();
287   if (FLAG_heap_stats) {
288     ReportHeapStatistics("Before GC");
289   } else if (FLAG_log_gc) {
290     new_space_.ReportStatistics();
291   }
292   if (FLAG_heap_stats || FLAG_log_gc) new_space_.ClearHistograms();
293 #else
294   if (FLAG_log_gc) {
295     new_space_.CollectStatistics();
296     new_space_.ReportStatistics();
297     new_space_.ClearHistograms();
298   }
299 #endif  // DEBUG
300 }
301
302
303 void Heap::PrintShortHeapStatistics() {
304   if (!FLAG_trace_gc_verbose) return;
305   PrintF("Memory allocator,   used: %8" V8_PTR_PREFIX "d"
306              ", available: %8" V8_PTR_PREFIX "d\n",
307          isolate_->memory_allocator()->Size(),
308          isolate_->memory_allocator()->Available());
309   PrintF("New space,          used: %8" V8_PTR_PREFIX "d"
310              ", available: %8" V8_PTR_PREFIX "d\n",
311          Heap::new_space_.Size(),
312          new_space_.Available());
313   PrintF("Old pointers,       used: %8" V8_PTR_PREFIX "d"
314              ", available: %8" V8_PTR_PREFIX "d"
315              ", waste: %8" V8_PTR_PREFIX "d\n",
316          old_pointer_space_->Size(),
317          old_pointer_space_->Available(),
318          old_pointer_space_->Waste());
319   PrintF("Old data space,     used: %8" V8_PTR_PREFIX "d"
320              ", available: %8" V8_PTR_PREFIX "d"
321              ", waste: %8" V8_PTR_PREFIX "d\n",
322          old_data_space_->Size(),
323          old_data_space_->Available(),
324          old_data_space_->Waste());
325   PrintF("Code space,         used: %8" V8_PTR_PREFIX "d"
326              ", available: %8" V8_PTR_PREFIX "d"
327              ", waste: %8" V8_PTR_PREFIX "d\n",
328          code_space_->Size(),
329          code_space_->Available(),
330          code_space_->Waste());
331   PrintF("Map space,          used: %8" V8_PTR_PREFIX "d"
332              ", available: %8" V8_PTR_PREFIX "d"
333              ", waste: %8" V8_PTR_PREFIX "d\n",
334          map_space_->Size(),
335          map_space_->Available(),
336          map_space_->Waste());
337   PrintF("Cell space,         used: %8" V8_PTR_PREFIX "d"
338              ", available: %8" V8_PTR_PREFIX "d"
339              ", waste: %8" V8_PTR_PREFIX "d\n",
340          cell_space_->Size(),
341          cell_space_->Available(),
342          cell_space_->Waste());
343   PrintF("Large object space, used: %8" V8_PTR_PREFIX "d"
344              ", available: %8" V8_PTR_PREFIX "d\n",
345          lo_space_->Size(),
346          lo_space_->Available());
347 }
348
349
350 // TODO(1238405): Combine the infrastructure for --heap-stats and
351 // --log-gc to avoid the complicated preprocessor and flag testing.
352 void Heap::ReportStatisticsAfterGC() {
353   // As with the before-GC reporting, we use some complicated logic to ensure
354   // that NewSpace statistics are logged exactly once when --log-gc is on.
355 #if defined(DEBUG)
356   if (FLAG_heap_stats) {
357     new_space_.CollectStatistics();
358     ReportHeapStatistics("After GC");
359   } else if (FLAG_log_gc) {
360     new_space_.ReportStatistics();
361   }
362 #else
363   if (FLAG_log_gc) new_space_.ReportStatistics();
364 #endif  // DEBUG
365 }
366
367
368 void Heap::GarbageCollectionPrologue() {
369   isolate_->transcendental_cache()->Clear();
370   ClearJSFunctionResultCaches();
371   gc_count_++;
372   unflattened_strings_length_ = 0;
373 #ifdef DEBUG
374   ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
375   allow_allocation(false);
376
377   if (FLAG_verify_heap) {
378     Verify();
379   }
380
381   if (FLAG_gc_verbose) Print();
382 #endif  // DEBUG
383
384 #if defined(DEBUG)
385   ReportStatisticsBeforeGC();
386 #endif  // DEBUG
387
388   LiveObjectList::GCPrologue();
389   store_buffer()->GCPrologue();
390 }
391
392 intptr_t Heap::SizeOfObjects() {
393   intptr_t total = 0;
394   AllSpaces spaces;
395   for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
396     total += space->SizeOfObjects();
397   }
398   return total;
399 }
400
401 void Heap::GarbageCollectionEpilogue() {
402   store_buffer()->GCEpilogue();
403   LiveObjectList::GCEpilogue();
404 #ifdef DEBUG
405   allow_allocation(true);
406   ZapFromSpace();
407
408   if (FLAG_verify_heap) {
409     Verify();
410   }
411
412   if (FLAG_print_global_handles) isolate_->global_handles()->Print();
413   if (FLAG_print_handles) PrintHandles();
414   if (FLAG_gc_verbose) Print();
415   if (FLAG_code_stats) ReportCodeStatistics("After GC");
416 #endif
417
418   isolate_->counters()->alive_after_last_gc()->Set(
419       static_cast<int>(SizeOfObjects()));
420
421   isolate_->counters()->symbol_table_capacity()->Set(
422       symbol_table()->Capacity());
423   isolate_->counters()->number_of_symbols()->Set(
424       symbol_table()->NumberOfElements());
425 #if defined(DEBUG)
426   ReportStatisticsAfterGC();
427 #endif  // DEBUG
428 #ifdef ENABLE_DEBUGGER_SUPPORT
429   isolate_->debug()->AfterGarbageCollection();
430 #endif  // ENABLE_DEBUGGER_SUPPORT
431 }
432
433
434 void Heap::CollectAllGarbage(int flags) {
435   // Since we are ignoring the return value, the exact choice of space does
436   // not matter, so long as we do not specify NEW_SPACE, which would not
437   // cause a full GC.
438   mark_compact_collector_.SetFlags(flags);
439   CollectGarbage(OLD_POINTER_SPACE);
440   mark_compact_collector_.SetFlags(kNoGCFlags);
441 }
442
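// Usage sketch (an illustration, not code from the original file; it assumes
// the flag constants kNoGCFlags / kMakeHeapIterableMask are Heap members, as
// their unqualified use elsewhere in this file suggests):
static inline void ForceIterableFullGC(Heap* heap) {
  // CollectAllGarbage picks a non-new space internally, so the caller only
  // chooses the flags; kMakeHeapIterableMask additionally requests precise
  // sweeping so the heap can be iterated afterwards.
  heap->CollectAllGarbage(Heap::kMakeHeapIterableMask);
}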
443
444 void Heap::CollectAllAvailableGarbage() {
445   // Since we are ignoring the return value, the exact choice of space does
446   // not matter, so long as we do not specify NEW_SPACE, which would not
447   // cause a full GC.
448   // A major GC invokes weak handle callbacks on weakly reachable
449   // handles, but does not collect weakly reachable objects until the next
450   // major GC.  Therefore, if we collect aggressively and a weak handle
451   // callback has been invoked, we rerun the major GC to release objects
452   // that have become garbage.
453   // Note: as weak callbacks can execute arbitrary code, we cannot
454   // hope that eventually there will be no weak callback invocations.
455   // Therefore we stop recollecting after several attempts.
456   mark_compact_collector()->SetFlags(kMakeHeapIterableMask);
457   isolate_->compilation_cache()->Clear();
458   const int kMaxNumberOfAttempts = 7;
459   for (int attempt = 0; attempt < kMaxNumberOfAttempts; attempt++) {
460     if (!CollectGarbage(OLD_POINTER_SPACE, MARK_COMPACTOR)) {
461       break;
462     }
463   }
464   mark_compact_collector()->SetFlags(kNoGCFlags);
465   new_space_.Shrink();
466   UncommitFromSpace();
467   Shrink();
468   incremental_marking()->UncommitMarkingDeque();
469 }
470
471
472 bool Heap::CollectGarbage(AllocationSpace space, GarbageCollector collector) {
473   // The VM is in the GC state until exiting this function.
474   VMState state(isolate_, GC);
475
476 #ifdef DEBUG
477   // Reset the allocation timeout to the GC interval, but make sure to
478   // allow at least a few allocations after a collection. The reason
479   // for this is that we have a lot of allocation sequences and we
480   // assume that a garbage collection will allow the subsequent
481   // allocation attempts to go through.
482   allocation_timeout_ = Max(6, FLAG_gc_interval);
483 #endif
484
485   if (collector == SCAVENGER && !incremental_marking()->IsStopped()) {
486     if (FLAG_trace_incremental_marking) {
487       PrintF("[IncrementalMarking] Scavenge during marking.\n");
488     }
489   }
490
491   if (collector == MARK_COMPACTOR &&
492       !mark_compact_collector()->PreciseSweepingRequired() &&
493       !incremental_marking()->IsStopped() &&
494       !incremental_marking()->should_hurry() &&
495       FLAG_incremental_marking_steps) {
496     if (FLAG_trace_incremental_marking) {
497       PrintF("[IncrementalMarking] Delaying MarkSweep.\n");
498     }
499     collector = SCAVENGER;
500   }
501
502   bool next_gc_likely_to_collect_more = false;
503
504   { GCTracer tracer(this);
505     GarbageCollectionPrologue();
506     // The GC count was incremented in the prologue.  Tell the tracer about
507     // it.
508     tracer.set_gc_count(gc_count_);
509
510     // Tell the tracer which collector we've selected.
511     tracer.set_collector(collector);
512
513     HistogramTimer* rate = (collector == SCAVENGER)
514         ? isolate_->counters()->gc_scavenger()
515         : isolate_->counters()->gc_compactor();
516     rate->Start();
517     next_gc_likely_to_collect_more =
518         PerformGarbageCollection(collector, &tracer);
519     rate->Stop();
520
521     GarbageCollectionEpilogue();
522   }
523
524   ASSERT(collector == SCAVENGER || incremental_marking()->IsStopped());
525   if (incremental_marking()->IsStopped()) {
526     if (incremental_marking()->WorthActivating() && NextGCIsLikelyToBeFull()) {
527       incremental_marking()->Start();
528     }
529   }
530
531   return next_gc_likely_to_collect_more;
532 }
533
534
535 void Heap::PerformScavenge() {
536   GCTracer tracer(this);
537   if (incremental_marking()->IsStopped()) {
538     PerformGarbageCollection(SCAVENGER, &tracer);
539   } else {
540     PerformGarbageCollection(MARK_COMPACTOR, &tracer);
541   }
542 }
543
544
545 #ifdef DEBUG
546 // Helper class for verifying the symbol table.
547 class SymbolTableVerifier : public ObjectVisitor {
548  public:
549   void VisitPointers(Object** start, Object** end) {
550     // Visit all HeapObject pointers in [start, end).
551     for (Object** p = start; p < end; p++) {
552       if ((*p)->IsHeapObject()) {
553         // Check that the symbol is actually a symbol.
554         ASSERT((*p)->IsTheHole() || (*p)->IsUndefined() || (*p)->IsSymbol());
555       }
556     }
557   }
558 };
559 #endif  // DEBUG
560
561
562 static void VerifySymbolTable() {
563 #ifdef DEBUG
564   SymbolTableVerifier verifier;
565   HEAP->symbol_table()->IterateElements(&verifier);
566 #endif  // DEBUG
567 }
568
569
570 void Heap::ReserveSpace(
571     int new_space_size,
572     int pointer_space_size,
573     int data_space_size,
574     int code_space_size,
575     int map_space_size,
576     int cell_space_size,
577     int large_object_size) {
578   NewSpace* new_space = Heap::new_space();
579   PagedSpace* old_pointer_space = Heap::old_pointer_space();
580   PagedSpace* old_data_space = Heap::old_data_space();
581   PagedSpace* code_space = Heap::code_space();
582   PagedSpace* map_space = Heap::map_space();
583   PagedSpace* cell_space = Heap::cell_space();
584   LargeObjectSpace* lo_space = Heap::lo_space();
585   bool gc_performed = true;
586   while (gc_performed) {
587     gc_performed = false;
588     if (!new_space->ReserveSpace(new_space_size)) {
589       Heap::CollectGarbage(NEW_SPACE);
590       gc_performed = true;
591     }
592     if (!old_pointer_space->ReserveSpace(pointer_space_size)) {
593       Heap::CollectGarbage(OLD_POINTER_SPACE);
594       gc_performed = true;
595     }
596     if (!(old_data_space->ReserveSpace(data_space_size))) {
597       Heap::CollectGarbage(OLD_DATA_SPACE);
598       gc_performed = true;
599     }
600     if (!(code_space->ReserveSpace(code_space_size))) {
601       Heap::CollectGarbage(CODE_SPACE);
602       gc_performed = true;
603     }
604     if (!(map_space->ReserveSpace(map_space_size))) {
605       Heap::CollectGarbage(MAP_SPACE);
606       gc_performed = true;
607     }
608     if (!(cell_space->ReserveSpace(cell_space_size))) {
609       Heap::CollectGarbage(CELL_SPACE);
610       gc_performed = true;
611     }
612     // We add a slack-factor of 2 in order to have space for a series of
613     // large-object allocations that are only just larger than the page size.
614     large_object_size *= 2;
615     // The ReserveSpace method on the large object space checks how much
616     // we can expand the old generation.  This includes expansion caused by
617     // allocation in the other spaces.
618     large_object_size += cell_space_size + map_space_size + code_space_size +
619         data_space_size + pointer_space_size;
620     if (!(lo_space->ReserveSpace(large_object_size))) {
621       Heap::CollectGarbage(LO_SPACE);
622       gc_performed = true;
623     }
624   }
625 }
626
627
628 void Heap::EnsureFromSpaceIsCommitted() {
629   if (new_space_.CommitFromSpaceIfNeeded()) return;
630
631   // Committing memory to from space failed.
632   // Try shrinking and try again.
633   Shrink();
634   if (new_space_.CommitFromSpaceIfNeeded()) return;
635
636   // Committing memory to from space failed again.
637   // Memory is exhausted and we will die.
638   V8::FatalProcessOutOfMemory("Committing semi space failed.");
639 }
640
641
642 void Heap::ClearJSFunctionResultCaches() {
643   if (isolate_->bootstrapper()->IsActive()) return;
644
645   Object* context = global_contexts_list_;
646   while (!context->IsUndefined()) {
647     // Get the caches for this context. GC can happen when the context
648     // is not fully initialized, so the caches can be undefined.
649     Object* caches_or_undefined =
650         Context::cast(context)->get(Context::JSFUNCTION_RESULT_CACHES_INDEX);
651     if (!caches_or_undefined->IsUndefined()) {
652       FixedArray* caches = FixedArray::cast(caches_or_undefined);
653       // Clear the caches:
654       int length = caches->length();
655       for (int i = 0; i < length; i++) {
656         JSFunctionResultCache::cast(caches->get(i))->Clear();
657       }
658     }
659     // Get the next context:
660     context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
661   }
662 }
663
664
665
666 void Heap::ClearNormalizedMapCaches() {
667   if (isolate_->bootstrapper()->IsActive() &&
668       !incremental_marking()->IsMarking()) {
669     return;
670   }
671
672   Object* context = global_contexts_list_;
673   while (!context->IsUndefined()) {
674     // GC can happen when the context is not fully initialized,
675     // so the cache can be undefined.
676     Object* cache =
677         Context::cast(context)->get(Context::NORMALIZED_MAP_CACHE_INDEX);
678     if (!cache->IsUndefined()) {
679       NormalizedMapCache::cast(cache)->Clear();
680     }
681     context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
682   }
683 }
684
685
686 void Heap::UpdateSurvivalRateTrend(int start_new_space_size) {
687   double survival_rate =
688       (static_cast<double>(young_survivors_after_last_gc_) * 100) /
689       start_new_space_size;
690
691   if (survival_rate > kYoungSurvivalRateThreshold) {
692     high_survival_rate_period_length_++;
693   } else {
694     high_survival_rate_period_length_ = 0;
695   }
696
697   double survival_rate_diff = survival_rate_ - survival_rate;
698
699   if (survival_rate_diff > kYoungSurvivalRateAllowedDeviation) {
700     set_survival_rate_trend(DECREASING);
701   } else if (survival_rate_diff < -kYoungSurvivalRateAllowedDeviation) {
702     set_survival_rate_trend(INCREASING);
703   } else {
704     set_survival_rate_trend(STABLE);
705   }
706
707   survival_rate_ = survival_rate;
708 }
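// A worked example of the update above (a sketch; the two constants are
// defined elsewhere and only referenced here): if 3 MB of a 4 MB new space
// survived the last scavenge,
//
//   survival_rate = 100 * 3 MB / 4 MB = 75
//
// A rate above kYoungSurvivalRateThreshold extends the high-survival-rate
// period.  The trend is DECREASING if the previous rate exceeds the new one
// by more than kYoungSurvivalRateAllowedDeviation, INCREASING if it falls
// short of it by more than that, and STABLE otherwise.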
709
710 bool Heap::PerformGarbageCollection(GarbageCollector collector,
711                                     GCTracer* tracer) {
712   bool next_gc_likely_to_collect_more = false;
713
714   if (collector != SCAVENGER) {
715     PROFILE(isolate_, CodeMovingGCEvent());
716   }
717
718   if (FLAG_verify_heap) {
719     VerifySymbolTable();
720   }
721   if (collector == MARK_COMPACTOR && global_gc_prologue_callback_) {
722     ASSERT(!allocation_allowed_);
723     GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
724     global_gc_prologue_callback_();
725   }
726
727   GCType gc_type =
728       collector == MARK_COMPACTOR ? kGCTypeMarkSweepCompact : kGCTypeScavenge;
729
730   for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
731     if (gc_type & gc_prologue_callbacks_[i].gc_type) {
732       gc_prologue_callbacks_[i].callback(gc_type, kNoGCCallbackFlags);
733     }
734   }
735
736   EnsureFromSpaceIsCommitted();
737
738   int start_new_space_size = Heap::new_space()->SizeAsInt();
739
740   if (IsHighSurvivalRate()) {
741     // We speed up the incremental marker if it is running so that it
742     // does not fall behind the rate of promotion, which would cause a
743     // constantly growing old space.
744     incremental_marking()->NotifyOfHighPromotionRate();
745   }
746
747   if (collector == MARK_COMPACTOR) {
748     // Perform mark-sweep with optional compaction.
749     MarkCompact(tracer);
750     sweep_generation_++;
751     bool high_survival_rate_during_scavenges = IsHighSurvivalRate() &&
752         IsStableOrIncreasingSurvivalTrend();
753
754     UpdateSurvivalRateTrend(start_new_space_size);
755
756     if (!new_space_high_promotion_mode_active_ &&
757         new_space_.Capacity() == new_space_.MaximumCapacity() &&
758         IsStableOrIncreasingSurvivalTrend() &&
759         IsHighSurvivalRate()) {
760       // A stable high survival rate even though the young generation is at
761       // maximum capacity indicates that most objects will be promoted.
762       // To decrease scavenger pauses and final mark-sweep pauses, we
763       // have to limit the maximal capacity of the young generation.
764       new_space_high_promotion_mode_active_ = true;
765       if (FLAG_trace_gc) {
766         PrintF("Limited new space size due to high promotion rate: %d MB\n",
767                new_space_.InitialCapacity() / MB);
768       }
769     } else if (new_space_high_promotion_mode_active_ &&
770         IsDecreasingSurvivalTrend() &&
771         !IsHighSurvivalRate()) {
772       // A decreasing and no-longer-high survival rate might indicate that
773       // the high promotion mode above is over and we should allow the young
774       // generation to grow again.
775       new_space_high_promotion_mode_active_ = false;
776       if (FLAG_trace_gc) {
777         PrintF("Unlimited new space size due to low promotion rate: %d MB\n",
778                new_space_.MaximumCapacity() / MB);
779       }
780     }
781
782     size_of_old_gen_at_last_old_space_gc_ = PromotedSpaceSize();
783
784     if (high_survival_rate_during_scavenges &&
785         IsStableOrIncreasingSurvivalTrend()) {
786       // Stable high survival rates of young objects both during partial and
787       // full collections indicate that the mutator is either building or
788       // modifying a structure with a long lifetime.
789       // In this case we aggressively raise the old generation memory limits
790       // to postpone the subsequent mark-sweep collection and thus trade
791       // memory space for mutation speed.
792       old_gen_limit_factor_ = 2;
793     } else {
794       old_gen_limit_factor_ = 1;
795     }
796
797     old_gen_promotion_limit_ =
798         OldGenPromotionLimit(size_of_old_gen_at_last_old_space_gc_);
799     old_gen_allocation_limit_ =
800         OldGenAllocationLimit(size_of_old_gen_at_last_old_space_gc_);
801
802     old_gen_exhausted_ = false;
803   } else {
804     tracer_ = tracer;
805     Scavenge();
806     tracer_ = NULL;
807
808     UpdateSurvivalRateTrend(start_new_space_size);
809   }
810
811   if (new_space_high_promotion_mode_active_ &&
812       new_space_.Capacity() > new_space_.InitialCapacity()) {
813     new_space_.Shrink();
814   }
815
816   isolate_->counters()->objs_since_last_young()->Set(0);
817
818   gc_post_processing_depth_++;
819   { DisableAssertNoAllocation allow_allocation;
820     GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
821     next_gc_likely_to_collect_more =
822         isolate_->global_handles()->PostGarbageCollectionProcessing(collector);
823   }
824   gc_post_processing_depth_--;
825
826   // Update relocatables.
827   Relocatable::PostGarbageCollectionProcessing();
828
829   if (collector == MARK_COMPACTOR) {
830     // Register the amount of external allocated memory.
831     amount_of_external_allocated_memory_at_last_global_gc_ =
832         amount_of_external_allocated_memory_;
833   }
834
835   GCCallbackFlags callback_flags = kNoGCCallbackFlags;
836   for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
837     if (gc_type & gc_epilogue_callbacks_[i].gc_type) {
838       gc_epilogue_callbacks_[i].callback(gc_type, callback_flags);
839     }
840   }
841
842   if (collector == MARK_COMPACTOR && global_gc_epilogue_callback_) {
843     ASSERT(!allocation_allowed_);
844     GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
845     global_gc_epilogue_callback_();
846   }
847   if (FLAG_verify_heap) {
848     VerifySymbolTable();
849   }
850
851   return next_gc_likely_to_collect_more;
852 }
853
854
855 void Heap::MarkCompact(GCTracer* tracer) {
856   gc_state_ = MARK_COMPACT;
857   LOG(isolate_, ResourceEvent("markcompact", "begin"));
858
859   mark_compact_collector_.Prepare(tracer);
860
861   ms_count_++;
862   tracer->set_full_gc_count(ms_count_);
863
864   MarkCompactPrologue();
865
866   mark_compact_collector_.CollectGarbage();
867
868   LOG(isolate_, ResourceEvent("markcompact", "end"));
869
870   gc_state_ = NOT_IN_GC;
871
872   isolate_->counters()->objs_since_last_full()->Set(0);
873
874   contexts_disposed_ = 0;
875 }
876
877
878 void Heap::MarkCompactPrologue() {
879   // At any full (mark-compact) GC, clear the keyed lookup cache to enable
880   // collection of unused maps.
881   isolate_->keyed_lookup_cache()->Clear();
882   isolate_->context_slot_cache()->Clear();
883   isolate_->descriptor_lookup_cache()->Clear();
884   StringSplitCache::Clear(string_split_cache());
885
886   isolate_->compilation_cache()->MarkCompactPrologue();
887
888   CompletelyClearInstanceofCache();
889
890   // TODO(1605) select heuristic for flushing NumberString cache with
891   // FlushNumberStringCache
892   if (FLAG_cleanup_code_caches_at_gc) {
893     polymorphic_code_cache()->set_cache(undefined_value());
894   }
895
896   ClearNormalizedMapCaches();
897 }
898
899
900 Object* Heap::FindCodeObject(Address a) {
901   return isolate()->inner_pointer_to_code_cache()->
902       GcSafeFindCodeForInnerPointer(a);
903 }
904
905
906 // Helper class for copying HeapObjects
907 class ScavengeVisitor: public ObjectVisitor {
908  public:
909   explicit ScavengeVisitor(Heap* heap) : heap_(heap) {}
910
911   void VisitPointer(Object** p) { ScavengePointer(p); }
912
913   void VisitPointers(Object** start, Object** end) {
914     // Copy all HeapObject pointers in [start, end)
915     for (Object** p = start; p < end; p++) ScavengePointer(p);
916   }
917
918  private:
919   void ScavengePointer(Object** p) {
920     Object* object = *p;
921     if (!heap_->InNewSpace(object)) return;
922     Heap::ScavengeObject(reinterpret_cast<HeapObject**>(p),
923                          reinterpret_cast<HeapObject*>(object));
924   }
925
926   Heap* heap_;
927 };
928
929
930 #ifdef DEBUG
931 // Visitor class to verify that pointers in code or data space do not point
932 // into new space.
933 class VerifyNonPointerSpacePointersVisitor: public ObjectVisitor {
934  public:
935   void VisitPointers(Object** start, Object** end) {
936     for (Object** current = start; current < end; current++) {
937       if ((*current)->IsHeapObject()) {
938         ASSERT(!HEAP->InNewSpace(HeapObject::cast(*current)));
939       }
940     }
941   }
942 };
943
944
945 static void VerifyNonPointerSpacePointers() {
946   // Verify that there are no pointers to new space in spaces where we
947   // do not expect them.
948   VerifyNonPointerSpacePointersVisitor v;
949   HeapObjectIterator code_it(HEAP->code_space());
950   for (HeapObject* object = code_it.Next();
951        object != NULL; object = code_it.Next())
952     object->Iterate(&v);
953
954   // The old data space is normally swept conservatively, so the iterator
955   // does not work on it and we normally skip this check.
956   if (!HEAP->old_data_space()->was_swept_conservatively()) {
957     HeapObjectIterator data_it(HEAP->old_data_space());
958     for (HeapObject* object = data_it.Next();
959          object != NULL; object = data_it.Next())
960       object->Iterate(&v);
961   }
962 }
963 #endif
964
965
966 void Heap::CheckNewSpaceExpansionCriteria() {
967   if (new_space_.Capacity() < new_space_.MaximumCapacity() &&
968       survived_since_last_expansion_ > new_space_.Capacity() &&
969       !new_space_high_promotion_mode_active_) {
970     // Grow the size of new space if there is room to grow, enough data
971     // has survived scavenge since the last expansion and we are not in
972     // high promotion mode.
973     new_space_.Grow();
974     survived_since_last_expansion_ = 0;
975   }
976 }
977
978
979 static bool IsUnscavengedHeapObject(Heap* heap, Object** p) {
980   return heap->InNewSpace(*p) &&
981       !HeapObject::cast(*p)->map_word().IsForwardingAddress();
982 }
983
984
985 void Heap::ScavengeStoreBufferCallback(
986     Heap* heap,
987     MemoryChunk* page,
988     StoreBufferEvent event) {
989   heap->store_buffer_rebuilder_.Callback(page, event);
990 }
991
992
993 void StoreBufferRebuilder::Callback(MemoryChunk* page, StoreBufferEvent event) {
994   if (event == kStoreBufferStartScanningPagesEvent) {
995     start_of_current_page_ = NULL;
996     current_page_ = NULL;
997   } else if (event == kStoreBufferScanningPageEvent) {
998     if (current_page_ != NULL) {
999       // If this page already overflowed the store buffer during this iteration.
1000       if (current_page_->scan_on_scavenge()) {
1001         // Then we should wipe out the entries that have been added for it.
1002         store_buffer_->SetTop(start_of_current_page_);
1003       } else if (store_buffer_->Top() - start_of_current_page_ >=
1004                  (store_buffer_->Limit() - store_buffer_->Top()) >> 2) {
1005         // Did we find too many pointers in the previous page?  The heuristic is
1006         // that no page can take more than 1/5 the remaining slots in the store
1007         // buffer.
1008         current_page_->set_scan_on_scavenge(true);
1009         store_buffer_->SetTop(start_of_current_page_);
1010       } else {
1011         // In this case the page we scanned took a reasonable number of slots in
1012         // the store buffer.  It has now been rehabilitated and is no longer
1013         // marked scan_on_scavenge.
1014         ASSERT(!current_page_->scan_on_scavenge());
1015       }
1016     }
1017     start_of_current_page_ = store_buffer_->Top();
1018     current_page_ = page;
1019   } else if (event == kStoreBufferFullEvent) {
1020     // The current page overflowed the store buffer again.  Wipe out its entries
1021     // in the store buffer and mark it scan-on-scavenge again.  This may happen
1022     // several times while scanning.
1023     if (current_page_ == NULL) {
1024       // Store Buffer overflowed while scanning promoted objects.  These are not
1025       // in any particular page, though they are likely to be clustered by the
1026       // allocation routines.
1027       store_buffer_->EnsureSpace(StoreBuffer::kStoreBufferSize);
1028     } else {
1029       // Store Buffer overflowed while scanning a particular old space page for
1030       // pointers to new space.
1031       ASSERT(current_page_ == page);
1032       ASSERT(page != NULL);
1033       current_page_->set_scan_on_scavenge(true);
1034       ASSERT(start_of_current_page_ != store_buffer_->Top());
1035       store_buffer_->SetTop(start_of_current_page_);
1036     }
1037   } else {
1038     UNREACHABLE();
1039   }
1040 }
1041
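// A short derivation of the ">> 2" check above (added for clarity; it matches
// the 1/5 comment): let x be the slots this page just added and r the slots
// still free after them.  The page is flagged when
//
//   x >= r / 4   <=>   4 * x >= r   <=>   5 * x >= x + r
//
// i.e. when the page consumed at least one fifth of the slots that were free
// when scanning of that page started.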
1042
1043 void PromotionQueue::Initialize() {
1044   // Assumes that a NewSpacePage exactly fits a number of promotion queue
1045   // entries (where each is a pair of intptr_t). This allows us to simplify
1046   // the test for when to switch pages.
1047   ASSERT((Page::kPageSize - MemoryChunk::kBodyOffset) % (2 * kPointerSize)
1048          == 0);
1049   limit_ = reinterpret_cast<intptr_t*>(heap_->new_space()->ToSpaceStart());
1050   front_ = rear_ =
1051       reinterpret_cast<intptr_t*>(heap_->new_space()->ToSpaceEnd());
1052   emergency_stack_ = NULL;
1053   guard_ = false;
1054 }
1055
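// Entry-size note (a sketch, not from the original source): each promotion
// queue entry is a (size, object) pair of intptr_t, i.e. 2 * kPointerSize
// bytes -- 8 bytes on 32-bit targets, 16 bytes on 64-bit targets.  The ASSERT
// above guarantees that a page body holds a whole number of such entries, so
// RelocateQueueHead() below never sees an entry straddling a page boundary.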
1056
1057 void PromotionQueue::RelocateQueueHead() {
1058   ASSERT(emergency_stack_ == NULL);
1059
1060   Page* p = Page::FromAllocationTop(reinterpret_cast<Address>(rear_));
1061   intptr_t* head_start = rear_;
1062   intptr_t* head_end =
1063       Min(front_, reinterpret_cast<intptr_t*>(p->body_limit()));
1064
1065   int entries_count =
1066       static_cast<int>(head_end - head_start) / kEntrySizeInWords;
1067
1068   emergency_stack_ = new List<Entry>(2 * entries_count);
1069
1070   while (head_start != head_end) {
1071     int size = static_cast<int>(*(head_start++));
1072     HeapObject* obj = reinterpret_cast<HeapObject*>(*(head_start++));
1073     emergency_stack_->Add(Entry(obj, size));
1074   }
1075   rear_ = head_end;
1076 }
1077
1078
1079 void Heap::Scavenge() {
1080 #ifdef DEBUG
1081   if (FLAG_verify_heap) VerifyNonPointerSpacePointers();
1082 #endif
1083
1084   gc_state_ = SCAVENGE;
1085
1086   // Implements Cheney's copying algorithm
1087   LOG(isolate_, ResourceEvent("scavenge", "begin"));
1088
1089   // Clear descriptor cache.
1090   isolate_->descriptor_lookup_cache()->Clear();
1091
1092   // Used for updating survived_since_last_expansion_ at function end.
1093   intptr_t survived_watermark = PromotedSpaceSize();
1094
1095   CheckNewSpaceExpansionCriteria();
1096
1097   SelectScavengingVisitorsTable();
1098
1099   incremental_marking()->PrepareForScavenge();
1100
1101   AdvanceSweepers(static_cast<int>(new_space_.Size()));
1102
1103   // Flip the semispaces.  After flipping, to space is empty, from space has
1104   // live objects.
1105   new_space_.Flip();
1106   new_space_.ResetAllocationInfo();
1107
1108   // We need to sweep newly copied objects which can be either in the
1109   // to space or promoted to the old generation.  For to-space
1110   // objects, we treat the bottom of the to space as a queue.  Newly
1111   // copied and unswept objects lie between a 'front' mark and the
1112   // allocation pointer.
1113   //
1114   // Promoted objects can go into various old-generation spaces, and
1115   // can be allocated internally in the spaces (from the free list).
1116   // We treat the top of the to space as a queue of addresses of
1117   // promoted objects.  The addresses of newly promoted and unswept
1118   // objects lie between a 'front' mark and a 'rear' mark that is
1119   // updated as a side effect of promoting an object.
1120   //
1121   // There is guaranteed to be enough room at the top of the to space
1122   // for the addresses of promoted objects: every object promoted
1123   // frees up its size in bytes from the top of the new space, and
1124   // objects are at least one pointer in size.
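  // A rough picture of to-space while a scavenge is in progress (a sketch
  // based on the description above; front_ and rear_ belong to the promotion
  // queue and both start at ToSpaceEnd()):
  //
  //   [ToSpaceStart(), new_space_front)  copied objects, already processed
  //   [new_space_front, top())           copied objects, not yet processed
  //   [top(), rear_)                     free space; allocation and the queue
  //                                      grow towards each other
  //   [rear_, front_)                    (size, address) entries for promoted
  //                                      objects, not yet processed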
1125   Address new_space_front = new_space_.ToSpaceStart();
1126   promotion_queue_.Initialize();
1127
1128 #ifdef DEBUG
1129   store_buffer()->Clean();
1130 #endif
1131
1132   ScavengeVisitor scavenge_visitor(this);
1133   // Copy roots.
1134   IterateRoots(&scavenge_visitor, VISIT_ALL_IN_SCAVENGE);
1135
1136   // Copy objects reachable from the old generation.
1137   {
1138     StoreBufferRebuildScope scope(this,
1139                                   store_buffer(),
1140                                   &ScavengeStoreBufferCallback);
1141     store_buffer()->IteratePointersToNewSpace(&ScavengeObject);
1142   }
1143
1144   // Copy objects reachable from cells by scavenging cell values directly.
1145   HeapObjectIterator cell_iterator(cell_space_);
1146   for (HeapObject* cell = cell_iterator.Next();
1147        cell != NULL; cell = cell_iterator.Next()) {
1148     if (cell->IsJSGlobalPropertyCell()) {
1149       Address value_address =
1150           reinterpret_cast<Address>(cell) +
1151           (JSGlobalPropertyCell::kValueOffset - kHeapObjectTag);
1152       scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(value_address));
1153     }
1154   }
1155
1156   // Scavenge objects reachable from the global contexts list directly.
1157   scavenge_visitor.VisitPointer(BitCast<Object**>(&global_contexts_list_));
1158
1159   new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
1160   isolate_->global_handles()->IdentifyNewSpaceWeakIndependentHandles(
1161       &IsUnscavengedHeapObject);
1162   isolate_->global_handles()->IterateNewSpaceWeakIndependentRoots(
1163       &scavenge_visitor);
1164   new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
1165
1166   UpdateNewSpaceReferencesInExternalStringTable(
1167       &UpdateNewSpaceReferenceInExternalStringTableEntry);
1168
1169   promotion_queue_.Destroy();
1170
1171   LiveObjectList::UpdateReferencesForScavengeGC();
1172   isolate()->runtime_profiler()->UpdateSamplesAfterScavenge();
1173   incremental_marking()->UpdateMarkingDequeAfterScavenge();
1174
1175   ASSERT(new_space_front == new_space_.top());
1176
1177   // Set age mark.
1178   new_space_.set_age_mark(new_space_.top());
1179
1180   new_space_.LowerInlineAllocationLimit(
1181       new_space_.inline_allocation_limit_step());
1182
1183   // Update how much has survived scavenge.
1184   IncrementYoungSurvivorsCounter(static_cast<int>(
1185       (PromotedSpaceSize() - survived_watermark) + new_space_.Size()));
1186
1187   LOG(isolate_, ResourceEvent("scavenge", "end"));
1188
1189   gc_state_ = NOT_IN_GC;
1190
1191   scavenges_since_last_idle_round_++;
1192 }
1193
1194
1195 String* Heap::UpdateNewSpaceReferenceInExternalStringTableEntry(Heap* heap,
1196                                                                 Object** p) {
1197   MapWord first_word = HeapObject::cast(*p)->map_word();
1198
1199   if (!first_word.IsForwardingAddress()) {
1200     // An unreachable external string can be finalized.
1201     heap->FinalizeExternalString(String::cast(*p));
1202     return NULL;
1203   }
1204
1205   // String is still reachable.
1206   return String::cast(first_word.ToForwardingAddress());
1207 }
1208
1209
1210 void Heap::UpdateNewSpaceReferencesInExternalStringTable(
1211     ExternalStringTableUpdaterCallback updater_func) {
1212   if (FLAG_verify_heap) {
1213     external_string_table_.Verify();
1214   }
1215
1216   if (external_string_table_.new_space_strings_.is_empty()) return;
1217
1218   Object** start = &external_string_table_.new_space_strings_[0];
1219   Object** end = start + external_string_table_.new_space_strings_.length();
1220   Object** last = start;
1221
1222   for (Object** p = start; p < end; ++p) {
1223     ASSERT(InFromSpace(*p));
1224     String* target = updater_func(this, p);
1225
1226     if (target == NULL) continue;
1227
1228     ASSERT(target->IsExternalString());
1229
1230     if (InNewSpace(target)) {
1231       // String is still in new space.  Update the table entry.
1232       *last = target;
1233       ++last;
1234     } else {
1235       // String got promoted.  Move it to the old string list.
1236       external_string_table_.AddOldString(target);
1237     }
1238   }
1239
1240   ASSERT(last <= end);
1241   external_string_table_.ShrinkNewStrings(static_cast<int>(last - start));
1242 }
1243
1244
1245 void Heap::UpdateReferencesInExternalStringTable(
1246     ExternalStringTableUpdaterCallback updater_func) {
1247
1248   // Update old space string references.
1249   if (external_string_table_.old_space_strings_.length() > 0) {
1250     Object** start = &external_string_table_.old_space_strings_[0];
1251     Object** end = start + external_string_table_.old_space_strings_.length();
1252     for (Object** p = start; p < end; ++p) *p = updater_func(this, p);
1253   }
1254
1255   UpdateNewSpaceReferencesInExternalStringTable(updater_func);
1256 }
1257
1258
1259 static Object* ProcessFunctionWeakReferences(Heap* heap,
1260                                              Object* function,
1261                                              WeakObjectRetainer* retainer) {
1262   Object* undefined = heap->undefined_value();
1263   Object* head = undefined;
1264   JSFunction* tail = NULL;
1265   Object* candidate = function;
1266   while (candidate != undefined) {
1267     // Check whether to keep the candidate in the list.
1268     JSFunction* candidate_function = reinterpret_cast<JSFunction*>(candidate);
1269     Object* retain = retainer->RetainAs(candidate);
1270     if (retain != NULL) {
1271       if (head == undefined) {
1272         // First element in the list.
1273         head = retain;
1274       } else {
1275         // Subsequent elements in the list.
1276         ASSERT(tail != NULL);
1277         tail->set_next_function_link(retain);
1278       }
1279       // Retained function is new tail.
1280       candidate_function = reinterpret_cast<JSFunction*>(retain);
1281       tail = candidate_function;
1282
1283       ASSERT(retain->IsUndefined() || retain->IsJSFunction());
1284
1285       if (retain == undefined) break;
1286     }
1287
1288     // Move to next element in the list.
1289     candidate = candidate_function->next_function_link();
1290   }
1291
1292   // Terminate the list if it has one or more elements.
1293   if (tail != NULL) {
1294     tail->set_next_function_link(undefined);
1295   }
1296
1297   return head;
1298 }
1299
1300
1301 void Heap::ProcessWeakReferences(WeakObjectRetainer* retainer) {
1302   Object* undefined = undefined_value();
1303   Object* head = undefined;
1304   Context* tail = NULL;
1305   Object* candidate = global_contexts_list_;
1306   while (candidate != undefined) {
1307     // Check whether to keep the candidate in the list.
1308     Context* candidate_context = reinterpret_cast<Context*>(candidate);
1309     Object* retain = retainer->RetainAs(candidate);
1310     if (retain != NULL) {
1311       if (head == undefined) {
1312         // First element in the list.
1313         head = retain;
1314       } else {
1315         // Subsequent elements in the list.
1316         ASSERT(tail != NULL);
1317         tail->set_unchecked(this,
1318                             Context::NEXT_CONTEXT_LINK,
1319                             retain,
1320                             UPDATE_WRITE_BARRIER);
1321       }
1322       // Retained context is new tail.
1323       candidate_context = reinterpret_cast<Context*>(retain);
1324       tail = candidate_context;
1325
1326       if (retain == undefined) break;
1327
1328       // Process the weak list of optimized functions for the context.
1329       Object* function_list_head =
1330           ProcessFunctionWeakReferences(
1331               this,
1332               candidate_context->get(Context::OPTIMIZED_FUNCTIONS_LIST),
1333               retainer);
1334       candidate_context->set_unchecked(this,
1335                                        Context::OPTIMIZED_FUNCTIONS_LIST,
1336                                        function_list_head,
1337                                        UPDATE_WRITE_BARRIER);
1338     }
1339
1340     // Move to next element in the list.
1341     candidate = candidate_context->get(Context::NEXT_CONTEXT_LINK);
1342   }
1343
1344   // Terminate the list if it has one or more elements.
1345   if (tail != NULL) {
1346     tail->set_unchecked(this,
1347                         Context::NEXT_CONTEXT_LINK,
1348                         Heap::undefined_value(),
1349                         UPDATE_WRITE_BARRIER);
1350   }
1351
1352   // Update the head of the list of contexts.
1353   global_contexts_list_ = head;
1354 }
1355
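// The two functions above share one pattern: walk an intrusive singly linked
// list, ask a WeakObjectRetainer-style callback which elements survive, and
// relink the survivors in place.  The sketch below (an illustration only, on
// a plain struct; V8's real lists live in the heap and need the write-barrier
// aware set_unchecked() calls used above) shows the same shape in isolation.
struct WeakListSketchNode { WeakListSketchNode* next; };

template <typename KeepPredicate>
static WeakListSketchNode* FilterWeakListSketch(WeakListSketchNode* list,
                                                KeepPredicate keep) {
  WeakListSketchNode* head = NULL;
  WeakListSketchNode* tail = NULL;
  for (WeakListSketchNode* node = list; node != NULL; node = node->next) {
    if (!keep(node)) continue;          // Dropped from the list: just skip it.
    if (head == NULL) {
      head = node;                      // First survivor becomes the new head.
    } else {
      tail->next = node;                // Link after the previous survivor.
    }
    tail = node;
  }
  if (tail != NULL) tail->next = NULL;  // Terminate the rebuilt list.
  return head;
}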
1356
1357 void Heap::VisitExternalResources(v8::ExternalResourceVisitor* visitor) {
1358   AssertNoAllocation no_allocation;
1359
1360   class VisitorAdapter : public ObjectVisitor {
1361    public:
1362     explicit VisitorAdapter(v8::ExternalResourceVisitor* visitor)
1363         : visitor_(visitor) {}
1364     virtual void VisitPointers(Object** start, Object** end) {
1365       for (Object** p = start; p < end; p++) {
1366         if ((*p)->IsExternalString()) {
1367           visitor_->VisitExternalString(Utils::ToLocal(
1368               Handle<String>(String::cast(*p))));
1369         }
1370       }
1371     }
1372    private:
1373     v8::ExternalResourceVisitor* visitor_;
1374   } visitor_adapter(visitor);
1375   external_string_table_.Iterate(&visitor_adapter);
1376 }
1377
1378
1379 class NewSpaceScavenger : public StaticNewSpaceVisitor<NewSpaceScavenger> {
1380  public:
1381   static inline void VisitPointer(Heap* heap, Object** p) {
1382     Object* object = *p;
1383     if (!heap->InNewSpace(object)) return;
1384     Heap::ScavengeObject(reinterpret_cast<HeapObject**>(p),
1385                          reinterpret_cast<HeapObject*>(object));
1386   }
1387 };
1388
1389
1390 Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
1391                          Address new_space_front) {
1392   do {
1393     SemiSpace::AssertValidRange(new_space_front, new_space_.top());
1394     // The addresses new_space_front and new_space_.top() define a
1395     // queue of unprocessed copied objects.  Process them until the
1396     // queue is empty.
1397     while (new_space_front != new_space_.top()) {
1398       if (!NewSpacePage::IsAtEnd(new_space_front)) {
1399         HeapObject* object = HeapObject::FromAddress(new_space_front);
1400         new_space_front +=
1401           NewSpaceScavenger::IterateBody(object->map(), object);
1402       } else {
1403         new_space_front =
1404             NewSpacePage::FromLimit(new_space_front)->next_page()->body();
1405       }
1406     }
1407
1408     // Promote and process all the to-be-promoted objects.
1409     {
1410       StoreBufferRebuildScope scope(this,
1411                                     store_buffer(),
1412                                     &ScavengeStoreBufferCallback);
1413       while (!promotion_queue()->is_empty()) {
1414         HeapObject* target;
1415         int size;
1416         promotion_queue()->remove(&target, &size);
1417
1418         // The promoted object might already have been partially visited
1419         // during old space pointer iteration.  Thus we search specifically
1420         // for pointers to the from semispace instead of looking for
1421         // pointers to new space.
1422         ASSERT(!target->IsMap());
1423         IterateAndMarkPointersToFromSpace(target->address(),
1424                                           target->address() + size,
1425                                           &ScavengeObject);
1426       }
1427     }
1428
1429     // Take another spin if there are now unswept objects in new space
1430     // (there are currently no more unswept promoted objects).
1431   } while (new_space_front != new_space_.top());
1432
1433   return new_space_front;
1434 }
1435
1436
1437 enum LoggingAndProfiling {
1438   LOGGING_AND_PROFILING_ENABLED,
1439   LOGGING_AND_PROFILING_DISABLED
1440 };
1441
1442
1443 enum MarksHandling { TRANSFER_MARKS, IGNORE_MARKS };
1444
1445
1446 template<MarksHandling marks_handling,
1447          LoggingAndProfiling logging_and_profiling_mode>
1448 class ScavengingVisitor : public StaticVisitorBase {
1449  public:
1450   static void Initialize() {
1451     table_.Register(kVisitSeqAsciiString, &EvacuateSeqAsciiString);
1452     table_.Register(kVisitSeqTwoByteString, &EvacuateSeqTwoByteString);
1453     table_.Register(kVisitShortcutCandidate, &EvacuateShortcutCandidate);
1454     table_.Register(kVisitByteArray, &EvacuateByteArray);
1455     table_.Register(kVisitFixedArray, &EvacuateFixedArray);
1456     table_.Register(kVisitFixedDoubleArray, &EvacuateFixedDoubleArray);
1457
1458     table_.Register(kVisitGlobalContext,
1459                     &ObjectEvacuationStrategy<POINTER_OBJECT>::
1460                         template VisitSpecialized<Context::kSize>);
1461
1462     table_.Register(kVisitConsString,
1463                     &ObjectEvacuationStrategy<POINTER_OBJECT>::
1464                         template VisitSpecialized<ConsString::kSize>);
1465
1466     table_.Register(kVisitSlicedString,
1467                     &ObjectEvacuationStrategy<POINTER_OBJECT>::
1468                         template VisitSpecialized<SlicedString::kSize>);
1469
1470     table_.Register(kVisitSharedFunctionInfo,
1471                     &ObjectEvacuationStrategy<POINTER_OBJECT>::
1472                         template VisitSpecialized<SharedFunctionInfo::kSize>);
1473
1474     table_.Register(kVisitJSWeakMap,
1475                     &ObjectEvacuationStrategy<POINTER_OBJECT>::
1476                     Visit);
1477
1478     table_.Register(kVisitJSRegExp,
1479                     &ObjectEvacuationStrategy<POINTER_OBJECT>::
1480                     Visit);
1481
1482     if (marks_handling == IGNORE_MARKS) {
1483       table_.Register(kVisitJSFunction,
1484                       &ObjectEvacuationStrategy<POINTER_OBJECT>::
1485                           template VisitSpecialized<JSFunction::kSize>);
1486     } else {
1487       table_.Register(kVisitJSFunction, &EvacuateJSFunction);
1488     }
1489
1490     table_.RegisterSpecializations<ObjectEvacuationStrategy<DATA_OBJECT>,
1491                                    kVisitDataObject,
1492                                    kVisitDataObjectGeneric>();
1493
1494     table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>,
1495                                    kVisitJSObject,
1496                                    kVisitJSObjectGeneric>();
1497
1498     table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>,
1499                                    kVisitStruct,
1500                                    kVisitStructGeneric>();
1501   }
1502
1503   static VisitorDispatchTable<ScavengingCallback>* GetTable() {
1504     return &table_;
1505   }
1506
1507  private:
1508   enum ObjectContents  { DATA_OBJECT, POINTER_OBJECT };
1509   enum SizeRestriction { SMALL, UNKNOWN_SIZE };
1510
1511   static void RecordCopiedObject(Heap* heap, HeapObject* obj) {
1512     bool should_record = false;
1513 #ifdef DEBUG
1514     should_record = FLAG_heap_stats;
1515 #endif
1516     should_record = should_record || FLAG_log_gc;
1517     if (should_record) {
1518       if (heap->new_space()->Contains(obj)) {
1519         heap->new_space()->RecordAllocation(obj);
1520       } else {
1521         heap->new_space()->RecordPromotion(obj);
1522       }
1523     }
1524   }
1525
1526   // Helper function used by the evacuation routines to copy a source object
1527   // to an allocated target object and update the forwarding pointer in the
1528   // source object.
1529   INLINE(static void MigrateObject(Heap* heap,
1530                                    HeapObject* source,
1531                                    HeapObject* target,
1532                                    int size)) {
1533     // Copy the content of source to target.
1534     heap->CopyBlock(target->address(), source->address(), size);
1535
1536     // Set the forwarding address.
1537     source->set_map_word(MapWord::FromForwardingAddress(target));
1538
1539     if (logging_and_profiling_mode == LOGGING_AND_PROFILING_ENABLED) {
1540       // Update NewSpace stats if necessary.
1541       RecordCopiedObject(heap, target);
1542       HEAP_PROFILE(heap, ObjectMoveEvent(source->address(), target->address()));
1543       Isolate* isolate = heap->isolate();
1544       if (isolate->logger()->is_logging() ||
1545           CpuProfiler::is_profiling(isolate)) {
1546         if (target->IsSharedFunctionInfo()) {
1547           PROFILE(isolate, SharedFunctionInfoMoveEvent(
1548               source->address(), target->address()));
1549         }
1550       }
1551     }
1552
1553     if (marks_handling == TRANSFER_MARKS) {
1554       if (Marking::TransferColor(source, target)) {
1555         MemoryChunk::IncrementLiveBytes(target->address(), size);
1556       }
1557     }
1558   }
1559
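  // Copies |object| either into old space (if it should be promoted) or into
  // to-space, updates the forwarding pointer and, for promoted pointer
  // objects, pushes the copy onto the promotion queue for later processing.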
1560   template<ObjectContents object_contents, SizeRestriction size_restriction>
1561   static inline void EvacuateObject(Map* map,
1562                                     HeapObject** slot,
1563                                     HeapObject* object,
1564                                     int object_size) {
1565     SLOW_ASSERT((size_restriction != SMALL) ||
1566                 (object_size <= Page::kMaxHeapObjectSize));
1567     SLOW_ASSERT(object->Size() == object_size);
1568
1569     Heap* heap = map->GetHeap();
1570     if (heap->ShouldBePromoted(object->address(), object_size)) {
1571       MaybeObject* maybe_result;
1572
1573       if ((size_restriction != SMALL) &&
1574           (object_size > Page::kMaxHeapObjectSize)) {
1575         maybe_result = heap->lo_space()->AllocateRaw(object_size,
1576                                                      NOT_EXECUTABLE);
1577       } else {
1578         if (object_contents == DATA_OBJECT) {
1579           maybe_result = heap->old_data_space()->AllocateRaw(object_size);
1580         } else {
1581           maybe_result = heap->old_pointer_space()->AllocateRaw(object_size);
1582         }
1583       }
1584
1585       Object* result = NULL;  // Initialization to please compiler.
1586       if (maybe_result->ToObject(&result)) {
1587         HeapObject* target = HeapObject::cast(result);
1588
1589         // Order is important: slot might be inside of the target if target
1590         // was allocated over a dead object and slot comes from the store
1591         // buffer.
1592         *slot = target;
1593         MigrateObject(heap, object, target, object_size);
1594
1595         if (object_contents == POINTER_OBJECT) {
1596           heap->promotion_queue()->insert(target, object_size);
1597         }
1598
1599         heap->tracer()->increment_promoted_objects_size(object_size);
1600         return;
1601       }
1602     }
1603     MaybeObject* allocation = heap->new_space()->AllocateRaw(object_size);
1604     heap->promotion_queue()->SetNewLimit(heap->new_space()->top());
1605     Object* result = allocation->ToObjectUnchecked();
1606     HeapObject* target = HeapObject::cast(result);
1607
1608     // Order is important: slot might be inside of the target if target
1609     // was allocated over a dead object and slot comes from the store
1610     // buffer.
1611     *slot = target;
1612     MigrateObject(heap, object, target, object_size);
1613     return;
1614   }
1615
1616
1617   static inline void EvacuateJSFunction(Map* map,
1618                                         HeapObject** slot,
1619                                         HeapObject* object) {
1620     ObjectEvacuationStrategy<POINTER_OBJECT>::
1621         template VisitSpecialized<JSFunction::kSize>(map, slot, object);
1622
1623     HeapObject* target = *slot;
1624     MarkBit mark_bit = Marking::MarkBitFrom(target);
1625     if (Marking::IsBlack(mark_bit)) {
1626       // This object is black and it might not be rescanned by the marker.
1627       // We should explicitly record the code entry slot for compaction
1628       // because promotion queue processing (IterateAndMarkPointersToFromSpace)
1629       // will miss it, as it is not HeapObject-tagged.
1630       Address code_entry_slot =
1631           target->address() + JSFunction::kCodeEntryOffset;
1632       Code* code = Code::cast(Code::GetObjectFromEntryAddress(code_entry_slot));
1633       map->GetHeap()->mark_compact_collector()->
1634           RecordCodeEntrySlot(code_entry_slot, code);
1635     }
1636   }
1637
1638
1639   static inline void EvacuateFixedArray(Map* map,
1640                                         HeapObject** slot,
1641                                         HeapObject* object) {
1642     int object_size = FixedArray::BodyDescriptor::SizeOf(map, object);
1643     EvacuateObject<POINTER_OBJECT, UNKNOWN_SIZE>(map,
1644                                                  slot,
1645                                                  object,
1646                                                  object_size);
1647   }
1648
1649
1650   static inline void EvacuateFixedDoubleArray(Map* map,
1651                                               HeapObject** slot,
1652                                               HeapObject* object) {
1653     int length = reinterpret_cast<FixedDoubleArray*>(object)->length();
1654     int object_size = FixedDoubleArray::SizeFor(length);
1655     EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE>(map,
1656                                               slot,
1657                                               object,
1658                                               object_size);
1659   }
1660
1661
1662   static inline void EvacuateByteArray(Map* map,
1663                                        HeapObject** slot,
1664                                        HeapObject* object) {
1665     int object_size = reinterpret_cast<ByteArray*>(object)->ByteArraySize();
1666     EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE>(map, slot, object, object_size);
1667   }
1668
1669
1670   static inline void EvacuateSeqAsciiString(Map* map,
1671                                             HeapObject** slot,
1672                                             HeapObject* object) {
1673     int object_size = SeqAsciiString::cast(object)->
1674         SeqAsciiStringSize(map->instance_type());
1675     EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE>(map, slot, object, object_size);
1676   }
1677
1678
1679   static inline void EvacuateSeqTwoByteString(Map* map,
1680                                               HeapObject** slot,
1681                                               HeapObject* object) {
1682     int object_size = SeqTwoByteString::cast(object)->
1683         SeqTwoByteStringSize(map->instance_type());
1684     EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE>(map, slot, object, object_size);
1685   }
1686
1687
1688   static inline bool IsShortcutCandidate(int type) {
1689     return ((type & kShortcutTypeMask) == kShortcutTypeTag);
1690   }
1691
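  // Cons strings whose second part is the empty string can be short-circuited
  // to their first part during scavenging (unless marks have to be
  // transferred); otherwise they are evacuated like any other small pointer
  // object.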
1692   static inline void EvacuateShortcutCandidate(Map* map,
1693                                                HeapObject** slot,
1694                                                HeapObject* object) {
1695     ASSERT(IsShortcutCandidate(map->instance_type()));
1696
1697     Heap* heap = map->GetHeap();
1698
1699     if (marks_handling == IGNORE_MARKS &&
1700         ConsString::cast(object)->unchecked_second() ==
1701         heap->empty_string()) {
1702       HeapObject* first =
1703           HeapObject::cast(ConsString::cast(object)->unchecked_first());
1704
1705       *slot = first;
1706
1707       if (!heap->InNewSpace(first)) {
1708         object->set_map_word(MapWord::FromForwardingAddress(first));
1709         return;
1710       }
1711
1712       MapWord first_word = first->map_word();
1713       if (first_word.IsForwardingAddress()) {
1714         HeapObject* target = first_word.ToForwardingAddress();
1715
1716         *slot = target;
1717         object->set_map_word(MapWord::FromForwardingAddress(target));
1718         return;
1719       }
1720
1721       heap->DoScavengeObject(first->map(), slot, first);
1722       object->set_map_word(MapWord::FromForwardingAddress(*slot));
1723       return;
1724     }
1725
1726     int object_size = ConsString::kSize;
1727     EvacuateObject<POINTER_OBJECT, SMALL>(map, slot, object, object_size);
1728   }
1729
1730   template<ObjectContents object_contents>
1731   class ObjectEvacuationStrategy {
1732    public:
1733     template<int object_size>
1734     static inline void VisitSpecialized(Map* map,
1735                                         HeapObject** slot,
1736                                         HeapObject* object) {
1737       EvacuateObject<object_contents, SMALL>(map, slot, object, object_size);
1738     }
1739
1740     static inline void Visit(Map* map,
1741                              HeapObject** slot,
1742                              HeapObject* object) {
1743       int object_size = map->instance_size();
1744       EvacuateObject<object_contents, SMALL>(map, slot, object, object_size);
1745     }
1746   };
1747
1748   static VisitorDispatchTable<ScavengingCallback> table_;
1749 };
1750
1751
1752 template<MarksHandling marks_handling,
1753          LoggingAndProfiling logging_and_profiling_mode>
1754 VisitorDispatchTable<ScavengingCallback>
1755     ScavengingVisitor<marks_handling, logging_and_profiling_mode>::table_;
1756
1757
1758 static void InitializeScavengingVisitorsTables() {
1759   ScavengingVisitor<TRANSFER_MARKS,
1760                     LOGGING_AND_PROFILING_DISABLED>::Initialize();
1761   ScavengingVisitor<IGNORE_MARKS, LOGGING_AND_PROFILING_DISABLED>::Initialize();
1762   ScavengingVisitor<TRANSFER_MARKS,
1763                     LOGGING_AND_PROFILING_ENABLED>::Initialize();
1764   ScavengingVisitor<IGNORE_MARKS, LOGGING_AND_PROFILING_ENABLED>::Initialize();
1765 }
1766
1767
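// Selects one of the four statically initialized scavenging visitor tables,
// depending on whether incremental marking is active and whether object moves
// need to be logged or profiled.  When incremental compaction is in progress,
// cons-string short-circuiting is additionally disabled.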
1768 void Heap::SelectScavengingVisitorsTable() {
1769   bool logging_and_profiling =
1770       isolate()->logger()->is_logging() ||
1771       CpuProfiler::is_profiling(isolate()) ||
1772       (isolate()->heap_profiler() != NULL &&
1773        isolate()->heap_profiler()->is_profiling());
1774
1775   if (!incremental_marking()->IsMarking()) {
1776     if (!logging_and_profiling) {
1777       scavenging_visitors_table_.CopyFrom(
1778           ScavengingVisitor<IGNORE_MARKS,
1779                             LOGGING_AND_PROFILING_DISABLED>::GetTable());
1780     } else {
1781       scavenging_visitors_table_.CopyFrom(
1782           ScavengingVisitor<IGNORE_MARKS,
1783                             LOGGING_AND_PROFILING_ENABLED>::GetTable());
1784     }
1785   } else {
1786     if (!logging_and_profiling) {
1787       scavenging_visitors_table_.CopyFrom(
1788           ScavengingVisitor<TRANSFER_MARKS,
1789                             LOGGING_AND_PROFILING_DISABLED>::GetTable());
1790     } else {
1791       scavenging_visitors_table_.CopyFrom(
1792           ScavengingVisitor<TRANSFER_MARKS,
1793                             LOGGING_AND_PROFILING_ENABLED>::GetTable());
1794     }
1795
1796     if (incremental_marking()->IsCompacting()) {
1797       // When compacting, forbid short-circuiting of cons strings.
1798       // The scavenging code relies on the fact that a new space object
1799       // can't be evacuated into an evacuation candidate, but
1800       // short-circuiting violates this assumption.
1801       scavenging_visitors_table_.Register(
1802           StaticVisitorBase::kVisitShortcutCandidate,
1803           scavenging_visitors_table_.GetVisitorById(
1804               StaticVisitorBase::kVisitConsString));
1805     }
1806   }
1807 }
1808
1809
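// Slow path of Heap::ScavengeObject: the object has no forwarding address
// yet, so dispatch on its map through the selected scavenging visitor table.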
1810 void Heap::ScavengeObjectSlow(HeapObject** p, HeapObject* object) {
1811   SLOW_ASSERT(HEAP->InFromSpace(object));
1812   MapWord first_word = object->map_word();
1813   SLOW_ASSERT(!first_word.IsForwardingAddress());
1814   Map* map = first_word.ToMap();
1815   map->GetHeap()->DoScavengeObject(map, p, object);
1816 }
1817
1818
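// Allocates a map with just enough fields initialized for bootstrapping; the
// remaining fields (prototype, constructor, descriptors, etc.) are fixed up
// later in CreateInitialMaps.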
1819 MaybeObject* Heap::AllocatePartialMap(InstanceType instance_type,
1820                                       int instance_size) {
1821   Object* result;
1822   { MaybeObject* maybe_result = AllocateRawMap();
1823     if (!maybe_result->ToObject(&result)) return maybe_result;
1824   }
1825
1826   // Map::cast cannot be used due to uninitialized map field.
1827   reinterpret_cast<Map*>(result)->set_map(raw_unchecked_meta_map());
1828   reinterpret_cast<Map*>(result)->set_instance_type(instance_type);
1829   reinterpret_cast<Map*>(result)->set_instance_size(instance_size);
1830   reinterpret_cast<Map*>(result)->set_visitor_id(
1831         StaticVisitorBase::GetVisitorId(instance_type, instance_size));
1832   reinterpret_cast<Map*>(result)->set_inobject_properties(0);
1833   reinterpret_cast<Map*>(result)->set_pre_allocated_property_fields(0);
1834   reinterpret_cast<Map*>(result)->set_unused_property_fields(0);
1835   reinterpret_cast<Map*>(result)->set_bit_field(0);
1836   reinterpret_cast<Map*>(result)->set_bit_field2(0);
1837   return result;
1838 }
1839
1840
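// Allocates and fully initializes a map for the given instance type, instance
// size and elements kind.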
1841 MaybeObject* Heap::AllocateMap(InstanceType instance_type,
1842                                int instance_size,
1843                                ElementsKind elements_kind) {
1844   Object* result;
1845   { MaybeObject* maybe_result = AllocateRawMap();
1846     if (!maybe_result->ToObject(&result)) return maybe_result;
1847   }
1848
1849   Map* map = reinterpret_cast<Map*>(result);
1850   map->set_map_no_write_barrier(meta_map());
1851   map->set_instance_type(instance_type);
1852   map->set_visitor_id(
1853       StaticVisitorBase::GetVisitorId(instance_type, instance_size));
1854   map->set_prototype(null_value(), SKIP_WRITE_BARRIER);
1855   map->set_constructor(null_value(), SKIP_WRITE_BARRIER);
1856   map->set_instance_size(instance_size);
1857   map->set_inobject_properties(0);
1858   map->set_pre_allocated_property_fields(0);
1859   map->init_instance_descriptors();
1860   map->set_code_cache(empty_fixed_array(), SKIP_WRITE_BARRIER);
1861   map->set_prototype_transitions(empty_fixed_array(), SKIP_WRITE_BARRIER);
1862   map->set_unused_property_fields(0);
1863   map->set_bit_field(0);
1864   map->set_bit_field2(1 << Map::kIsExtensible);
1865   map->set_elements_kind(elements_kind);
1866
1867   // If the map object is aligned, fill the padding area with Smi 0 objects.
1868   if (Map::kPadStart < Map::kSize) {
1869     memset(reinterpret_cast<byte*>(map) + Map::kPadStart - kHeapObjectTag,
1870            0,
1871            Map::kSize - Map::kPadStart);
1872   }
1873   return map;
1874 }
1875
1876
1877 MaybeObject* Heap::AllocateCodeCache() {
1878   Object* result;
1879   { MaybeObject* maybe_result = AllocateStruct(CODE_CACHE_TYPE);
1880     if (!maybe_result->ToObject(&result)) return maybe_result;
1881   }
1882   CodeCache* code_cache = CodeCache::cast(result);
1883   code_cache->set_default_cache(empty_fixed_array(), SKIP_WRITE_BARRIER);
1884   code_cache->set_normal_type_cache(undefined_value(), SKIP_WRITE_BARRIER);
1885   return code_cache;
1886 }
1887
1888
1889 MaybeObject* Heap::AllocatePolymorphicCodeCache() {
1890   return AllocateStruct(POLYMORPHIC_CODE_CACHE_TYPE);
1891 }
1892
1893
1894 MaybeObject* Heap::AllocateAccessorPair() {
1895   Object* result;
1896   { MaybeObject* maybe_result = AllocateStruct(ACCESSOR_PAIR_TYPE);
1897     if (!maybe_result->ToObject(&result)) return maybe_result;
1898   }
1899   AccessorPair* accessors = AccessorPair::cast(result);
1900   // Later we will have to distinguish between undefined and the hole...
1901   // accessors->set_getter(the_hole_value(), SKIP_WRITE_BARRIER);
1902   // accessors->set_setter(the_hole_value(), SKIP_WRITE_BARRIER);
1903   return accessors;
1904 }
1905
1906
1907 const Heap::StringTypeTable Heap::string_type_table[] = {
1908 #define STRING_TYPE_ELEMENT(type, size, name, camel_name)                      \
1909   {type, size, k##camel_name##MapRootIndex},
1910   STRING_TYPE_LIST(STRING_TYPE_ELEMENT)
1911 #undef STRING_TYPE_ELEMENT
1912 };
1913
1914
1915 const Heap::ConstantSymbolTable Heap::constant_symbol_table[] = {
1916 #define CONSTANT_SYMBOL_ELEMENT(name, contents)                                \
1917   {contents, k##name##RootIndex},
1918   SYMBOL_LIST(CONSTANT_SYMBOL_ELEMENT)
1919 #undef CONSTANT_SYMBOL_ELEMENT
1920 };
1921
1922
1923 const Heap::StructTable Heap::struct_table[] = {
1924 #define STRUCT_TABLE_ELEMENT(NAME, Name, name)                                 \
1925   { NAME##_TYPE, Name::kSize, k##Name##MapRootIndex },
1926   STRUCT_LIST(STRUCT_TABLE_ELEMENT)
1927 #undef STRUCT_TABLE_ELEMENT
1928 };
1929
1930
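// Creates the maps needed before any other objects can be allocated.  The
// meta map must come first (it is its own map), followed by the partial maps
// for fixed arrays and oddballs; once the empty fixed array, null, undefined
// and the empty descriptor array exist, the partial maps are patched up.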
1931 bool Heap::CreateInitialMaps() {
1932   Object* obj;
1933   { MaybeObject* maybe_obj = AllocatePartialMap(MAP_TYPE, Map::kSize);
1934     if (!maybe_obj->ToObject(&obj)) return false;
1935   }
1936   // Map::cast cannot be used due to uninitialized map field.
1937   Map* new_meta_map = reinterpret_cast<Map*>(obj);
1938   set_meta_map(new_meta_map);
1939   new_meta_map->set_map(new_meta_map);
1940
1941   { MaybeObject* maybe_obj =
1942         AllocatePartialMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
1943     if (!maybe_obj->ToObject(&obj)) return false;
1944   }
1945   set_fixed_array_map(Map::cast(obj));
1946
1947   { MaybeObject* maybe_obj = AllocatePartialMap(ODDBALL_TYPE, Oddball::kSize);
1948     if (!maybe_obj->ToObject(&obj)) return false;
1949   }
1950   set_oddball_map(Map::cast(obj));
1951
1952   // Allocate the empty array.
1953   { MaybeObject* maybe_obj = AllocateEmptyFixedArray();
1954     if (!maybe_obj->ToObject(&obj)) return false;
1955   }
1956   set_empty_fixed_array(FixedArray::cast(obj));
1957
1958   { MaybeObject* maybe_obj = Allocate(oddball_map(), OLD_POINTER_SPACE);
1959     if (!maybe_obj->ToObject(&obj)) return false;
1960   }
1961   set_null_value(Oddball::cast(obj));
1962   Oddball::cast(obj)->set_kind(Oddball::kNull);
1963
1964   { MaybeObject* maybe_obj = Allocate(oddball_map(), OLD_POINTER_SPACE);
1965     if (!maybe_obj->ToObject(&obj)) return false;
1966   }
1967   set_undefined_value(Oddball::cast(obj));
1968   Oddball::cast(obj)->set_kind(Oddball::kUndefined);
1969   ASSERT(!InNewSpace(undefined_value()));
1970
1971   // Allocate the empty descriptor array.
1972   { MaybeObject* maybe_obj = AllocateEmptyFixedArray();
1973     if (!maybe_obj->ToObject(&obj)) return false;
1974   }
1975   set_empty_descriptor_array(DescriptorArray::cast(obj));
1976
1977   // Fix the instance_descriptors for the existing maps.
1978   meta_map()->init_instance_descriptors();
1979   meta_map()->set_code_cache(empty_fixed_array());
1980   meta_map()->set_prototype_transitions(empty_fixed_array());
1981
1982   fixed_array_map()->init_instance_descriptors();
1983   fixed_array_map()->set_code_cache(empty_fixed_array());
1984   fixed_array_map()->set_prototype_transitions(empty_fixed_array());
1985
1986   oddball_map()->init_instance_descriptors();
1987   oddball_map()->set_code_cache(empty_fixed_array());
1988   oddball_map()->set_prototype_transitions(empty_fixed_array());
1989
1990   // Fix prototype object for existing maps.
1991   meta_map()->set_prototype(null_value());
1992   meta_map()->set_constructor(null_value());
1993
1994   fixed_array_map()->set_prototype(null_value());
1995   fixed_array_map()->set_constructor(null_value());
1996
1997   oddball_map()->set_prototype(null_value());
1998   oddball_map()->set_constructor(null_value());
1999
2000   { MaybeObject* maybe_obj =
2001         AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2002     if (!maybe_obj->ToObject(&obj)) return false;
2003   }
2004   set_fixed_cow_array_map(Map::cast(obj));
2005   ASSERT(fixed_array_map() != fixed_cow_array_map());
2006
2007   { MaybeObject* maybe_obj =
2008         AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2009     if (!maybe_obj->ToObject(&obj)) return false;
2010   }
2011   set_scope_info_map(Map::cast(obj));
2012
2013   { MaybeObject* maybe_obj = AllocateMap(HEAP_NUMBER_TYPE, HeapNumber::kSize);
2014     if (!maybe_obj->ToObject(&obj)) return false;
2015   }
2016   set_heap_number_map(Map::cast(obj));
2017
2018   { MaybeObject* maybe_obj = AllocateMap(FOREIGN_TYPE, Foreign::kSize);
2019     if (!maybe_obj->ToObject(&obj)) return false;
2020   }
2021   set_foreign_map(Map::cast(obj));
2022
2023   for (unsigned i = 0; i < ARRAY_SIZE(string_type_table); i++) {
2024     const StringTypeTable& entry = string_type_table[i];
2025     { MaybeObject* maybe_obj = AllocateMap(entry.type, entry.size);
2026       if (!maybe_obj->ToObject(&obj)) return false;
2027     }
2028     roots_[entry.index] = Map::cast(obj);
2029   }
2030
2031   { MaybeObject* maybe_obj = AllocateMap(STRING_TYPE, kVariableSizeSentinel);
2032     if (!maybe_obj->ToObject(&obj)) return false;
2033   }
2034   set_undetectable_string_map(Map::cast(obj));
2035   Map::cast(obj)->set_is_undetectable();
2036
2037   { MaybeObject* maybe_obj =
2038         AllocateMap(ASCII_STRING_TYPE, kVariableSizeSentinel);
2039     if (!maybe_obj->ToObject(&obj)) return false;
2040   }
2041   set_undetectable_ascii_string_map(Map::cast(obj));
2042   Map::cast(obj)->set_is_undetectable();
2043
2044   { MaybeObject* maybe_obj =
2045         AllocateMap(FIXED_DOUBLE_ARRAY_TYPE, kVariableSizeSentinel);
2046     if (!maybe_obj->ToObject(&obj)) return false;
2047   }
2048   set_fixed_double_array_map(Map::cast(obj));
2049
2050   { MaybeObject* maybe_obj =
2051         AllocateMap(BYTE_ARRAY_TYPE, kVariableSizeSentinel);
2052     if (!maybe_obj->ToObject(&obj)) return false;
2053   }
2054   set_byte_array_map(Map::cast(obj));
2055
2056   { MaybeObject* maybe_obj =
2057         AllocateMap(FREE_SPACE_TYPE, kVariableSizeSentinel);
2058     if (!maybe_obj->ToObject(&obj)) return false;
2059   }
2060   set_free_space_map(Map::cast(obj));
2061
2062   { MaybeObject* maybe_obj = AllocateByteArray(0, TENURED);
2063     if (!maybe_obj->ToObject(&obj)) return false;
2064   }
2065   set_empty_byte_array(ByteArray::cast(obj));
2066
2067   { MaybeObject* maybe_obj =
2068         AllocateMap(EXTERNAL_PIXEL_ARRAY_TYPE, ExternalArray::kAlignedSize);
2069     if (!maybe_obj->ToObject(&obj)) return false;
2070   }
2071   set_external_pixel_array_map(Map::cast(obj));
2072
2073   { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_BYTE_ARRAY_TYPE,
2074                                          ExternalArray::kAlignedSize);
2075     if (!maybe_obj->ToObject(&obj)) return false;
2076   }
2077   set_external_byte_array_map(Map::cast(obj));
2078
2079   { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE,
2080                                          ExternalArray::kAlignedSize);
2081     if (!maybe_obj->ToObject(&obj)) return false;
2082   }
2083   set_external_unsigned_byte_array_map(Map::cast(obj));
2084
2085   { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_SHORT_ARRAY_TYPE,
2086                                          ExternalArray::kAlignedSize);
2087     if (!maybe_obj->ToObject(&obj)) return false;
2088   }
2089   set_external_short_array_map(Map::cast(obj));
2090
2091   { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE,
2092                                          ExternalArray::kAlignedSize);
2093     if (!maybe_obj->ToObject(&obj)) return false;
2094   }
2095   set_external_unsigned_short_array_map(Map::cast(obj));
2096
2097   { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_INT_ARRAY_TYPE,
2098                                          ExternalArray::kAlignedSize);
2099     if (!maybe_obj->ToObject(&obj)) return false;
2100   }
2101   set_external_int_array_map(Map::cast(obj));
2102
2103   { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_UNSIGNED_INT_ARRAY_TYPE,
2104                                          ExternalArray::kAlignedSize);
2105     if (!maybe_obj->ToObject(&obj)) return false;
2106   }
2107   set_external_unsigned_int_array_map(Map::cast(obj));
2108
2109   { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_FLOAT_ARRAY_TYPE,
2110                                          ExternalArray::kAlignedSize);
2111     if (!maybe_obj->ToObject(&obj)) return false;
2112   }
2113   set_external_float_array_map(Map::cast(obj));
2114
2115   { MaybeObject* maybe_obj =
2116         AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2117     if (!maybe_obj->ToObject(&obj)) return false;
2118   }
2119   set_non_strict_arguments_elements_map(Map::cast(obj));
2120
2121   { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_DOUBLE_ARRAY_TYPE,
2122                                          ExternalArray::kAlignedSize);
2123     if (!maybe_obj->ToObject(&obj)) return false;
2124   }
2125   set_external_double_array_map(Map::cast(obj));
2126
2127   { MaybeObject* maybe_obj = AllocateMap(CODE_TYPE, kVariableSizeSentinel);
2128     if (!maybe_obj->ToObject(&obj)) return false;
2129   }
2130   set_code_map(Map::cast(obj));
2131
2132   { MaybeObject* maybe_obj = AllocateMap(JS_GLOBAL_PROPERTY_CELL_TYPE,
2133                                          JSGlobalPropertyCell::kSize);
2134     if (!maybe_obj->ToObject(&obj)) return false;
2135   }
2136   set_global_property_cell_map(Map::cast(obj));
2137
2138   { MaybeObject* maybe_obj = AllocateMap(FILLER_TYPE, kPointerSize);
2139     if (!maybe_obj->ToObject(&obj)) return false;
2140   }
2141   set_one_pointer_filler_map(Map::cast(obj));
2142
2143   { MaybeObject* maybe_obj = AllocateMap(FILLER_TYPE, 2 * kPointerSize);
2144     if (!maybe_obj->ToObject(&obj)) return false;
2145   }
2146   set_two_pointer_filler_map(Map::cast(obj));
2147
2148   for (unsigned i = 0; i < ARRAY_SIZE(struct_table); i++) {
2149     const StructTable& entry = struct_table[i];
2150     { MaybeObject* maybe_obj = AllocateMap(entry.type, entry.size);
2151       if (!maybe_obj->ToObject(&obj)) return false;
2152     }
2153     roots_[entry.index] = Map::cast(obj);
2154   }
2155
2156   { MaybeObject* maybe_obj =
2157         AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2158     if (!maybe_obj->ToObject(&obj)) return false;
2159   }
2160   set_hash_table_map(Map::cast(obj));
2161
2162   { MaybeObject* maybe_obj =
2163         AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2164     if (!maybe_obj->ToObject(&obj)) return false;
2165   }
2166   set_function_context_map(Map::cast(obj));
2167
2168   { MaybeObject* maybe_obj =
2169         AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2170     if (!maybe_obj->ToObject(&obj)) return false;
2171   }
2172   set_catch_context_map(Map::cast(obj));
2173
2174   { MaybeObject* maybe_obj =
2175         AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2176     if (!maybe_obj->ToObject(&obj)) return false;
2177   }
2178   set_with_context_map(Map::cast(obj));
2179
2180   { MaybeObject* maybe_obj =
2181         AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2182     if (!maybe_obj->ToObject(&obj)) return false;
2183   }
2184   set_block_context_map(Map::cast(obj));
2185
2186   { MaybeObject* maybe_obj =
2187         AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2188     if (!maybe_obj->ToObject(&obj)) return false;
2189   }
2190   Map* global_context_map = Map::cast(obj);
2191   global_context_map->set_visitor_id(StaticVisitorBase::kVisitGlobalContext);
2192   set_global_context_map(global_context_map);
2193
2194   { MaybeObject* maybe_obj = AllocateMap(SHARED_FUNCTION_INFO_TYPE,
2195                                          SharedFunctionInfo::kAlignedSize);
2196     if (!maybe_obj->ToObject(&obj)) return false;
2197   }
2198   set_shared_function_info_map(Map::cast(obj));
2199
2200   { MaybeObject* maybe_obj = AllocateMap(JS_MESSAGE_OBJECT_TYPE,
2201                                          JSMessageObject::kSize);
2202     if (!maybe_obj->ToObject(&obj)) return false;
2203   }
2204   set_message_object_map(Map::cast(obj));
2205
2206   ASSERT(!InNewSpace(empty_fixed_array()));
2207   return true;
2208 }
2209
2210
2211 MaybeObject* Heap::AllocateHeapNumber(double value, PretenureFlag pretenure) {
2212   // Statically ensure that it is safe to allocate heap numbers in paged
2213   // spaces.
2214   STATIC_ASSERT(HeapNumber::kSize <= Page::kMaxHeapObjectSize);
2215   AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
2216
2217   Object* result;
2218   { MaybeObject* maybe_result =
2219         AllocateRaw(HeapNumber::kSize, space, OLD_DATA_SPACE);
2220     if (!maybe_result->ToObject(&result)) return maybe_result;
2221   }
2222
2223   HeapObject::cast(result)->set_map_no_write_barrier(heap_number_map());
2224   HeapNumber::cast(result)->set_value(value);
2225   return result;
2226 }
2227
2228
2229 MaybeObject* Heap::AllocateHeapNumber(double value) {
2230   // Use the general version if we're forced to always allocate.
2231   if (always_allocate()) return AllocateHeapNumber(value, TENURED);
2232
2233   // This version of AllocateHeapNumber is optimized for
2234   // allocation in new space.
2235   STATIC_ASSERT(HeapNumber::kSize <= Page::kMaxHeapObjectSize);
2236   ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
2237   Object* result;
2238   { MaybeObject* maybe_result = new_space_.AllocateRaw(HeapNumber::kSize);
2239     if (!maybe_result->ToObject(&result)) return maybe_result;
2240   }
2241   HeapObject::cast(result)->set_map_no_write_barrier(heap_number_map());
2242   HeapNumber::cast(result)->set_value(value);
2243   return result;
2244 }
2245
2246
2247 MaybeObject* Heap::AllocateJSGlobalPropertyCell(Object* value) {
2248   Object* result;
2249   { MaybeObject* maybe_result = AllocateRawCell();
2250     if (!maybe_result->ToObject(&result)) return maybe_result;
2251   }
2252   HeapObject::cast(result)->set_map_no_write_barrier(
2253       global_property_cell_map());
2254   JSGlobalPropertyCell::cast(result)->set_value(value);
2255   return result;
2256 }
2257
2258
2259 MaybeObject* Heap::CreateOddball(const char* to_string,
2260                                  Object* to_number,
2261                                  byte kind) {
2262   Object* result;
2263   { MaybeObject* maybe_result = Allocate(oddball_map(), OLD_POINTER_SPACE);
2264     if (!maybe_result->ToObject(&result)) return maybe_result;
2265   }
2266   return Oddball::cast(result)->Initialize(to_string, to_number, kind);
2267 }
2268
2269
2270 bool Heap::CreateApiObjects() {
2271   Object* obj;
2272
2273   { MaybeObject* maybe_obj = AllocateMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
2274     if (!maybe_obj->ToObject(&obj)) return false;
2275   }
2276   // Don't use Smi-only elements optimizations for objects with the neander
2277   // map. There are too many cases where element values are set directly,
2278   // without a bottleneck that could trap the Smi-only -> fast elements
2279   // transition, and there appears to be no benefit in optimizing this case.
2280   Map* new_neander_map = Map::cast(obj);
2281   new_neander_map->set_elements_kind(FAST_ELEMENTS);
2282   set_neander_map(new_neander_map);
2283
2284   { MaybeObject* maybe_obj = AllocateJSObjectFromMap(neander_map());
2285     if (!maybe_obj->ToObject(&obj)) return false;
2286   }
2287   Object* elements;
2288   { MaybeObject* maybe_elements = AllocateFixedArray(2);
2289     if (!maybe_elements->ToObject(&elements)) return false;
2290   }
2291   FixedArray::cast(elements)->set(0, Smi::FromInt(0));
2292   JSObject::cast(obj)->set_elements(FixedArray::cast(elements));
2293   set_message_listeners(JSObject::cast(obj));
2294
2295   return true;
2296 }
2297
2298
2299 void Heap::CreateJSEntryStub() {
2300   JSEntryStub stub;
2301   set_js_entry_code(*stub.GetCode());
2302 }
2303
2304
2305 void Heap::CreateJSConstructEntryStub() {
2306   JSConstructEntryStub stub;
2307   set_js_construct_entry_code(*stub.GetCode());
2308 }
2309
2310
2311 void Heap::CreateFixedStubs() {
2312   // Here we create roots for fixed stubs. They are needed at GC
2313   // for cooking and uncooking (check out frames.cc).
2314   // This eliminates the need for doing a dictionary lookup in the
2315   // stub cache for these stubs.
2316   HandleScope scope;
2317   // gcc-4.4 has problem generating correct code of following snippet:
2318   // {  JSEntryStub stub;
2319   //    js_entry_code_ = *stub.GetCode();
2320   // }
2321   // {  JSConstructEntryStub stub;
2322   //    js_construct_entry_code_ = *stub.GetCode();
2323   // }
2324   // To workaround the problem, make separate functions without inlining.
2325   Heap::CreateJSEntryStub();
2326   Heap::CreateJSConstructEntryStub();
2327
2328   // Create stubs that should be there, so we don't unexpectedly have to
2329   // create them if we need them during the creation of another stub.
2330   // Stub creation mixes raw pointers and handles in an unsafe manner so
2331   // we cannot create stubs while we are creating stubs.
2332   CodeStub::GenerateStubsAheadOfTime();
2333 }
2334
2335
2336 bool Heap::CreateInitialObjects() {
2337   Object* obj;
2338
2339   // The -0 value must be set before NumberFromDouble works.
2340   { MaybeObject* maybe_obj = AllocateHeapNumber(-0.0, TENURED);
2341     if (!maybe_obj->ToObject(&obj)) return false;
2342   }
2343   set_minus_zero_value(HeapNumber::cast(obj));
2344   ASSERT(signbit(minus_zero_value()->Number()) != 0);
2345
2346   { MaybeObject* maybe_obj = AllocateHeapNumber(OS::nan_value(), TENURED);
2347     if (!maybe_obj->ToObject(&obj)) return false;
2348   }
2349   set_nan_value(HeapNumber::cast(obj));
2350
2351   { MaybeObject* maybe_obj = AllocateHeapNumber(V8_INFINITY, TENURED);
2352     if (!maybe_obj->ToObject(&obj)) return false;
2353   }
2354   set_infinity_value(HeapNumber::cast(obj));
2355
2356   // The hole has not been created yet, but we want to put something
2357   // predictable in the gaps in the symbol table, so let's make that Smi zero.
2358   set_the_hole_value(reinterpret_cast<Oddball*>(Smi::FromInt(0)));
2359
2360   // Allocate initial symbol table.
2361   { MaybeObject* maybe_obj = SymbolTable::Allocate(kInitialSymbolTableSize);
2362     if (!maybe_obj->ToObject(&obj)) return false;
2363   }
2364   // Don't use set_symbol_table() due to asserts.
2365   roots_[kSymbolTableRootIndex] = obj;
2366
2367   // Finish initializing oddballs after creating the symbol table.
2368   { MaybeObject* maybe_obj =
2369         undefined_value()->Initialize("undefined",
2370                                       nan_value(),
2371                                       Oddball::kUndefined);
2372     if (!maybe_obj->ToObject(&obj)) return false;
2373   }
2374
2375   // Initialize the null_value.
2376   { MaybeObject* maybe_obj =
2377         null_value()->Initialize("null", Smi::FromInt(0), Oddball::kNull);
2378     if (!maybe_obj->ToObject(&obj)) return false;
2379   }
2380
2381   { MaybeObject* maybe_obj = CreateOddball("true",
2382                                            Smi::FromInt(1),
2383                                            Oddball::kTrue);
2384     if (!maybe_obj->ToObject(&obj)) return false;
2385   }
2386   set_true_value(Oddball::cast(obj));
2387
2388   { MaybeObject* maybe_obj = CreateOddball("false",
2389                                            Smi::FromInt(0),
2390                                            Oddball::kFalse);
2391     if (!maybe_obj->ToObject(&obj)) return false;
2392   }
2393   set_false_value(Oddball::cast(obj));
2394
2395   { MaybeObject* maybe_obj = CreateOddball("hole",
2396                                            Smi::FromInt(-1),
2397                                            Oddball::kTheHole);
2398     if (!maybe_obj->ToObject(&obj)) return false;
2399   }
2400   set_the_hole_value(Oddball::cast(obj));
2401
2402   { MaybeObject* maybe_obj = CreateOddball("arguments_marker",
2403                                            Smi::FromInt(-2),
2404                                            Oddball::kArgumentMarker);
2405     if (!maybe_obj->ToObject(&obj)) return false;
2406   }
2407   set_arguments_marker(Oddball::cast(obj));
2408
2409   { MaybeObject* maybe_obj = CreateOddball("no_interceptor_result_sentinel",
2410                                            Smi::FromInt(-3),
2411                                            Oddball::kOther);
2412     if (!maybe_obj->ToObject(&obj)) return false;
2413   }
2414   set_no_interceptor_result_sentinel(obj);
2415
2416   { MaybeObject* maybe_obj = CreateOddball("termination_exception",
2417                                            Smi::FromInt(-4),
2418                                            Oddball::kOther);
2419     if (!maybe_obj->ToObject(&obj)) return false;
2420   }
2421   set_termination_exception(obj);
2422
2423   { MaybeObject* maybe_obj = CreateOddball("frame_alignment_marker",
2424                                            Smi::FromInt(-5),
2425                                            Oddball::kOther);
2426     if (!maybe_obj->ToObject(&obj)) return false;
2427   }
2428   set_frame_alignment_marker(Oddball::cast(obj));
2429   STATIC_ASSERT(Oddball::kLeastHiddenOddballNumber == -5);
2430
2431   // Allocate the empty string.
2432   { MaybeObject* maybe_obj = AllocateRawAsciiString(0, TENURED);
2433     if (!maybe_obj->ToObject(&obj)) return false;
2434   }
2435   set_empty_string(String::cast(obj));
2436
2437   for (unsigned i = 0; i < ARRAY_SIZE(constant_symbol_table); i++) {
2438     { MaybeObject* maybe_obj =
2439           LookupAsciiSymbol(constant_symbol_table[i].contents);
2440       if (!maybe_obj->ToObject(&obj)) return false;
2441     }
2442     roots_[constant_symbol_table[i].index] = String::cast(obj);
2443   }
2444
2445   // Allocate the hidden symbol which is used to identify the hidden properties
2446   // in JSObjects. The hash code has a special value so that it will not match
2447   // the empty string when searching for the property. It cannot be part of the
2448   // loop above because it needs to be allocated manually with the special
2449   // hash code in place. The hash code for the hidden_symbol is zero to ensure
2450   // that it will always be at the first entry in property descriptors.
2451   { MaybeObject* maybe_obj =
2452         AllocateSymbol(CStrVector(""), 0, String::kZeroHash);
2453     if (!maybe_obj->ToObject(&obj)) return false;
2454   }
2455   hidden_symbol_ = String::cast(obj);
2456
2457   // Allocate the foreign for __proto__.
2458   { MaybeObject* maybe_obj =
2459         AllocateForeign((Address) &Accessors::ObjectPrototype);
2460     if (!maybe_obj->ToObject(&obj)) return false;
2461   }
2462   set_prototype_accessors(Foreign::cast(obj));
2463
2464   // Allocate the code_stubs dictionary. The initial size is set to avoid
2465   // expanding the dictionary during bootstrapping.
2466   { MaybeObject* maybe_obj = UnseededNumberDictionary::Allocate(128);
2467     if (!maybe_obj->ToObject(&obj)) return false;
2468   }
2469   set_code_stubs(UnseededNumberDictionary::cast(obj));
2470
2471
2472   // Allocate the non_monomorphic_cache used in stub-cache.cc. The initial size
2473   // is set to avoid expanding the dictionary during bootstrapping.
2474   { MaybeObject* maybe_obj = UnseededNumberDictionary::Allocate(64);
2475     if (!maybe_obj->ToObject(&obj)) return false;
2476   }
2477   set_non_monomorphic_cache(UnseededNumberDictionary::cast(obj));
2478
2479   { MaybeObject* maybe_obj = AllocatePolymorphicCodeCache();
2480     if (!maybe_obj->ToObject(&obj)) return false;
2481   }
2482   set_polymorphic_code_cache(PolymorphicCodeCache::cast(obj));
2483
2484   set_instanceof_cache_function(Smi::FromInt(0));
2485   set_instanceof_cache_map(Smi::FromInt(0));
2486   set_instanceof_cache_answer(Smi::FromInt(0));
2487
2488   CreateFixedStubs();
2489
2490   // Allocate the dictionary of intrinsic function names.
2491   { MaybeObject* maybe_obj = StringDictionary::Allocate(Runtime::kNumFunctions);
2492     if (!maybe_obj->ToObject(&obj)) return false;
2493   }
2494   { MaybeObject* maybe_obj = Runtime::InitializeIntrinsicFunctionNames(this,
2495                                                                        obj);
2496     if (!maybe_obj->ToObject(&obj)) return false;
2497   }
2498   set_intrinsic_function_names(StringDictionary::cast(obj));
2499
2500   if (InitializeNumberStringCache()->IsFailure()) return false;
2501
2502   // Allocate cache for single character ASCII strings.
2503   { MaybeObject* maybe_obj =
2504         AllocateFixedArray(String::kMaxAsciiCharCode + 1, TENURED);
2505     if (!maybe_obj->ToObject(&obj)) return false;
2506   }
2507   set_single_character_string_cache(FixedArray::cast(obj));
2508
2509   // Allocate cache for string split.
2510   { MaybeObject* maybe_obj =
2511         AllocateFixedArray(StringSplitCache::kStringSplitCacheSize, TENURED);
2512     if (!maybe_obj->ToObject(&obj)) return false;
2513   }
2514   set_string_split_cache(FixedArray::cast(obj));
2515
2516   // Allocate cache for external strings pointing to native source code.
2517   { MaybeObject* maybe_obj = AllocateFixedArray(Natives::GetBuiltinsCount());
2518     if (!maybe_obj->ToObject(&obj)) return false;
2519   }
2520   set_natives_source_cache(FixedArray::cast(obj));
2521
2522   // Handling of script id generation is in FACTORY->NewScript.
2523   set_last_script_id(undefined_value());
2524
2525   // Initialize keyed lookup cache.
2526   isolate_->keyed_lookup_cache()->Clear();
2527
2528   // Initialize context slot cache.
2529   isolate_->context_slot_cache()->Clear();
2530
2531   // Initialize descriptor cache.
2532   isolate_->descriptor_lookup_cache()->Clear();
2533
2534   // Initialize compilation cache.
2535   isolate_->compilation_cache()->Clear();
2536
2537   return true;
2538 }
2539
2540
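// The string split cache maps (string, pattern) symbol pairs to a previously
// computed result array.  Each entry occupies kArrayEntriesPerCacheEntry
// consecutive slots; lookups probe the entry selected by the string's hash
// and the immediately following entry.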
2541 Object* StringSplitCache::Lookup(
2542     FixedArray* cache, String* string, String* pattern) {
2543   if (!string->IsSymbol() || !pattern->IsSymbol()) return Smi::FromInt(0);
2544   uint32_t hash = string->Hash();
2545   uint32_t index = ((hash & (kStringSplitCacheSize - 1)) &
2546       ~(kArrayEntriesPerCacheEntry - 1));
2547   if (cache->get(index + kStringOffset) == string &&
2548       cache->get(index + kPatternOffset) == pattern) {
2549     return cache->get(index + kArrayOffset);
2550   }
2551   index = ((index + kArrayEntriesPerCacheEntry) & (kStringSplitCacheSize - 1));
2552   if (cache->get(index + kStringOffset) == string &&
2553       cache->get(index + kPatternOffset) == pattern) {
2554     return cache->get(index + kArrayOffset);
2555   }
2556   return Smi::FromInt(0);
2557 }
2558
2559
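// Inserts a result into the string split cache.  The primary slot is used if
// it is free, then the secondary slot; if both are occupied, the secondary
// slot is cleared and the primary slot is overwritten.  Result arrays with
// fewer than 100 elements have their strings converted to symbols, and the
// array is marked copy-on-write.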
2560 void StringSplitCache::Enter(Heap* heap,
2561                              FixedArray* cache,
2562                              String* string,
2563                              String* pattern,
2564                              FixedArray* array) {
2565   if (!string->IsSymbol() || !pattern->IsSymbol()) return;
2566   uint32_t hash = string->Hash();
2567   uint32_t index = ((hash & (kStringSplitCacheSize - 1)) &
2568       ~(kArrayEntriesPerCacheEntry - 1));
2569   if (cache->get(index + kStringOffset) == Smi::FromInt(0)) {
2570     cache->set(index + kStringOffset, string);
2571     cache->set(index + kPatternOffset, pattern);
2572     cache->set(index + kArrayOffset, array);
2573   } else {
2574     uint32_t index2 =
2575         ((index + kArrayEntriesPerCacheEntry) & (kStringSplitCacheSize - 1));
2576     if (cache->get(index2 + kStringOffset) == Smi::FromInt(0)) {
2577       cache->set(index2 + kStringOffset, string);
2578       cache->set(index2 + kPatternOffset, pattern);
2579       cache->set(index2 + kArrayOffset, array);
2580     } else {
2581       cache->set(index2 + kStringOffset, Smi::FromInt(0));
2582       cache->set(index2 + kPatternOffset, Smi::FromInt(0));
2583       cache->set(index2 + kArrayOffset, Smi::FromInt(0));
2584       cache->set(index + kStringOffset, string);
2585       cache->set(index + kPatternOffset, pattern);
2586       cache->set(index + kArrayOffset, array);
2587     }
2588   }
2589   if (array->length() < 100) {  // Limit how many new symbols we want to make.
2590     for (int i = 0; i < array->length(); i++) {
2591       String* str = String::cast(array->get(i));
2592       Object* symbol;
2593       MaybeObject* maybe_symbol = heap->LookupSymbol(str);
2594       if (maybe_symbol->ToObject(&symbol)) {
2595         array->set(i, symbol);
2596       }
2597     }
2598   }
2599   array->set_map_no_write_barrier(heap->fixed_cow_array_map());
2600 }
2601
2602
2603 void StringSplitCache::Clear(FixedArray* cache) {
2604   for (int i = 0; i < kStringSplitCacheSize; i++) {
2605     cache->set(i, Smi::FromInt(0));
2606   }
2607 }
2608
2609
2610 MaybeObject* Heap::InitializeNumberStringCache() {
2611   // Compute the size of the number string cache based on the max heap size.
2612   // max_semispace_size_ == 512 KB => number_string_cache_size = 32.
2613   // max_semispace_size_ ==   8 MB => number_string_cache_size = 16KB.
2614   int number_string_cache_size = max_semispace_size_ / 512;
2615   number_string_cache_size = Max(32, Min(16*KB, number_string_cache_size));
2616   Object* obj;
2617   MaybeObject* maybe_obj =
2618       AllocateFixedArray(number_string_cache_size * 2, TENURED);
2619   if (maybe_obj->ToObject(&obj)) set_number_string_cache(FixedArray::cast(obj));
2620   return maybe_obj;
2621 }
2622
2623
2624 void Heap::FlushNumberStringCache() {
2625   // Flush the number to string cache.
2626   int len = number_string_cache()->length();
2627   for (int i = 0; i < len; i++) {
2628     number_string_cache()->set_undefined(this, i);
2629   }
2630 }
2631
2632
2633 static inline int double_get_hash(double d) {
2634   DoubleRepresentation rep(d);
2635   return static_cast<int>(rep.bits) ^ static_cast<int>(rep.bits >> 32);
2636 }
2637
2638
2639 static inline int smi_get_hash(Smi* smi) {
2640   return smi->value();
2641 }
2642
2643
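// The number string cache is a flat FixedArray of (number, string) pairs:
// slot 2*hash holds the key and slot 2*hash+1 holds the cached string.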
2644 Object* Heap::GetNumberStringCache(Object* number) {
2645   int hash;
2646   int mask = (number_string_cache()->length() >> 1) - 1;
2647   if (number->IsSmi()) {
2648     hash = smi_get_hash(Smi::cast(number)) & mask;
2649   } else {
2650     hash = double_get_hash(number->Number()) & mask;
2651   }
2652   Object* key = number_string_cache()->get(hash * 2);
2653   if (key == number) {
2654     return String::cast(number_string_cache()->get(hash * 2 + 1));
2655   } else if (key->IsHeapNumber() &&
2656              number->IsHeapNumber() &&
2657              key->Number() == number->Number()) {
2658     return String::cast(number_string_cache()->get(hash * 2 + 1));
2659   }
2660   return undefined_value();
2661 }
2662
2663
2664 void Heap::SetNumberStringCache(Object* number, String* string) {
2665   int hash;
2666   int mask = (number_string_cache()->length() >> 1) - 1;
2667   if (number->IsSmi()) {
2668     hash = smi_get_hash(Smi::cast(number)) & mask;
2669     number_string_cache()->set(hash * 2, Smi::cast(number));
2670   } else {
2671     hash = double_get_hash(number->Number()) & mask;
2672     number_string_cache()->set(hash * 2, number);
2673   }
2674   number_string_cache()->set(hash * 2 + 1, string);
2675 }
2676
2677
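// Converts a Smi or HeapNumber to a string, consulting and updating the
// number string cache when requested.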
2678 MaybeObject* Heap::NumberToString(Object* number,
2679                                   bool check_number_string_cache) {
2680   isolate_->counters()->number_to_string_runtime()->Increment();
2681   if (check_number_string_cache) {
2682     Object* cached = GetNumberStringCache(number);
2683     if (cached != undefined_value()) {
2684       return cached;
2685     }
2686   }
2687
2688   char arr[100];
2689   Vector<char> buffer(arr, ARRAY_SIZE(arr));
2690   const char* str;
2691   if (number->IsSmi()) {
2692     int num = Smi::cast(number)->value();
2693     str = IntToCString(num, buffer);
2694   } else {
2695     double num = HeapNumber::cast(number)->value();
2696     str = DoubleToCString(num, buffer);
2697   }
2698
2699   Object* js_string;
2700   MaybeObject* maybe_js_string = AllocateStringFromAscii(CStrVector(str));
2701   if (maybe_js_string->ToObject(&js_string)) {
2702     SetNumberStringCache(number, String::cast(js_string));
2703   }
2704   return maybe_js_string;
2705 }
2706
2707
2708 MaybeObject* Heap::Uint32ToString(uint32_t value,
2709                                   bool check_number_string_cache) {
2710   Object* number;
2711   MaybeObject* maybe = NumberFromUint32(value);
2712   if (!maybe->To<Object>(&number)) return maybe;
2713   return NumberToString(number, check_number_string_cache);
2714 }
2715
2716
2717 Map* Heap::MapForExternalArrayType(ExternalArrayType array_type) {
2718   return Map::cast(roots_[RootIndexForExternalArrayType(array_type)]);
2719 }
2720
2721
2722 Heap::RootListIndex Heap::RootIndexForExternalArrayType(
2723     ExternalArrayType array_type) {
2724   switch (array_type) {
2725     case kExternalByteArray:
2726       return kExternalByteArrayMapRootIndex;
2727     case kExternalUnsignedByteArray:
2728       return kExternalUnsignedByteArrayMapRootIndex;
2729     case kExternalShortArray:
2730       return kExternalShortArrayMapRootIndex;
2731     case kExternalUnsignedShortArray:
2732       return kExternalUnsignedShortArrayMapRootIndex;
2733     case kExternalIntArray:
2734       return kExternalIntArrayMapRootIndex;
2735     case kExternalUnsignedIntArray:
2736       return kExternalUnsignedIntArrayMapRootIndex;
2737     case kExternalFloatArray:
2738       return kExternalFloatArrayMapRootIndex;
2739     case kExternalDoubleArray:
2740       return kExternalDoubleArrayMapRootIndex;
2741     case kExternalPixelArray:
2742       return kExternalPixelArrayMapRootIndex;
2743     default:
2744       UNREACHABLE();
2745       return kUndefinedValueRootIndex;
2746   }
2747 }
2748
2749
2750 MaybeObject* Heap::NumberFromDouble(double value, PretenureFlag pretenure) {
2751   // We need to distinguish the minus zero value and this cannot be
2752   // done after conversion to int. Doing this by comparing bit
2753   // patterns is faster than using fpclassify() et al.
2754   static const DoubleRepresentation minus_zero(-0.0);
2755
2756   DoubleRepresentation rep(value);
2757   if (rep.bits == minus_zero.bits) {
2758     return AllocateHeapNumber(-0.0, pretenure);
2759   }
2760
2761   int int_value = FastD2I(value);
2762   if (value == int_value && Smi::IsValid(int_value)) {
2763     return Smi::FromInt(int_value);
2764   }
2765
2766   // Materialize the value in the heap.
2767   return AllocateHeapNumber(value, pretenure);
2768 }
2769
2770
2771 MaybeObject* Heap::AllocateForeign(Address address, PretenureFlag pretenure) {
2772   // Statically ensure that it is safe to allocate foreigns in paged spaces.
2773   STATIC_ASSERT(Foreign::kSize <= Page::kMaxHeapObjectSize);
2774   AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
2775   Foreign* result;
2776   MaybeObject* maybe_result = Allocate(foreign_map(), space);
2777   if (!maybe_result->To(&result)) return maybe_result;
2778   result->set_foreign_address(address);
2779   return result;
2780 }
2781
2782
2783 MaybeObject* Heap::AllocateSharedFunctionInfo(Object* name) {
2784   SharedFunctionInfo* share;
2785   MaybeObject* maybe = Allocate(shared_function_info_map(), OLD_POINTER_SPACE);
2786   if (!maybe->To<SharedFunctionInfo>(&share)) return maybe;
2787
2788   // Set pointer fields.
2789   share->set_name(name);
2790   Code* illegal = isolate_->builtins()->builtin(Builtins::kIllegal);
2791   share->set_code(illegal);
2792   share->set_scope_info(ScopeInfo::Empty());
2793   Code* construct_stub =
2794       isolate_->builtins()->builtin(Builtins::kJSConstructStubGeneric);
2795   share->set_construct_stub(construct_stub);
2796   share->set_instance_class_name(Object_symbol());
2797   share->set_function_data(undefined_value(), SKIP_WRITE_BARRIER);
2798   share->set_script(undefined_value(), SKIP_WRITE_BARRIER);
2799   share->set_debug_info(undefined_value(), SKIP_WRITE_BARRIER);
2800   share->set_inferred_name(empty_string(), SKIP_WRITE_BARRIER);
2801   share->set_initial_map(undefined_value(), SKIP_WRITE_BARRIER);
2802   share->set_this_property_assignments(undefined_value(), SKIP_WRITE_BARRIER);
2803   share->set_deopt_counter(Smi::FromInt(FLAG_deopt_every_n_times));
2804
2805   // Set integer fields (smi or int, depending on the architecture).
2806   share->set_length(0);
2807   share->set_formal_parameter_count(0);
2808   share->set_expected_nof_properties(0);
2809   share->set_num_literals(0);
2810   share->set_start_position_and_type(0);
2811   share->set_end_position(0);
2812   share->set_function_token_position(0);
2813   // All compiler hints default to false or 0.
2814   share->set_compiler_hints(0);
2815   share->set_this_property_assignments_count(0);
2816   share->set_opt_count(0);
2817
2818   return share;
2819 }
2820
2821
2822 MaybeObject* Heap::AllocateJSMessageObject(String* type,
2823                                            JSArray* arguments,
2824                                            int start_position,
2825                                            int end_position,
2826                                            Object* script,
2827                                            Object* stack_trace,
2828                                            Object* stack_frames) {
2829   Object* result;
2830   { MaybeObject* maybe_result = Allocate(message_object_map(), NEW_SPACE);
2831     if (!maybe_result->ToObject(&result)) return maybe_result;
2832   }
2833   JSMessageObject* message = JSMessageObject::cast(result);
2834   message->set_properties(Heap::empty_fixed_array(), SKIP_WRITE_BARRIER);
2835   message->set_elements(Heap::empty_fixed_array(), SKIP_WRITE_BARRIER);
2836   message->set_type(type);
2837   message->set_arguments(arguments);
2838   message->set_start_position(start_position);
2839   message->set_end_position(end_position);
2840   message->set_script(script);
2841   message->set_stack_trace(stack_trace);
2842   message->set_stack_frames(stack_frames);
2843   return result;
2844 }
2845
2846
2847
2848 // Returns true for a character in a range.  Both limits are inclusive.
2849 static inline bool Between(uint32_t character, uint32_t from, uint32_t to) {
2850   // This makes use of the unsigned wraparound.
2851   return character - from <= to - from;
2852 }
2853
2854
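// Returns an existing two-character symbol if the symbol table already
// contains one for (c1, c2); otherwise allocates a fresh sequential ASCII or
// two-byte string of length two.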
2855 MUST_USE_RESULT static inline MaybeObject* MakeOrFindTwoCharacterString(
2856     Heap* heap,
2857     uint32_t c1,
2858     uint32_t c2) {
2859   String* symbol;
2860   // Numeric strings have a different hash algorithm not known by
2861   // LookupTwoCharsSymbolIfExists, so we skip this step for such strings.
2862   if ((!Between(c1, '0', '9') || !Between(c2, '0', '9')) &&
2863       heap->symbol_table()->LookupTwoCharsSymbolIfExists(c1, c2, &symbol)) {
2864     return symbol;
2865   // Now we know the length is 2, we might as well make use of that fact
2866   // when building the new string.
2867   } else if ((c1 | c2) <= String::kMaxAsciiCharCodeU) {  // We can do this
2868     ASSERT(IsPowerOf2(String::kMaxAsciiCharCodeU + 1));  // because of this.
2869     Object* result;
2870     { MaybeObject* maybe_result = heap->AllocateRawAsciiString(2);
2871       if (!maybe_result->ToObject(&result)) return maybe_result;
2872     }
2873     char* dest = SeqAsciiString::cast(result)->GetChars();
2874     dest[0] = c1;
2875     dest[1] = c2;
2876     return result;
2877   } else {
2878     Object* result;
2879     { MaybeObject* maybe_result = heap->AllocateRawTwoByteString(2);
2880       if (!maybe_result->ToObject(&result)) return maybe_result;
2881     }
2882     uc16* dest = SeqTwoByteString::cast(result)->GetChars();
2883     dest[0] = c1;
2884     dest[1] = c2;
2885     return result;
2886   }
2887 }
2888
2889
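// Concatenates two strings.  Trivial cases return one of the inputs,
// two-character results are looked up in the symbol table, short results are
// flattened eagerly, and longer results are represented as a ConsString.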
2890 MaybeObject* Heap::AllocateConsString(String* first, String* second) {
2891   int first_length = first->length();
2892   if (first_length == 0) {
2893     return second;
2894   }
2895
2896   int second_length = second->length();
2897   if (second_length == 0) {
2898     return first;
2899   }
2900
2901   int length = first_length + second_length;
2902
2903   // Optimization for 2-byte strings often used as keys in a decompression
2904   // dictionary.  Check whether we already have the string in the symbol
2905   // table to prevent creation of many unnecessary strings.
2906   if (length == 2) {
2907     unsigned c1 = first->Get(0);
2908     unsigned c2 = second->Get(0);
2909     return MakeOrFindTwoCharacterString(this, c1, c2);
2910   }
2911
2912   bool first_is_ascii = first->IsAsciiRepresentation();
2913   bool second_is_ascii = second->IsAsciiRepresentation();
2914   bool is_ascii = first_is_ascii && second_is_ascii;
2915
2916   // Make sure that an out of memory exception is thrown if the length
2917   // of the new cons string is too large.
2918   if (length > String::kMaxLength || length < 0) {
2919     isolate()->context()->mark_out_of_memory();
2920     return Failure::OutOfMemoryException();
2921   }
2922
2923   bool is_ascii_data_in_two_byte_string = false;
2924   if (!is_ascii) {
2925     // At least one of the strings uses two-byte representation so we
2926     // can't use the fast case code for short ascii strings below, but
2927     // we can try to save memory if all chars actually fit in ascii.
2928     is_ascii_data_in_two_byte_string =
2929         first->HasOnlyAsciiChars() && second->HasOnlyAsciiChars();
2930     if (is_ascii_data_in_two_byte_string) {
2931       isolate_->counters()->string_add_runtime_ext_to_ascii()->Increment();
2932     }
2933   }
2934
2935   // If the resulting string is small make a flat string.
2936   if (length < String::kMinNonFlatLength) {
2937     // Note that neither of the two inputs can be a slice because:
2938     STATIC_ASSERT(String::kMinNonFlatLength <= SlicedString::kMinLength);
2939     ASSERT(first->IsFlat());
2940     ASSERT(second->IsFlat());
2941     if (is_ascii) {
2942       Object* result;
2943       { MaybeObject* maybe_result = AllocateRawAsciiString(length);
2944         if (!maybe_result->ToObject(&result)) return maybe_result;
2945       }
2946       // Copy the characters into the new object.
2947       char* dest = SeqAsciiString::cast(result)->GetChars();
2948       // Copy first part.
2949       const char* src;
2950       if (first->IsExternalString()) {
2951         src = ExternalAsciiString::cast(first)->GetChars();
2952       } else {
2953         src = SeqAsciiString::cast(first)->GetChars();
2954       }
2955       for (int i = 0; i < first_length; i++) *dest++ = src[i];
2956       // Copy second part.
2957       if (second->IsExternalString()) {
2958         src = ExternalAsciiString::cast(second)->GetChars();
2959       } else {
2960         src = SeqAsciiString::cast(second)->GetChars();
2961       }
2962       for (int i = 0; i < second_length; i++) *dest++ = src[i];
2963       return result;
2964     } else {
2965       if (is_ascii_data_in_two_byte_string) {
2966         Object* result;
2967         { MaybeObject* maybe_result = AllocateRawAsciiString(length);
2968           if (!maybe_result->ToObject(&result)) return maybe_result;
2969         }
2970         // Copy the characters into the new object.
2971         char* dest = SeqAsciiString::cast(result)->GetChars();
2972         String::WriteToFlat(first, dest, 0, first_length);
2973         String::WriteToFlat(second, dest + first_length, 0, second_length);
2974         isolate_->counters()->string_add_runtime_ext_to_ascii()->Increment();
2975         return result;
2976       }
2977
2978       Object* result;
2979       { MaybeObject* maybe_result = AllocateRawTwoByteString(length);
2980         if (!maybe_result->ToObject(&result)) return maybe_result;
2981       }
2982       // Copy the characters into the new object.
2983       uc16* dest = SeqTwoByteString::cast(result)->GetChars();
2984       String::WriteToFlat(first, dest, 0, first_length);
2985       String::WriteToFlat(second, dest + first_length, 0, second_length);
2986       return result;
2987     }
2988   }
2989
2990   Map* map = (is_ascii || is_ascii_data_in_two_byte_string) ?
2991       cons_ascii_string_map() : cons_string_map();
2992
2993   Object* result;
2994   { MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
2995     if (!maybe_result->ToObject(&result)) return maybe_result;
2996   }
2997
2998   AssertNoAllocation no_gc;
2999   ConsString* cons_string = ConsString::cast(result);
3000   WriteBarrierMode mode = cons_string->GetWriteBarrierMode(no_gc);
3001   cons_string->set_length(length);
3002   cons_string->set_hash_field(String::kEmptyHashField);
3003   cons_string->set_first(first, mode);
3004   cons_string->set_second(second, mode);
3005   return result;
3006 }
3007
3008
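// Returns the substring [start, end) of |buffer|. Very short results reuse
// cached single-character strings or the symbol table; longer results are
// either copied into a fresh sequential string or, when string slices are
// enabled, the buffer is flat, and the result is not tenured, represented as
// a SlicedString sharing the parent's characters.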
3009 MaybeObject* Heap::AllocateSubString(String* buffer,
3010                                      int start,
3011                                      int end,
3012                                      PretenureFlag pretenure) {
3013   int length = end - start;
3014   if (length == 0) {
3015     return empty_string();
3016   } else if (length == 1) {
3017     return LookupSingleCharacterStringFromCode(buffer->Get(start));
3018   } else if (length == 2) {
3019     // Optimization for 2-byte strings often used as keys in a decompression
3020     // dictionary.  Check whether we already have the string in the symbol
3021   // table to prevent creation of many unnecessary strings.
3022     unsigned c1 = buffer->Get(start);
3023     unsigned c2 = buffer->Get(start + 1);
3024     return MakeOrFindTwoCharacterString(this, c1, c2);
3025   }
3026
3027   // Make an attempt to flatten the buffer to reduce access time.
3028   buffer = buffer->TryFlattenGetString();
3029
3030   if (!FLAG_string_slices ||
3031       !buffer->IsFlat() ||
3032       length < SlicedString::kMinLength ||
3033       pretenure == TENURED) {
3034     Object* result;
3035     // WriteToFlat takes care of the case when an indirect string has a
3036     // different encoding from its underlying string.  These encodings may
3037     // differ because of externalization.
3038     bool is_ascii = buffer->IsAsciiRepresentation();
3039     { MaybeObject* maybe_result = is_ascii
3040                                   ? AllocateRawAsciiString(length, pretenure)
3041                                   : AllocateRawTwoByteString(length, pretenure);
3042       if (!maybe_result->ToObject(&result)) return maybe_result;
3043     }
3044     String* string_result = String::cast(result);
3045     // Copy the characters into the new object.
3046     if (is_ascii) {
3047       ASSERT(string_result->IsAsciiRepresentation());
3048       char* dest = SeqAsciiString::cast(string_result)->GetChars();
3049       String::WriteToFlat(buffer, dest, start, end);
3050     } else {
3051       ASSERT(string_result->IsTwoByteRepresentation());
3052       uc16* dest = SeqTwoByteString::cast(string_result)->GetChars();
3053       String::WriteToFlat(buffer, dest, start, end);
3054     }
3055     return result;
3056   }
3057
3058   ASSERT(buffer->IsFlat());
3059 #ifdef DEBUG
3060   if (FLAG_verify_heap) {
3061     buffer->StringVerify();
3062   }
3063 #endif
3064
3065   Object* result;
3066   // When slicing an indirect string we use its encoding for a newly created
3067   // slice and don't check the encoding of the underlying string.  This is safe
3068   // even if the encodings are different because of externalization.  If an
3069   // indirect ASCII string is pointing to a two-byte string, the two-byte char
3070   // codes of the underlying string must still fit into ASCII (because
3071   // externalization must not change char codes).
3072   { Map* map = buffer->IsAsciiRepresentation()
3073                  ? sliced_ascii_string_map()
3074                  : sliced_string_map();
3075     MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
3076     if (!maybe_result->ToObject(&result)) return maybe_result;
3077   }
3078
3079   AssertNoAllocation no_gc;
3080   SlicedString* sliced_string = SlicedString::cast(result);
3081   sliced_string->set_length(length);
3082   sliced_string->set_hash_field(String::kEmptyHashField);
3083   if (buffer->IsConsString()) {
3084     ConsString* cons = ConsString::cast(buffer);
3085     ASSERT(cons->second()->length() == 0);
3086     sliced_string->set_parent(cons->first());
3087     sliced_string->set_offset(start);
3088   } else if (buffer->IsSlicedString()) {
3089     // Prevent nesting sliced strings.
3090     SlicedString* parent_slice = SlicedString::cast(buffer);
3091     sliced_string->set_parent(parent_slice->parent());
3092     sliced_string->set_offset(start + parent_slice->offset());
3093   } else {
3094     sliced_string->set_parent(buffer);
3095     sliced_string->set_offset(start);
3096   }
3097   ASSERT(sliced_string->parent()->IsSeqString() ||
3098          sliced_string->parent()->IsExternalString());
3099   return result;
3100 }
3101
3102
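// Wraps an externally managed ASCII resource in an ExternalAsciiString; only
// the small string header is allocated on the V8 heap, the character data
// stays in the resource.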
3103 MaybeObject* Heap::AllocateExternalStringFromAscii(
3104     const ExternalAsciiString::Resource* resource) {
3105   size_t length = resource->length();
3106   if (length > static_cast<size_t>(String::kMaxLength)) {
3107     isolate()->context()->mark_out_of_memory();
3108     return Failure::OutOfMemoryException();
3109   }
3110
3111   Map* map = external_ascii_string_map();
3112   Object* result;
3113   { MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
3114     if (!maybe_result->ToObject(&result)) return maybe_result;
3115   }
3116
3117   ExternalAsciiString* external_string = ExternalAsciiString::cast(result);
3118   external_string->set_length(static_cast<int>(length));
3119   external_string->set_hash_field(String::kEmptyHashField);
3120   external_string->set_resource(resource);
3121
3122   return result;
3123 }
3124
3125
3126 MaybeObject* Heap::AllocateExternalStringFromTwoByte(
3127     const ExternalTwoByteString::Resource* resource) {
3128   size_t length = resource->length();
3129   if (length > static_cast<size_t>(String::kMaxLength)) {
3130     isolate()->context()->mark_out_of_memory();
3131     return Failure::OutOfMemoryException();
3132   }
3133
3134   // For small strings we check whether the resource contains only
3135   // ASCII characters.  If so, we use a different string map.
3136   static const size_t kAsciiCheckLengthLimit = 32;
3137   bool is_ascii = length <= kAsciiCheckLengthLimit &&
3138       String::IsAscii(resource->data(), static_cast<int>(length));
3139   Map* map = is_ascii ?
3140       external_string_with_ascii_data_map() : external_string_map();
3141   Object* result;
3142   { MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
3143     if (!maybe_result->ToObject(&result)) return maybe_result;
3144   }
3145
3146   ExternalTwoByteString* external_string = ExternalTwoByteString::cast(result);
3147   external_string->set_length(static_cast<int>(length));
3148   external_string->set_hash_field(String::kEmptyHashField);
3149   external_string->set_resource(resource);
3150
3151   return result;
3152 }
3153
3154
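// Returns a string of length one for the given code unit. ASCII code units
// are served from (and added to) the single-character string cache; other
// code units get a freshly allocated two-byte string.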
3155 MaybeObject* Heap::LookupSingleCharacterStringFromCode(uint16_t code) {
3156   if (code <= String::kMaxAsciiCharCode) {
3157     Object* value = single_character_string_cache()->get(code);
3158     if (value != undefined_value()) return value;
3159
3160     char buffer[1];
3161     buffer[0] = static_cast<char>(code);
3162     Object* result;
3163     MaybeObject* maybe_result = LookupSymbol(Vector<const char>(buffer, 1));
3164
3165     if (!maybe_result->ToObject(&result)) return maybe_result;
3166     single_character_string_cache()->set(code, result);
3167     return result;
3168   }
3169
3170   Object* result;
3171   { MaybeObject* maybe_result = AllocateRawTwoByteString(1);
3172     if (!maybe_result->ToObject(&result)) return maybe_result;
3173   }
3174   String* answer = String::cast(result);
3175   answer->Set(0, code);
3176   return answer;
3177 }
3178
3179
3180 MaybeObject* Heap::AllocateByteArray(int length, PretenureFlag pretenure) {
3181   if (length < 0 || length > ByteArray::kMaxLength) {
3182     return Failure::OutOfMemoryException();
3183   }
3184   if (pretenure == NOT_TENURED) {
3185     return AllocateByteArray(length);
3186   }
3187   int size = ByteArray::SizeFor(length);
3188   Object* result;
3189   { MaybeObject* maybe_result = (size <= MaxObjectSizeInPagedSpace())
3190                    ? old_data_space_->AllocateRaw(size)
3191                    : lo_space_->AllocateRaw(size, NOT_EXECUTABLE);
3192     if (!maybe_result->ToObject(&result)) return maybe_result;
3193   }
3194
3195   reinterpret_cast<ByteArray*>(result)->set_map_no_write_barrier(
3196       byte_array_map());
3197   reinterpret_cast<ByteArray*>(result)->set_length(length);
3198   return result;
3199 }
3200
3201
3202 MaybeObject* Heap::AllocateByteArray(int length) {
3203   if (length < 0 || length > ByteArray::kMaxLength) {
3204     return Failure::OutOfMemoryException();
3205   }
3206   int size = ByteArray::SizeFor(length);
3207   AllocationSpace space =
3208       (size > MaxObjectSizeInPagedSpace()) ? LO_SPACE : NEW_SPACE;
3209   Object* result;
3210   { MaybeObject* maybe_result = AllocateRaw(size, space, OLD_DATA_SPACE);
3211     if (!maybe_result->ToObject(&result)) return maybe_result;
3212   }
3213
3214   reinterpret_cast<ByteArray*>(result)->set_map_no_write_barrier(
3215       byte_array_map());
3216   reinterpret_cast<ByteArray*>(result)->set_length(length);
3217   return result;
3218 }
3219
3220
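// Overwrites the region [addr, addr + size) with a filler object so that the
// heap stays iterable: one- and two-pointer holes use dedicated filler maps,
// larger holes become FreeSpace objects carrying their size.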
3221 void Heap::CreateFillerObjectAt(Address addr, int size) {
3222   if (size == 0) return;
3223   HeapObject* filler = HeapObject::FromAddress(addr);
3224   if (size == kPointerSize) {
3225     filler->set_map_no_write_barrier(one_pointer_filler_map());
3226   } else if (size == 2 * kPointerSize) {
3227     filler->set_map_no_write_barrier(two_pointer_filler_map());
3228   } else {
3229     filler->set_map_no_write_barrier(free_space_map());
3230     FreeSpace::cast(filler)->set_size(size);
3231   }
3232 }
3233
3234
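// Allocates only the ExternalArray header; the backing store is provided by
// the caller through |external_pointer| and is not managed by the GC.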
3235 MaybeObject* Heap::AllocateExternalArray(int length,
3236                                          ExternalArrayType array_type,
3237                                          void* external_pointer,
3238                                          PretenureFlag pretenure) {
3239   AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
3240   Object* result;
3241   { MaybeObject* maybe_result = AllocateRaw(ExternalArray::kAlignedSize,
3242                                             space,
3243                                             OLD_DATA_SPACE);
3244     if (!maybe_result->ToObject(&result)) return maybe_result;
3245   }
3246
3247   reinterpret_cast<ExternalArray*>(result)->set_map_no_write_barrier(
3248       MapForExternalArrayType(array_type));
3249   reinterpret_cast<ExternalArray*>(result)->set_length(length);
3250   reinterpret_cast<ExternalArray*>(result)->set_external_pointer(
3251       external_pointer);
3252
3253   return result;
3254 }
3255
3256
3257 MaybeObject* Heap::CreateCode(const CodeDesc& desc,
3258                               Code::Flags flags,
3259                               Handle<Object> self_reference,
3260                               bool immovable) {
3261   // Allocate ByteArray before the Code object, so that we do not risk
3262   // leaving an uninitialized Code object (and breaking the heap).
3263   ByteArray* reloc_info;
3264   MaybeObject* maybe_reloc_info = AllocateByteArray(desc.reloc_size, TENURED);
3265   if (!maybe_reloc_info->To(&reloc_info)) return maybe_reloc_info;
3266
3267   // Compute size.
3268   int body_size = RoundUp(desc.instr_size, kObjectAlignment);
3269   int obj_size = Code::SizeFor(body_size);
3270   ASSERT(IsAligned(static_cast<intptr_t>(obj_size), kCodeAlignment));
3271   MaybeObject* maybe_result;
3272   // Large code objects and code objects which should stay at a fixed address
3273   // are allocated in large object space.
3274   if (obj_size > MaxObjectSizeInPagedSpace() || immovable) {
3275     maybe_result = lo_space_->AllocateRaw(obj_size, EXECUTABLE);
3276   } else {
3277     maybe_result = code_space_->AllocateRaw(obj_size);
3278   }
3279
3280   Object* result;
3281   if (!maybe_result->ToObject(&result)) return maybe_result;
3282
3283   // Initialize the object
3284   HeapObject::cast(result)->set_map_no_write_barrier(code_map());
3285   Code* code = Code::cast(result);
3286   ASSERT(!isolate_->code_range()->exists() ||
3287       isolate_->code_range()->contains(code->address()));
3288   code->set_instruction_size(desc.instr_size);
3289   code->set_relocation_info(reloc_info);
3290   code->set_flags(flags);
3291   if (code->is_call_stub() || code->is_keyed_call_stub()) {
3292     code->set_check_type(RECEIVER_MAP_CHECK);
3293   }
3294   code->set_deoptimization_data(empty_fixed_array(), SKIP_WRITE_BARRIER);
3295   code->set_handler_table(empty_fixed_array(), SKIP_WRITE_BARRIER);
3296   code->set_next_code_flushing_candidate(undefined_value());
3297   // Allow self references to the created code object by patching the handle
3298   // to point to the newly allocated Code object.
3299   if (!self_reference.is_null()) {
3300     *(self_reference.location()) = code;
3301   }
3302   // Migrate generated code.
3303   // The generated code can contain Object** values (typically from handles)
3304   // that are dereferenced during the copy to point directly to the actual heap
3305   // objects. These pointers can include references to the code object itself,
3306   // through the self_reference parameter.
3307   code->CopyFrom(desc);
3308
3309 #ifdef DEBUG
3310   if (FLAG_verify_heap) {
3311     code->Verify();
3312   }
3313 #endif
3314   return code;
3315 }
3316
3317
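// Makes a byte-for-byte copy of |code| in code space (or large object space
// for oversized code objects) and relocates the copy to its new address.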
3318 MaybeObject* Heap::CopyCode(Code* code) {
3319   // Allocate an object the same size as the code object.
3320   int obj_size = code->Size();
3321   MaybeObject* maybe_result;
3322   if (obj_size > MaxObjectSizeInPagedSpace()) {
3323     maybe_result = lo_space_->AllocateRaw(obj_size, EXECUTABLE);
3324   } else {
3325     maybe_result = code_space_->AllocateRaw(obj_size);
3326   }
3327
3328   Object* result;
3329   if (!maybe_result->ToObject(&result)) return maybe_result;
3330
3331   // Copy code object.
3332   Address old_addr = code->address();
3333   Address new_addr = reinterpret_cast<HeapObject*>(result)->address();
3334   CopyBlock(new_addr, old_addr, obj_size);
3335   // Relocate the copy.
3336   Code* new_code = Code::cast(result);
3337   ASSERT(!isolate_->code_range()->exists() ||
3338       isolate_->code_range()->contains(code->address()));
3339   new_code->Relocate(new_addr - old_addr);
3340   return new_code;
3341 }
3342
3343
3344 MaybeObject* Heap::CopyCode(Code* code, Vector<byte> reloc_info) {
3345   // Allocate ByteArray before the Code object, so that we do not risk
3346   // leaving an uninitialized Code object (and breaking the heap).
3347   Object* reloc_info_array;
3348   { MaybeObject* maybe_reloc_info_array =
3349         AllocateByteArray(reloc_info.length(), TENURED);
3350     if (!maybe_reloc_info_array->ToObject(&reloc_info_array)) {
3351       return maybe_reloc_info_array;
3352     }
3353   }
3354
3355   int new_body_size = RoundUp(code->instruction_size(), kObjectAlignment);
3356
3357   int new_obj_size = Code::SizeFor(new_body_size);
3358
3359   Address old_addr = code->address();
3360
3361   size_t relocation_offset =
3362       static_cast<size_t>(code->instruction_end() - old_addr);
3363
3364   MaybeObject* maybe_result;
3365   if (new_obj_size > MaxObjectSizeInPagedSpace()) {
3366     maybe_result = lo_space_->AllocateRaw(new_obj_size, EXECUTABLE);
3367   } else {
3368     maybe_result = code_space_->AllocateRaw(new_obj_size);
3369   }
3370
3371   Object* result;
3372   if (!maybe_result->ToObject(&result)) return maybe_result;
3373
3374   // Copy code object.
3375   Address new_addr = reinterpret_cast<HeapObject*>(result)->address();
3376
3377   // Copy header and instructions.
3378   memcpy(new_addr, old_addr, relocation_offset);
3379
3380   Code* new_code = Code::cast(result);
3381   new_code->set_relocation_info(ByteArray::cast(reloc_info_array));
3382
3383   // Copy patched rinfo.
3384   memcpy(new_code->relocation_start(), reloc_info.start(), reloc_info.length());
3385
3386   // Relocate the copy.
3387   ASSERT(!isolate_->code_range()->exists() ||
3388       isolate_->code_range()->contains(code->address()));
3389   new_code->Relocate(new_addr - old_addr);
3390
3391 #ifdef DEBUG
3392   if (FLAG_verify_heap) {
3393     new_code->Verify();
3394   }
3395 #endif
3396   return new_code;
3397 }
3398
3399
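// Allocates an uninitialized object of map->instance_size() bytes in the
// requested space and installs |map|; the caller is responsible for
// initializing the object's body.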
3400 MaybeObject* Heap::Allocate(Map* map, AllocationSpace space) {
3401   ASSERT(gc_state_ == NOT_IN_GC);
3402   ASSERT(map->instance_type() != MAP_TYPE);
3403   // If allocation failures are disallowed, we may allocate in a different
3404   // space when new space is full and the object is not a large object.
3405   AllocationSpace retry_space =
3406       (space != NEW_SPACE) ? space : TargetSpaceId(map->instance_type());
3407   Object* result;
3408   { MaybeObject* maybe_result =
3409         AllocateRaw(map->instance_size(), space, retry_space);
3410     if (!maybe_result->ToObject(&result)) return maybe_result;
3411   }
3412   // No need for write barrier since object is white and map is in old space.
3413   HeapObject::cast(result)->set_map_no_write_barrier(map);
3414   return result;
3415 }
3416
3417
3418 void Heap::InitializeFunction(JSFunction* function,
3419                               SharedFunctionInfo* shared,
3420                               Object* prototype) {
3421   ASSERT(!prototype->IsMap());
3422   function->initialize_properties();
3423   function->initialize_elements();
3424   function->set_shared(shared);
3425   function->set_code(shared->code());
3426   function->set_prototype_or_initial_map(prototype);
3427   function->set_context(undefined_value());
3428   function->set_literals_or_bindings(empty_fixed_array());
3429   function->set_next_function_link(undefined_value());
3430 }
3431
3432
3433 MaybeObject* Heap::AllocateFunctionPrototype(JSFunction* function) {
3434   // Allocate the prototype.  Make sure to use the object function
3435   // from the function's context, since the function can be from a
3436   // different context.
3437   JSFunction* object_function =
3438       function->context()->global_context()->object_function();
3439
3440   // Each function prototype gets a copy of the object function map.
3441   // This avoids unwanted sharing of maps between prototypes of different
3442   // constructors.
3443   Map* new_map;
3444   ASSERT(object_function->has_initial_map());
3445   { MaybeObject* maybe_map =
3446         object_function->initial_map()->CopyDropTransitions();
3447     if (!maybe_map->To<Map>(&new_map)) return maybe_map;
3448   }
3449   Object* prototype;
3450   { MaybeObject* maybe_prototype = AllocateJSObjectFromMap(new_map);
3451     if (!maybe_prototype->ToObject(&prototype)) return maybe_prototype;
3452   }
3453   // When creating the prototype for the function we must set its
3454   // constructor to the function.
3455   Object* result;
3456   { MaybeObject* maybe_result =
3457         JSObject::cast(prototype)->SetLocalPropertyIgnoreAttributes(
3458             constructor_symbol(), function, DONT_ENUM);
3459     if (!maybe_result->ToObject(&result)) return maybe_result;
3460   }
3461   return prototype;
3462 }
3463
3464
3465 MaybeObject* Heap::AllocateFunction(Map* function_map,
3466                                     SharedFunctionInfo* shared,
3467                                     Object* prototype,
3468                                     PretenureFlag pretenure) {
3469   AllocationSpace space =
3470       (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
3471   Object* result;
3472   { MaybeObject* maybe_result = Allocate(function_map, space);
3473     if (!maybe_result->ToObject(&result)) return maybe_result;
3474   }
3475   InitializeFunction(JSFunction::cast(result), shared, prototype);
3476   return result;
3477 }
3478
3479
3480 MaybeObject* Heap::AllocateArgumentsObject(Object* callee, int length) {
3481   // To get fast allocation and map sharing for arguments objects we
3482   // allocate them based on an arguments boilerplate.
3483
3484   JSObject* boilerplate;
3485   int arguments_object_size;
3486   bool strict_mode_callee = callee->IsJSFunction() &&
3487       !JSFunction::cast(callee)->shared()->is_classic_mode();
3488   if (strict_mode_callee) {
3489     boilerplate =
3490         isolate()->context()->global_context()->
3491             strict_mode_arguments_boilerplate();
3492     arguments_object_size = kArgumentsObjectSizeStrict;
3493   } else {
3494     boilerplate =
3495         isolate()->context()->global_context()->arguments_boilerplate();
3496     arguments_object_size = kArgumentsObjectSize;
3497   }
3498
3499   // This calls Copy directly rather than using Heap::AllocateRaw so we
3500   // duplicate the check here.
3501   ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
3502
3503   // Check that the size of the boilerplate matches our
3504   // expectations. The ArgumentsAccessStub::GenerateNewObject relies
3505   // on the size being a known constant.
3506   ASSERT(arguments_object_size == boilerplate->map()->instance_size());
3507
3508   // Do the allocation.
3509   Object* result;
3510   { MaybeObject* maybe_result =
3511         AllocateRaw(arguments_object_size, NEW_SPACE, OLD_POINTER_SPACE);
3512     if (!maybe_result->ToObject(&result)) return maybe_result;
3513   }
3514
3515   // Copy the content. The arguments boilerplate doesn't have any
3516   // fields that point to new space so it's safe to skip the write
3517   // barrier here.
3518   CopyBlock(HeapObject::cast(result)->address(),
3519             boilerplate->address(),
3520             JSObject::kHeaderSize);
3521
3522   // Set the length property.
3523   JSObject::cast(result)->InObjectPropertyAtPut(kArgumentsLengthIndex,
3524                                                 Smi::FromInt(length),
3525                                                 SKIP_WRITE_BARRIER);
3526   // Set the callee property for non-strict mode arguments object only.
3527   if (!strict_mode_callee) {
3528     JSObject::cast(result)->InObjectPropertyAtPut(kArgumentsCalleeIndex,
3529                                                   callee);
3530   }
3531
3532   // Check the state of the object
3533   ASSERT(JSObject::cast(result)->HasFastProperties());
3534   ASSERT(JSObject::cast(result)->HasFastElements());
3535
3536   return result;
3537 }
3538
3539
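// Assumes |descriptors| is sorted by key, so equal keys are adjacent and a
// single linear pass suffices (the caller below sorts before calling this).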
3540 static bool HasDuplicates(DescriptorArray* descriptors) {
3541   int count = descriptors->number_of_descriptors();
3542   if (count > 1) {
3543     String* prev_key = descriptors->GetKey(0);
3544     for (int i = 1; i != count; i++) {
3545       String* current_key = descriptors->GetKey(i);
3546       if (prev_key == current_key) return true;
3547       prev_key = current_key;
3548     }
3549   }
3550   return false;
3551 }
3552
3553
3554 MaybeObject* Heap::AllocateInitialMap(JSFunction* fun) {
3555   ASSERT(!fun->has_initial_map());
3556
3557   // First create a new map with the size and number of in-object properties
3558   // suggested by the function.
3559   int instance_size = fun->shared()->CalculateInstanceSize();
3560   int in_object_properties = fun->shared()->CalculateInObjectProperties();
3561   Object* map_obj;
3562   { MaybeObject* maybe_map_obj = AllocateMap(JS_OBJECT_TYPE, instance_size);
3563     if (!maybe_map_obj->ToObject(&map_obj)) return maybe_map_obj;
3564   }
3565
3566   // Fetch or allocate prototype.
3567   Object* prototype;
3568   if (fun->has_instance_prototype()) {
3569     prototype = fun->instance_prototype();
3570   } else {
3571     { MaybeObject* maybe_prototype = AllocateFunctionPrototype(fun);
3572       if (!maybe_prototype->ToObject(&prototype)) return maybe_prototype;
3573     }
3574   }
3575   Map* map = Map::cast(map_obj);
3576   map->set_inobject_properties(in_object_properties);
3577   map->set_unused_property_fields(in_object_properties);
3578   map->set_prototype(prototype);
3579   ASSERT(map->has_fast_elements());
3580
3581   // If the function has only simple this property assignments, add
3582   // field descriptors for these to the initial map as the object
3583   // cannot be constructed without having these properties.  Guard by
3584   // the inline_new flag so we only change the map if we generate a
3585   // specialized construct stub.
3586   ASSERT(in_object_properties <= Map::kMaxPreAllocatedPropertyFields);
3587   if (fun->shared()->CanGenerateInlineConstructor(prototype)) {
3588     int count = fun->shared()->this_property_assignments_count();
3589     if (count > in_object_properties) {
3590       // Inline constructor can only handle inobject properties.
3591       fun->shared()->ForbidInlineConstructor();
3592     } else {
3593       DescriptorArray* descriptors;
3594       { MaybeObject* maybe_descriptors_obj = DescriptorArray::Allocate(count);
3595         if (!maybe_descriptors_obj->To<DescriptorArray>(&descriptors)) {
3596           return maybe_descriptors_obj;
3597         }
3598       }
3599       DescriptorArray::WhitenessWitness witness(descriptors);
3600       for (int i = 0; i < count; i++) {
3601         String* name = fun->shared()->GetThisPropertyAssignmentName(i);
3602         ASSERT(name->IsSymbol());
3603         FieldDescriptor field(name, i, NONE);
3604         field.SetEnumerationIndex(i);
3605         descriptors->Set(i, &field, witness);
3606       }
3607       descriptors->SetNextEnumerationIndex(count);
3608       descriptors->SortUnchecked(witness);
3609
3610       // The descriptors may contain duplicates because the compiler does not
3611       // guarantee the uniqueness of property names (it would have required
3612       // quadratic time). Once the descriptors are sorted we can check for
3613       // duplicates in linear time.
3614       if (HasDuplicates(descriptors)) {
3615         fun->shared()->ForbidInlineConstructor();
3616       } else {
3617         map->set_instance_descriptors(descriptors);
3618         map->set_pre_allocated_property_fields(count);
3619         map->set_unused_property_fields(in_object_properties - count);
3620       }
3621     }
3622   }
3623
3624   fun->shared()->StartInobjectSlackTracking(map);
3625
3626   return map;
3627 }
3628
3629
3630 void Heap::InitializeJSObjectFromMap(JSObject* obj,
3631                                      FixedArray* properties,
3632                                      Map* map) {
3633   obj->set_properties(properties);
3634   obj->initialize_elements();
3635   // TODO(1240798): Initialize the object's body using valid initial values
3636   // according to the object's initial map.  For example, if the map's
3637   // instance type is JS_ARRAY_TYPE, the length field should be initialized
3638   // to a number (eg, Smi::FromInt(0)) and the elements initialized to a
3639   // fixed array (eg, Heap::empty_fixed_array()).  Currently, the object
3640   // verification code has to cope with (temporarily) invalid objects.  See,
3641   // for example, JSArray::JSArrayVerify.
3642   Object* filler;
3643   // We cannot always fill with one_pointer_filler_map because objects
3644   // created from API functions expect their internal fields to be initialized
3645   // with undefined_value.
3646   // Pre-allocated fields need to be initialized with undefined_value as well
3647   // so that object accesses before the constructor completes (e.g. in the
3648   // debugger) will not cause a crash.
3649   if (map->constructor()->IsJSFunction() &&
3650       JSFunction::cast(map->constructor())->shared()->
3651           IsInobjectSlackTrackingInProgress()) {
3652     // We might want to shrink the object later.
3653     ASSERT(obj->GetInternalFieldCount() == 0);
3654     filler = Heap::one_pointer_filler_map();
3655   } else {
3656     filler = Heap::undefined_value();
3657   }
3658   obj->InitializeBody(map, Heap::undefined_value(), filler);
3659 }
3660
3661
3662 MaybeObject* Heap::AllocateJSObjectFromMap(Map* map, PretenureFlag pretenure) {
3663   // JSFunctions should be allocated using AllocateFunction to be
3664   // properly initialized.
3665   ASSERT(map->instance_type() != JS_FUNCTION_TYPE);
3666
3667   // Both types of global objects should be allocated using
3668   // AllocateGlobalObject to be properly initialized.
3669   ASSERT(map->instance_type() != JS_GLOBAL_OBJECT_TYPE);
3670   ASSERT(map->instance_type() != JS_BUILTINS_OBJECT_TYPE);
3671
3672   // Allocate the backing storage for the properties.
3673   int prop_size =
3674       map->pre_allocated_property_fields() +
3675       map->unused_property_fields() -
3676       map->inobject_properties();
3677   ASSERT(prop_size >= 0);
3678   Object* properties;
3679   { MaybeObject* maybe_properties = AllocateFixedArray(prop_size, pretenure);
3680     if (!maybe_properties->ToObject(&properties)) return maybe_properties;
3681   }
3682
3683   // Allocate the JSObject.
3684   AllocationSpace space =
3685       (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
3686   if (map->instance_size() > MaxObjectSizeInPagedSpace()) space = LO_SPACE;
3687   Object* obj;
3688   { MaybeObject* maybe_obj = Allocate(map, space);
3689     if (!maybe_obj->ToObject(&obj)) return maybe_obj;
3690   }
3691
3692   // Initialize the JSObject.
3693   InitializeJSObjectFromMap(JSObject::cast(obj),
3694                             FixedArray::cast(properties),
3695                             map);
3696   ASSERT(JSObject::cast(obj)->HasFastSmiOnlyElements() ||
3697          JSObject::cast(obj)->HasFastElements());
3698   return obj;
3699 }
3700
3701
3702 MaybeObject* Heap::AllocateJSObject(JSFunction* constructor,
3703                                     PretenureFlag pretenure) {
3704   // Allocate the initial map if absent.
3705   if (!constructor->has_initial_map()) {
3706     Object* initial_map;
3707     { MaybeObject* maybe_initial_map = AllocateInitialMap(constructor);
3708       if (!maybe_initial_map->ToObject(&initial_map)) return maybe_initial_map;
3709     }
3710     constructor->set_initial_map(Map::cast(initial_map));
3711     Map::cast(initial_map)->set_constructor(constructor);
3712   }
3713   // Allocate the object based on the constructor's initial map.
3714   MaybeObject* result =
3715       AllocateJSObjectFromMap(constructor->initial_map(), pretenure);
3716 #ifdef DEBUG
3717   // Make sure result is NOT a global object if valid.
3718   Object* non_failure;
3719   ASSERT(!result->ToObject(&non_failure) || !non_failure->IsGlobalObject());
3720 #endif
3721   return result;
3722 }
3723
3724
3725 MaybeObject* Heap::AllocateJSProxy(Object* handler, Object* prototype) {
3726   // Allocate map.
3727   // TODO(rossberg): Once we optimize proxies, think about a scheme to share
3728   // maps. Will probably depend on the identity of the handler object, too.
3729   Map* map;
3730   MaybeObject* maybe_map_obj = AllocateMap(JS_PROXY_TYPE, JSProxy::kSize);
3731   if (!maybe_map_obj->To<Map>(&map)) return maybe_map_obj;
3732   map->set_prototype(prototype);
3733
3734   // Allocate the proxy object.
3735   JSProxy* result;
3736   MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
3737   if (!maybe_result->To<JSProxy>(&result)) return maybe_result;
3738   result->InitializeBody(map->instance_size(), Smi::FromInt(0));
3739   result->set_handler(handler);
3740   result->set_hash(undefined_value(), SKIP_WRITE_BARRIER);
3741   return result;
3742 }
3743
3744
3745 MaybeObject* Heap::AllocateJSFunctionProxy(Object* handler,
3746                                            Object* call_trap,
3747                                            Object* construct_trap,
3748                                            Object* prototype) {
3749   // Allocate map.
3750   // TODO(rossberg): Once we optimize proxies, think about a scheme to share
3751   // maps. Will probably depend on the identity of the handler object, too.
3752   Map* map;
3753   MaybeObject* maybe_map_obj =
3754       AllocateMap(JS_FUNCTION_PROXY_TYPE, JSFunctionProxy::kSize);
3755   if (!maybe_map_obj->To<Map>(&map)) return maybe_map_obj;
3756   map->set_prototype(prototype);
3757
3758   // Allocate the proxy object.
3759   JSFunctionProxy* result;
3760   MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
3761   if (!maybe_result->To<JSFunctionProxy>(&result)) return maybe_result;
3762   result->InitializeBody(map->instance_size(), Smi::FromInt(0));
3763   result->set_handler(handler);
3764   result->set_hash(undefined_value(), SKIP_WRITE_BARRIER);
3765   result->set_call_trap(call_trap);
3766   result->set_construct_trap(construct_trap);
3767   return result;
3768 }
3769
3770
3771 MaybeObject* Heap::AllocateGlobalObject(JSFunction* constructor) {
3772   ASSERT(constructor->has_initial_map());
3773   Map* map = constructor->initial_map();
3774
3775   // Make sure no field properties are described in the initial map.
3776   // This guarantees us that normalizing the properties does not
3777   // require us to change property values to JSGlobalPropertyCells.
3778   ASSERT(map->NextFreePropertyIndex() == 0);
3779
3780   // Make sure we don't have a ton of pre-allocated slots in the
3781   // global objects. They will be unused once we normalize the object.
3782   ASSERT(map->unused_property_fields() == 0);
3783   ASSERT(map->inobject_properties() == 0);
3784
3785   // Initial size of the backing store to avoid resize of the storage during
3786   // bootstrapping. The size differs between the JS global object and the
3787   // builtins object.
3788   int initial_size = map->instance_type() == JS_GLOBAL_OBJECT_TYPE ? 64 : 512;
3789
3790   // Allocate a dictionary object for backing storage.
3791   Object* obj;
3792   { MaybeObject* maybe_obj =
3793         StringDictionary::Allocate(
3794             map->NumberOfDescribedProperties() * 2 + initial_size);
3795     if (!maybe_obj->ToObject(&obj)) return maybe_obj;
3796   }
3797   StringDictionary* dictionary = StringDictionary::cast(obj);
3798
3799   // The global object might be created from an object template with accessors.
3800   // Fill these accessors into the dictionary.
3801   DescriptorArray* descs = map->instance_descriptors();
3802   for (int i = 0; i < descs->number_of_descriptors(); i++) {
3803     PropertyDetails details(descs->GetDetails(i));
3804     ASSERT(details.type() == CALLBACKS);  // Only accessors are expected.
3805     PropertyDetails d =
3806         PropertyDetails(details.attributes(), CALLBACKS, details.index());
3807     Object* value = descs->GetCallbacksObject(i);
3808     { MaybeObject* maybe_value = AllocateJSGlobalPropertyCell(value);
3809       if (!maybe_value->ToObject(&value)) return maybe_value;
3810     }
3811
3812     Object* result;
3813     { MaybeObject* maybe_result = dictionary->Add(descs->GetKey(i), value, d);
3814       if (!maybe_result->ToObject(&result)) return maybe_result;
3815     }
3816     dictionary = StringDictionary::cast(result);
3817   }
3818
3819   // Allocate the global object and initialize it with the backing store.
3820   { MaybeObject* maybe_obj = Allocate(map, OLD_POINTER_SPACE);
3821     if (!maybe_obj->ToObject(&obj)) return maybe_obj;
3822   }
3823   JSObject* global = JSObject::cast(obj);
3824   InitializeJSObjectFromMap(global, dictionary, map);
3825
3826   // Create a new map for the global object.
3827   { MaybeObject* maybe_obj = map->CopyDropDescriptors();
3828     if (!maybe_obj->ToObject(&obj)) return maybe_obj;
3829   }
3830   Map* new_map = Map::cast(obj);
3831
3832   // Set up the global object as a normalized object.
3833   global->set_map(new_map);
3834   global->map()->clear_instance_descriptors();
3835   global->set_properties(dictionary);
3836
3837   // Make sure result is a global object with properties in dictionary.
3838   ASSERT(global->IsGlobalObject());
3839   ASSERT(!global->HasFastProperties());
3840   return global;
3841 }
3842
3843
3844 MaybeObject* Heap::CopyJSObject(JSObject* source) {
3845   // Never used to copy functions.  If functions need to be copied we
3846   // have to be careful to clear the literals array.
3847   SLOW_ASSERT(!source->IsJSFunction());
3848
3849   // Make the clone.
3850   Map* map = source->map();
3851   int object_size = map->instance_size();
3852   Object* clone;
3853
3854   WriteBarrierMode wb_mode = UPDATE_WRITE_BARRIER;
3855
3856   // If we're forced to always allocate, we use the general allocation
3857   // functions which may leave us with an object in old space.
3858   if (always_allocate()) {
3859     { MaybeObject* maybe_clone =
3860           AllocateRaw(object_size, NEW_SPACE, OLD_POINTER_SPACE);
3861       if (!maybe_clone->ToObject(&clone)) return maybe_clone;
3862     }
3863     Address clone_address = HeapObject::cast(clone)->address();
3864     CopyBlock(clone_address,
3865               source->address(),
3866               object_size);
3867     // Update write barrier for all fields that lie beyond the header.
3868     RecordWrites(clone_address,
3869                  JSObject::kHeaderSize,
3870                  (object_size - JSObject::kHeaderSize) / kPointerSize);
3871   } else {
3872     wb_mode = SKIP_WRITE_BARRIER;
3873     { MaybeObject* maybe_clone = new_space_.AllocateRaw(object_size);
3874       if (!maybe_clone->ToObject(&clone)) return maybe_clone;
3875     }
3876     SLOW_ASSERT(InNewSpace(clone));
3877     // Since we know the clone is allocated in new space, we can copy
3878     // the contents without worrying about updating the write barrier.
3879     CopyBlock(HeapObject::cast(clone)->address(),
3880               source->address(),
3881               object_size);
3882   }
3883
3884   SLOW_ASSERT(
3885       JSObject::cast(clone)->GetElementsKind() == source->GetElementsKind());
3886   FixedArrayBase* elements = FixedArrayBase::cast(source->elements());
3887   FixedArray* properties = FixedArray::cast(source->properties());
3888   // Update elements if necessary.
3889   if (elements->length() > 0) {
3890     Object* elem;
3891     { MaybeObject* maybe_elem;
3892       if (elements->map() == fixed_cow_array_map()) {
3893         maybe_elem = FixedArray::cast(elements);
3894       } else if (source->HasFastDoubleElements()) {
3895         maybe_elem = CopyFixedDoubleArray(FixedDoubleArray::cast(elements));
3896       } else {
3897         maybe_elem = CopyFixedArray(FixedArray::cast(elements));
3898       }
3899       if (!maybe_elem->ToObject(&elem)) return maybe_elem;
3900     }
3901     JSObject::cast(clone)->set_elements(FixedArrayBase::cast(elem), wb_mode);
3902   }
3903   // Update properties if necessary.
3904   if (properties->length() > 0) {
3905     Object* prop;
3906     { MaybeObject* maybe_prop = CopyFixedArray(properties);
3907       if (!maybe_prop->ToObject(&prop)) return maybe_prop;
3908     }
3909     JSObject::cast(clone)->set_properties(FixedArray::cast(prop), wb_mode);
3910   }
3911   // Return the new clone.
3912   return clone;
3913 }
3914
3915
3916 MaybeObject* Heap::ReinitializeJSReceiver(
3917     JSReceiver* object, InstanceType type, int size) {
3918   ASSERT(type >= FIRST_JS_OBJECT_TYPE);
3919
3920   // Allocate fresh map.
3921   // TODO(rossberg): Once we optimize proxies, cache these maps.
3922   Map* map;
3923   MaybeObject* maybe = AllocateMap(type, size);
3924   if (!maybe->To<Map>(&map)) return maybe;
3925
3926   // Check that the receiver has at least the size of the fresh object.
3927   int size_difference = object->map()->instance_size() - map->instance_size();
3928   ASSERT(size_difference >= 0);
3929
3930   map->set_prototype(object->map()->prototype());
3931
3932   // Allocate the backing storage for the properties.
3933   int prop_size = map->unused_property_fields() - map->inobject_properties();
3934   Object* properties;
3935   maybe = AllocateFixedArray(prop_size, TENURED);
3936   if (!maybe->ToObject(&properties)) return maybe;
3937
3938   // Functions require some allocation, which might fail here.
3939   SharedFunctionInfo* shared = NULL;
3940   if (type == JS_FUNCTION_TYPE) {
3941     String* name;
3942     maybe = LookupAsciiSymbol("<freezing call trap>");
3943     if (!maybe->To<String>(&name)) return maybe;
3944     maybe = AllocateSharedFunctionInfo(name);
3945     if (!maybe->To<SharedFunctionInfo>(&shared)) return maybe;
3946   }
3947
3948   // Because of possible retries of this function after failure,
3949   // we must NOT fail after this point, where we have changed the type!
3950
3951   // Reset the map for the object.
3952   object->set_map(map);
3953   JSObject* jsobj = JSObject::cast(object);
3954
3955   // Reinitialize the object from the constructor map.
3956   InitializeJSObjectFromMap(jsobj, FixedArray::cast(properties), map);
3957
3958   // Functions require some minimal initialization.
3959   if (type == JS_FUNCTION_TYPE) {
3960     map->set_function_with_prototype(true);
3961     InitializeFunction(JSFunction::cast(object), shared, the_hole_value());
3962     JSFunction::cast(object)->set_context(
3963         isolate()->context()->global_context());
3964   }
3965
3966   // Put in filler if the new object is smaller than the old.
3967   if (size_difference > 0) {
3968     CreateFillerObjectAt(
3969         object->address() + map->instance_size(), size_difference);
3970   }
3971
3972   return object;
3973 }
3974
3975
3976 MaybeObject* Heap::ReinitializeJSGlobalProxy(JSFunction* constructor,
3977                                              JSGlobalProxy* object) {
3978   ASSERT(constructor->has_initial_map());
3979   Map* map = constructor->initial_map();
3980
3981   // Check that the already allocated object has the same size and type as
3982   // objects allocated using the constructor.
3983   ASSERT(map->instance_size() == object->map()->instance_size());
3984   ASSERT(map->instance_type() == object->map()->instance_type());
3985
3986   // Allocate the backing storage for the properties.
3987   int prop_size = map->unused_property_fields() - map->inobject_properties();
3988   Object* properties;
3989   { MaybeObject* maybe_properties = AllocateFixedArray(prop_size, TENURED);
3990     if (!maybe_properties->ToObject(&properties)) return maybe_properties;
3991   }
3992
3993   // Reset the map for the object.
3994   object->set_map(constructor->initial_map());
3995
3996   // Reinitialize the object from the constructor map.
3997   InitializeJSObjectFromMap(object, FixedArray::cast(properties), map);
3998   return object;
3999 }
4000
4001
4002 MaybeObject* Heap::AllocateStringFromAscii(Vector<const char> string,
4003                                            PretenureFlag pretenure) {
4004   if (string.length() == 1) {
4005     return Heap::LookupSingleCharacterStringFromCode(string[0]);
4006   }
4007   Object* result;
4008   { MaybeObject* maybe_result =
4009         AllocateRawAsciiString(string.length(), pretenure);
4010     if (!maybe_result->ToObject(&result)) return maybe_result;
4011   }
4012
4013   // Copy the characters into the new object.
4014   SeqAsciiString* string_result = SeqAsciiString::cast(result);
4015   for (int i = 0; i < string.length(); i++) {
4016     string_result->SeqAsciiStringSet(i, string[i]);
4017   }
4018   return result;
4019 }
4020
4021
4022 MaybeObject* Heap::AllocateStringFromUtf8Slow(Vector<const char> string,
4023                                               PretenureFlag pretenure) {
4024   // V8 only supports characters in the Basic Multilingual Plane.
4025   const uc32 kMaxSupportedChar = 0xFFFF;
4026   // Count the number of characters in the UTF-8 string; this slow path
4027   // always allocates a two-byte string.
4028   Access<UnicodeCache::Utf8Decoder>
4029       decoder(isolate_->unicode_cache()->utf8_decoder());
4030   decoder->Reset(string.start(), string.length());
4031   int chars = 0;
4032   while (decoder->has_more()) {
4033     decoder->GetNext();
4034     chars++;
4035   }
4036
4037   Object* result;
4038   { MaybeObject* maybe_result = AllocateRawTwoByteString(chars, pretenure);
4039     if (!maybe_result->ToObject(&result)) return maybe_result;
4040   }
4041
4042   // Convert and copy the characters into the new object.
4043   String* string_result = String::cast(result);
4044   decoder->Reset(string.start(), string.length());
4045   for (int i = 0; i < chars; i++) {
4046     uc32 r = decoder->GetNext();
4047     if (r > kMaxSupportedChar) { r = unibrow::Utf8::kBadChar; }
4048     string_result->Set(i, r);
4049   }
4050   return result;
4051 }
4052
4053
4054 MaybeObject* Heap::AllocateStringFromTwoByte(Vector<const uc16> string,
4055                                              PretenureFlag pretenure) {
4056   // Check if the string is an ASCII string.
4057   MaybeObject* maybe_result;
4058   if (String::IsAscii(string.start(), string.length())) {
4059     maybe_result = AllocateRawAsciiString(string.length(), pretenure);
4060   } else {  // It's not an ASCII string.
4061     maybe_result = AllocateRawTwoByteString(string.length(), pretenure);
4062   }
4063   Object* result;
4064   if (!maybe_result->ToObject(&result)) return maybe_result;
4065
4066   // Copy the characters into the new object, which may be either ASCII or
4067   // UTF-16.
4068   String* string_result = String::cast(result);
4069   for (int i = 0; i < string.length(); i++) {
4070     string_result->Set(i, string[i]);
4071   }
4072   return result;
4073 }
4074
4075
4076 Map* Heap::SymbolMapForString(String* string) {
4077   // If the string is in new space it cannot be used as a symbol.
4078   if (InNewSpace(string)) return NULL;
4079
4080   // Find the corresponding symbol map for strings.
4081   switch (string->map()->instance_type()) {
4082     case STRING_TYPE: return symbol_map();
4083     case ASCII_STRING_TYPE: return ascii_symbol_map();
4084     case CONS_STRING_TYPE: return cons_symbol_map();
4085     case CONS_ASCII_STRING_TYPE: return cons_ascii_symbol_map();
4086     case EXTERNAL_STRING_TYPE: return external_symbol_map();
4087     case EXTERNAL_ASCII_STRING_TYPE: return external_ascii_symbol_map();
4088     case EXTERNAL_STRING_WITH_ASCII_DATA_TYPE:
4089       return external_symbol_with_ascii_data_map();
4090     case SHORT_EXTERNAL_STRING_TYPE: return short_external_symbol_map();
4091     case SHORT_EXTERNAL_ASCII_STRING_TYPE:
4092       return short_external_ascii_symbol_map();
4093     case SHORT_EXTERNAL_STRING_WITH_ASCII_DATA_TYPE:
4094       return short_external_symbol_with_ascii_data_map();
4095     default: return NULL;  // No match found.
4096   }
4097 }
4098
4099
4100 MaybeObject* Heap::AllocateInternalSymbol(unibrow::CharacterStream* buffer,
4101                                           int chars,
4102                                           uint32_t hash_field) {
4103   ASSERT(chars >= 0);
4104   // Ensure that chars matches the number of characters in the buffer.
4105   ASSERT(static_cast<unsigned>(chars) == buffer->Length());
4106   // Determine whether the string is ascii.
4107   bool is_ascii = true;
4108   while (buffer->has_more()) {
4109     if (buffer->GetNext() > unibrow::Utf8::kMaxOneByteChar) {
4110       is_ascii = false;
4111       break;
4112     }
4113   }
4114   buffer->Rewind();
4115
4116   // Compute map and object size.
4117   int size;
4118   Map* map;
4119
4120   if (is_ascii) {
4121     if (chars > SeqAsciiString::kMaxLength) {
4122       return Failure::OutOfMemoryException();
4123     }
4124     map = ascii_symbol_map();
4125     size = SeqAsciiString::SizeFor(chars);
4126   } else {
4127     if (chars > SeqTwoByteString::kMaxLength) {
4128       return Failure::OutOfMemoryException();
4129     }
4130     map = symbol_map();
4131     size = SeqTwoByteString::SizeFor(chars);
4132   }
4133
4134   // Allocate string.
4135   Object* result;
4136   { MaybeObject* maybe_result = (size > MaxObjectSizeInPagedSpace())
4137                    ? lo_space_->AllocateRaw(size, NOT_EXECUTABLE)
4138                    : old_data_space_->AllocateRaw(size);
4139     if (!maybe_result->ToObject(&result)) return maybe_result;
4140   }
4141
4142   reinterpret_cast<HeapObject*>(result)->set_map_no_write_barrier(map);
4143   // Set length and hash fields of the allocated string.
4144   String* answer = String::cast(result);
4145   answer->set_length(chars);
4146   answer->set_hash_field(hash_field);
4147
4148   ASSERT_EQ(size, answer->Size());
4149
4150   // Fill in the characters.
4151   for (int i = 0; i < chars; i++) {
4152     answer->Set(i, buffer->GetNext());
4153   }
4154   return answer;
4155 }
4156
4157
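// Chooses the allocation space from the pretenuring hint and the object size:
// small untenured strings go to new space (retrying in old data space),
// tenured ones to old data space, and anything above the paged-space limit to
// large object space.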
4158 MaybeObject* Heap::AllocateRawAsciiString(int length, PretenureFlag pretenure) {
4159   if (length < 0 || length > SeqAsciiString::kMaxLength) {
4160     return Failure::OutOfMemoryException();
4161   }
4162
4163   int size = SeqAsciiString::SizeFor(length);
4164   ASSERT(size <= SeqAsciiString::kMaxSize);
4165
4166   AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
4167   AllocationSpace retry_space = OLD_DATA_SPACE;
4168
4169   if (space == NEW_SPACE) {
4170     if (size > kMaxObjectSizeInNewSpace) {
4171       // Allocate in large object space, retry space will be ignored.
4172       space = LO_SPACE;
4173     } else if (size > MaxObjectSizeInPagedSpace()) {
4174       // Allocate in new space, retry in large object space.
4175       retry_space = LO_SPACE;
4176     }
4177   } else if (space == OLD_DATA_SPACE && size > MaxObjectSizeInPagedSpace()) {
4178     space = LO_SPACE;
4179   }
4180   Object* result;
4181   { MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
4182     if (!maybe_result->ToObject(&result)) return maybe_result;
4183   }
4184
4185   // Partially initialize the object.
4186   HeapObject::cast(result)->set_map_no_write_barrier(ascii_string_map());
4187   String::cast(result)->set_length(length);
4188   String::cast(result)->set_hash_field(String::kEmptyHashField);
4189   ASSERT_EQ(size, HeapObject::cast(result)->Size());
4190   return result;
4191 }
4192
4193
4194 MaybeObject* Heap::AllocateRawTwoByteString(int length,
4195                                             PretenureFlag pretenure) {
4196   if (length < 0 || length > SeqTwoByteString::kMaxLength) {
4197     return Failure::OutOfMemoryException();
4198   }
4199   int size = SeqTwoByteString::SizeFor(length);
4200   ASSERT(size <= SeqTwoByteString::kMaxSize);
4201   AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
4202   AllocationSpace retry_space = OLD_DATA_SPACE;
4203
4204   if (space == NEW_SPACE) {
4205     if (size > kMaxObjectSizeInNewSpace) {
4206       // Allocate in large object space, retry space will be ignored.
4207       space = LO_SPACE;
4208     } else if (size > MaxObjectSizeInPagedSpace()) {
4209       // Allocate in new space, retry in large object space.
4210       retry_space = LO_SPACE;
4211     }
4212   } else if (space == OLD_DATA_SPACE && size > MaxObjectSizeInPagedSpace()) {
4213     space = LO_SPACE;
4214   }
4215   Object* result;
4216   { MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
4217     if (!maybe_result->ToObject(&result)) return maybe_result;
4218   }
4219
4220   // Partially initialize the object.
4221   HeapObject::cast(result)->set_map_no_write_barrier(string_map());
4222   String::cast(result)->set_length(length);
4223   String::cast(result)->set_hash_field(String::kEmptyHashField);
4224   ASSERT_EQ(size, HeapObject::cast(result)->Size());
4225   return result;
4226 }
4227
4228
4229 MaybeObject* Heap::AllocateEmptyFixedArray() {
4230   int size = FixedArray::SizeFor(0);
4231   Object* result;
4232   { MaybeObject* maybe_result =
4233         AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE);
4234     if (!maybe_result->ToObject(&result)) return maybe_result;
4235   }
4236   // Initialize the object.
4237   reinterpret_cast<FixedArray*>(result)->set_map_no_write_barrier(
4238       fixed_array_map());
4239   reinterpret_cast<FixedArray*>(result)->set_length(0);
4240   return result;
4241 }
4242
4243
4244 MaybeObject* Heap::AllocateRawFixedArray(int length) {
4245   if (length < 0 || length > FixedArray::kMaxLength) {
4246     return Failure::OutOfMemoryException();
4247   }
4248   ASSERT(length > 0);
4249   // Use the general function if we're forced to always allocate.
4250   if (always_allocate()) return AllocateFixedArray(length, TENURED);
4251   // Allocate the raw data for a fixed array.
4252   int size = FixedArray::SizeFor(length);
4253   return size <= kMaxObjectSizeInNewSpace
4254       ? new_space_.AllocateRaw(size)
4255       : lo_space_->AllocateRaw(size, NOT_EXECUTABLE);
4256 }
4257
4258
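// Copies |src| into a new fixed array carrying |map|. A copy that lands in
// new space can be block-copied without write barriers; otherwise the
// elements are copied one by one with the barrier mode the destination
// requires.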
4259 MaybeObject* Heap::CopyFixedArrayWithMap(FixedArray* src, Map* map) {
4260   int len = src->length();
4261   Object* obj;
4262   { MaybeObject* maybe_obj = AllocateRawFixedArray(len);
4263     if (!maybe_obj->ToObject(&obj)) return maybe_obj;
4264   }
4265   if (InNewSpace(obj)) {
4266     HeapObject* dst = HeapObject::cast(obj);
4267     dst->set_map_no_write_barrier(map);
4268     CopyBlock(dst->address() + kPointerSize,
4269               src->address() + kPointerSize,
4270               FixedArray::SizeFor(len) - kPointerSize);
4271     return obj;
4272   }
4273   HeapObject::cast(obj)->set_map_no_write_barrier(map);
4274   FixedArray* result = FixedArray::cast(obj);
4275   result->set_length(len);
4276
4277   // Copy the contents.
4278   AssertNoAllocation no_gc;
4279   WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
4280   for (int i = 0; i < len; i++) result->set(i, src->get(i), mode);
4281   return result;
4282 }
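
// The fast path above installs the map word first and then block-copies
// everything after it, so the length and the elements come over in a single
// raw copy.  Below is a standalone, illustrative sketch of the same
// "install header, then copy the rest of the block" pattern; the types and
// names are hypothetical and not part of V8.
#if 0
#include <cstddef>
#include <cstring>

struct ExampleArray {
  const void* descriptor;  // analogous to the map word
  int length;
  int elements[1];         // payload follows the header
};

// Sets the header word explicitly and copies everything after the first
// (header) word, so the destination never carries a stale descriptor.
static void CopyWithDescriptor(ExampleArray* dst,
                               const ExampleArray* src,
                               std::size_t size_in_bytes,
                               const void* new_descriptor) {
  dst->descriptor = new_descriptor;
  std::memcpy(reinterpret_cast<char*>(dst) + sizeof(void*),
              reinterpret_cast<const char*>(src) + sizeof(void*),
              size_in_bytes - sizeof(void*));
}
#endif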
4283
4284
4285 MaybeObject* Heap::CopyFixedDoubleArrayWithMap(FixedDoubleArray* src,
4286                                                Map* map) {
4287   int len = src->length();
4288   Object* obj;
4289   { MaybeObject* maybe_obj = AllocateRawFixedDoubleArray(len, NOT_TENURED);
4290     if (!maybe_obj->ToObject(&obj)) return maybe_obj;
4291   }
4292   HeapObject* dst = HeapObject::cast(obj);
4293   dst->set_map_no_write_barrier(map);
4294   CopyBlock(
4295       dst->address() + FixedDoubleArray::kLengthOffset,
4296       src->address() + FixedDoubleArray::kLengthOffset,
4297       FixedDoubleArray::SizeFor(len) - FixedDoubleArray::kLengthOffset);
4298   return obj;
4299 }
4300
4301
4302 MaybeObject* Heap::AllocateFixedArray(int length) {
4303   ASSERT(length >= 0);
4304   if (length == 0) return empty_fixed_array();
4305   Object* result;
4306   { MaybeObject* maybe_result = AllocateRawFixedArray(length);
4307     if (!maybe_result->ToObject(&result)) return maybe_result;
4308   }
4309   // Initialize header.
4310   FixedArray* array = reinterpret_cast<FixedArray*>(result);
4311   array->set_map_no_write_barrier(fixed_array_map());
4312   array->set_length(length);
4313   // Initialize body.
4314   ASSERT(!InNewSpace(undefined_value()));
4315   MemsetPointer(array->data_start(), undefined_value(), length);
4316   return result;
4317 }
4318
4319
4320 MaybeObject* Heap::AllocateRawFixedArray(int length, PretenureFlag pretenure) {
4321   if (length < 0 || length > FixedArray::kMaxLength) {
4322     return Failure::OutOfMemoryException();
4323   }
4324
4325   AllocationSpace space =
4326       (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
4327   int size = FixedArray::SizeFor(length);
4328   if (space == NEW_SPACE && size > kMaxObjectSizeInNewSpace) {
4329     // Too big for new space.
4330     space = LO_SPACE;
4331   } else if (space == OLD_POINTER_SPACE &&
4332              size > MaxObjectSizeInPagedSpace()) {
4333     // Too big for old pointer space.
4334     space = LO_SPACE;
4335   }
4336
4337   AllocationSpace retry_space =
4338       (size <= MaxObjectSizeInPagedSpace()) ? OLD_POINTER_SPACE : LO_SPACE;
4339
4340   return AllocateRaw(size, space, retry_space);
4341 }
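
// The allocator above picks a primary space from the pretenure flag and then
// demotes oversized requests to large object space, with the retry space
// following the same size test.  A standalone sketch of that decision rule;
// the enum values and size limits below are placeholders, not V8 constants.
#if 0
enum SketchSpace { SKETCH_NEW, SKETCH_OLD, SKETCH_LARGE };

struct SketchChoice {
  SketchSpace space;
  SketchSpace retry_space;
};

static SketchChoice ChooseAllocationSpace(int size,
                                          bool tenured,
                                          int max_new_space_object_size,
                                          int max_paged_object_size) {
  SketchSpace space = tenured ? SKETCH_OLD : SKETCH_NEW;
  if (space == SKETCH_NEW && size > max_new_space_object_size) {
    space = SKETCH_LARGE;  // Too big for new space.
  } else if (space == SKETCH_OLD && size > max_paged_object_size) {
    space = SKETCH_LARGE;  // Too big for a paged old space.
  }
  SketchChoice choice;
  choice.space = space;
  choice.retry_space =
      (size <= max_paged_object_size) ? SKETCH_OLD : SKETCH_LARGE;
  return choice;
}
#endif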
4342
4343
4344 MUST_USE_RESULT static MaybeObject* AllocateFixedArrayWithFiller(
4345     Heap* heap,
4346     int length,
4347     PretenureFlag pretenure,
4348     Object* filler) {
4349   ASSERT(length >= 0);
4350   ASSERT(heap->empty_fixed_array()->IsFixedArray());
4351   if (length == 0) return heap->empty_fixed_array();
4352
4353   ASSERT(!heap->InNewSpace(filler));
4354   Object* result;
4355   { MaybeObject* maybe_result = heap->AllocateRawFixedArray(length, pretenure);
4356     if (!maybe_result->ToObject(&result)) return maybe_result;
4357   }
4358
4359   HeapObject::cast(result)->set_map_no_write_barrier(heap->fixed_array_map());
4360   FixedArray* array = FixedArray::cast(result);
4361   array->set_length(length);
4362   MemsetPointer(array->data_start(), filler, length);
4363   return array;
4364 }
4365
4366
4367 MaybeObject* Heap::AllocateFixedArray(int length, PretenureFlag pretenure) {
4368   return AllocateFixedArrayWithFiller(this,
4369                                       length,
4370                                       pretenure,
4371                                       undefined_value());
4372 }
4373
4374
4375 MaybeObject* Heap::AllocateFixedArrayWithHoles(int length,
4376                                                PretenureFlag pretenure) {
4377   return AllocateFixedArrayWithFiller(this,
4378                                       length,
4379                                       pretenure,
4380                                       the_hole_value());
4381 }
4382
4383
4384 MaybeObject* Heap::AllocateUninitializedFixedArray(int length) {
4385   if (length == 0) return empty_fixed_array();
4386
4387   Object* obj;
4388   { MaybeObject* maybe_obj = AllocateRawFixedArray(length);
4389     if (!maybe_obj->ToObject(&obj)) return maybe_obj;
4390   }
4391
4392   reinterpret_cast<FixedArray*>(obj)->set_map_no_write_barrier(
4393       fixed_array_map());
4394   FixedArray::cast(obj)->set_length(length);
4395   return obj;
4396 }
4397
4398
4399 MaybeObject* Heap::AllocateEmptyFixedDoubleArray() {
4400   int size = FixedDoubleArray::SizeFor(0);
4401   Object* result;
4402   { MaybeObject* maybe_result =
4403         AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE);
4404     if (!maybe_result->ToObject(&result)) return maybe_result;
4405   }
4406   // Initialize the object.
4407   reinterpret_cast<FixedDoubleArray*>(result)->set_map_no_write_barrier(
4408       fixed_double_array_map());
4409   reinterpret_cast<FixedDoubleArray*>(result)->set_length(0);
4410   return result;
4411 }
4412
4413
4414 MaybeObject* Heap::AllocateUninitializedFixedDoubleArray(
4415     int length,
4416     PretenureFlag pretenure) {
4417   if (length == 0) return empty_fixed_double_array();
4418
4419   Object* obj;
4420   { MaybeObject* maybe_obj = AllocateRawFixedDoubleArray(length, pretenure);
4421     if (!maybe_obj->ToObject(&obj)) return maybe_obj;
4422   }
4423
4424   reinterpret_cast<FixedDoubleArray*>(obj)->set_map_no_write_barrier(
4425       fixed_double_array_map());
4426   FixedDoubleArray::cast(obj)->set_length(length);
4427   return obj;
4428 }
4429
4430
4431 MaybeObject* Heap::AllocateRawFixedDoubleArray(int length,
4432                                                PretenureFlag pretenure) {
4433   if (length < 0 || length > FixedDoubleArray::kMaxLength) {
4434     return Failure::OutOfMemoryException();
4435   }
4436
4437   AllocationSpace space =
4438       (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
4439   int size = FixedDoubleArray::SizeFor(length);
4440   if (space == NEW_SPACE && size > kMaxObjectSizeInNewSpace) {
4441     // Too big for new space.
4442     space = LO_SPACE;
4443   } else if (space == OLD_DATA_SPACE &&
4444              size > MaxObjectSizeInPagedSpace()) {
4445     // Too big for old data space.
4446     space = LO_SPACE;
4447   }
4448
4449   AllocationSpace retry_space =
4450       (size <= MaxObjectSizeInPagedSpace()) ? OLD_DATA_SPACE : LO_SPACE;
4451
4452   return AllocateRaw(size, space, retry_space);
4453 }
4454
4455
4456 MaybeObject* Heap::AllocateHashTable(int length, PretenureFlag pretenure) {
4457   Object* result;
4458   { MaybeObject* maybe_result = AllocateFixedArray(length, pretenure);
4459     if (!maybe_result->ToObject(&result)) return maybe_result;
4460   }
4461   reinterpret_cast<HeapObject*>(result)->set_map_no_write_barrier(
4462       hash_table_map());
4463   ASSERT(result->IsHashTable());
4464   return result;
4465 }
4466
4467
4468 MaybeObject* Heap::AllocateGlobalContext() {
4469   Object* result;
4470   { MaybeObject* maybe_result =
4471         AllocateFixedArray(Context::GLOBAL_CONTEXT_SLOTS);
4472     if (!maybe_result->ToObject(&result)) return maybe_result;
4473   }
4474   Context* context = reinterpret_cast<Context*>(result);
4475   context->set_map_no_write_barrier(global_context_map());
4476   ASSERT(context->IsGlobalContext());
4477   ASSERT(result->IsContext());
4478   return result;
4479 }
4480
4481
4482 MaybeObject* Heap::AllocateFunctionContext(int length, JSFunction* function) {
4483   ASSERT(length >= Context::MIN_CONTEXT_SLOTS);
4484   Object* result;
4485   { MaybeObject* maybe_result = AllocateFixedArray(length);
4486     if (!maybe_result->ToObject(&result)) return maybe_result;
4487   }
4488   Context* context = reinterpret_cast<Context*>(result);
4489   context->set_map_no_write_barrier(function_context_map());
4490   context->set_closure(function);
4491   context->set_previous(function->context());
4492   context->set_extension(NULL);
4493   context->set_global(function->context()->global());
4494   return context;
4495 }
4496
4497
4498 MaybeObject* Heap::AllocateCatchContext(JSFunction* function,
4499                                         Context* previous,
4500                                         String* name,
4501                                         Object* thrown_object) {
4502   STATIC_ASSERT(Context::MIN_CONTEXT_SLOTS == Context::THROWN_OBJECT_INDEX);
4503   Object* result;
4504   { MaybeObject* maybe_result =
4505         AllocateFixedArray(Context::MIN_CONTEXT_SLOTS + 1);
4506     if (!maybe_result->ToObject(&result)) return maybe_result;
4507   }
4508   Context* context = reinterpret_cast<Context*>(result);
4509   context->set_map_no_write_barrier(catch_context_map());
4510   context->set_closure(function);
4511   context->set_previous(previous);
4512   context->set_extension(name);
4513   context->set_global(previous->global());
4514   context->set(Context::THROWN_OBJECT_INDEX, thrown_object);
4515   return context;
4516 }
4517
4518
4519 MaybeObject* Heap::AllocateWithContext(JSFunction* function,
4520                                        Context* previous,
4521                                        JSObject* extension) {
4522   Object* result;
4523   { MaybeObject* maybe_result = AllocateFixedArray(Context::MIN_CONTEXT_SLOTS);
4524     if (!maybe_result->ToObject(&result)) return maybe_result;
4525   }
4526   Context* context = reinterpret_cast<Context*>(result);
4527   context->set_map_no_write_barrier(with_context_map());
4528   context->set_closure(function);
4529   context->set_previous(previous);
4530   context->set_extension(extension);
4531   context->set_global(previous->global());
4532   return context;
4533 }
4534
4535
4536 MaybeObject* Heap::AllocateBlockContext(JSFunction* function,
4537                                         Context* previous,
4538                                         ScopeInfo* scope_info) {
4539   Object* result;
4540   { MaybeObject* maybe_result =
4541         AllocateFixedArrayWithHoles(scope_info->ContextLength());
4542     if (!maybe_result->ToObject(&result)) return maybe_result;
4543   }
4544   Context* context = reinterpret_cast<Context*>(result);
4545   context->set_map_no_write_barrier(block_context_map());
4546   context->set_closure(function);
4547   context->set_previous(previous);
4548   context->set_extension(scope_info);
4549   context->set_global(previous->global());
4550   return context;
4551 }
4552
4553
4554 MaybeObject* Heap::AllocateScopeInfo(int length) {
4555   FixedArray* scope_info;
4556   MaybeObject* maybe_scope_info = AllocateFixedArray(length, TENURED);
4557   if (!maybe_scope_info->To(&scope_info)) return maybe_scope_info;
4558   scope_info->set_map_no_write_barrier(scope_info_map());
4559   return scope_info;
4560 }
4561
4562
4563 MaybeObject* Heap::AllocateStruct(InstanceType type) {
4564   Map* map;
4565   switch (type) {
4566 #define MAKE_CASE(NAME, Name, name) \
4567     case NAME##_TYPE: map = name##_map(); break;
4568 STRUCT_LIST(MAKE_CASE)
4569 #undef MAKE_CASE
4570     default:
4571       UNREACHABLE();
4572       return Failure::InternalError();
4573   }
4574   int size = map->instance_size();
4575   AllocationSpace space =
4576       (size > MaxObjectSizeInPagedSpace()) ? LO_SPACE : OLD_POINTER_SPACE;
4577   Object* result;
4578   { MaybeObject* maybe_result = Allocate(map, space);
4579     if (!maybe_result->ToObject(&result)) return maybe_result;
4580   }
4581   Struct::cast(result)->InitializeBody(size);
4582   return result;
4583 }
4584
4585
4586 bool Heap::IsHeapIterable() {
4587   return (!old_pointer_space()->was_swept_conservatively() &&
4588           !old_data_space()->was_swept_conservatively());
4589 }
4590
4591
4592 void Heap::EnsureHeapIsIterable() {
4593   ASSERT(IsAllocationAllowed());
4594   if (!IsHeapIterable()) {
4595     CollectAllGarbage(kMakeHeapIterableMask);
4596   }
4597   ASSERT(IsHeapIterable());
4598 }
4599
4600
4601 bool Heap::IdleNotification(int hint) {
4602   if (hint >= 1000) return IdleGlobalGC();
4603   if (contexts_disposed_ > 0 || !FLAG_incremental_marking ||
4604       FLAG_expose_gc || Serializer::enabled()) {
4605     return true;
4606   }
4607
4608   // By doing small chunks of GC work in each IdleNotification, we
4609   // perform a round of incremental GCs and after that wait until the
4610   // mutator creates enough garbage to justify a new round.
4611   // An incremental GC progresses as follows:
4612   // 1. many incremental marking steps,
4613   // 2. one old space mark-sweep-compact,
4614   // 3. many lazy sweep steps.
4615   // Use mark-sweep-compact events to count incremental GCs in a round.
4616
4617   intptr_t size_factor = Min(Max(hint, 30), 1000) / 10;
4618   // The size factor is in range [3..100].
4619   intptr_t step_size = size_factor * IncrementalMarking::kAllocatedThreshold;
4620
4621   if (incremental_marking()->IsStopped()) {
4622     if (!IsSweepingComplete() &&
4623         !AdvanceSweepers(static_cast<int>(step_size))) {
4624       return false;
4625     }
4626   }
4627
4628   if (mark_sweeps_since_idle_round_started_ >= kMaxMarkSweepsInIdleRound) {
4629     if (EnoughGarbageSinceLastIdleRound()) {
4630       StartIdleRound();
4631     } else {
4632       return true;
4633     }
4634   }
4635
4636   int new_mark_sweeps = ms_count_ - ms_count_at_last_idle_notification_;
4637   mark_sweeps_since_idle_round_started_ += new_mark_sweeps;
4638   ms_count_at_last_idle_notification_ = ms_count_;
4639
4640   if (mark_sweeps_since_idle_round_started_ >= kMaxMarkSweepsInIdleRound) {
4641     FinishIdleRound();
4642     return true;
4643   }
4644
4645   if (incremental_marking()->IsStopped()) {
4646     if (hint < 1000 && !WorthStartingGCWhenIdle()) {
4647       FinishIdleRound();
4648       return true;
4649     }
4650     incremental_marking()->Start();
4651   }
4652
4653   // This flag prevents incremental marking from requesting GC via stack guard
4654   idle_notification_will_schedule_next_gc_ = true;
4655   incremental_marking()->Step(step_size);
4656   idle_notification_will_schedule_next_gc_ = false;
4657
4658   if (incremental_marking()->IsComplete()) {
4659     bool uncommit = false;
4660     if (gc_count_at_last_idle_gc_ == gc_count_) {
4661       // No GC since the last full GC; the mutator is probably not active.
4662       isolate_->compilation_cache()->Clear();
4663       uncommit = true;
4664     }
4665     CollectAllGarbage(kNoGCFlags);
4666     gc_count_at_last_idle_gc_ = gc_count_;
4667     if (uncommit) {
4668       new_space_.Shrink();
4669       UncommitFromSpace();
4670     }
4671   }
4672   return false;
4673 }
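
// The idle hint above is clamped to [30, 1000] and divided by 10, giving a
// size factor in [3, 100] that scales the incremental marking step.  A
// standalone sketch of just that arithmetic; kExampleAllocatedThreshold is a
// placeholder standing in for IncrementalMarking::kAllocatedThreshold.
#if 0
static const long kExampleAllocatedThreshold = 65536;  // placeholder value

static long IdleStepSize(int hint) {
  int clamped = hint;
  if (clamped < 30) clamped = 30;
  if (clamped > 1000) clamped = 1000;
  long size_factor = clamped / 10;  // in [3, 100]
  return size_factor * kExampleAllocatedThreshold;
}

// For example, IdleStepSize(100) == 10 * kExampleAllocatedThreshold, and a
// tiny hint such as IdleStepSize(10) is clamped up to
// 3 * kExampleAllocatedThreshold.
#endif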
4674
4675
4676 bool Heap::IdleGlobalGC() {
4677   static const int kIdlesBeforeScavenge = 4;
4678   static const int kIdlesBeforeMarkSweep = 7;
4679   static const int kIdlesBeforeMarkCompact = 8;
4680   static const int kMaxIdleCount = kIdlesBeforeMarkCompact + 1;
4681   static const unsigned int kGCsBetweenCleanup = 4;
4682
4683   if (!last_idle_notification_gc_count_init_) {
4684     last_idle_notification_gc_count_ = gc_count_;
4685     last_idle_notification_gc_count_init_ = true;
4686   }
4687
4688   bool uncommit = true;
4689   bool finished = false;
4690
4691   // Reset the number of idle notifications received when enough GCs have
4692   // taken place. This allows another round of idle-notification cleanup
4693   // once enough work has been carried out to provoke several garbage
4694   // collections.
4695   if (gc_count_ - last_idle_notification_gc_count_ < kGCsBetweenCleanup) {
4696     number_idle_notifications_ =
4697         Min(number_idle_notifications_ + 1, kMaxIdleCount);
4698   } else {
4699     number_idle_notifications_ = 0;
4700     last_idle_notification_gc_count_ = gc_count_;
4701   }
4702
4703   if (number_idle_notifications_ == kIdlesBeforeScavenge) {
4704     if (contexts_disposed_ > 0) {
4705       HistogramTimerScope scope(isolate_->counters()->gc_context());
4706       CollectAllGarbage(kNoGCFlags);
4707     } else {
4708       CollectGarbage(NEW_SPACE);
4709     }
4710     new_space_.Shrink();
4711     last_idle_notification_gc_count_ = gc_count_;
4712   } else if (number_idle_notifications_ == kIdlesBeforeMarkSweep) {
4713     // Before doing the mark-sweep collections we clear the
4714     // compilation cache to avoid hanging on to source code and
4715     // generated code for cached functions.
4716     isolate_->compilation_cache()->Clear();
4717
4718     CollectAllGarbage(kNoGCFlags);
4719     new_space_.Shrink();
4720     last_idle_notification_gc_count_ = gc_count_;
4721
4722   } else if (number_idle_notifications_ == kIdlesBeforeMarkCompact) {
4723     CollectAllGarbage(kNoGCFlags);
4724     new_space_.Shrink();
4725     last_idle_notification_gc_count_ = gc_count_;
4726     number_idle_notifications_ = 0;
4727     finished = true;
4728   } else if (contexts_disposed_ > 0) {
4729     if (FLAG_expose_gc) {
4730       contexts_disposed_ = 0;
4731     } else {
4732       HistogramTimerScope scope(isolate_->counters()->gc_context());
4733       CollectAllGarbage(kNoGCFlags);
4734       last_idle_notification_gc_count_ = gc_count_;
4735     }
4736     // If this is the first idle notification, we reset the notification
4737     // count so that idle notifications triggered by context disposal
4738     // garbage collections do not start an overly aggressive idle GC
4739     // cycle.
4740     if (number_idle_notifications_ <= 1) {
4741       number_idle_notifications_ = 0;
4742       uncommit = false;
4743     }
4744   } else if (number_idle_notifications_ > kIdlesBeforeMarkCompact) {
4745     // If we have received more than kIdlesBeforeMarkCompact idle
4746     // notifications we do not perform any cleanup because we don't
4747     // expect to gain much by doing so.
4748     finished = true;
4749   }
4750
4751   // Make sure that we have no pending context disposals and
4752   // conditionally uncommit from space.
4753   // Take into account that we might have decided to delay full collection
4754   // because incremental marking is in progress.
4755   ASSERT((contexts_disposed_ == 0) || !incremental_marking()->IsStopped());
4756   if (uncommit) UncommitFromSpace();
4757
4758   return finished;
4759 }
4760
4761
4762 #ifdef DEBUG
4763
4764 void Heap::Print() {
4765   if (!HasBeenSetUp()) return;
4766   isolate()->PrintStack();
4767   AllSpaces spaces;
4768   for (Space* space = spaces.next(); space != NULL; space = spaces.next())
4769     space->Print();
4770 }
4771
4772
4773 void Heap::ReportCodeStatistics(const char* title) {
4774   PrintF(">>>>>> Code Stats (%s) >>>>>>\n", title);
4775   PagedSpace::ResetCodeStatistics();
4776   // We do not look for code in new space, map space, or old space.  If code
4777   // somehow ends up in those spaces, we would miss it here.
4778   code_space_->CollectCodeStatistics();
4779   lo_space_->CollectCodeStatistics();
4780   PagedSpace::ReportCodeStatistics();
4781 }
4782
4783
4784 // This function expects that NewSpace's allocated objects histogram is
4785 // populated (via a call to CollectStatistics or else as a side effect of a
4786 // just-completed scavenge collection).
4787 void Heap::ReportHeapStatistics(const char* title) {
4788   USE(title);
4789   PrintF(">>>>>> =============== %s (%d) =============== >>>>>>\n",
4790          title, gc_count_);
4791   PrintF("old_gen_promotion_limit_ %" V8_PTR_PREFIX "d\n",
4792          old_gen_promotion_limit_);
4793   PrintF("old_gen_allocation_limit_ %" V8_PTR_PREFIX "d\n",
4794          old_gen_allocation_limit_);
4795   PrintF("old_gen_limit_factor_ %d\n", old_gen_limit_factor_);
4796
4797   PrintF("\n");
4798   PrintF("Number of handles : %d\n", HandleScope::NumberOfHandles());
4799   isolate_->global_handles()->PrintStats();
4800   PrintF("\n");
4801
4802   PrintF("Heap statistics : ");
4803   isolate_->memory_allocator()->ReportStatistics();
4804   PrintF("To space : ");
4805   new_space_.ReportStatistics();
4806   PrintF("Old pointer space : ");
4807   old_pointer_space_->ReportStatistics();
4808   PrintF("Old data space : ");
4809   old_data_space_->ReportStatistics();
4810   PrintF("Code space : ");
4811   code_space_->ReportStatistics();
4812   PrintF("Map space : ");
4813   map_space_->ReportStatistics();
4814   PrintF("Cell space : ");
4815   cell_space_->ReportStatistics();
4816   PrintF("Large object space : ");
4817   lo_space_->ReportStatistics();
4818   PrintF(">>>>>> ========================================= >>>>>>\n");
4819 }
4820
4821 #endif  // DEBUG
4822
4823 bool Heap::Contains(HeapObject* value) {
4824   return Contains(value->address());
4825 }
4826
4827
4828 bool Heap::Contains(Address addr) {
4829   if (OS::IsOutsideAllocatedSpace(addr)) return false;
4830   return HasBeenSetUp() &&
4831     (new_space_.ToSpaceContains(addr) ||
4832      old_pointer_space_->Contains(addr) ||
4833      old_data_space_->Contains(addr) ||
4834      code_space_->Contains(addr) ||
4835      map_space_->Contains(addr) ||
4836      cell_space_->Contains(addr) ||
4837      lo_space_->SlowContains(addr));
4838 }
4839
4840
4841 bool Heap::InSpace(HeapObject* value, AllocationSpace space) {
4842   return InSpace(value->address(), space);
4843 }
4844
4845
4846 bool Heap::InSpace(Address addr, AllocationSpace space) {
4847   if (OS::IsOutsideAllocatedSpace(addr)) return false;
4848   if (!HasBeenSetUp()) return false;
4849
4850   switch (space) {
4851     case NEW_SPACE:
4852       return new_space_.ToSpaceContains(addr);
4853     case OLD_POINTER_SPACE:
4854       return old_pointer_space_->Contains(addr);
4855     case OLD_DATA_SPACE:
4856       return old_data_space_->Contains(addr);
4857     case CODE_SPACE:
4858       return code_space_->Contains(addr);
4859     case MAP_SPACE:
4860       return map_space_->Contains(addr);
4861     case CELL_SPACE:
4862       return cell_space_->Contains(addr);
4863     case LO_SPACE:
4864       return lo_space_->SlowContains(addr);
4865   }
4866
4867   return false;
4868 }
4869
4870
4871 #ifdef DEBUG
4872 void Heap::Verify() {
4873   ASSERT(HasBeenSetUp());
4874
4875   store_buffer()->Verify();
4876
4877   VerifyPointersVisitor visitor;
4878   IterateRoots(&visitor, VISIT_ONLY_STRONG);
4879
4880   new_space_.Verify();
4881
4882   old_pointer_space_->Verify(&visitor);
4883   map_space_->Verify(&visitor);
4884
4885   VerifyPointersVisitor no_dirty_regions_visitor;
4886   old_data_space_->Verify(&no_dirty_regions_visitor);
4887   code_space_->Verify(&no_dirty_regions_visitor);
4888   cell_space_->Verify(&no_dirty_regions_visitor);
4889
4890   lo_space_->Verify();
4891 }
4892
4893 #endif  // DEBUG
4894
4895
4896 MaybeObject* Heap::LookupSymbol(Vector<const char> string) {
4897   Object* symbol = NULL;
4898   Object* new_table;
4899   { MaybeObject* maybe_new_table =
4900         symbol_table()->LookupSymbol(string, &symbol);
4901     if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
4902   }
4903   // Can't use set_symbol_table because SymbolTable::cast knows that
4904   // SymbolTable is a singleton and checks for identity.
4905   roots_[kSymbolTableRootIndex] = new_table;
4906   ASSERT(symbol != NULL);
4907   return symbol;
4908 }
4909
4910
4911 MaybeObject* Heap::LookupAsciiSymbol(Vector<const char> string) {
4912   Object* symbol = NULL;
4913   Object* new_table;
4914   { MaybeObject* maybe_new_table =
4915         symbol_table()->LookupAsciiSymbol(string, &symbol);
4916     if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
4917   }
4918   // Can't use set_symbol_table because SymbolTable::cast knows that
4919   // SymbolTable is a singleton and checks for identity.
4920   roots_[kSymbolTableRootIndex] = new_table;
4921   ASSERT(symbol != NULL);
4922   return symbol;
4923 }
4924
4925
4926 MaybeObject* Heap::LookupAsciiSymbol(Handle<SeqAsciiString> string,
4927                                      int from,
4928                                      int length) {
4929   Object* symbol = NULL;
4930   Object* new_table;
4931   { MaybeObject* maybe_new_table =
4932         symbol_table()->LookupSubStringAsciiSymbol(string,
4933                                                    from,
4934                                                    length,
4935                                                    &symbol);
4936     if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
4937   }
4938   // Can't use set_symbol_table because SymbolTable::cast knows that
4939   // SymbolTable is a singleton and checks for identity.
4940   roots_[kSymbolTableRootIndex] = new_table;
4941   ASSERT(symbol != NULL);
4942   return symbol;
4943 }
4944
4945
4946 MaybeObject* Heap::LookupTwoByteSymbol(Vector<const uc16> string) {
4947   Object* symbol = NULL;
4948   Object* new_table;
4949   { MaybeObject* maybe_new_table =
4950         symbol_table()->LookupTwoByteSymbol(string, &symbol);
4951     if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
4952   }
4953   // Can't use set_symbol_table because SymbolTable::cast knows that
4954   // SymbolTable is a singleton and checks for identity.
4955   roots_[kSymbolTableRootIndex] = new_table;
4956   ASSERT(symbol != NULL);
4957   return symbol;
4958 }
4959
4960
4961 MaybeObject* Heap::LookupSymbol(String* string) {
4962   if (string->IsSymbol()) return string;
4963   Object* symbol = NULL;
4964   Object* new_table;
4965   { MaybeObject* maybe_new_table =
4966         symbol_table()->LookupString(string, &symbol);
4967     if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
4968   }
4969   // Can't use set_symbol_table because SymbolTable::cast knows that
4970   // SymbolTable is a singleton and checks for identity.
4971   roots_[kSymbolTableRootIndex] = new_table;
4972   ASSERT(symbol != NULL);
4973   return symbol;
4974 }
4975
4976
4977 bool Heap::LookupSymbolIfExists(String* string, String** symbol) {
4978   if (string->IsSymbol()) {
4979     *symbol = string;
4980     return true;
4981   }
4982   return symbol_table()->LookupSymbolIfExists(string, symbol);
4983 }
4984
4985
4986 #ifdef DEBUG
4987 void Heap::ZapFromSpace() {
4988   NewSpacePageIterator it(new_space_.FromSpaceStart(),
4989                           new_space_.FromSpaceEnd());
4990   while (it.has_next()) {
4991     NewSpacePage* page = it.next();
4992     for (Address cursor = page->body(), limit = page->body_limit();
4993          cursor < limit;
4994          cursor += kPointerSize) {
4995       Memory::Address_at(cursor) = kFromSpaceZapValue;
4996     }
4997   }
4998 }
4999 #endif  // DEBUG
5000
5001
5002 void Heap::IterateAndMarkPointersToFromSpace(Address start,
5003                                              Address end,
5004                                              ObjectSlotCallback callback) {
5005   Address slot_address = start;
5006
5007   // We are not collecting slots on new space objects during mutation, so
5008   // we have to scan for pointers to evacuation candidates when we promote
5009   // objects. But we should not record any slots in non-black objects: a
5010   // grey object's slots will be rescanned anyway, and a white object
5011   // might not survive until the end of the collection, so recording its
5012   // slots would violate the invariant.
5013   bool record_slots = false;
5014   if (incremental_marking()->IsCompacting()) {
5015     MarkBit mark_bit = Marking::MarkBitFrom(HeapObject::FromAddress(start));
5016     record_slots = Marking::IsBlack(mark_bit);
5017   }
5018
5019   while (slot_address < end) {
5020     Object** slot = reinterpret_cast<Object**>(slot_address);
5021     Object* object = *slot;
5022     // If the store buffer becomes overfull we mark pages as being exempt from
5023     // the store buffer.  These pages are scanned to find pointers that point
5024     // to the new space.  In that case we may hit newly promoted objects and
5025     // fix the pointers before the promotion queue gets to them.  Thus the 'if'.
5026     if (object->IsHeapObject()) {
5027       if (Heap::InFromSpace(object)) {
5028         callback(reinterpret_cast<HeapObject**>(slot),
5029                  HeapObject::cast(object));
5030         Object* new_object = *slot;
5031         if (InNewSpace(new_object)) {
5032           SLOW_ASSERT(Heap::InToSpace(new_object));
5033           SLOW_ASSERT(new_object->IsHeapObject());
5034           store_buffer_.EnterDirectlyIntoStoreBuffer(
5035               reinterpret_cast<Address>(slot));
5036         }
5037         SLOW_ASSERT(!MarkCompactCollector::IsOnEvacuationCandidate(new_object));
5038       } else if (record_slots &&
5039                  MarkCompactCollector::IsOnEvacuationCandidate(object)) {
5040         mark_compact_collector()->RecordSlot(slot, slot, object);
5041       }
5042     }
5043     slot_address += kPointerSize;
5044   }
5045 }
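
// The scan above walks [start, end) one pointer-sized slot at a time,
// reinterpreting each word as an object pointer and deciding per slot whether
// it needs to be forwarded or recorded.  A standalone sketch of the same slot
// walk over a plain buffer, with caller-supplied callbacks; all names here
// are hypothetical.
#if 0
typedef void (*SlotVisitor)(void** slot);

static void VisitPointerSlots(char* start,
                              char* end,
                              bool (*is_interesting)(void* value),
                              SlotVisitor visitor) {
  for (char* cursor = start; cursor < end; cursor += sizeof(void*)) {
    void** slot = reinterpret_cast<void**>(cursor);
    if (is_interesting(*slot)) visitor(slot);
  }
}
#endif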
5046
5047
5048 #ifdef DEBUG
5049 typedef bool (*CheckStoreBufferFilter)(Object** addr);
5050
5051
5052 bool IsAMapPointerAddress(Object** addr) {
5053   uintptr_t a = reinterpret_cast<uintptr_t>(addr);
5054   int mod = a % Map::kSize;
5055   return mod >= Map::kPointerFieldsBeginOffset &&
5056          mod < Map::kPointerFieldsEndOffset;
5057 }
5058
5059
5060 bool EverythingsAPointer(Object** addr) {
5061   return true;
5062 }
5063
5064
5065 static void CheckStoreBuffer(Heap* heap,
5066                              Object** current,
5067                              Object** limit,
5068                              Object**** store_buffer_position,
5069                              Object*** store_buffer_top,
5070                              CheckStoreBufferFilter filter,
5071                              Address special_garbage_start,
5072                              Address special_garbage_end) {
5073   Map* free_space_map = heap->free_space_map();
5074   for ( ; current < limit; current++) {
5075     Object* o = *current;
5076     Address current_address = reinterpret_cast<Address>(current);
5077     // Skip free space.
5078     if (o == free_space_map) {
5080       FreeSpace* free_space =
5081           FreeSpace::cast(HeapObject::FromAddress(current_address));
5082       int skip = free_space->Size();
5083       ASSERT(current_address + skip <= reinterpret_cast<Address>(limit));
5084       ASSERT(skip > 0);
5085       current_address += skip - kPointerSize;
5086       current = reinterpret_cast<Object**>(current_address);
5087       continue;
5088     }
5089     // Skip the current linear allocation area between top and limit, which
5090     // is not marked with the free space map but can contain junk.
5091     if (current_address == special_garbage_start &&
5092         special_garbage_end != special_garbage_start) {
5093       current_address = special_garbage_end - kPointerSize;
5094       current = reinterpret_cast<Object**>(current_address);
5095       continue;
5096     }
5097     if (!(*filter)(current)) continue;
5098     ASSERT(current_address < special_garbage_start ||
5099            current_address >= special_garbage_end);
5100     ASSERT(reinterpret_cast<uintptr_t>(o) != kFreeListZapValue);
5101     // We have to check that the pointer does not point into new space
5102     // without trying to cast it to a heap object since the hash field of
5103     // a string can contain values like 1 and 3 which are tagged null
5104     // pointers.
5105     if (!heap->InNewSpace(o)) continue;
5106     while (**store_buffer_position < current &&
5107            *store_buffer_position < store_buffer_top) {
5108       (*store_buffer_position)++;
5109     }
5110     if (**store_buffer_position != current ||
5111         *store_buffer_position == store_buffer_top) {
5112       Object** obj_start = current;
5113       while (!(*obj_start)->IsMap()) obj_start--;
5114       UNREACHABLE();
5115     }
5116   }
5117 }
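
// The verification above walks the page and the sorted store buffer in
// lockstep: for every slot holding a new-space pointer, the store-buffer
// cursor is advanced up to that slot's address, and a mismatch means the slot
// is missing from the buffer.  A standalone sketch of that merge-style check
// over sorted addresses; the names and containers are hypothetical.
#if 0
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <vector>

// interesting_slots is assumed to be in ascending address order, as produced
// by a linear page scan; recorded_slots is sorted here before the check.
static void CheckAllRecorded(const std::vector<uintptr_t>& interesting_slots,
                             std::vector<uintptr_t> recorded_slots) {
  std::sort(recorded_slots.begin(), recorded_slots.end());
  size_t position = 0;
  for (size_t i = 0; i < interesting_slots.size(); i++) {
    uintptr_t slot = interesting_slots[i];
    while (position < recorded_slots.size() &&
           recorded_slots[position] < slot) {
      position++;
    }
    // Every interesting slot must appear in the recorded set.
    assert(position < recorded_slots.size() &&
           recorded_slots[position] == slot);
  }
}
#endif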
5118
5119
5120 // Check that the store buffer contains all intergenerational pointers by
5121 // scanning a page and ensuring that all pointers to young space are in the
5122 // store buffer.
5123 void Heap::OldPointerSpaceCheckStoreBuffer() {
5124   OldSpace* space = old_pointer_space();
5125   PageIterator pages(space);
5126
5127   store_buffer()->SortUniq();
5128
5129   while (pages.has_next()) {
5130     Page* page = pages.next();
5131     Object** current = reinterpret_cast<Object**>(page->ObjectAreaStart());
5132
5133     Address end = page->ObjectAreaEnd();
5134
5135     Object*** store_buffer_position = store_buffer()->Start();
5136     Object*** store_buffer_top = store_buffer()->Top();
5137
5138     Object** limit = reinterpret_cast<Object**>(end);
5139     CheckStoreBuffer(this,
5140                      current,
5141                      limit,
5142                      &store_buffer_position,
5143                      store_buffer_top,
5144                      &EverythingsAPointer,
5145                      space->top(),
5146                      space->limit());
5147   }
5148 }
5149
5150
5151 void Heap::MapSpaceCheckStoreBuffer() {
5152   MapSpace* space = map_space();
5153   PageIterator pages(space);
5154
5155   store_buffer()->SortUniq();
5156
5157   while (pages.has_next()) {
5158     Page* page = pages.next();
5159     Object** current = reinterpret_cast<Object**>(page->ObjectAreaStart());
5160
5161     Address end = page->ObjectAreaEnd();
5162
5163     Object*** store_buffer_position = store_buffer()->Start();
5164     Object*** store_buffer_top = store_buffer()->Top();
5165
5166     Object** limit = reinterpret_cast<Object**>(end);
5167     CheckStoreBuffer(this,
5168                      current,
5169                      limit,
5170                      &store_buffer_position,
5171                      store_buffer_top,
5172                      &IsAMapPointerAddress,
5173                      space->top(),
5174                      space->limit());
5175   }
5176 }
5177
5178
5179 void Heap::LargeObjectSpaceCheckStoreBuffer() {
5180   LargeObjectIterator it(lo_space());
5181   for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
5182     // We only have code, sequential strings, or fixed arrays in large
5183     // object space, and only fixed arrays can possibly contain pointers to
5184     // the young generation.
5185     if (object->IsFixedArray()) {
5186       Object*** store_buffer_position = store_buffer()->Start();
5187       Object*** store_buffer_top = store_buffer()->Top();
5188       Object** current = reinterpret_cast<Object**>(object->address());
5189       Object** limit =
5190           reinterpret_cast<Object**>(object->address() + object->Size());
5191       CheckStoreBuffer(this,
5192                        current,
5193                        limit,
5194                        &store_buffer_position,
5195                        store_buffer_top,
5196                        &EverythingsAPointer,
5197                        NULL,
5198                        NULL);
5199     }
5200   }
5201 }
5202 #endif
5203
5204
5205 void Heap::IterateRoots(ObjectVisitor* v, VisitMode mode) {
5206   IterateStrongRoots(v, mode);
5207   IterateWeakRoots(v, mode);
5208 }
5209
5210
5211 void Heap::IterateWeakRoots(ObjectVisitor* v, VisitMode mode) {
5212   v->VisitPointer(reinterpret_cast<Object**>(&roots_[kSymbolTableRootIndex]));
5213   v->Synchronize(VisitorSynchronization::kSymbolTable);
5214   if (mode != VISIT_ALL_IN_SCAVENGE &&
5215       mode != VISIT_ALL_IN_SWEEP_NEWSPACE) {
5216     // Scavenge collections have special processing for this.
5217     external_string_table_.Iterate(v);
5218   }
5219   v->Synchronize(VisitorSynchronization::kExternalStringsTable);
5220 }
5221
5222
5223 void Heap::IterateStrongRoots(ObjectVisitor* v, VisitMode mode) {
5224   v->VisitPointers(&roots_[0], &roots_[kStrongRootListLength]);
5225   v->Synchronize(VisitorSynchronization::kStrongRootList);
5226
5227   v->VisitPointer(BitCast<Object**>(&hidden_symbol_));
5228   v->Synchronize(VisitorSynchronization::kSymbol);
5229
5230   isolate_->bootstrapper()->Iterate(v);
5231   v->Synchronize(VisitorSynchronization::kBootstrapper);
5232   isolate_->Iterate(v);
5233   v->Synchronize(VisitorSynchronization::kTop);
5234   Relocatable::Iterate(v);
5235   v->Synchronize(VisitorSynchronization::kRelocatable);
5236
5237 #ifdef ENABLE_DEBUGGER_SUPPORT
5238   isolate_->debug()->Iterate(v);
5239   if (isolate_->deoptimizer_data() != NULL) {
5240     isolate_->deoptimizer_data()->Iterate(v);
5241   }
5242 #endif
5243   v->Synchronize(VisitorSynchronization::kDebug);
5244   isolate_->compilation_cache()->Iterate(v);
5245   v->Synchronize(VisitorSynchronization::kCompilationCache);
5246
5247   // Iterate over local handles in handle scopes.
5248   isolate_->handle_scope_implementer()->Iterate(v);
5249   v->Synchronize(VisitorSynchronization::kHandleScope);
5250
5251   // Iterate over the builtin code objects and code stubs in the
5252   // heap. Note that it is not necessary to iterate over code objects
5253   // on scavenge collections.
5254   if (mode != VISIT_ALL_IN_SCAVENGE) {
5255     isolate_->builtins()->IterateBuiltins(v);
5256   }
5257   v->Synchronize(VisitorSynchronization::kBuiltins);
5258
5259   // Iterate over global handles.
5260   switch (mode) {
5261     case VISIT_ONLY_STRONG:
5262       isolate_->global_handles()->IterateStrongRoots(v);
5263       break;
5264     case VISIT_ALL_IN_SCAVENGE:
5265       isolate_->global_handles()->IterateNewSpaceStrongAndDependentRoots(v);
5266       break;
5267     case VISIT_ALL_IN_SWEEP_NEWSPACE:
5268     case VISIT_ALL:
5269       isolate_->global_handles()->IterateAllRoots(v);
5270       break;
5271   }
5272   v->Synchronize(VisitorSynchronization::kGlobalHandles);
5273
5274   // Iterate over pointers being held by inactive threads.
5275   isolate_->thread_manager()->Iterate(v);
5276   v->Synchronize(VisitorSynchronization::kThreadManager);
5277
5278   // Iterate over the pointers the Serialization/Deserialization code is
5279   // holding.
5280   // During garbage collection this keeps the partial snapshot cache alive.
5281   // During deserialization of the startup snapshot this creates the partial
5282   // snapshot cache and deserializes the objects it refers to.  During
5283   // serialization this does nothing, since the partial snapshot cache is
5284   // empty.  However the next thing we do is create the partial snapshot,
5285   // filling up the partial snapshot cache with objects it needs as we go.
5286   SerializerDeserializer::Iterate(v);
5287   // We don't do a v->Synchronize call here, because in debug mode that will
5288   // output a flag to the snapshot.  However at this point the serializer and
5289   // deserializer are deliberately a little unsynchronized (see above) so the
5290   // checking of the sync flag in the snapshot would fail.
5291 }
5292
5293
5294 // TODO(1236194): Since the heap size is configurable on the command line
5295 // and through the API, we should gracefully handle the case that the heap
5296 // size is not big enough to fit all the initial objects.
5297 bool Heap::ConfigureHeap(int max_semispace_size,
5298                          intptr_t max_old_gen_size,
5299                          intptr_t max_executable_size) {
5300   if (HasBeenSetUp()) return false;
5301
5302   if (max_semispace_size > 0) {
5303     if (max_semispace_size < Page::kPageSize) {
5304       max_semispace_size = Page::kPageSize;
5305       if (FLAG_trace_gc) {
5306         PrintF("Max semispace size cannot be less than %dkbytes\n",
5307                Page::kPageSize >> 10);
5308       }
5309     }
5310     max_semispace_size_ = max_semispace_size;
5311   }
5312
5313   if (Snapshot::IsEnabled()) {
5314     // If we are using a snapshot we always reserve the default amount
5315     // of memory for each semispace because code in the snapshot has
5316     // write-barrier code that relies on the size and alignment of new
5317     // space.  We therefore cannot use a larger max semispace size
5318     // than the default reserved semispace size.
5319     if (max_semispace_size_ > reserved_semispace_size_) {
5320       max_semispace_size_ = reserved_semispace_size_;
5321       if (FLAG_trace_gc) {
5322         PrintF("Max semispace size cannot be more than %dkbytes\n",
5323                reserved_semispace_size_ >> 10);
5324       }
5325     }
5326   } else {
5327     // If we are not using snapshots we reserve space for the actual
5328     // max semispace size.
5329     reserved_semispace_size_ = max_semispace_size_;
5330   }
5331
5332   if (max_old_gen_size > 0) max_old_generation_size_ = max_old_gen_size;
5333   if (max_executable_size > 0) {
5334     max_executable_size_ = RoundUp(max_executable_size, Page::kPageSize);
5335   }
5336
5337   // The max executable size must be less than or equal to the max old
5338   // generation size.
5339   if (max_executable_size_ > max_old_generation_size_) {
5340     max_executable_size_ = max_old_generation_size_;
5341   }
5342
5343   // The new space size must be a power of two to support single-bit testing
5344   // for containment.
5345   max_semispace_size_ = RoundUpToPowerOf2(max_semispace_size_);
5346   reserved_semispace_size_ = RoundUpToPowerOf2(reserved_semispace_size_);
5347   initial_semispace_size_ = Min(initial_semispace_size_, max_semispace_size_);
5348   external_allocation_limit_ = 10 * max_semispace_size_;
5349
5350   // The old generation is paged and needs at least one page for each space.
5351   int paged_space_count = LAST_PAGED_SPACE - FIRST_PAGED_SPACE + 1;
5352   max_old_generation_size_ = Max(static_cast<intptr_t>(paged_space_count *
5353                                                        Page::kPageSize),
5354                                  RoundUp(max_old_generation_size_,
5355                                          Page::kPageSize));
5356
5357   configured_ = true;
5358   return true;
5359 }
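
// ConfigureHeap rounds the semispace sizes up to a power of two so that
// new-space containment can be tested with a single mask, and rounds the old
// generation up to whole pages.  A standalone sketch of both roundings; the
// page size argument is a placeholder, not Page::kPageSize.
#if 0
#include <cstdint>

// Round a 32-bit value up to the next power of two using classic bit
// smearing.  Assumes 0 < x <= 2^31.
static uint32_t RoundUpToPowerOfTwo(uint32_t x) {
  x--;
  x |= x >> 1;
  x |= x >> 2;
  x |= x >> 4;
  x |= x >> 8;
  x |= x >> 16;
  return x + 1;
}

// Round a size up to a multiple of a power-of-two page size.
static uintptr_t RoundUpToPageSize(uintptr_t size, uintptr_t page_size) {
  return (size + page_size - 1) & ~(page_size - 1);
}
#endif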
5360
5361
5362 bool Heap::ConfigureHeapDefault() {
5363   return ConfigureHeap(static_cast<intptr_t>(FLAG_max_new_space_size / 2) * KB,
5364                        static_cast<intptr_t>(FLAG_max_old_space_size) * MB,
5365                        static_cast<intptr_t>(FLAG_max_executable_size) * MB);
5366 }
5367
5368
5369 void Heap::RecordStats(HeapStats* stats, bool take_snapshot) {
5370   *stats->start_marker = HeapStats::kStartMarker;
5371   *stats->end_marker = HeapStats::kEndMarker;
5372   *stats->new_space_size = new_space_.SizeAsInt();
5373   *stats->new_space_capacity = static_cast<int>(new_space_.Capacity());
5374   *stats->old_pointer_space_size = old_pointer_space_->Size();
5375   *stats->old_pointer_space_capacity = old_pointer_space_->Capacity();
5376   *stats->old_data_space_size = old_data_space_->Size();
5377   *stats->old_data_space_capacity = old_data_space_->Capacity();
5378   *stats->code_space_size = code_space_->Size();
5379   *stats->code_space_capacity = code_space_->Capacity();
5380   *stats->map_space_size = map_space_->Size();
5381   *stats->map_space_capacity = map_space_->Capacity();
5382   *stats->cell_space_size = cell_space_->Size();
5383   *stats->cell_space_capacity = cell_space_->Capacity();
5384   *stats->lo_space_size = lo_space_->Size();
5385   isolate_->global_handles()->RecordStats(stats);
5386   *stats->memory_allocator_size = isolate()->memory_allocator()->Size();
5387   *stats->memory_allocator_capacity =
5388       isolate()->memory_allocator()->Size() +
5389       isolate()->memory_allocator()->Available();
5390   *stats->os_error = OS::GetLastError();
5392   if (take_snapshot) {
5393     HeapIterator iterator;
5394     for (HeapObject* obj = iterator.next();
5395          obj != NULL;
5396          obj = iterator.next()) {
5397       InstanceType type = obj->map()->instance_type();
5398       ASSERT(0 <= type && type <= LAST_TYPE);
5399       stats->objects_per_type[type]++;
5400       stats->size_per_type[type] += obj->Size();
5401     }
5402   }
5403 }
5404
5405
5406 intptr_t Heap::PromotedSpaceSize() {
5407   return old_pointer_space_->Size()
5408       + old_data_space_->Size()
5409       + code_space_->Size()
5410       + map_space_->Size()
5411       + cell_space_->Size()
5412       + lo_space_->Size();
5413 }
5414
5415
5416 int Heap::PromotedExternalMemorySize() {
5417   if (amount_of_external_allocated_memory_
5418       <= amount_of_external_allocated_memory_at_last_global_gc_) return 0;
5419   return amount_of_external_allocated_memory_
5420       - amount_of_external_allocated_memory_at_last_global_gc_;
5421 }
5422
5423 #ifdef DEBUG
5424
5425 // Tags 0, 1, and 3 are used. Use 2 to mark a visited HeapObject.
5426 static const int kMarkTag = 2;
5427
5428
5429 class HeapDebugUtils {
5430  public:
5431   explicit HeapDebugUtils(Heap* heap)
5432     : search_for_any_global_(false),
5433       search_target_(NULL),
5434       found_target_(false),
5435       object_stack_(20),
5436       heap_(heap) {
5437   }
5438
5439   class MarkObjectVisitor : public ObjectVisitor {
5440    public:
5441     explicit MarkObjectVisitor(HeapDebugUtils* utils) : utils_(utils) { }
5442
5443     void VisitPointers(Object** start, Object** end) {
5444       // Mark all HeapObject pointers in [start, end)
5445       for (Object** p = start; p < end; p++) {
5446         if ((*p)->IsHeapObject())
5447           utils_->MarkObjectRecursively(p);
5448       }
5449     }
5450
5451     HeapDebugUtils* utils_;
5452   };
5453
5454   void MarkObjectRecursively(Object** p) {
5455     if (!(*p)->IsHeapObject()) return;
5456
5457     HeapObject* obj = HeapObject::cast(*p);
5458
5459     Object* map = obj->map();
5460
5461     if (!map->IsHeapObject()) return;  // visited before
5462
5463     if (found_target_) return;  // stop if target found
5464     object_stack_.Add(obj);
5465     if ((search_for_any_global_ && obj->IsJSGlobalObject()) ||
5466         (!search_for_any_global_ && (obj == search_target_))) {
5467       found_target_ = true;
5468       return;
5469     }
5470
5471     // not visited yet
5472     Map* map_p = reinterpret_cast<Map*>(HeapObject::cast(map));
5473
5474     Address map_addr = map_p->address();
5475
5476     obj->set_map_no_write_barrier(reinterpret_cast<Map*>(map_addr + kMarkTag));
5477
5478     MarkObjectRecursively(&map);
5479
5480     MarkObjectVisitor mark_visitor(this);
5481
5482     obj->IterateBody(map_p->instance_type(), obj->SizeFromMap(map_p),
5483                      &mark_visitor);
5484
5485     if (!found_target_)  // don't pop if found the target
5486       object_stack_.RemoveLast();
5487   }
5488
5489
5490   class UnmarkObjectVisitor : public ObjectVisitor {
5491    public:
5492     explicit UnmarkObjectVisitor(HeapDebugUtils* utils) : utils_(utils) { }
5493
5494     void VisitPointers(Object** start, Object** end) {
5495       // Unmark all HeapObject pointers in [start, end)
5496       for (Object** p = start; p < end; p++) {
5497         if ((*p)->IsHeapObject())
5498           utils_->UnmarkObjectRecursively(p);
5499       }
5500     }
5501
5502     HeapDebugUtils* utils_;
5503   };
5504
5505
5506   void UnmarkObjectRecursively(Object** p) {
5507     if (!(*p)->IsHeapObject()) return;
5508
5509     HeapObject* obj = HeapObject::cast(*p);
5510
5511     Object* map = obj->map();
5512
5513     if (map->IsHeapObject()) return;  // unmarked already
5514
5515     Address map_addr = reinterpret_cast<Address>(map);
5516
5517     map_addr -= kMarkTag;
5518
5519     ASSERT_TAG_ALIGNED(map_addr);
5520
5521     HeapObject* map_p = HeapObject::FromAddress(map_addr);
5522
5523     obj->set_map_no_write_barrier(reinterpret_cast<Map*>(map_p));
5524
5525     UnmarkObjectRecursively(reinterpret_cast<Object**>(&map_p));
5526
5527     UnmarkObjectVisitor unmark_visitor(this);
5528
5529     obj->IterateBody(Map::cast(map_p)->instance_type(),
5530                      obj->SizeFromMap(Map::cast(map_p)),
5531                      &unmark_visitor);
5532   }
5533
5534
5535   void MarkRootObjectRecursively(Object** root) {
5536     if (search_for_any_global_) {
5537       ASSERT(search_target_ == NULL);
5538     } else {
5539       ASSERT(search_target_->IsHeapObject());
5540     }
5541     found_target_ = false;
5542     object_stack_.Clear();
5543
5544     MarkObjectRecursively(root);
5545     UnmarkObjectRecursively(root);
5546
5547     if (found_target_) {
5548       PrintF("=====================================\n");
5549       PrintF("====        Path to object       ====\n");
5550       PrintF("=====================================\n\n");
5551
5552       ASSERT(!object_stack_.is_empty());
5553       for (int i = 0; i < object_stack_.length(); i++) {
5554         if (i > 0) PrintF("\n     |\n     |\n     V\n\n");
5555         Object* obj = object_stack_[i];
5556         obj->Print();
5557       }
5558       PrintF("=====================================\n");
5559     }
5560   }
5561
5562   // Helper class for visiting HeapObjects recursively.
5563   class MarkRootVisitor: public ObjectVisitor {
5564    public:
5565     explicit MarkRootVisitor(HeapDebugUtils* utils) : utils_(utils) { }
5566
5567     void VisitPointers(Object** start, Object** end) {
5568       // Visit all HeapObject pointers in [start, end)
5569       for (Object** p = start; p < end; p++) {
5570         if ((*p)->IsHeapObject())
5571           utils_->MarkRootObjectRecursively(p);
5572       }
5573     }
5574
5575     HeapDebugUtils* utils_;
5576   };
5577
5578   bool search_for_any_global_;
5579   Object* search_target_;
5580   bool found_target_;
5581   List<Object*> object_stack_;
5582   Heap* heap_;
5583
5584   friend class Heap;
5585 };
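
// MarkObjectRecursively above flags a visited object by adding kMarkTag to
// its map word, so the map temporarily stops looking like a heap object, and
// UnmarkObjectRecursively subtracts the tag again.  A standalone sketch of
// the same low-bit pointer-tagging trick on an ordinary aligned pointer;
// kSketchMark is a placeholder tag, not V8's kMarkTag.
#if 0
#include <cstdint>

static const uintptr_t kSketchMark = 2;  // fits in the alignment bits

static void* MarkPointer(void* p) {
  return reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(p) + kSketchMark);
}

static void* UnmarkPointer(void* p) {
  return reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(p) - kSketchMark);
}

static bool IsMarkedPointer(void* p) {
  // On a 4- or 8-byte aligned, untagged pointer this bit is otherwise zero.
  return (reinterpret_cast<uintptr_t>(p) & kSketchMark) != 0;
}
#endif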
5586
5587 #endif
5588
5589 bool Heap::SetUp(bool create_heap_objects) {
5590 #ifdef DEBUG
5591   allocation_timeout_ = FLAG_gc_interval;
5592   debug_utils_ = new HeapDebugUtils(this);
5593 #endif
5594
5595   // Initialize heap spaces and initial maps and objects. Whenever something
5596   // goes wrong, just return false. The caller should check the results and
5597   // call Heap::TearDown() to release allocated memory.
5598   //
5599   // If the heap is not yet configured (e.g., through the API), configure it.
5600   // Configuration is based on the flags new-space-size (really the semispace
5601   // size) and old-space-size if set, or on the initial values of
5602   // semispace_size_ and old_generation_size_ otherwise.
5603   if (!configured_) {
5604     if (!ConfigureHeapDefault()) return false;
5605   }
5606
5607   gc_initializer_mutex->Lock();
5608   static bool initialized_gc = false;
5609   if (!initialized_gc) {
5610       initialized_gc = true;
5611       InitializeScavengingVisitorsTables();
5612       NewSpaceScavenger::Initialize();
5613       MarkCompactCollector::Initialize();
5614   }
5615   gc_initializer_mutex->Unlock();
5616
5617   MarkMapPointersAsEncoded(false);
5618
5619   // Set up memory allocator.
5620   if (!isolate_->memory_allocator()->SetUp(MaxReserved(), MaxExecutableSize()))
5621       return false;
5622
5623   // Set up new space.
5624   if (!new_space_.SetUp(reserved_semispace_size_, max_semispace_size_)) {
5625     return false;
5626   }
5627
5628   // Initialize old pointer space.
5629   old_pointer_space_ =
5630       new OldSpace(this,
5631                    max_old_generation_size_,
5632                    OLD_POINTER_SPACE,
5633                    NOT_EXECUTABLE);
5634   if (old_pointer_space_ == NULL) return false;
5635   if (!old_pointer_space_->SetUp()) return false;
5636
5637   // Initialize old data space.
5638   old_data_space_ =
5639       new OldSpace(this,
5640                    max_old_generation_size_,
5641                    OLD_DATA_SPACE,
5642                    NOT_EXECUTABLE);
5643   if (old_data_space_ == NULL) return false;
5644   if (!old_data_space_->SetUp()) return false;
5645
5646   // Initialize the code space, setting its maximum capacity to the old
5647   // generation size. It needs executable memory.
5648   // On 64-bit platform(s), we put all code objects in a 2 GB range of
5649   // virtual address space, so that they can call each other with near calls.
5650   if (code_range_size_ > 0) {
5651     if (!isolate_->code_range()->SetUp(code_range_size_)) {
5652       return false;
5653     }
5654   }
5655
5656   code_space_ =
5657       new OldSpace(this, max_old_generation_size_, CODE_SPACE, EXECUTABLE);
5658   if (code_space_ == NULL) return false;
5659   if (!code_space_->SetUp()) return false;
5660
5661   // Initialize map space.
5662   map_space_ = new MapSpace(this,
5663                             max_old_generation_size_,
5664                             FLAG_max_map_space_pages,
5665                             MAP_SPACE);
5666   if (map_space_ == NULL) return false;
5667   if (!map_space_->SetUp()) return false;
5668
5669   // Initialize global property cell space.
5670   cell_space_ = new CellSpace(this, max_old_generation_size_, CELL_SPACE);
5671   if (cell_space_ == NULL) return false;
5672   if (!cell_space_->SetUp()) return false;
5673
5674   // The large object space may contain code or data.  We set the memory
5675   // to be non-executable here for safety, but this means we need to enable it
5676   // explicitly when allocating large code objects.
5677   lo_space_ = new LargeObjectSpace(this, max_old_generation_size_, LO_SPACE);
5678   if (lo_space_ == NULL) return false;
5679   if (!lo_space_->SetUp()) return false;
5680
5681   // Set up the seed that is used to randomize the string hash function.
5682   ASSERT(hash_seed() == 0);
5683   if (FLAG_randomize_hashes) {
5684     if (FLAG_hash_seed == 0) {
5685       set_hash_seed(
5686           Smi::FromInt(V8::RandomPrivate(isolate()) & 0x3fffffff));
5687     } else {
5688       set_hash_seed(Smi::FromInt(FLAG_hash_seed));
5689     }
5690   }
5691
5692   if (create_heap_objects) {
5693     // Create initial maps.
5694     if (!CreateInitialMaps()) return false;
5695     if (!CreateApiObjects()) return false;
5696
5697     // Create initial objects
5698     if (!CreateInitialObjects()) return false;
5699
5700     global_contexts_list_ = undefined_value();
5701   }
5702
5703   LOG(isolate_, IntPtrTEvent("heap-capacity", Capacity()));
5704   LOG(isolate_, IntPtrTEvent("heap-available", Available()));
5705
5706   store_buffer()->SetUp();
5707
5708   return true;
5709 }
5710
5711
5712 void Heap::SetStackLimits() {
5713   ASSERT(isolate_ != NULL);
5714   ASSERT(isolate_ == isolate());
5715   // On 64-bit machines, pointers are generally out of range of Smis.  We
5716   // write something that looks like an out-of-range Smi to the GC.
5717
5718   // Set up the special root array entries containing the stack limits.
5719   // These are actually addresses, but the tag makes the GC ignore them.
5720   roots_[kStackLimitRootIndex] =
5721       reinterpret_cast<Object*>(
5722           (isolate_->stack_guard()->jslimit() & ~kSmiTagMask) | kSmiTag);
5723   roots_[kRealStackLimitRootIndex] =
5724       reinterpret_cast<Object*>(
5725           (isolate_->stack_guard()->real_jslimit() & ~kSmiTagMask) | kSmiTag);
5726 }
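
// SetStackLimits hides raw addresses from the GC by clearing the low tag bits
// and installing the Smi tag, so the values read back as (out-of-range)
// immediates rather than heap pointers.  A standalone sketch of that
// mask-and-tag step; the constants are placeholders following V8's convention
// of a Smi tag of 0 in the low bit.
#if 0
#include <cstdint>

static const uintptr_t kSketchSmiTag = 0;      // low bit 0 means "immediate"
static const uintptr_t kSketchSmiTagMask = 1;  // one tag bit

static uintptr_t DisguiseAddressAsImmediate(uintptr_t address) {
  // Clear the tag bit and install the immediate tag; the result is no longer
  // a usable pointer, but a tag-checking visitor will skip over it.
  return (address & ~kSketchSmiTagMask) | kSketchSmiTag;
}
#endif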
5727
5728
5729 void Heap::TearDown() {
5730   if (FLAG_print_cumulative_gc_stat) {
5731     PrintF("\n\n");
5732     PrintF("gc_count=%d ", gc_count_);
5733     PrintF("mark_sweep_count=%d ", ms_count_);
5734     PrintF("max_gc_pause=%d ", get_max_gc_pause());
5735     PrintF("min_in_mutator=%d ", get_min_in_mutator());
5736     PrintF("max_alive_after_gc=%" V8_PTR_PREFIX "d ",
5737            get_max_alive_after_gc());
5738     PrintF("\n\n");
5739   }
5740
5741   isolate_->global_handles()->TearDown();
5742
5743   external_string_table_.TearDown();
5744
5745   new_space_.TearDown();
5746
5747   if (old_pointer_space_ != NULL) {
5748     old_pointer_space_->TearDown();
5749     delete old_pointer_space_;
5750     old_pointer_space_ = NULL;
5751   }
5752
5753   if (old_data_space_ != NULL) {
5754     old_data_space_->TearDown();
5755     delete old_data_space_;
5756     old_data_space_ = NULL;
5757   }
5758
5759   if (code_space_ != NULL) {
5760     code_space_->TearDown();
5761     delete code_space_;
5762     code_space_ = NULL;
5763   }
5764
5765   if (map_space_ != NULL) {
5766     map_space_->TearDown();
5767     delete map_space_;
5768     map_space_ = NULL;
5769   }
5770
5771   if (cell_space_ != NULL) {
5772     cell_space_->TearDown();
5773     delete cell_space_;
5774     cell_space_ = NULL;
5775   }
5776
5777   if (lo_space_ != NULL) {
5778     lo_space_->TearDown();
5779     delete lo_space_;
5780     lo_space_ = NULL;
5781   }
5782
5783   store_buffer()->TearDown();
5784   incremental_marking()->TearDown();
5785
5786   isolate_->memory_allocator()->TearDown();
5787
5788 #ifdef DEBUG
5789   delete debug_utils_;
5790   debug_utils_ = NULL;
5791 #endif
5792 }
5793
5794
5795 void Heap::Shrink() {
5796   // Try to shrink all paged spaces.
5797   PagedSpaces spaces;
5798   for (PagedSpace* space = spaces.next();
5799        space != NULL;
5800        space = spaces.next()) {
5801     space->ReleaseAllUnusedPages();
5802   }
5803 }
5804
5805
5806 void Heap::AddGCPrologueCallback(GCPrologueCallback callback, GCType gc_type) {
5807   ASSERT(callback != NULL);
5808   GCPrologueCallbackPair pair(callback, gc_type);
5809   ASSERT(!gc_prologue_callbacks_.Contains(pair));
5810   return gc_prologue_callbacks_.Add(pair);
5811 }
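
// Usage sketch (an illustrative addition; MyGCPrologue is a hypothetical
// callback and the GCType/GCCallbackFlags signature is assumed from the
// public v8.h API):
//
//   static void MyGCPrologue(GCType type, GCCallbackFlags flags) {
//     // e.g. drop caches that must not survive a collection
//   }
//   ...
//   heap->AddGCPrologueCallback(MyGCPrologue, kGCTypeAll);
//
// Registering the same (callback, gc_type) pair twice trips the ASSERT above.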
5812
5813
5814 void Heap::RemoveGCPrologueCallback(GCPrologueCallback callback) {
5815   ASSERT(callback != NULL);
5816   for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
5817     if (gc_prologue_callbacks_[i].callback == callback) {
5818       gc_prologue_callbacks_.Remove(i);
5819       return;
5820     }
5821   }
5822   UNREACHABLE();
5823 }
5824
5825
5826 void Heap::AddGCEpilogueCallback(GCEpilogueCallback callback, GCType gc_type) {
5827   ASSERT(callback != NULL);
5828   GCEpilogueCallbackPair pair(callback, gc_type);
5829   ASSERT(!gc_epilogue_callbacks_.Contains(pair));
5830   return gc_epilogue_callbacks_.Add(pair);
5831 }
5832
5833
5834 void Heap::RemoveGCEpilogueCallback(GCEpilogueCallback callback) {
5835   ASSERT(callback != NULL);
5836   for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
5837     if (gc_epilogue_callbacks_[i].callback == callback) {
5838       gc_epilogue_callbacks_.Remove(i);
5839       return;
5840     }
5841   }
5842   UNREACHABLE();
5843 }
5844
5845
5846 #ifdef DEBUG
5847
5848 class PrintHandleVisitor: public ObjectVisitor {
5849  public:
5850   void VisitPointers(Object** start, Object** end) {
5851     for (Object** p = start; p < end; p++)
5852       PrintF("  handle %p to %p\n",
5853              reinterpret_cast<void*>(p),
5854              reinterpret_cast<void*>(*p));
5855   }
5856 };
5857
5858 void Heap::PrintHandles() {
5859   PrintF("Handles:\n");
5860   PrintHandleVisitor v;
5861   isolate_->handle_scope_implementer()->Iterate(&v);
5862 }
5863
5864 #endif
5865
5866
5867 Space* AllSpaces::next() {
5868   switch (counter_++) {
5869     case NEW_SPACE:
5870       return HEAP->new_space();
5871     case OLD_POINTER_SPACE:
5872       return HEAP->old_pointer_space();
5873     case OLD_DATA_SPACE:
5874       return HEAP->old_data_space();
5875     case CODE_SPACE:
5876       return HEAP->code_space();
5877     case MAP_SPACE:
5878       return HEAP->map_space();
5879     case CELL_SPACE:
5880       return HEAP->cell_space();
5881     case LO_SPACE:
5882       return HEAP->lo_space();
5883     default:
5884       return NULL;
5885   }
5886 }
5887
5888
5889 PagedSpace* PagedSpaces::next() {
5890   switch (counter_++) {
5891     case OLD_POINTER_SPACE:
5892       return HEAP->old_pointer_space();
5893     case OLD_DATA_SPACE:
5894       return HEAP->old_data_space();
5895     case CODE_SPACE:
5896       return HEAP->code_space();
5897     case MAP_SPACE:
5898       return HEAP->map_space();
5899     case CELL_SPACE:
5900       return HEAP->cell_space();
5901     default:
5902       return NULL;
5903   }
5904 }
5905
5906
5907
5908 OldSpace* OldSpaces::next() {
5909   switch (counter_++) {
5910     case OLD_POINTER_SPACE:
5911       return HEAP->old_pointer_space();
5912     case OLD_DATA_SPACE:
5913       return HEAP->old_data_space();
5914     case CODE_SPACE:
5915       return HEAP->code_space();
5916     default:
5917       return NULL;
5918   }
5919 }
5920
5921
5922 SpaceIterator::SpaceIterator()
5923     : current_space_(FIRST_SPACE),
5924       iterator_(NULL),
5925       size_func_(NULL) {
5926 }
5927
5928
5929 SpaceIterator::SpaceIterator(HeapObjectCallback size_func)
5930     : current_space_(FIRST_SPACE),
5931       iterator_(NULL),
5932       size_func_(size_func) {
5933 }
5934
5935
5936 SpaceIterator::~SpaceIterator() {
5937   // Delete active iterator if any.
5938   delete iterator_;
5939 }
5940
5941
5942 bool SpaceIterator::has_next() {
5943   // Iterate until no more spaces.
5944   return current_space_ != LAST_SPACE;
5945 }
5946
5947
5948 ObjectIterator* SpaceIterator::next() {
5949   if (iterator_ != NULL) {
5950     delete iterator_;
5951     iterator_ = NULL;
5952     // Move to the next space.
5953     current_space_++;
5954     if (current_space_ > LAST_SPACE) {
5955       return NULL;
5956     }
5957   }
5958
5959   // Return iterator for the new current space.
5960   return CreateIterator();
5961 }
5962
5963
5964 // Create an iterator for the current space.
5965 ObjectIterator* SpaceIterator::CreateIterator() {
5966   ASSERT(iterator_ == NULL);
5967
5968   switch (current_space_) {
5969     case NEW_SPACE:
5970       iterator_ = new SemiSpaceIterator(HEAP->new_space(), size_func_);
5971       break;
5972     case OLD_POINTER_SPACE:
5973       iterator_ = new HeapObjectIterator(HEAP->old_pointer_space(), size_func_);
5974       break;
5975     case OLD_DATA_SPACE:
5976       iterator_ = new HeapObjectIterator(HEAP->old_data_space(), size_func_);
5977       break;
5978     case CODE_SPACE:
5979       iterator_ = new HeapObjectIterator(HEAP->code_space(), size_func_);
5980       break;
5981     case MAP_SPACE:
5982       iterator_ = new HeapObjectIterator(HEAP->map_space(), size_func_);
5983       break;
5984     case CELL_SPACE:
5985       iterator_ = new HeapObjectIterator(HEAP->cell_space(), size_func_);
5986       break;
5987     case LO_SPACE:
5988       iterator_ = new LargeObjectIterator(HEAP->lo_space(), size_func_);
5989       break;
5990   }
5991
5992   // Return the newly allocated iterator.
5993   ASSERT(iterator_ != NULL);
5994   return iterator_;
5995 }
5996
5997
5998 class HeapObjectsFilter {
5999  public:
6000   virtual ~HeapObjectsFilter() {}
6001   virtual bool SkipObject(HeapObject* object) = 0;
6002 };
6003
6004
6005 class UnreachableObjectsFilter : public HeapObjectsFilter {
6006  public:
6007   UnreachableObjectsFilter() {
6008     MarkReachableObjects();
6009   }
6010
6011   ~UnreachableObjectsFilter() {
6012     Isolate::Current()->heap()->mark_compact_collector()->ClearMarkbits();
6013   }
6014
6015   bool SkipObject(HeapObject* object) {
6016     MarkBit mark_bit = Marking::MarkBitFrom(object);
6017     return !mark_bit.Get();
6018   }
6019
6020  private:
6021   class MarkingVisitor : public ObjectVisitor {
6022    public:
6023     MarkingVisitor() : marking_stack_(10) {}
6024
6025     void VisitPointers(Object** start, Object** end) {
6026       for (Object** p = start; p < end; p++) {
6027         if (!(*p)->IsHeapObject()) continue;
6028         HeapObject* obj = HeapObject::cast(*p);
6029         MarkBit mark_bit = Marking::MarkBitFrom(obj);
6030         if (!mark_bit.Get()) {
6031           mark_bit.Set();
6032           marking_stack_.Add(obj);
6033         }
6034       }
6035     }
6036
6037     void TransitiveClosure() {
6038       while (!marking_stack_.is_empty()) {
6039         HeapObject* obj = marking_stack_.RemoveLast();
6040         obj->Iterate(this);
6041       }
6042     }
6043
6044    private:
6045     List<HeapObject*> marking_stack_;
6046   };
6047
6048   void MarkReachableObjects() {
6049     Heap* heap = Isolate::Current()->heap();
6050     MarkingVisitor visitor;
6051     heap->IterateRoots(&visitor, VISIT_ALL);
6052     visitor.TransitiveClosure();
6053   }
6054
6055   AssertNoAllocation no_alloc;
6056 };
6057
6058
6059 HeapIterator::HeapIterator()
6060     : filtering_(HeapIterator::kNoFiltering),
6061       filter_(NULL) {
6062   Init();
6063 }
6064
6065
6066 HeapIterator::HeapIterator(HeapIterator::HeapObjectsFiltering filtering)
6067     : filtering_(filtering),
6068       filter_(NULL) {
6069   Init();
6070 }
6071
6072
6073 HeapIterator::~HeapIterator() {
6074   Shutdown();
6075 }
6076
6077
6078 void HeapIterator::Init() {
6079   // Start the iteration.
6080   space_iterator_ = new SpaceIterator;
6081   switch (filtering_) {
6082     case kFilterUnreachable:
6083       filter_ = new UnreachableObjectsFilter;
6084       break;
6085     default:
6086       break;
6087   }
6088   object_iterator_ = space_iterator_->next();
6089 }
6090
6091
6092 void HeapIterator::Shutdown() {
6093 #ifdef DEBUG
6094   // Assert that in filtering mode we have iterated through all
6095   // objects. Otherwise, the heap will be left in an inconsistent state.
6096   if (filtering_ != kNoFiltering) {
6097     ASSERT(object_iterator_ == NULL);
6098   }
6099 #endif
6100   // Make sure the last iterator is deallocated.
6101   delete space_iterator_;
6102   space_iterator_ = NULL;
6103   object_iterator_ = NULL;
6104   delete filter_;
6105   filter_ = NULL;
6106 }
6107
6108
6109 HeapObject* HeapIterator::next() {
6110   if (filter_ == NULL) return NextObject();
6111
6112   HeapObject* obj = NextObject();
6113   while (obj != NULL && filter_->SkipObject(obj)) obj = NextObject();
6114   return obj;
6115 }
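
// Illustrative use (an addition, not in the original file): callers such as
// the heap profiler walk the heap roughly like this; with kFilterUnreachable
// only objects still reachable from the roots are returned.
//
//   HeapIterator iterator(HeapIterator::kFilterUnreachable);
//   for (HeapObject* obj = iterator.next();
//        obj != NULL;
//        obj = iterator.next()) {
//     // inspect obj
//   }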
6116
6117
6118 HeapObject* HeapIterator::NextObject() {
6119   // No iterator means we are done.
6120   if (object_iterator_ == NULL) return NULL;
6121
6122   if (HeapObject* obj = object_iterator_->next_object()) {
6123     // If the current iterator has more objects we are fine.
6124     return obj;
6125   } else {
6126     // Go through the spaces looking for one that has objects.
6127     while (space_iterator_->has_next()) {
6128       object_iterator_ = space_iterator_->next();
6129       if (HeapObject* obj = object_iterator_->next_object()) {
6130         return obj;
6131       }
6132     }
6133   }
6134   // Done with the last space.
6135   object_iterator_ = NULL;
6136   return NULL;
6137 }
6138
6139
6140 void HeapIterator::reset() {
6141   // Restart the iterator.
6142   Shutdown();
6143   Init();
6144 }
6145
6146
6147 #if defined(DEBUG) || defined(LIVE_OBJECT_LIST)
6148
6149 Object* const PathTracer::kAnyGlobalObject = reinterpret_cast<Object*>(NULL);
6150
6151 class PathTracer::MarkVisitor: public ObjectVisitor {
6152  public:
6153   explicit MarkVisitor(PathTracer* tracer) : tracer_(tracer) {}
6154   void VisitPointers(Object** start, Object** end) {
6155     // Scan all HeapObject pointers in [start, end)
6156     for (Object** p = start; !tracer_->found() && (p < end); p++) {
6157       if ((*p)->IsHeapObject())
6158         tracer_->MarkRecursively(p, this);
6159     }
6160   }
6161
6162  private:
6163   PathTracer* tracer_;
6164 };
6165
6166
6167 class PathTracer::UnmarkVisitor: public ObjectVisitor {
6168  public:
6169   explicit UnmarkVisitor(PathTracer* tracer) : tracer_(tracer) {}
6170   void VisitPointers(Object** start, Object** end) {
6171     // Scan all HeapObject pointers in [start, end)
6172     for (Object** p = start; p < end; p++) {
6173       if ((*p)->IsHeapObject())
6174         tracer_->UnmarkRecursively(p, this);
6175     }
6176   }
6177
6178  private:
6179   PathTracer* tracer_;
6180 };
6181
6182
6183 void PathTracer::VisitPointers(Object** start, Object** end) {
6184   bool done = ((what_to_find_ == FIND_FIRST) && found_target_);
6185   // Visit all HeapObject pointers in [start, end)
6186   for (Object** p = start; !done && (p < end); p++) {
6187     if ((*p)->IsHeapObject()) {
6188       TracePathFrom(p);
6189       done = ((what_to_find_ == FIND_FIRST) && found_target_);
6190     }
6191   }
6192 }
6193
6194
6195 void PathTracer::Reset() {
6196   found_target_ = false;
6197   object_stack_.Clear();
6198 }
6199
6200
6201 void PathTracer::TracePathFrom(Object** root) {
6202   ASSERT((search_target_ == kAnyGlobalObject) ||
6203          search_target_->IsHeapObject());
6204   found_target_in_trace_ = false;
6205   object_stack_.Clear();
6206
6207   MarkVisitor mark_visitor(this);
6208   MarkRecursively(root, &mark_visitor);
6209
6210   UnmarkVisitor unmark_visitor(this);
6211   UnmarkRecursively(root, &unmark_visitor);
6212
6213   ProcessResults();
6214 }
6215
6216
6217 static bool SafeIsGlobalContext(HeapObject* obj) {
6218   return obj->map() == obj->GetHeap()->raw_unchecked_global_context_map();
6219 }
6220
6221
6222 void PathTracer::MarkRecursively(Object** p, MarkVisitor* mark_visitor) {
6223   if (!(*p)->IsHeapObject()) return;
6224
6225   HeapObject* obj = HeapObject::cast(*p);
6226
6227   Object* map = obj->map();
6228
6229   if (!map->IsHeapObject()) return;  // visited before
6230
6231   if (found_target_in_trace_) return;  // stop if target found
6232   object_stack_.Add(obj);
6233   if (((search_target_ == kAnyGlobalObject) && obj->IsJSGlobalObject()) ||
6234       (obj == search_target_)) {
6235     found_target_in_trace_ = true;
6236     found_target_ = true;
6237     return;
6238   }
6239
6240   bool is_global_context = SafeIsGlobalContext(obj);
6241
6242   // not visited yet
6243   Map* map_p = reinterpret_cast<Map*>(HeapObject::cast(map));
6244
6245   Address map_addr = map_p->address();
6246
6247   obj->set_map_no_write_barrier(reinterpret_cast<Map*>(map_addr + kMarkTag));
6248
6249   // Scan the object body.
6250   if (is_global_context && (visit_mode_ == VISIT_ONLY_STRONG)) {
6251     // This is specialized to scan Contexts properly.
6252     Object** start = reinterpret_cast<Object**>(obj->address() +
6253                                                 Context::kHeaderSize);
6254     Object** end = reinterpret_cast<Object**>(obj->address() +
6255         Context::kHeaderSize + Context::FIRST_WEAK_SLOT * kPointerSize);
6256     mark_visitor->VisitPointers(start, end);
6257   } else {
6258     obj->IterateBody(map_p->instance_type(),
6259                      obj->SizeFromMap(map_p),
6260                      mark_visitor);
6261   }
6262
6263   // Scan the map after the body because the body is a lot more interesting
6264   // when doing leak detection.
6265   MarkRecursively(&map, mark_visitor);
6266
6267   if (!found_target_in_trace_)  // don't pop if found the target
6268     object_stack_.RemoveLast();
6269 }
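
// Note (added for clarity): PathTracer "marks" an object by storing its map
// address plus kMarkTag back into the map slot.  The result no longer looks
// like a HeapObject, so the map->IsHeapObject() checks above and in
// UnmarkRecursively() below double as visited/unvisited tests.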
6270
6271
6272 void PathTracer::UnmarkRecursively(Object** p, UnmarkVisitor* unmark_visitor) {
6273   if (!(*p)->IsHeapObject()) return;
6274
6275   HeapObject* obj = HeapObject::cast(*p);
6276
6277   Object* map = obj->map();
6278
6279   if (map->IsHeapObject()) return;  // unmarked already
6280
6281   Address map_addr = reinterpret_cast<Address>(map);
6282
6283   map_addr -= kMarkTag;
6284
6285   ASSERT_TAG_ALIGNED(map_addr);
6286
6287   HeapObject* map_p = HeapObject::FromAddress(map_addr);
6288
6289   obj->set_map_no_write_barrier(reinterpret_cast<Map*>(map_p));
6290
6291   UnmarkRecursively(reinterpret_cast<Object**>(&map_p), unmark_visitor);
6292
6293   obj->IterateBody(Map::cast(map_p)->instance_type(),
6294                    obj->SizeFromMap(Map::cast(map_p)),
6295                    unmark_visitor);
6296 }
6297
6298
6299 void PathTracer::ProcessResults() {
6300   if (found_target_) {
6301     PrintF("=====================================\n");
6302     PrintF("====        Path to object       ====\n");
6303     PrintF("=====================================\n\n");
6304
6305     ASSERT(!object_stack_.is_empty());
6306     for (int i = 0; i < object_stack_.length(); i++) {
6307       if (i > 0) PrintF("\n     |\n     |\n     V\n\n");
6308       Object* obj = object_stack_[i];
6309 #ifdef OBJECT_PRINT
6310       obj->Print();
6311 #else
6312       obj->ShortPrint();
6313 #endif
6314     }
6315     PrintF("=====================================\n");
6316   }
6317 }
6318 #endif  // DEBUG || LIVE_OBJECT_LIST
6319
6320
6321 #ifdef DEBUG
6322 // Triggers a depth-first traversal of reachable objects from the roots
6323 // and finds and prints a path to a specific heap object.
6324 void Heap::TracePathToObject(Object* target) {
6325   PathTracer tracer(target, PathTracer::FIND_ALL, VISIT_ALL);
6326   IterateRoots(&tracer, VISIT_ONLY_STRONG);
6327 }
6328
6329
6330 // Triggers a depth-first traversal of reachable objects from the roots
6331 // and finds and prints a path to any global object. Useful for
6332 // determining the source of leaks of global objects.
6333 void Heap::TracePathToGlobal() {
6334   PathTracer tracer(PathTracer::kAnyGlobalObject,
6335                     PathTracer::FIND_ALL,
6336                     VISIT_ALL);
6337   IterateRoots(&tracer, VISIT_ONLY_STRONG);
6338 }
6339 #endif
6340
6341
6342 static intptr_t CountTotalHolesSize() {
6343   intptr_t holes_size = 0;
6344   OldSpaces spaces;
6345   for (OldSpace* space = spaces.next();
6346        space != NULL;
6347        space = spaces.next()) {
6348     holes_size += space->Waste() + space->Available();
6349   }
6350   return holes_size;
6351 }
6352
6353
6354 GCTracer::GCTracer(Heap* heap)
6355     : start_time_(0.0),
6356       start_size_(0),
6357       gc_count_(0),
6358       full_gc_count_(0),
6359       allocated_since_last_gc_(0),
6360       spent_in_mutator_(0),
6361       promoted_objects_size_(0),
6362       heap_(heap) {
6363   if (!FLAG_trace_gc && !FLAG_print_cumulative_gc_stat) return;
6364   start_time_ = OS::TimeCurrentMillis();
6365   start_size_ = heap_->SizeOfObjects();
6366
6367   for (int i = 0; i < Scope::kNumberOfScopes; i++) {
6368     scopes_[i] = 0;
6369   }
6370
6371   in_free_list_or_wasted_before_gc_ = CountTotalHolesSize();
6372
6373   allocated_since_last_gc_ =
6374       heap_->SizeOfObjects() - heap_->alive_after_last_gc_;
6375
6376   if (heap_->last_gc_end_timestamp_ > 0) {
6377     spent_in_mutator_ = Max(start_time_ - heap_->last_gc_end_timestamp_, 0.0);
6378   }
6379
6380   steps_count_ = heap_->incremental_marking()->steps_count();
6381   steps_took_ = heap_->incremental_marking()->steps_took();
6382   longest_step_ = heap_->incremental_marking()->longest_step();
6383   steps_count_since_last_gc_ =
6384       heap_->incremental_marking()->steps_count_since_last_gc();
6385   steps_took_since_last_gc_ =
6386       heap_->incremental_marking()->steps_took_since_last_gc();
6387 }
6388
6389
6390 GCTracer::~GCTracer() {
6391   // Print ONE line iff one of the tracing flags is set.
6392   if (!FLAG_trace_gc && !FLAG_print_cumulative_gc_stat) return;
6393
6394   bool first_gc = (heap_->last_gc_end_timestamp_ == 0);
6395
6396   heap_->alive_after_last_gc_ = heap_->SizeOfObjects();
6397   heap_->last_gc_end_timestamp_ = OS::TimeCurrentMillis();
6398
6399   int time = static_cast<int>(heap_->last_gc_end_timestamp_ - start_time_);
6400
6401   // Update cumulative GC statistics if required.
6402   if (FLAG_print_cumulative_gc_stat) {
6403     heap_->max_gc_pause_ = Max(heap_->max_gc_pause_, time);
6404     heap_->max_alive_after_gc_ = Max(heap_->max_alive_after_gc_,
6405                                      heap_->alive_after_last_gc_);
6406     if (!first_gc) {
6407       heap_->min_in_mutator_ = Min(heap_->min_in_mutator_,
6408                                    static_cast<int>(spent_in_mutator_));
6409     }
6410   }
6411
6412   if (!FLAG_trace_gc_nvp) {
6413     int external_time = static_cast<int>(scopes_[Scope::EXTERNAL]);
6414
6415     PrintF("%s %.1f -> %.1f MB, ",
6416            CollectorString(),
6417            static_cast<double>(start_size_) / MB,
6418            SizeOfHeapObjects());
6419
6420     if (external_time > 0) PrintF("%d / ", external_time);
6421     PrintF("%d ms", time);
6422     if (steps_count_ > 0) {
6423       if (collector_ == SCAVENGER) {
6424         PrintF(" (+ %d ms in %d steps since last GC)",
6425                static_cast<int>(steps_took_since_last_gc_),
6426                steps_count_since_last_gc_);
6427       } else {
6428         PrintF(" (+ %d ms in %d steps since start of marking, "
6429                    "biggest step %f ms)",
6430                static_cast<int>(steps_took_),
6431                steps_count_,
6432                longest_step_);
6433       }
6434     }
6435     PrintF(".\n");
6436   } else {
6437     PrintF("pause=%d ", time);
6438     PrintF("mutator=%d ",
6439            static_cast<int>(spent_in_mutator_));
6440
6441     PrintF("gc=");
6442     switch (collector_) {
6443       case SCAVENGER:
6444         PrintF("s");
6445         break;
6446       case MARK_COMPACTOR:
6447         PrintF("ms");
6448         break;
6449       default:
6450         UNREACHABLE();
6451     }
6452     PrintF(" ");
6453
6454     PrintF("external=%d ", static_cast<int>(scopes_[Scope::EXTERNAL]));
6455     PrintF("mark=%d ", static_cast<int>(scopes_[Scope::MC_MARK]));
6456     PrintF("sweep=%d ", static_cast<int>(scopes_[Scope::MC_SWEEP]));
6457     PrintF("sweepns=%d ", static_cast<int>(scopes_[Scope::MC_SWEEP_NEWSPACE]));
6458     PrintF("evacuate=%d ", static_cast<int>(scopes_[Scope::MC_EVACUATE_PAGES]));
6459     PrintF("new_new=%d ",
6460            static_cast<int>(scopes_[Scope::MC_UPDATE_NEW_TO_NEW_POINTERS]));
6461     PrintF("root_new=%d ",
6462            static_cast<int>(scopes_[Scope::MC_UPDATE_ROOT_TO_NEW_POINTERS]));
6463     PrintF("old_new=%d ",
6464            static_cast<int>(scopes_[Scope::MC_UPDATE_OLD_TO_NEW_POINTERS]));
6465     PrintF("compaction_ptrs=%d ",
6466            static_cast<int>(scopes_[Scope::MC_UPDATE_POINTERS_TO_EVACUATED]));
6467     PrintF("intracompaction_ptrs=%d ", static_cast<int>(scopes_[
6468         Scope::MC_UPDATE_POINTERS_BETWEEN_EVACUATED]));
6469     PrintF("misc_compaction=%d ",
6470            static_cast<int>(scopes_[Scope::MC_UPDATE_MISC_POINTERS]));
6471
6472     PrintF("total_size_before=%" V8_PTR_PREFIX "d ", start_size_);
6473     PrintF("total_size_after=%" V8_PTR_PREFIX "d ", heap_->SizeOfObjects());
6474     PrintF("holes_size_before=%" V8_PTR_PREFIX "d ",
6475            in_free_list_or_wasted_before_gc_);
6476     PrintF("holes_size_after=%" V8_PTR_PREFIX "d ", CountTotalHolesSize());
6477
6478     PrintF("allocated=%" V8_PTR_PREFIX "d ", allocated_since_last_gc_);
6479     PrintF("promoted=%" V8_PTR_PREFIX "d ", promoted_objects_size_);
6480
6481     if (collector_ == SCAVENGER) {
6482       PrintF("stepscount=%d ", steps_count_since_last_gc_);
6483       PrintF("stepstook=%d ", static_cast<int>(steps_took_since_last_gc_));
6484     } else {
6485       PrintF("stepscount=%d ", steps_count_);
6486       PrintF("stepstook=%d ", static_cast<int>(steps_took_));
6487     }
6488
6489     PrintF("\n");
6490   }
6491
6492   heap_->PrintShortHeapStatistics();
6493 }
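
// Example of the trace line emitted above when --trace-gc is set (values are
// made up for illustration):
//
//   Scavenge 8.1 -> 4.2 MB, 1 / 3 ms (+ 2 ms in 5 steps since last GC).
//
// With --trace-gc-nvp the same information is printed as key=value pairs
// (pause=, mutator=, gc=, ...) intended for machine consumption.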
6494
6495
6496 const char* GCTracer::CollectorString() {
6497   switch (collector_) {
6498     case SCAVENGER:
6499       return "Scavenge";
6500     case MARK_COMPACTOR:
6501       return "Mark-sweep";
6502   }
6503   return "Unknown GC";
6504 }
6505
6506
6507 int KeyedLookupCache::Hash(Map* map, String* name) {
6508   // Uses only lower 32 bits if pointers are larger.
6509   uintptr_t addr_hash =
6510       static_cast<uint32_t>(reinterpret_cast<uintptr_t>(map)) >> kMapHashShift;
6511   return static_cast<uint32_t>((addr_hash ^ name->Hash()) & kCapacityMask);
6512 }
6513
6514
6515 int KeyedLookupCache::Lookup(Map* map, String* name) {
6516   int index = Hash(map, name);
6517   Key& key = keys_[index];
6518   if ((key.map == map) && key.name->Equals(name)) {
6519     return field_offsets_[index];
6520   }
6521   return kNotFound;
6522 }
6523
6524
6525 void KeyedLookupCache::Update(Map* map, String* name, int field_offset) {
6526   String* symbol;
6527   if (HEAP->LookupSymbolIfExists(name, &symbol)) {
6528     int index = Hash(map, symbol);
6529     Key& key = keys_[index];
6530     key.map = map;
6531     key.name = symbol;
6532     field_offsets_[index] = field_offset;
6533   }
6534 }
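
// Sketch of how the cache is consulted (an illustrative addition, not an
// actual call site; the accessor name keyed_lookup_cache() is taken from
// Isolate):
//
//   int offset = isolate->keyed_lookup_cache()->Lookup(map, name);
//   if (offset != KeyedLookupCache::kNotFound) {
//     // fast path: reuse the cached field offset for this (map, name) pair
//   } else {
//     // slow path: do the full lookup, then Update(map, name, offset)
//   }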
6535
6536
6537 void KeyedLookupCache::Clear() {
6538   for (int index = 0; index < kLength; index++) keys_[index].map = NULL;
6539 }
6540
6541
6542 void DescriptorLookupCache::Clear() {
6543   for (int index = 0; index < kLength; index++) keys_[index].array = NULL;
6544 }
6545
6546
6547 #ifdef DEBUG
6548 void Heap::GarbageCollectionGreedyCheck() {
6549   ASSERT(FLAG_gc_greedy);
6550   if (isolate_->bootstrapper()->IsActive()) return;
6551   if (disallow_allocation_failure()) return;
6552   CollectGarbage(NEW_SPACE);
6553 }
6554 #endif
6555
6556
6557 TranscendentalCache::SubCache::SubCache(Type t)
6558   : type_(t),
6559     isolate_(Isolate::Current()) {
6560   uint32_t in0 = 0xffffffffu;  // Bit-pattern for a NaN that isn't
6561   uint32_t in1 = 0xffffffffu;  // generated by the FPU.
6562   for (int i = 0; i < kCacheSize; i++) {
6563     elements_[i].in[0] = in0;
6564     elements_[i].in[1] = in1;
6565     elements_[i].output = NULL;
6566   }
6567 }
6568
6569
6570 void TranscendentalCache::Clear() {
6571   for (int i = 0; i < kNumberOfCaches; i++) {
6572     if (caches_[i] != NULL) {
6573       delete caches_[i];
6574       caches_[i] = NULL;
6575     }
6576   }
6577 }
6578
6579
6580 void ExternalStringTable::CleanUp() {
6581   int last = 0;
6582   for (int i = 0; i < new_space_strings_.length(); ++i) {
6583     if (new_space_strings_[i] == heap_->raw_unchecked_the_hole_value()) {
6584       continue;
6585     }
6586     if (heap_->InNewSpace(new_space_strings_[i])) {
6587       new_space_strings_[last++] = new_space_strings_[i];
6588     } else {
6589       old_space_strings_.Add(new_space_strings_[i]);
6590     }
6591   }
6592   new_space_strings_.Rewind(last);
6593   last = 0;
6594   for (int i = 0; i < old_space_strings_.length(); ++i) {
6595     if (old_space_strings_[i] == heap_->raw_unchecked_the_hole_value()) {
6596       continue;
6597     }
6598     ASSERT(!heap_->InNewSpace(old_space_strings_[i]));
6599     old_space_strings_[last++] = old_space_strings_[i];
6600   }
6601   old_space_strings_.Rewind(last);
6602   if (FLAG_verify_heap) {
6603     Verify();
6604   }
6605 }
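
// Note (added for clarity): CleanUp() compacts both lists in place.  Entries
// that were overwritten with the hole are dropped, strings that have been
// promoted out of new space migrate to old_space_strings_, and Rewind() trims
// the unused tail of each list.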
6606
6607
6608 void ExternalStringTable::TearDown() {
6609   new_space_strings_.Free();
6610   old_space_strings_.Free();
6611 }
6612
6613
6614 void Heap::QueueMemoryChunkForFree(MemoryChunk* chunk) {
6615   chunk->set_next_chunk(chunks_queued_for_free_);
6616   chunks_queued_for_free_ = chunk;
6617 }
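
// Note (added for clarity): chunks are only queued here.  The actual release
// happens in FreeQueuedChunks() below, which first compacts and filters the
// store buffer so that slots pointing into the queued chunks are dropped
// before the memory is handed back to the allocator.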
6618
6619
6620 void Heap::FreeQueuedChunks() {
6621   if (chunks_queued_for_free_ == NULL) return;
6622   MemoryChunk* next;
6623   MemoryChunk* chunk;
6624   for (chunk = chunks_queued_for_free_; chunk != NULL; chunk = next) {
6625     next = chunk->next_chunk();
6626     chunk->SetFlag(MemoryChunk::ABOUT_TO_BE_FREED);
6627
6628     if (chunk->owner()->identity() == LO_SPACE) {
6629       // StoreBuffer::Filter relies on MemoryChunk::FromAnyPointerAddress.
6630       // If FromAnyPointerAddress encounters a slot that belongs to a large
6631       // chunk queued for deletion, it will fail to find the chunk because
6632       // it searches the list of pages owned by the large object space, and
6633       // the queued chunks have been detached from that list.
6634       // To work around this we split the large chunk into normal, kPageSize-
6635       // aligned pieces and initialize the size, owner and flags fields of
6636       // every piece.  If FromAnyPointerAddress encounters a slot that belongs
6637       // to one of these smaller pieces it treats it as a slot on a normal Page.
6638       MemoryChunk* inner = MemoryChunk::FromAddress(
6639           chunk->address() + Page::kPageSize);
6640       MemoryChunk* inner_last = MemoryChunk::FromAddress(
6641           chunk->address() + chunk->size() - 1);
6642       while (inner <= inner_last) {
6643         // The size of a large chunk is always a multiple of
6644         // OS::AllocateAlignment(), so there is always
6645         // enough space for a fake MemoryChunk header.
6646         inner->set_size(Page::kPageSize);
6647         inner->set_owner(lo_space());
6648         inner->SetFlag(MemoryChunk::ABOUT_TO_BE_FREED);
6649         inner = MemoryChunk::FromAddress(
6650             inner->address() + Page::kPageSize);
6651       }
6652     }
6653   }
6654   isolate_->heap()->store_buffer()->Compact();
6655   isolate_->heap()->store_buffer()->Filter(MemoryChunk::ABOUT_TO_BE_FREED);
6656   for (chunk = chunks_queued_for_free_; chunk != NULL; chunk = next) {
6657     next = chunk->next_chunk();
6658     isolate_->memory_allocator()->Free(chunk);
6659   }
6660   chunks_queued_for_free_ = NULL;
6661 }
6662
6663 } }  // namespace v8::internal