1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
4 // met:
5 //
6 //     * Redistributions of source code must retain the above copyright
7 //       notice, this list of conditions and the following disclaimer.
8 //     * Redistributions in binary form must reproduce the above
9 //       copyright notice, this list of conditions and the following
10 //       disclaimer in the documentation and/or other materials provided
11 //       with the distribution.
12 //     * Neither the name of Google Inc. nor the names of its
13 //       contributors may be used to endorse or promote products derived
14 //       from this software without specific prior written permission.
15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
28 #include "v8.h"
29
30 #include "accessors.h"
31 #include "api.h"
32 #include "bootstrapper.h"
33 #include "codegen.h"
34 #include "compilation-cache.h"
35 #include "debug.h"
36 #include "deoptimizer.h"
37 #include "global-handles.h"
38 #include "heap-profiler.h"
39 #include "incremental-marking.h"
40 #include "mark-compact.h"
41 #include "natives.h"
42 #include "objects-visiting.h"
43 #include "objects-visiting-inl.h"
44 #include "once.h"
45 #include "runtime-profiler.h"
46 #include "scopeinfo.h"
47 #include "snapshot.h"
48 #include "store-buffer.h"
49 #include "v8threads.h"
50 #include "v8utils.h"
51 #include "vm-state-inl.h"
52 #if V8_TARGET_ARCH_ARM && !V8_INTERPRETED_REGEXP
53 #include "regexp-macro-assembler.h"
54 #include "arm/regexp-macro-assembler-arm.h"
55 #endif
56 #if V8_TARGET_ARCH_MIPS && !V8_INTERPRETED_REGEXP
57 #include "regexp-macro-assembler.h"
58 #include "mips/regexp-macro-assembler-mips.h"
59 #endif
60
61 namespace v8 {
62 namespace internal {
63
64
65 Heap::Heap()
66     : isolate_(NULL),
67 // semispace_size_ should be a power of 2 and old_generation_size_ should be
68 // a multiple of Page::kPageSize.
69 #if defined(V8_TARGET_ARCH_X64)
70 #define LUMP_OF_MEMORY (2 * MB)
71       code_range_size_(512*MB),
72 #else
73 #define LUMP_OF_MEMORY MB
74       code_range_size_(0),
75 #endif
76 #if defined(ANDROID)
77       reserved_semispace_size_(4 * Max(LUMP_OF_MEMORY, Page::kPageSize)),
78       max_semispace_size_(4 * Max(LUMP_OF_MEMORY, Page::kPageSize)),
79       initial_semispace_size_(Page::kPageSize),
80       max_old_generation_size_(192*MB),
81       max_executable_size_(max_old_generation_size_),
82 #else
83       reserved_semispace_size_(8 * Max(LUMP_OF_MEMORY, Page::kPageSize)),
84       max_semispace_size_(8 * Max(LUMP_OF_MEMORY, Page::kPageSize)),
85       initial_semispace_size_(Page::kPageSize),
86       max_old_generation_size_(700ul * LUMP_OF_MEMORY),
87       max_executable_size_(256l * LUMP_OF_MEMORY),
88 #endif
89
90 // Variables set based on semispace_size_ and old_generation_size_ in
91 // ConfigureHeap (survived_since_last_expansion_, external_allocation_limit_).
92 // Will be 4 * reserved_semispace_size_ to ensure that the young
93 // generation can be aligned to its size.
94       survived_since_last_expansion_(0),
95       sweep_generation_(0),
96       always_allocate_scope_depth_(0),
97       linear_allocation_scope_depth_(0),
98       contexts_disposed_(0),
99       global_ic_age_(0),
100       flush_monomorphic_ics_(false),
101       scan_on_scavenge_pages_(0),
102       new_space_(this),
103       old_pointer_space_(NULL),
104       old_data_space_(NULL),
105       code_space_(NULL),
106       map_space_(NULL),
107       cell_space_(NULL),
108       lo_space_(NULL),
109       gc_state_(NOT_IN_GC),
110       gc_post_processing_depth_(0),
111       ms_count_(0),
112       gc_count_(0),
113       remembered_unmapped_pages_index_(0),
114       unflattened_strings_length_(0),
115 #ifdef DEBUG
116       allocation_allowed_(true),
117       allocation_timeout_(0),
118       disallow_allocation_failure_(false),
119 #endif  // DEBUG
120       new_space_high_promotion_mode_active_(false),
121       old_gen_promotion_limit_(kMinimumPromotionLimit),
122       old_gen_allocation_limit_(kMinimumAllocationLimit),
123       old_gen_limit_factor_(1),
124       size_of_old_gen_at_last_old_space_gc_(0),
125       external_allocation_limit_(0),
126       amount_of_external_allocated_memory_(0),
127       amount_of_external_allocated_memory_at_last_global_gc_(0),
128       old_gen_exhausted_(false),
129       store_buffer_rebuilder_(store_buffer()),
130       hidden_string_(NULL),
131       global_gc_prologue_callback_(NULL),
132       global_gc_epilogue_callback_(NULL),
133       gc_safe_size_of_old_object_(NULL),
134       total_regexp_code_generated_(0),
135       tracer_(NULL),
136       young_survivors_after_last_gc_(0),
137       high_survival_rate_period_length_(0),
138       low_survival_rate_period_length_(0),
139       survival_rate_(0),
140       previous_survival_rate_trend_(Heap::STABLE),
141       survival_rate_trend_(Heap::STABLE),
142       max_gc_pause_(0.0),
143       total_gc_time_ms_(0.0),
144       max_alive_after_gc_(0),
145       min_in_mutator_(kMaxInt),
146       alive_after_last_gc_(0),
147       last_gc_end_timestamp_(0.0),
148       marking_time_(0.0),
149       sweeping_time_(0.0),
150       store_buffer_(this),
151       marking_(this),
152       incremental_marking_(this),
153       number_idle_notifications_(0),
154       last_idle_notification_gc_count_(0),
155       last_idle_notification_gc_count_init_(false),
156       mark_sweeps_since_idle_round_started_(0),
157       ms_count_at_last_idle_notification_(0),
158       gc_count_at_last_idle_gc_(0),
159       scavenges_since_last_idle_round_(kIdleScavengeThreshold),
160       gcs_since_last_deopt_(0),
161 #ifdef VERIFY_HEAP
162       no_weak_embedded_maps_verification_scope_depth_(0),
163 #endif
164       promotion_queue_(this),
165       configured_(false),
166       chunks_queued_for_free_(NULL),
167       relocation_mutex_(NULL) {
168   // Allow build-time customization of the max semispace size. Building
169   // V8 with snapshots and a non-default max semispace size is much
170   // easier if you can define it as part of the build environment.
171 #if defined(V8_MAX_SEMISPACE_SIZE)
172   max_semispace_size_ = reserved_semispace_size_ = V8_MAX_SEMISPACE_SIZE;
173 #endif
174
175   intptr_t max_virtual = OS::MaxVirtualMemory();
176
177   if (max_virtual > 0) {
178     if (code_range_size_ > 0) {
179       // Reserve no more than 1/8 of the memory for the code range.
180       code_range_size_ = Min(code_range_size_, max_virtual >> 3);
181     }
182   }
183
184   memset(roots_, 0, sizeof(roots_[0]) * kRootListLength);
185   native_contexts_list_ = NULL;
186   mark_compact_collector_.heap_ = this;
187   external_string_table_.heap_ = this;
188   // Put a dummy entry in the remembered pages so we can find the list in
189   // the minidump even if there are no real unmapped pages.
190   RememberUnmappedPage(NULL, false);
191
192   ClearObjectStats(true);
193 }
194
195
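// Total capacity of the new space and all paged spaces.  Note that the large
// object space is not included here.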
196 intptr_t Heap::Capacity() {
197   if (!HasBeenSetUp()) return 0;
198
199   return new_space_.Capacity() +
200       old_pointer_space_->Capacity() +
201       old_data_space_->Capacity() +
202       code_space_->Capacity() +
203       map_space_->Capacity() +
204       cell_space_->Capacity();
205 }
206
207
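// Memory committed for the new space and all paged spaces, plus the current
// size of the large object space.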
208 intptr_t Heap::CommittedMemory() {
209   if (!HasBeenSetUp()) return 0;
210
211   return new_space_.CommittedMemory() +
212       old_pointer_space_->CommittedMemory() +
213       old_data_space_->CommittedMemory() +
214       code_space_->CommittedMemory() +
215       map_space_->CommittedMemory() +
216       cell_space_->CommittedMemory() +
217       lo_space_->Size();
218 }
219
220
221 size_t Heap::CommittedPhysicalMemory() {
222   if (!HasBeenSetUp()) return 0;
223
224   return new_space_.CommittedPhysicalMemory() +
225       old_pointer_space_->CommittedPhysicalMemory() +
226       old_data_space_->CommittedPhysicalMemory() +
227       code_space_->CommittedPhysicalMemory() +
228       map_space_->CommittedPhysicalMemory() +
229       cell_space_->CommittedPhysicalMemory() +
230       lo_space_->CommittedPhysicalMemory();
231 }
232
233
234 intptr_t Heap::CommittedMemoryExecutable() {
235   if (!HasBeenSetUp()) return 0;
236
237   return isolate()->memory_allocator()->SizeExecutable();
238 }
239
240
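// Memory still available in the new space and the paged spaces; as with
// Capacity(), the large object space is not included.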
241 intptr_t Heap::Available() {
242   if (!HasBeenSetUp()) return 0;
243
244   return new_space_.Available() +
245       old_pointer_space_->Available() +
246       old_data_space_->Available() +
247       code_space_->Available() +
248       map_space_->Available() +
249       cell_space_->Available();
250 }
251
252
253 bool Heap::HasBeenSetUp() {
254   return old_pointer_space_ != NULL &&
255          old_data_space_ != NULL &&
256          code_space_ != NULL &&
257          map_space_ != NULL &&
258          cell_space_ != NULL &&
259          lo_space_ != NULL;
260 }
261
262
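// Size of an old-space object that is safe to query during GC: falls back to
// the IntrusiveMarking bookkeeping when the object is intrusively marked.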
263 int Heap::GcSafeSizeOfOldObject(HeapObject* object) {
264   if (IntrusiveMarking::IsMarked(object)) {
265     return IntrusiveMarking::SizeOfMarkedObject(object);
266   }
267   return object->SizeFromMap(object->map());
268 }
269
270
271 GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space,
272                                               const char** reason) {
273   // Is global GC requested?
274   if (space != NEW_SPACE) {
275     isolate_->counters()->gc_compactor_caused_by_request()->Increment();
276     *reason = "GC in old space requested";
277     return MARK_COMPACTOR;
278   }
279
280   if (FLAG_gc_global || (FLAG_stress_compaction && (gc_count_ & 1) != 0)) {
281     *reason = "GC in old space forced by flags";
282     return MARK_COMPACTOR;
283   }
284
285   // Is enough data promoted to justify a global GC?
286   if (OldGenerationPromotionLimitReached()) {
287     isolate_->counters()->gc_compactor_caused_by_promoted_data()->Increment();
288     *reason = "promotion limit reached";
289     return MARK_COMPACTOR;
290   }
291
292   // Has allocation in OLD and LO failed?
293   if (old_gen_exhausted_) {
294     isolate_->counters()->
295         gc_compactor_caused_by_oldspace_exhaustion()->Increment();
296     *reason = "old generations exhausted";
297     return MARK_COMPACTOR;
298   }
299
300   // Is there enough space left in OLD to guarantee that a scavenge can
301   // succeed?
302   //
303   // Note that MemoryAllocator->MaxAvailable() undercounts the memory available
304   // for object promotion. It counts only the bytes that the memory
305   // allocator has not yet allocated from the OS and assigned to any space,
306   // and does not count available bytes already in the old space or code
307   // space.  Undercounting is safe---we may get an unrequested full GC when
308   // a scavenge would have succeeded.
309   if (isolate_->memory_allocator()->MaxAvailable() <= new_space_.Size()) {
310     isolate_->counters()->
311         gc_compactor_caused_by_oldspace_exhaustion()->Increment();
312     *reason = "scavenge might not succeed";
313     return MARK_COMPACTOR;
314   }
315
316   // Default
317   *reason = NULL;
318   return SCAVENGER;
319 }
320
321
322 // TODO(1238405): Combine the infrastructure for --heap-stats and
323 // --log-gc to avoid the complicated preprocessor and flag testing.
324 void Heap::ReportStatisticsBeforeGC() {
325   // Heap::ReportHeapStatistics will also log NewSpace statistics when
326   // compiled in debug mode and --log-gc is set.  The following logic is used
327   // to avoid double logging.
328 #ifdef DEBUG
329   if (FLAG_heap_stats || FLAG_log_gc) new_space_.CollectStatistics();
330   if (FLAG_heap_stats) {
331     ReportHeapStatistics("Before GC");
332   } else if (FLAG_log_gc) {
333     new_space_.ReportStatistics();
334   }
335   if (FLAG_heap_stats || FLAG_log_gc) new_space_.ClearHistograms();
336 #else
337   if (FLAG_log_gc) {
338     new_space_.CollectStatistics();
339     new_space_.ReportStatistics();
340     new_space_.ClearHistograms();
341   }
342 #endif  // DEBUG
343 }
344
345
346 void Heap::PrintShortHeapStatistics() {
347   if (!FLAG_trace_gc_verbose) return;
348   PrintPID("Memory allocator,   used: %6" V8_PTR_PREFIX "d KB"
349                ", available: %6" V8_PTR_PREFIX "d KB\n",
350            isolate_->memory_allocator()->Size() / KB,
351            isolate_->memory_allocator()->Available() / KB);
352   PrintPID("New space,          used: %6" V8_PTR_PREFIX "d KB"
353                ", available: %6" V8_PTR_PREFIX "d KB"
354                ", committed: %6" V8_PTR_PREFIX "d KB\n",
355            new_space_.Size() / KB,
356            new_space_.Available() / KB,
357            new_space_.CommittedMemory() / KB);
358   PrintPID("Old pointers,       used: %6" V8_PTR_PREFIX "d KB"
359                ", available: %6" V8_PTR_PREFIX "d KB"
360                ", committed: %6" V8_PTR_PREFIX "d KB\n",
361            old_pointer_space_->SizeOfObjects() / KB,
362            old_pointer_space_->Available() / KB,
363            old_pointer_space_->CommittedMemory() / KB);
364   PrintPID("Old data space,     used: %6" V8_PTR_PREFIX "d KB"
365                ", available: %6" V8_PTR_PREFIX "d KB"
366                ", committed: %6" V8_PTR_PREFIX "d KB\n",
367            old_data_space_->SizeOfObjects() / KB,
368            old_data_space_->Available() / KB,
369            old_data_space_->CommittedMemory() / KB);
370   PrintPID("Code space,         used: %6" V8_PTR_PREFIX "d KB"
371                ", available: %6" V8_PTR_PREFIX "d KB"
372                ", committed: %6" V8_PTR_PREFIX "d KB\n",
373            code_space_->SizeOfObjects() / KB,
374            code_space_->Available() / KB,
375            code_space_->CommittedMemory() / KB);
376   PrintPID("Map space,          used: %6" V8_PTR_PREFIX "d KB"
377                ", available: %6" V8_PTR_PREFIX "d KB"
378                ", committed: %6" V8_PTR_PREFIX "d KB\n",
379            map_space_->SizeOfObjects() / KB,
380            map_space_->Available() / KB,
381            map_space_->CommittedMemory() / KB);
382   PrintPID("Cell space,         used: %6" V8_PTR_PREFIX "d KB"
383                ", available: %6" V8_PTR_PREFIX "d KB"
384                ", committed: %6" V8_PTR_PREFIX "d KB\n",
385            cell_space_->SizeOfObjects() / KB,
386            cell_space_->Available() / KB,
387            cell_space_->CommittedMemory() / KB);
388   PrintPID("Large object space, used: %6" V8_PTR_PREFIX "d KB"
389                ", available: %6" V8_PTR_PREFIX "d KB"
390                ", committed: %6" V8_PTR_PREFIX "d KB\n",
391            lo_space_->SizeOfObjects() / KB,
392            lo_space_->Available() / KB,
393            lo_space_->CommittedMemory() / KB);
394   PrintPID("All spaces,         used: %6" V8_PTR_PREFIX "d KB"
395                ", available: %6" V8_PTR_PREFIX "d KB"
396                ", committed: %6" V8_PTR_PREFIX "d KB\n",
397            this->SizeOfObjects() / KB,
398            this->Available() / KB,
399            this->CommittedMemory() / KB);
400   PrintPID("Total time spent in GC  : %.1f ms\n", total_gc_time_ms_);
401 }
402
403
404 // TODO(1238405): Combine the infrastructure for --heap-stats and
405 // --log-gc to avoid the complicated preprocessor and flag testing.
406 void Heap::ReportStatisticsAfterGC() {
407   // As in ReportStatisticsBeforeGC, we use some complicated logic to ensure
408   // that NewSpace statistics are logged exactly once when --log-gc is on.
409 #if defined(DEBUG)
410   if (FLAG_heap_stats) {
411     new_space_.CollectStatistics();
412     ReportHeapStatistics("After GC");
413   } else if (FLAG_log_gc) {
414     new_space_.ReportStatistics();
415   }
416 #else
417   if (FLAG_log_gc) new_space_.ReportStatistics();
418 #endif  // DEBUG
419 }
420
421
422 void Heap::GarbageCollectionPrologue() {
423   isolate_->transcendental_cache()->Clear();
424   ClearJSFunctionResultCaches();
425   gc_count_++;
426   unflattened_strings_length_ = 0;
427
428   if (FLAG_flush_code && FLAG_flush_code_incrementally) {
429     mark_compact_collector()->EnableCodeFlushing(true);
430   }
431
432 #ifdef VERIFY_HEAP
433   if (FLAG_verify_heap) {
434     Verify();
435   }
436 #endif
437
438 #ifdef DEBUG
439   ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
440   allow_allocation(false);
441
442   if (FLAG_gc_verbose) Print();
443
444   ReportStatisticsBeforeGC();
445 #endif  // DEBUG
446
447   store_buffer()->GCPrologue();
448 }
449
450
451 intptr_t Heap::SizeOfObjects() {
452   intptr_t total = 0;
453   AllSpaces spaces(this);
454   for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
455     total += space->SizeOfObjects();
456   }
457   return total;
458 }
459
460
461 void Heap::RepairFreeListsAfterBoot() {
462   PagedSpaces spaces(this);
463   for (PagedSpace* space = spaces.next();
464        space != NULL;
465        space = spaces.next()) {
466     space->RepairFreeListsAfterBoot();
467   }
468 }
469
470
471 void Heap::GarbageCollectionEpilogue() {
472   store_buffer()->GCEpilogue();
473
474   // In release mode, we only zap the from space under heap verification.
475   if (Heap::ShouldZapGarbage()) {
476     ZapFromSpace();
477   }
478
479 #ifdef VERIFY_HEAP
480   if (FLAG_verify_heap) {
481     Verify();
482   }
483 #endif
484
485 #ifdef DEBUG
486   allow_allocation(true);
487   if (FLAG_print_global_handles) isolate_->global_handles()->Print();
488   if (FLAG_print_handles) PrintHandles();
489   if (FLAG_gc_verbose) Print();
490   if (FLAG_code_stats) ReportCodeStatistics("After GC");
491 #endif
492   if (FLAG_deopt_every_n_garbage_collections > 0) {
493     if (++gcs_since_last_deopt_ == FLAG_deopt_every_n_garbage_collections) {
494       Deoptimizer::DeoptimizeAll(isolate());
495       gcs_since_last_deopt_ = 0;
496     }
497   }
498
499   isolate_->counters()->alive_after_last_gc()->Set(
500       static_cast<int>(SizeOfObjects()));
501
502   isolate_->counters()->string_table_capacity()->Set(
503       string_table()->Capacity());
504   isolate_->counters()->number_of_symbols()->Set(
505       string_table()->NumberOfElements());
506
507   if (CommittedMemory() > 0) {
508     isolate_->counters()->external_fragmentation_total()->AddSample(
509         static_cast<int>(100 - (SizeOfObjects() * 100.0) / CommittedMemory()));
510
511     isolate_->counters()->heap_fraction_map_space()->AddSample(
512         static_cast<int>(
513             (map_space()->CommittedMemory() * 100.0) / CommittedMemory()));
514     isolate_->counters()->heap_fraction_cell_space()->AddSample(
515         static_cast<int>(
516             (cell_space()->CommittedMemory() * 100.0) / CommittedMemory()));
517
518     isolate_->counters()->heap_sample_total_committed()->AddSample(
519         static_cast<int>(CommittedMemory() / KB));
520     isolate_->counters()->heap_sample_total_used()->AddSample(
521         static_cast<int>(SizeOfObjects() / KB));
522     isolate_->counters()->heap_sample_map_space_committed()->AddSample(
523         static_cast<int>(map_space()->CommittedMemory() / KB));
524     isolate_->counters()->heap_sample_cell_space_committed()->AddSample(
525         static_cast<int>(cell_space()->CommittedMemory() / KB));
526   }
527
528 #define UPDATE_COUNTERS_FOR_SPACE(space)                                       \
529   isolate_->counters()->space##_bytes_available()->Set(                        \
530       static_cast<int>(space()->Available()));                                 \
531   isolate_->counters()->space##_bytes_committed()->Set(                        \
532       static_cast<int>(space()->CommittedMemory()));                           \
533   isolate_->counters()->space##_bytes_used()->Set(                             \
534       static_cast<int>(space()->SizeOfObjects()));
535 #define UPDATE_FRAGMENTATION_FOR_SPACE(space)                                  \
536   if (space()->CommittedMemory() > 0) {                                        \
537     isolate_->counters()->external_fragmentation_##space()->AddSample(         \
538         static_cast<int>(100 -                                                 \
539             (space()->SizeOfObjects() * 100.0) / space()->CommittedMemory())); \
540   }
541 #define UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(space)                     \
542   UPDATE_COUNTERS_FOR_SPACE(space)                                             \
543   UPDATE_FRAGMENTATION_FOR_SPACE(space)
544
545   UPDATE_COUNTERS_FOR_SPACE(new_space)
546   UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(old_pointer_space)
547   UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(old_data_space)
548   UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(code_space)
549   UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(map_space)
550   UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(cell_space)
551   UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(lo_space)
552 #undef UPDATE_COUNTERS_FOR_SPACE
553 #undef UPDATE_FRAGMENTATION_FOR_SPACE
554 #undef UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE
555
556 #if defined(DEBUG)
557   ReportStatisticsAfterGC();
558 #endif  // DEBUG
559 #ifdef ENABLE_DEBUGGER_SUPPORT
560   isolate_->debug()->AfterGarbageCollection();
561 #endif  // ENABLE_DEBUGGER_SUPPORT
562
563   error_object_list_.DeferredFormatStackTrace(isolate());
564 }
565
566
567 void Heap::CollectAllGarbage(int flags, const char* gc_reason) {
568   // Since we are ignoring the return value, the exact choice of space does
569   // not matter, so long as we do not specify NEW_SPACE, which would not
570   // cause a full GC.
571   mark_compact_collector_.SetFlags(flags);
572   CollectGarbage(OLD_POINTER_SPACE, gc_reason);
573   mark_compact_collector_.SetFlags(kNoGCFlags);
574 }
575
576
577 void Heap::CollectAllAvailableGarbage(const char* gc_reason) {
578   // Since we are ignoring the return value, the exact choice of space does
579   // not matter, so long as we do not specify NEW_SPACE, which would not
580   // cause a full GC.
581   // A major GC would invoke weak handle callbacks on weakly reachable
582   // handles, but won't collect weakly reachable objects until the next
583   // major GC.  Therefore, if we collect aggressively and a weak handle
584   // callback has been invoked, we rerun the major GC to release objects
585   // which have become garbage.
586   // Note: as weak callbacks can execute arbitrary code, we cannot
587   // hope that eventually there will be no weak callback invocations.
588   // Therefore we stop recollecting after several attempts.
589   mark_compact_collector()->SetFlags(kMakeHeapIterableMask |
590                                      kReduceMemoryFootprintMask);
591   isolate_->compilation_cache()->Clear();
592   const int kMaxNumberOfAttempts = 7;
593   for (int attempt = 0; attempt < kMaxNumberOfAttempts; attempt++) {
594     if (!CollectGarbage(OLD_POINTER_SPACE, MARK_COMPACTOR, gc_reason, NULL)) {
595       break;
596     }
597   }
598   mark_compact_collector()->SetFlags(kNoGCFlags);
599   new_space_.Shrink();
600   UncommitFromSpace();
601   incremental_marking()->UncommitMarkingDeque();
602 }
603
604
605 bool Heap::CollectGarbage(AllocationSpace space,
606                           GarbageCollector collector,
607                           const char* gc_reason,
608                           const char* collector_reason) {
609   // The VM is in the GC state until exiting this function.
610   VMState<GC> state(isolate_);
611
612 #ifdef DEBUG
613   // Reset the allocation timeout to the GC interval, but make sure to
614   // allow at least a few allocations after a collection. The reason
615   // for this is that we have a lot of allocation sequences and we
616   // assume that a garbage collection will allow the subsequent
617   // allocation attempts to go through.
618   allocation_timeout_ = Max(6, FLAG_gc_interval);
619 #endif
620
621   if (collector == SCAVENGER && !incremental_marking()->IsStopped()) {
622     if (FLAG_trace_incremental_marking) {
623       PrintF("[IncrementalMarking] Scavenge during marking.\n");
624     }
625   }
626
627   if (collector == MARK_COMPACTOR &&
628       !mark_compact_collector()->abort_incremental_marking() &&
629       !incremental_marking()->IsStopped() &&
630       !incremental_marking()->should_hurry() &&
631       FLAG_incremental_marking_steps) {
632     // Make progress in incremental marking.
633     const intptr_t kStepSizeWhenDelayedByScavenge = 1 * MB;
634     incremental_marking()->Step(kStepSizeWhenDelayedByScavenge,
635                                 IncrementalMarking::NO_GC_VIA_STACK_GUARD);
636     if (!incremental_marking()->IsComplete()) {
637       if (FLAG_trace_incremental_marking) {
638         PrintF("[IncrementalMarking] Delaying MarkSweep.\n");
639       }
640       collector = SCAVENGER;
641       collector_reason = "incremental marking delaying mark-sweep";
642     }
643   }
644
645   bool next_gc_likely_to_collect_more = false;
646
647   { GCTracer tracer(this, gc_reason, collector_reason);
648     GarbageCollectionPrologue();
649     // The GC count was incremented in the prologue.  Tell the tracer about
650     // it.
651     tracer.set_gc_count(gc_count_);
652
653     // Tell the tracer which collector we've selected.
654     tracer.set_collector(collector);
655
656     {
657       HistogramTimerScope histogram_timer_scope(
658           (collector == SCAVENGER) ? isolate_->counters()->gc_scavenger()
659                                    : isolate_->counters()->gc_compactor());
660       next_gc_likely_to_collect_more =
661           PerformGarbageCollection(collector, &tracer);
662     }
663
664     GarbageCollectionEpilogue();
665   }
666
667   // Start incremental marking for the next cycle. The heap snapshot
668   // generator needs incremental marking to stay off after aborting it.
669   if (!mark_compact_collector()->abort_incremental_marking() &&
670       incremental_marking()->IsStopped() &&
671       incremental_marking()->WorthActivating() &&
672       NextGCIsLikelyToBeFull()) {
673     incremental_marking()->Start();
674   }
675
676   return next_gc_likely_to_collect_more;
677 }
678
679
680 void Heap::PerformScavenge() {
681   GCTracer tracer(this, NULL, NULL);
682   if (incremental_marking()->IsStopped()) {
683     PerformGarbageCollection(SCAVENGER, &tracer);
684   } else {
685     PerformGarbageCollection(MARK_COMPACTOR, &tracer);
686   }
687 }
688
689
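// Moves |len| elements of |array| from |src_index| to |dst_index|, doing the
// write-barrier work needed when new-space values end up in an array that
// lives outside the new space.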
690 void Heap::MoveElements(FixedArray* array,
691                         int dst_index,
692                         int src_index,
693                         int len) {
694   if (len == 0) return;
695
696   ASSERT(array->map() != HEAP->fixed_cow_array_map());
697   Object** dst_objects = array->data_start() + dst_index;
698   OS::MemMove(dst_objects,
699               array->data_start() + src_index,
700               len * kPointerSize);
701   if (!InNewSpace(array)) {
702     for (int i = 0; i < len; i++) {
703       // TODO(hpayer): check store buffer for entries
704       if (InNewSpace(dst_objects[i])) {
705         RecordWrite(array->address(), array->OffsetOfElementAt(dst_index + i));
706       }
707     }
708   }
709   incremental_marking()->RecordWrites(array);
710 }
711
712
713 #ifdef VERIFY_HEAP
714 // Helper class for verifying the string table.
715 class StringTableVerifier : public ObjectVisitor {
716  public:
717   void VisitPointers(Object** start, Object** end) {
718     // Visit all HeapObject pointers in [start, end).
719     for (Object** p = start; p < end; p++) {
720       if ((*p)->IsHeapObject()) {
721         // Check that the string is actually internalized.
722         CHECK((*p)->IsTheHole() || (*p)->IsUndefined() ||
723               (*p)->IsInternalizedString());
724       }
725     }
726   }
727 };
728
729
730 static void VerifyStringTable() {
731   StringTableVerifier verifier;
732   HEAP->string_table()->IterateElements(&verifier);
733 }
734 #endif  // VERIFY_HEAP
735
736
737 static bool AbortIncrementalMarkingAndCollectGarbage(
738     Heap* heap,
739     AllocationSpace space,
740     const char* gc_reason = NULL) {
741   heap->mark_compact_collector()->SetFlags(Heap::kAbortIncrementalMarkingMask);
742   bool result = heap->CollectGarbage(space, gc_reason);
743   heap->mark_compact_collector()->SetFlags(Heap::kNoGCFlags);
744   return result;
745 }
746
747
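// Reserves sizes[space] bytes in each space for the deserializer, retrying
// with garbage collections (at most kThreshold rounds) and reporting a fatal
// out-of-memory error if the reservations still cannot be satisfied.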
748 void Heap::ReserveSpace(
749     int *sizes,
750     Address *locations_out) {
751   bool gc_performed = true;
752   int counter = 0;
753   static const int kThreshold = 20;
754   while (gc_performed && counter++ < kThreshold) {
755     gc_performed = false;
756     ASSERT(NEW_SPACE == FIRST_PAGED_SPACE - 1);
757     for (int space = NEW_SPACE; space <= LAST_PAGED_SPACE; space++) {
758       if (sizes[space] != 0) {
759         MaybeObject* allocation;
760         if (space == NEW_SPACE) {
761           allocation = new_space()->AllocateRaw(sizes[space]);
762         } else {
763           allocation = paged_space(space)->AllocateRaw(sizes[space]);
764         }
765         FreeListNode* node;
766         if (!allocation->To<FreeListNode>(&node)) {
767           if (space == NEW_SPACE) {
768             Heap::CollectGarbage(NEW_SPACE,
769                                  "failed to reserve space in the new space");
770           } else {
771             AbortIncrementalMarkingAndCollectGarbage(
772                 this,
773                 static_cast<AllocationSpace>(space),
774                 "failed to reserve space in paged space");
775           }
776           gc_performed = true;
777           break;
778         } else {
779           // Mark with a free list node, in case we have a GC before
780           // deserializing.
781           node->set_size(this, sizes[space]);
782           locations_out[space] = node->address();
783         }
784       }
785     }
786   }
787
788   if (gc_performed) {
789     // Failed to reserve the space after several attempts.
790     V8::FatalProcessOutOfMemory("Heap::ReserveSpace");
791   }
792 }
793
794
795 void Heap::EnsureFromSpaceIsCommitted() {
796   if (new_space_.CommitFromSpaceIfNeeded()) return;
797
798   // Committing memory to from space failed.
799   // Memory is exhausted and we will die.
800   V8::FatalProcessOutOfMemory("Committing semi space failed.");
801 }
802
803
804 void Heap::ClearJSFunctionResultCaches() {
805   if (isolate_->bootstrapper()->IsActive()) return;
806
807   Object* context = native_contexts_list_;
808   while (!context->IsUndefined()) {
809     // Get the caches for this context. GC can happen when the context
810     // is not fully initialized, so the caches can be undefined.
811     Object* caches_or_undefined =
812         Context::cast(context)->get(Context::JSFUNCTION_RESULT_CACHES_INDEX);
813     if (!caches_or_undefined->IsUndefined()) {
814       FixedArray* caches = FixedArray::cast(caches_or_undefined);
815       // Clear the caches:
816       int length = caches->length();
817       for (int i = 0; i < length; i++) {
818         JSFunctionResultCache::cast(caches->get(i))->Clear();
819       }
820     }
821     // Get the next context:
822     context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
823   }
824 }
825
826
827 void Heap::ClearNormalizedMapCaches() {
828   if (isolate_->bootstrapper()->IsActive() &&
829       !incremental_marking()->IsMarking()) {
830     return;
831   }
832
833   Object* context = native_contexts_list_;
834   while (!context->IsUndefined()) {
835     // GC can happen when the context is not fully initialized,
836     // so the cache can be undefined.
837     Object* cache =
838         Context::cast(context)->get(Context::NORMALIZED_MAP_CACHE_INDEX);
839     if (!cache->IsUndefined()) {
840       NormalizedMapCache::cast(cache)->Clear();
841     }
842     context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
843   }
844 }
845
846
847 void Heap::UpdateSurvivalRateTrend(int start_new_space_size) {
848   double survival_rate =
849       (static_cast<double>(young_survivors_after_last_gc_) * 100) /
850       start_new_space_size;
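  // survival_rate is the percentage of the data that was in the new space at
  // the start of this GC and survived it; e.g. 256 KB of survivors out of
  // 1 MB of new-space data gives a rate of 25.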
851
852   if (survival_rate > kYoungSurvivalRateHighThreshold) {
853     high_survival_rate_period_length_++;
854   } else {
855     high_survival_rate_period_length_ = 0;
856   }
857
858   if (survival_rate < kYoungSurvivalRateLowThreshold) {
859     low_survival_rate_period_length_++;
860   } else {
861     low_survival_rate_period_length_ = 0;
862   }
863
864   double survival_rate_diff = survival_rate_ - survival_rate;
865
866   if (survival_rate_diff > kYoungSurvivalRateAllowedDeviation) {
867     set_survival_rate_trend(DECREASING);
868   } else if (survival_rate_diff < -kYoungSurvivalRateAllowedDeviation) {
869     set_survival_rate_trend(INCREASING);
870   } else {
871     set_survival_rate_trend(STABLE);
872   }
873
874   survival_rate_ = survival_rate;
875 }
876
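// Runs the selected collector together with the surrounding bookkeeping:
// GC prologue/epilogue callbacks, survival rate tracking, old generation
// limit updates and weak handle post-processing.  Returns whether the next
// GC is likely to collect more memory.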
877 bool Heap::PerformGarbageCollection(GarbageCollector collector,
878                                     GCTracer* tracer) {
879   bool next_gc_likely_to_collect_more = false;
880
881   if (collector != SCAVENGER) {
882     PROFILE(isolate_, CodeMovingGCEvent());
883   }
884
885 #ifdef VERIFY_HEAP
886   if (FLAG_verify_heap) {
887     VerifyStringTable();
888   }
889 #endif
890
891   GCType gc_type =
892       collector == MARK_COMPACTOR ? kGCTypeMarkSweepCompact : kGCTypeScavenge;
893
894   {
895     GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
896     VMState<EXTERNAL> state(isolate_);
897     CallGCPrologueCallbacks(gc_type, kNoGCCallbackFlags);
898   }
899
900   EnsureFromSpaceIsCommitted();
901
902   int start_new_space_size = Heap::new_space()->SizeAsInt();
903
904   if (IsHighSurvivalRate()) {
905     // We speed up the incremental marker if it is running so that it
906     // does not fall behind the rate of promotion, which would cause a
907     // constantly growing old space.
908     incremental_marking()->NotifyOfHighPromotionRate();
909   }
910
911   if (collector == MARK_COMPACTOR) {
912     // Perform mark-sweep with optional compaction.
913     MarkCompact(tracer);
914     sweep_generation_++;
915     bool high_survival_rate_during_scavenges = IsHighSurvivalRate() &&
916         IsStableOrIncreasingSurvivalTrend();
917
918     UpdateSurvivalRateTrend(start_new_space_size);
919
920     size_of_old_gen_at_last_old_space_gc_ = PromotedSpaceSizeOfObjects();
921
922     if (high_survival_rate_during_scavenges &&
923         IsStableOrIncreasingSurvivalTrend()) {
924       // Stable high survival rates of young objects both during partial and
925       // full collections indicate that the mutator is either building or
926       // modifying a structure with a long lifetime.
927       // In this case we aggressively raise the old generation memory limits
928       // to postpone the subsequent mark-sweep collection and thus trade
929       // memory space for mutation speed.
930       old_gen_limit_factor_ = 2;
931     } else {
932       old_gen_limit_factor_ = 1;
933     }
934
935     old_gen_promotion_limit_ =
936         OldGenPromotionLimit(size_of_old_gen_at_last_old_space_gc_);
937     old_gen_allocation_limit_ =
938         OldGenAllocationLimit(size_of_old_gen_at_last_old_space_gc_);
939
940     old_gen_exhausted_ = false;
941   } else {
942     tracer_ = tracer;
943     Scavenge();
944     tracer_ = NULL;
945
946     UpdateSurvivalRateTrend(start_new_space_size);
947   }
948
949   if (!new_space_high_promotion_mode_active_ &&
950       new_space_.Capacity() == new_space_.MaximumCapacity() &&
951       IsStableOrIncreasingSurvivalTrend() &&
952       IsHighSurvivalRate()) {
953     // Stable high survival rates even though the young generation is at
954     // maximum capacity indicate that most objects will be promoted.
955     // To decrease scavenger pauses and final mark-sweep pauses, we
956     // have to limit the maximal capacity of the young generation.
957     new_space_high_promotion_mode_active_ = true;
958     if (FLAG_trace_gc) {
959       PrintPID("Limited new space size due to high promotion rate: %d MB\n",
960                new_space_.InitialCapacity() / MB);
961     }
962     // Support for global pre-tenuring uses the high promotion mode as a
963     // heuristic indicator of whether to pretenure or not, so we trigger
964     // deoptimization here to take advantage of pre-tenuring as soon as
965     // possible.
966     if (FLAG_pretenure_literals) {
967       isolate_->stack_guard()->FullDeopt();
968     }
969   } else if (new_space_high_promotion_mode_active_ &&
970       IsStableOrDecreasingSurvivalTrend() &&
971       IsLowSurvivalRate()) {
972     // Decreasing low survival rates might indicate that the above high
973     // promotion mode is over and we should allow the young generation
974     // to grow again.
975     new_space_high_promotion_mode_active_ = false;
976     if (FLAG_trace_gc) {
977       PrintPID("Unlimited new space size due to low promotion rate: %d MB\n",
978                new_space_.MaximumCapacity() / MB);
979     }
980     // Trigger deoptimization here to turn off pre-tenuring as soon as
981     // possible.
982     if (FLAG_pretenure_literals) {
983       isolate_->stack_guard()->FullDeopt();
984     }
985   }
986
987   if (new_space_high_promotion_mode_active_ &&
988       new_space_.Capacity() > new_space_.InitialCapacity()) {
989     new_space_.Shrink();
990   }
991
992   isolate_->counters()->objs_since_last_young()->Set(0);
993
994   // Callbacks that fire after this point might trigger nested GCs and
995   // restart incremental marking, so the assertion can't be moved down.
996   ASSERT(collector == SCAVENGER || incremental_marking()->IsStopped());
997
998   gc_post_processing_depth_++;
999   { DisableAssertNoAllocation allow_allocation;
1000     GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
1001     next_gc_likely_to_collect_more =
1002         isolate_->global_handles()->PostGarbageCollectionProcessing(
1003             collector, tracer);
1004   }
1005   gc_post_processing_depth_--;
1006
1007   // Update relocatables.
1008   Relocatable::PostGarbageCollectionProcessing();
1009
1010   if (collector == MARK_COMPACTOR) {
1011     // Register the amount of external allocated memory.
1012     amount_of_external_allocated_memory_at_last_global_gc_ =
1013         amount_of_external_allocated_memory_;
1014   }
1015
1016   {
1017     GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
1018     VMState<EXTERNAL> state(isolate_);
1019     CallGCEpilogueCallbacks(gc_type);
1020   }
1021
1022 #ifdef VERIFY_HEAP
1023   if (FLAG_verify_heap) {
1024     VerifyStringTable();
1025   }
1026 #endif
1027
1028   return next_gc_likely_to_collect_more;
1029 }
1030
1031
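// Invokes the global GC prologue callback for mark-sweep/compact collections,
// followed by every registered prologue callback whose gc_type filter matches.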
1032 void Heap::CallGCPrologueCallbacks(GCType gc_type, GCCallbackFlags flags) {
1033   if (gc_type == kGCTypeMarkSweepCompact && global_gc_prologue_callback_) {
1034     global_gc_prologue_callback_();
1035   }
1036   for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
1037     if (gc_type & gc_prologue_callbacks_[i].gc_type) {
1038       gc_prologue_callbacks_[i].callback(gc_type, flags);
1039     }
1040   }
1041 }
1042
1043
1044 void Heap::CallGCEpilogueCallbacks(GCType gc_type) {
1045   for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
1046     if (gc_type & gc_epilogue_callbacks_[i].gc_type) {
1047       gc_epilogue_callbacks_[i].callback(gc_type, kNoGCCallbackFlags);
1048     }
1049   }
1050   if (gc_type == kGCTypeMarkSweepCompact && global_gc_epilogue_callback_) {
1051     global_gc_epilogue_callback_();
1052   }
1053 }
1054
1055
1056 void Heap::MarkCompact(GCTracer* tracer) {
1057   gc_state_ = MARK_COMPACT;
1058   LOG(isolate_, ResourceEvent("markcompact", "begin"));
1059
1060   mark_compact_collector_.Prepare(tracer);
1061
1062   ms_count_++;
1063   tracer->set_full_gc_count(ms_count_);
1064
1065   MarkCompactPrologue();
1066
1067   mark_compact_collector_.CollectGarbage();
1068
1069   LOG(isolate_, ResourceEvent("markcompact", "end"));
1070
1071   gc_state_ = NOT_IN_GC;
1072
1073   isolate_->counters()->objs_since_last_full()->Set(0);
1074
1075   contexts_disposed_ = 0;
1076
1077   flush_monomorphic_ics_ = false;
1078 }
1079
1080
1081 void Heap::MarkCompactPrologue() {
1082   // At every full GC, clear the keyed lookup cache to enable collection of
1083   // unused maps.
1084   isolate_->keyed_lookup_cache()->Clear();
1085   isolate_->context_slot_cache()->Clear();
1086   isolate_->descriptor_lookup_cache()->Clear();
1087   RegExpResultsCache::Clear(string_split_cache());
1088   RegExpResultsCache::Clear(regexp_multiple_cache());
1089
1090   isolate_->compilation_cache()->MarkCompactPrologue();
1091
1092   CompletelyClearInstanceofCache();
1093
1094   FlushNumberStringCache();
1095   if (FLAG_cleanup_code_caches_at_gc) {
1096     polymorphic_code_cache()->set_cache(undefined_value());
1097   }
1098
1099   ClearNormalizedMapCaches();
1100 }
1101
1102
1103 Object* Heap::FindCodeObject(Address a) {
1104   return isolate()->inner_pointer_to_code_cache()->
1105       GcSafeFindCodeForInnerPointer(a);
1106 }
1107
1108
1109 // Helper class for copying HeapObjects
1110 class ScavengeVisitor: public ObjectVisitor {
1111  public:
1112   explicit ScavengeVisitor(Heap* heap) : heap_(heap) {}
1113
1114   void VisitPointer(Object** p) { ScavengePointer(p); }
1115
1116   void VisitPointers(Object** start, Object** end) {
1117     // Copy all HeapObject pointers in [start, end)
1118     for (Object** p = start; p < end; p++) ScavengePointer(p);
1119   }
1120
1121  private:
1122   void ScavengePointer(Object** p) {
1123     Object* object = *p;
1124     if (!heap_->InNewSpace(object)) return;
1125     Heap::ScavengeObject(reinterpret_cast<HeapObject**>(p),
1126                          reinterpret_cast<HeapObject*>(object));
1127   }
1128
1129   Heap* heap_;
1130 };
1131
1132
1133 #ifdef VERIFY_HEAP
1134 // Visitor class to verify pointers in code or data space do not point into
1135 // new space.
1136 class VerifyNonPointerSpacePointersVisitor: public ObjectVisitor {
1137  public:
1138   void VisitPointers(Object** start, Object** end) {
1139     for (Object** current = start; current < end; current++) {
1140       if ((*current)->IsHeapObject()) {
1141         CHECK(!HEAP->InNewSpace(HeapObject::cast(*current)));
1142       }
1143     }
1144   }
1145 };
1146
1147
1148 static void VerifyNonPointerSpacePointers() {
1149   // Verify that there are no pointers to new space in spaces where we
1150   // do not expect them.
1151   VerifyNonPointerSpacePointersVisitor v;
1152   HeapObjectIterator code_it(HEAP->code_space());
1153   for (HeapObject* object = code_it.Next();
1154        object != NULL; object = code_it.Next())
1155     object->Iterate(&v);
1156
1157   // The old data space was normally swept conservatively, so the iterator
1158   // doesn't work on it; in that case we skip the next bit.
1159   if (!HEAP->old_data_space()->was_swept_conservatively()) {
1160     HeapObjectIterator data_it(HEAP->old_data_space());
1161     for (HeapObject* object = data_it.Next();
1162          object != NULL; object = data_it.Next())
1163       object->Iterate(&v);
1164   }
1165 }
1166 #endif  // VERIFY_HEAP
1167
1168
1169 void Heap::CheckNewSpaceExpansionCriteria() {
1170   if (new_space_.Capacity() < new_space_.MaximumCapacity() &&
1171       survived_since_last_expansion_ > new_space_.Capacity() &&
1172       !new_space_high_promotion_mode_active_) {
1173     // Grow the size of new space if there is room to grow, enough data
1174     // has survived scavenge since the last expansion and we are not in
1175     // high promotion mode.
1176     new_space_.Grow();
1177     survived_since_last_expansion_ = 0;
1178   }
1179 }
1180
1181
1182 static bool IsUnscavengedHeapObject(Heap* heap, Object** p) {
1183   return heap->InNewSpace(*p) &&
1184       !HeapObject::cast(*p)->map_word().IsForwardingAddress();
1185 }
1186
1187
1188 void Heap::ScavengeStoreBufferCallback(
1189     Heap* heap,
1190     MemoryChunk* page,
1191     StoreBufferEvent event) {
1192   heap->store_buffer_rebuilder_.Callback(page, event);
1193 }
1194
1195
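// Rebuilds store buffer state as pages are scanned during a scavenge,
// deciding per page whether to keep its store buffer entries or to mark the
// whole page scan-on-scavenge instead.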
1196 void StoreBufferRebuilder::Callback(MemoryChunk* page, StoreBufferEvent event) {
1197   if (event == kStoreBufferStartScanningPagesEvent) {
1198     start_of_current_page_ = NULL;
1199     current_page_ = NULL;
1200   } else if (event == kStoreBufferScanningPageEvent) {
1201     if (current_page_ != NULL) {
1202       // If this page already overflowed the store buffer during this iteration.
1203       if (current_page_->scan_on_scavenge()) {
1204         // Then we should wipe out the entries that have been added for it.
1205         store_buffer_->SetTop(start_of_current_page_);
1206       } else if (store_buffer_->Top() - start_of_current_page_ >=
1207                  (store_buffer_->Limit() - store_buffer_->Top()) >> 2) {
1208         // Did we find too many pointers in the previous page?  The heuristic is
1209         // that no page can take more than 1/5 the remaining slots in the store
1210         // buffer.
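        // For example, if 1000 slots remained when we started scanning this
        // page and the page consumed 250 of them, then 250 >= (750 >> 2), so
        // the page is flagged scan-on-scavenge and its entries are discarded.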
1211         current_page_->set_scan_on_scavenge(true);
1212         store_buffer_->SetTop(start_of_current_page_);
1213       } else {
1214         // In this case the page we scanned took a reasonable number of slots in
1215         // the store buffer.  It has now been rehabilitated and is no longer
1216         // marked scan_on_scavenge.
1217         ASSERT(!current_page_->scan_on_scavenge());
1218       }
1219     }
1220     start_of_current_page_ = store_buffer_->Top();
1221     current_page_ = page;
1222   } else if (event == kStoreBufferFullEvent) {
1223     // The current page overflowed the store buffer again.  Wipe out its entries
1224     // in the store buffer and mark it scan-on-scavenge again.  This may happen
1225     // several times while scanning.
1226     if (current_page_ == NULL) {
1227       // Store Buffer overflowed while scanning promoted objects.  These are not
1228       // in any particular page, though they are likely to be clustered by the
1229       // allocation routines.
1230       store_buffer_->EnsureSpace(StoreBuffer::kStoreBufferSize);
1231     } else {
1232       // Store Buffer overflowed while scanning a particular old space page for
1233       // pointers to new space.
1234       ASSERT(current_page_ == page);
1235       ASSERT(page != NULL);
1236       current_page_->set_scan_on_scavenge(true);
1237       ASSERT(start_of_current_page_ != store_buffer_->Top());
1238       store_buffer_->SetTop(start_of_current_page_);
1239     }
1240   } else {
1241     UNREACHABLE();
1242   }
1243 }
1244
1245
1246 void PromotionQueue::Initialize() {
1247   // Assumes that a NewSpacePage exactly fits a number of promotion queue
1248   // entries (where each is a pair of intptr_t). This allows us to simplify
1249   // the test for when to switch pages.
1250   ASSERT((Page::kPageSize - MemoryChunk::kBodyOffset) % (2 * kPointerSize)
1251          == 0);
1252   limit_ = reinterpret_cast<intptr_t*>(heap_->new_space()->ToSpaceStart());
1253   front_ = rear_ =
1254       reinterpret_cast<intptr_t*>(heap_->new_space()->ToSpaceEnd());
1255   emergency_stack_ = NULL;
1256   guard_ = false;
1257 }
1258
1259
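// Copies the promotion queue entries that live on the page containing |rear_|
// into an emergency stack allocated on the C++ heap and advances |rear_| past
// them.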
1260 void PromotionQueue::RelocateQueueHead() {
1261   ASSERT(emergency_stack_ == NULL);
1262
1263   Page* p = Page::FromAllocationTop(reinterpret_cast<Address>(rear_));
1264   intptr_t* head_start = rear_;
1265   intptr_t* head_end =
1266       Min(front_, reinterpret_cast<intptr_t*>(p->area_end()));
1267
1268   int entries_count =
1269       static_cast<int>(head_end - head_start) / kEntrySizeInWords;
1270
1271   emergency_stack_ = new List<Entry>(2 * entries_count);
1272
1273   while (head_start != head_end) {
1274     int size = static_cast<int>(*(head_start++));
1275     HeapObject* obj = reinterpret_cast<HeapObject*>(*(head_start++));
1276     emergency_stack_->Add(Entry(obj, size));
1277   }
1278   rear_ = head_end;
1279 }
1280
1281
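// Retainer used while processing weak lists during a scavenge: objects
// outside from-space are kept as-is, objects that have already been copied
// are replaced by their forwarding address, and anything else is dropped.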
1282 class ScavengeWeakObjectRetainer : public WeakObjectRetainer {
1283  public:
1284   explicit ScavengeWeakObjectRetainer(Heap* heap) : heap_(heap) { }
1285
1286   virtual Object* RetainAs(Object* object) {
1287     if (!heap_->InFromSpace(object)) {
1288       return object;
1289     }
1290
1291     MapWord map_word = HeapObject::cast(object)->map_word();
1292     if (map_word.IsForwardingAddress()) {
1293       return map_word.ToForwardingAddress();
1294     }
1295     return NULL;
1296   }
1297
1298  private:
1299   Heap* heap_;
1300 };
1301
1302
1303 void Heap::Scavenge() {
1304   RelocationLock relocation_lock(this);
1305
1306 #ifdef VERIFY_HEAP
1307   if (FLAG_verify_heap) VerifyNonPointerSpacePointers();
1308 #endif
1309
1310   gc_state_ = SCAVENGE;
1311
1312   // Implements Cheney's copying algorithm
1313   LOG(isolate_, ResourceEvent("scavenge", "begin"));
1314
1315   // Clear descriptor cache.
1316   isolate_->descriptor_lookup_cache()->Clear();
1317
1318   // Used for updating survived_since_last_expansion_ at function end.
1319   intptr_t survived_watermark = PromotedSpaceSizeOfObjects();
1320
1321   CheckNewSpaceExpansionCriteria();
1322
1323   SelectScavengingVisitorsTable();
1324
1325   incremental_marking()->PrepareForScavenge();
1326
1327   paged_space(OLD_DATA_SPACE)->EnsureSweeperProgress(new_space_.Size());
1328   paged_space(OLD_POINTER_SPACE)->EnsureSweeperProgress(new_space_.Size());
1329
1330   // Flip the semispaces.  After flipping, to space is empty, from space has
1331   // live objects.
1332   new_space_.Flip();
1333   new_space_.ResetAllocationInfo();
1334
1335   // We need to sweep newly copied objects which can be either in the
1336   // to space or promoted to the old generation.  For to-space
1337   // objects, we treat the bottom of the to space as a queue.  Newly
1338   // copied and unswept objects lie between a 'front' mark and the
1339   // allocation pointer.
1340   //
1341   // Promoted objects can go into various old-generation spaces, and
1342   // can be allocated internally in the spaces (from the free list).
1343   // We treat the top of the to space as a queue of addresses of
1344   // promoted objects.  The addresses of newly promoted and unswept
1345   // objects lie between a 'front' mark and a 'rear' mark that is
1346   // updated as a side effect of promoting an object.
1347   //
1348   // There is guaranteed to be enough room at the top of the to space
1349   // for the addresses of promoted objects: every object promoted
1350   // frees up its size in bytes from the top of the new space, and
1351   // objects are at least one pointer in size.
1352   Address new_space_front = new_space_.ToSpaceStart();
1353   promotion_queue_.Initialize();
1354
1355 #ifdef DEBUG
1356   store_buffer()->Clean();
1357 #endif
1358
1359   ScavengeVisitor scavenge_visitor(this);
1360   // Copy roots.
1361   IterateRoots(&scavenge_visitor, VISIT_ALL_IN_SCAVENGE);
1362
1363   // Copy objects reachable from the old generation.
1364   {
1365     StoreBufferRebuildScope scope(this,
1366                                   store_buffer(),
1367                                   &ScavengeStoreBufferCallback);
1368     store_buffer()->IteratePointersToNewSpace(&ScavengeObject);
1369   }
1370
1371   // Copy objects reachable from cells by scavenging cell values directly.
1372   HeapObjectIterator cell_iterator(cell_space_);
1373   for (HeapObject* heap_object = cell_iterator.Next();
1374        heap_object != NULL;
1375        heap_object = cell_iterator.Next()) {
1376     if (heap_object->IsJSGlobalPropertyCell()) {
1377       JSGlobalPropertyCell* cell = JSGlobalPropertyCell::cast(heap_object);
1378       Address value_address = cell->ValueAddress();
1379       scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(value_address));
1380     }
1381   }
1382
1383   // Copy objects reachable from the code flushing candidates list.
1384   MarkCompactCollector* collector = mark_compact_collector();
1385   if (collector->is_code_flushing_enabled()) {
1386     collector->code_flusher()->IteratePointersToFromSpace(&scavenge_visitor);
1387   }
1388
1389   // Scavenge object reachable from the native contexts list directly.
1390   scavenge_visitor.VisitPointer(BitCast<Object**>(&native_contexts_list_));
1391
1392   new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
1393
1394   while (isolate()->global_handles()->IterateObjectGroups(
1395       &scavenge_visitor, &IsUnscavengedHeapObject)) {
1396     new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
1397   }
1398   isolate()->global_handles()->RemoveObjectGroups();
1399   isolate()->global_handles()->RemoveImplicitRefGroups();
1400
1401   isolate_->global_handles()->IdentifyNewSpaceWeakIndependentHandles(
1402       &IsUnscavengedHeapObject);
1403   isolate_->global_handles()->IterateNewSpaceWeakIndependentRoots(
1404       &scavenge_visitor);
1405   new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
1406
1407   UpdateNewSpaceReferencesInExternalStringTable(
1408       &UpdateNewSpaceReferenceInExternalStringTableEntry);
1409
1410   error_object_list_.UpdateReferencesInNewSpace(this);
1411
1412   promotion_queue_.Destroy();
1413
1414   if (!FLAG_watch_ic_patching) {
1415     isolate()->runtime_profiler()->UpdateSamplesAfterScavenge();
1416   }
1417   incremental_marking()->UpdateMarkingDequeAfterScavenge();
1418
1419   ScavengeWeakObjectRetainer weak_object_retainer(this);
1420   ProcessWeakReferences(&weak_object_retainer);
1421
1422   ASSERT(new_space_front == new_space_.top());
1423
1424   // Set age mark.
1425   new_space_.set_age_mark(new_space_.top());
1426
1427   new_space_.LowerInlineAllocationLimit(
1428       new_space_.inline_allocation_limit_step());
1429
1430   // Update how much has survived scavenge.
1431   IncrementYoungSurvivorsCounter(static_cast<int>(
1432       (PromotedSpaceSizeOfObjects() - survived_watermark) + new_space_.Size()));
1433
1434   LOG(isolate_, ResourceEvent("scavenge", "end"));
1435
1436   gc_state_ = NOT_IN_GC;
1437
1438   scavenges_since_last_idle_round_++;
1439 }
1440
1441
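// Returns the post-scavenge location of an external string that was in new
// space, or NULL (after finalizing the string) if it did not survive.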
1442 String* Heap::UpdateNewSpaceReferenceInExternalStringTableEntry(Heap* heap,
1443                                                                 Object** p) {
1444   MapWord first_word = HeapObject::cast(*p)->map_word();
1445
1446   if (!first_word.IsForwardingAddress()) {
1447     // Unreachable external string can be finalized.
1448     heap->FinalizeExternalString(String::cast(*p));
1449     return NULL;
1450   }
1451
1452   // String is still reachable.
1453   return String::cast(first_word.ToForwardingAddress());
1454 }
1455
1456
1457 void Heap::UpdateNewSpaceReferencesInExternalStringTable(
1458     ExternalStringTableUpdaterCallback updater_func) {
1459 #ifdef VERIFY_HEAP
1460   if (FLAG_verify_heap) {
1461     external_string_table_.Verify();
1462   }
1463 #endif
1464
1465   if (external_string_table_.new_space_strings_.is_empty()) return;
1466
1467   Object** start = &external_string_table_.new_space_strings_[0];
1468   Object** end = start + external_string_table_.new_space_strings_.length();
1469   Object** last = start;
1470
1471   for (Object** p = start; p < end; ++p) {
1472     ASSERT(InFromSpace(*p));
1473     String* target = updater_func(this, p);
1474
1475     if (target == NULL) continue;
1476
1477     ASSERT(target->IsExternalString());
1478
1479     if (InNewSpace(target)) {
1480       // String is still in new space.  Update the table entry.
1481       *last = target;
1482       ++last;
1483     } else {
1484       // String got promoted.  Move it to the old string list.
1485       external_string_table_.AddOldString(target);
1486     }
1487   }
1488
1489   ASSERT(last <= end);
1490   external_string_table_.ShrinkNewStrings(static_cast<int>(last - start));
1491 }
1492
1493
1494 void Heap::UpdateReferencesInExternalStringTable(
1495     ExternalStringTableUpdaterCallback updater_func) {
1496
1497   // Update old space string references.
1498   if (external_string_table_.old_space_strings_.length() > 0) {
1499     Object** start = &external_string_table_.old_space_strings_[0];
1500     Object** end = start + external_string_table_.old_space_strings_.length();
1501     for (Object** p = start; p < end; ++p) *p = updater_func(this, p);
1502   }
1503
1504   UpdateNewSpaceReferencesInExternalStringTable(updater_func);
1505 }
1506
1507
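// Prunes the weak list of optimized JSFunctions that is threaded through
// the next_function_link field.  Entries kept by the retainer stay linked
// (with their link slots recorded for the compactor when record_slots is
// set); all other entries are unlinked from the list.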
1508 static Object* ProcessFunctionWeakReferences(Heap* heap,
1509                                              Object* function,
1510                                              WeakObjectRetainer* retainer,
1511                                              bool record_slots) {
1512   Object* undefined = heap->undefined_value();
1513   Object* head = undefined;
1514   JSFunction* tail = NULL;
1515   Object* candidate = function;
1516   while (candidate != undefined) {
1517     // Check whether to keep the candidate in the list.
1518     JSFunction* candidate_function = reinterpret_cast<JSFunction*>(candidate);
1519     Object* retain = retainer->RetainAs(candidate);
1520     if (retain != NULL) {
1521       if (head == undefined) {
1522         // First element in the list.
1523         head = retain;
1524       } else {
1525         // Subsequent elements in the list.
1526         ASSERT(tail != NULL);
1527         tail->set_next_function_link(retain);
1528         if (record_slots) {
1529           Object** next_function =
1530               HeapObject::RawField(tail, JSFunction::kNextFunctionLinkOffset);
1531           heap->mark_compact_collector()->RecordSlot(
1532               next_function, next_function, retain);
1533         }
1534       }
1535       // Retained function is new tail.
1536       candidate_function = reinterpret_cast<JSFunction*>(retain);
1537       tail = candidate_function;
1538
1539       ASSERT(retain->IsUndefined() || retain->IsJSFunction());
1540
1541       if (retain == undefined) break;
1542     }
1543
1544     // Move to next element in the list.
1545     candidate = candidate_function->next_function_link();
1546   }
1547
1548   // Terminate the list if it contains one or more elements.
1549   if (tail != NULL) {
1550     tail->set_next_function_link(undefined);
1551   }
1552
1553   return head;
1554 }
1555
1556
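// Prunes the weak list of native contexts threaded through
// Context::NEXT_CONTEXT_LINK and, for each retained context, also prunes
// its weak list of optimized functions (see ProcessFunctionWeakReferences
// above).  The record_slots check below covers the case where this runs
// during a compacting mark-sweep cycle.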
1557 void Heap::ProcessWeakReferences(WeakObjectRetainer* retainer) {
1558   Object* undefined = undefined_value();
1559   Object* head = undefined;
1560   Context* tail = NULL;
1561   Object* candidate = native_contexts_list_;
1562
1563   // We don't record weak slots during marking or scavenges.  Instead we do
1564   // it once when we complete the mark-compact cycle.  Note that the write
1565   // barrier has no effect if we are already in the middle of a compacting
1566   // mark-sweep cycle, so we have to record slots manually.
1567   bool record_slots =
1568       gc_state() == MARK_COMPACT &&
1569       mark_compact_collector()->is_compacting();
1570
1571   while (candidate != undefined) {
1572     // Check whether to keep the candidate in the list.
1573     Context* candidate_context = reinterpret_cast<Context*>(candidate);
1574     Object* retain = retainer->RetainAs(candidate);
1575     if (retain != NULL) {
1576       if (head == undefined) {
1577         // First element in the list.
1578         head = retain;
1579       } else {
1580         // Subsequent elements in the list.
1581         ASSERT(tail != NULL);
1582         tail->set_unchecked(this,
1583                             Context::NEXT_CONTEXT_LINK,
1584                             retain,
1585                             UPDATE_WRITE_BARRIER);
1586
1587         if (record_slots) {
1588           Object** next_context =
1589               HeapObject::RawField(
1590                   tail, FixedArray::SizeFor(Context::NEXT_CONTEXT_LINK));
1591           mark_compact_collector()->RecordSlot(
1592               next_context, next_context, retain);
1593         }
1594       }
1595       // Retained context is new tail.
1596       candidate_context = reinterpret_cast<Context*>(retain);
1597       tail = candidate_context;
1598
1599       if (retain == undefined) break;
1600
1601       // Process the weak list of optimized functions for the context.
1602       Object* function_list_head =
1603           ProcessFunctionWeakReferences(
1604               this,
1605               candidate_context->get(Context::OPTIMIZED_FUNCTIONS_LIST),
1606               retainer,
1607               record_slots);
1608       candidate_context->set_unchecked(this,
1609                                        Context::OPTIMIZED_FUNCTIONS_LIST,
1610                                        function_list_head,
1611                                        UPDATE_WRITE_BARRIER);
1612       if (record_slots) {
1613         Object** optimized_functions =
1614             HeapObject::RawField(
1615                 tail, FixedArray::SizeFor(Context::OPTIMIZED_FUNCTIONS_LIST));
1616         mark_compact_collector()->RecordSlot(
1617             optimized_functions, optimized_functions, function_list_head);
1618       }
1619     }
1620
1621     // Move to next element in the list.
1622     candidate = candidate_context->get(Context::NEXT_CONTEXT_LINK);
1623   }
1624
1625   // Terminate the list if it contains one or more elements.
1626   if (tail != NULL) {
1627     tail->set_unchecked(this,
1628                         Context::NEXT_CONTEXT_LINK,
1629                         Heap::undefined_value(),
1630                         UPDATE_WRITE_BARRIER);
1631   }
1632
1633   // Update the head of the list of contexts.
1634   native_contexts_list_ = head;
1635 }
1636
1637
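// Enumerates every external string in the heap on behalf of an
// embedder-provided visitor.  A minimal usage sketch, assuming the public
// v8::ExternalResourceVisitor / v8::V8::VisitExternalResources API of this
// V8 version:
//
//   class MyVisitor : public v8::ExternalResourceVisitor {
//     virtual void VisitExternalString(v8::Handle<v8::String> string) {
//       // Inspect string->GetExternalStringResource() here.
//     }
//   };
//   MyVisitor visitor;
//   v8::V8::VisitExternalResources(&visitor);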
1638 void Heap::VisitExternalResources(v8::ExternalResourceVisitor* visitor) {
1639   AssertNoAllocation no_allocation;
1640
1641   // Both the external string table and the string table may contain
1642   // external strings, but neither lists them exhaustively, nor is the
1643   // intersection set empty.  Therefore we iterate over the external string
1644   // table first, ignoring internalized strings, and then over the
1645   // internalized string table.
1646
1647   class ExternalStringTableVisitorAdapter : public ObjectVisitor {
1648    public:
1649     explicit ExternalStringTableVisitorAdapter(
1650         v8::ExternalResourceVisitor* visitor) : visitor_(visitor) {}
1651     virtual void VisitPointers(Object** start, Object** end) {
1652       for (Object** p = start; p < end; p++) {
1653         // Visit non-internalized external strings,
1654         // since internalized strings are listed in the string table.
1655         if (!(*p)->IsInternalizedString()) {
1656           ASSERT((*p)->IsExternalString());
1657           visitor_->VisitExternalString(Utils::ToLocal(
1658               Handle<String>(String::cast(*p))));
1659         }
1660       }
1661     }
1662    private:
1663     v8::ExternalResourceVisitor* visitor_;
1664   } external_string_table_visitor(visitor);
1665
1666   external_string_table_.Iterate(&external_string_table_visitor);
1667
1668   class StringTableVisitorAdapter : public ObjectVisitor {
1669    public:
1670     explicit StringTableVisitorAdapter(
1671         v8::ExternalResourceVisitor* visitor) : visitor_(visitor) {}
1672     virtual void VisitPointers(Object** start, Object** end) {
1673       for (Object** p = start; p < end; p++) {
1674         if ((*p)->IsExternalString()) {
1675           ASSERT((*p)->IsInternalizedString());
1676           visitor_->VisitExternalString(Utils::ToLocal(
1677               Handle<String>(String::cast(*p))));
1678         }
1679       }
1680     }
1681    private:
1682     v8::ExternalResourceVisitor* visitor_;
1683   } string_table_visitor(visitor);
1684
1685   string_table()->IterateElements(&string_table_visitor);
1686 }
1687
1688
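// Static visitor used by DoScavenge below: every pointer that still refers
// to new space is passed to Heap::ScavengeObject, which either copies the
// referenced object or follows the forwarding address already installed in
// its map word.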
1689 class NewSpaceScavenger : public StaticNewSpaceVisitor<NewSpaceScavenger> {
1690  public:
1691   static inline void VisitPointer(Heap* heap, Object** p) {
1692     Object* object = *p;
1693     if (!heap->InNewSpace(object)) return;
1694     Heap::ScavengeObject(reinterpret_cast<HeapObject**>(p),
1695                          reinterpret_cast<HeapObject*>(object));
1696   }
1697 };
1698
1699
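// Core of the scavenge: a Cheney-style breadth-first scan of to-space.
// new_space_front is the scan pointer and new_space_.top() the allocation
// pointer; every object between them has been copied but not yet had its
// own pointers scavenged.  Interleaved with the scan, the promotion queue
// is drained and each promoted object is re-scanned for pointers back into
// from-space, with the store buffer rebuilt via StoreBufferRebuildScope.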
1700 Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
1701                          Address new_space_front) {
1702   do {
1703     SemiSpace::AssertValidRange(new_space_front, new_space_.top());
1704     // The addresses new_space_front and new_space_.top() define a
1705     // queue of unprocessed copied objects.  Process them until the
1706     // queue is empty.
1707     while (new_space_front != new_space_.top()) {
1708       if (!NewSpacePage::IsAtEnd(new_space_front)) {
1709         HeapObject* object = HeapObject::FromAddress(new_space_front);
1710         new_space_front +=
1711           NewSpaceScavenger::IterateBody(object->map(), object);
1712       } else {
1713         new_space_front =
1714             NewSpacePage::FromLimit(new_space_front)->next_page()->area_start();
1715       }
1716     }
1717
1718     // Promote and process all the to-be-promoted objects.
1719     {
1720       StoreBufferRebuildScope scope(this,
1721                                     store_buffer(),
1722                                     &ScavengeStoreBufferCallback);
1723       while (!promotion_queue()->is_empty()) {
1724         HeapObject* target;
1725         int size;
1726         promotion_queue()->remove(&target, &size);
1727
1728         // A promoted object might already have been partially visited during
1729         // old space pointer iteration.  Thus we search specifically for
1730         // pointers into the from semispace instead of looking for pointers
1731         // into new space.
1732         ASSERT(!target->IsMap());
1733         IterateAndMarkPointersToFromSpace(target->address(),
1734                                           target->address() + size,
1735                                           &ScavengeObject);
1736       }
1737     }
1738
1739     // Take another spin if there are now unswept objects in new space
1740     // (there are currently no more unswept promoted objects).
1741   } while (new_space_front != new_space_.top());
1742
1743   return new_space_front;
1744 }
1745
1746
1747 STATIC_ASSERT((FixedDoubleArray::kHeaderSize & kDoubleAlignmentMask) == 0);
1748
1749
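// Used when an object that requires double alignment was allocated with one
// extra pointer-sized word (see EvacuateObject): depending on where the raw
// allocation landed, a one-word filler is placed either before the object
// (shifting it up by kPointerSize) or after it, so that the returned object
// is double aligned.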
1750 INLINE(static HeapObject* EnsureDoubleAligned(Heap* heap,
1751                                               HeapObject* object,
1752                                               int size));
1753
1754 static HeapObject* EnsureDoubleAligned(Heap* heap,
1755                                        HeapObject* object,
1756                                        int size) {
1757   if ((OffsetFrom(object->address()) & kDoubleAlignmentMask) != 0) {
1758     heap->CreateFillerObjectAt(object->address(), kPointerSize);
1759     return HeapObject::FromAddress(object->address() + kPointerSize);
1760   } else {
1761     heap->CreateFillerObjectAt(object->address() + size - kPointerSize,
1762                                kPointerSize);
1763     return object;
1764   }
1765 }
1766
1767
1768 enum LoggingAndProfiling {
1769   LOGGING_AND_PROFILING_ENABLED,
1770   LOGGING_AND_PROFILING_DISABLED
1771 };
1772
1773
1774 enum MarksHandling { TRANSFER_MARKS, IGNORE_MARKS };
1775
1776
1777 template<MarksHandling marks_handling,
1778          LoggingAndProfiling logging_and_profiling_mode>
1779 class ScavengingVisitor : public StaticVisitorBase {
1780  public:
1781   static void Initialize() {
1782     table_.Register(kVisitSeqOneByteString, &EvacuateSeqOneByteString);
1783     table_.Register(kVisitSeqTwoByteString, &EvacuateSeqTwoByteString);
1784     table_.Register(kVisitShortcutCandidate, &EvacuateShortcutCandidate);
1785     table_.Register(kVisitByteArray, &EvacuateByteArray);
1786     table_.Register(kVisitFixedArray, &EvacuateFixedArray);
1787     table_.Register(kVisitFixedDoubleArray, &EvacuateFixedDoubleArray);
1788
1789     table_.Register(kVisitNativeContext,
1790                     &ObjectEvacuationStrategy<POINTER_OBJECT>::
1791                         template VisitSpecialized<Context::kSize>);
1792
1793     table_.Register(kVisitConsString,
1794                     &ObjectEvacuationStrategy<POINTER_OBJECT>::
1795                         template VisitSpecialized<ConsString::kSize>);
1796
1797     table_.Register(kVisitSlicedString,
1798                     &ObjectEvacuationStrategy<POINTER_OBJECT>::
1799                         template VisitSpecialized<SlicedString::kSize>);
1800
1801     table_.Register(kVisitSymbol,
1802                     &ObjectEvacuationStrategy<POINTER_OBJECT>::
1803                         template VisitSpecialized<Symbol::kSize>);
1804
1805     table_.Register(kVisitSharedFunctionInfo,
1806                     &ObjectEvacuationStrategy<POINTER_OBJECT>::
1807                         template VisitSpecialized<SharedFunctionInfo::kSize>);
1808
1809     table_.Register(kVisitJSWeakMap,
1810                     &ObjectEvacuationStrategy<POINTER_OBJECT>::
1811                     Visit);
1812
1813     table_.Register(kVisitJSRegExp,
1814                     &ObjectEvacuationStrategy<POINTER_OBJECT>::
1815                     Visit);
1816
1817     if (marks_handling == IGNORE_MARKS) {
1818       table_.Register(kVisitJSFunction,
1819                       &ObjectEvacuationStrategy<POINTER_OBJECT>::
1820                           template VisitSpecialized<JSFunction::kSize>);
1821     } else {
1822       table_.Register(kVisitJSFunction, &EvacuateJSFunction);
1823     }
1824
1825     table_.RegisterSpecializations<ObjectEvacuationStrategy<DATA_OBJECT>,
1826                                    kVisitDataObject,
1827                                    kVisitDataObjectGeneric>();
1828
1829     table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>,
1830                                    kVisitJSObject,
1831                                    kVisitJSObjectGeneric>();
1832
1833     table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>,
1834                                    kVisitStruct,
1835                                    kVisitStructGeneric>();
1836   }
1837
1838   static VisitorDispatchTable<ScavengingCallback>* GetTable() {
1839     return &table_;
1840   }
1841
1842  private:
1843   enum ObjectContents  { DATA_OBJECT, POINTER_OBJECT };
1844   enum SizeRestriction { SMALL, UNKNOWN_SIZE };
1845
1846   static void RecordCopiedObject(Heap* heap, HeapObject* obj) {
1847     bool should_record = false;
1848 #ifdef DEBUG
1849     should_record = FLAG_heap_stats;
1850 #endif
1851     should_record = should_record || FLAG_log_gc;
1852     if (should_record) {
1853       if (heap->new_space()->Contains(obj)) {
1854         heap->new_space()->RecordAllocation(obj);
1855       } else {
1856         heap->new_space()->RecordPromotion(obj);
1857       }
1858     }
1859   }
1860
1861   // Helper function used during object evacuation to copy a source object
1862   // to an allocated target object and to install a forwarding address in
1863   // the map word of the source object.
1864   INLINE(static void MigrateObject(Heap* heap,
1865                                    HeapObject* source,
1866                                    HeapObject* target,
1867                                    int size)) {
1868     // Copy the content of source to target.
1869     heap->CopyBlock(target->address(), source->address(), size);
1870
1871     // Set the forwarding address.
1872     source->set_map_word(MapWord::FromForwardingAddress(target));
1873
1874     if (logging_and_profiling_mode == LOGGING_AND_PROFILING_ENABLED) {
1875       // Update NewSpace stats if necessary.
1876       RecordCopiedObject(heap, target);
1877       HEAP_PROFILE(heap, ObjectMoveEvent(source->address(), target->address()));
1878       Isolate* isolate = heap->isolate();
1879       if (isolate->logger()->is_logging_code_events() ||
1880           isolate->cpu_profiler()->is_profiling()) {
1881         if (target->IsSharedFunctionInfo()) {
1882           PROFILE(isolate, SharedFunctionInfoMoveEvent(
1883               source->address(), target->address()));
1884         }
1885       }
1886     }
1887
1888     if (marks_handling == TRANSFER_MARKS) {
1889       if (Marking::TransferColor(source, target)) {
1890         MemoryChunk::IncrementLiveBytesFromGC(target->address(), size);
1891       }
1892     }
1893   }
1894
1895
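  // Copies `object` either into old space (large objects go to the large
  // object space, data objects to old data space, everything else to old
  // pointer space) when ShouldBePromoted says so, or into to-space when
  // promotion is not wanted or the old-space allocation fails.  Promoted
  // pointer objects are pushed onto the promotion queue so that DoScavenge
  // can re-scan them for pointers back into from-space.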
1896   template<ObjectContents object_contents,
1897            SizeRestriction size_restriction,
1898            int alignment>
1899   static inline void EvacuateObject(Map* map,
1900                                     HeapObject** slot,
1901                                     HeapObject* object,
1902                                     int object_size) {
1903     SLOW_ASSERT((size_restriction != SMALL) ||
1904                 (object_size <= Page::kMaxNonCodeHeapObjectSize));
1905     SLOW_ASSERT(object->Size() == object_size);
1906
1907     int allocation_size = object_size;
1908     if (alignment != kObjectAlignment) {
1909       ASSERT(alignment == kDoubleAlignment);
1910       allocation_size += kPointerSize;
1911     }
1912
1913     Heap* heap = map->GetHeap();
1914     if (heap->ShouldBePromoted(object->address(), object_size)) {
1915       MaybeObject* maybe_result;
1916
1917       if ((size_restriction != SMALL) &&
1918           (allocation_size > Page::kMaxNonCodeHeapObjectSize)) {
1919         maybe_result = heap->lo_space()->AllocateRaw(allocation_size,
1920                                                      NOT_EXECUTABLE);
1921       } else {
1922         if (object_contents == DATA_OBJECT) {
1923           maybe_result = heap->old_data_space()->AllocateRaw(allocation_size);
1924         } else {
1925           maybe_result =
1926               heap->old_pointer_space()->AllocateRaw(allocation_size);
1927         }
1928       }
1929
1930       Object* result = NULL;  // Initialization to please compiler.
1931       if (maybe_result->ToObject(&result)) {
1932         HeapObject* target = HeapObject::cast(result);
1933
1934         if (alignment != kObjectAlignment) {
1935           target = EnsureDoubleAligned(heap, target, allocation_size);
1936         }
1937
1938         // Order is important: slot might be inside of the target if target
1939         // was allocated over a dead object and slot comes from the store
1940         // buffer.
1941         *slot = target;
1942         MigrateObject(heap, object, target, object_size);
1943
1944         if (object_contents == POINTER_OBJECT) {
1945           if (map->instance_type() == JS_FUNCTION_TYPE) {
1946             heap->promotion_queue()->insert(
1947                 target, JSFunction::kNonWeakFieldsEndOffset);
1948           } else {
1949             heap->promotion_queue()->insert(target, object_size);
1950           }
1951         }
1952
1953         heap->tracer()->increment_promoted_objects_size(object_size);
1954         return;
1955       }
1956     }
1957     MaybeObject* allocation = heap->new_space()->AllocateRaw(allocation_size);
1958     heap->promotion_queue()->SetNewLimit(heap->new_space()->top());
1959     Object* result = allocation->ToObjectUnchecked();
1960     HeapObject* target = HeapObject::cast(result);
1961
1962     if (alignment != kObjectAlignment) {
1963       target = EnsureDoubleAligned(heap, target, allocation_size);
1964     }
1965
1966     // Order is important: slot might be inside of the target if target
1967     // was allocated over a dead object and slot comes from the store
1968     // buffer.
1969     *slot = target;
1970     MigrateObject(heap, object, target, object_size);
1971     return;
1972   }
1973
1974
1975   static inline void EvacuateJSFunction(Map* map,
1976                                         HeapObject** slot,
1977                                         HeapObject* object) {
1978     ObjectEvacuationStrategy<POINTER_OBJECT>::
1979         template VisitSpecialized<JSFunction::kSize>(map, slot, object);
1980
1981     HeapObject* target = *slot;
1982     MarkBit mark_bit = Marking::MarkBitFrom(target);
1983     if (Marking::IsBlack(mark_bit)) {
1984       // This object is black and it might not be rescanned by the marker.
1985       // We should explicitly record the code entry slot for compaction because
1986       // promotion queue processing (IterateAndMarkPointersToFromSpace) will
1987       // miss it, as it is not HeapObject-tagged.
1988       Address code_entry_slot =
1989           target->address() + JSFunction::kCodeEntryOffset;
1990       Code* code = Code::cast(Code::GetObjectFromEntryAddress(code_entry_slot));
1991       map->GetHeap()->mark_compact_collector()->
1992           RecordCodeEntrySlot(code_entry_slot, code);
1993     }
1994   }
1995
1996
1997   static inline void EvacuateFixedArray(Map* map,
1998                                         HeapObject** slot,
1999                                         HeapObject* object) {
2000     int object_size = FixedArray::BodyDescriptor::SizeOf(map, object);
2001     EvacuateObject<POINTER_OBJECT, UNKNOWN_SIZE, kObjectAlignment>(map,
2002                                                  slot,
2003                                                  object,
2004                                                  object_size);
2005   }
2006
2007
2008   static inline void EvacuateFixedDoubleArray(Map* map,
2009                                               HeapObject** slot,
2010                                               HeapObject* object) {
2011     int length = reinterpret_cast<FixedDoubleArray*>(object)->length();
2012     int object_size = FixedDoubleArray::SizeFor(length);
2013     EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE, kDoubleAlignment>(
2014         map,
2015         slot,
2016         object,
2017         object_size);
2018   }
2019
2020
2021   static inline void EvacuateByteArray(Map* map,
2022                                        HeapObject** slot,
2023                                        HeapObject* object) {
2024     int object_size = reinterpret_cast<ByteArray*>(object)->ByteArraySize();
2025     EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE, kObjectAlignment>(
2026         map, slot, object, object_size);
2027   }
2028
2029
2030   static inline void EvacuateSeqOneByteString(Map* map,
2031                                               HeapObject** slot,
2032                                               HeapObject* object) {
2033     int object_size = SeqOneByteString::cast(object)->
2034         SeqOneByteStringSize(map->instance_type());
2035     EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE, kObjectAlignment>(
2036         map, slot, object, object_size);
2037   }
2038
2039
2040   static inline void EvacuateSeqTwoByteString(Map* map,
2041                                               HeapObject** slot,
2042                                               HeapObject* object) {
2043     int object_size = SeqTwoByteString::cast(object)->
2044         SeqTwoByteStringSize(map->instance_type());
2045     EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE, kObjectAlignment>(
2046         map, slot, object, object_size);
2047   }
2048
2049
2050   static inline bool IsShortcutCandidate(int type) {
2051     return ((type & kShortcutTypeMask) == kShortcutTypeTag);
2052   }
2053
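  // A shortcut candidate is a cons string whose second part is the empty
  // string.  Instead of copying the cons wrapper, the scavenger forwards
  // the slot (and the wrapper's map word) directly to the first part.  This
  // short-circuiting is only performed when marks are ignored; while
  // incremental marking is active the wrapper is evacuated like an ordinary
  // cons string (see also SelectScavengingVisitorsTable).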
2054   static inline void EvacuateShortcutCandidate(Map* map,
2055                                                HeapObject** slot,
2056                                                HeapObject* object) {
2057     ASSERT(IsShortcutCandidate(map->instance_type()));
2058
2059     Heap* heap = map->GetHeap();
2060
2061     if (marks_handling == IGNORE_MARKS &&
2062         ConsString::cast(object)->unchecked_second() ==
2063         heap->empty_string()) {
2064       HeapObject* first =
2065           HeapObject::cast(ConsString::cast(object)->unchecked_first());
2066
2067       *slot = first;
2068
2069       if (!heap->InNewSpace(first)) {
2070         object->set_map_word(MapWord::FromForwardingAddress(first));
2071         return;
2072       }
2073
2074       MapWord first_word = first->map_word();
2075       if (first_word.IsForwardingAddress()) {
2076         HeapObject* target = first_word.ToForwardingAddress();
2077
2078         *slot = target;
2079         object->set_map_word(MapWord::FromForwardingAddress(target));
2080         return;
2081       }
2082
2083       heap->DoScavengeObject(first->map(), slot, first);
2084       object->set_map_word(MapWord::FromForwardingAddress(*slot));
2085       return;
2086     }
2087
2088     int object_size = ConsString::kSize;
2089     EvacuateObject<POINTER_OBJECT, SMALL, kObjectAlignment>(
2090         map, slot, object, object_size);
2091   }
2092
2093   template<ObjectContents object_contents>
2094   class ObjectEvacuationStrategy {
2095    public:
2096     template<int object_size>
2097     static inline void VisitSpecialized(Map* map,
2098                                         HeapObject** slot,
2099                                         HeapObject* object) {
2100       EvacuateObject<object_contents, SMALL, kObjectAlignment>(
2101           map, slot, object, object_size);
2102     }
2103
2104     static inline void Visit(Map* map,
2105                              HeapObject** slot,
2106                              HeapObject* object) {
2107       int object_size = map->instance_size();
2108       EvacuateObject<object_contents, SMALL, kObjectAlignment>(
2109           map, slot, object, object_size);
2110     }
2111   };
2112
2113   static VisitorDispatchTable<ScavengingCallback> table_;
2114 };
2115
2116
2117 template<MarksHandling marks_handling,
2118          LoggingAndProfiling logging_and_profiling_mode>
2119 VisitorDispatchTable<ScavengingCallback>
2120     ScavengingVisitor<marks_handling, logging_and_profiling_mode>::table_;
2121
2122
2123 static void InitializeScavengingVisitorsTables() {
2124   ScavengingVisitor<TRANSFER_MARKS,
2125                     LOGGING_AND_PROFILING_DISABLED>::Initialize();
2126   ScavengingVisitor<IGNORE_MARKS, LOGGING_AND_PROFILING_DISABLED>::Initialize();
2127   ScavengingVisitor<TRANSFER_MARKS,
2128                     LOGGING_AND_PROFILING_ENABLED>::Initialize();
2129   ScavengingVisitor<IGNORE_MARKS, LOGGING_AND_PROFILING_ENABLED>::Initialize();
2130 }
2131
2132
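// Picks one of the four statically initialized visitor tables above, based
// on whether incremental marking is active (TRANSFER_MARKS vs IGNORE_MARKS)
// and whether logging or profiling is enabled.  When the incremental
// collector is also compacting, cons-string short-circuiting is disabled by
// redirecting shortcut candidates to the ordinary cons-string visitor.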
2133 void Heap::SelectScavengingVisitorsTable() {
2134   bool logging_and_profiling =
2135       isolate()->logger()->is_logging() ||
2136       isolate()->cpu_profiler()->is_profiling() ||
2137       (isolate()->heap_profiler() != NULL &&
2138        isolate()->heap_profiler()->is_profiling());
2139
2140   if (!incremental_marking()->IsMarking()) {
2141     if (!logging_and_profiling) {
2142       scavenging_visitors_table_.CopyFrom(
2143           ScavengingVisitor<IGNORE_MARKS,
2144                             LOGGING_AND_PROFILING_DISABLED>::GetTable());
2145     } else {
2146       scavenging_visitors_table_.CopyFrom(
2147           ScavengingVisitor<IGNORE_MARKS,
2148                             LOGGING_AND_PROFILING_ENABLED>::GetTable());
2149     }
2150   } else {
2151     if (!logging_and_profiling) {
2152       scavenging_visitors_table_.CopyFrom(
2153           ScavengingVisitor<TRANSFER_MARKS,
2154                             LOGGING_AND_PROFILING_DISABLED>::GetTable());
2155     } else {
2156       scavenging_visitors_table_.CopyFrom(
2157           ScavengingVisitor<TRANSFER_MARKS,
2158                             LOGGING_AND_PROFILING_ENABLED>::GetTable());
2159     }
2160
2161     if (incremental_marking()->IsCompacting()) {
2162       // When compacting, forbid short-circuiting of cons strings.
2163       // Scavenging code relies on the fact that a new space object
2164       // can't be evacuated into an evacuation candidate, but
2165       // short-circuiting violates this assumption.
2166       scavenging_visitors_table_.Register(
2167           StaticVisitorBase::kVisitShortcutCandidate,
2168           scavenging_visitors_table_.GetVisitorById(
2169               StaticVisitorBase::kVisitConsString));
2170     }
2171   }
2172 }
2173
2174
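// Slow path of Heap::ScavengeObject, reached only for from-space objects
// that have not been forwarded yet (see the SLOW_ASSERT below); it
// dispatches through the currently selected scavenging visitor table via
// DoScavengeObject.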
2175 void Heap::ScavengeObjectSlow(HeapObject** p, HeapObject* object) {
2176   SLOW_ASSERT(HEAP->InFromSpace(object));
2177   MapWord first_word = object->map_word();
2178   SLOW_ASSERT(!first_word.IsForwardingAddress());
2179   Map* map = first_word.ToMap();
2180   map->GetHeap()->DoScavengeObject(map, p, object);
2181 }
2182
2183
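// Bootstrapping helper: creates a map before the roots that a complete map
// refers to (empty fixed array, empty descriptor array, null, undefined)
// exist.  The fields left untouched here are patched later in
// CreateInitialMaps once those roots have been allocated.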
2184 MaybeObject* Heap::AllocatePartialMap(InstanceType instance_type,
2185                                       int instance_size) {
2186   Object* result;
2187   MaybeObject* maybe_result = AllocateRawMap();
2188   if (!maybe_result->ToObject(&result)) return maybe_result;
2189
2190   // Map::cast cannot be used due to uninitialized map field.
2191   reinterpret_cast<Map*>(result)->set_map(raw_unchecked_meta_map());
2192   reinterpret_cast<Map*>(result)->set_instance_type(instance_type);
2193   reinterpret_cast<Map*>(result)->set_instance_size(instance_size);
2194   reinterpret_cast<Map*>(result)->set_visitor_id(
2195         StaticVisitorBase::GetVisitorId(instance_type, instance_size));
2196   reinterpret_cast<Map*>(result)->set_inobject_properties(0);
2197   reinterpret_cast<Map*>(result)->set_pre_allocated_property_fields(0);
2198   reinterpret_cast<Map*>(result)->set_unused_property_fields(0);
2199   reinterpret_cast<Map*>(result)->set_bit_field(0);
2200   reinterpret_cast<Map*>(result)->set_bit_field2(0);
2201   int bit_field3 = Map::EnumLengthBits::encode(Map::kInvalidEnumCache) |
2202                    Map::OwnsDescriptors::encode(true);
2203   reinterpret_cast<Map*>(result)->set_bit_field3(bit_field3);
2204   return result;
2205 }
2206
2207
2208 MaybeObject* Heap::AllocateMap(InstanceType instance_type,
2209                                int instance_size,
2210                                ElementsKind elements_kind) {
2211   Object* result;
2212   MaybeObject* maybe_result = AllocateRawMap();
2213   if (!maybe_result->To(&result)) return maybe_result;
2214
2215   Map* map = reinterpret_cast<Map*>(result);
2216   map->set_map_no_write_barrier(meta_map());
2217   map->set_instance_type(instance_type);
2218   map->set_visitor_id(
2219       StaticVisitorBase::GetVisitorId(instance_type, instance_size));
2220   map->set_prototype(null_value(), SKIP_WRITE_BARRIER);
2221   map->set_constructor(null_value(), SKIP_WRITE_BARRIER);
2222   map->set_instance_size(instance_size);
2223   map->set_inobject_properties(0);
2224   map->set_pre_allocated_property_fields(0);
2225   map->set_code_cache(empty_fixed_array(), SKIP_WRITE_BARRIER);
2226   map->set_dependent_code(DependentCode::cast(empty_fixed_array()),
2227                           SKIP_WRITE_BARRIER);
2228   map->init_back_pointer(undefined_value());
2229   map->set_unused_property_fields(0);
2230   map->set_instance_descriptors(empty_descriptor_array());
2231   map->set_bit_field(0);
2232   map->set_bit_field2(1 << Map::kIsExtensible);
2233   int bit_field3 = Map::EnumLengthBits::encode(Map::kInvalidEnumCache) |
2234                    Map::OwnsDescriptors::encode(true);
2235   map->set_bit_field3(bit_field3);
2236   map->set_elements_kind(elements_kind);
2237
2238   return map;
2239 }
2240
2241
2242 MaybeObject* Heap::AllocateCodeCache() {
2243   CodeCache* code_cache;
2244   { MaybeObject* maybe_code_cache = AllocateStruct(CODE_CACHE_TYPE);
2245     if (!maybe_code_cache->To(&code_cache)) return maybe_code_cache;
2246   }
2247   code_cache->set_default_cache(empty_fixed_array(), SKIP_WRITE_BARRIER);
2248   code_cache->set_normal_type_cache(undefined_value(), SKIP_WRITE_BARRIER);
2249   return code_cache;
2250 }
2251
2252
2253 MaybeObject* Heap::AllocatePolymorphicCodeCache() {
2254   return AllocateStruct(POLYMORPHIC_CODE_CACHE_TYPE);
2255 }
2256
2257
2258 MaybeObject* Heap::AllocateAccessorPair() {
2259   AccessorPair* accessors;
2260   { MaybeObject* maybe_accessors = AllocateStruct(ACCESSOR_PAIR_TYPE);
2261     if (!maybe_accessors->To(&accessors)) return maybe_accessors;
2262   }
2263   accessors->set_getter(the_hole_value(), SKIP_WRITE_BARRIER);
2264   accessors->set_setter(the_hole_value(), SKIP_WRITE_BARRIER);
2265   return accessors;
2266 }
2267
2268
2269 MaybeObject* Heap::AllocateTypeFeedbackInfo() {
2270   TypeFeedbackInfo* info;
2271   { MaybeObject* maybe_info = AllocateStruct(TYPE_FEEDBACK_INFO_TYPE);
2272     if (!maybe_info->To(&info)) return maybe_info;
2273   }
2274   info->initialize_storage();
2275   info->set_type_feedback_cells(TypeFeedbackCells::cast(empty_fixed_array()),
2276                                 SKIP_WRITE_BARRIER);
2277   return info;
2278 }
2279
2280
2281 MaybeObject* Heap::AllocateAliasedArgumentsEntry(int aliased_context_slot) {
2282   AliasedArgumentsEntry* entry;
2283   { MaybeObject* maybe_entry = AllocateStruct(ALIASED_ARGUMENTS_ENTRY_TYPE);
2284     if (!maybe_entry->To(&entry)) return maybe_entry;
2285   }
2286   entry->set_aliased_context_slot(aliased_context_slot);
2287   return entry;
2288 }
2289
2290
2291 const Heap::StringTypeTable Heap::string_type_table[] = {
2292 #define STRING_TYPE_ELEMENT(type, size, name, camel_name)                      \
2293   {type, size, k##camel_name##MapRootIndex},
2294   STRING_TYPE_LIST(STRING_TYPE_ELEMENT)
2295 #undef STRING_TYPE_ELEMENT
2296 };
2297
2298
2299 const Heap::ConstantStringTable Heap::constant_string_table[] = {
2300 #define CONSTANT_STRING_ELEMENT(name, contents)                                \
2301   {contents, k##name##RootIndex},
2302   INTERNALIZED_STRING_LIST(CONSTANT_STRING_ELEMENT)
2303 #undef CONSTANT_STRING_ELEMENT
2304 };
2305
2306
2307 const Heap::StructTable Heap::struct_table[] = {
2308 #define STRUCT_TABLE_ELEMENT(NAME, Name, name)                                 \
2309   { NAME##_TYPE, Name::kSize, k##Name##MapRootIndex },
2310   STRUCT_LIST(STRUCT_TABLE_ELEMENT)
2311 #undef STRUCT_TABLE_ELEMENT
2312 };
2313
2314
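// Creates the maps everything else depends on, in bootstrap order: first a
// partial meta map (whose map pointer refers to itself) plus partial fixed
// array and oddball maps, then the empty fixed array, the null and
// undefined oddballs and the empty descriptor array, after which the
// partial maps are patched up and the remaining maps are allocated through
// the regular AllocateMap path.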
2315 bool Heap::CreateInitialMaps() {
2316   Object* obj;
2317   { MaybeObject* maybe_obj = AllocatePartialMap(MAP_TYPE, Map::kSize);
2318     if (!maybe_obj->ToObject(&obj)) return false;
2319   }
2320   // Map::cast cannot be used due to uninitialized map field.
2321   Map* new_meta_map = reinterpret_cast<Map*>(obj);
2322   set_meta_map(new_meta_map);
2323   new_meta_map->set_map(new_meta_map);
2324
2325   { MaybeObject* maybe_obj =
2326         AllocatePartialMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2327     if (!maybe_obj->ToObject(&obj)) return false;
2328   }
2329   set_fixed_array_map(Map::cast(obj));
2330
2331   { MaybeObject* maybe_obj = AllocatePartialMap(ODDBALL_TYPE, Oddball::kSize);
2332     if (!maybe_obj->ToObject(&obj)) return false;
2333   }
2334   set_oddball_map(Map::cast(obj));
2335
2336   // Allocate the empty fixed array.
2337   { MaybeObject* maybe_obj = AllocateEmptyFixedArray();
2338     if (!maybe_obj->ToObject(&obj)) return false;
2339   }
2340   set_empty_fixed_array(FixedArray::cast(obj));
2341
2342   { MaybeObject* maybe_obj = Allocate(oddball_map(), OLD_POINTER_SPACE);
2343     if (!maybe_obj->ToObject(&obj)) return false;
2344   }
2345   set_null_value(Oddball::cast(obj));
2346   Oddball::cast(obj)->set_kind(Oddball::kNull);
2347
2348   { MaybeObject* maybe_obj = Allocate(oddball_map(), OLD_POINTER_SPACE);
2349     if (!maybe_obj->ToObject(&obj)) return false;
2350   }
2351   set_undefined_value(Oddball::cast(obj));
2352   Oddball::cast(obj)->set_kind(Oddball::kUndefined);
2353   ASSERT(!InNewSpace(undefined_value()));
2354
2355   // Allocate the empty descriptor array.
2356   { MaybeObject* maybe_obj = AllocateEmptyFixedArray();
2357     if (!maybe_obj->ToObject(&obj)) return false;
2358   }
2359   set_empty_descriptor_array(DescriptorArray::cast(obj));
2360
2361   // Fix the instance_descriptors for the existing maps.
2362   meta_map()->set_code_cache(empty_fixed_array());
2363   meta_map()->set_dependent_code(DependentCode::cast(empty_fixed_array()));
2364   meta_map()->init_back_pointer(undefined_value());
2365   meta_map()->set_instance_descriptors(empty_descriptor_array());
2366
2367   fixed_array_map()->set_code_cache(empty_fixed_array());
2368   fixed_array_map()->set_dependent_code(
2369       DependentCode::cast(empty_fixed_array()));
2370   fixed_array_map()->init_back_pointer(undefined_value());
2371   fixed_array_map()->set_instance_descriptors(empty_descriptor_array());
2372
2373   oddball_map()->set_code_cache(empty_fixed_array());
2374   oddball_map()->set_dependent_code(DependentCode::cast(empty_fixed_array()));
2375   oddball_map()->init_back_pointer(undefined_value());
2376   oddball_map()->set_instance_descriptors(empty_descriptor_array());
2377
2378   // Fix prototype object for existing maps.
2379   meta_map()->set_prototype(null_value());
2380   meta_map()->set_constructor(null_value());
2381
2382   fixed_array_map()->set_prototype(null_value());
2383   fixed_array_map()->set_constructor(null_value());
2384
2385   oddball_map()->set_prototype(null_value());
2386   oddball_map()->set_constructor(null_value());
2387
2388   { MaybeObject* maybe_obj =
2389         AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2390     if (!maybe_obj->ToObject(&obj)) return false;
2391   }
2392   set_fixed_cow_array_map(Map::cast(obj));
2393   ASSERT(fixed_array_map() != fixed_cow_array_map());
2394
2395   { MaybeObject* maybe_obj =
2396         AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2397     if (!maybe_obj->ToObject(&obj)) return false;
2398   }
2399   set_scope_info_map(Map::cast(obj));
2400
2401   { MaybeObject* maybe_obj = AllocateMap(HEAP_NUMBER_TYPE, HeapNumber::kSize);
2402     if (!maybe_obj->ToObject(&obj)) return false;
2403   }
2404   set_heap_number_map(Map::cast(obj));
2405
2406   { MaybeObject* maybe_obj = AllocateMap(SYMBOL_TYPE, Symbol::kSize);
2407     if (!maybe_obj->ToObject(&obj)) return false;
2408   }
2409   set_symbol_map(Map::cast(obj));
2410
2411   { MaybeObject* maybe_obj = AllocateMap(FOREIGN_TYPE, Foreign::kSize);
2412     if (!maybe_obj->ToObject(&obj)) return false;
2413   }
2414   set_foreign_map(Map::cast(obj));
2415
2416   for (unsigned i = 0; i < ARRAY_SIZE(string_type_table); i++) {
2417     const StringTypeTable& entry = string_type_table[i];
2418     { MaybeObject* maybe_obj = AllocateMap(entry.type, entry.size);
2419       if (!maybe_obj->ToObject(&obj)) return false;
2420     }
2421     roots_[entry.index] = Map::cast(obj);
2422   }
2423
2424   { MaybeObject* maybe_obj = AllocateMap(STRING_TYPE, kVariableSizeSentinel);
2425     if (!maybe_obj->ToObject(&obj)) return false;
2426   }
2427   set_undetectable_string_map(Map::cast(obj));
2428   Map::cast(obj)->set_is_undetectable();
2429
2430   { MaybeObject* maybe_obj =
2431         AllocateMap(ASCII_STRING_TYPE, kVariableSizeSentinel);
2432     if (!maybe_obj->ToObject(&obj)) return false;
2433   }
2434   set_undetectable_ascii_string_map(Map::cast(obj));
2435   Map::cast(obj)->set_is_undetectable();
2436
2437   { MaybeObject* maybe_obj =
2438         AllocateMap(FIXED_DOUBLE_ARRAY_TYPE, kVariableSizeSentinel);
2439     if (!maybe_obj->ToObject(&obj)) return false;
2440   }
2441   set_fixed_double_array_map(Map::cast(obj));
2442
2443   { MaybeObject* maybe_obj =
2444         AllocateMap(BYTE_ARRAY_TYPE, kVariableSizeSentinel);
2445     if (!maybe_obj->ToObject(&obj)) return false;
2446   }
2447   set_byte_array_map(Map::cast(obj));
2448
2449   { MaybeObject* maybe_obj =
2450         AllocateMap(FREE_SPACE_TYPE, kVariableSizeSentinel);
2451     if (!maybe_obj->ToObject(&obj)) return false;
2452   }
2453   set_free_space_map(Map::cast(obj));
2454
2455   { MaybeObject* maybe_obj = AllocateByteArray(0, TENURED);
2456     if (!maybe_obj->ToObject(&obj)) return false;
2457   }
2458   set_empty_byte_array(ByteArray::cast(obj));
2459
2460   { MaybeObject* maybe_obj =
2461         AllocateMap(EXTERNAL_PIXEL_ARRAY_TYPE, ExternalArray::kAlignedSize);
2462     if (!maybe_obj->ToObject(&obj)) return false;
2463   }
2464   set_external_pixel_array_map(Map::cast(obj));
2465
2466   { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_BYTE_ARRAY_TYPE,
2467                                          ExternalArray::kAlignedSize);
2468     if (!maybe_obj->ToObject(&obj)) return false;
2469   }
2470   set_external_byte_array_map(Map::cast(obj));
2471
2472   { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE,
2473                                          ExternalArray::kAlignedSize);
2474     if (!maybe_obj->ToObject(&obj)) return false;
2475   }
2476   set_external_unsigned_byte_array_map(Map::cast(obj));
2477
2478   { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_SHORT_ARRAY_TYPE,
2479                                          ExternalArray::kAlignedSize);
2480     if (!maybe_obj->ToObject(&obj)) return false;
2481   }
2482   set_external_short_array_map(Map::cast(obj));
2483
2484   { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE,
2485                                          ExternalArray::kAlignedSize);
2486     if (!maybe_obj->ToObject(&obj)) return false;
2487   }
2488   set_external_unsigned_short_array_map(Map::cast(obj));
2489
2490   { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_INT_ARRAY_TYPE,
2491                                          ExternalArray::kAlignedSize);
2492     if (!maybe_obj->ToObject(&obj)) return false;
2493   }
2494   set_external_int_array_map(Map::cast(obj));
2495
2496   { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_UNSIGNED_INT_ARRAY_TYPE,
2497                                          ExternalArray::kAlignedSize);
2498     if (!maybe_obj->ToObject(&obj)) return false;
2499   }
2500   set_external_unsigned_int_array_map(Map::cast(obj));
2501
2502   { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_FLOAT_ARRAY_TYPE,
2503                                          ExternalArray::kAlignedSize);
2504     if (!maybe_obj->ToObject(&obj)) return false;
2505   }
2506   set_external_float_array_map(Map::cast(obj));
2507
2508   { MaybeObject* maybe_obj =
2509         AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2510     if (!maybe_obj->ToObject(&obj)) return false;
2511   }
2512   set_non_strict_arguments_elements_map(Map::cast(obj));
2513
2514   { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_DOUBLE_ARRAY_TYPE,
2515                                          ExternalArray::kAlignedSize);
2516     if (!maybe_obj->ToObject(&obj)) return false;
2517   }
2518   set_external_double_array_map(Map::cast(obj));
2519
2520   { MaybeObject* maybe_obj = AllocateMap(CODE_TYPE, kVariableSizeSentinel);
2521     if (!maybe_obj->ToObject(&obj)) return false;
2522   }
2523   set_code_map(Map::cast(obj));
2524
2525   { MaybeObject* maybe_obj = AllocateMap(JS_GLOBAL_PROPERTY_CELL_TYPE,
2526                                          JSGlobalPropertyCell::kSize);
2527     if (!maybe_obj->ToObject(&obj)) return false;
2528   }
2529   set_global_property_cell_map(Map::cast(obj));
2530
2531   { MaybeObject* maybe_obj = AllocateMap(FILLER_TYPE, kPointerSize);
2532     if (!maybe_obj->ToObject(&obj)) return false;
2533   }
2534   set_one_pointer_filler_map(Map::cast(obj));
2535
2536   { MaybeObject* maybe_obj = AllocateMap(FILLER_TYPE, 2 * kPointerSize);
2537     if (!maybe_obj->ToObject(&obj)) return false;
2538   }
2539   set_two_pointer_filler_map(Map::cast(obj));
2540
2541   for (unsigned i = 0; i < ARRAY_SIZE(struct_table); i++) {
2542     const StructTable& entry = struct_table[i];
2543     { MaybeObject* maybe_obj = AllocateMap(entry.type, entry.size);
2544       if (!maybe_obj->ToObject(&obj)) return false;
2545     }
2546     roots_[entry.index] = Map::cast(obj);
2547   }
2548
2549   { MaybeObject* maybe_obj =
2550         AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2551     if (!maybe_obj->ToObject(&obj)) return false;
2552   }
2553   set_hash_table_map(Map::cast(obj));
2554
2555   { MaybeObject* maybe_obj =
2556         AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2557     if (!maybe_obj->ToObject(&obj)) return false;
2558   }
2559   set_function_context_map(Map::cast(obj));
2560
2561   { MaybeObject* maybe_obj =
2562         AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2563     if (!maybe_obj->ToObject(&obj)) return false;
2564   }
2565   set_catch_context_map(Map::cast(obj));
2566
2567   { MaybeObject* maybe_obj =
2568         AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2569     if (!maybe_obj->ToObject(&obj)) return false;
2570   }
2571   set_with_context_map(Map::cast(obj));
2572
2573   { MaybeObject* maybe_obj =
2574         AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2575     if (!maybe_obj->ToObject(&obj)) return false;
2576   }
2577   set_block_context_map(Map::cast(obj));
2578
2579   { MaybeObject* maybe_obj =
2580         AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2581     if (!maybe_obj->ToObject(&obj)) return false;
2582   }
2583   set_module_context_map(Map::cast(obj));
2584
2585   { MaybeObject* maybe_obj =
2586         AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2587     if (!maybe_obj->ToObject(&obj)) return false;
2588   }
2589   set_global_context_map(Map::cast(obj));
2590
2591   { MaybeObject* maybe_obj =
2592         AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2593     if (!maybe_obj->ToObject(&obj)) return false;
2594   }
2595   Map* native_context_map = Map::cast(obj);
2596   native_context_map->set_dictionary_map(true);
2597   native_context_map->set_visitor_id(StaticVisitorBase::kVisitNativeContext);
2598   set_native_context_map(native_context_map);
2599
2600   { MaybeObject* maybe_obj = AllocateMap(SHARED_FUNCTION_INFO_TYPE,
2601                                          SharedFunctionInfo::kAlignedSize);
2602     if (!maybe_obj->ToObject(&obj)) return false;
2603   }
2604   set_shared_function_info_map(Map::cast(obj));
2605
2606   { MaybeObject* maybe_obj = AllocateMap(JS_MESSAGE_OBJECT_TYPE,
2607                                          JSMessageObject::kSize);
2608     if (!maybe_obj->ToObject(&obj)) return false;
2609   }
2610   set_message_object_map(Map::cast(obj));
2611
2612   Map* external_map;
2613   { MaybeObject* maybe_obj =
2614         AllocateMap(JS_OBJECT_TYPE, JSObject::kHeaderSize + kPointerSize);
2615     if (!maybe_obj->To(&external_map)) return false;
2616   }
2617   external_map->set_is_extensible(false);
2618   set_external_map(external_map);
2619
2620   ASSERT(!InNewSpace(empty_fixed_array()));
2621   return true;
2622 }
2623
2624
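// Heap numbers contain no pointers, so TENURED allocations go directly to
// old data space; otherwise the number is allocated in new space.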
2625 MaybeObject* Heap::AllocateHeapNumber(double value, PretenureFlag pretenure) {
2626   // Statically ensure that it is safe to allocate heap numbers in paged
2627   // spaces.
2628   STATIC_ASSERT(HeapNumber::kSize <= Page::kNonCodeObjectAreaSize);
2629   AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
2630
2631   Object* result;
2632   { MaybeObject* maybe_result =
2633         AllocateRaw(HeapNumber::kSize, space, OLD_DATA_SPACE);
2634     if (!maybe_result->ToObject(&result)) return maybe_result;
2635   }
2636
2637   HeapObject::cast(result)->set_map_no_write_barrier(heap_number_map());
2638   HeapNumber::cast(result)->set_value(value);
2639   return result;
2640 }
2641
2642
2643 MaybeObject* Heap::AllocateHeapNumber(double value) {
2644   // Use the general version if we're forced to always allocate.
2645   if (always_allocate()) return AllocateHeapNumber(value, TENURED);
2646
2647   // This version of AllocateHeapNumber is optimized for
2648   // allocation in new space.
2649   STATIC_ASSERT(HeapNumber::kSize <= Page::kMaxNonCodeHeapObjectSize);
2650   ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
2651   Object* result;
2652   { MaybeObject* maybe_result = new_space_.AllocateRaw(HeapNumber::kSize);
2653     if (!maybe_result->ToObject(&result)) return maybe_result;
2654   }
2655   HeapObject::cast(result)->set_map_no_write_barrier(heap_number_map());
2656   HeapNumber::cast(result)->set_value(value);
2657   return result;
2658 }
2659
2660
2661 MaybeObject* Heap::AllocateJSGlobalPropertyCell(Object* value) {
2662   Object* result;
2663   { MaybeObject* maybe_result = AllocateRawCell();
2664     if (!maybe_result->ToObject(&result)) return maybe_result;
2665   }
2666   HeapObject::cast(result)->set_map_no_write_barrier(
2667       global_property_cell_map());
2668   JSGlobalPropertyCell::cast(result)->set_value(value);
2669   return result;
2670 }
2671
2672
2673 MaybeObject* Heap::CreateOddball(const char* to_string,
2674                                  Object* to_number,
2675                                  byte kind) {
2676   Object* result;
2677   { MaybeObject* maybe_result = Allocate(oddball_map(), OLD_POINTER_SPACE);
2678     if (!maybe_result->ToObject(&result)) return maybe_result;
2679   }
2680   return Oddball::cast(result)->Initialize(to_string, to_number, kind);
2681 }
2682
2683
2684 bool Heap::CreateApiObjects() {
2685   Object* obj;
2686
2687   { MaybeObject* maybe_obj = AllocateMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
2688     if (!maybe_obj->ToObject(&obj)) return false;
2689   }
2690   // Don't use Smi-only elements optimizations for objects with the neander
2691   // map. There are too many cases where element values are set directly,
2692   // with no bottleneck to trap the Smi-only -> fast elements transition,
2693   // and there appears to be no benefit in optimizing this case.
2694   Map* new_neander_map = Map::cast(obj);
2695   new_neander_map->set_elements_kind(TERMINAL_FAST_ELEMENTS_KIND);
2696   set_neander_map(new_neander_map);
2697
2698   { MaybeObject* maybe_obj = AllocateJSObjectFromMap(neander_map());
2699     if (!maybe_obj->ToObject(&obj)) return false;
2700   }
2701   Object* elements;
2702   { MaybeObject* maybe_elements = AllocateFixedArray(2);
2703     if (!maybe_elements->ToObject(&elements)) return false;
2704   }
2705   FixedArray::cast(elements)->set(0, Smi::FromInt(0));
2706   JSObject::cast(obj)->set_elements(FixedArray::cast(elements));
2707   set_message_listeners(JSObject::cast(obj));
2708
2709   return true;
2710 }
2711
2712
2713 void Heap::CreateJSEntryStub() {
2714   JSEntryStub stub;
2715   set_js_entry_code(*stub.GetCode(isolate()));
2716 }
2717
2718
2719 void Heap::CreateJSConstructEntryStub() {
2720   JSConstructEntryStub stub;
2721   set_js_construct_entry_code(*stub.GetCode(isolate()));
2722 }
2723
2724
2725 void Heap::CreateFixedStubs() {
2726   // Here we create roots for fixed stubs. They are needed at GC
2727   // for cooking and uncooking (check out frames.cc).
2728   // This eliminates the need for doing a dictionary lookup in the
2729   // stub cache for these stubs.
2730   HandleScope scope(isolate());
2731   // gcc-4.4 has problem generating correct code of following snippet:
2732   // {  JSEntryStub stub;
2733   //    js_entry_code_ = *stub.GetCode();
2734   // }
2735   // {  JSConstructEntryStub stub;
2736   //    js_construct_entry_code_ = *stub.GetCode();
2737   // }
2738   // To work around the problem, make separate functions without inlining.
2739   Heap::CreateJSEntryStub();
2740   Heap::CreateJSConstructEntryStub();
2741
2742   // Create stubs that should be there, so we don't unexpectedly have to
2743   // create them if we need them during the creation of another stub.
2744   // Stub creation mixes raw pointers and handles in an unsafe manner, so
2745   // we cannot create a stub while another stub is being created.
2746   CodeStub::GenerateStubsAheadOfTime(isolate());
2747 }
2748
2749
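// Allocates the remaining root objects (oddballs, string caches and
// dictionaries).  Ordering matters: -0 must exist before NumberFromDouble
// works, the hole is temporarily represented by a Smi until its oddball can
// be created, and the string table must exist before the oddballs are given
// their string names.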
2750 bool Heap::CreateInitialObjects() {
2751   Object* obj;
2752
2753   // The -0 value must be set before NumberFromDouble works.
2754   { MaybeObject* maybe_obj = AllocateHeapNumber(-0.0, TENURED);
2755     if (!maybe_obj->ToObject(&obj)) return false;
2756   }
2757   set_minus_zero_value(HeapNumber::cast(obj));
2758   ASSERT(std::signbit(minus_zero_value()->Number()) != 0);
2759
2760   { MaybeObject* maybe_obj = AllocateHeapNumber(OS::nan_value(), TENURED);
2761     if (!maybe_obj->ToObject(&obj)) return false;
2762   }
2763   set_nan_value(HeapNumber::cast(obj));
2764
2765   { MaybeObject* maybe_obj = AllocateHeapNumber(V8_INFINITY, TENURED);
2766     if (!maybe_obj->ToObject(&obj)) return false;
2767   }
2768   set_infinity_value(HeapNumber::cast(obj));
2769
2770   // The hole has not been created yet, but we want to put something
2771   // predictable in the gaps in the string table, so let's make that Smi zero.
2772   set_the_hole_value(reinterpret_cast<Oddball*>(Smi::FromInt(0)));
2773
2774   // Allocate initial string table.
2775   { MaybeObject* maybe_obj =
2776         StringTable::Allocate(this, kInitialStringTableSize);
2777     if (!maybe_obj->ToObject(&obj)) return false;
2778   }
2779   // Don't use set_string_table() due to asserts.
2780   roots_[kStringTableRootIndex] = obj;
2781
2782   // Finish initializing oddballs after creating the string table.
2783   { MaybeObject* maybe_obj =
2784         undefined_value()->Initialize("undefined",
2785                                       nan_value(),
2786                                       Oddball::kUndefined);
2787     if (!maybe_obj->ToObject(&obj)) return false;
2788   }
2789
2790   // Initialize the null_value.
2791   { MaybeObject* maybe_obj =
2792         null_value()->Initialize("null", Smi::FromInt(0), Oddball::kNull);
2793     if (!maybe_obj->ToObject(&obj)) return false;
2794   }
2795
2796   { MaybeObject* maybe_obj = CreateOddball("true",
2797                                            Smi::FromInt(1),
2798                                            Oddball::kTrue);
2799     if (!maybe_obj->ToObject(&obj)) return false;
2800   }
2801   set_true_value(Oddball::cast(obj));
2802
2803   { MaybeObject* maybe_obj = CreateOddball("false",
2804                                            Smi::FromInt(0),
2805                                            Oddball::kFalse);
2806     if (!maybe_obj->ToObject(&obj)) return false;
2807   }
2808   set_false_value(Oddball::cast(obj));
2809
2810   { MaybeObject* maybe_obj = CreateOddball("hole",
2811                                            Smi::FromInt(-1),
2812                                            Oddball::kTheHole);
2813     if (!maybe_obj->ToObject(&obj)) return false;
2814   }
2815   set_the_hole_value(Oddball::cast(obj));
2816
2817   { MaybeObject* maybe_obj = CreateOddball("arguments_marker",
2818                                            Smi::FromInt(-4),
2819                                            Oddball::kArgumentMarker);
2820     if (!maybe_obj->ToObject(&obj)) return false;
2821   }
2822   set_arguments_marker(Oddball::cast(obj));
2823
2824   { MaybeObject* maybe_obj = CreateOddball("no_interceptor_result_sentinel",
2825                                            Smi::FromInt(-2),
2826                                            Oddball::kOther);
2827     if (!maybe_obj->ToObject(&obj)) return false;
2828   }
2829   set_no_interceptor_result_sentinel(obj);
2830
2831   { MaybeObject* maybe_obj = CreateOddball("termination_exception",
2832                                            Smi::FromInt(-3),
2833                                            Oddball::kOther);
2834     if (!maybe_obj->ToObject(&obj)) return false;
2835   }
2836   set_termination_exception(obj);
2837
2838   for (unsigned i = 0; i < ARRAY_SIZE(constant_string_table); i++) {
2839     { MaybeObject* maybe_obj =
2840           InternalizeUtf8String(constant_string_table[i].contents);
2841       if (!maybe_obj->ToObject(&obj)) return false;
2842     }
2843     roots_[constant_string_table[i].index] = String::cast(obj);
2844   }
2845
2846   // Allocate the hidden string which is used to identify the hidden properties
2847   // in JSObjects. The hash code has a special value so that it will not match
2848   // the empty string when searching for the property. It cannot be part of the
2849   // loop above because it needs to be allocated manually with the special
2850   // hash code in place. The hash code for the hidden_string is zero to ensure
2851   // that it will always be at the first entry in property descriptors.
2852   { MaybeObject* maybe_obj = AllocateOneByteInternalizedString(
2853       OneByteVector("", 0), String::kEmptyStringHash);
2854     if (!maybe_obj->ToObject(&obj)) return false;
2855   }
2856   hidden_string_ = String::cast(obj);
2857
2858   // Allocate the code_stubs dictionary. The initial size is set to avoid
2859   // expanding the dictionary during bootstrapping.
2860   { MaybeObject* maybe_obj = UnseededNumberDictionary::Allocate(this, 128);
2861     if (!maybe_obj->ToObject(&obj)) return false;
2862   }
2863   set_code_stubs(UnseededNumberDictionary::cast(obj));
2864
2865
2866   // Allocate the non_monomorphic_cache used in stub-cache.cc. The initial size
2867   // is set to avoid expanding the dictionary during bootstrapping.
2868   { MaybeObject* maybe_obj = UnseededNumberDictionary::Allocate(this, 64);
2869     if (!maybe_obj->ToObject(&obj)) return false;
2870   }
2871   set_non_monomorphic_cache(UnseededNumberDictionary::cast(obj));
2872
2873   { MaybeObject* maybe_obj = AllocatePolymorphicCodeCache();
2874     if (!maybe_obj->ToObject(&obj)) return false;
2875   }
2876   set_polymorphic_code_cache(PolymorphicCodeCache::cast(obj));
2877
2878   set_instanceof_cache_function(Smi::FromInt(0));
2879   set_instanceof_cache_map(Smi::FromInt(0));
2880   set_instanceof_cache_answer(Smi::FromInt(0));
2881
2882   CreateFixedStubs();
2883
2884   // Allocate the dictionary of intrinsic function names.
2885   { MaybeObject* maybe_obj =
2886         NameDictionary::Allocate(this, Runtime::kNumFunctions);
2887     if (!maybe_obj->ToObject(&obj)) return false;
2888   }
2889   { MaybeObject* maybe_obj = Runtime::InitializeIntrinsicFunctionNames(this,
2890                                                                        obj);
2891     if (!maybe_obj->ToObject(&obj)) return false;
2892   }
2893   set_intrinsic_function_names(NameDictionary::cast(obj));
2894
2895   { MaybeObject* maybe_obj = AllocateInitialNumberStringCache();
2896     if (!maybe_obj->ToObject(&obj)) return false;
2897   }
2898   set_number_string_cache(FixedArray::cast(obj));
2899
2900   // Allocate cache for single character one byte strings.
2901   { MaybeObject* maybe_obj =
2902         AllocateFixedArray(String::kMaxOneByteCharCode + 1, TENURED);
2903     if (!maybe_obj->ToObject(&obj)) return false;
2904   }
2905   set_single_character_string_cache(FixedArray::cast(obj));
2906
2907   // Allocate cache for string split.
2908   { MaybeObject* maybe_obj = AllocateFixedArray(
2909       RegExpResultsCache::kRegExpResultsCacheSize, TENURED);
2910     if (!maybe_obj->ToObject(&obj)) return false;
2911   }
2912   set_string_split_cache(FixedArray::cast(obj));
2913
2914   { MaybeObject* maybe_obj = AllocateFixedArray(
2915       RegExpResultsCache::kRegExpResultsCacheSize, TENURED);
2916     if (!maybe_obj->ToObject(&obj)) return false;
2917   }
2918   set_regexp_multiple_cache(FixedArray::cast(obj));
2919
2920   // Allocate cache for external strings pointing to native source code.
2921   { MaybeObject* maybe_obj = AllocateFixedArray(Natives::GetBuiltinsCount());
2922     if (!maybe_obj->ToObject(&obj)) return false;
2923   }
2924   set_natives_source_cache(FixedArray::cast(obj));
2925
2926   // Allocate object to hold object observation state.
2927   { MaybeObject* maybe_obj = AllocateMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
2928     if (!maybe_obj->ToObject(&obj)) return false;
2929   }
2930   { MaybeObject* maybe_obj = AllocateJSObjectFromMap(Map::cast(obj));
2931     if (!maybe_obj->ToObject(&obj)) return false;
2932   }
2933   set_observation_state(JSObject::cast(obj));
2934
2935   // Handling of script id generation is in FACTORY->NewScript.
2936   set_last_script_id(undefined_value());
2937
2938   // Initialize keyed lookup cache.
2939   isolate_->keyed_lookup_cache()->Clear();
2940
2941   // Initialize context slot cache.
2942   isolate_->context_slot_cache()->Clear();
2943
2944   // Initialize descriptor cache.
2945   isolate_->descriptor_lookup_cache()->Clear();
2946
2947   // Initialize compilation cache.
2948   isolate_->compilation_cache()->Clear();
2949
2950   return true;
2951 }
2952
2953
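// Returns true if the root at |root_index| is one of the roots that may
// legitimately be written to after the heap has been set up (caches, stack
// limits and deopt PC offsets); every other root is expected to stay constant
// once initialization is complete.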
2954 bool Heap::RootCanBeWrittenAfterInitialization(Heap::RootListIndex root_index) {
2955   RootListIndex writable_roots[] = {
2956     kStoreBufferTopRootIndex,
2957     kStackLimitRootIndex,
2958     kNumberStringCacheRootIndex,
2959     kInstanceofCacheFunctionRootIndex,
2960     kInstanceofCacheMapRootIndex,
2961     kInstanceofCacheAnswerRootIndex,
2962     kCodeStubsRootIndex,
2963     kNonMonomorphicCacheRootIndex,
2964     kPolymorphicCodeCacheRootIndex,
2965     kLastScriptIdRootIndex,
2966     kEmptyScriptRootIndex,
2967     kRealStackLimitRootIndex,
2968     kArgumentsAdaptorDeoptPCOffsetRootIndex,
2969     kConstructStubDeoptPCOffsetRootIndex,
2970     kGetterStubDeoptPCOffsetRootIndex,
2971     kSetterStubDeoptPCOffsetRootIndex,
2972     kStringTableRootIndex,
2973   };
2974
2975   for (unsigned int i = 0; i < ARRAY_SIZE(writable_roots); i++) {
2976     if (root_index == writable_roots[i])
2977       return true;
2978   }
2979   return false;
2980 }
2981
2982
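// Looks up a previously cached result for (key_string, key_pattern).  The
// cache is probed at two consecutive entries derived from the string's hash;
// a miss is signalled by returning Smi 0.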
2983 Object* RegExpResultsCache::Lookup(Heap* heap,
2984                                    String* key_string,
2985                                    Object* key_pattern,
2986                                    ResultsCacheType type) {
2987   FixedArray* cache;
2988   if (!key_string->IsInternalizedString()) return Smi::FromInt(0);
2989   if (type == STRING_SPLIT_SUBSTRINGS) {
2990     ASSERT(key_pattern->IsString());
2991     if (!key_pattern->IsInternalizedString()) return Smi::FromInt(0);
2992     cache = heap->string_split_cache();
2993   } else {
2994     ASSERT(type == REGEXP_MULTIPLE_INDICES);
2995     ASSERT(key_pattern->IsFixedArray());
2996     cache = heap->regexp_multiple_cache();
2997   }
2998
2999   uint32_t hash = key_string->Hash();
3000   uint32_t index = ((hash & (kRegExpResultsCacheSize - 1)) &
3001       ~(kArrayEntriesPerCacheEntry - 1));
3002   if (cache->get(index + kStringOffset) == key_string &&
3003       cache->get(index + kPatternOffset) == key_pattern) {
3004     return cache->get(index + kArrayOffset);
3005   }
3006   index =
3007       ((index + kArrayEntriesPerCacheEntry) & (kRegExpResultsCacheSize - 1));
3008   if (cache->get(index + kStringOffset) == key_string &&
3009       cache->get(index + kPatternOffset) == key_pattern) {
3010     return cache->get(index + kArrayOffset);
3011   }
3012   return Smi::FromInt(0);
3013 }
3014
3015
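// Stores |value_array| in the cache under (key_string, key_pattern).  The
// primary entry for the hash is used if free, otherwise the secondary entry;
// if both are taken, the secondary entry is evicted and the primary entry is
// overwritten.  Short string-split results are additionally internalized, and
// the value array is always converted to copy-on-write.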
3016 void RegExpResultsCache::Enter(Heap* heap,
3017                                String* key_string,
3018                                Object* key_pattern,
3019                                FixedArray* value_array,
3020                                ResultsCacheType type) {
3021   FixedArray* cache;
3022   if (!key_string->IsInternalizedString()) return;
3023   if (type == STRING_SPLIT_SUBSTRINGS) {
3024     ASSERT(key_pattern->IsString());
3025     if (!key_pattern->IsInternalizedString()) return;
3026     cache = heap->string_split_cache();
3027   } else {
3028     ASSERT(type == REGEXP_MULTIPLE_INDICES);
3029     ASSERT(key_pattern->IsFixedArray());
3030     cache = heap->regexp_multiple_cache();
3031   }
3032
3033   uint32_t hash = key_string->Hash();
3034   uint32_t index = ((hash & (kRegExpResultsCacheSize - 1)) &
3035       ~(kArrayEntriesPerCacheEntry - 1));
3036   if (cache->get(index + kStringOffset) == Smi::FromInt(0)) {
3037     cache->set(index + kStringOffset, key_string);
3038     cache->set(index + kPatternOffset, key_pattern);
3039     cache->set(index + kArrayOffset, value_array);
3040   } else {
3041     uint32_t index2 =
3042         ((index + kArrayEntriesPerCacheEntry) & (kRegExpResultsCacheSize - 1));
3043     if (cache->get(index2 + kStringOffset) == Smi::FromInt(0)) {
3044       cache->set(index2 + kStringOffset, key_string);
3045       cache->set(index2 + kPatternOffset, key_pattern);
3046       cache->set(index2 + kArrayOffset, value_array);
3047     } else {
3048       cache->set(index2 + kStringOffset, Smi::FromInt(0));
3049       cache->set(index2 + kPatternOffset, Smi::FromInt(0));
3050       cache->set(index2 + kArrayOffset, Smi::FromInt(0));
3051       cache->set(index + kStringOffset, key_string);
3052       cache->set(index + kPatternOffset, key_pattern);
3053       cache->set(index + kArrayOffset, value_array);
3054     }
3055   }
3056   // If the array is a reasonably short list of substrings, convert it into a
3057   // list of internalized strings.
3058   if (type == STRING_SPLIT_SUBSTRINGS && value_array->length() < 100) {
3059     for (int i = 0; i < value_array->length(); i++) {
3060       String* str = String::cast(value_array->get(i));
3061       Object* internalized_str;
3062       MaybeObject* maybe_string = heap->InternalizeString(str);
3063       if (maybe_string->ToObject(&internalized_str)) {
3064         value_array->set(i, internalized_str);
3065       }
3066     }
3067   }
3068   // Convert backing store to a copy-on-write array.
3069   value_array->set_map_no_write_barrier(heap->fixed_cow_array_map());
3070 }
3071
3072
3073 void RegExpResultsCache::Clear(FixedArray* cache) {
3074   for (int i = 0; i < kRegExpResultsCacheSize; i++) {
3075     cache->set(i, Smi::FromInt(0));
3076   }
3077 }
3078
3079
3080 MaybeObject* Heap::AllocateInitialNumberStringCache() {
3081   MaybeObject* maybe_obj =
3082       AllocateFixedArray(kInitialNumberStringCacheSize * 2, TENURED);
3083   return maybe_obj;
3084 }
3085
3086
3087 int Heap::FullSizeNumberStringCacheLength() {
3088   // Compute the size of the number string cache based on the max newspace size.
3089   // The number string cache has a minimum size based on twice the initial cache
3090   // size to ensure that it is bigger after being made 'full size'.
3091   int number_string_cache_size = max_semispace_size_ / 512;
3092   number_string_cache_size = Max(kInitialNumberStringCacheSize * 2,
3093                                  Min(0x4000, number_string_cache_size));
3094   // There is a string and a number per entry so the length is twice the number
3095   // of entries.
3096   return number_string_cache_size * 2;
3097 }
3098
3099
3100 void Heap::AllocateFullSizeNumberStringCache() {
3101   // The idea is to have a small number string cache in the snapshot to keep
3102   // boot-time memory usage down.  If the number string cache gets expanded
3103   // while creating the snapshot, that defeats the purpose.
3104   ASSERT(!Serializer::enabled() || FLAG_extra_code != NULL);
3105   MaybeObject* maybe_obj =
3106       AllocateFixedArray(FullSizeNumberStringCacheLength(), TENURED);
3107   Object* new_cache;
3108   if (maybe_obj->ToObject(&new_cache)) {
3109     // We don't bother to repopulate the cache with entries from the old cache.
3110     // It will be repopulated soon enough with new strings.
3111     set_number_string_cache(FixedArray::cast(new_cache));
3112   }
3113   // If allocation fails then we just return without doing anything.  It is only
3114   // a cache, so best effort is OK here.
3115 }
3116
3117
3118 void Heap::FlushNumberStringCache() {
3119   // Flush the number to string cache.
3120   int len = number_string_cache()->length();
3121   for (int i = 0; i < len; i++) {
3122     number_string_cache()->set_undefined(this, i);
3123   }
3124 }
3125
3126
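// Hash functions for the number string cache: a double hashes to the XOR of
// the upper and lower 32 bits of its bit pattern, a Smi hashes to its value.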
3127 static inline int double_get_hash(double d) {
3128   DoubleRepresentation rep(d);
3129   return static_cast<int>(rep.bits) ^ static_cast<int>(rep.bits >> 32);
3130 }
3131
3132
3133 static inline int smi_get_hash(Smi* smi) {
3134   return smi->value();
3135 }
3136
3137
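// Returns the cached string representation of |number|, or undefined on a
// cache miss.  Each cache entry occupies two consecutive slots: the number
// followed by its string.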
3138 Object* Heap::GetNumberStringCache(Object* number) {
3139   int hash;
3140   int mask = (number_string_cache()->length() >> 1) - 1;
3141   if (number->IsSmi()) {
3142     hash = smi_get_hash(Smi::cast(number)) & mask;
3143   } else {
3144     hash = double_get_hash(number->Number()) & mask;
3145   }
3146   Object* key = number_string_cache()->get(hash * 2);
3147   if (key == number) {
3148     return String::cast(number_string_cache()->get(hash * 2 + 1));
3149   } else if (key->IsHeapNumber() &&
3150              number->IsHeapNumber() &&
3151              key->Number() == number->Number()) {
3152     return String::cast(number_string_cache()->get(hash * 2 + 1));
3153   }
3154   return undefined_value();
3155 }
3156
3157
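// Caches the string representation of |number|.  The first collision in the
// small initial cache triggers growth to the full-size cache instead of
// overwriting the existing entry.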
3158 void Heap::SetNumberStringCache(Object* number, String* string) {
3159   int hash;
3160   int mask = (number_string_cache()->length() >> 1) - 1;
3161   if (number->IsSmi()) {
3162     hash = smi_get_hash(Smi::cast(number)) & mask;
3163   } else {
3164     hash = double_get_hash(number->Number()) & mask;
3165   }
3166   if (number_string_cache()->get(hash * 2) != undefined_value() &&
3167       number_string_cache()->length() != FullSizeNumberStringCacheLength()) {
3168     // The first time we have a hash collision, we move to the full sized
3169     // number string cache.
3170     AllocateFullSizeNumberStringCache();
3171     return;
3172   }
3173   number_string_cache()->set(hash * 2, number);
3174   number_string_cache()->set(hash * 2 + 1, string);
3175 }
3176
3177
3178 MaybeObject* Heap::NumberToString(Object* number,
3179                                   bool check_number_string_cache,
3180                                   PretenureFlag pretenure) {
3181   isolate_->counters()->number_to_string_runtime()->Increment();
3182   if (check_number_string_cache) {
3183     Object* cached = GetNumberStringCache(number);
3184     if (cached != undefined_value()) {
3185       return cached;
3186     }
3187   }
3188
3189   char arr[100];
3190   Vector<char> buffer(arr, ARRAY_SIZE(arr));
3191   const char* str;
3192   if (number->IsSmi()) {
3193     int num = Smi::cast(number)->value();
3194     str = IntToCString(num, buffer);
3195   } else {
3196     double num = HeapNumber::cast(number)->value();
3197     str = DoubleToCString(num, buffer);
3198   }
3199
3200   Object* js_string;
3201   MaybeObject* maybe_js_string =
3202       AllocateStringFromOneByte(CStrVector(str), pretenure);
3203   if (maybe_js_string->ToObject(&js_string)) {
3204     SetNumberStringCache(number, String::cast(js_string));
3205   }
3206   return maybe_js_string;
3207 }
3208
3209
3210 MaybeObject* Heap::Uint32ToString(uint32_t value,
3211                                   bool check_number_string_cache) {
3212   Object* number;
3213   MaybeObject* maybe = NumberFromUint32(value);
3214   if (!maybe->To<Object>(&number)) return maybe;
3215   return NumberToString(number, check_number_string_cache);
3216 }
3217
3218
3219 Map* Heap::MapForExternalArrayType(ExternalArrayType array_type) {
3220   return Map::cast(roots_[RootIndexForExternalArrayType(array_type)]);
3221 }
3222
3223
3224 Heap::RootListIndex Heap::RootIndexForExternalArrayType(
3225     ExternalArrayType array_type) {
3226   switch (array_type) {
3227     case kExternalByteArray:
3228       return kExternalByteArrayMapRootIndex;
3229     case kExternalUnsignedByteArray:
3230       return kExternalUnsignedByteArrayMapRootIndex;
3231     case kExternalShortArray:
3232       return kExternalShortArrayMapRootIndex;
3233     case kExternalUnsignedShortArray:
3234       return kExternalUnsignedShortArrayMapRootIndex;
3235     case kExternalIntArray:
3236       return kExternalIntArrayMapRootIndex;
3237     case kExternalUnsignedIntArray:
3238       return kExternalUnsignedIntArrayMapRootIndex;
3239     case kExternalFloatArray:
3240       return kExternalFloatArrayMapRootIndex;
3241     case kExternalDoubleArray:
3242       return kExternalDoubleArrayMapRootIndex;
3243     case kExternalPixelArray:
3244       return kExternalPixelArrayMapRootIndex;
3245     default:
3246       UNREACHABLE();
3247       return kUndefinedValueRootIndex;
3248   }
3249 }
3250
3251
3252 MaybeObject* Heap::NumberFromDouble(double value, PretenureFlag pretenure) {
3253   // We need to distinguish the minus zero value and this cannot be
3254   // done after conversion to int. Doing this by comparing bit
3255   // patterns is faster than using fpclassify() et al.
3256   static const DoubleRepresentation minus_zero(-0.0);
3257
3258   DoubleRepresentation rep(value);
3259   if (rep.bits == minus_zero.bits) {
3260     return AllocateHeapNumber(-0.0, pretenure);
3261   }
3262
3263   int int_value = FastD2I(value);
3264   if (value == int_value && Smi::IsValid(int_value)) {
3265     return Smi::FromInt(int_value);
3266   }
3267
3268   // Materialize the value in the heap.
3269   return AllocateHeapNumber(value, pretenure);
3270 }
3271
3272
3273 MaybeObject* Heap::AllocateForeign(Address address, PretenureFlag pretenure) {
3274   // Statically ensure that it is safe to allocate foreigns in paged spaces.
3275   STATIC_ASSERT(Foreign::kSize <= Page::kMaxNonCodeHeapObjectSize);
3276   AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
3277   Foreign* result;
3278   MaybeObject* maybe_result = Allocate(foreign_map(), space);
3279   if (!maybe_result->To(&result)) return maybe_result;
3280   result->set_foreign_address(address);
3281   return result;
3282 }
3283
3284
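// Allocates a SharedFunctionInfo in old pointer space and initializes its
// fields to neutral defaults: the illegal builtin as code, the generic
// construct stub, empty scope info, and zeroed counters and positions.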
3285 MaybeObject* Heap::AllocateSharedFunctionInfo(Object* name) {
3286   SharedFunctionInfo* share;
3287   MaybeObject* maybe = Allocate(shared_function_info_map(), OLD_POINTER_SPACE);
3288   if (!maybe->To<SharedFunctionInfo>(&share)) return maybe;
3289
3290   // Set pointer fields.
3291   share->set_name(name);
3292   Code* illegal = isolate_->builtins()->builtin(Builtins::kIllegal);
3293   share->set_code(illegal);
3294   share->ClearOptimizedCodeMap();
3295   share->set_scope_info(ScopeInfo::Empty(isolate_));
3296   Code* construct_stub =
3297       isolate_->builtins()->builtin(Builtins::kJSConstructStubGeneric);
3298   share->set_construct_stub(construct_stub);
3299   share->set_instance_class_name(Object_string());
3300   share->set_function_data(undefined_value(), SKIP_WRITE_BARRIER);
3301   share->set_script(undefined_value(), SKIP_WRITE_BARRIER);
3302   share->set_debug_info(undefined_value(), SKIP_WRITE_BARRIER);
3303   share->set_inferred_name(empty_string(), SKIP_WRITE_BARRIER);
3304   share->set_initial_map(undefined_value(), SKIP_WRITE_BARRIER);
3305   share->set_this_property_assignments(undefined_value(), SKIP_WRITE_BARRIER);
3306   share->set_ast_node_count(0);
3307   share->set_stress_deopt_counter(FLAG_deopt_every_n_times);
3308   share->set_counters(0);
3309
3310   // Set integer fields (smi or int, depending on the architecture).
3311   share->set_length(0);
3312   share->set_formal_parameter_count(0);
3313   share->set_expected_nof_properties(0);
3314   share->set_num_literals(0);
3315   share->set_start_position_and_type(0);
3316   share->set_end_position(0);
3317   share->set_function_token_position(0);
3318   // All compiler hints default to false or 0.
3319   share->set_compiler_hints(0);
3320   share->set_this_property_assignments_count(0);
3321   share->set_opt_count(0);
3322
3323   return share;
3324 }
3325
3326
3327 MaybeObject* Heap::AllocateJSMessageObject(String* type,
3328                                            JSArray* arguments,
3329                                            int start_position,
3330                                            int end_position,
3331                                            Object* script,
3332                                            Object* stack_trace,
3333                                            Object* stack_frames) {
3334   Object* result;
3335   { MaybeObject* maybe_result = Allocate(message_object_map(), NEW_SPACE);
3336     if (!maybe_result->ToObject(&result)) return maybe_result;
3337   }
3338   JSMessageObject* message = JSMessageObject::cast(result);
3339   message->set_properties(Heap::empty_fixed_array(), SKIP_WRITE_BARRIER);
3340   message->initialize_elements();
3341   message->set_elements(Heap::empty_fixed_array(), SKIP_WRITE_BARRIER);
3342   message->set_type(type);
3343   message->set_arguments(arguments);
3344   message->set_start_position(start_position);
3345   message->set_end_position(end_position);
3346   message->set_script(script);
3347   message->set_stack_trace(stack_trace);
3348   message->set_stack_frames(stack_frames);
3349   return result;
3350 }
3351
3352
3353
3354 // Returns true for a character in a range.  Both limits are inclusive.
3355 static inline bool Between(uint32_t character, uint32_t from, uint32_t to) {
3356   // This makes use of the unsigned wraparound.
3357   return character - from <= to - from;
3358 }
3359
3360
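// Returns the internalized string for the character pair (c1, c2) if the
// string table already contains it; otherwise allocates a fresh sequential
// one-byte or two-byte string of length 2.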
3361 MUST_USE_RESULT static inline MaybeObject* MakeOrFindTwoCharacterString(
3362     Heap* heap,
3363     uint16_t c1,
3364     uint16_t c2) {
3365   String* result;
3366   // Numeric strings have a different hash algorithm not known by
3367   // LookupTwoCharsStringIfExists, so we skip this step for such strings.
3368   if ((!Between(c1, '0', '9') || !Between(c2, '0', '9')) &&
3369       heap->string_table()->LookupTwoCharsStringIfExists(c1, c2, &result)) {
3370     return result;
3371   // Now that we know the length is 2, we might as well make use of that fact
3372   // when building the new string.
3373   } else if (static_cast<unsigned>(c1 | c2) <= String::kMaxOneByteCharCodeU) {
3374     // Both characters fit into a one-byte string.
3375     ASSERT(IsPowerOf2(String::kMaxOneByteCharCodeU + 1));  // For the OR check.
3376     Object* result;
3377     { MaybeObject* maybe_result = heap->AllocateRawOneByteString(2);
3378       if (!maybe_result->ToObject(&result)) return maybe_result;
3379     }
3380     uint8_t* dest = SeqOneByteString::cast(result)->GetChars();
3381     dest[0] = static_cast<uint8_t>(c1);
3382     dest[1] = static_cast<uint8_t>(c2);
3383     return result;
3384   } else {
3385     Object* result;
3386     { MaybeObject* maybe_result = heap->AllocateRawTwoByteString(2);
3387       if (!maybe_result->ToObject(&result)) return maybe_result;
3388     }
3389     uc16* dest = SeqTwoByteString::cast(result)->GetChars();
3390     dest[0] = c1;
3391     dest[1] = c2;
3392     return result;
3393   }
3394 }
3395
3396
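// Concatenates |first| and |second|.  If either operand is empty the other
// one is returned, two-character results reuse the string table, short
// results are flattened into a sequential string, and longer results become
// a ConsString.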
3397 MaybeObject* Heap::AllocateConsString(String* first, String* second) {
3398   int first_length = first->length();
3399   if (first_length == 0) {
3400     return second;
3401   }
3402
3403   int second_length = second->length();
3404   if (second_length == 0) {
3405     return first;
3406   }
3407
3408   int length = first_length + second_length;
3409
3410   // Optimization for 2-byte strings often used as keys in a decompression
3411   // dictionary.  Check whether we already have the string in the string
3412   // table to prevent creation of many unnecessary strings.
3413   if (length == 2) {
3414     uint16_t c1 = first->Get(0);
3415     uint16_t c2 = second->Get(0);
3416     return MakeOrFindTwoCharacterString(this, c1, c2);
3417   }
3418
3419   bool first_is_one_byte = first->IsOneByteRepresentation();
3420   bool second_is_one_byte = second->IsOneByteRepresentation();
3421   bool is_one_byte = first_is_one_byte && second_is_one_byte;
3422   // Make sure that an out of memory exception is thrown if the length
3423   // of the new cons string is too large.
3424   if (length > String::kMaxLength || length < 0) {
3425     isolate()->context()->mark_out_of_memory();
3426     return Failure::OutOfMemoryException(0x4);
3427   }
3428
3429   bool is_one_byte_data_in_two_byte_string = false;
3430   if (!is_one_byte) {
3431     // At least one of the strings uses two-byte representation so we
3432     // can't use the fast case code for short ASCII strings below, but
3433     // we can try to save memory if all chars actually fit in ASCII.
3434     is_one_byte_data_in_two_byte_string =
3435         first->HasOnlyOneByteChars() && second->HasOnlyOneByteChars();
3436     if (is_one_byte_data_in_two_byte_string) {
3437       isolate_->counters()->string_add_runtime_ext_to_ascii()->Increment();
3438     }
3439   }
3440
3441   // If the resulting string is small make a flat string.
3442   if (length < ConsString::kMinLength) {
3443     // Note that neither of the two inputs can be a slice because:
3444     STATIC_ASSERT(ConsString::kMinLength <= SlicedString::kMinLength);
3445     ASSERT(first->IsFlat());
3446     ASSERT(second->IsFlat());
3447     if (is_one_byte) {
3448       Object* result;
3449       { MaybeObject* maybe_result = AllocateRawOneByteString(length);
3450         if (!maybe_result->ToObject(&result)) return maybe_result;
3451       }
3452       // Copy the characters into the new object.
3453       uint8_t* dest = SeqOneByteString::cast(result)->GetChars();
3454       // Copy first part.
3455       const uint8_t* src;
3456       if (first->IsExternalString()) {
3457         src = ExternalAsciiString::cast(first)->GetChars();
3458       } else {
3459         src = SeqOneByteString::cast(first)->GetChars();
3460       }
3461       for (int i = 0; i < first_length; i++) *dest++ = src[i];
3462       // Copy second part.
3463       if (second->IsExternalString()) {
3464         src = ExternalAsciiString::cast(second)->GetChars();
3465       } else {
3466         src = SeqOneByteString::cast(second)->GetChars();
3467       }
3468       for (int i = 0; i < second_length; i++) *dest++ = src[i];
3469       return result;
3470     } else {
3471       if (is_one_byte_data_in_two_byte_string) {
3472         Object* result;
3473         { MaybeObject* maybe_result = AllocateRawOneByteString(length);
3474           if (!maybe_result->ToObject(&result)) return maybe_result;
3475         }
3476         // Copy the characters into the new object.
3477         uint8_t* dest = SeqOneByteString::cast(result)->GetChars();
3478         String::WriteToFlat(first, dest, 0, first_length);
3479         String::WriteToFlat(second, dest + first_length, 0, second_length);
3480         isolate_->counters()->string_add_runtime_ext_to_ascii()->Increment();
3481         return result;
3482       }
3483
3484       Object* result;
3485       { MaybeObject* maybe_result = AllocateRawTwoByteString(length);
3486         if (!maybe_result->ToObject(&result)) return maybe_result;
3487       }
3488       // Copy the characters into the new object.
3489       uc16* dest = SeqTwoByteString::cast(result)->GetChars();
3490       String::WriteToFlat(first, dest, 0, first_length);
3491       String::WriteToFlat(second, dest + first_length, 0, second_length);
3492       return result;
3493     }
3494   }
3495
3496   Map* map = (is_one_byte || is_one_byte_data_in_two_byte_string) ?
3497       cons_ascii_string_map() : cons_string_map();
3498
3499   Object* result;
3500   { MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
3501     if (!maybe_result->ToObject(&result)) return maybe_result;
3502   }
3503
3504   AssertNoAllocation no_gc;
3505   ConsString* cons_string = ConsString::cast(result);
3506   WriteBarrierMode mode = cons_string->GetWriteBarrierMode(no_gc);
3507   cons_string->set_length(length);
3508   cons_string->set_hash_field(String::kEmptyHashField);
3509   cons_string->set_first(first, mode);
3510   cons_string->set_second(second, mode);
3511   return result;
3512 }
3513
3514
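// Creates a substring of |buffer| covering [start, end).  Lengths 0, 1 and 2
// are special-cased; short or tenured results are copied into a new
// sequential string, while longer results become a SlicedString sharing the
// parent's characters.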
3515 MaybeObject* Heap::AllocateSubString(String* buffer,
3516                                      int start,
3517                                      int end,
3518                                      PretenureFlag pretenure) {
3519   int length = end - start;
3520   if (length <= 0) {
3521     return empty_string();
3522   } else if (length == 1) {
3523     return LookupSingleCharacterStringFromCode(buffer->Get(start));
3524   } else if (length == 2) {
3525     // Optimization for 2-byte strings often used as keys in a decompression
3526     // dictionary.  Check whether we already have the string in the string
3527     // table to prevent creation of many unnecessary strings.
3528     uint16_t c1 = buffer->Get(start);
3529     uint16_t c2 = buffer->Get(start + 1);
3530     return MakeOrFindTwoCharacterString(this, c1, c2);
3531   }
3532
3533   // Make an attempt to flatten the buffer to reduce access time.
3534   buffer = buffer->TryFlattenGetString();
3535
3536   if (!FLAG_string_slices ||
3537       !buffer->IsFlat() ||
3538       length < SlicedString::kMinLength ||
3539       pretenure == TENURED) {
3540     Object* result;
3541     // WriteToFlat takes care of the case when an indirect string has a
3542     // different encoding from its underlying string.  These encodings may
3543     // differ because of externalization.
3544     bool is_one_byte = buffer->IsOneByteRepresentation();
3545     { MaybeObject* maybe_result = is_one_byte
3546                                   ? AllocateRawOneByteString(length, pretenure)
3547                                   : AllocateRawTwoByteString(length, pretenure);
3548       if (!maybe_result->ToObject(&result)) return maybe_result;
3549     }
3550     String* string_result = String::cast(result);
3551     // Copy the characters into the new object.
3552     if (is_one_byte) {
3553       ASSERT(string_result->IsOneByteRepresentation());
3554       uint8_t* dest = SeqOneByteString::cast(string_result)->GetChars();
3555       String::WriteToFlat(buffer, dest, start, end);
3556     } else {
3557       ASSERT(string_result->IsTwoByteRepresentation());
3558       uc16* dest = SeqTwoByteString::cast(string_result)->GetChars();
3559       String::WriteToFlat(buffer, dest, start, end);
3560     }
3561     return result;
3562   }
3563
3564   ASSERT(buffer->IsFlat());
3565 #if VERIFY_HEAP
3566   if (FLAG_verify_heap) {
3567     buffer->StringVerify();
3568   }
3569 #endif
3570
3571   Object* result;
3572   // When slicing an indirect string we use its encoding for a newly created
3573   // slice and don't check the encoding of the underlying string.  This is safe
3574   // even if the encodings are different because of externalization.  If an
3575   // indirect ASCII string is pointing to a two-byte string, the two-byte char
3576   // codes of the underlying string must still fit into ASCII (because
3577   // externalization must not change char codes).
3578   { Map* map = buffer->IsOneByteRepresentation()
3579                  ? sliced_ascii_string_map()
3580                  : sliced_string_map();
3581     MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
3582     if (!maybe_result->ToObject(&result)) return maybe_result;
3583   }
3584
3585   AssertNoAllocation no_gc;
3586   SlicedString* sliced_string = SlicedString::cast(result);
3587   sliced_string->set_length(length);
3588   sliced_string->set_hash_field(String::kEmptyHashField);
3589   if (buffer->IsConsString()) {
3590     ConsString* cons = ConsString::cast(buffer);
3591     ASSERT(cons->second()->length() == 0);
3592     sliced_string->set_parent(cons->first());
3593     sliced_string->set_offset(start);
3594   } else if (buffer->IsSlicedString()) {
3595     // Prevent nesting sliced strings.
3596     SlicedString* parent_slice = SlicedString::cast(buffer);
3597     sliced_string->set_parent(parent_slice->parent());
3598     sliced_string->set_offset(start + parent_slice->offset());
3599   } else {
3600     sliced_string->set_parent(buffer);
3601     sliced_string->set_offset(start);
3602   }
3603   ASSERT(sliced_string->parent()->IsSeqString() ||
3604          sliced_string->parent()->IsExternalString());
3605   return result;
3606 }
3607
3608
3609 MaybeObject* Heap::AllocateExternalStringFromAscii(
3610     const ExternalAsciiString::Resource* resource) {
3611   size_t length = resource->length();
3612   if (length > static_cast<size_t>(String::kMaxLength)) {
3613     isolate()->context()->mark_out_of_memory();
3614     return Failure::OutOfMemoryException(0x5);
3615   }
3616
3617   Map* map = external_ascii_string_map();
3618   Object* result;
3619   { MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
3620     if (!maybe_result->ToObject(&result)) return maybe_result;
3621   }
3622
3623   ExternalAsciiString* external_string = ExternalAsciiString::cast(result);
3624   external_string->set_length(static_cast<int>(length));
3625   external_string->set_hash_field(String::kEmptyHashField);
3626   external_string->set_resource(resource);
3627
3628   return result;
3629 }
3630
3631
3632 MaybeObject* Heap::AllocateExternalStringFromTwoByte(
3633     const ExternalTwoByteString::Resource* resource) {
3634   size_t length = resource->length();
3635   if (length > static_cast<size_t>(String::kMaxLength)) {
3636     isolate()->context()->mark_out_of_memory();
3637     return Failure::OutOfMemoryException(0x6);
3638   }
3639
3640   // For small strings we check whether the resource contains only
3641   // one-byte characters.  If so, we use a different string map.
3642   static const size_t kOneByteCheckLengthLimit = 32;
3643   bool is_one_byte = length <= kOneByteCheckLengthLimit &&
3644       String::IsOneByte(resource->data(), static_cast<int>(length));
3645   Map* map = is_one_byte ?
3646       external_string_with_one_byte_data_map() : external_string_map();
3647   Object* result;
3648   { MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
3649     if (!maybe_result->ToObject(&result)) return maybe_result;
3650   }
3651
3652   ExternalTwoByteString* external_string = ExternalTwoByteString::cast(result);
3653   external_string->set_length(static_cast<int>(length));
3654   external_string->set_hash_field(String::kEmptyHashField);
3655   external_string->set_resource(resource);
3656
3657   return result;
3658 }
3659
3660
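// Returns a string of length one containing |code|, using the single
// character string cache for one-byte codes and allocating a fresh two-byte
// string otherwise.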
3661 MaybeObject* Heap::LookupSingleCharacterStringFromCode(uint16_t code) {
3662   if (code <= String::kMaxOneByteCharCode) {
3663     Object* value = single_character_string_cache()->get(code);
3664     if (value != undefined_value()) return value;
3665
3666     uint8_t buffer[1];
3667     buffer[0] = static_cast<uint8_t>(code);
3668     Object* result;
3669     MaybeObject* maybe_result =
3670         InternalizeOneByteString(Vector<const uint8_t>(buffer, 1));
3671
3672     if (!maybe_result->ToObject(&result)) return maybe_result;
3673     single_character_string_cache()->set(code, result);
3674     return result;
3675   }
3676
3677   Object* result;
3678   { MaybeObject* maybe_result = AllocateRawTwoByteString(1);
3679     if (!maybe_result->ToObject(&result)) return maybe_result;
3680   }
3681   String* answer = String::cast(result);
3682   answer->Set(0, code);
3683   return answer;
3684 }
3685
3686
3687 MaybeObject* Heap::AllocateByteArray(int length, PretenureFlag pretenure) {
3688   if (length < 0 || length > ByteArray::kMaxLength) {
3689     return Failure::OutOfMemoryException(0x7);
3690   }
3691   if (pretenure == NOT_TENURED) {
3692     return AllocateByteArray(length);
3693   }
3694   int size = ByteArray::SizeFor(length);
3695   Object* result;
3696   { MaybeObject* maybe_result = (size <= Page::kMaxNonCodeHeapObjectSize)
3697                    ? old_data_space_->AllocateRaw(size)
3698                    : lo_space_->AllocateRaw(size, NOT_EXECUTABLE);
3699     if (!maybe_result->ToObject(&result)) return maybe_result;
3700   }
3701
3702   reinterpret_cast<ByteArray*>(result)->set_map_no_write_barrier(
3703       byte_array_map());
3704   reinterpret_cast<ByteArray*>(result)->set_length(length);
3705   return result;
3706 }
3707
3708
3709 MaybeObject* Heap::AllocateByteArray(int length) {
3710   if (length < 0 || length > ByteArray::kMaxLength) {
3711     return Failure::OutOfMemoryException(0x8);
3712   }
3713   int size = ByteArray::SizeFor(length);
3714   AllocationSpace space =
3715       (size > Page::kMaxNonCodeHeapObjectSize) ? LO_SPACE : NEW_SPACE;
3716   Object* result;
3717   { MaybeObject* maybe_result = AllocateRaw(size, space, OLD_DATA_SPACE);
3718     if (!maybe_result->ToObject(&result)) return maybe_result;
3719   }
3720
3721   reinterpret_cast<ByteArray*>(result)->set_map_no_write_barrier(
3722       byte_array_map());
3723   reinterpret_cast<ByteArray*>(result)->set_length(length);
3724   return result;
3725 }
3726
3727
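// Overwrites the region [addr, addr + size) with a filler object so that the
// heap stays iterable: one- and two-word fillers get dedicated maps, larger
// regions become FreeSpace objects.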
3728 void Heap::CreateFillerObjectAt(Address addr, int size) {
3729   if (size == 0) return;
3730   HeapObject* filler = HeapObject::FromAddress(addr);
3731   if (size == kPointerSize) {
3732     filler->set_map_no_write_barrier(one_pointer_filler_map());
3733   } else if (size == 2 * kPointerSize) {
3734     filler->set_map_no_write_barrier(two_pointer_filler_map());
3735   } else {
3736     filler->set_map_no_write_barrier(free_space_map());
3737     FreeSpace::cast(filler)->set_size(size);
3738   }
3739 }
3740
3741
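// Allocates the on-heap header for an external array of the given type and
// length; the element storage itself lives outside the heap at
// |external_pointer|.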
3742 MaybeObject* Heap::AllocateExternalArray(int length,
3743                                          ExternalArrayType array_type,
3744                                          void* external_pointer,
3745                                          PretenureFlag pretenure) {
3746   AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
3747   Object* result;
3748   { MaybeObject* maybe_result = AllocateRaw(ExternalArray::kAlignedSize,
3749                                             space,
3750                                             OLD_DATA_SPACE);
3751     if (!maybe_result->ToObject(&result)) return maybe_result;
3752   }
3753
3754   reinterpret_cast<ExternalArray*>(result)->set_map_no_write_barrier(
3755       MapForExternalArrayType(array_type));
3756   reinterpret_cast<ExternalArray*>(result)->set_length(length);
3757   reinterpret_cast<ExternalArray*>(result)->set_external_pointer(
3758       external_pointer);
3759
3760   return result;
3761 }
3762
3763
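// Allocates a Code object for |desc|.  Large code objects, and code that must
// not move, are placed in large object space; the generated instructions and
// relocation info are then copied into the new object.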
3764 MaybeObject* Heap::CreateCode(const CodeDesc& desc,
3765                               Code::Flags flags,
3766                               Handle<Object> self_reference,
3767                               bool immovable,
3768                               bool crankshafted) {
3769   // Allocate ByteArray before the Code object, so that we do not risk
3770   // leaving uninitialized Code object (and breaking the heap).
3771   ByteArray* reloc_info;
3772   MaybeObject* maybe_reloc_info = AllocateByteArray(desc.reloc_size, TENURED);
3773   if (!maybe_reloc_info->To(&reloc_info)) return maybe_reloc_info;
3774
3775   // Compute size.
3776   int body_size = RoundUp(desc.instr_size, kObjectAlignment);
3777   int obj_size = Code::SizeFor(body_size);
3778   ASSERT(IsAligned(static_cast<intptr_t>(obj_size), kCodeAlignment));
3779   MaybeObject* maybe_result;
3780   // Large code objects and code objects which should stay at a fixed address
3781   // are allocated in large object space.
3782   HeapObject* result;
3783   bool force_lo_space = obj_size > code_space()->AreaSize();
3784   if (force_lo_space) {
3785     maybe_result = lo_space_->AllocateRaw(obj_size, EXECUTABLE);
3786   } else {
3787     maybe_result = code_space_->AllocateRaw(obj_size);
3788   }
3789   if (!maybe_result->To<HeapObject>(&result)) return maybe_result;
3790
3791   if (immovable && !force_lo_space &&
3792       // Objects on the first page of each space are never moved.
3793       !code_space_->FirstPage()->Contains(result->address())) {
3794     // Discard the first code allocation, which was on a page where it could be
3795     // moved.
3796     CreateFillerObjectAt(result->address(), obj_size);
3797     maybe_result = lo_space_->AllocateRaw(obj_size, EXECUTABLE);
3798     if (!maybe_result->To<HeapObject>(&result)) return maybe_result;
3799   }
3800
3801   // Initialize the object
3802   result->set_map_no_write_barrier(code_map());
3803   Code* code = Code::cast(result);
3804   ASSERT(!isolate_->code_range()->exists() ||
3805       isolate_->code_range()->contains(code->address()));
3806   code->set_instruction_size(desc.instr_size);
3807   code->set_relocation_info(reloc_info);
3808   code->set_flags(flags);
3809   if (code->is_call_stub() || code->is_keyed_call_stub()) {
3810     code->set_check_type(RECEIVER_MAP_CHECK);
3811   }
3812   code->set_is_crankshafted(crankshafted);
3813   code->set_deoptimization_data(empty_fixed_array(), SKIP_WRITE_BARRIER);
3814   code->InitializeTypeFeedbackInfoNoWriteBarrier(undefined_value());
3815   code->set_handler_table(empty_fixed_array(), SKIP_WRITE_BARRIER);
3816   code->set_gc_metadata(Smi::FromInt(0));
3817   code->set_ic_age(global_ic_age_);
3818   code->set_prologue_offset(kPrologueOffsetNotSet);
3819   if (code->kind() == Code::OPTIMIZED_FUNCTION) {
3820     code->set_marked_for_deoptimization(false);
3821   }
3822   // Allow self references to created code object by patching the handle to
3823   // point to the newly allocated Code object.
3824   if (!self_reference.is_null()) {
3825     *(self_reference.location()) = code;
3826   }
3827   // Migrate generated code.
3828   // The generated code can contain Object** values (typically from handles)
3829   // that are dereferenced during the copy to point directly to the actual heap
3830   // objects. These pointers can include references to the code object itself,
3831   // through the self_reference parameter.
3832   code->CopyFrom(desc);
3833
3834 #ifdef VERIFY_HEAP
3835   if (FLAG_verify_heap) {
3836     code->Verify();
3837   }
3838 #endif
3839   return code;
3840 }
3841
3842
3843 MaybeObject* Heap::CopyCode(Code* code) {
3844   // Allocate an object the same size as the code object.
3845   int obj_size = code->Size();
3846   MaybeObject* maybe_result;
3847   if (obj_size > code_space()->AreaSize()) {
3848     maybe_result = lo_space_->AllocateRaw(obj_size, EXECUTABLE);
3849   } else {
3850     maybe_result = code_space_->AllocateRaw(obj_size);
3851   }
3852
3853   Object* result;
3854   if (!maybe_result->ToObject(&result)) return maybe_result;
3855
3856   // Copy code object.
3857   Address old_addr = code->address();
3858   Address new_addr = reinterpret_cast<HeapObject*>(result)->address();
3859   CopyBlock(new_addr, old_addr, obj_size);
3860   // Relocate the copy.
3861   Code* new_code = Code::cast(result);
3862   ASSERT(!isolate_->code_range()->exists() ||
3863       isolate_->code_range()->contains(code->address()));
3864   new_code->Relocate(new_addr - old_addr);
3865   return new_code;
3866 }
3867
3868
3869 MaybeObject* Heap::CopyCode(Code* code, Vector<byte> reloc_info) {
3870   // Allocate ByteArray before the Code object, so that we do not risk
3871   // leaving uninitialized Code object (and breaking the heap).
3872   Object* reloc_info_array;
3873   { MaybeObject* maybe_reloc_info_array =
3874         AllocateByteArray(reloc_info.length(), TENURED);
3875     if (!maybe_reloc_info_array->ToObject(&reloc_info_array)) {
3876       return maybe_reloc_info_array;
3877     }
3878   }
3879
3880   int new_body_size = RoundUp(code->instruction_size(), kObjectAlignment);
3881
3882   int new_obj_size = Code::SizeFor(new_body_size);
3883
3884   Address old_addr = code->address();
3885
3886   size_t relocation_offset =
3887       static_cast<size_t>(code->instruction_end() - old_addr);
3888
3889   MaybeObject* maybe_result;
3890   if (new_obj_size > code_space()->AreaSize()) {
3891     maybe_result = lo_space_->AllocateRaw(new_obj_size, EXECUTABLE);
3892   } else {
3893     maybe_result = code_space_->AllocateRaw(new_obj_size);
3894   }
3895
3896   Object* result;
3897   if (!maybe_result->ToObject(&result)) return maybe_result;
3898
3899   // Copy code object.
3900   Address new_addr = reinterpret_cast<HeapObject*>(result)->address();
3901
3902   // Copy header and instructions.
3903   CopyBytes(new_addr, old_addr, relocation_offset);
3904
3905   Code* new_code = Code::cast(result);
3906   new_code->set_relocation_info(ByteArray::cast(reloc_info_array));
3907
3908   // Copy patched rinfo.
3909   CopyBytes(new_code->relocation_start(),
3910             reloc_info.start(),
3911             static_cast<size_t>(reloc_info.length()));
3912
3913   // Relocate the copy.
3914   ASSERT(!isolate_->code_range()->exists() ||
3915       isolate_->code_range()->contains(code->address()));
3916   new_code->Relocate(new_addr - old_addr);
3917
3918 #ifdef VERIFY_HEAP
3919   if (FLAG_verify_heap) {
3920     code->Verify();
3921   }
3922 #endif
3923   return new_code;
3924 }
3925
3926
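// Like Allocate(), but reserves room for an AllocationSiteInfo record
// directly behind the object and initializes it with the given payload.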
3927 MaybeObject* Heap::AllocateWithAllocationSite(Map* map, AllocationSpace space,
3928     Handle<Object> allocation_site_info_payload) {
3929   ASSERT(gc_state_ == NOT_IN_GC);
3930   ASSERT(map->instance_type() != MAP_TYPE);
3931   // If allocation failures are disallowed, we may allocate in a different
3932   // space when new space is full and the object is not a large object.
3933   AllocationSpace retry_space =
3934       (space != NEW_SPACE) ? space : TargetSpaceId(map->instance_type());
3935   int size = map->instance_size() + AllocationSiteInfo::kSize;
3936   Object* result;
3937   MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
3938   if (!maybe_result->ToObject(&result)) return maybe_result;
3939   // No need for write barrier since object is white and map is in old space.
3940   HeapObject::cast(result)->set_map_no_write_barrier(map);
3941   AllocationSiteInfo* alloc_info = reinterpret_cast<AllocationSiteInfo*>(
3942       reinterpret_cast<Address>(result) + map->instance_size());
3943   alloc_info->set_map_no_write_barrier(allocation_site_info_map());
3944   alloc_info->set_payload(*allocation_site_info_payload, SKIP_WRITE_BARRIER);
3945   return result;
3946 }
3947
3948
3949 MaybeObject* Heap::Allocate(Map* map, AllocationSpace space) {
3950   ASSERT(gc_state_ == NOT_IN_GC);
3951   ASSERT(map->instance_type() != MAP_TYPE);
3952   // If allocation failures are disallowed, we may allocate in a different
3953   // space when new space is full and the object is not a large object.
3954   AllocationSpace retry_space =
3955       (space != NEW_SPACE) ? space : TargetSpaceId(map->instance_type());
3956   int size = map->instance_size();
3957   Object* result;
3958   MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
3959   if (!maybe_result->ToObject(&result)) return maybe_result;
3960   // No need for write barrier since object is white and map is in old space.
3961   HeapObject::cast(result)->set_map_no_write_barrier(map);
3962   return result;
3963 }
3964
3965
3966 void Heap::InitializeFunction(JSFunction* function,
3967                               SharedFunctionInfo* shared,
3968                               Object* prototype) {
3969   ASSERT(!prototype->IsMap());
3970   function->initialize_properties();
3971   function->initialize_elements();
3972   function->set_shared(shared);
3973   function->set_code(shared->code());
3974   function->set_prototype_or_initial_map(prototype);
3975   function->set_context(undefined_value());
3976   function->set_literals_or_bindings(empty_fixed_array());
3977   function->set_next_function_link(undefined_value());
3978 }
3979
3980
3981 MaybeObject* Heap::AllocateFunctionPrototype(JSFunction* function) {
3982   // Make sure to use globals from the function's context, since the function
3983   // can be from a different context.
3984   Context* native_context = function->context()->native_context();
3985   bool needs_constructor_property;
3986   Map* new_map;
3987   if (function->shared()->is_generator()) {
3988     // Generator prototypes can share maps since they don't have "constructor"
3989     // properties.
3990     new_map = native_context->generator_object_prototype_map();
3991     needs_constructor_property = false;
3992   } else {
3993     // Each function prototype gets a fresh map to avoid unwanted sharing of
3994     // maps between prototypes of different constructors.
3995     JSFunction* object_function = native_context->object_function();
3996     ASSERT(object_function->has_initial_map());
3997     MaybeObject* maybe_map = object_function->initial_map()->Copy();
3998     if (!maybe_map->To(&new_map)) return maybe_map;
3999     needs_constructor_property = true;
4000   }
4001
4002   Object* prototype;
4003   MaybeObject* maybe_prototype = AllocateJSObjectFromMap(new_map);
4004   if (!maybe_prototype->ToObject(&prototype)) return maybe_prototype;
4005
4006   if (needs_constructor_property) {
4007     MaybeObject* maybe_failure =
4008         JSObject::cast(prototype)->SetLocalPropertyIgnoreAttributes(
4009             constructor_string(), function, DONT_ENUM);
4010     if (maybe_failure->IsFailure()) return maybe_failure;
4011   }
4012
4013   return prototype;
4014 }
4015
4016
4017 MaybeObject* Heap::AllocateFunction(Map* function_map,
4018                                     SharedFunctionInfo* shared,
4019                                     Object* prototype,
4020                                     PretenureFlag pretenure) {
4021   AllocationSpace space =
4022       (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
4023   Object* result;
4024   { MaybeObject* maybe_result = Allocate(function_map, space);
4025     if (!maybe_result->ToObject(&result)) return maybe_result;
4026   }
4027   InitializeFunction(JSFunction::cast(result), shared, prototype);
4028   return result;
4029 }
4030
4031
4032 MaybeObject* Heap::AllocateArgumentsObject(Object* callee, int length) {
4033   // To get fast allocation and map sharing for arguments objects we
4034   // allocate them based on an arguments boilerplate.
4035
4036   JSObject* boilerplate;
4037   int arguments_object_size;
4038   bool strict_mode_callee = callee->IsJSFunction() &&
4039       !JSFunction::cast(callee)->shared()->is_classic_mode();
4040   if (strict_mode_callee) {
4041     boilerplate =
4042         isolate()->context()->native_context()->
4043             strict_mode_arguments_boilerplate();
4044     arguments_object_size = kArgumentsObjectSizeStrict;
4045   } else {
4046     boilerplate =
4047         isolate()->context()->native_context()->arguments_boilerplate();
4048     arguments_object_size = kArgumentsObjectSize;
4049   }
4050
4051   // This calls Copy directly rather than using Heap::AllocateRaw so we
4052   // duplicate the check here.
4053   ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
4054
4055   // Check that the size of the boilerplate matches our
4056   // expectations. The ArgumentsAccessStub::GenerateNewObject relies
4057   // on the size being a known constant.
4058   ASSERT(arguments_object_size == boilerplate->map()->instance_size());
4059
4060   // Do the allocation.
4061   Object* result;
4062   { MaybeObject* maybe_result =
4063         AllocateRaw(arguments_object_size, NEW_SPACE, OLD_POINTER_SPACE);
4064     if (!maybe_result->ToObject(&result)) return maybe_result;
4065   }
4066
4067   // Copy the content. The arguments boilerplate doesn't have any
4068   // fields that point to new space so it's safe to skip the write
4069   // barrier here.
4070   CopyBlock(HeapObject::cast(result)->address(),
4071             boilerplate->address(),
4072             JSObject::kHeaderSize);
4073
4074   // Set the length property.
4075   JSObject::cast(result)->InObjectPropertyAtPut(kArgumentsLengthIndex,
4076                                                 Smi::FromInt(length),
4077                                                 SKIP_WRITE_BARRIER);
4078   // Set the callee property for non-strict mode arguments object only.
4079   if (!strict_mode_callee) {
4080     JSObject::cast(result)->InObjectPropertyAtPut(kArgumentsCalleeIndex,
4081                                                   callee);
4082   }
4083
4084   // Check the state of the object
4085   ASSERT(JSObject::cast(result)->HasFastProperties());
4086   ASSERT(JSObject::cast(result)->HasFastObjectElements());
4087
4088   return result;
4089 }
4090
4091
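// Returns true if two adjacent descriptors share the same key.  Callers sort
// the descriptor array first, so this detects all duplicate property names.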
4092 static bool HasDuplicates(DescriptorArray* descriptors) {
4093   int count = descriptors->number_of_descriptors();
4094   if (count > 1) {
4095     Name* prev_key = descriptors->GetKey(0);
4096     for (int i = 1; i != count; i++) {
4097       Name* current_key = descriptors->GetKey(i);
4098       if (prev_key == current_key) return true;
4099       prev_key = current_key;
4100     }
4101   }
4102   return false;
4103 }
4104
4105
4106 MaybeObject* Heap::AllocateInitialMap(JSFunction* fun) {
4107   ASSERT(!fun->has_initial_map());
4108
4109   // First create a new map with the size and number of in-object properties
4110   // suggested by the function.
4111   InstanceType instance_type;
4112   int instance_size;
4113   int in_object_properties;
4114   if (fun->shared()->is_generator()) {
4115     instance_type = JS_GENERATOR_OBJECT_TYPE;
4116     instance_size = JSGeneratorObject::kSize;
4117     in_object_properties = 0;
4118   } else {
4119     instance_type = JS_OBJECT_TYPE;
4120     instance_size = fun->shared()->CalculateInstanceSize();
4121     in_object_properties = fun->shared()->CalculateInObjectProperties();
4122   }
4123   Map* map;
4124   MaybeObject* maybe_map = AllocateMap(instance_type, instance_size);
4125   if (!maybe_map->To(&map)) return maybe_map;
4126
4127   // Fetch or allocate prototype.
4128   Object* prototype;
4129   if (fun->has_instance_prototype()) {
4130     prototype = fun->instance_prototype();
4131   } else {
4132     MaybeObject* maybe_prototype = AllocateFunctionPrototype(fun);
4133     if (!maybe_prototype->To(&prototype)) return maybe_prototype;
4134   }
4135   map->set_inobject_properties(in_object_properties);
4136   map->set_unused_property_fields(in_object_properties);
4137   map->set_prototype(prototype);
4138   ASSERT(map->has_fast_object_elements());
4139
4140   // If the function has only simple this property assignments add
4141   // field descriptors for these to the initial map as the object
4142   // cannot be constructed without having these properties.  Guard by
4143   // the inline_new flag so we only change the map if we generate a
4144   // specialized construct stub.
4145   ASSERT(in_object_properties <= Map::kMaxPreAllocatedPropertyFields);
4146   if (instance_type == JS_OBJECT_TYPE &&
4147       fun->shared()->CanGenerateInlineConstructor(prototype)) {
4148     int count = fun->shared()->this_property_assignments_count();
4149     if (count > in_object_properties) {
4150       // Inline constructor can only handle inobject properties.
4151       fun->shared()->ForbidInlineConstructor();
4152     } else {
4153       DescriptorArray* descriptors;
4154       MaybeObject* maybe_descriptors = DescriptorArray::Allocate(count);
4155       if (!maybe_descriptors->To(&descriptors)) return maybe_descriptors;
4156
4157       DescriptorArray::WhitenessWitness witness(descriptors);
4158       for (int i = 0; i < count; i++) {
4159         String* name = fun->shared()->GetThisPropertyAssignmentName(i);
4160         ASSERT(name->IsInternalizedString());
4161         // TODO(verwaest): Since we cannot update the boilerplate's map yet,
4162         // initialize to the worst case.
4163         FieldDescriptor field(name, i, NONE, Representation::Tagged());
4164         descriptors->Set(i, &field, witness);
4165       }
4166       descriptors->Sort();
4167
4168       // The descriptors may contain duplicates because the compiler does not
4169       // guarantee the uniqueness of property names (it would have required
4170       // quadratic time). Once the descriptors are sorted we can check for
4171       // duplicates in linear time.
4172       if (HasDuplicates(descriptors)) {
4173         fun->shared()->ForbidInlineConstructor();
4174       } else {
4175         map->InitializeDescriptors(descriptors);
4176         map->set_pre_allocated_property_fields(count);
4177         map->set_unused_property_fields(in_object_properties - count);
4178       }
4179     }
4180   }
4181
4182   if (instance_type == JS_OBJECT_TYPE) {
4183     fun->shared()->StartInobjectSlackTracking(map);
4184   }
4185
4186   return map;
4187 }
4188
4189
4190 void Heap::InitializeJSObjectFromMap(JSObject* obj,
4191                                      FixedArray* properties,
4192                                      Map* map) {
4193   obj->set_properties(properties);
4194   obj->initialize_elements();
4195   // TODO(1240798): Initialize the object's body using valid initial values
4196   // according to the object's initial map.  For example, if the map's
4197   // instance type is JS_ARRAY_TYPE, the length field should be initialized
4198   // to a number (e.g. Smi::FromInt(0)) and the elements initialized to a
4199   // fixed array (e.g. Heap::empty_fixed_array()).  Currently, the object
4200   // verification code has to cope with (temporarily) invalid objects.  See
4201   // for example, JSArray::JSArrayVerify).
4202   Object* filler;
4203   // We cannot always fill with one_pointer_filler_map because objects
4204   // created from API functions expect their internal fields to be initialized
4205   // with undefined_value.
4206   // Pre-allocated fields need to be initialized with undefined_value as well
4207   // so that object accesses before the constructor completes (e.g. in the
4208   // debugger) will not cause a crash.
4209   if (map->constructor()->IsJSFunction() &&
4210       JSFunction::cast(map->constructor())->shared()->
4211           IsInobjectSlackTrackingInProgress()) {
4212     // We might want to shrink the object later.
4213     ASSERT(obj->GetInternalFieldCount() == 0);
4214     filler = Heap::one_pointer_filler_map();
4215   } else {
4216     filler = Heap::undefined_value();
4217   }
4218   obj->InitializeBody(map, Heap::undefined_value(), filler);
4219 }
4220
4221
4222 MaybeObject* Heap::AllocateJSObjectFromMap(Map* map, PretenureFlag pretenure) {
4223   // JSFunctions should be allocated using AllocateFunction to be
4224   // properly initialized.
4225   ASSERT(map->instance_type() != JS_FUNCTION_TYPE);
4226
4227   // Both types of global objects should be allocated using
4228   // AllocateGlobalObject to be properly initialized.
4229   ASSERT(map->instance_type() != JS_GLOBAL_OBJECT_TYPE);
4230   ASSERT(map->instance_type() != JS_BUILTINS_OBJECT_TYPE);
4231
4232   // Allocate the backing storage for the properties.
4233   int prop_size =
4234       map->pre_allocated_property_fields() +
4235       map->unused_property_fields() -
4236       map->inobject_properties();
4237   ASSERT(prop_size >= 0);
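  // Worked example (hypothetical numbers): a map with 3 pre-allocated property
  // fields, 1 unused property field and 3 in-object properties needs a backing
  // store with 3 + 1 - 3 = 1 out-of-object property slot.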
4238   Object* properties;
4239   { MaybeObject* maybe_properties = AllocateFixedArray(prop_size, pretenure);
4240     if (!maybe_properties->ToObject(&properties)) return maybe_properties;
4241   }
4242
4243   // Allocate the JSObject.
4244   AllocationSpace space =
4245       (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
4246   if (map->instance_size() > Page::kMaxNonCodeHeapObjectSize) space = LO_SPACE;
4247   Object* obj;
4248   MaybeObject* maybe_obj = Allocate(map, space);
4249   if (!maybe_obj->To(&obj)) return maybe_obj;
4250
4251   // Initialize the JSObject.
4252   InitializeJSObjectFromMap(JSObject::cast(obj),
4253                             FixedArray::cast(properties),
4254                             map);
4255   ASSERT(JSObject::cast(obj)->HasFastElements());
4256   return obj;
4257 }
4258
4259
4260 MaybeObject* Heap::AllocateJSObjectFromMapWithAllocationSite(Map* map,
4261     Handle<Object> allocation_site_info_payload) {
4262   // JSFunctions should be allocated using AllocateFunction to be
4263   // properly initialized.
4264   ASSERT(map->instance_type() != JS_FUNCTION_TYPE);
4265
4266   // Both types of global objects should be allocated using
4267   // AllocateGlobalObject to be properly initialized.
4268   ASSERT(map->instance_type() != JS_GLOBAL_OBJECT_TYPE);
4269   ASSERT(map->instance_type() != JS_BUILTINS_OBJECT_TYPE);
4270
4271   // Allocate the backing storage for the properties.
4272   int prop_size =
4273       map->pre_allocated_property_fields() +
4274       map->unused_property_fields() -
4275       map->inobject_properties();
4276   ASSERT(prop_size >= 0);
4277   Object* properties;
4278   { MaybeObject* maybe_properties = AllocateFixedArray(prop_size);
4279     if (!maybe_properties->ToObject(&properties)) return maybe_properties;
4280   }
4281
4282   // Allocate the JSObject.
4283   AllocationSpace space = NEW_SPACE;
4284   if (map->instance_size() > Page::kMaxNonCodeHeapObjectSize) space = LO_SPACE;
4285   Object* obj;
4286   MaybeObject* maybe_obj = AllocateWithAllocationSite(map, space,
4287       allocation_site_info_payload);
4288   if (!maybe_obj->To(&obj)) return maybe_obj;
4289
4290   // Initialize the JSObject.
4291   InitializeJSObjectFromMap(JSObject::cast(obj),
4292                             FixedArray::cast(properties),
4293                             map);
4294   ASSERT(JSObject::cast(obj)->HasFastElements());
4295   return obj;
4296 }
4297
4298
4299 MaybeObject* Heap::AllocateJSObject(JSFunction* constructor,
4300                                     PretenureFlag pretenure) {
4301   // Allocate the initial map if absent.
4302   if (!constructor->has_initial_map()) {
4303     Object* initial_map;
4304     { MaybeObject* maybe_initial_map = AllocateInitialMap(constructor);
4305       if (!maybe_initial_map->ToObject(&initial_map)) return maybe_initial_map;
4306     }
4307     constructor->set_initial_map(Map::cast(initial_map));
4308     Map::cast(initial_map)->set_constructor(constructor);
4309   }
4310   // Allocate the object based on the constructor's initial map.
4311   MaybeObject* result = AllocateJSObjectFromMap(
4312       constructor->initial_map(), pretenure);
4313 #ifdef DEBUG
4314   // Make sure result is NOT a global object if valid.
4315   Object* non_failure;
4316   ASSERT(!result->ToObject(&non_failure) || !non_failure->IsGlobalObject());
4317 #endif
4318   return result;
4319 }
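
// A minimal usage sketch of the allocation/failure-propagation pattern used by
// callers of AllocateJSObject (hypothetical caller, not part of this file;
// assumes a Heap* 'heap' and a JSFunction* 'constructor' are in scope):
//
//   Object* obj;
//   { MaybeObject* maybe_obj = heap->AllocateJSObject(constructor, NOT_TENURED);
//     if (!maybe_obj->ToObject(&obj)) return maybe_obj;  // Propagate failure.
//   }
//   JSObject* js_obj = JSObject::cast(obj);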
4320
4321
4322 MaybeObject* Heap::AllocateJSObjectWithAllocationSite(JSFunction* constructor,
4323     Handle<Object> allocation_site_info_payload) {
4324   // Allocate the initial map if absent.
4325   if (!constructor->has_initial_map()) {
4326     Object* initial_map;
4327     { MaybeObject* maybe_initial_map = AllocateInitialMap(constructor);
4328       if (!maybe_initial_map->ToObject(&initial_map)) return maybe_initial_map;
4329     }
4330     constructor->set_initial_map(Map::cast(initial_map));
4331     Map::cast(initial_map)->set_constructor(constructor);
4332   }
4333   // Allocate the object based on the constructor's initial map, or the
4334   // payload advice.
4335   Map* initial_map = constructor->initial_map();
4336
4337   JSGlobalPropertyCell* cell = JSGlobalPropertyCell::cast(
4338       *allocation_site_info_payload);
4339   Smi* smi = Smi::cast(cell->value());
4340   ElementsKind to_kind = static_cast<ElementsKind>(smi->value());
4341   AllocationSiteMode mode = TRACK_ALLOCATION_SITE;
4342   if (to_kind != initial_map->elements_kind()) {
4343     MaybeObject* maybe_new_map = initial_map->AsElementsKind(to_kind);
4344     if (!maybe_new_map->To(&initial_map)) return maybe_new_map;
4345     // Possibly alter the mode, since we found an updated elements kind
4346     // in the type info cell.
4347     mode = AllocationSiteInfo::GetMode(to_kind);
4348   }
4349
4350   MaybeObject* result;
4351   if (mode == TRACK_ALLOCATION_SITE) {
4352     result = AllocateJSObjectFromMapWithAllocationSite(initial_map,
4353         allocation_site_info_payload);
4354   } else {
4355     result = AllocateJSObjectFromMap(initial_map, NOT_TENURED);
4356   }
4357 #ifdef DEBUG
4358   // Make sure result is NOT a global object if valid.
4359   Object* non_failure;
4360   ASSERT(!result->ToObject(&non_failure) || !non_failure->IsGlobalObject());
4361 #endif
4362   return result;
4363 }
4364
4365
4366 MaybeObject* Heap::AllocateJSGeneratorObject(JSFunction *function) {
4367   ASSERT(function->shared()->is_generator());
4368   Map *map;
4369   if (function->has_initial_map()) {
4370     map = function->initial_map();
4371   } else {
4372     // Allocate the initial map if absent.
4373     MaybeObject* maybe_map = AllocateInitialMap(function);
4374     if (!maybe_map->To(&map)) return maybe_map;
4375     function->set_initial_map(map);
4376     map->set_constructor(function);
4377   }
4378   ASSERT(map->instance_type() == JS_GENERATOR_OBJECT_TYPE);
4379   return AllocateJSObjectFromMap(map);
4380 }
4381
4382
4383 MaybeObject* Heap::AllocateJSModule(Context* context, ScopeInfo* scope_info) {
4384   // Allocate a fresh map. Modules do not have a prototype.
4385   Map* map;
4386   MaybeObject* maybe_map = AllocateMap(JS_MODULE_TYPE, JSModule::kSize);
4387   if (!maybe_map->To(&map)) return maybe_map;
4388   // Allocate the object based on the map.
4389   JSModule* module;
4390   MaybeObject* maybe_module = AllocateJSObjectFromMap(map, TENURED);
4391   if (!maybe_module->To(&module)) return maybe_module;
4392   module->set_context(context);
4393   module->set_scope_info(scope_info);
4394   return module;
4395 }
4396
4397
4398 MaybeObject* Heap::AllocateJSArrayAndStorage(
4399     ElementsKind elements_kind,
4400     int length,
4401     int capacity,
4402     ArrayStorageAllocationMode mode,
4403     PretenureFlag pretenure) {
4404   MaybeObject* maybe_array = AllocateJSArray(elements_kind, pretenure);
4405   JSArray* array;
4406   if (!maybe_array->To(&array)) return maybe_array;
4407
4408   // TODO(mvstanton): this body of code is duplicated from
4409   // AllocateJSArrayStorage for performance reasons.
4410   ASSERT(capacity >= length);
4411
4412   if (capacity == 0) {
4413     array->set_length(Smi::FromInt(0));
4414     array->set_elements(empty_fixed_array());
4415     return array;
4416   }
4417
4418   FixedArrayBase* elms;
4419   MaybeObject* maybe_elms = NULL;
4420   if (IsFastDoubleElementsKind(elements_kind)) {
4421     if (mode == DONT_INITIALIZE_ARRAY_ELEMENTS) {
4422       maybe_elms = AllocateUninitializedFixedDoubleArray(capacity);
4423     } else {
4424       ASSERT(mode == INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
4425       maybe_elms = AllocateFixedDoubleArrayWithHoles(capacity);
4426     }
4427   } else {
4428     ASSERT(IsFastSmiOrObjectElementsKind(elements_kind));
4429     if (mode == DONT_INITIALIZE_ARRAY_ELEMENTS) {
4430       maybe_elms = AllocateUninitializedFixedArray(capacity);
4431     } else {
4432       ASSERT(mode == INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
4433       maybe_elms = AllocateFixedArrayWithHoles(capacity);
4434     }
4435   }
4436   if (!maybe_elms->To(&elms)) return maybe_elms;
4437
4438   array->set_elements(elms);
4439   array->set_length(Smi::FromInt(length));
4440   return array;
4441 }
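
// A minimal usage sketch for AllocateJSArrayAndStorage (hypothetical caller,
// not part of this file; assumes a Heap* 'heap' is in scope and that
// FAST_ELEMENTS names the desired ElementsKind):
//
//   JSArray* array;
//   { MaybeObject* maybe_array = heap->AllocateJSArrayAndStorage(
//         FAST_ELEMENTS, 0, 16, INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE,
//         NOT_TENURED);
//     if (!maybe_array->To(&array)) return maybe_array;
//   }
//   // 'array' now has length 0 and a 16-slot backing store filled with holes.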
4442
4443
4444 MaybeObject* Heap::AllocateJSArrayAndStorageWithAllocationSite(
4445     ElementsKind elements_kind,
4446     int length,
4447     int capacity,
4448     Handle<Object> allocation_site_payload,
4449     ArrayStorageAllocationMode mode) {
4450   MaybeObject* maybe_array = AllocateJSArrayWithAllocationSite(elements_kind,
4451       allocation_site_payload);
4452   JSArray* array;
4453   if (!maybe_array->To(&array)) return maybe_array;
4454   return AllocateJSArrayStorage(array, length, capacity, mode);
4455 }
4456
4457
4458 MaybeObject* Heap::AllocateJSArrayStorage(
4459     JSArray* array,
4460     int length,
4461     int capacity,
4462     ArrayStorageAllocationMode mode) {
4463   ASSERT(capacity >= length);
4464
4465   if (capacity == 0) {
4466     array->set_length(Smi::FromInt(0));
4467     array->set_elements(empty_fixed_array());
4468     return array;
4469   }
4470
4471   FixedArrayBase* elms;
4472   MaybeObject* maybe_elms = NULL;
4473   ElementsKind elements_kind = array->GetElementsKind();
4474   if (IsFastDoubleElementsKind(elements_kind)) {
4475     if (mode == DONT_INITIALIZE_ARRAY_ELEMENTS) {
4476       maybe_elms = AllocateUninitializedFixedDoubleArray(capacity);
4477     } else {
4478       ASSERT(mode == INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
4479       maybe_elms = AllocateFixedDoubleArrayWithHoles(capacity);
4480     }
4481   } else {
4482     ASSERT(IsFastSmiOrObjectElementsKind(elements_kind));
4483     if (mode == DONT_INITIALIZE_ARRAY_ELEMENTS) {
4484       maybe_elms = AllocateUninitializedFixedArray(capacity);
4485     } else {
4486       ASSERT(mode == INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
4487       maybe_elms = AllocateFixedArrayWithHoles(capacity);
4488     }
4489   }
4490   if (!maybe_elms->To(&elms)) return maybe_elms;
4491
4492   array->set_elements(elms);
4493   array->set_length(Smi::FromInt(length));
4494   return array;
4495 }
4496
4497
4498 MaybeObject* Heap::AllocateJSArrayWithElements(
4499     FixedArrayBase* elements,
4500     ElementsKind elements_kind,
4501     int length,
4502     PretenureFlag pretenure) {
4503   MaybeObject* maybe_array = AllocateJSArray(elements_kind, pretenure);
4504   JSArray* array;
4505   if (!maybe_array->To(&array)) return maybe_array;
4506
4507   array->set_elements(elements);
4508   array->set_length(Smi::FromInt(length));
4509   array->ValidateElements();
4510   return array;
4511 }
4512
4513
4514 MaybeObject* Heap::AllocateJSProxy(Object* handler, Object* prototype) {
4515   // Allocate map.
4516   // TODO(rossberg): Once we optimize proxies, think about a scheme to share
4517   // maps. Will probably depend on the identity of the handler object, too.
4518   Map* map;
4519   MaybeObject* maybe_map_obj = AllocateMap(JS_PROXY_TYPE, JSProxy::kSize);
4520   if (!maybe_map_obj->To<Map>(&map)) return maybe_map_obj;
4521   map->set_prototype(prototype);
4522
4523   // Allocate the proxy object.
4524   JSProxy* result;
4525   MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
4526   if (!maybe_result->To<JSProxy>(&result)) return maybe_result;
4527   result->InitializeBody(map->instance_size(), Smi::FromInt(0));
4528   result->set_handler(handler);
4529   result->set_hash(undefined_value(), SKIP_WRITE_BARRIER);
4530   return result;
4531 }
4532
4533
4534 MaybeObject* Heap::AllocateJSFunctionProxy(Object* handler,
4535                                            Object* call_trap,
4536                                            Object* construct_trap,
4537                                            Object* prototype) {
4538   // Allocate map.
4539   // TODO(rossberg): Once we optimize proxies, think about a scheme to share
4540   // maps. Will probably depend on the identity of the handler object, too.
4541   Map* map;
4542   MaybeObject* maybe_map_obj =
4543       AllocateMap(JS_FUNCTION_PROXY_TYPE, JSFunctionProxy::kSize);
4544   if (!maybe_map_obj->To<Map>(&map)) return maybe_map_obj;
4545   map->set_prototype(prototype);
4546
4547   // Allocate the proxy object.
4548   JSFunctionProxy* result;
4549   MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
4550   if (!maybe_result->To<JSFunctionProxy>(&result)) return maybe_result;
4551   result->InitializeBody(map->instance_size(), Smi::FromInt(0));
4552   result->set_handler(handler);
4553   result->set_hash(undefined_value(), SKIP_WRITE_BARRIER);
4554   result->set_call_trap(call_trap);
4555   result->set_construct_trap(construct_trap);
4556   return result;
4557 }
4558
4559
4560 MaybeObject* Heap::AllocateGlobalObject(JSFunction* constructor) {
4561   ASSERT(constructor->has_initial_map());
4562   Map* map = constructor->initial_map();
4563   ASSERT(map->is_dictionary_map());
4564
4565   // Make sure no field properties are described in the initial map.
4566   // This guarantees us that normalizing the properties does not
4567   // require us to change property values to JSGlobalPropertyCells.
4568   ASSERT(map->NextFreePropertyIndex() == 0);
4569
4570   // Make sure we don't have a ton of pre-allocated slots in the
4571   // global objects. They will be unused once we normalize the object.
4572   ASSERT(map->unused_property_fields() == 0);
4573   ASSERT(map->inobject_properties() == 0);
4574
4575   // Initial size of the backing store to avoid resize of the storage during
4576   // Initial size of the backing store, to avoid resizing the storage during
4577   // bootstrapping. The size differs between the JS global object and the
4578   int initial_size = map->instance_type() == JS_GLOBAL_OBJECT_TYPE ? 64 : 512;
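  // For example, a JS global object map with 10 own (accessor) descriptors
  // requests space for 10 * 2 + 64 = 84 entries below.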
4579
4580   // Allocate a dictionary object for backing storage.
4581   NameDictionary* dictionary;
4582   MaybeObject* maybe_dictionary =
4583       NameDictionary::Allocate(
4584           this,
4585           map->NumberOfOwnDescriptors() * 2 + initial_size);
4586   if (!maybe_dictionary->To(&dictionary)) return maybe_dictionary;
4587
4588   // The global object might be created from an object template with accessors.
4589   // Fill these accessors into the dictionary.
4590   DescriptorArray* descs = map->instance_descriptors();
4591   for (int i = 0; i < map->NumberOfOwnDescriptors(); i++) {
4592     PropertyDetails details = descs->GetDetails(i);
4593     ASSERT(details.type() == CALLBACKS);  // Only accessors are expected.
4594     PropertyDetails d = PropertyDetails(details.attributes(), CALLBACKS, i + 1);
4595     Object* value = descs->GetCallbacksObject(i);
4596     MaybeObject* maybe_value = AllocateJSGlobalPropertyCell(value);
4597     if (!maybe_value->ToObject(&value)) return maybe_value;
4598
4599     MaybeObject* maybe_added = dictionary->Add(descs->GetKey(i), value, d);
4600     if (!maybe_added->To(&dictionary)) return maybe_added;
4601   }
4602
4603   // Allocate the global object and initialize it with the backing store.
4604   JSObject* global;
4605   MaybeObject* maybe_global = Allocate(map, OLD_POINTER_SPACE);
4606   if (!maybe_global->To(&global)) return maybe_global;
4607
4608   InitializeJSObjectFromMap(global, dictionary, map);
4609
4610   // Create a new map for the global object.
4611   Map* new_map;
4612   MaybeObject* maybe_map = map->CopyDropDescriptors();
4613   if (!maybe_map->To(&new_map)) return maybe_map;
4614   new_map->set_dictionary_map(true);
4615
4616   // Set up the global object as a normalized object.
4617   global->set_map(new_map);
4618   global->set_properties(dictionary);
4619
4620   // Make sure result is a global object with properties in dictionary.
4621   ASSERT(global->IsGlobalObject());
4622   ASSERT(!global->HasFastProperties());
4623   return global;
4624 }
4625
4626
4627 MaybeObject* Heap::CopyJSObject(JSObject* source) {
4628   // Never used to copy functions.  If functions need to be copied, we
4629   // have to be careful to clear the literals array.
4630   SLOW_ASSERT(!source->IsJSFunction());
4631
4632   // Make the clone.
4633   Map* map = source->map();
4634   int object_size = map->instance_size();
4635   Object* clone;
4636
4637   WriteBarrierMode wb_mode = UPDATE_WRITE_BARRIER;
4638
4639   // If we're forced to always allocate, we use the general allocation
4640   // functions which may leave us with an object in old space.
4641   if (always_allocate()) {
4642     { MaybeObject* maybe_clone =
4643           AllocateRaw(object_size, NEW_SPACE, OLD_POINTER_SPACE);
4644       if (!maybe_clone->ToObject(&clone)) return maybe_clone;
4645     }
4646     Address clone_address = HeapObject::cast(clone)->address();
4647     CopyBlock(clone_address,
4648               source->address(),
4649               object_size);
4650     // Update write barrier for all fields that lie beyond the header.
4651     RecordWrites(clone_address,
4652                  JSObject::kHeaderSize,
4653                  (object_size - JSObject::kHeaderSize) / kPointerSize);
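    // For example, assuming the usual three-word JSObject header (map,
    // properties, elements) and an instance size of eight pointer words, the
    // five in-object fields following the header are recorded here.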
4654   } else {
4655     wb_mode = SKIP_WRITE_BARRIER;
4656
4657     { MaybeObject* maybe_clone = new_space_.AllocateRaw(object_size);
4658       if (!maybe_clone->ToObject(&clone)) return maybe_clone;
4659     }
4660     SLOW_ASSERT(InNewSpace(clone));
4661     // Since we know the clone is allocated in new space, we can copy
4662     // the contents without worrying about updating the write barrier.
4663     CopyBlock(HeapObject::cast(clone)->address(),
4664               source->address(),
4665               object_size);
4666   }
4667
4668   SLOW_ASSERT(
4669       JSObject::cast(clone)->GetElementsKind() == source->GetElementsKind());
4670   FixedArrayBase* elements = FixedArrayBase::cast(source->elements());
4671   FixedArray* properties = FixedArray::cast(source->properties());
4672   // Update elements if necessary.
4673   if (elements->length() > 0) {
4674     Object* elem;
4675     { MaybeObject* maybe_elem;
4676       if (elements->map() == fixed_cow_array_map()) {
4677         maybe_elem = FixedArray::cast(elements);
4678       } else if (source->HasFastDoubleElements()) {
4679         maybe_elem = CopyFixedDoubleArray(FixedDoubleArray::cast(elements));
4680       } else {
4681         maybe_elem = CopyFixedArray(FixedArray::cast(elements));
4682       }
4683       if (!maybe_elem->ToObject(&elem)) return maybe_elem;
4684     }
4685     JSObject::cast(clone)->set_elements(FixedArrayBase::cast(elem), wb_mode);
4686   }
4687   // Update properties if necessary.
4688   if (properties->length() > 0) {
4689     Object* prop;
4690     { MaybeObject* maybe_prop = CopyFixedArray(properties);
4691       if (!maybe_prop->ToObject(&prop)) return maybe_prop;
4692     }
4693     JSObject::cast(clone)->set_properties(FixedArray::cast(prop), wb_mode);
4694   }
4695   // Return the new clone.
4696   return clone;
4697 }
4698
4699
4700 MaybeObject* Heap::CopyJSObjectWithAllocationSite(JSObject* source) {
4701   // Never used to copy functions.  If functions need to be copied, we
4702   // have to be careful to clear the literals array.
4703   SLOW_ASSERT(!source->IsJSFunction());
4704
4705   // Make the clone.
4706   Map* map = source->map();
4707   int object_size = map->instance_size();
4708   Object* clone;
4709
4710   ASSERT(map->CanTrackAllocationSite());
4711   ASSERT(map->instance_type() == JS_ARRAY_TYPE);
4712   WriteBarrierMode wb_mode = UPDATE_WRITE_BARRIER;
4713
4714   // If we're forced to always allocate, we use the general allocation
4715   // functions which may leave us with an object in old space.
4716   int adjusted_object_size = object_size;
4717   if (always_allocate()) {
4718     // Only track the allocation site if we are certain to allocate in new space.
4719     const int kMinFreeNewSpaceAfterGC = InitialSemiSpaceSize() * 3/4;
4720     if ((object_size + AllocationSiteInfo::kSize) < kMinFreeNewSpaceAfterGC) {
4721       adjusted_object_size += AllocationSiteInfo::kSize;
4722     }
4723
4724     { MaybeObject* maybe_clone =
4725           AllocateRaw(adjusted_object_size, NEW_SPACE, OLD_POINTER_SPACE);
4726       if (!maybe_clone->ToObject(&clone)) return maybe_clone;
4727     }
4728     Address clone_address = HeapObject::cast(clone)->address();
4729     CopyBlock(clone_address,
4730               source->address(),
4731               object_size);
4732     // Update write barrier for all fields that lie beyond the header.
4733     int write_barrier_offset = adjusted_object_size > object_size
4734         ? JSArray::kSize + AllocationSiteInfo::kSize
4735         : JSObject::kHeaderSize;
4736     if (((object_size - write_barrier_offset) / kPointerSize) > 0) {
4737       RecordWrites(clone_address,
4738                    write_barrier_offset,
4739                    (object_size - write_barrier_offset) / kPointerSize);
4740     }
4741
4742     // Track allocation site information if we failed to allocate it inline.
4743     if (InNewSpace(clone) &&
4744         adjusted_object_size == object_size) {
4745       MaybeObject* maybe_alloc_info =
4746           AllocateStruct(ALLOCATION_SITE_INFO_TYPE);
4747       AllocationSiteInfo* alloc_info;
4748       if (maybe_alloc_info->To(&alloc_info)) {
4749         alloc_info->set_map_no_write_barrier(allocation_site_info_map());
4750         alloc_info->set_payload(source, SKIP_WRITE_BARRIER);
4751       }
4752     }
4753   } else {
4754     wb_mode = SKIP_WRITE_BARRIER;
4755     adjusted_object_size += AllocationSiteInfo::kSize;
4756
4757     { MaybeObject* maybe_clone = new_space_.AllocateRaw(adjusted_object_size);
4758       if (!maybe_clone->ToObject(&clone)) return maybe_clone;
4759     }
4760     SLOW_ASSERT(InNewSpace(clone));
4761     // Since we know the clone is allocated in new space, we can copy
4762     // the contents without worrying about updating the write barrier.
4763     CopyBlock(HeapObject::cast(clone)->address(),
4764               source->address(),
4765               object_size);
4766   }
4767
4768   if (adjusted_object_size > object_size) {
4769     AllocationSiteInfo* alloc_info = reinterpret_cast<AllocationSiteInfo*>(
4770         reinterpret_cast<Address>(clone) + object_size);
4771     alloc_info->set_map_no_write_barrier(allocation_site_info_map());
4772     alloc_info->set_payload(source, SKIP_WRITE_BARRIER);
4773   }
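  // In that case the clone and its tracking info form one contiguous block:
  //
  //   [ cloned object: object_size bytes ][ AllocationSiteInfo: kSize bytes ]
  //
  // i.e. the info struct sits directly behind the clone at clone + object_size.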
4774
4775   SLOW_ASSERT(
4776       JSObject::cast(clone)->GetElementsKind() == source->GetElementsKind());
4777   FixedArrayBase* elements = FixedArrayBase::cast(source->elements());
4778   FixedArray* properties = FixedArray::cast(source->properties());
4779   // Update elements if necessary.
4780   if (elements->length() > 0) {
4781     Object* elem;
4782     { MaybeObject* maybe_elem;
4783       if (elements->map() == fixed_cow_array_map()) {
4784         maybe_elem = FixedArray::cast(elements);
4785       } else if (source->HasFastDoubleElements()) {
4786         maybe_elem = CopyFixedDoubleArray(FixedDoubleArray::cast(elements));
4787       } else {
4788         maybe_elem = CopyFixedArray(FixedArray::cast(elements));
4789       }
4790       if (!maybe_elem->ToObject(&elem)) return maybe_elem;
4791     }
4792     JSObject::cast(clone)->set_elements(FixedArrayBase::cast(elem), wb_mode);
4793   }
4794   // Update properties if necessary.
4795   if (properties->length() > 0) {
4796     Object* prop;
4797     { MaybeObject* maybe_prop = CopyFixedArray(properties);
4798       if (!maybe_prop->ToObject(&prop)) return maybe_prop;
4799     }
4800     JSObject::cast(clone)->set_properties(FixedArray::cast(prop), wb_mode);
4801   }
4802   // Return the new clone.
4803   return clone;
4804 }
4805
4806
4807 MaybeObject* Heap::ReinitializeJSReceiver(
4808     JSReceiver* object, InstanceType type, int size) {
4809   ASSERT(type >= FIRST_JS_OBJECT_TYPE);
4810
4811   // Allocate fresh map.
4812   // TODO(rossberg): Once we optimize proxies, cache these maps.
4813   Map* map;
4814   MaybeObject* maybe = AllocateMap(type, size);
4815   if (!maybe->To<Map>(&map)) return maybe;
4816
4817   // Check that the receiver has at least the size of the fresh object.
4818   int size_difference = object->map()->instance_size() - map->instance_size();
4819   ASSERT(size_difference >= 0);
4820
4821   map->set_prototype(object->map()->prototype());
4822
4823   // Allocate the backing storage for the properties.
4824   int prop_size = map->unused_property_fields() - map->inobject_properties();
4825   Object* properties;
4826   maybe = AllocateFixedArray(prop_size, TENURED);
4827   if (!maybe->ToObject(&properties)) return maybe;
4828
4829   // Functions require some allocation, which might fail here.
4830   SharedFunctionInfo* shared = NULL;
4831   if (type == JS_FUNCTION_TYPE) {
4832     String* name;
4833     maybe =
4834         InternalizeOneByteString(STATIC_ASCII_VECTOR("<freezing call trap>"));
4835     if (!maybe->To<String>(&name)) return maybe;
4836     maybe = AllocateSharedFunctionInfo(name);
4837     if (!maybe->To<SharedFunctionInfo>(&shared)) return maybe;
4838   }
4839
4840   // Because of possible retries of this function after failure,
4841   // we must NOT fail after this point, where we have changed the type!
4842
4843   // Reset the map for the object.
4844   object->set_map(map);
4845   JSObject* jsobj = JSObject::cast(object);
4846
4847   // Reinitialize the object from the constructor map.
4848   InitializeJSObjectFromMap(jsobj, FixedArray::cast(properties), map);
4849
4850   // Functions require some minimal initialization.
4851   if (type == JS_FUNCTION_TYPE) {
4852     map->set_function_with_prototype(true);
4853     InitializeFunction(JSFunction::cast(object), shared, the_hole_value());
4854     JSFunction::cast(object)->set_context(
4855         isolate()->context()->native_context());
4856   }
4857
4858   // Put in filler if the new object is smaller than the old.
4859   if (size_difference > 0) {
4860     CreateFillerObjectAt(
4861         object->address() + map->instance_size(), size_difference);
4862   }
4863
4864   return object;
4865 }
4866
4867
4868 MaybeObject* Heap::ReinitializeJSGlobalProxy(JSFunction* constructor,
4869                                              JSGlobalProxy* object) {
4870   ASSERT(constructor->has_initial_map());
4871   Map* map = constructor->initial_map();
4872
4873   // Check that the already allocated object has the same size and type as
4874   // objects allocated using the constructor.
4875   ASSERT(map->instance_size() == object->map()->instance_size());
4876   ASSERT(map->instance_type() == object->map()->instance_type());
4877
4878   // Allocate the backing storage for the properties.
4879   int prop_size = map->unused_property_fields() - map->inobject_properties();
4880   Object* properties;
4881   { MaybeObject* maybe_properties = AllocateFixedArray(prop_size, TENURED);
4882     if (!maybe_properties->ToObject(&properties)) return maybe_properties;
4883   }
4884
4885   // Reset the map for the object.
4886   object->set_map(constructor->initial_map());
4887
4888   // Reinitialize the object from the constructor map.
4889   InitializeJSObjectFromMap(object, FixedArray::cast(properties), map);
4890   return object;
4891 }
4892
4893
4894 MaybeObject* Heap::AllocateStringFromOneByte(Vector<const uint8_t> string,
4895                                            PretenureFlag pretenure) {
4896   int length = string.length();
4897   if (length == 1) {
4898     return Heap::LookupSingleCharacterStringFromCode(string[0]);
4899   }
4900   Object* result;
4901   { MaybeObject* maybe_result =
4902         AllocateRawOneByteString(string.length(), pretenure);
4903     if (!maybe_result->ToObject(&result)) return maybe_result;
4904   }
4905
4906   // Copy the characters into the new object.
4907   CopyChars(SeqOneByteString::cast(result)->GetChars(),
4908             string.start(),
4909             length);
4910   return result;
4911 }
4912
4913
4914 MaybeObject* Heap::AllocateStringFromUtf8Slow(Vector<const char> string,
4915                                               int non_ascii_start,
4916                                               PretenureFlag pretenure) {
4917   // Continue counting the number of characters in the UTF-8 string, starting
4918   // from the first non-ascii character or word.
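  // For example, the 3-byte UTF-8 sequence for U+20AC decodes to a single
  // UTF-16 code unit, while the 4-byte sequence for U+1F600 decodes to a
  // surrogate pair, i.e. two UTF-16 code units.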
4919   Access<UnicodeCache::Utf8Decoder>
4920       decoder(isolate_->unicode_cache()->utf8_decoder());
4921   decoder->Reset(string.start() + non_ascii_start,
4922                  string.length() - non_ascii_start);
4923   int utf16_length = decoder->Utf16Length();
4924   ASSERT(utf16_length > 0);
4925   // Allocate string.
4926   Object* result;
4927   {
4928     int chars = non_ascii_start + utf16_length;
4929     MaybeObject* maybe_result = AllocateRawTwoByteString(chars, pretenure);
4930     if (!maybe_result->ToObject(&result)) return maybe_result;
4931   }
4932   // Convert and copy the characters into the new object.
4933   SeqTwoByteString* twobyte = SeqTwoByteString::cast(result);
4934   // Copy ascii portion.
4935   uint16_t* data = twobyte->GetChars();
4936   if (non_ascii_start != 0) {
4937     const char* ascii_data = string.start();
4938     for (int i = 0; i < non_ascii_start; i++) {
4939       *data++ = *ascii_data++;
4940     }
4941   }
4942   // Now write the remainder.
4943   decoder->WriteUtf16(data, utf16_length);
4944   return result;
4945 }
4946
4947
4948 MaybeObject* Heap::AllocateStringFromTwoByte(Vector<const uc16> string,
4949                                              PretenureFlag pretenure) {
4950   // Check if the string can be represented as a one-byte string.
4951   Object* result;
4952   int length = string.length();
4953   const uc16* start = string.start();
4954
4955   if (String::IsOneByte(start, length)) {
4956     MaybeObject* maybe_result = AllocateRawOneByteString(length, pretenure);
4957     if (!maybe_result->ToObject(&result)) return maybe_result;
4958     CopyChars(SeqOneByteString::cast(result)->GetChars(), start, length);
4959   } else {  // It's not a one-byte string.
4960     MaybeObject* maybe_result = AllocateRawTwoByteString(length, pretenure);
4961     if (!maybe_result->ToObject(&result)) return maybe_result;
4962     CopyChars(SeqTwoByteString::cast(result)->GetChars(), start, length);
4963   }
4964   return result;
4965 }
4966
4967
4968 Map* Heap::InternalizedStringMapForString(String* string) {
4969   // If the string is in new space it cannot be used as internalized.
4970   if (InNewSpace(string)) return NULL;
4971
4972   // Find the corresponding internalized string map for strings.
4973   switch (string->map()->instance_type()) {
4974     case STRING_TYPE: return internalized_string_map();
4975     case ASCII_STRING_TYPE: return ascii_internalized_string_map();
4976     case CONS_STRING_TYPE: return cons_internalized_string_map();
4977     case CONS_ASCII_STRING_TYPE: return cons_ascii_internalized_string_map();
4978     case EXTERNAL_STRING_TYPE: return external_internalized_string_map();
4979     case EXTERNAL_ASCII_STRING_TYPE:
4980       return external_ascii_internalized_string_map();
4981     case EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE:
4982       return external_internalized_string_with_one_byte_data_map();
4983     case SHORT_EXTERNAL_STRING_TYPE:
4984       return short_external_internalized_string_map();
4985     case SHORT_EXTERNAL_ASCII_STRING_TYPE:
4986       return short_external_ascii_internalized_string_map();
4987     case SHORT_EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE:
4988       return short_external_internalized_string_with_one_byte_data_map();
4989     default: return NULL;  // No match found.
4990   }
4991 }
4992
4993
4994 static inline void WriteOneByteData(Vector<const char> vector,
4995                                     uint8_t* chars,
4996                                     int len) {
4997   // Only works for ASCII.
4998   ASSERT(vector.length() == len);
4999   OS::MemCopy(chars, vector.start(), len);
5000 }
5001
5002 static inline void WriteTwoByteData(Vector<const char> vector,
5003                                     uint16_t* chars,
5004                                     int len) {
5005   const uint8_t* stream = reinterpret_cast<const uint8_t*>(vector.start());
5006   unsigned stream_length = vector.length();
5007   while (stream_length != 0) {
5008     unsigned consumed = 0;
5009     uint32_t c = unibrow::Utf8::ValueOf(stream, stream_length, &consumed);
5010     ASSERT(c != unibrow::Utf8::kBadChar);
5011     ASSERT(consumed <= stream_length);
5012     stream_length -= consumed;
5013     stream += consumed;
5014     if (c > unibrow::Utf16::kMaxNonSurrogateCharCode) {
5015       len -= 2;
5016       if (len < 0) break;
5017       *chars++ = unibrow::Utf16::LeadSurrogate(c);
5018       *chars++ = unibrow::Utf16::TrailSurrogate(c);
5019     } else {
5020       len -= 1;
5021       if (len < 0) break;
5022       *chars++ = c;
5023     }
5024   }
5025   ASSERT(stream_length == 0);
5026   ASSERT(len == 0);
5027 }
5028
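
// Worked example for WriteTwoByteData above: a decoded c = U+1F600 exceeds
// unibrow::Utf16::kMaxNonSurrogateCharCode, so it is written as the surrogate
// pair 0xD83D, 0xDE00 and consumes two of the 'len' uint16_t slots.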
5029
5030 static inline void WriteOneByteData(String* s, uint8_t* chars, int len) {
5031   ASSERT(s->length() == len);
5032   String::WriteToFlat(s, chars, 0, len);
5033 }
5034
5035 static inline void WriteTwoByteData(String* s, uint16_t* chars, int len) {
5036   ASSERT(s->length() == len);
5037   String::WriteToFlat(s, chars, 0, len);
5038 }
5039
5040
5041 template<bool is_one_byte, typename T>
5042 MaybeObject* Heap::AllocateInternalizedStringImpl(
5043     T t, int chars, uint32_t hash_field) {
5044   ASSERT(chars >= 0);
5045   // Compute map and object size.
5046   int size;
5047   Map* map;
5048
5049   if (is_one_byte) {
5050     if (chars > SeqOneByteString::kMaxLength) {
5051       return Failure::OutOfMemoryException(0x9);
5052     }
5053     map = ascii_internalized_string_map();
5054     size = SeqOneByteString::SizeFor(chars);
5055   } else {
5056     if (chars > SeqTwoByteString::kMaxLength) {
5057       return Failure::OutOfMemoryException(0xa);
5058     }
5059     map = internalized_string_map();
5060     size = SeqTwoByteString::SizeFor(chars);
5061   }
5062
5063   // Allocate string.
5064   Object* result;
5065   { MaybeObject* maybe_result = (size > Page::kMaxNonCodeHeapObjectSize)
5066                    ? lo_space_->AllocateRaw(size, NOT_EXECUTABLE)
5067                    : old_data_space_->AllocateRaw(size);
5068     if (!maybe_result->ToObject(&result)) return maybe_result;
5069   }
5070
5071   reinterpret_cast<HeapObject*>(result)->set_map_no_write_barrier(map);
5072   // Set length and hash fields of the allocated string.
5073   String* answer = String::cast(result);
5074   answer->set_length(chars);
5075   answer->set_hash_field(hash_field);
5076
5077   ASSERT_EQ(size, answer->Size());
5078
5079   if (is_one_byte) {
5080     WriteOneByteData(t, SeqOneByteString::cast(answer)->GetChars(), chars);
5081   } else {
5082     WriteTwoByteData(t, SeqTwoByteString::cast(answer)->GetChars(), chars);
5083   }
5084   return answer;
5085 }
5086
5087
5088 // Need explicit instantiations.
5089 template
5090 MaybeObject* Heap::AllocateInternalizedStringImpl<true>(String*, int, uint32_t);
5091 template
5092 MaybeObject* Heap::AllocateInternalizedStringImpl<false>(
5093     String*, int, uint32_t);
5094 template
5095 MaybeObject* Heap::AllocateInternalizedStringImpl<false>(
5096     Vector<const char>, int, uint32_t);
5097
5098
5099 MaybeObject* Heap::AllocateRawOneByteString(int length,
5100                                             PretenureFlag pretenure) {
5101   if (length < 0 || length > SeqOneByteString::kMaxLength) {
5102     return Failure::OutOfMemoryException(0xb);
5103   }
5104
5105   int size = SeqOneByteString::SizeFor(length);
5106   ASSERT(size <= SeqOneByteString::kMaxSize);
5107
5108   AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
5109   AllocationSpace retry_space = OLD_DATA_SPACE;
5110
5111   if (space == NEW_SPACE) {
5112     if (size > kMaxObjectSizeInNewSpace) {
5113       // Allocate in large object space; the retry space will be ignored.
5114       space = LO_SPACE;
5115     } else if (size > Page::kMaxNonCodeHeapObjectSize) {
5116       // Allocate in new space, retry in large object space.
5117       retry_space = LO_SPACE;
5118     }
5119   } else if (space == OLD_DATA_SPACE &&
5120              size > Page::kMaxNonCodeHeapObjectSize) {
5121     space = LO_SPACE;
5122   }
5123   Object* result;
5124   { MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
5125     if (!maybe_result->ToObject(&result)) return maybe_result;
5126   }
5127
5128   // Partially initialize the object.
5129   HeapObject::cast(result)->set_map_no_write_barrier(ascii_string_map());
5130   String::cast(result)->set_length(length);
5131   String::cast(result)->set_hash_field(String::kEmptyHashField);
5132   ASSERT_EQ(size, HeapObject::cast(result)->Size());
5133
5134   return result;
5135 }
5136
5137
5138 MaybeObject* Heap::AllocateRawTwoByteString(int length,
5139                                             PretenureFlag pretenure) {
5140   if (length < 0 || length > SeqTwoByteString::kMaxLength) {
5141     return Failure::OutOfMemoryException(0xc);
5142   }
5143   int size = SeqTwoByteString::SizeFor(length);
5144   ASSERT(size <= SeqTwoByteString::kMaxSize);
5145   AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
5146   AllocationSpace retry_space = OLD_DATA_SPACE;
5147
5148   if (space == NEW_SPACE) {
5149     if (size > kMaxObjectSizeInNewSpace) {
5150       // Allocate in large object space; the retry space will be ignored.
5151       space = LO_SPACE;
5152     } else if (size > Page::kMaxNonCodeHeapObjectSize) {
5153       // Allocate in new space, retry in large object space.
5154       retry_space = LO_SPACE;
5155     }
5156   } else if (space == OLD_DATA_SPACE &&
5157              size > Page::kMaxNonCodeHeapObjectSize) {
5158     space = LO_SPACE;
5159   }
5160   Object* result;
5161   { MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
5162     if (!maybe_result->ToObject(&result)) return maybe_result;
5163   }
5164
5165   // Partially initialize the object.
5166   HeapObject::cast(result)->set_map_no_write_barrier(string_map());
5167   String::cast(result)->set_length(length);
5168   String::cast(result)->set_hash_field(String::kEmptyHashField);
5169   ASSERT_EQ(size, HeapObject::cast(result)->Size());
5170   return result;
5171 }
5172
5173
5174 MaybeObject* Heap::AllocateJSArray(
5175     ElementsKind elements_kind,
5176     PretenureFlag pretenure) {
5177   Context* native_context = isolate()->context()->native_context();
5178   JSFunction* array_function = native_context->array_function();
5179   Map* map = array_function->initial_map();
5180   Object* maybe_map_array = native_context->js_array_maps();
5181   if (!maybe_map_array->IsUndefined()) {
5182     Object* maybe_transitioned_map =
5183         FixedArray::cast(maybe_map_array)->get(elements_kind);
5184     if (!maybe_transitioned_map->IsUndefined()) {
5185       map = Map::cast(maybe_transitioned_map);
5186     }
5187   }
5188
5189   return AllocateJSObjectFromMap(map, pretenure);
5190 }
5191
5192
5193 MaybeObject* Heap::AllocateJSArrayWithAllocationSite(
5194     ElementsKind elements_kind,
5195     Handle<Object> allocation_site_info_payload) {
5196   Context* native_context = isolate()->context()->native_context();
5197   JSFunction* array_function = native_context->array_function();
5198   Map* map = array_function->initial_map();
5199   Object* maybe_map_array = native_context->js_array_maps();
5200   if (!maybe_map_array->IsUndefined()) {
5201     Object* maybe_transitioned_map =
5202         FixedArray::cast(maybe_map_array)->get(elements_kind);
5203     if (!maybe_transitioned_map->IsUndefined()) {
5204       map = Map::cast(maybe_transitioned_map);
5205     }
5206   }
5207   return AllocateJSObjectFromMapWithAllocationSite(map,
5208       allocation_site_info_payload);
5209 }
5210
5211
5212 MaybeObject* Heap::AllocateEmptyFixedArray() {
5213   int size = FixedArray::SizeFor(0);
5214   Object* result;
5215   { MaybeObject* maybe_result =
5216         AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE);
5217     if (!maybe_result->ToObject(&result)) return maybe_result;
5218   }
5219   // Initialize the object.
5220   reinterpret_cast<FixedArray*>(result)->set_map_no_write_barrier(
5221       fixed_array_map());
5222   reinterpret_cast<FixedArray*>(result)->set_length(0);
5223   return result;
5224 }
5225
5226
5227 MaybeObject* Heap::AllocateRawFixedArray(int length) {
5228   if (length < 0 || length > FixedArray::kMaxLength) {
5229     return Failure::OutOfMemoryException(0xd);
5230   }
5231   ASSERT(length > 0);
5232   // Use the general function if we're forced to always allocate.
5233   if (always_allocate()) return AllocateFixedArray(length, TENURED);
5234   // Allocate the raw data for a fixed array.
5235   int size = FixedArray::SizeFor(length);
5236   return size <= kMaxObjectSizeInNewSpace
5237       ? new_space_.AllocateRaw(size)
5238       : lo_space_->AllocateRaw(size, NOT_EXECUTABLE);
5239 }
5240
5241
5242 MaybeObject* Heap::CopyFixedArrayWithMap(FixedArray* src, Map* map) {
5243   int len = src->length();
5244   Object* obj;
5245   { MaybeObject* maybe_obj = AllocateRawFixedArray(len);
5246     if (!maybe_obj->ToObject(&obj)) return maybe_obj;
5247   }
5248   if (InNewSpace(obj)) {
5249     HeapObject* dst = HeapObject::cast(obj);
5250     dst->set_map_no_write_barrier(map);
5251     CopyBlock(dst->address() + kPointerSize,
5252               src->address() + kPointerSize,
5253               FixedArray::SizeFor(len) - kPointerSize);
5254     return obj;
5255   }
5256   HeapObject::cast(obj)->set_map_no_write_barrier(map);
5257   FixedArray* result = FixedArray::cast(obj);
5258   result->set_length(len);
5259
5260   // Copy the contents.
5261   AssertNoAllocation no_gc;
5262   WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
5263   for (int i = 0; i < len; i++) result->set(i, src->get(i), mode);
5264   return result;
5265 }
5266
5267
5268 MaybeObject* Heap::CopyFixedDoubleArrayWithMap(FixedDoubleArray* src,
5269                                                Map* map) {
5270   int len = src->length();
5271   Object* obj;
5272   { MaybeObject* maybe_obj = AllocateRawFixedDoubleArray(len, NOT_TENURED);
5273     if (!maybe_obj->ToObject(&obj)) return maybe_obj;
5274   }
5275   HeapObject* dst = HeapObject::cast(obj);
5276   dst->set_map_no_write_barrier(map);
5277   CopyBlock(
5278       dst->address() + FixedDoubleArray::kLengthOffset,
5279       src->address() + FixedDoubleArray::kLengthOffset,
5280       FixedDoubleArray::SizeFor(len) - FixedDoubleArray::kLengthOffset);
5281   return obj;
5282 }
5283
5284
5285 MaybeObject* Heap::AllocateFixedArray(int length) {
5286   ASSERT(length >= 0);
5287   if (length == 0) return empty_fixed_array();
5288   Object* result;
5289   { MaybeObject* maybe_result = AllocateRawFixedArray(length);
5290     if (!maybe_result->ToObject(&result)) return maybe_result;
5291   }
5292   // Initialize header.
5293   FixedArray* array = reinterpret_cast<FixedArray*>(result);
5294   array->set_map_no_write_barrier(fixed_array_map());
5295   array->set_length(length);
5296   // Initialize body.
5297   ASSERT(!InNewSpace(undefined_value()));
5298   MemsetPointer(array->data_start(), undefined_value(), length);
5299   return result;
5300 }
5301
5302
5303 MaybeObject* Heap::AllocateRawFixedArray(int length, PretenureFlag pretenure) {
5304   if (length < 0 || length > FixedArray::kMaxLength) {
5305     return Failure::OutOfMemoryException(0xe);
5306   }
5307
5308   AllocationSpace space =
5309       (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
5310   int size = FixedArray::SizeFor(length);
5311   if (space == NEW_SPACE && size > kMaxObjectSizeInNewSpace) {
5312     // Too big for new space.
5313     space = LO_SPACE;
5314   } else if (space == OLD_POINTER_SPACE &&
5315              size > Page::kMaxNonCodeHeapObjectSize) {
5316     // Too big for old pointer space.
5317     space = LO_SPACE;
5318   }
5319
5320   AllocationSpace retry_space =
5321       (size <= Page::kMaxNonCodeHeapObjectSize) ? OLD_POINTER_SPACE : LO_SPACE;
5322
5323   return AllocateRaw(size, space, retry_space);
5324 }
5325
5326
5327 MUST_USE_RESULT static MaybeObject* AllocateFixedArrayWithFiller(
5328     Heap* heap,
5329     int length,
5330     PretenureFlag pretenure,
5331     Object* filler) {
5332   ASSERT(length >= 0);
5333   ASSERT(heap->empty_fixed_array()->IsFixedArray());
5334   if (length == 0) return heap->empty_fixed_array();
5335
5336   ASSERT(!heap->InNewSpace(filler));
5337   Object* result;
5338   { MaybeObject* maybe_result = heap->AllocateRawFixedArray(length, pretenure);
5339     if (!maybe_result->ToObject(&result)) return maybe_result;
5340   }
5341
5342   HeapObject::cast(result)->set_map_no_write_barrier(heap->fixed_array_map());
5343   FixedArray* array = FixedArray::cast(result);
5344   array->set_length(length);
5345   MemsetPointer(array->data_start(), filler, length);
5346   return array;
5347 }
5348
5349
5350 MaybeObject* Heap::AllocateFixedArray(int length, PretenureFlag pretenure) {
5351   return AllocateFixedArrayWithFiller(this,
5352                                       length,
5353                                       pretenure,
5354                                       undefined_value());
5355 }
5356
5357
5358 MaybeObject* Heap::AllocateFixedArrayWithHoles(int length,
5359                                                PretenureFlag pretenure) {
5360   return AllocateFixedArrayWithFiller(this,
5361                                       length,
5362                                       pretenure,
5363                                       the_hole_value());
5364 }
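
// A minimal usage sketch for AllocateFixedArrayWithHoles (hypothetical caller,
// not part of this file; assumes a Heap* 'heap' is in scope):
//
//   FixedArray* backing;
//   { MaybeObject* maybe = heap->AllocateFixedArrayWithHoles(8, TENURED);
//     if (!maybe->To(&backing)) return maybe;
//   }
//   // All 8 slots of 'backing' now hold the_hole_value().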
5365
5366
5367 MaybeObject* Heap::AllocateUninitializedFixedArray(int length) {
5368   if (length == 0) return empty_fixed_array();
5369
5370   Object* obj;
5371   { MaybeObject* maybe_obj = AllocateRawFixedArray(length);
5372     if (!maybe_obj->ToObject(&obj)) return maybe_obj;
5373   }
5374
5375   reinterpret_cast<FixedArray*>(obj)->set_map_no_write_barrier(
5376       fixed_array_map());
5377   FixedArray::cast(obj)->set_length(length);
5378   return obj;
5379 }
5380
5381
5382 MaybeObject* Heap::AllocateEmptyFixedDoubleArray() {
5383   int size = FixedDoubleArray::SizeFor(0);
5384   Object* result;
5385   { MaybeObject* maybe_result =
5386         AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE);
5387     if (!maybe_result->ToObject(&result)) return maybe_result;
5388   }
5389   // Initialize the object.
5390   reinterpret_cast<FixedDoubleArray*>(result)->set_map_no_write_barrier(
5391       fixed_double_array_map());
5392   reinterpret_cast<FixedDoubleArray*>(result)->set_length(0);
5393   return result;
5394 }
5395
5396
5397 MaybeObject* Heap::AllocateUninitializedFixedDoubleArray(
5398     int length,
5399     PretenureFlag pretenure) {
5400   if (length == 0) return empty_fixed_array();
5401
5402   Object* elements_object;
5403   MaybeObject* maybe_obj = AllocateRawFixedDoubleArray(length, pretenure);
5404   if (!maybe_obj->ToObject(&elements_object)) return maybe_obj;
5405   FixedDoubleArray* elements =
5406       reinterpret_cast<FixedDoubleArray*>(elements_object);
5407
5408   elements->set_map_no_write_barrier(fixed_double_array_map());
5409   elements->set_length(length);
5410   return elements;
5411 }
5412
5413
5414 MaybeObject* Heap::AllocateFixedDoubleArrayWithHoles(
5415     int length,
5416     PretenureFlag pretenure) {
5417   if (length == 0) return empty_fixed_array();
5418
5419   Object* elements_object;
5420   MaybeObject* maybe_obj = AllocateRawFixedDoubleArray(length, pretenure);
5421   if (!maybe_obj->ToObject(&elements_object)) return maybe_obj;
5422   FixedDoubleArray* elements =
5423       reinterpret_cast<FixedDoubleArray*>(elements_object);
5424
5425   for (int i = 0; i < length; ++i) {
5426     elements->set_the_hole(i);
5427   }
5428
5429   elements->set_map_no_write_barrier(fixed_double_array_map());
5430   elements->set_length(length);
5431   return elements;
5432 }
5433
5434
5435 MaybeObject* Heap::AllocateRawFixedDoubleArray(int length,
5436                                                PretenureFlag pretenure) {
5437   if (length < 0 || length > FixedDoubleArray::kMaxLength) {
5438     return Failure::OutOfMemoryException(0xf);
5439   }
5440
5441   AllocationSpace space =
5442       (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
5443   int size = FixedDoubleArray::SizeFor(length);
5444
5445 #ifndef V8_HOST_ARCH_64_BIT
5446   size += kPointerSize;
5447 #endif
5448
5449   if (space == NEW_SPACE && size > kMaxObjectSizeInNewSpace) {
5450     // Too big for new space.
5451     space = LO_SPACE;
5452   } else if (space == OLD_DATA_SPACE &&
5453              size > Page::kMaxNonCodeHeapObjectSize) {
5454     // Too big for old data space.
5455     space = LO_SPACE;
5456   }
5457
5458   AllocationSpace retry_space =
5459       (size <= Page::kMaxNonCodeHeapObjectSize) ? OLD_DATA_SPACE : LO_SPACE;
5460
5461   HeapObject* object;
5462   { MaybeObject* maybe_object = AllocateRaw(size, space, retry_space);
5463     if (!maybe_object->To<HeapObject>(&object)) return maybe_object;
5464   }
5465
5466   return EnsureDoubleAligned(this, object, size);
5467 }
5468
5469
5470 MaybeObject* Heap::AllocateHashTable(int length, PretenureFlag pretenure) {
5471   Object* result;
5472   { MaybeObject* maybe_result = AllocateFixedArray(length, pretenure);
5473     if (!maybe_result->ToObject(&result)) return maybe_result;
5474   }
5475   reinterpret_cast<HeapObject*>(result)->set_map_no_write_barrier(
5476       hash_table_map());
5477   ASSERT(result->IsHashTable());
5478   return result;
5479 }
5480
5481
5482 MaybeObject* Heap::AllocateSymbol() {
5483   // Statically ensure that it is safe to allocate symbols in paged spaces.
5484   STATIC_ASSERT(Symbol::kSize <= Page::kNonCodeObjectAreaSize);
5485
5486   Object* result;
5487   MaybeObject* maybe =
5488       AllocateRaw(Symbol::kSize, OLD_POINTER_SPACE, OLD_POINTER_SPACE);
5489   if (!maybe->ToObject(&result)) return maybe;
5490
5491   HeapObject::cast(result)->set_map_no_write_barrier(symbol_map());
5492
5493   // Generate a random hash value.
5494   int hash;
5495   int attempts = 0;
5496   do {
5497     hash = V8::RandomPrivate(isolate()) & Name::kHashBitMask;
5498     attempts++;
5499   } while (hash == 0 && attempts < 30);
5500   if (hash == 0) hash = 1;  // never return 0
5501
5502   Symbol::cast(result)->set_hash_field(
5503       Name::kIsNotArrayIndexMask | (hash << Name::kHashShift));
5504   Symbol::cast(result)->set_name(undefined_value());
5505
5506   ASSERT(result->IsSymbol());
5507   return result;
5508 }
5509
5510
5511 MaybeObject* Heap::AllocateNativeContext() {
5512   Object* result;
5513   { MaybeObject* maybe_result =
5514         AllocateFixedArray(Context::NATIVE_CONTEXT_SLOTS);
5515     if (!maybe_result->ToObject(&result)) return maybe_result;
5516   }
5517   Context* context = reinterpret_cast<Context*>(result);
5518   context->set_map_no_write_barrier(native_context_map());
5519   context->set_js_array_maps(undefined_value());
5520   ASSERT(context->IsNativeContext());
5521   ASSERT(result->IsContext());
5522   return result;
5523 }
5524
5525
5526 MaybeObject* Heap::AllocateGlobalContext(JSFunction* function,
5527                                          ScopeInfo* scope_info) {
5528   Object* result;
5529   { MaybeObject* maybe_result =
5530         AllocateFixedArray(scope_info->ContextLength(), TENURED);
5531     if (!maybe_result->ToObject(&result)) return maybe_result;
5532   }
5533   Context* context = reinterpret_cast<Context*>(result);
5534   context->set_map_no_write_barrier(global_context_map());
5535   context->set_closure(function);
5536   context->set_previous(function->context());
5537   context->set_extension(scope_info);
5538   context->set_global_object(function->context()->global_object());
5539   ASSERT(context->IsGlobalContext());
5540   ASSERT(result->IsContext());
5541   return context;
5542 }
5543
5544
5545 MaybeObject* Heap::AllocateModuleContext(ScopeInfo* scope_info) {
5546   Object* result;
5547   { MaybeObject* maybe_result =
5548         AllocateFixedArray(scope_info->ContextLength(), TENURED);
5549     if (!maybe_result->ToObject(&result)) return maybe_result;
5550   }
5551   Context* context = reinterpret_cast<Context*>(result);
5552   context->set_map_no_write_barrier(module_context_map());
5553   // Instance link will be set later.
5554   context->set_extension(Smi::FromInt(0));
5555   return context;
5556 }
5557
5558
5559 MaybeObject* Heap::AllocateFunctionContext(int length, JSFunction* function) {
5560   ASSERT(length >= Context::MIN_CONTEXT_SLOTS);
5561   Object* result;
5562   { MaybeObject* maybe_result = AllocateFixedArray(length);
5563     if (!maybe_result->ToObject(&result)) return maybe_result;
5564   }
5565   Context* context = reinterpret_cast<Context*>(result);
5566   context->set_map_no_write_barrier(function_context_map());
5567   context->set_closure(function);
5568   context->set_previous(function->context());
5569   context->set_extension(Smi::FromInt(0));
5570   context->set_global_object(function->context()->global_object());
5571   return context;
5572 }
5573
5574
5575 MaybeObject* Heap::AllocateCatchContext(JSFunction* function,
5576                                         Context* previous,
5577                                         String* name,
5578                                         Object* thrown_object) {
5579   STATIC_ASSERT(Context::MIN_CONTEXT_SLOTS == Context::THROWN_OBJECT_INDEX);
5580   Object* result;
5581   { MaybeObject* maybe_result =
5582         AllocateFixedArray(Context::MIN_CONTEXT_SLOTS + 1);
5583     if (!maybe_result->ToObject(&result)) return maybe_result;
5584   }
5585   Context* context = reinterpret_cast<Context*>(result);
5586   context->set_map_no_write_barrier(catch_context_map());
5587   context->set_closure(function);
5588   context->set_previous(previous);
5589   context->set_extension(name);
5590   context->set_global_object(previous->global_object());
5591   context->set(Context::THROWN_OBJECT_INDEX, thrown_object);
5592   return context;
5593 }
5594
5595
5596 MaybeObject* Heap::AllocateWithContext(JSFunction* function,
5597                                        Context* previous,
5598                                        JSObject* extension) {
5599   Object* result;
5600   { MaybeObject* maybe_result = AllocateFixedArray(Context::MIN_CONTEXT_SLOTS);
5601     if (!maybe_result->ToObject(&result)) return maybe_result;
5602   }
5603   Context* context = reinterpret_cast<Context*>(result);
5604   context->set_map_no_write_barrier(with_context_map());
5605   context->set_closure(function);
5606   context->set_previous(previous);
5607   context->set_extension(extension);
5608   context->set_global_object(previous->global_object());
5609   return context;
5610 }
5611
5612
5613 MaybeObject* Heap::AllocateBlockContext(JSFunction* function,
5614                                         Context* previous,
5615                                         ScopeInfo* scope_info) {
5616   Object* result;
5617   { MaybeObject* maybe_result =
5618         AllocateFixedArrayWithHoles(scope_info->ContextLength());
5619     if (!maybe_result->ToObject(&result)) return maybe_result;
5620   }
5621   Context* context = reinterpret_cast<Context*>(result);
5622   context->set_map_no_write_barrier(block_context_map());
5623   context->set_closure(function);
5624   context->set_previous(previous);
5625   context->set_extension(scope_info);
5626   context->set_global_object(previous->global_object());
5627   return context;
5628 }
5629
5630
5631 MaybeObject* Heap::AllocateScopeInfo(int length) {
5632   FixedArray* scope_info;
5633   MaybeObject* maybe_scope_info = AllocateFixedArray(length, TENURED);
5634   if (!maybe_scope_info->To(&scope_info)) return maybe_scope_info;
5635   scope_info->set_map_no_write_barrier(scope_info_map());
5636   return scope_info;
5637 }
5638
5639
5640 MaybeObject* Heap::AllocateExternal(void* value) {
5641   Foreign* foreign;
5642   { MaybeObject* maybe_result = AllocateForeign(static_cast<Address>(value));
5643     if (!maybe_result->To(&foreign)) return maybe_result;
5644   }
5645   JSObject* external;
5646   { MaybeObject* maybe_result = AllocateJSObjectFromMap(external_map());
5647     if (!maybe_result->To(&external)) return maybe_result;
5648   }
5649   external->SetInternalField(0, foreign);
5650   return external;
5651 }
5652
5653
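// Allocates a struct of the given type in old pointer space, falling back to
// large object space when the instance size exceeds the page limit.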
5654 MaybeObject* Heap::AllocateStruct(InstanceType type) {
5655   Map* map;
5656   switch (type) {
5657 #define MAKE_CASE(NAME, Name, name) \
5658     case NAME##_TYPE: map = name##_map(); break;
5659 STRUCT_LIST(MAKE_CASE)
5660 #undef MAKE_CASE
5661     default:
5662       UNREACHABLE();
5663       return Failure::InternalError();
5664   }
5665   int size = map->instance_size();
5666   AllocationSpace space =
5667       (size > Page::kMaxNonCodeHeapObjectSize) ? LO_SPACE : OLD_POINTER_SPACE;
5668   Object* result;
5669   { MaybeObject* maybe_result = Allocate(map, space);
5670     if (!maybe_result->ToObject(&result)) return maybe_result;
5671   }
5672   Struct::cast(result)->InitializeBody(size);
5673   return result;
5674 }
5675
5676
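// The heap can only be iterated when the old spaces have been swept precisely;
// conservatively swept pages may still contain unswept garbage.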
5677 bool Heap::IsHeapIterable() {
5678   return (!old_pointer_space()->was_swept_conservatively() &&
5679           !old_data_space()->was_swept_conservatively());
5680 }
5681
5682
5683 void Heap::EnsureHeapIsIterable() {
5684   ASSERT(IsAllocationAllowed());
5685   if (!IsHeapIterable()) {
5686     CollectAllGarbage(kMakeHeapIterableMask, "Heap::EnsureHeapIsIterable");
5687   }
5688   ASSERT(IsHeapIterable());
5689 }
5690
5691
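// Performs one incremental marking step and, once marking is complete,
// finalizes it with a full GC.  If no GC has happened since the last idle GC,
// the mutator is assumed to be inactive and new space is shrunk and
// uncommitted.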
5692 void Heap::AdvanceIdleIncrementalMarking(intptr_t step_size) {
5693   incremental_marking()->Step(step_size,
5694                               IncrementalMarking::NO_GC_VIA_STACK_GUARD);
5695
5696   if (incremental_marking()->IsComplete()) {
5697     bool uncommit = false;
5698     if (gc_count_at_last_idle_gc_ == gc_count_) {
5699       // No GC since the last full GC; the mutator is probably not active.
5700       isolate_->compilation_cache()->Clear();
5701       uncommit = true;
5702     }
5703     CollectAllGarbage(kNoGCFlags, "idle notification: finalize incremental");
5704     gc_count_at_last_idle_gc_ = gc_count_;
5705     if (uncommit) {
5706       new_space_.Shrink();
5707       UncommitFromSpace();
5708     }
5709   }
5710 }
5711
5712
5713 bool Heap::IdleNotification(int hint) {
5714   // Hints greater than this value indicate that
5715   // the embedder is requesting a lot of GC work.
5716   const int kMaxHint = 1000;
5717   // Minimal hint that allows doing a full GC.
5718   const int kMinHintForFullGC = 100;
5719   intptr_t size_factor = Min(Max(hint, 20), kMaxHint) / 4;
5720   // The size factor is in range [5..250]. The numbers here are chosen from
5721   // experiments. If you change them, make sure to test with
5722   // chrome/performance_ui_tests --gtest_filter="GeneralMixMemoryTest.*".
5723   intptr_t step_size =
5724       size_factor * IncrementalMarking::kAllocatedThreshold;
5725
5726   if (contexts_disposed_ > 0) {
5727     if (hint >= kMaxHint) {
5728       // The embedder is requesting a lot of GC work after context disposal,
5729       // we age inline caches so that they don't keep objects from
5730       // the old context alive.
5731       AgeInlineCaches();
5732     }
5733     int mark_sweep_time = Min(TimeMarkSweepWouldTakeInMs(), 1000);
5734     if (hint >= mark_sweep_time && !FLAG_expose_gc &&
5735         incremental_marking()->IsStopped()) {
5736       HistogramTimerScope scope(isolate_->counters()->gc_context());
5737       CollectAllGarbage(kReduceMemoryFootprintMask,
5738                         "idle notification: contexts disposed");
5739     } else {
5740       AdvanceIdleIncrementalMarking(step_size);
5741       contexts_disposed_ = 0;
5742     }
5743     // After context disposal there is likely a lot of garbage remaining, so
5744     // reset the idle notification counters in order to trigger more
5745     // incremental GCs on subsequent idle notifications.
5746     StartIdleRound();
5747     return false;
5748   }
5749
5750   if (!FLAG_incremental_marking || FLAG_expose_gc || Serializer::enabled()) {
5751     return IdleGlobalGC();
5752   }
5753
5754   // By doing small chunks of GC work in each IdleNotification we perform a
5755   // round of incremental GCs and after that wait until the mutator creates
5756   // enough garbage to justify a new round.
5757   // An incremental GC progresses as follows:
5758   // 1. many incremental marking steps,
5759   // 2. one old space mark-sweep-compact,
5760   // 3. many lazy sweep steps.
5761   // Use mark-sweep-compact events to count incremental GCs in a round.
5762
5763   if (incremental_marking()->IsStopped()) {
5764     if (!mark_compact_collector()->AreSweeperThreadsActivated() &&
5765         !IsSweepingComplete() &&
5766         !AdvanceSweepers(static_cast<int>(step_size))) {
5767       return false;
5768     }
5769   }
5770
5771   if (mark_sweeps_since_idle_round_started_ >= kMaxMarkSweepsInIdleRound) {
5772     if (EnoughGarbageSinceLastIdleRound()) {
5773       StartIdleRound();
5774     } else {
5775       return true;
5776     }
5777   }
5778
5779   int new_mark_sweeps = ms_count_ - ms_count_at_last_idle_notification_;
5780   mark_sweeps_since_idle_round_started_ += new_mark_sweeps;
5781   ms_count_at_last_idle_notification_ = ms_count_;
5782
5783   int remaining_mark_sweeps = kMaxMarkSweepsInIdleRound -
5784                               mark_sweeps_since_idle_round_started_;
5785
5786   if (remaining_mark_sweeps <= 0) {
5787     FinishIdleRound();
5788     return true;
5789   }
5790
5791   if (incremental_marking()->IsStopped()) {
5792     // If there are no more than two GCs left in this idle round and we are
5793     // allowed to do a full GC, then make those GCs full in order to compact
5794     // the code space.
5795     // TODO(ulan): Once we enable code compaction for incremental marking,
5796     // we can get rid of this special case and always start incremental marking.
5797     if (remaining_mark_sweeps <= 2 && hint >= kMinHintForFullGC) {
5798       CollectAllGarbage(kReduceMemoryFootprintMask,
5799                         "idle notification: finalize idle round");
5800     } else {
5801       incremental_marking()->Start();
5802     }
5803   }
5804   if (!incremental_marking()->IsStopped()) {
5805     AdvanceIdleIncrementalMarking(step_size);
5806   }
5807   return false;
5808 }
5809
5810
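// Fallback idle-time GC used when incremental marking is disabled: successive
// idle notifications escalate from a new space collection to full mark-sweep
// collections before the round is declared finished.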
5811 bool Heap::IdleGlobalGC() {
5812   static const int kIdlesBeforeScavenge = 4;
5813   static const int kIdlesBeforeMarkSweep = 7;
5814   static const int kIdlesBeforeMarkCompact = 8;
5815   static const int kMaxIdleCount = kIdlesBeforeMarkCompact + 1;
5816   static const unsigned int kGCsBetweenCleanup = 4;
5817
5818   if (!last_idle_notification_gc_count_init_) {
5819     last_idle_notification_gc_count_ = gc_count_;
5820     last_idle_notification_gc_count_init_ = true;
5821   }
5822
5823   bool uncommit = true;
5824   bool finished = false;
5825
5826   // Reset the number of idle notifications received when a number of
5827   // GCs have taken place. This allows another round of cleanup based
5828   // on idle notifications if enough work has been carried out to
5829   // provoke a number of garbage collections.
5830   if (gc_count_ - last_idle_notification_gc_count_ < kGCsBetweenCleanup) {
5831     number_idle_notifications_ =
5832         Min(number_idle_notifications_ + 1, kMaxIdleCount);
5833   } else {
5834     number_idle_notifications_ = 0;
5835     last_idle_notification_gc_count_ = gc_count_;
5836   }
5837
5838   if (number_idle_notifications_ == kIdlesBeforeScavenge) {
5839     CollectGarbage(NEW_SPACE, "idle notification");
5840     new_space_.Shrink();
5841     last_idle_notification_gc_count_ = gc_count_;
5842   } else if (number_idle_notifications_ == kIdlesBeforeMarkSweep) {
5843     // Before doing the mark-sweep collections we clear the
5844     // compilation cache to avoid hanging on to source code and
5845     // generated code for cached functions.
5846     isolate_->compilation_cache()->Clear();
5847
5848     CollectAllGarbage(kReduceMemoryFootprintMask, "idle notification");
5849     new_space_.Shrink();
5850     last_idle_notification_gc_count_ = gc_count_;
5851
5852   } else if (number_idle_notifications_ == kIdlesBeforeMarkCompact) {
5853     CollectAllGarbage(kReduceMemoryFootprintMask, "idle notification");
5854     new_space_.Shrink();
5855     last_idle_notification_gc_count_ = gc_count_;
5856     number_idle_notifications_ = 0;
5857     finished = true;
5858   } else if (number_idle_notifications_ > kIdlesBeforeMarkCompact) {
5859     // If we have received more than kIdlesBeforeMarkCompact idle
5860     // notifications we do not perform any cleanup because we don't
5861     // expect to gain much by doing so.
5862     finished = true;
5863   }
5864
5865   if (uncommit) UncommitFromSpace();
5866
5867   return finished;
5868 }
5869
5870
5871 #ifdef DEBUG
5872
5873 void Heap::Print() {
5874   if (!HasBeenSetUp()) return;
5875   isolate()->PrintStack();
5876   AllSpaces spaces(this);
5877   for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
5878     space->Print();
5879   }
5880 }
5881
5882
5883 void Heap::ReportCodeStatistics(const char* title) {
5884   PrintF(">>>>>> Code Stats (%s) >>>>>>\n", title);
5885   PagedSpace::ResetCodeStatistics();
5886   // We do not look for code in new space, map space, or old space.  If code
5887   // somehow ends up in those spaces, we would miss it here.
5888   code_space_->CollectCodeStatistics();
5889   lo_space_->CollectCodeStatistics();
5890   PagedSpace::ReportCodeStatistics();
5891 }
5892
5893
5894 // This function expects that NewSpace's allocated objects histogram is
5895 // populated (via a call to CollectStatistics or else as a side effect of a
5896 // just-completed scavenge collection).
5897 void Heap::ReportHeapStatistics(const char* title) {
5898   USE(title);
5899   PrintF(">>>>>> =============== %s (%d) =============== >>>>>>\n",
5900          title, gc_count_);
5901   PrintF("old_gen_promotion_limit_ %" V8_PTR_PREFIX "d\n",
5902          old_gen_promotion_limit_);
5903   PrintF("old_gen_allocation_limit_ %" V8_PTR_PREFIX "d\n",
5904          old_gen_allocation_limit_);
5905   PrintF("old_gen_limit_factor_ %d\n", old_gen_limit_factor_);
5906
5907   PrintF("\n");
5908   PrintF("Number of handles : %d\n", HandleScope::NumberOfHandles(isolate_));
5909   isolate_->global_handles()->PrintStats();
5910   PrintF("\n");
5911
5912   PrintF("Heap statistics : ");
5913   isolate_->memory_allocator()->ReportStatistics();
5914   PrintF("To space : ");
5915   new_space_.ReportStatistics();
5916   PrintF("Old pointer space : ");
5917   old_pointer_space_->ReportStatistics();
5918   PrintF("Old data space : ");
5919   old_data_space_->ReportStatistics();
5920   PrintF("Code space : ");
5921   code_space_->ReportStatistics();
5922   PrintF("Map space : ");
5923   map_space_->ReportStatistics();
5924   PrintF("Cell space : ");
5925   cell_space_->ReportStatistics();
5926   PrintF("Large object space : ");
5927   lo_space_->ReportStatistics();
5928   PrintF(">>>>>> ========================================= >>>>>>\n");
5929 }
5930
5931 #endif  // DEBUG
5932
5933 bool Heap::Contains(HeapObject* value) {
5934   return Contains(value->address());
5935 }
5936
5937
5938 bool Heap::Contains(Address addr) {
5939   if (OS::IsOutsideAllocatedSpace(addr)) return false;
5940   return HasBeenSetUp() &&
5941     (new_space_.ToSpaceContains(addr) ||
5942      old_pointer_space_->Contains(addr) ||
5943      old_data_space_->Contains(addr) ||
5944      code_space_->Contains(addr) ||
5945      map_space_->Contains(addr) ||
5946      cell_space_->Contains(addr) ||
5947      lo_space_->SlowContains(addr));
5948 }
5949
5950
5951 bool Heap::InSpace(HeapObject* value, AllocationSpace space) {
5952   return InSpace(value->address(), space);
5953 }
5954
5955
5956 bool Heap::InSpace(Address addr, AllocationSpace space) {
5957   if (OS::IsOutsideAllocatedSpace(addr)) return false;
5958   if (!HasBeenSetUp()) return false;
5959
5960   switch (space) {
5961     case NEW_SPACE:
5962       return new_space_.ToSpaceContains(addr);
5963     case OLD_POINTER_SPACE:
5964       return old_pointer_space_->Contains(addr);
5965     case OLD_DATA_SPACE:
5966       return old_data_space_->Contains(addr);
5967     case CODE_SPACE:
5968       return code_space_->Contains(addr);
5969     case MAP_SPACE:
5970       return map_space_->Contains(addr);
5971     case CELL_SPACE:
5972       return cell_space_->Contains(addr);
5973     case LO_SPACE:
5974       return lo_space_->SlowContains(addr);
5975   }
5976
5977   return false;
5978 }
5979
5980
5981 #ifdef VERIFY_HEAP
5982 void Heap::Verify() {
5983   CHECK(HasBeenSetUp());
5984
5985   store_buffer()->Verify();
5986
5987   VerifyPointersVisitor visitor;
5988   IterateRoots(&visitor, VISIT_ONLY_STRONG);
5989
5990   new_space_.Verify();
5991
5992   old_pointer_space_->Verify(&visitor);
5993   map_space_->Verify(&visitor);
5994
5995   VerifyPointersVisitor no_dirty_regions_visitor;
5996   old_data_space_->Verify(&no_dirty_regions_visitor);
5997   code_space_->Verify(&no_dirty_regions_visitor);
5998   cell_space_->Verify(&no_dirty_regions_visitor);
5999
6000   lo_space_->Verify();
6001 }
6002 #endif
6003
6004
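// The Internalize* functions below look a string up in (and, if necessary,
// add it to) the string table and then install the possibly reallocated table
// as the new string table root.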
6005 MaybeObject* Heap::InternalizeUtf8String(Vector<const char> string) {
6006   Object* result = NULL;
6007   Object* new_table;
6008   { MaybeObject* maybe_new_table =
6009         string_table()->LookupUtf8String(string, &result);
6010     if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
6011   }
6012   // Can't use set_string_table because StringTable::cast knows that
6013   // StringTable is a singleton and checks for identity.
6014   roots_[kStringTableRootIndex] = new_table;
6015   ASSERT(result != NULL);
6016   return result;
6017 }
6018
6019
6020 MaybeObject* Heap::InternalizeOneByteString(Vector<const uint8_t> string) {
6021   Object* result = NULL;
6022   Object* new_table;
6023   { MaybeObject* maybe_new_table =
6024         string_table()->LookupOneByteString(string, &result);
6025     if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
6026   }
6027   // Can't use set_string_table because StringTable::cast knows that
6028   // StringTable is a singleton and checks for identity.
6029   roots_[kStringTableRootIndex] = new_table;
6030   ASSERT(result != NULL);
6031   return result;
6032 }
6033
6034
6035 MaybeObject* Heap::InternalizeOneByteString(Handle<SeqOneByteString> string,
6036                                             int from,
6037                                             int length) {
6038   Object* result = NULL;
6039   Object* new_table;
6040   { MaybeObject* maybe_new_table =
6041         string_table()->LookupSubStringOneByteString(string,
6042                                                    from,
6043                                                    length,
6044                                                    &result);
6045     if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
6046   }
6047   // Can't use set_string_table because StringTable::cast knows that
6048   // StringTable is a singleton and checks for identity.
6049   roots_[kStringTableRootIndex] = new_table;
6050   ASSERT(result != NULL);
6051   return result;
6052 }
6053
6054
6055 MaybeObject* Heap::InternalizeTwoByteString(Vector<const uc16> string) {
6056   Object* result = NULL;
6057   Object* new_table;
6058   { MaybeObject* maybe_new_table =
6059         string_table()->LookupTwoByteString(string, &result);
6060     if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
6061   }
6062   // Can't use set_string_table because StringTable::cast knows that
6063   // StringTable is a singleton and checks for identity.
6064   roots_[kStringTableRootIndex] = new_table;
6065   ASSERT(result != NULL);
6066   return result;
6067 }
6068
6069
6070 MaybeObject* Heap::InternalizeString(String* string) {
6071   if (string->IsInternalizedString()) return string;
6072   Object* result = NULL;
6073   Object* new_table;
6074   { MaybeObject* maybe_new_table =
6075         string_table()->LookupString(string, &result);
6076     if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
6077   }
6078   // Can't use set_string_table because StringTable::cast knows that
6079   // StringTable is a singleton and checks for identity.
6080   roots_[kStringTableRootIndex] = new_table;
6081   ASSERT(result != NULL);
6082   return result;
6083 }
6084
6085
6086 bool Heap::InternalizeStringIfExists(String* string, String** result) {
6087   if (string->IsInternalizedString()) {
6088     *result = string;
6089     return true;
6090   }
6091   return string_table()->LookupStringIfExists(string, result);
6092 }
6093
6094
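// Overwrites every word of from-space with a zap value so that stale pointers
// into it are easy to recognize.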
6095 void Heap::ZapFromSpace() {
6096   NewSpacePageIterator it(new_space_.FromSpaceStart(),
6097                           new_space_.FromSpaceEnd());
6098   while (it.has_next()) {
6099     NewSpacePage* page = it.next();
6100     for (Address cursor = page->area_start(), limit = page->area_end();
6101          cursor < limit;
6102          cursor += kPointerSize) {
6103       Memory::Address_at(cursor) = kFromSpaceZapValue;
6104     }
6105   }
6106 }
6107
6108
6109 void Heap::IterateAndMarkPointersToFromSpace(Address start,
6110                                              Address end,
6111                                              ObjectSlotCallback callback) {
6112   Address slot_address = start;
6113
6114   // We are not collecting slots on new space objects during mutation, thus
6115   // we have to scan for pointers to evacuation candidates when we promote
6116   // objects. But we should not record any slots in non-black objects. A
6117   // grey object's slots would be rescanned anyway. A white object might not
6118   // survive until the end of the collection, so it would be a violation of
6119   // the invariant to record its slots.
6120   bool record_slots = false;
6121   if (incremental_marking()->IsCompacting()) {
6122     MarkBit mark_bit = Marking::MarkBitFrom(HeapObject::FromAddress(start));
6123     record_slots = Marking::IsBlack(mark_bit);
6124   }
6125
6126   while (slot_address < end) {
6127     Object** slot = reinterpret_cast<Object**>(slot_address);
6128     Object* object = *slot;
6129     // If the store buffer becomes overfull we mark pages as being exempt from
6130     // the store buffer.  These pages are scanned to find pointers that point
6131     // to the new space.  In that case we may hit newly promoted objects and
6132     // fix the pointers before the promotion queue gets to them.  Thus the 'if'.
6133     if (object->IsHeapObject()) {
6134       if (Heap::InFromSpace(object)) {
6135         callback(reinterpret_cast<HeapObject**>(slot),
6136                  HeapObject::cast(object));
6137         Object* new_object = *slot;
6138         if (InNewSpace(new_object)) {
6139           SLOW_ASSERT(Heap::InToSpace(new_object));
6140           SLOW_ASSERT(new_object->IsHeapObject());
6141           store_buffer_.EnterDirectlyIntoStoreBuffer(
6142               reinterpret_cast<Address>(slot));
6143         }
6144         SLOW_ASSERT(!MarkCompactCollector::IsOnEvacuationCandidate(new_object));
6145       } else if (record_slots &&
6146                  MarkCompactCollector::IsOnEvacuationCandidate(object)) {
6147         mark_compact_collector()->RecordSlot(slot, slot, object);
6148       }
6149     }
6150     slot_address += kPointerSize;
6151   }
6152 }
6153
6154
6155 #ifdef DEBUG
6156 typedef bool (*CheckStoreBufferFilter)(Object** addr);
6157
6158
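// Returns true if addr falls on one of the pointer fields of a Map, assuming
// map space objects are packed back to back at Map::kSize intervals.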
6159 bool IsAMapPointerAddress(Object** addr) {
6160   uintptr_t a = reinterpret_cast<uintptr_t>(addr);
6161   int mod = a % Map::kSize;
6162   return mod >= Map::kPointerFieldsBeginOffset &&
6163          mod < Map::kPointerFieldsEndOffset;
6164 }
6165
6166
6167 bool EverythingsAPointer(Object** addr) {
6168   return true;
6169 }
6170
6171
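// Walks the slots in [current, limit) and checks that every slot pointing
// into new space has a matching entry in the sorted store buffer.  Free space
// and the current linear allocation area are skipped.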
6172 static void CheckStoreBuffer(Heap* heap,
6173                              Object** current,
6174                              Object** limit,
6175                              Object**** store_buffer_position,
6176                              Object*** store_buffer_top,
6177                              CheckStoreBufferFilter filter,
6178                              Address special_garbage_start,
6179                              Address special_garbage_end) {
6180   Map* free_space_map = heap->free_space_map();
6181   for ( ; current < limit; current++) {
6182     Object* o = *current;
6183     Address current_address = reinterpret_cast<Address>(current);
6184     // Skip free space.
6185     if (o == free_space_map) {
6187       FreeSpace* free_space =
6188           FreeSpace::cast(HeapObject::FromAddress(current_address));
6189       int skip = free_space->Size();
6190       ASSERT(current_address + skip <= reinterpret_cast<Address>(limit));
6191       ASSERT(skip > 0);
6192       current_address += skip - kPointerSize;
6193       current = reinterpret_cast<Object**>(current_address);
6194       continue;
6195     }
6196     // Skip the current linear allocation area between top and limit, which
6197     // is not marked with the free space map but can contain junk.
6198     if (current_address == special_garbage_start &&
6199         special_garbage_end != special_garbage_start) {
6200       current_address = special_garbage_end - kPointerSize;
6201       current = reinterpret_cast<Object**>(current_address);
6202       continue;
6203     }
6204     if (!(*filter)(current)) continue;
6205     ASSERT(current_address < special_garbage_start ||
6206            current_address >= special_garbage_end);
6207     ASSERT(reinterpret_cast<uintptr_t>(o) != kFreeListZapValue);
6208     // We have to check that the pointer does not point into new space
6209     // without trying to cast it to a heap object since the hash field of
6210     // a string can contain values like 1 and 3 which are tagged null
6211     // pointers.
6212     if (!heap->InNewSpace(o)) continue;
6213     while (**store_buffer_position < current &&
6214            *store_buffer_position < store_buffer_top) {
6215       (*store_buffer_position)++;
6216     }
6217     if (**store_buffer_position != current ||
6218         *store_buffer_position == store_buffer_top) {
6219       Object** obj_start = current;
6220       while (!(*obj_start)->IsMap()) obj_start--;
6221       UNREACHABLE();
6222     }
6223   }
6224 }
6225
6226
6227 // Check that the store buffer contains all intergenerational pointers by
6228 // scanning a page and ensuring that all pointers to young space are in the
6229 // store buffer.
6230 void Heap::OldPointerSpaceCheckStoreBuffer() {
6231   OldSpace* space = old_pointer_space();
6232   PageIterator pages(space);
6233
6234   store_buffer()->SortUniq();
6235
6236   while (pages.has_next()) {
6237     Page* page = pages.next();
6238     Object** current = reinterpret_cast<Object**>(page->area_start());
6239
6240     Address end = page->area_end();
6241
6242     Object*** store_buffer_position = store_buffer()->Start();
6243     Object*** store_buffer_top = store_buffer()->Top();
6244
6245     Object** limit = reinterpret_cast<Object**>(end);
6246     CheckStoreBuffer(this,
6247                      current,
6248                      limit,
6249                      &store_buffer_position,
6250                      store_buffer_top,
6251                      &EverythingsAPointer,
6252                      space->top(),
6253                      space->limit());
6254   }
6255 }
6256
6257
6258 void Heap::MapSpaceCheckStoreBuffer() {
6259   MapSpace* space = map_space();
6260   PageIterator pages(space);
6261
6262   store_buffer()->SortUniq();
6263
6264   while (pages.has_next()) {
6265     Page* page = pages.next();
6266     Object** current = reinterpret_cast<Object**>(page->area_start());
6267
6268     Address end = page->area_end();
6269
6270     Object*** store_buffer_position = store_buffer()->Start();
6271     Object*** store_buffer_top = store_buffer()->Top();
6272
6273     Object** limit = reinterpret_cast<Object**>(end);
6274     CheckStoreBuffer(this,
6275                      current,
6276                      limit,
6277                      &store_buffer_position,
6278                      store_buffer_top,
6279                      &IsAMapPointerAddress,
6280                      space->top(),
6281                      space->limit());
6282   }
6283 }
6284
6285
6286 void Heap::LargeObjectSpaceCheckStoreBuffer() {
6287   LargeObjectIterator it(lo_space());
6288   for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
6289     // We only have code, sequential strings, or fixed arrays in large
6290     // object space, and only fixed arrays can possibly contain pointers to
6291     // the young generation.
6292     if (object->IsFixedArray()) {
6293       Object*** store_buffer_position = store_buffer()->Start();
6294       Object*** store_buffer_top = store_buffer()->Top();
6295       Object** current = reinterpret_cast<Object**>(object->address());
6296       Object** limit =
6297           reinterpret_cast<Object**>(object->address() + object->Size());
6298       CheckStoreBuffer(this,
6299                        current,
6300                        limit,
6301                        &store_buffer_position,
6302                        store_buffer_top,
6303                        &EverythingsAPointer,
6304                        NULL,
6305                        NULL);
6306     }
6307   }
6308 }
6309 #endif
6310
6311
6312 void Heap::IterateRoots(ObjectVisitor* v, VisitMode mode) {
6313   IterateStrongRoots(v, mode);
6314   IterateWeakRoots(v, mode);
6315 }
6316
6317
6318 void Heap::IterateWeakRoots(ObjectVisitor* v, VisitMode mode) {
6319   v->VisitPointer(reinterpret_cast<Object**>(&roots_[kStringTableRootIndex]));
6320   v->Synchronize(VisitorSynchronization::kStringTable);
6321   if (mode != VISIT_ALL_IN_SCAVENGE &&
6322       mode != VISIT_ALL_IN_SWEEP_NEWSPACE) {
6323     // Scavenge collections have special processing for this.
6324     external_string_table_.Iterate(v);
6325     error_object_list_.Iterate(v);
6326   }
6327   v->Synchronize(VisitorSynchronization::kExternalStringsTable);
6328 }
6329
6330
6331 void Heap::IterateStrongRoots(ObjectVisitor* v, VisitMode mode) {
6332   v->VisitPointers(&roots_[0], &roots_[kStrongRootListLength]);
6333   v->Synchronize(VisitorSynchronization::kStrongRootList);
6334
6335   v->VisitPointer(BitCast<Object**>(&hidden_string_));
6336   v->Synchronize(VisitorSynchronization::kInternalizedString);
6337
6338   isolate_->bootstrapper()->Iterate(v);
6339   v->Synchronize(VisitorSynchronization::kBootstrapper);
6340   isolate_->Iterate(v);
6341   v->Synchronize(VisitorSynchronization::kTop);
6342   Relocatable::Iterate(v);
6343   v->Synchronize(VisitorSynchronization::kRelocatable);
6344
6345 #ifdef ENABLE_DEBUGGER_SUPPORT
6346   isolate_->debug()->Iterate(v);
6347   if (isolate_->deoptimizer_data() != NULL) {
6348     isolate_->deoptimizer_data()->Iterate(v);
6349   }
6350 #endif
6351   v->Synchronize(VisitorSynchronization::kDebug);
6352   isolate_->compilation_cache()->Iterate(v);
6353   v->Synchronize(VisitorSynchronization::kCompilationCache);
6354
6355   // Iterate over local handles in handle scopes.
6356   isolate_->handle_scope_implementer()->Iterate(v);
6357   isolate_->IterateDeferredHandles(v);
6358   v->Synchronize(VisitorSynchronization::kHandleScope);
6359
6360   // Iterate over the builtin code objects and code stubs in the
6361   // heap. Note that it is not necessary to iterate over code objects
6362   // on scavenge collections.
6363   if (mode != VISIT_ALL_IN_SCAVENGE) {
6364     isolate_->builtins()->IterateBuiltins(v);
6365   }
6366   v->Synchronize(VisitorSynchronization::kBuiltins);
6367
6368   // Iterate over global handles.
6369   switch (mode) {
6370     case VISIT_ONLY_STRONG:
6371       isolate_->global_handles()->IterateStrongRoots(v);
6372       break;
6373     case VISIT_ALL_IN_SCAVENGE:
6374       isolate_->global_handles()->IterateNewSpaceStrongAndDependentRoots(v);
6375       break;
6376     case VISIT_ALL_IN_SWEEP_NEWSPACE:
6377     case VISIT_ALL:
6378       isolate_->global_handles()->IterateAllRoots(v);
6379       break;
6380   }
6381   v->Synchronize(VisitorSynchronization::kGlobalHandles);
6382
6383   // Iterate over pointers being held by inactive threads.
6384   isolate_->thread_manager()->Iterate(v);
6385   v->Synchronize(VisitorSynchronization::kThreadManager);
6386
6387   // Iterate over the pointers the Serialization/Deserialization code is
6388   // holding.
6389   // During garbage collection this keeps the partial snapshot cache alive.
6390   // During deserialization of the startup snapshot this creates the partial
6391   // snapshot cache and deserializes the objects it refers to.  During
6392   // serialization this does nothing, since the partial snapshot cache is
6393   // empty.  However the next thing we do is create the partial snapshot,
6394   // filling up the partial snapshot cache with objects it needs as we go.
6395   SerializerDeserializer::Iterate(v);
6396   // We don't do a v->Synchronize call here, because in debug mode that will
6397   // output a flag to the snapshot.  However at this point the serializer and
6398   // deserializer are deliberately a little unsynchronized (see above) so the
6399   // checking of the sync flag in the snapshot would fail.
6400 }
6401
6402
6403 // TODO(1236194): Since the heap size is configurable on the command line
6404 // and through the API, we should gracefully handle the case that the heap
6405 // size is not big enough to fit all the initial objects.
6406 bool Heap::ConfigureHeap(int max_semispace_size,
6407                          intptr_t max_old_gen_size,
6408                          intptr_t max_executable_size) {
6409   if (HasBeenSetUp()) return false;
6410
6411   if (FLAG_stress_compaction) {
6412     // This will cause more frequent GCs when stressing.
6413     max_semispace_size_ = Page::kPageSize;
6414   }
6415
6416   if (max_semispace_size > 0) {
6417     if (max_semispace_size < Page::kPageSize) {
6418       max_semispace_size = Page::kPageSize;
6419       if (FLAG_trace_gc) {
6420         PrintPID("Max semispace size cannot be less than %dkbytes\n",
6421                  Page::kPageSize >> 10);
6422       }
6423     }
6424     max_semispace_size_ = max_semispace_size;
6425   }
6426
6427   if (Snapshot::IsEnabled()) {
6428     // If we are using a snapshot we always reserve the default amount
6429     // of memory for each semispace because code in the snapshot has
6430     // write-barrier code that relies on the size and alignment of new
6431     // space.  We therefore cannot use a larger max semispace size
6432     // than the default reserved semispace size.
6433     if (max_semispace_size_ > reserved_semispace_size_) {
6434       max_semispace_size_ = reserved_semispace_size_;
6435       if (FLAG_trace_gc) {
6436         PrintPID("Max semispace size cannot be more than %dkbytes\n",
6437                  reserved_semispace_size_ >> 10);
6438       }
6439     }
6440   } else {
6441     // If we are not using snapshots we reserve space for the actual
6442     // max semispace size.
6443     reserved_semispace_size_ = max_semispace_size_;
6444   }
6445
6446   if (max_old_gen_size > 0) max_old_generation_size_ = max_old_gen_size;
6447   if (max_executable_size > 0) {
6448     max_executable_size_ = RoundUp(max_executable_size, Page::kPageSize);
6449   }
6450
6451   // The max executable size must be less than or equal to the max old
6452   // generation size.
6453   if (max_executable_size_ > max_old_generation_size_) {
6454     max_executable_size_ = max_old_generation_size_;
6455   }
6456
6457   // The new space size must be a power of two to support single-bit testing
6458   // for containment.
6459   max_semispace_size_ = RoundUpToPowerOf2(max_semispace_size_);
6460   reserved_semispace_size_ = RoundUpToPowerOf2(reserved_semispace_size_);
6461   initial_semispace_size_ = Min(initial_semispace_size_, max_semispace_size_);
6462   external_allocation_limit_ = 16 * max_semispace_size_;
6463
6464   // The old generation is paged and needs at least one page for each space.
6465   int paged_space_count = LAST_PAGED_SPACE - FIRST_PAGED_SPACE + 1;
6466   max_old_generation_size_ = Max(static_cast<intptr_t>(paged_space_count *
6467                                                        Page::kPageSize),
6468                                  RoundUp(max_old_generation_size_,
6469                                          Page::kPageSize));
6470
6471   configured_ = true;
6472   return true;
6473 }
6474
6475
6476 bool Heap::ConfigureHeapDefault() {
6477   return ConfigureHeap(static_cast<intptr_t>(FLAG_max_new_space_size / 2) * KB,
6478                        static_cast<intptr_t>(FLAG_max_old_space_size) * MB,
6479                        static_cast<intptr_t>(FLAG_max_executable_size) * MB);
6480 }
6481
6482
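// Fills in a HeapStats record with the sizes and capacities of all spaces.
// When take_snapshot is set, object counts and sizes are additionally tallied
// per instance type.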
6483 void Heap::RecordStats(HeapStats* stats, bool take_snapshot) {
6484   *stats->start_marker = HeapStats::kStartMarker;
6485   *stats->end_marker = HeapStats::kEndMarker;
6486   *stats->new_space_size = new_space_.SizeAsInt();
6487   *stats->new_space_capacity = static_cast<int>(new_space_.Capacity());
6488   *stats->old_pointer_space_size = old_pointer_space_->SizeOfObjects();
6489   *stats->old_pointer_space_capacity = old_pointer_space_->Capacity();
6490   *stats->old_data_space_size = old_data_space_->SizeOfObjects();
6491   *stats->old_data_space_capacity = old_data_space_->Capacity();
6492   *stats->code_space_size = code_space_->SizeOfObjects();
6493   *stats->code_space_capacity = code_space_->Capacity();
6494   *stats->map_space_size = map_space_->SizeOfObjects();
6495   *stats->map_space_capacity = map_space_->Capacity();
6496   *stats->cell_space_size = cell_space_->SizeOfObjects();
6497   *stats->cell_space_capacity = cell_space_->Capacity();
6498   *stats->lo_space_size = lo_space_->Size();
6499   isolate_->global_handles()->RecordStats(stats);
6500   *stats->memory_allocator_size = isolate()->memory_allocator()->Size();
6501   *stats->memory_allocator_capacity =
6502       isolate()->memory_allocator()->Size() +
6503       isolate()->memory_allocator()->Available();
6504   *stats->os_error = OS::GetLastError();
6506   if (take_snapshot) {
6507     HeapIterator iterator(this);
6508     for (HeapObject* obj = iterator.next();
6509          obj != NULL;
6510          obj = iterator.next()) {
6511       InstanceType type = obj->map()->instance_type();
6512       ASSERT(0 <= type && type <= LAST_TYPE);
6513       stats->objects_per_type[type]++;
6514       stats->size_per_type[type] += obj->Size();
6515     }
6516   }
6517 }
6518
6519
6520 intptr_t Heap::PromotedSpaceSizeOfObjects() {
6521   return old_pointer_space_->SizeOfObjects()
6522       + old_data_space_->SizeOfObjects()
6523       + code_space_->SizeOfObjects()
6524       + map_space_->SizeOfObjects()
6525       + cell_space_->SizeOfObjects()
6526       + lo_space_->SizeOfObjects();
6527 }
6528
6529
6530 intptr_t Heap::PromotedExternalMemorySize() {
6531   if (amount_of_external_allocated_memory_
6532       <= amount_of_external_allocated_memory_at_last_global_gc_) return 0;
6533   return amount_of_external_allocated_memory_
6534       - amount_of_external_allocated_memory_at_last_global_gc_;
6535 }
6536
6537
6538 V8_DECLARE_ONCE(initialize_gc_once);
6539
6540 static void InitializeGCOnce() {
6541   InitializeScavengingVisitorsTables();
6542   NewSpaceScavenger::Initialize();
6543   MarkCompactCollector::Initialize();
6544 }
6545
6546 bool Heap::SetUp() {
6547 #ifdef DEBUG
6548   allocation_timeout_ = FLAG_gc_interval;
6549 #endif
6550
6551   // Initialize heap spaces and initial maps and objects. Whenever something
6552   // goes wrong, just return false. The caller should check the results and
6553   // call Heap::TearDown() to release allocated memory.
6554   //
6555   // If the heap is not yet configured (e.g. through the API), configure it.
6556   // Configuration is based on the flags new-space-size (really the semispace
6557   // size) and old-space-size if set or the initial values of semispace_size_
6558   // and old_generation_size_ otherwise.
6559   if (!configured_) {
6560     if (!ConfigureHeapDefault()) return false;
6561   }
6562
6563   CallOnce(&initialize_gc_once, &InitializeGCOnce);
6564
6565   MarkMapPointersAsEncoded(false);
6566
6567   // Set up memory allocator.
6568   if (!isolate_->memory_allocator()->SetUp(MaxReserved(), MaxExecutableSize()))
6569       return false;
6570
6571   // Set up new space.
6572   if (!new_space_.SetUp(reserved_semispace_size_, max_semispace_size_)) {
6573     return false;
6574   }
6575
6576   // Initialize old pointer space.
6577   old_pointer_space_ =
6578       new OldSpace(this,
6579                    max_old_generation_size_,
6580                    OLD_POINTER_SPACE,
6581                    NOT_EXECUTABLE);
6582   if (old_pointer_space_ == NULL) return false;
6583   if (!old_pointer_space_->SetUp()) return false;
6584
6585   // Initialize old data space.
6586   old_data_space_ =
6587       new OldSpace(this,
6588                    max_old_generation_size_,
6589                    OLD_DATA_SPACE,
6590                    NOT_EXECUTABLE);
6591   if (old_data_space_ == NULL) return false;
6592   if (!old_data_space_->SetUp()) return false;
6593
6594   // Initialize the code space, set its maximum capacity to the old
6595   // generation size. It needs executable memory.
6596   // On 64-bit platforms we put all code objects in a 2 GB range of
6597   // virtual address space, so that they can call each other with near calls.
6598   if (code_range_size_ > 0) {
6599     if (!isolate_->code_range()->SetUp(code_range_size_)) {
6600       return false;
6601     }
6602   }
6603
6604   code_space_ =
6605       new OldSpace(this, max_old_generation_size_, CODE_SPACE, EXECUTABLE);
6606   if (code_space_ == NULL) return false;
6607   if (!code_space_->SetUp()) return false;
6608
6609   // Initialize map space.
6610   map_space_ = new MapSpace(this, max_old_generation_size_, MAP_SPACE);
6611   if (map_space_ == NULL) return false;
6612   if (!map_space_->SetUp()) return false;
6613
6614   // Initialize global property cell space.
6615   cell_space_ = new CellSpace(this, max_old_generation_size_, CELL_SPACE);
6616   if (cell_space_ == NULL) return false;
6617   if (!cell_space_->SetUp()) return false;
6618
6619   // The large object space may contain code or data.  We set the memory
6620   // to be non-executable here for safety, but this means we need to enable it
6621   // explicitly when allocating large code objects.
6622   lo_space_ = new LargeObjectSpace(this, max_old_generation_size_, LO_SPACE);
6623   if (lo_space_ == NULL) return false;
6624   if (!lo_space_->SetUp()) return false;
6625
6626   // Set up the seed that is used to randomize the string hash function.
6627   ASSERT(hash_seed() == 0);
6628   if (FLAG_randomize_hashes) {
6629     if (FLAG_hash_seed == 0) {
6630       set_hash_seed(
6631           Smi::FromInt(V8::RandomPrivate(isolate()) & 0x3fffffff));
6632     } else {
6633       set_hash_seed(Smi::FromInt(FLAG_hash_seed));
6634     }
6635   }
6636
6637   LOG(isolate_, IntPtrTEvent("heap-capacity", Capacity()));
6638   LOG(isolate_, IntPtrTEvent("heap-available", Available()));
6639
6640   store_buffer()->SetUp();
6641
6642   if (FLAG_parallel_recompilation) relocation_mutex_ = OS::CreateMutex();
6643 #ifdef DEBUG
6644   relocation_mutex_locked_by_optimizer_thread_ = false;
6645 #endif  // DEBUG
6646
6647   return true;
6648 }
6649
6650 bool Heap::CreateHeapObjects() {
6651   // Create initial maps.
6652   if (!CreateInitialMaps()) return false;
6653   if (!CreateApiObjects()) return false;
6654
6655   // Create initial objects
6656   if (!CreateInitialObjects()) return false;
6657
6658   native_contexts_list_ = undefined_value();
6659   return true;
6660 }
6661
6662
6663 void Heap::SetStackLimits() {
6664   ASSERT(isolate_ != NULL);
6665   ASSERT(isolate_ == isolate());
6666   // On 64-bit machines, pointers are generally out of range of Smis.  We
6667   // write something that looks like an out-of-range Smi to the GC.
6668
6669   // Set up the special root array entries containing the stack limits.
6670   // These are actually addresses, but the tag makes the GC ignore it.
6671   roots_[kStackLimitRootIndex] =
6672       reinterpret_cast<Object*>(
6673           (isolate_->stack_guard()->jslimit() & ~kSmiTagMask) | kSmiTag);
6674   roots_[kRealStackLimitRootIndex] =
6675       reinterpret_cast<Object*>(
6676           (isolate_->stack_guard()->real_jslimit() & ~kSmiTagMask) | kSmiTag);
6677 }
6678
6679
6680 void Heap::TearDown() {
6681 #ifdef VERIFY_HEAP
6682   if (FLAG_verify_heap) {
6683     Verify();
6684   }
6685 #endif
6686
6687   if (FLAG_print_cumulative_gc_stat) {
6688     PrintF("\n");
6689     PrintF("gc_count=%d ", gc_count_);
6690     PrintF("mark_sweep_count=%d ", ms_count_);
6691     PrintF("max_gc_pause=%.1f ", get_max_gc_pause());
6692     PrintF("total_gc_time=%.1f ", total_gc_time_ms_);
6693     PrintF("min_in_mutator=%.1f ", get_min_in_mutator());
6694     PrintF("max_alive_after_gc=%" V8_PTR_PREFIX "d ",
6695            get_max_alive_after_gc());
6696     PrintF("total_marking_time=%.1f ", marking_time());
6697     PrintF("total_sweeping_time=%.1f ", sweeping_time());
6698     PrintF("\n\n");
6699   }
6700
6701   isolate_->global_handles()->TearDown();
6702
6703   external_string_table_.TearDown();
6704
6705   error_object_list_.TearDown();
6706
6707   new_space_.TearDown();
6708
6709   if (old_pointer_space_ != NULL) {
6710     old_pointer_space_->TearDown();
6711     delete old_pointer_space_;
6712     old_pointer_space_ = NULL;
6713   }
6714
6715   if (old_data_space_ != NULL) {
6716     old_data_space_->TearDown();
6717     delete old_data_space_;
6718     old_data_space_ = NULL;
6719   }
6720
6721   if (code_space_ != NULL) {
6722     code_space_->TearDown();
6723     delete code_space_;
6724     code_space_ = NULL;
6725   }
6726
6727   if (map_space_ != NULL) {
6728     map_space_->TearDown();
6729     delete map_space_;
6730     map_space_ = NULL;
6731   }
6732
6733   if (cell_space_ != NULL) {
6734     cell_space_->TearDown();
6735     delete cell_space_;
6736     cell_space_ = NULL;
6737   }
6738
6739   if (lo_space_ != NULL) {
6740     lo_space_->TearDown();
6741     delete lo_space_;
6742     lo_space_ = NULL;
6743   }
6744
6745   store_buffer()->TearDown();
6746   incremental_marking()->TearDown();
6747
6748   isolate_->memory_allocator()->TearDown();
6749
6750   delete relocation_mutex_;
6751 }
6752
6753
6754 void Heap::AddGCPrologueCallback(GCPrologueCallback callback, GCType gc_type) {
6755   ASSERT(callback != NULL);
6756   GCPrologueCallbackPair pair(callback, gc_type);
6757   ASSERT(!gc_prologue_callbacks_.Contains(pair));
6758   return gc_prologue_callbacks_.Add(pair);
6759 }
6760
6761
6762 void Heap::RemoveGCPrologueCallback(GCPrologueCallback callback) {
6763   ASSERT(callback != NULL);
6764   for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
6765     if (gc_prologue_callbacks_[i].callback == callback) {
6766       gc_prologue_callbacks_.Remove(i);
6767       return;
6768     }
6769   }
6770   UNREACHABLE();
6771 }
6772
6773
6774 void Heap::AddGCEpilogueCallback(GCEpilogueCallback callback, GCType gc_type) {
6775   ASSERT(callback != NULL);
6776   GCEpilogueCallbackPair pair(callback, gc_type);
6777   ASSERT(!gc_epilogue_callbacks_.Contains(pair));
6778   return gc_epilogue_callbacks_.Add(pair);
6779 }
6780
6781
6782 void Heap::RemoveGCEpilogueCallback(GCEpilogueCallback callback) {
6783   ASSERT(callback != NULL);
6784   for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
6785     if (gc_epilogue_callbacks_[i].callback == callback) {
6786       gc_epilogue_callbacks_.Remove(i);
6787       return;
6788     }
6789   }
6790   UNREACHABLE();
6791 }
6792
6793
6794 #ifdef DEBUG
6795
6796 class PrintHandleVisitor: public ObjectVisitor {
6797  public:
6798   void VisitPointers(Object** start, Object** end) {
6799     for (Object** p = start; p < end; p++)
6800       PrintF("  handle %p to %p\n",
6801              reinterpret_cast<void*>(p),
6802              reinterpret_cast<void*>(*p));
6803   }
6804 };
6805
6806 void Heap::PrintHandles() {
6807   PrintF("Handles:\n");
6808   PrintHandleVisitor v;
6809   isolate_->handle_scope_implementer()->Iterate(&v);
6810 }
6811
6812 #endif
6813
6814
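// The space iterators below hand out spaces in AllocationSpace enum order and
// return NULL once they are exhausted.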
6815 Space* AllSpaces::next() {
6816   switch (counter_++) {
6817     case NEW_SPACE:
6818       return heap_->new_space();
6819     case OLD_POINTER_SPACE:
6820       return heap_->old_pointer_space();
6821     case OLD_DATA_SPACE:
6822       return heap_->old_data_space();
6823     case CODE_SPACE:
6824       return heap_->code_space();
6825     case MAP_SPACE:
6826       return heap_->map_space();
6827     case CELL_SPACE:
6828       return heap_->cell_space();
6829     case LO_SPACE:
6830       return heap_->lo_space();
6831     default:
6832       return NULL;
6833   }
6834 }
6835
6836
6837 PagedSpace* PagedSpaces::next() {
6838   switch (counter_++) {
6839     case OLD_POINTER_SPACE:
6840       return heap_->old_pointer_space();
6841     case OLD_DATA_SPACE:
6842       return heap_->old_data_space();
6843     case CODE_SPACE:
6844       return heap_->code_space();
6845     case MAP_SPACE:
6846       return heap_->map_space();
6847     case CELL_SPACE:
6848       return heap_->cell_space();
6849     default:
6850       return NULL;
6851   }
6852 }
6853
6854
6856 OldSpace* OldSpaces::next() {
6857   switch (counter_++) {
6858     case OLD_POINTER_SPACE:
6859       return heap_->old_pointer_space();
6860     case OLD_DATA_SPACE:
6861       return heap_->old_data_space();
6862     case CODE_SPACE:
6863       return heap_->code_space();
6864     default:
6865       return NULL;
6866   }
6867 }
6868
6869
6870 SpaceIterator::SpaceIterator(Heap* heap)
6871     : heap_(heap),
6872       current_space_(FIRST_SPACE),
6873       iterator_(NULL),
6874       size_func_(NULL) {
6875 }
6876
6877
6878 SpaceIterator::SpaceIterator(Heap* heap, HeapObjectCallback size_func)
6879     : heap_(heap),
6880       current_space_(FIRST_SPACE),
6881       iterator_(NULL),
6882       size_func_(size_func) {
6883 }
6884
6885
6886 SpaceIterator::~SpaceIterator() {
6887   // Delete active iterator if any.
6888   delete iterator_;
6889 }
6890
6891
6892 bool SpaceIterator::has_next() {
6893   // Iterate until no more spaces.
6894   return current_space_ != LAST_SPACE;
6895 }
6896
6897
6898 ObjectIterator* SpaceIterator::next() {
6899   if (iterator_ != NULL) {
6900     delete iterator_;
6901     iterator_ = NULL;
6902     // Move to the next space
6903     current_space_++;
6904     if (current_space_ > LAST_SPACE) {
6905       return NULL;
6906     }
6907   }
6908
6909   // Return iterator for the new current space.
6910   return CreateIterator();
6911 }
6912
6913
6914 // Create an iterator for the space to iterate.
6915 ObjectIterator* SpaceIterator::CreateIterator() {
6916   ASSERT(iterator_ == NULL);
6917
6918   switch (current_space_) {
6919     case NEW_SPACE:
6920       iterator_ = new SemiSpaceIterator(heap_->new_space(), size_func_);
6921       break;
6922     case OLD_POINTER_SPACE:
6923       iterator_ =
6924           new HeapObjectIterator(heap_->old_pointer_space(), size_func_);
6925       break;
6926     case OLD_DATA_SPACE:
6927       iterator_ = new HeapObjectIterator(heap_->old_data_space(), size_func_);
6928       break;
6929     case CODE_SPACE:
6930       iterator_ = new HeapObjectIterator(heap_->code_space(), size_func_);
6931       break;
6932     case MAP_SPACE:
6933       iterator_ = new HeapObjectIterator(heap_->map_space(), size_func_);
6934       break;
6935     case CELL_SPACE:
6936       iterator_ = new HeapObjectIterator(heap_->cell_space(), size_func_);
6937       break;
6938     case LO_SPACE:
6939       iterator_ = new LargeObjectIterator(heap_->lo_space(), size_func_);
6940       break;
6941   }
6942
6943   // Return the newly allocated iterator.
6944   ASSERT(iterator_ != NULL);
6945   return iterator_;
6946 }
6947
6948
6949 class HeapObjectsFilter {
6950  public:
6951   virtual ~HeapObjectsFilter() {}
6952   virtual bool SkipObject(HeapObject* object) = 0;
6953 };
6954
6955
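// Marks every object reachable from the roots on construction so that
// SkipObject can filter out the objects that stayed unmarked.  The mark bits
// are cleared again when the filter is destroyed.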
6956 class UnreachableObjectsFilter : public HeapObjectsFilter {
6957  public:
6958   UnreachableObjectsFilter() {
6959     MarkReachableObjects();
6960   }
6961
6962   ~UnreachableObjectsFilter() {
6963     Isolate::Current()->heap()->mark_compact_collector()->ClearMarkbits();
6964   }
6965
6966   bool SkipObject(HeapObject* object) {
6967     MarkBit mark_bit = Marking::MarkBitFrom(object);
6968     return !mark_bit.Get();
6969   }
6970
6971  private:
6972   class MarkingVisitor : public ObjectVisitor {
6973    public:
6974     MarkingVisitor() : marking_stack_(10) {}
6975
6976     void VisitPointers(Object** start, Object** end) {
6977       for (Object** p = start; p < end; p++) {
6978         if (!(*p)->IsHeapObject()) continue;
6979         HeapObject* obj = HeapObject::cast(*p);
6980         MarkBit mark_bit = Marking::MarkBitFrom(obj);
6981         if (!mark_bit.Get()) {
6982           mark_bit.Set();
6983           marking_stack_.Add(obj);
6984         }
6985       }
6986     }
6987
6988     void TransitiveClosure() {
6989       while (!marking_stack_.is_empty()) {
6990         HeapObject* obj = marking_stack_.RemoveLast();
6991         obj->Iterate(this);
6992       }
6993     }
6994
6995    private:
6996     List<HeapObject*> marking_stack_;
6997   };
6998
6999   void MarkReachableObjects() {
7000     Heap* heap = Isolate::Current()->heap();
7001     MarkingVisitor visitor;
7002     heap->IterateRoots(&visitor, VISIT_ALL);
7003     visitor.TransitiveClosure();
7004   }
7005
7006   AssertNoAllocation no_alloc;
7007 };
7008
7009
7010 HeapIterator::HeapIterator(Heap* heap)
7011     : heap_(heap),
7012       filtering_(HeapIterator::kNoFiltering),
7013       filter_(NULL) {
7014   Init();
7015 }
7016
7017
7018 HeapIterator::HeapIterator(Heap* heap,
7019                            HeapIterator::HeapObjectsFiltering filtering)
7020     : heap_(heap),
7021       filtering_(filtering),
7022       filter_(NULL) {
7023   Init();
7024 }
7025
7026
7027 HeapIterator::~HeapIterator() {
7028   Shutdown();
7029 }
7030
7031
7032 void HeapIterator::Init() {
7033   // Start the iteration.
7034   space_iterator_ = new SpaceIterator(heap_);
7035   switch (filtering_) {
7036     case kFilterUnreachable:
7037       filter_ = new UnreachableObjectsFilter;
7038       break;
7039     default:
7040       break;
7041   }
7042   object_iterator_ = space_iterator_->next();
7043 }
7044
7045
7046 void HeapIterator::Shutdown() {
7047 #ifdef DEBUG
7048   // Assert that in filtering mode we have iterated through all
7049   // objects. Otherwise, heap will be left in an inconsistent state.
7050   if (filtering_ != kNoFiltering) {
7051     ASSERT(object_iterator_ == NULL);
7052   }
7053 #endif
7054   // Make sure the last iterator is deallocated.
7055   delete space_iterator_;
7056   space_iterator_ = NULL;
7057   object_iterator_ = NULL;
7058   delete filter_;
7059   filter_ = NULL;
7060 }
7061
7062
7063 HeapObject* HeapIterator::next() {
7064   if (filter_ == NULL) return NextObject();
7065
7066   HeapObject* obj = NextObject();
7067   while (obj != NULL && filter_->SkipObject(obj)) obj = NextObject();
7068   return obj;
7069 }
7070
7071
7072 HeapObject* HeapIterator::NextObject() {
7073   // No iterator means we are done.
7074   if (object_iterator_ == NULL) return NULL;
7075
7076   if (HeapObject* obj = object_iterator_->next_object()) {
7077     // If the current iterator has more objects we are fine.
7078     return obj;
7079   } else {
7080     // Go through the spaces looking for one that has objects.
7081     while (space_iterator_->has_next()) {
7082       object_iterator_ = space_iterator_->next();
7083       if (HeapObject* obj = object_iterator_->next_object()) {
7084         return obj;
7085       }
7086     }
7087   }
7088   // Done with the last space.
7089   object_iterator_ = NULL;
7090   return NULL;
7091 }
7092
7093
7094 void HeapIterator::reset() {
7095   // Restart the iterator.
7096   Shutdown();
7097   Init();
7098 }
7099
7100
7101 #ifdef DEBUG
7102
7103 Object* const PathTracer::kAnyGlobalObject = reinterpret_cast<Object*>(NULL);
7104
7105 class PathTracer::MarkVisitor: public ObjectVisitor {
7106  public:
7107   explicit MarkVisitor(PathTracer* tracer) : tracer_(tracer) {}
7108   void VisitPointers(Object** start, Object** end) {
7109     // Scan all HeapObject pointers in [start, end)
7110     for (Object** p = start; !tracer_->found() && (p < end); p++) {
7111       if ((*p)->IsHeapObject())
7112         tracer_->MarkRecursively(p, this);
7113     }
7114   }
7115
7116  private:
7117   PathTracer* tracer_;
7118 };
7119
7120
7121 class PathTracer::UnmarkVisitor: public ObjectVisitor {
7122  public:
7123   explicit UnmarkVisitor(PathTracer* tracer) : tracer_(tracer) {}
7124   void VisitPointers(Object** start, Object** end) {
7125     // Scan all HeapObject pointers in [start, end)
7126     for (Object** p = start; p < end; p++) {
7127       if ((*p)->IsHeapObject())
7128         tracer_->UnmarkRecursively(p, this);
7129     }
7130   }
7131
7132  private:
7133   PathTracer* tracer_;
7134 };
7135
7136
7137 void PathTracer::VisitPointers(Object** start, Object** end) {
7138   bool done = ((what_to_find_ == FIND_FIRST) && found_target_);
7139   // Visit all HeapObject pointers in [start, end)
7140   for (Object** p = start; !done && (p < end); p++) {
7141     if ((*p)->IsHeapObject()) {
7142       TracePathFrom(p);
7143       done = ((what_to_find_ == FIND_FIRST) && found_target_);
7144     }
7145   }
7146 }
7147
7148
7149 void PathTracer::Reset() {
7150   found_target_ = false;
7151   object_stack_.Clear();
7152 }
7153
7154
7155 void PathTracer::TracePathFrom(Object** root) {
7156   ASSERT((search_target_ == kAnyGlobalObject) ||
7157          search_target_->IsHeapObject());
7158   found_target_in_trace_ = false;
7159   Reset();
7160
7161   MarkVisitor mark_visitor(this);
7162   MarkRecursively(root, &mark_visitor);
7163
7164   UnmarkVisitor unmark_visitor(this);
7165   UnmarkRecursively(root, &unmark_visitor);
7166
7167   ProcessResults();
7168 }
7169
7170
7171 static bool SafeIsNativeContext(HeapObject* obj) {
7172   return obj->map() == obj->GetHeap()->raw_unchecked_native_context_map();
7173 }
7174
7175
7176 void PathTracer::MarkRecursively(Object** p, MarkVisitor* mark_visitor) {
7177   if (!(*p)->IsHeapObject()) return;
7178
7179   HeapObject* obj = HeapObject::cast(*p);
7180
7181   Object* map = obj->map();
7182
7183   if (!map->IsHeapObject()) return;  // visited before
7184
7185   if (found_target_in_trace_) return;  // stop if target found
7186   object_stack_.Add(obj);
7187   if (((search_target_ == kAnyGlobalObject) && obj->IsJSGlobalObject()) ||
7188       (obj == search_target_)) {
7189     found_target_in_trace_ = true;
7190     found_target_ = true;
7191     return;
7192   }
7193
7194   bool is_native_context = SafeIsNativeContext(obj);
7195
7196   // not visited yet
7197   Map* map_p = reinterpret_cast<Map*>(HeapObject::cast(map));
7198
7199   Address map_addr = map_p->address();
7200
7201   obj->set_map_no_write_barrier(reinterpret_cast<Map*>(map_addr + kMarkTag));
7202
7203   // Scan the object body.
7204   if (is_native_context && (visit_mode_ == VISIT_ONLY_STRONG)) {
7205     // This is specialized to scan Contexts properly.
7206     Object** start = reinterpret_cast<Object**>(obj->address() +
7207                                                 Context::kHeaderSize);
7208     Object** end = reinterpret_cast<Object**>(obj->address() +
7209         Context::kHeaderSize + Context::FIRST_WEAK_SLOT * kPointerSize);
7210     mark_visitor->VisitPointers(start, end);
7211   } else {
7212     obj->IterateBody(map_p->instance_type(),
7213                      obj->SizeFromMap(map_p),
7214                      mark_visitor);
7215   }
7216
7217   // Scan the map after the body because the body is a lot more interesting
7218   // when doing leak detection.
7219   MarkRecursively(&map, mark_visitor);
7220
7221   if (!found_target_in_trace_)  // don't pop if found the target
7222     object_stack_.RemoveLast();
7223 }
7224
7225
7226 void PathTracer::UnmarkRecursively(Object** p, UnmarkVisitor* unmark_visitor) {
7227   if (!(*p)->IsHeapObject()) return;
7228
7229   HeapObject* obj = HeapObject::cast(*p);
7230
7231   Object* map = obj->map();
7232
7233   if (map->IsHeapObject()) return;  // unmarked already
7234
7235   Address map_addr = reinterpret_cast<Address>(map);
7236
7237   map_addr -= kMarkTag;
7238
7239   ASSERT_TAG_ALIGNED(map_addr);
7240
7241   HeapObject* map_p = HeapObject::FromAddress(map_addr);
7242
7243   obj->set_map_no_write_barrier(reinterpret_cast<Map*>(map_p));
7244
7245   UnmarkRecursively(reinterpret_cast<Object**>(&map_p), unmark_visitor);
7246
7247   obj->IterateBody(Map::cast(map_p)->instance_type(),
7248                    obj->SizeFromMap(Map::cast(map_p)),
7249                    unmark_visitor);
7250 }
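// Both recursive walks above piggy-back the "visited" bit on the object's
// map word instead of using a side table: MarkRecursively stores the map's
// address plus kMarkTag back into the map slot, which no longer looks like
// a HeapObject pointer, so a revisit bails out early; UnmarkRecursively
// subtracts the tag to restore the real map.  A minimal sketch of the
// invariant (illustrative only, not extra code):
//
//   Address map_addr = obj->map()->address();
//   obj->set_map_no_write_barrier(
//       reinterpret_cast<Map*>(map_addr + kMarkTag));  // marked as visited
//   ...
//   obj->set_map_no_write_barrier(
//       reinterpret_cast<Map*>(map_addr));             // unmarked again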
7251
7252
7253 void PathTracer::ProcessResults() {
7254   if (found_target_) {
7255     PrintF("=====================================\n");
7256     PrintF("====        Path to object       ====\n");
7257     PrintF("=====================================\n\n");
7258
7259     ASSERT(!object_stack_.is_empty());
7260     for (int i = 0; i < object_stack_.length(); i++) {
7261       if (i > 0) PrintF("\n     |\n     |\n     V\n\n");
7262       Object* obj = object_stack_[i];
7263       obj->Print();
7264     }
7265     PrintF("=====================================\n");
7266   }
7267 }
7268
7269
7270 // Triggers a depth-first traversal of reachable objects from one
7271 // given root object and finds a path to a specific heap object and
7272 // prints it.
7273 void Heap::TracePathToObjectFrom(Object* target, Object* root) {
7274   PathTracer tracer(target, PathTracer::FIND_ALL, VISIT_ALL);
7275   tracer.VisitPointer(&root);
7276 }
7277
7278
7279 // Triggers a depth-first traversal of reachable objects from roots
7280 // and finds a path to a specific heap object and prints it.
7281 void Heap::TracePathToObject(Object* target) {
7282   PathTracer tracer(target, PathTracer::FIND_ALL, VISIT_ALL);
7283   IterateRoots(&tracer, VISIT_ONLY_STRONG);
7284 }
7285
7286
7287 // Triggers a depth-first traversal of reachable objects from roots
7288 // and finds a path to any global object and prints it. Useful for
7289 // determining the source for leaks of global objects.
7290 void Heap::TracePathToGlobal() {
7291   PathTracer tracer(PathTracer::kAnyGlobalObject,
7292                     PathTracer::FIND_ALL,
7293                     VISIT_ALL);
7294   IterateRoots(&tracer, VISIT_ONLY_STRONG);
7295 }
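// Usage sketch for the three helpers above.  They are meant for leak
// debugging and are typically called by hand from a debugger or from
// temporary instrumentation; the locals below are hypothetical:
//
//   Heap* heap = suspect->GetHeap();
//   heap->TracePathToObject(suspect);                 // from strong roots
//   heap->TracePathToObjectFrom(suspect, some_root);  // from a chosen root
//   heap->TracePathToGlobal();                        // to any global object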
7296 #endif
7297
7298
7299 static intptr_t CountTotalHolesSize(Heap* heap) {
7300   intptr_t holes_size = 0;
7301   OldSpaces spaces(heap);
7302   for (OldSpace* space = spaces.next();
7303        space != NULL;
7304        space = spaces.next()) {
7305     holes_size += space->Waste() + space->Available();
7306   }
7307   return holes_size;
7308 }
7309
7310
7311 GCTracer::GCTracer(Heap* heap,
7312                    const char* gc_reason,
7313                    const char* collector_reason)
7314     : start_time_(0.0),
7315       start_object_size_(0),
7316       start_memory_size_(0),
7317       gc_count_(0),
7318       full_gc_count_(0),
7319       allocated_since_last_gc_(0),
7320       spent_in_mutator_(0),
7321       promoted_objects_size_(0),
7322       nodes_died_in_new_space_(0),
7323       nodes_copied_in_new_space_(0),
7324       nodes_promoted_(0),
7325       heap_(heap),
7326       gc_reason_(gc_reason),
7327       collector_reason_(collector_reason) {
7328   if (!FLAG_trace_gc && !FLAG_print_cumulative_gc_stat) return;
7329   start_time_ = OS::TimeCurrentMillis();
7330   start_object_size_ = heap_->SizeOfObjects();
7331   start_memory_size_ = heap_->isolate()->memory_allocator()->Size();
7332
7333   for (int i = 0; i < Scope::kNumberOfScopes; i++) {
7334     scopes_[i] = 0;
7335   }
7336
7337   in_free_list_or_wasted_before_gc_ = CountTotalHolesSize(heap);
7338
7339   allocated_since_last_gc_ =
7340       heap_->SizeOfObjects() - heap_->alive_after_last_gc_;
7341
7342   if (heap_->last_gc_end_timestamp_ > 0) {
7343     spent_in_mutator_ = Max(start_time_ - heap_->last_gc_end_timestamp_, 0.0);
7344   }
7345
7346   steps_count_ = heap_->incremental_marking()->steps_count();
7347   steps_took_ = heap_->incremental_marking()->steps_took();
7348   longest_step_ = heap_->incremental_marking()->longest_step();
7349   steps_count_since_last_gc_ =
7350       heap_->incremental_marking()->steps_count_since_last_gc();
7351   steps_took_since_last_gc_ =
7352       heap_->incremental_marking()->steps_took_since_last_gc();
7353 }
7354
7355
7356 GCTracer::~GCTracer() {
7357   // Print ONE line iff a tracing flag is set.
7358   if (!FLAG_trace_gc && !FLAG_print_cumulative_gc_stat) return;
7359
7360   bool first_gc = (heap_->last_gc_end_timestamp_ == 0);
7361
7362   heap_->alive_after_last_gc_ = heap_->SizeOfObjects();
7363   heap_->last_gc_end_timestamp_ = OS::TimeCurrentMillis();
7364
7365   double time = heap_->last_gc_end_timestamp_ - start_time_;
7366
7367   // Update cumulative GC statistics if required.
7368   if (FLAG_print_cumulative_gc_stat) {
7369     heap_->total_gc_time_ms_ += time;
7370     heap_->max_gc_pause_ = Max(heap_->max_gc_pause_, time);
7371     heap_->max_alive_after_gc_ = Max(heap_->max_alive_after_gc_,
7372                                      heap_->alive_after_last_gc_);
7373     if (!first_gc) {
7374       heap_->min_in_mutator_ = Min(heap_->min_in_mutator_,
7375                                    spent_in_mutator_);
7376     }
7377   } else if (FLAG_trace_gc_verbose) {
7378     heap_->total_gc_time_ms_ += time;
7379   }
7380
7381   if (collector_ == SCAVENGER && FLAG_trace_gc_ignore_scavenger) return;
7382
7383   heap_->AddMarkingTime(scopes_[Scope::MC_MARK]);
7384
7385   if (FLAG_print_cumulative_gc_stat && !FLAG_trace_gc) return;
7386   PrintPID("%8.0f ms: ", heap_->isolate()->time_millis_since_init());
7387
7388   if (!FLAG_trace_gc_nvp) {
7389     int external_time = static_cast<int>(scopes_[Scope::EXTERNAL]);
7390
7391     double end_memory_size_mb =
7392         static_cast<double>(heap_->isolate()->memory_allocator()->Size()) / MB;
7393
7394     PrintF("%s %.1f (%.1f) -> %.1f (%.1f) MB, ",
7395            CollectorString(),
7396            static_cast<double>(start_object_size_) / MB,
7397            static_cast<double>(start_memory_size_) / MB,
7398            SizeOfHeapObjects(),
7399            end_memory_size_mb);
7400
7401     if (external_time > 0) PrintF("%d / ", external_time);
7402     PrintF("%.1f ms", time);
7403     if (steps_count_ > 0) {
7404       if (collector_ == SCAVENGER) {
7405         PrintF(" (+ %.1f ms in %d steps since last GC)",
7406                steps_took_since_last_gc_,
7407                steps_count_since_last_gc_);
7408       } else {
7409         PrintF(" (+ %.1f ms in %d steps since start of marking, "
7410                    "biggest step %.1f ms)",
7411                steps_took_,
7412                steps_count_,
7413                longest_step_);
7414       }
7415     }
7416
7417     if (gc_reason_ != NULL) {
7418       PrintF(" [%s]", gc_reason_);
7419     }
7420
7421     if (collector_reason_ != NULL) {
7422       PrintF(" [%s]", collector_reason_);
7423     }
7424
7425     PrintF(".\n");
7426   } else {
7427     PrintF("pause=%.1f ", time);
7428     PrintF("mutator=%.1f ", spent_in_mutator_);
7429     PrintF("gc=");
7430     switch (collector_) {
7431       case SCAVENGER:
7432         PrintF("s");
7433         break;
7434       case MARK_COMPACTOR:
7435         PrintF("ms");
7436         break;
7437       default:
7438         UNREACHABLE();
7439     }
7440     PrintF(" ");
7441
7442     PrintF("external=%.1f ", scopes_[Scope::EXTERNAL]);
7443     PrintF("mark=%.1f ", scopes_[Scope::MC_MARK]);
7444     PrintF("sweep=%.1f ", scopes_[Scope::MC_SWEEP]);
7445     PrintF("sweepns=%.1f ", scopes_[Scope::MC_SWEEP_NEWSPACE]);
7446     PrintF("evacuate=%.1f ", scopes_[Scope::MC_EVACUATE_PAGES]);
7447     PrintF("new_new=%.1f ", scopes_[Scope::MC_UPDATE_NEW_TO_NEW_POINTERS]);
7448     PrintF("root_new=%.1f ", scopes_[Scope::MC_UPDATE_ROOT_TO_NEW_POINTERS]);
7449     PrintF("old_new=%.1f ", scopes_[Scope::MC_UPDATE_OLD_TO_NEW_POINTERS]);
7450     PrintF("compaction_ptrs=%.1f ",
7451         scopes_[Scope::MC_UPDATE_POINTERS_TO_EVACUATED]);
7452     PrintF("intracompaction_ptrs=%.1f ",
7453         scopes_[Scope::MC_UPDATE_POINTERS_BETWEEN_EVACUATED]);
7454     PrintF("misc_compaction=%.1f ", scopes_[Scope::MC_UPDATE_MISC_POINTERS]);
7455
7456     PrintF("total_size_before=%" V8_PTR_PREFIX "d ", start_object_size_);
7457     PrintF("total_size_after=%" V8_PTR_PREFIX "d ", heap_->SizeOfObjects());
7458     PrintF("holes_size_before=%" V8_PTR_PREFIX "d ",
7459            in_free_list_or_wasted_before_gc_);
7460     PrintF("holes_size_after=%" V8_PTR_PREFIX "d ", CountTotalHolesSize(heap_));
7461
7462     PrintF("allocated=%" V8_PTR_PREFIX "d ", allocated_since_last_gc_);
7463     PrintF("promoted=%" V8_PTR_PREFIX "d ", promoted_objects_size_);
7464     PrintF("nodes_died_in_new=%d ", nodes_died_in_new_space_);
7465     PrintF("nodes_copied_in_new=%d ", nodes_copied_in_new_space_);
7466     PrintF("nodes_promoted=%d ", nodes_promoted_);
7467
7468     if (collector_ == SCAVENGER) {
7469       PrintF("stepscount=%d ", steps_count_since_last_gc_);
7470       PrintF("stepstook=%.1f ", steps_took_since_last_gc_);
7471     } else {
7472       PrintF("stepscount=%d ", steps_count_);
7473       PrintF("stepstook=%.1f ", steps_took_);
7474       PrintF("longeststep=%.1f ", longest_step_);
7475     }
7476
7477     PrintF("\n");
7478   }
7479
7480   heap_->PrintShortHeapStatistics();
7481 }
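// For reference, the non-nvp branch above emits one line of roughly this
// shape (wrapped here for readability; the numbers are placeholders, not
// captured output, and the pid/timestamp prefix comes from PrintPID):
//
//   [1234]    4242 ms: Scavenge 3.2 (38.5) -> 2.1 (38.5) MB, 1.4 ms
//       (+ 2.0 ms in 10 steps since last GC) [allocation failure].
//
// i.e. collector name, object size and memory allocator size (in MB) before
// and after the collection, optional external and incremental-marking
// times, and the optional gc/collector reasons in brackets.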
7482
7483
7484 const char* GCTracer::CollectorString() {
7485   switch (collector_) {
7486     case SCAVENGER:
7487       return "Scavenge";
7488     case MARK_COMPACTOR:
7489       return "Mark-sweep";
7490   }
7491   return "Unknown GC";
7492 }
7493
7494
7495 int KeyedLookupCache::Hash(Map* map, Name* name) {
7496   // Uses only lower 32 bits if pointers are larger.
7497   uintptr_t addr_hash =
7498       static_cast<uint32_t>(reinterpret_cast<uintptr_t>(map)) >> kMapHashShift;
7499   return static_cast<uint32_t>((addr_hash ^ name->Hash()) & kCapacityMask);
7500 }
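// Sketch of how this hash is consumed by Lookup() and Update() below: the
// cache is one flat array of (map, name) keys, and both functions mask the
// hash with kHashMask and then probe the kEntriesPerBucket consecutive
// entries starting there (illustrative only):
//
//   int index = Hash(map, name) & kHashMask;     // first slot of the bucket
//   for (int i = 0; i < kEntriesPerBucket; i++) {
//     // keys_[index + i] and field_offsets_[index + i] form one entry.
//   }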
7501
7502
7503 int KeyedLookupCache::Lookup(Map* map, Name* name) {
7504   int index = (Hash(map, name) & kHashMask);
7505   for (int i = 0; i < kEntriesPerBucket; i++) {
7506     Key& key = keys_[index + i];
7507     if ((key.map == map) && key.name->Equals(name)) {
7508       return field_offsets_[index + i];
7509     }
7510   }
7511   return kNotFound;
7512 }
7513
7514
7515 void KeyedLookupCache::Update(Map* map, Name* name, int field_offset) {
7516   if (!name->IsUniqueName()) {
7517     String* internalized_string;
7518     if (!HEAP->InternalizeStringIfExists(
7519             String::cast(name), &internalized_string)) {
7520       return;
7521     }
7522     name = internalized_string;
7523   }
7524   // This cache is cleared only between mark compact passes, so we expect the
7525   // cache to only contain old space names.
7526   ASSERT(!HEAP->InNewSpace(name));
7527
7528   int index = (Hash(map, name) & kHashMask);
7529   // After a GC there will be free slots, so we use them in order (this may
7530   // help to get the most frequently used one in position 0).
7531   for (int i = 0; i < kEntriesPerBucket; i++) {
7532     Key& key = keys_[index + i];
7533     Object* free_entry_indicator = NULL;
7534     if (key.map == free_entry_indicator) {
7535       key.map = map;
7536       key.name = name;
7537       field_offsets_[index + i] = field_offset;
7538       return;
7539     }
7540   }
7541   // No free entry found in this bucket, so we move them all down one and
7542   // put the new entry at position zero.
7543   for (int i = kEntriesPerBucket - 1; i > 0; i--) {
7544     Key& key = keys_[index + i];
7545     Key& key2 = keys_[index + i - 1];
7546     key = key2;
7547     field_offsets_[index + i] = field_offsets_[index + i - 1];
7548   }
7549
7550   // Write the new first entry.
7551   Key& key = keys_[index];
7552   key.map = map;
7553   key.name = name;
7554   field_offsets_[index] = field_offset;
7555 }
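// Minimal usage sketch for this cache.  The receiver_map/name/field_offset
// locals are hypothetical, and the keyed_lookup_cache() accessor on the
// isolate is assumed here rather than spelled out by this file:
//
//   KeyedLookupCache* cache = isolate->keyed_lookup_cache();
//   int offset = cache->Lookup(receiver_map, name);
//   if (offset == KeyedLookupCache::kNotFound) {
//     // Slow path: compute field_offset, then remember it.
//     cache->Update(receiver_map, name, field_offset);
//   }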
7556
7557
7558 void KeyedLookupCache::Clear() {
7559   for (int index = 0; index < kLength; index++) keys_[index].map = NULL;
7560 }
7561
7562
7563 void DescriptorLookupCache::Clear() {
7564   for (int index = 0; index < kLength; index++) keys_[index].source = NULL;
7565 }
7566
7567
7568 #ifdef DEBUG
7569 void Heap::GarbageCollectionGreedyCheck() {
7570   ASSERT(FLAG_gc_greedy);
7571   if (isolate_->bootstrapper()->IsActive()) return;
7572   if (disallow_allocation_failure()) return;
7573   CollectGarbage(NEW_SPACE);
7574 }
7575 #endif
7576
7577
7578 TranscendentalCache::SubCache::SubCache(Type t)
7579   : type_(t),
7580     isolate_(Isolate::Current()) {
7581   uint32_t in0 = 0xffffffffu;  // Bit-pattern for a NaN that isn't
7582   uint32_t in1 = 0xffffffffu;  // generated by the FPU.
7583   for (int i = 0; i < kCacheSize; i++) {
7584     elements_[i].in[0] = in0;
7585     elements_[i].in[1] = in1;
7586     elements_[i].output = NULL;
7587   }
7588 }
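// Why 0xffffffff/0xffffffff is a safe "empty slot" marker: read as the two
// halves of a double it is a NaN bit pattern that, per the comment above,
// the FPU never generates, so a probe with any computed input cannot
// accidentally match an empty entry.  For example the double 1.0 has bit
// pattern 0x3ff0000000000000, so neither 32-bit half equals 0xffffffff.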
7589
7590
7591 void TranscendentalCache::Clear() {
7592   for (int i = 0; i < kNumberOfCaches; i++) {
7593     if (caches_[i] != NULL) {
7594       delete caches_[i];
7595       caches_[i] = NULL;
7596     }
7597   }
7598 }
7599
7600
7601 void ExternalStringTable::CleanUp() {
7602   int last = 0;
7603   for (int i = 0; i < new_space_strings_.length(); ++i) {
7604     if (new_space_strings_[i] == heap_->the_hole_value()) {
7605       continue;
7606     }
7607     if (heap_->InNewSpace(new_space_strings_[i])) {
7608       new_space_strings_[last++] = new_space_strings_[i];
7609     } else {
7610       old_space_strings_.Add(new_space_strings_[i]);
7611     }
7612   }
7613   new_space_strings_.Rewind(last);
7614   new_space_strings_.Trim();
7615
7616   last = 0;
7617   for (int i = 0; i < old_space_strings_.length(); ++i) {
7618     if (old_space_strings_[i] == heap_->the_hole_value()) {
7619       continue;
7620     }
7621     ASSERT(!heap_->InNewSpace(old_space_strings_[i]));
7622     old_space_strings_[last++] = old_space_strings_[i];
7623   }
7624   old_space_strings_.Rewind(last);
7625   old_space_strings_.Trim();
7626 #ifdef VERIFY_HEAP
7627   if (FLAG_verify_heap) {
7628     Verify();
7629   }
7630 #endif
7631 }
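// After CleanUp() the table invariant is restored: new_space_strings_ holds
// only strings that are still in new space, old_space_strings_ only strings
// outside it, and all the_hole placeholders are gone.  A sketch of the kind
// of check this enables (illustrative only):
//
//   for (int i = 0; i < new_space_strings_.length(); ++i) {
//     ASSERT(heap_->InNewSpace(new_space_strings_[i]));
//   }
//   for (int i = 0; i < old_space_strings_.length(); ++i) {
//     ASSERT(!heap_->InNewSpace(old_space_strings_[i]));
//   }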
7632
7633
7634 void ExternalStringTable::TearDown() {
7635   new_space_strings_.Free();
7636   old_space_strings_.Free();
7637 }
7638
7639
7640 // Update all references.
7641 void ErrorObjectList::UpdateReferences() {
7642   for (int i = 0; i < list_.length(); i++) {
7643     HeapObject* object = HeapObject::cast(list_[i]);
7644     MapWord first_word = object->map_word();
7645     if (first_word.IsForwardingAddress()) {
7646       list_[i] = first_word.ToForwardingAddress();
7647     }
7648   }
7649 }
7650
7651
7652 // Unforwarded objects in new space are dead and removed from the list.
7653 void ErrorObjectList::UpdateReferencesInNewSpace(Heap* heap) {
7654   if (list_.is_empty()) return;
7655   if (!nested_) {
7656     int write_index = 0;
7657     for (int i = 0; i < list_.length(); i++) {
7658       MapWord first_word = HeapObject::cast(list_[i])->map_word();
7659       if (first_word.IsForwardingAddress()) {
7660         list_[write_index++] = first_word.ToForwardingAddress();
7661       }
7662     }
7663     list_.Rewind(write_index);
7664   } else {
7665     // If a GC is triggered during DeferredFormatStackTrace, we do not move
7666     // objects in the list, just remove dead ones, so as not to confuse the
7667     // loop in DeferredFormatStackTrace.
7668     for (int i = 0; i < list_.length(); i++) {
7669       MapWord first_word = HeapObject::cast(list_[i])->map_word();
7670       list_[i] = first_word.IsForwardingAddress()
7671                      ? first_word.ToForwardingAddress()
7672                      : heap->the_hole_value();
7673     }
7674   }
7675 }
7676
7677
7678 void ErrorObjectList::DeferredFormatStackTrace(Isolate* isolate) {
7679   // If formatting the stack trace causes a GC, this method will be
7680   // recursively called.  In that case, skip the recursive call, since
7681   // the loop modifies the list while iterating over it.
7682   if (nested_ || list_.is_empty() || isolate->has_pending_exception()) return;
7683   nested_ = true;
7684   HandleScope scope(isolate);
7685   Handle<String> stack_key = isolate->factory()->stack_string();
7686   int write_index = 0;
7687   int budget = kBudgetPerGC;
7688   for (int i = 0; i < list_.length(); i++) {
7689     Object* object = list_[i];
7690     JSFunction* getter_fun;
7691
7692     { AssertNoAllocation assert;
7693       // Skip possible holes in the list.
7694       if (object->IsTheHole()) continue;
7695       if (isolate->heap()->InNewSpace(object) || budget == 0) {
7696         list_[write_index++] = object;
7697         continue;
7698       }
7699
7700       // Check whether the stack property is backed by the original getter.
7701       LookupResult lookup(isolate);
7702       JSObject::cast(object)->LocalLookupRealNamedProperty(*stack_key, &lookup);
7703       if (!lookup.IsFound() || lookup.type() != CALLBACKS) continue;
7704       Object* callback = lookup.GetCallbackObject();
7705       if (!callback->IsAccessorPair()) continue;
7706       Object* getter_obj = AccessorPair::cast(callback)->getter();
7707       if (!getter_obj->IsJSFunction()) continue;
7708       getter_fun = JSFunction::cast(getter_obj);
7709       String* key = isolate->heap()->hidden_stack_trace_string();
7710       Object* value = getter_fun->GetHiddenProperty(key);
7711       if (key != value) continue;
7712     }
7713
7714     budget--;
7715     HandleScope scope(isolate);
7716     bool has_exception = false;
7717 #ifdef DEBUG
7718     Handle<Map> map(HeapObject::cast(object)->map(), isolate);
7719 #endif
7720     Handle<Object> object_handle(object, isolate);
7721     Handle<Object> getter_handle(getter_fun, isolate);
7722     Execution::Call(getter_handle, object_handle, 0, NULL, &has_exception);
7723     ASSERT(*map == HeapObject::cast(*object_handle)->map());
7724     if (has_exception) {
7725       // Hit an exception (most likely a stack overflow).
7726       // Wrap up this pass and retry after another GC.
7727       isolate->clear_pending_exception();
7728       // We use the handle since calling the getter might have caused a GC.
7729       list_[write_index++] = *object_handle;
7730       budget = 0;
7731     }
7732   }
7733   list_.Rewind(write_index);
7734   list_.Trim();
7735   nested_ = false;
7736 }
7737
7738
7739 void ErrorObjectList::RemoveUnmarked(Heap* heap) {
7740   for (int i = 0; i < list_.length(); i++) {
7741     HeapObject* object = HeapObject::cast(list_[i]);
7742     if (!Marking::MarkBitFrom(object).Get()) {
7743       list_[i] = heap->the_hole_value();
7744     }
7745   }
7746 }
7747
7748
7749 void ErrorObjectList::TearDown() {
7750   list_.Free();
7751 }
7752
7753
7754 void Heap::QueueMemoryChunkForFree(MemoryChunk* chunk) {
7755   chunk->set_next_chunk(chunks_queued_for_free_);
7756   chunks_queued_for_free_ = chunk;
7757 }
7758
7759
7760 void Heap::FreeQueuedChunks() {
7761   if (chunks_queued_for_free_ == NULL) return;
7762   MemoryChunk* next;
7763   MemoryChunk* chunk;
7764   for (chunk = chunks_queued_for_free_; chunk != NULL; chunk = next) {
7765     next = chunk->next_chunk();
7766     chunk->SetFlag(MemoryChunk::ABOUT_TO_BE_FREED);
7767
7768     if (chunk->owner()->identity() == LO_SPACE) {
7769       // StoreBuffer::Filter relies on MemoryChunk::FromAnyPointerAddress.
7770       // If FromAnyPointerAddress encounters a slot that belongs to a large
7771       // chunk queued for deletion, it will fail to find the chunk: it
7772       // searches the list of pages owned by the large object space, and
7773       // queued chunks have already been detached from that list.  To work
7774       // around this we split the large chunk into normal, kPageSize-aligned
7775       // pieces and initialize the size, owner and flags field of every
7776       // piece.  If FromAnyPointerAddress encounters a slot in one of these
7777       // pieces, it treats it as a slot on a normal Page.
7778       Address chunk_end = chunk->address() + chunk->size();
7779       MemoryChunk* inner = MemoryChunk::FromAddress(
7780           chunk->address() + Page::kPageSize);
7781       MemoryChunk* inner_last = MemoryChunk::FromAddress(chunk_end - 1);
7782       while (inner <= inner_last) {
7783         // Size of a large chunk is always a multiple of
7784         // OS::AllocateAlignment() so there is always
7785         // enough space for a fake MemoryChunk header.
7786         Address area_end = Min(inner->address() + Page::kPageSize, chunk_end);
7787         // Guard against overflow.
7788         if (area_end < inner->address()) area_end = chunk_end;
7789         inner->SetArea(inner->address(), area_end);
7790         inner->set_size(Page::kPageSize);
7791         inner->set_owner(lo_space());
7792         inner->SetFlag(MemoryChunk::ABOUT_TO_BE_FREED);
7793         inner = MemoryChunk::FromAddress(
7794             inner->address() + Page::kPageSize);
7795       }
7796     }
7797   }
7798   isolate_->heap()->store_buffer()->Compact();
7799   isolate_->heap()->store_buffer()->Filter(MemoryChunk::ABOUT_TO_BE_FREED);
7800   for (chunk = chunks_queued_for_free_; chunk != NULL; chunk = next) {
7801     next = chunk->next_chunk();
7802     isolate_->memory_allocator()->Free(chunk);
7803   }
7804   chunks_queued_for_free_ = NULL;
7805 }
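// Sketch of the protocol these two functions implement: a chunk that has
// been unlinked from its space is only queued at first, and the whole queue
// is released in one batch after the store buffer has been compacted and
// filtered against the ABOUT_TO_BE_FREED flag, so no stale store-buffer
// entry keeps pointing into memory that has been handed back.  Illustrative
// only; 'chunk' is a hypothetical, already-unlinked chunk:
//
//   heap->QueueMemoryChunkForFree(chunk);
//   ...
//   heap->FreeQueuedChunks();   // filter store buffer, then actually free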
7806
7807
7808 void Heap::RememberUnmappedPage(Address page, bool compacted) {
7809   uintptr_t p = reinterpret_cast<uintptr_t>(page);
7810   // Tag the page pointer to make it findable in the dump file.
7811   if (compacted) {
7812     p ^= 0xc1ead & (Page::kPageSize - 1);  // Cleared.
7813   } else {
7814     p ^= 0x1d1ed & (Page::kPageSize - 1);  // I died.
7815   }
7816   remembered_unmapped_pages_[remembered_unmapped_pages_index_] =
7817       reinterpret_cast<Address>(p);
7818   remembered_unmapped_pages_index_++;
7819   remembered_unmapped_pages_index_ %= kRememberedUnmappedPages;
7820 }
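// Worked example of the tagging above, assuming the page address is
// page-aligned (so its low bits are zero before the XOR): afterwards the
// low bits simply read back as the (masked) tag, which makes recently
// unmapped pages easy to spot when scanning remembered_unmapped_pages_ in
// a crash dump.  An entry whose low bits spell 0xc1ead ("cleared") was
// unmapped after compaction, one ending in 0x1d1ed ("I died") was not:
//
//   page   = 0x....00000
//   stored = 0x....00000 ^ 0xc1ead   // low bits now read 0xc1ead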
7821
7822
7823 void Heap::ClearObjectStats(bool clear_last_time_stats) {
7824   memset(object_counts_, 0, sizeof(object_counts_));
7825   memset(object_sizes_, 0, sizeof(object_sizes_));
7826   if (clear_last_time_stats) {
7827     memset(object_counts_last_time_, 0, sizeof(object_counts_last_time_));
7828     memset(object_sizes_last_time_, 0, sizeof(object_sizes_last_time_));
7829   }
7830 }
7831
7832
7833 static LazyMutex checkpoint_object_stats_mutex = LAZY_MUTEX_INITIALIZER;
7834
7835
7836 void Heap::CheckpointObjectStats() {
7837   ScopedLock lock(checkpoint_object_stats_mutex.Pointer());
7838   Counters* counters = isolate()->counters();
7839 #define ADJUST_LAST_TIME_OBJECT_COUNT(name)                                    \
7840   counters->count_of_##name()->Increment(                                      \
7841       static_cast<int>(object_counts_[name]));                                 \
7842   counters->count_of_##name()->Decrement(                                      \
7843       static_cast<int>(object_counts_last_time_[name]));                       \
7844   counters->size_of_##name()->Increment(                                       \
7845       static_cast<int>(object_sizes_[name]));                                  \
7846   counters->size_of_##name()->Decrement(                                       \
7847       static_cast<int>(object_sizes_last_time_[name]));
7848   INSTANCE_TYPE_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
7849 #undef ADJUST_LAST_TIME_OBJECT_COUNT
7850   int index;
7851 #define ADJUST_LAST_TIME_OBJECT_COUNT(name)               \
7852   index = FIRST_CODE_KIND_SUB_TYPE + Code::name;          \
7853   counters->count_of_CODE_TYPE_##name()->Increment(       \
7854       static_cast<int>(object_counts_[index]));           \
7855   counters->count_of_CODE_TYPE_##name()->Decrement(       \
7856       static_cast<int>(object_counts_last_time_[index])); \
7857   counters->size_of_CODE_TYPE_##name()->Increment(        \
7858       static_cast<int>(object_sizes_[index]));            \
7859   counters->size_of_CODE_TYPE_##name()->Decrement(        \
7860       static_cast<int>(object_sizes_last_time_[index]));
7861   CODE_KIND_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
7862 #undef ADJUST_LAST_TIME_OBJECT_COUNT
7863 #define ADJUST_LAST_TIME_OBJECT_COUNT(name)               \
7864   index = FIRST_FIXED_ARRAY_SUB_TYPE + name;              \
7865   counters->count_of_FIXED_ARRAY_##name()->Increment(     \
7866       static_cast<int>(object_counts_[index]));           \
7867   counters->count_of_FIXED_ARRAY_##name()->Decrement(     \
7868       static_cast<int>(object_counts_last_time_[index])); \
7869   counters->size_of_FIXED_ARRAY_##name()->Increment(      \
7870       static_cast<int>(object_sizes_[index]));            \
7871   counters->size_of_FIXED_ARRAY_##name()->Decrement(      \
7872       static_cast<int>(object_sizes_last_time_[index]));
7873   FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
7874 #undef ADJUST_LAST_TIME_OBJECT_COUNT
7875
7876   OS::MemCopy(object_counts_last_time_, object_counts_, sizeof(object_counts_));
7877   OS::MemCopy(object_sizes_last_time_, object_sizes_, sizeof(object_sizes_));
7878   ClearObjectStats();
7879 }
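// The macro expansions above perform a delta update on each counter:
// Increment(current) followed by Decrement(previous) advances the counter
// by exactly the growth since the last checkpoint, i.e. in effect
//
//   counter += object_counts_[i] - object_counts_last_time_[i];
//
// after which the current values are copied into the *_last_time_ arrays
// and the per-GC arrays are cleared for the next cycle.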
7880
7881
7882 Heap::RelocationLock::RelocationLock(Heap* heap) : heap_(heap) {
7883   if (FLAG_parallel_recompilation) {
7884     heap_->relocation_mutex_->Lock();
7885 #ifdef DEBUG
7886     heap_->relocation_mutex_locked_by_optimizer_thread_ =
7887         heap_->isolate()->optimizing_compiler_thread()->IsOptimizerThread();
7888 #endif  // DEBUG
7889   }
7890 }
7891
7892 } }  // namespace v8::internal