1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
4 // met:
5 //
6 //     * Redistributions of source code must retain the above copyright
7 //       notice, this list of conditions and the following disclaimer.
8 //     * Redistributions in binary form must reproduce the above
9 //       copyright notice, this list of conditions and the following
10 //       disclaimer in the documentation and/or other materials provided
11 //       with the distribution.
12 //     * Neither the name of Google Inc. nor the names of its
13 //       contributors may be used to endorse or promote products derived
14 //       from this software without specific prior written permission.
15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
28 #include "v8.h"
29
30 #include "accessors.h"
31 #include "api.h"
32 #include "bootstrapper.h"
33 #include "codegen.h"
34 #include "compilation-cache.h"
35 #include "debug.h"
36 #include "deoptimizer.h"
37 #include "global-handles.h"
38 #include "heap-profiler.h"
39 #include "incremental-marking.h"
40 #include "mark-compact.h"
41 #include "natives.h"
42 #include "objects-visiting.h"
43 #include "objects-visiting-inl.h"
44 #include "once.h"
45 #include "runtime-profiler.h"
46 #include "scopeinfo.h"
47 #include "snapshot.h"
48 #include "store-buffer.h"
49 #include "v8threads.h"
50 #include "v8utils.h"
51 #include "vm-state-inl.h"
52 #if V8_TARGET_ARCH_ARM && !V8_INTERPRETED_REGEXP
53 #include "regexp-macro-assembler.h"
54 #include "arm/regexp-macro-assembler-arm.h"
55 #endif
56 #if V8_TARGET_ARCH_MIPS && !V8_INTERPRETED_REGEXP
57 #include "regexp-macro-assembler.h"
58 #include "mips/regexp-macro-assembler-mips.h"
59 #endif
60
61 namespace v8 {
62 namespace internal {
63
64
65 Heap::Heap()
66     : isolate_(NULL),
67 // semispace_size_ should be a power of 2 and old_generation_size_ should be
68 // a multiple of Page::kPageSize.
69 #if defined(V8_TARGET_ARCH_X64)
70 #define LUMP_OF_MEMORY (2 * MB)
71       code_range_size_(512*MB),
72 #else
73 #define LUMP_OF_MEMORY MB
74       code_range_size_(0),
75 #endif
76 #if defined(ANDROID)
77       reserved_semispace_size_(4 * Max(LUMP_OF_MEMORY, Page::kPageSize)),
78       max_semispace_size_(4 * Max(LUMP_OF_MEMORY, Page::kPageSize)),
79       initial_semispace_size_(Page::kPageSize),
80       max_old_generation_size_(192*MB),
81       max_executable_size_(max_old_generation_size_),
82 #else
83       reserved_semispace_size_(8 * Max(LUMP_OF_MEMORY, Page::kPageSize)),
84       max_semispace_size_(8 * Max(LUMP_OF_MEMORY, Page::kPageSize)),
85       initial_semispace_size_(Page::kPageSize),
86       max_old_generation_size_(700ul * LUMP_OF_MEMORY),
87       max_executable_size_(256l * LUMP_OF_MEMORY),
88 #endif
89
90 // Variables set based on semispace_size_ and old_generation_size_ in
91 // ConfigureHeap (survived_since_last_expansion_, external_allocation_limit_)
92 // Will be 4 * reserved_semispace_size_ to ensure that young
93 // generation can be aligned to its size.
94       survived_since_last_expansion_(0),
95       sweep_generation_(0),
96       always_allocate_scope_depth_(0),
97       linear_allocation_scope_depth_(0),
98       contexts_disposed_(0),
99       global_ic_age_(0),
100       flush_monomorphic_ics_(false),
101       scan_on_scavenge_pages_(0),
102       new_space_(this),
103       old_pointer_space_(NULL),
104       old_data_space_(NULL),
105       code_space_(NULL),
106       map_space_(NULL),
107       cell_space_(NULL),
108       lo_space_(NULL),
109       gc_state_(NOT_IN_GC),
110       gc_post_processing_depth_(0),
111       ms_count_(0),
112       gc_count_(0),
113       remembered_unmapped_pages_index_(0),
114       unflattened_strings_length_(0),
115 #ifdef DEBUG
116       allocation_allowed_(true),
117       allocation_timeout_(0),
118       disallow_allocation_failure_(false),
119 #endif  // DEBUG
120       new_space_high_promotion_mode_active_(false),
121       old_gen_promotion_limit_(kMinimumPromotionLimit),
122       old_gen_allocation_limit_(kMinimumAllocationLimit),
123       old_gen_limit_factor_(1),
124       size_of_old_gen_at_last_old_space_gc_(0),
125       external_allocation_limit_(0),
126       amount_of_external_allocated_memory_(0),
127       amount_of_external_allocated_memory_at_last_global_gc_(0),
128       old_gen_exhausted_(false),
129       store_buffer_rebuilder_(store_buffer()),
130       hidden_string_(NULL),
131       global_gc_prologue_callback_(NULL),
132       global_gc_epilogue_callback_(NULL),
133       gc_safe_size_of_old_object_(NULL),
134       total_regexp_code_generated_(0),
135       tracer_(NULL),
136       young_survivors_after_last_gc_(0),
137       high_survival_rate_period_length_(0),
138       low_survival_rate_period_length_(0),
139       survival_rate_(0),
140       previous_survival_rate_trend_(Heap::STABLE),
141       survival_rate_trend_(Heap::STABLE),
142       max_gc_pause_(0.0),
143       total_gc_time_ms_(0.0),
144       max_alive_after_gc_(0),
145       min_in_mutator_(kMaxInt),
146       alive_after_last_gc_(0),
147       last_gc_end_timestamp_(0.0),
148       marking_time_(0.0),
149       sweeping_time_(0.0),
150       store_buffer_(this),
151       marking_(this),
152       incremental_marking_(this),
153       number_idle_notifications_(0),
154       last_idle_notification_gc_count_(0),
155       last_idle_notification_gc_count_init_(false),
156       mark_sweeps_since_idle_round_started_(0),
157       ms_count_at_last_idle_notification_(0),
158       gc_count_at_last_idle_gc_(0),
159       scavenges_since_last_idle_round_(kIdleScavengeThreshold),
160       gcs_since_last_deopt_(0),
161 #ifdef VERIFY_HEAP
162       no_weak_embedded_maps_verification_scope_depth_(0),
163 #endif
164       promotion_queue_(this),
165       configured_(false),
166       chunks_queued_for_free_(NULL),
167       relocation_mutex_(NULL) {
168   // Allow build-time customization of the max semispace size. Building
169   // V8 with snapshots and a non-default max semispace size is much
170   // easier if you can define it as part of the build environment.
171 #if defined(V8_MAX_SEMISPACE_SIZE)
172   max_semispace_size_ = reserved_semispace_size_ = V8_MAX_SEMISPACE_SIZE;
173 #endif
174
175   intptr_t max_virtual = OS::MaxVirtualMemory();
176
177   if (max_virtual > 0) {
178     if (code_range_size_ > 0) {
179       // Reserve no more than 1/8 of the memory for the code range.
180       code_range_size_ = Min(code_range_size_, max_virtual >> 3);
181     }
182   }
183
184   memset(roots_, 0, sizeof(roots_[0]) * kRootListLength);
185   native_contexts_list_ = NULL;
186   mark_compact_collector_.heap_ = this;
187   external_string_table_.heap_ = this;
188   // Put a dummy entry in the remembered pages so we can find the list in the
189   // minidump even if there are no real unmapped pages.
190   RememberUnmappedPage(NULL, false);
191
192   ClearObjectStats(true);
193 }
194
195
196 intptr_t Heap::Capacity() {
197   if (!HasBeenSetUp()) return 0;
198
199   return new_space_.Capacity() +
200       old_pointer_space_->Capacity() +
201       old_data_space_->Capacity() +
202       code_space_->Capacity() +
203       map_space_->Capacity() +
204       cell_space_->Capacity();
205 }
206
207
208 intptr_t Heap::CommittedMemory() {
209   if (!HasBeenSetUp()) return 0;
210
211   return new_space_.CommittedMemory() +
212       old_pointer_space_->CommittedMemory() +
213       old_data_space_->CommittedMemory() +
214       code_space_->CommittedMemory() +
215       map_space_->CommittedMemory() +
216       cell_space_->CommittedMemory() +
217       lo_space_->Size();
218 }
219
220
221 size_t Heap::CommittedPhysicalMemory() {
222   if (!HasBeenSetUp()) return 0;
223
224   return new_space_.CommittedPhysicalMemory() +
225       old_pointer_space_->CommittedPhysicalMemory() +
226       old_data_space_->CommittedPhysicalMemory() +
227       code_space_->CommittedPhysicalMemory() +
228       map_space_->CommittedPhysicalMemory() +
229       cell_space_->CommittedPhysicalMemory() +
230       lo_space_->CommittedPhysicalMemory();
231 }
232
233
234 intptr_t Heap::CommittedMemoryExecutable() {
235   if (!HasBeenSetUp()) return 0;
236
237   return isolate()->memory_allocator()->SizeExecutable();
238 }
239
240
241 intptr_t Heap::Available() {
242   if (!HasBeenSetUp()) return 0;
243
244   return new_space_.Available() +
245       old_pointer_space_->Available() +
246       old_data_space_->Available() +
247       code_space_->Available() +
248       map_space_->Available() +
249       cell_space_->Available();
250 }
251
252
253 bool Heap::HasBeenSetUp() {
254   return old_pointer_space_ != NULL &&
255          old_data_space_ != NULL &&
256          code_space_ != NULL &&
257          map_space_ != NULL &&
258          cell_space_ != NULL &&
259          lo_space_ != NULL;
260 }
261
262
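// Computes the size of an old-space object in a GC-safe way: if the object
// carries an intrusive mark, its size is recovered from the marking
// information; otherwise it is read from the object's map.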
263 int Heap::GcSafeSizeOfOldObject(HeapObject* object) {
264   if (IntrusiveMarking::IsMarked(object)) {
265     return IntrusiveMarking::SizeOfMarkedObject(object);
266   }
267   return object->SizeFromMap(object->map());
268 }
269
270
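// Chooses between the scavenger (new space only) and the full mark-compact
// collector for the requested allocation space, and reports the reason for
// the choice through *reason (NULL when the scavenger is selected).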
271 GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space,
272                                               const char** reason) {
273   // Is global GC requested?
274   if (space != NEW_SPACE) {
275     isolate_->counters()->gc_compactor_caused_by_request()->Increment();
276     *reason = "GC in old space requested";
277     return MARK_COMPACTOR;
278   }
279
280   if (FLAG_gc_global || (FLAG_stress_compaction && (gc_count_ & 1) != 0)) {
281     *reason = "GC in old space forced by flags";
282     return MARK_COMPACTOR;
283   }
284
285   // Is enough data promoted to justify a global GC?
286   if (OldGenerationPromotionLimitReached()) {
287     isolate_->counters()->gc_compactor_caused_by_promoted_data()->Increment();
288     *reason = "promotion limit reached";
289     return MARK_COMPACTOR;
290   }
291
292   // Have allocation in OLD and LO failed?
293   if (old_gen_exhausted_) {
294     isolate_->counters()->
295         gc_compactor_caused_by_oldspace_exhaustion()->Increment();
296     *reason = "old generations exhausted";
297     return MARK_COMPACTOR;
298   }
299
300   // Is there enough space left in OLD to guarantee that a scavenge can
301   // succeed?
302   //
303   // Note that MemoryAllocator->MaxAvailable() undercounts the memory available
304   // for object promotion. It counts only the bytes that the memory
305   // allocator has not yet allocated from the OS and assigned to any space,
306   // and does not count available bytes already in the old space or code
307   // space.  Undercounting is safe---we may get an unrequested full GC when
308   // a scavenge would have succeeded.
309   if (isolate_->memory_allocator()->MaxAvailable() <= new_space_.Size()) {
310     isolate_->counters()->
311         gc_compactor_caused_by_oldspace_exhaustion()->Increment();
312     *reason = "scavenge might not succeed";
313     return MARK_COMPACTOR;
314   }
315
316   // Default
317   *reason = NULL;
318   return SCAVENGER;
319 }
320
321
322 // TODO(1238405): Combine the infrastructure for --heap-stats and
323 // --log-gc to avoid the complicated preprocessor and flag testing.
324 void Heap::ReportStatisticsBeforeGC() {
325   // Heap::ReportHeapStatistics will also log NewSpace statistics when
326   // --log-gc is set.  The following logic is used to avoid
327   // double logging.
328 #ifdef DEBUG
329   if (FLAG_heap_stats || FLAG_log_gc) new_space_.CollectStatistics();
330   if (FLAG_heap_stats) {
331     ReportHeapStatistics("Before GC");
332   } else if (FLAG_log_gc) {
333     new_space_.ReportStatistics();
334   }
335   if (FLAG_heap_stats || FLAG_log_gc) new_space_.ClearHistograms();
336 #else
337   if (FLAG_log_gc) {
338     new_space_.CollectStatistics();
339     new_space_.ReportStatistics();
340     new_space_.ClearHistograms();
341   }
342 #endif  // DEBUG
343 }
344
345
346 void Heap::PrintShortHeapStatistics() {
347   if (!FLAG_trace_gc_verbose) return;
348   PrintPID("Memory allocator,   used: %6" V8_PTR_PREFIX "d KB"
349                ", available: %6" V8_PTR_PREFIX "d KB\n",
350            isolate_->memory_allocator()->Size() / KB,
351            isolate_->memory_allocator()->Available() / KB);
352   PrintPID("New space,          used: %6" V8_PTR_PREFIX "d KB"
353                ", available: %6" V8_PTR_PREFIX "d KB"
354                ", committed: %6" V8_PTR_PREFIX "d KB\n",
355            new_space_.Size() / KB,
356            new_space_.Available() / KB,
357            new_space_.CommittedMemory() / KB);
358   PrintPID("Old pointers,       used: %6" V8_PTR_PREFIX "d KB"
359                ", available: %6" V8_PTR_PREFIX "d KB"
360                ", committed: %6" V8_PTR_PREFIX "d KB\n",
361            old_pointer_space_->SizeOfObjects() / KB,
362            old_pointer_space_->Available() / KB,
363            old_pointer_space_->CommittedMemory() / KB);
364   PrintPID("Old data space,     used: %6" V8_PTR_PREFIX "d KB"
365                ", available: %6" V8_PTR_PREFIX "d KB"
366                ", committed: %6" V8_PTR_PREFIX "d KB\n",
367            old_data_space_->SizeOfObjects() / KB,
368            old_data_space_->Available() / KB,
369            old_data_space_->CommittedMemory() / KB);
370   PrintPID("Code space,         used: %6" V8_PTR_PREFIX "d KB"
371                ", available: %6" V8_PTR_PREFIX "d KB"
372                ", committed: %6" V8_PTR_PREFIX "d KB\n",
373            code_space_->SizeOfObjects() / KB,
374            code_space_->Available() / KB,
375            code_space_->CommittedMemory() / KB);
376   PrintPID("Map space,          used: %6" V8_PTR_PREFIX "d KB"
377                ", available: %6" V8_PTR_PREFIX "d KB"
378                ", committed: %6" V8_PTR_PREFIX "d KB\n",
379            map_space_->SizeOfObjects() / KB,
380            map_space_->Available() / KB,
381            map_space_->CommittedMemory() / KB);
382   PrintPID("Cell space,         used: %6" V8_PTR_PREFIX "d KB"
383                ", available: %6" V8_PTR_PREFIX "d KB"
384                ", committed: %6" V8_PTR_PREFIX "d KB\n",
385            cell_space_->SizeOfObjects() / KB,
386            cell_space_->Available() / KB,
387            cell_space_->CommittedMemory() / KB);
388   PrintPID("Large object space, used: %6" V8_PTR_PREFIX "d KB"
389                ", available: %6" V8_PTR_PREFIX "d KB"
390                ", committed: %6" V8_PTR_PREFIX "d KB\n",
391            lo_space_->SizeOfObjects() / KB,
392            lo_space_->Available() / KB,
393            lo_space_->CommittedMemory() / KB);
394   PrintPID("All spaces,         used: %6" V8_PTR_PREFIX "d KB"
395                ", available: %6" V8_PTR_PREFIX "d KB"
396                ", committed: %6" V8_PTR_PREFIX "d KB\n",
397            this->SizeOfObjects() / KB,
398            this->Available() / KB,
399            this->CommittedMemory() / KB);
400   PrintPID("Total time spent in GC  : %.1f ms\n", total_gc_time_ms_);
401 }
402
403
404 // TODO(1238405): Combine the infrastructure for --heap-stats and
405 // --log-gc to avoid the complicated preprocessor and flag testing.
406 void Heap::ReportStatisticsAfterGC() {
407   // Similar to the pre-GC case, we use some complicated logic to ensure that
408   // NewSpace statistics are logged exactly once when --log-gc is turned on.
409 #if defined(DEBUG)
410   if (FLAG_heap_stats) {
411     new_space_.CollectStatistics();
412     ReportHeapStatistics("After GC");
413   } else if (FLAG_log_gc) {
414     new_space_.ReportStatistics();
415   }
416 #else
417   if (FLAG_log_gc) new_space_.ReportStatistics();
418 #endif  // DEBUG
419 }
420
421
422 void Heap::GarbageCollectionPrologue() {
423   isolate_->transcendental_cache()->Clear();
424   ClearJSFunctionResultCaches();
425   gc_count_++;
426   unflattened_strings_length_ = 0;
427
428   if (FLAG_flush_code && FLAG_flush_code_incrementally) {
429     mark_compact_collector()->EnableCodeFlushing(true);
430   }
431
432 #ifdef VERIFY_HEAP
433   if (FLAG_verify_heap) {
434     Verify();
435   }
436 #endif
437
438 #ifdef DEBUG
439   ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
440   allow_allocation(false);
441
442   if (FLAG_gc_verbose) Print();
443
444   ReportStatisticsBeforeGC();
445 #endif  // DEBUG
446
447   store_buffer()->GCPrologue();
448 }
449
450
451 intptr_t Heap::SizeOfObjects() {
452   intptr_t total = 0;
453   AllSpaces spaces(this);
454   for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
455     total += space->SizeOfObjects();
456   }
457   return total;
458 }
459
460
461 void Heap::RepairFreeListsAfterBoot() {
462   PagedSpaces spaces(this);
463   for (PagedSpace* space = spaces.next();
464        space != NULL;
465        space = spaces.next()) {
466     space->RepairFreeListsAfterBoot();
467   }
468 }
469
470
471 void Heap::GarbageCollectionEpilogue() {
472   store_buffer()->GCEpilogue();
473
474   // In release mode, we only zap the from space under heap verification.
475   if (Heap::ShouldZapGarbage()) {
476     ZapFromSpace();
477   }
478
479 #ifdef VERIFY_HEAP
480   if (FLAG_verify_heap) {
481     Verify();
482   }
483 #endif
484
485 #ifdef DEBUG
486   allow_allocation(true);
487   if (FLAG_print_global_handles) isolate_->global_handles()->Print();
488   if (FLAG_print_handles) PrintHandles();
489   if (FLAG_gc_verbose) Print();
490   if (FLAG_code_stats) ReportCodeStatistics("After GC");
491 #endif
492   if (FLAG_deopt_every_n_garbage_collections > 0) {
493     if (++gcs_since_last_deopt_ == FLAG_deopt_every_n_garbage_collections) {
494       Deoptimizer::DeoptimizeAll(isolate());
495       gcs_since_last_deopt_ = 0;
496     }
497   }
498
499   isolate_->counters()->alive_after_last_gc()->Set(
500       static_cast<int>(SizeOfObjects()));
501
502   isolate_->counters()->string_table_capacity()->Set(
503       string_table()->Capacity());
504   isolate_->counters()->number_of_symbols()->Set(
505       string_table()->NumberOfElements());
506
507   if (CommittedMemory() > 0) {
508     isolate_->counters()->external_fragmentation_total()->AddSample(
509         static_cast<int>(100 - (SizeOfObjects() * 100.0) / CommittedMemory()));
510
511     isolate_->counters()->heap_fraction_map_space()->AddSample(
512         static_cast<int>(
513             (map_space()->CommittedMemory() * 100.0) / CommittedMemory()));
514     isolate_->counters()->heap_fraction_cell_space()->AddSample(
515         static_cast<int>(
516             (cell_space()->CommittedMemory() * 100.0) / CommittedMemory()));
517
518     isolate_->counters()->heap_sample_total_committed()->AddSample(
519         static_cast<int>(CommittedMemory() / KB));
520     isolate_->counters()->heap_sample_total_used()->AddSample(
521         static_cast<int>(SizeOfObjects() / KB));
522     isolate_->counters()->heap_sample_map_space_committed()->AddSample(
523         static_cast<int>(map_space()->CommittedMemory() / KB));
524     isolate_->counters()->heap_sample_cell_space_committed()->AddSample(
525         static_cast<int>(cell_space()->CommittedMemory() / KB));
526   }
527
528 #define UPDATE_COUNTERS_FOR_SPACE(space)                                       \
529   isolate_->counters()->space##_bytes_available()->Set(                        \
530       static_cast<int>(space()->Available()));                                 \
531   isolate_->counters()->space##_bytes_committed()->Set(                        \
532       static_cast<int>(space()->CommittedMemory()));                           \
533   isolate_->counters()->space##_bytes_used()->Set(                             \
534       static_cast<int>(space()->SizeOfObjects()));
535 #define UPDATE_FRAGMENTATION_FOR_SPACE(space)                                  \
536   if (space()->CommittedMemory() > 0) {                                        \
537     isolate_->counters()->external_fragmentation_##space()->AddSample(         \
538         static_cast<int>(100 -                                                 \
539             (space()->SizeOfObjects() * 100.0) / space()->CommittedMemory())); \
540   }
541 #define UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(space)                     \
542   UPDATE_COUNTERS_FOR_SPACE(space)                                             \
543   UPDATE_FRAGMENTATION_FOR_SPACE(space)
544
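  // For example, UPDATE_COUNTERS_FOR_SPACE(new_space) expands to three counter
  // updates of the form
  //   isolate_->counters()->new_space_bytes_available()->Set(
  //       static_cast<int>(new_space()->Available()));
  // and UPDATE_FRAGMENTATION_FOR_SPACE additionally samples the space's
  // external fragmentation as a percentage of its committed memory.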
545   UPDATE_COUNTERS_FOR_SPACE(new_space)
546   UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(old_pointer_space)
547   UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(old_data_space)
548   UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(code_space)
549   UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(map_space)
550   UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(cell_space)
551   UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(lo_space)
552 #undef UPDATE_COUNTERS_FOR_SPACE
553 #undef UPDATE_FRAGMENTATION_FOR_SPACE
554 #undef UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE
555
556 #if defined(DEBUG)
557   ReportStatisticsAfterGC();
558 #endif  // DEBUG
559 #ifdef ENABLE_DEBUGGER_SUPPORT
560   isolate_->debug()->AfterGarbageCollection();
561 #endif  // ENABLE_DEBUGGER_SUPPORT
562
563   error_object_list_.DeferredFormatStackTrace(isolate());
564 }
565
566
567 void Heap::CollectAllGarbage(int flags, const char* gc_reason) {
568   // Since we are ignoring the return value, the exact choice of space does
569   // not matter, so long as we do not specify NEW_SPACE, which would not
570   // cause a full GC.
571   mark_compact_collector_.SetFlags(flags);
572   CollectGarbage(OLD_POINTER_SPACE, gc_reason);
573   mark_compact_collector_.SetFlags(kNoGCFlags);
574 }
575
576
577 void Heap::CollectAllAvailableGarbage(const char* gc_reason) {
578   // Since we are ignoring the return value, the exact choice of space does
579   // not matter, so long as we do not specify NEW_SPACE, which would not
580   // cause a full GC.
581   // Major GC would invoke weak handle callbacks on weakly reachable
582   // handles, but won't collect weakly reachable objects until next
583   // major GC.  Therefore if we collect aggressively and weak handle callback
584   // has been invoked, we rerun major GC to release objects which become
585   // garbage.
586   // Note: as weak callbacks can execute arbitrary code, we cannot
587   // hope that eventually there will be no weak callbacks invocations.
588   // Therefore stop recollecting after several attempts.
589   mark_compact_collector()->SetFlags(kMakeHeapIterableMask |
590                                      kReduceMemoryFootprintMask);
591   isolate_->compilation_cache()->Clear();
592   const int kMaxNumberOfAttempts = 7;
593   for (int attempt = 0; attempt < kMaxNumberOfAttempts; attempt++) {
594     if (!CollectGarbage(OLD_POINTER_SPACE, MARK_COMPACTOR, gc_reason, NULL)) {
595       break;
596     }
597   }
598   mark_compact_collector()->SetFlags(kNoGCFlags);
599   new_space_.Shrink();
600   UncommitFromSpace();
601   incremental_marking()->UncommitMarkingDeque();
602 }
603
604
605 bool Heap::CollectGarbage(AllocationSpace space,
606                           GarbageCollector collector,
607                           const char* gc_reason,
608                           const char* collector_reason) {
609   // The VM is in the GC state until exiting this function.
610   VMState<GC> state(isolate_);
611
612 #ifdef DEBUG
613   // Reset the allocation timeout to the GC interval, but make sure to
614   // allow at least a few allocations after a collection. The reason
615   // for this is that we have a lot of allocation sequences and we
616   // assume that a garbage collection will allow the subsequent
617   // allocation attempts to go through.
618   allocation_timeout_ = Max(6, FLAG_gc_interval);
619 #endif
620
621   if (collector == SCAVENGER && !incremental_marking()->IsStopped()) {
622     if (FLAG_trace_incremental_marking) {
623       PrintF("[IncrementalMarking] Scavenge during marking.\n");
624     }
625   }
626
627   if (collector == MARK_COMPACTOR &&
628       !mark_compact_collector()->abort_incremental_marking() &&
629       !incremental_marking()->IsStopped() &&
630       !incremental_marking()->should_hurry() &&
631       FLAG_incremental_marking_steps) {
632     // Make progress in incremental marking.
633     const intptr_t kStepSizeWhenDelayedByScavenge = 1 * MB;
634     incremental_marking()->Step(kStepSizeWhenDelayedByScavenge,
635                                 IncrementalMarking::NO_GC_VIA_STACK_GUARD);
636     if (!incremental_marking()->IsComplete()) {
637       if (FLAG_trace_incremental_marking) {
638         PrintF("[IncrementalMarking] Delaying MarkSweep.\n");
639       }
640       collector = SCAVENGER;
641       collector_reason = "incremental marking delaying mark-sweep";
642     }
643   }
644
645   bool next_gc_likely_to_collect_more = false;
646
647   { GCTracer tracer(this, gc_reason, collector_reason);
648     GarbageCollectionPrologue();
649     // The GC count was incremented in the prologue.  Tell the tracer about
650     // it.
651     tracer.set_gc_count(gc_count_);
652
653     // Tell the tracer which collector we've selected.
654     tracer.set_collector(collector);
655
656     {
657       HistogramTimerScope histogram_timer_scope(
658           (collector == SCAVENGER) ? isolate_->counters()->gc_scavenger()
659                                    : isolate_->counters()->gc_compactor());
660       next_gc_likely_to_collect_more =
661           PerformGarbageCollection(collector, &tracer);
662     }
663
664     GarbageCollectionEpilogue();
665   }
666
667   // Start incremental marking for the next cycle. The heap snapshot
668   // generator needs incremental marking to stay off after it aborted.
669   if (!mark_compact_collector()->abort_incremental_marking() &&
670       incremental_marking()->IsStopped() &&
671       incremental_marking()->WorthActivating() &&
672       NextGCIsLikelyToBeFull()) {
673     incremental_marking()->Start();
674   }
675
676   return next_gc_likely_to_collect_more;
677 }
678
679
680 void Heap::PerformScavenge() {
681   GCTracer tracer(this, NULL, NULL);
682   if (incremental_marking()->IsStopped()) {
683     PerformGarbageCollection(SCAVENGER, &tracer);
684   } else {
685     PerformGarbageCollection(MARK_COMPACTOR, &tracer);
686   }
687 }
688
689
690 void Heap::MoveElements(FixedArray* array,
691                         int dst_index,
692                         int src_index,
693                         int len) {
694   if (len == 0) return;
695
696   ASSERT(array->map() != HEAP->fixed_cow_array_map());
697   Object** dst_objects = array->data_start() + dst_index;
698   OS::MemMove(dst_objects,
699               array->data_start() + src_index,
700               len * kPointerSize);
701   if (!InNewSpace(array)) {
702     for (int i = 0; i < len; i++) {
703       // TODO(hpayer): check store buffer for entries
704       if (InNewSpace(dst_objects[i])) {
705         RecordWrite(array->address(), array->OffsetOfElementAt(dst_index + i));
706       }
707     }
708   }
709   incremental_marking()->RecordWrites(array);
710 }
711
712
713 #ifdef VERIFY_HEAP
714 // Helper class for verifying the string table.
715 class StringTableVerifier : public ObjectVisitor {
716  public:
717   void VisitPointers(Object** start, Object** end) {
718     // Visit all HeapObject pointers in [start, end).
719     for (Object** p = start; p < end; p++) {
720       if ((*p)->IsHeapObject()) {
721         // Check that the string is actually internalized.
722         CHECK((*p)->IsTheHole() || (*p)->IsUndefined() ||
723               (*p)->IsInternalizedString());
724       }
725     }
726   }
727 };
728
729
730 static void VerifyStringTable() {
731   StringTableVerifier verifier;
732   HEAP->string_table()->IterateElements(&verifier);
733 }
734 #endif  // VERIFY_HEAP
735
736
737 static bool AbortIncrementalMarkingAndCollectGarbage(
738     Heap* heap,
739     AllocationSpace space,
740     const char* gc_reason = NULL) {
741   heap->mark_compact_collector()->SetFlags(Heap::kAbortIncrementalMarkingMask);
742   bool result = heap->CollectGarbage(space, gc_reason);
743   heap->mark_compact_collector()->SetFlags(Heap::kNoGCFlags);
744   return result;
745 }
746
747
748 void Heap::ReserveSpace(
749     int *sizes,
750     Address *locations_out) {
751   bool gc_performed = true;
752   int counter = 0;
753   static const int kThreshold = 20;
754   while (gc_performed && counter++ < kThreshold) {
755     gc_performed = false;
756     ASSERT(NEW_SPACE == FIRST_PAGED_SPACE - 1);
757     for (int space = NEW_SPACE; space <= LAST_PAGED_SPACE; space++) {
758       if (sizes[space] != 0) {
759         MaybeObject* allocation;
760         if (space == NEW_SPACE) {
761           allocation = new_space()->AllocateRaw(sizes[space]);
762         } else {
763           allocation = paged_space(space)->AllocateRaw(sizes[space]);
764         }
765         FreeListNode* node;
766         if (!allocation->To<FreeListNode>(&node)) {
767           if (space == NEW_SPACE) {
768             Heap::CollectGarbage(NEW_SPACE,
769                                  "failed to reserve space in the new space");
770           } else {
771             AbortIncrementalMarkingAndCollectGarbage(
772                 this,
773                 static_cast<AllocationSpace>(space),
774                 "failed to reserve space in paged space");
775           }
776           gc_performed = true;
777           break;
778         } else {
779           // Mark with a free list node, in case we have a GC before
780           // deserializing.
781           node->set_size(this, sizes[space]);
782           locations_out[space] = node->address();
783         }
784       }
785     }
786   }
787
788   if (gc_performed) {
789     // Failed to reserve the space after several attempts.
790     V8::FatalProcessOutOfMemory("Heap::ReserveSpace");
791   }
792 }
793
794
795 void Heap::EnsureFromSpaceIsCommitted() {
796   if (new_space_.CommitFromSpaceIfNeeded()) return;
797
798   // Committing memory to from space failed.
799   // Memory is exhausted and we will die.
800   V8::FatalProcessOutOfMemory("Committing semi space failed.");
801 }
802
803
804 void Heap::ClearJSFunctionResultCaches() {
805   if (isolate_->bootstrapper()->IsActive()) return;
806
807   Object* context = native_contexts_list_;
808   while (!context->IsUndefined()) {
809     // Get the caches for this context. GC can happen when the context
810     // is not fully initialized, so the caches can be undefined.
811     Object* caches_or_undefined =
812         Context::cast(context)->get(Context::JSFUNCTION_RESULT_CACHES_INDEX);
813     if (!caches_or_undefined->IsUndefined()) {
814       FixedArray* caches = FixedArray::cast(caches_or_undefined);
815       // Clear the caches:
816       int length = caches->length();
817       for (int i = 0; i < length; i++) {
818         JSFunctionResultCache::cast(caches->get(i))->Clear();
819       }
820     }
821     // Get the next context:
822     context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
823   }
824 }
825
826
827 void Heap::ClearNormalizedMapCaches() {
828   if (isolate_->bootstrapper()->IsActive() &&
829       !incremental_marking()->IsMarking()) {
830     return;
831   }
832
833   Object* context = native_contexts_list_;
834   while (!context->IsUndefined()) {
835     // GC can happen when the context is not fully initialized,
836     // so the cache can be undefined.
837     Object* cache =
838         Context::cast(context)->get(Context::NORMALIZED_MAP_CACHE_INDEX);
839     if (!cache->IsUndefined()) {
840       NormalizedMapCache::cast(cache)->Clear();
841     }
842     context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
843   }
844 }
845
846
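// Updates the young-generation survival statistics after a collection.  The
// survival rate is the percentage of the pre-GC new-space size that survived;
// for example, if 2 MB of a 16 MB new space survived, the rate is 12.5.  The
// trend (INCREASING, DECREASING or STABLE) compares the new rate against the
// previous one, allowing for a small deviation.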
847 void Heap::UpdateSurvivalRateTrend(int start_new_space_size) {
848   double survival_rate =
849       (static_cast<double>(young_survivors_after_last_gc_) * 100) /
850       start_new_space_size;
851
852   if (survival_rate > kYoungSurvivalRateHighThreshold) {
853     high_survival_rate_period_length_++;
854   } else {
855     high_survival_rate_period_length_ = 0;
856   }
857
858   if (survival_rate < kYoungSurvivalRateLowThreshold) {
859     low_survival_rate_period_length_++;
860   } else {
861     low_survival_rate_period_length_ = 0;
862   }
863
864   double survival_rate_diff = survival_rate_ - survival_rate;
865
866   if (survival_rate_diff > kYoungSurvivalRateAllowedDeviation) {
867     set_survival_rate_trend(DECREASING);
868   } else if (survival_rate_diff < -kYoungSurvivalRateAllowedDeviation) {
869     set_survival_rate_trend(INCREASING);
870   } else {
871     set_survival_rate_trend(STABLE);
872   }
873
874   survival_rate_ = survival_rate;
875 }
876
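// Runs a single collection with the already-selected collector: fires the GC
// prologue callbacks, performs the mark-compact or scavenge, updates the
// survival statistics (and, after a full collection, the old-generation
// limits), runs weak-handle post-processing and the epilogue callbacks, and
// returns whether a further GC is likely to free more memory.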
877 bool Heap::PerformGarbageCollection(GarbageCollector collector,
878                                     GCTracer* tracer) {
879   bool next_gc_likely_to_collect_more = false;
880
881   if (collector != SCAVENGER) {
882     PROFILE(isolate_, CodeMovingGCEvent());
883   }
884
885 #ifdef VERIFY_HEAP
886   if (FLAG_verify_heap) {
887     VerifyStringTable();
888   }
889 #endif
890
891   GCType gc_type =
892       collector == MARK_COMPACTOR ? kGCTypeMarkSweepCompact : kGCTypeScavenge;
893
894   {
895     GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
896     VMState<EXTERNAL> state(isolate_);
897     CallGCPrologueCallbacks(gc_type, kNoGCCallbackFlags);
898   }
899
900   EnsureFromSpaceIsCommitted();
901
902   int start_new_space_size = Heap::new_space()->SizeAsInt();
903
904   if (IsHighSurvivalRate()) {
905     // We speed up the incremental marker if it is running so that it
906     // does not fall behind the rate of promotion, which would cause a
907     // constantly growing old space.
908     incremental_marking()->NotifyOfHighPromotionRate();
909   }
910
911   if (collector == MARK_COMPACTOR) {
912     // Perform mark-sweep with optional compaction.
913     MarkCompact(tracer);
914     sweep_generation_++;
915     bool high_survival_rate_during_scavenges = IsHighSurvivalRate() &&
916         IsStableOrIncreasingSurvivalTrend();
917
918     UpdateSurvivalRateTrend(start_new_space_size);
919
920     size_of_old_gen_at_last_old_space_gc_ = PromotedSpaceSizeOfObjects();
921
922     if (high_survival_rate_during_scavenges &&
923         IsStableOrIncreasingSurvivalTrend()) {
924       // Stable high survival rates of young objects both during partial and
925       // full collection indicate that mutator is either building or modifying
926       // a structure with a long lifetime.
927       // In this case we aggressively raise old generation memory limits to
928       // postpone subsequent mark-sweep collection and thus trade memory
929       // space for the mutation speed.
930       old_gen_limit_factor_ = 2;
931     } else {
932       old_gen_limit_factor_ = 1;
933     }
934
935     old_gen_promotion_limit_ =
936         OldGenPromotionLimit(size_of_old_gen_at_last_old_space_gc_);
937     old_gen_allocation_limit_ =
938         OldGenAllocationLimit(size_of_old_gen_at_last_old_space_gc_);
939
940     old_gen_exhausted_ = false;
941   } else {
942     tracer_ = tracer;
943     Scavenge();
944     tracer_ = NULL;
945
946     UpdateSurvivalRateTrend(start_new_space_size);
947   }
948
949   if (!new_space_high_promotion_mode_active_ &&
950       new_space_.Capacity() == new_space_.MaximumCapacity() &&
951       IsStableOrIncreasingSurvivalTrend() &&
952       IsHighSurvivalRate()) {
953     // Stable high survival rates even though the young generation is at
954     // maximum capacity indicate that most objects will be promoted.
955     // To decrease scavenger pauses and final mark-sweep pauses, we
956     // have to limit maximal capacity of the young generation.
957     new_space_high_promotion_mode_active_ = true;
958     if (FLAG_trace_gc) {
959       PrintPID("Limited new space size due to high promotion rate: %d MB\n",
960                new_space_.InitialCapacity() / MB);
961     }
962     // Support for global pre-tenuring uses the high promotion mode as a
963     // heuristic indicator of whether to pretenure or not, so we trigger
964     // deoptimization here to take advantage of pre-tenuring as soon as
965     // possible.
966     if (FLAG_pretenure_literals) {
967       isolate_->stack_guard()->FullDeopt();
968     }
969   } else if (new_space_high_promotion_mode_active_ &&
970       IsStableOrDecreasingSurvivalTrend() &&
971       IsLowSurvivalRate()) {
972     // Decreasing low survival rates might indicate that the above high
973     // promotion mode is over and we should allow the young generation
974     // to grow again.
975     new_space_high_promotion_mode_active_ = false;
976     if (FLAG_trace_gc) {
977       PrintPID("Unlimited new space size due to low promotion rate: %d MB\n",
978                new_space_.MaximumCapacity() / MB);
979     }
980     // Trigger deoptimization here to turn off pre-tenuring as soon as
981     // possible.
982     if (FLAG_pretenure_literals) {
983       isolate_->stack_guard()->FullDeopt();
984     }
985   }
986
987   if (new_space_high_promotion_mode_active_ &&
988       new_space_.Capacity() > new_space_.InitialCapacity()) {
989     new_space_.Shrink();
990   }
991
992   isolate_->counters()->objs_since_last_young()->Set(0);
993
994   // Callbacks that fire after this point might trigger nested GCs and
995   // restart incremental marking, the assertion can't be moved down.
996   ASSERT(collector == SCAVENGER || incremental_marking()->IsStopped());
997
998   gc_post_processing_depth_++;
999   { DisableAssertNoAllocation allow_allocation;
1000     GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
1001     next_gc_likely_to_collect_more =
1002         isolate_->global_handles()->PostGarbageCollectionProcessing(
1003             collector, tracer);
1004   }
1005   gc_post_processing_depth_--;
1006
1007   // Update relocatables.
1008   Relocatable::PostGarbageCollectionProcessing();
1009
1010   if (collector == MARK_COMPACTOR) {
1011     // Register the amount of external allocated memory.
1012     amount_of_external_allocated_memory_at_last_global_gc_ =
1013         amount_of_external_allocated_memory_;
1014   }
1015
1016   {
1017     GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
1018     VMState<EXTERNAL> state(isolate_);
1019     CallGCEpilogueCallbacks(gc_type);
1020   }
1021
1022 #ifdef VERIFY_HEAP
1023   if (FLAG_verify_heap) {
1024     VerifyStringTable();
1025   }
1026 #endif
1027
1028   return next_gc_likely_to_collect_more;
1029 }
1030
1031
1032 void Heap::CallGCPrologueCallbacks(GCType gc_type, GCCallbackFlags flags) {
1033   if (gc_type == kGCTypeMarkSweepCompact && global_gc_prologue_callback_) {
1034     global_gc_prologue_callback_();
1035   }
1036   for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
1037     if (gc_type & gc_prologue_callbacks_[i].gc_type) {
1038       gc_prologue_callbacks_[i].callback(gc_type, flags);
1039     }
1040   }
1041 }
1042
1043
1044 void Heap::CallGCEpilogueCallbacks(GCType gc_type) {
1045   for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
1046     if (gc_type & gc_epilogue_callbacks_[i].gc_type) {
1047       gc_epilogue_callbacks_[i].callback(gc_type, kNoGCCallbackFlags);
1048     }
1049   }
1050   if (gc_type == kGCTypeMarkSweepCompact && global_gc_epilogue_callback_) {
1051     global_gc_epilogue_callback_();
1052   }
1053 }
1054
1055
1056 void Heap::MarkCompact(GCTracer* tracer) {
1057   gc_state_ = MARK_COMPACT;
1058   LOG(isolate_, ResourceEvent("markcompact", "begin"));
1059
1060   mark_compact_collector_.Prepare(tracer);
1061
1062   ms_count_++;
1063   tracer->set_full_gc_count(ms_count_);
1064
1065   MarkCompactPrologue();
1066
1067   mark_compact_collector_.CollectGarbage();
1068
1069   LOG(isolate_, ResourceEvent("markcompact", "end"));
1070
1071   gc_state_ = NOT_IN_GC;
1072
1073   isolate_->counters()->objs_since_last_full()->Set(0);
1074
1075   contexts_disposed_ = 0;
1076
1077   flush_monomorphic_ics_ = false;
1078 }
1079
1080
1081 void Heap::MarkCompactPrologue() {
1082   // At any old GC clear the keyed lookup cache to enable collection of unused
1083   // maps.
1084   isolate_->keyed_lookup_cache()->Clear();
1085   isolate_->context_slot_cache()->Clear();
1086   isolate_->descriptor_lookup_cache()->Clear();
1087   RegExpResultsCache::Clear(string_split_cache());
1088   RegExpResultsCache::Clear(regexp_multiple_cache());
1089
1090   isolate_->compilation_cache()->MarkCompactPrologue();
1091
1092   CompletelyClearInstanceofCache();
1093
1094   FlushNumberStringCache();
1095   if (FLAG_cleanup_code_caches_at_gc) {
1096     polymorphic_code_cache()->set_cache(undefined_value());
1097   }
1098
1099   ClearNormalizedMapCaches();
1100 }
1101
1102
1103 Object* Heap::FindCodeObject(Address a) {
1104   return isolate()->inner_pointer_to_code_cache()->
1105       GcSafeFindCodeForInnerPointer(a);
1106 }
1107
1108
1109 // Helper class for copying HeapObjects
1110 class ScavengeVisitor: public ObjectVisitor {
1111  public:
1112   explicit ScavengeVisitor(Heap* heap) : heap_(heap) {}
1113
1114   void VisitPointer(Object** p) { ScavengePointer(p); }
1115
1116   void VisitPointers(Object** start, Object** end) {
1117     // Copy all HeapObject pointers in [start, end)
1118     for (Object** p = start; p < end; p++) ScavengePointer(p);
1119   }
1120
1121  private:
1122   void ScavengePointer(Object** p) {
1123     Object* object = *p;
1124     if (!heap_->InNewSpace(object)) return;
1125     Heap::ScavengeObject(reinterpret_cast<HeapObject**>(p),
1126                          reinterpret_cast<HeapObject*>(object));
1127   }
1128
1129   Heap* heap_;
1130 };
1131
1132
1133 #ifdef VERIFY_HEAP
1134 // Visitor class to verify pointers in code or data space do not point into
1135 // new space.
1136 class VerifyNonPointerSpacePointersVisitor: public ObjectVisitor {
1137  public:
1138   void VisitPointers(Object** start, Object** end) {
1139     for (Object** current = start; current < end; current++) {
1140       if ((*current)->IsHeapObject()) {
1141         CHECK(!HEAP->InNewSpace(HeapObject::cast(*current)));
1142       }
1143     }
1144   }
1145 };
1146
1147
1148 static void VerifyNonPointerSpacePointers() {
1149   // Verify that there are no pointers to new space in spaces where we
1150   // do not expect them.
1151   VerifyNonPointerSpacePointersVisitor v;
1152   HeapObjectIterator code_it(HEAP->code_space());
1153   for (HeapObject* object = code_it.Next();
1154        object != NULL; object = code_it.Next())
1155     object->Iterate(&v);
1156
1157   // The old data space is normally swept conservatively, so the iterator
1158   // does not work there and we normally skip the next bit.
1159   if (!HEAP->old_data_space()->was_swept_conservatively()) {
1160     HeapObjectIterator data_it(HEAP->old_data_space());
1161     for (HeapObject* object = data_it.Next();
1162          object != NULL; object = data_it.Next())
1163       object->Iterate(&v);
1164   }
1165 }
1166 #endif  // VERIFY_HEAP
1167
1168
1169 void Heap::CheckNewSpaceExpansionCriteria() {
1170   if (new_space_.Capacity() < new_space_.MaximumCapacity() &&
1171       survived_since_last_expansion_ > new_space_.Capacity() &&
1172       !new_space_high_promotion_mode_active_) {
1173     // Grow the size of new space if there is room to grow, enough data
1174     // has survived scavenge since the last expansion and we are not in
1175     // high promotion mode.
1176     new_space_.Grow();
1177     survived_since_last_expansion_ = 0;
1178   }
1179 }
1180
1181
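// An object in new space counts as unscavenged if it has not been copied yet,
// i.e. its map word does not hold a forwarding address.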
1182 static bool IsUnscavengedHeapObject(Heap* heap, Object** p) {
1183   return heap->InNewSpace(*p) &&
1184       !HeapObject::cast(*p)->map_word().IsForwardingAddress();
1185 }
1186
1187
1188 void Heap::ScavengeStoreBufferCallback(
1189     Heap* heap,
1190     MemoryChunk* page,
1191     StoreBufferEvent event) {
1192   heap->store_buffer_rebuilder_.Callback(page, event);
1193 }
1194
1195
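// Invoked by the store buffer while it scans pages during a scavenge.  Pages
// whose entries would consume too large a share of the remaining store-buffer
// space are switched to scan-on-scavenge mode and their entries are dropped;
// pages with a reasonable number of entries keep them in the store buffer.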
1196 void StoreBufferRebuilder::Callback(MemoryChunk* page, StoreBufferEvent event) {
1197   if (event == kStoreBufferStartScanningPagesEvent) {
1198     start_of_current_page_ = NULL;
1199     current_page_ = NULL;
1200   } else if (event == kStoreBufferScanningPageEvent) {
1201     if (current_page_ != NULL) {
1202       // If this page already overflowed the store buffer during this iteration.
1203       if (current_page_->scan_on_scavenge()) {
1204         // Then we should wipe out the entries that have been added for it.
1205         store_buffer_->SetTop(start_of_current_page_);
1206       } else if (store_buffer_->Top() - start_of_current_page_ >=
1207                  (store_buffer_->Limit() - store_buffer_->Top()) >> 2) {
1208         // Did we find too many pointers in the previous page?  The heuristic is
1209         // that no page can take more than 1/5 the remaining slots in the store
1210         // buffer.
1211         current_page_->set_scan_on_scavenge(true);
1212         store_buffer_->SetTop(start_of_current_page_);
1213       } else {
1214         // In this case the page we scanned took a reasonable number of slots in
1215         // the store buffer.  It has now been rehabilitated and is no longer
1216         // marked scan_on_scavenge.
1217         ASSERT(!current_page_->scan_on_scavenge());
1218       }
1219     }
1220     start_of_current_page_ = store_buffer_->Top();
1221     current_page_ = page;
1222   } else if (event == kStoreBufferFullEvent) {
1223     // The current page overflowed the store buffer again.  Wipe out its entries
1224     // in the store buffer and mark it scan-on-scavenge again.  This may happen
1225     // several times while scanning.
1226     if (current_page_ == NULL) {
1227       // Store Buffer overflowed while scanning promoted objects.  These are not
1228       // in any particular page, though they are likely to be clustered by the
1229       // allocation routines.
1230       store_buffer_->EnsureSpace(StoreBuffer::kStoreBufferSize);
1231     } else {
1232       // Store Buffer overflowed while scanning a particular old space page for
1233       // pointers to new space.
1234       ASSERT(current_page_ == page);
1235       ASSERT(page != NULL);
1236       current_page_->set_scan_on_scavenge(true);
1237       ASSERT(start_of_current_page_ != store_buffer_->Top());
1238       store_buffer_->SetTop(start_of_current_page_);
1239     }
1240   } else {
1241     UNREACHABLE();
1242   }
1243 }
1244
1245
1246 void PromotionQueue::Initialize() {
1247   // Assumes that a NewSpacePage exactly fits a whole number of promotion queue
1248   // entries (where each is a pair of intptr_t). This allows us to simplify
1249   // the test for when to switch pages.
1250   ASSERT((Page::kPageSize - MemoryChunk::kBodyOffset) % (2 * kPointerSize)
1251          == 0);
1252   limit_ = reinterpret_cast<intptr_t*>(heap_->new_space()->ToSpaceStart());
1253   front_ = rear_ =
1254       reinterpret_cast<intptr_t*>(heap_->new_space()->ToSpaceEnd());
1255   emergency_stack_ = NULL;
1256   guard_ = false;
1257 }
1258
1259
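// Copies the promotion-queue entries that live on the page containing rear_
// onto a freshly allocated emergency stack, presumably so that the page can be
// reused for allocation without clobbering the queue.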
1260 void PromotionQueue::RelocateQueueHead() {
1261   ASSERT(emergency_stack_ == NULL);
1262
1263   Page* p = Page::FromAllocationTop(reinterpret_cast<Address>(rear_));
1264   intptr_t* head_start = rear_;
1265   intptr_t* head_end =
1266       Min(front_, reinterpret_cast<intptr_t*>(p->area_end()));
1267
1268   int entries_count =
1269       static_cast<int>(head_end - head_start) / kEntrySizeInWords;
1270
1271   emergency_stack_ = new List<Entry>(2 * entries_count);
1272
1273   while (head_start != head_end) {
1274     int size = static_cast<int>(*(head_start++));
1275     HeapObject* obj = reinterpret_cast<HeapObject*>(*(head_start++));
1276     emergency_stack_->Add(Entry(obj, size));
1277   }
1278   rear_ = head_end;
1279 }
1280
1281
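// Weak-object retainer used after a scavenge: objects outside from space are
// kept as-is, objects in from space survive only if they have already been
// copied (their map word is a forwarding address), and everything else is
// dropped.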
1282 class ScavengeWeakObjectRetainer : public WeakObjectRetainer {
1283  public:
1284   explicit ScavengeWeakObjectRetainer(Heap* heap) : heap_(heap) { }
1285
1286   virtual Object* RetainAs(Object* object) {
1287     if (!heap_->InFromSpace(object)) {
1288       return object;
1289     }
1290
1291     MapWord map_word = HeapObject::cast(object)->map_word();
1292     if (map_word.IsForwardingAddress()) {
1293       return map_word.ToForwardingAddress();
1294     }
1295     return NULL;
1296   }
1297
1298  private:
1299   Heap* heap_;
1300 };
1301
1302
1303 void Heap::Scavenge() {
1304   RelocationLock relocation_lock(this);
1305
1306 #ifdef VERIFY_HEAP
1307   if (FLAG_verify_heap) VerifyNonPointerSpacePointers();
1308 #endif
1309
1310   gc_state_ = SCAVENGE;
1311
1312   // Implements Cheney's copying algorithm
1313   LOG(isolate_, ResourceEvent("scavenge", "begin"));
1314
1315   // Clear descriptor cache.
1316   isolate_->descriptor_lookup_cache()->Clear();
1317
1318   // Used for updating survived_since_last_expansion_ at function end.
1319   intptr_t survived_watermark = PromotedSpaceSizeOfObjects();
1320
1321   CheckNewSpaceExpansionCriteria();
1322
1323   SelectScavengingVisitorsTable();
1324
1325   incremental_marking()->PrepareForScavenge();
1326
1327   paged_space(OLD_DATA_SPACE)->EnsureSweeperProgress(new_space_.Size());
1328   paged_space(OLD_POINTER_SPACE)->EnsureSweeperProgress(new_space_.Size());
1329
1330   // Flip the semispaces.  After flipping, to space is empty, from space has
1331   // live objects.
1332   new_space_.Flip();
1333   new_space_.ResetAllocationInfo();
1334
1335   // We need to sweep newly copied objects which can be either in the
1336   // to space or promoted to the old generation.  For to-space
1337   // objects, we treat the bottom of the to space as a queue.  Newly
1338   // copied and unswept objects lie between a 'front' mark and the
1339   // allocation pointer.
1340   //
1341   // Promoted objects can go into various old-generation spaces, and
1342   // can be allocated internally in the spaces (from the free list).
1343   // We treat the top of the to space as a queue of addresses of
1344   // promoted objects.  The addresses of newly promoted and unswept
1345   // objects lie between a 'front' mark and a 'rear' mark that is
1346   // updated as a side effect of promoting an object.
1347   //
1348   // There is guaranteed to be enough room at the top of the to space
1349   // for the addresses of promoted objects: every object promoted
1350   // frees up its size in bytes from the top of the new space, and
1351   // objects are at least one pointer in size.
1352   Address new_space_front = new_space_.ToSpaceStart();
1353   promotion_queue_.Initialize();
1354
1355 #ifdef DEBUG
1356   store_buffer()->Clean();
1357 #endif
1358
1359   ScavengeVisitor scavenge_visitor(this);
1360   // Copy roots.
1361   IterateRoots(&scavenge_visitor, VISIT_ALL_IN_SCAVENGE);
1362
1363   // Copy objects reachable from the old generation.
1364   {
1365     StoreBufferRebuildScope scope(this,
1366                                   store_buffer(),
1367                                   &ScavengeStoreBufferCallback);
1368     store_buffer()->IteratePointersToNewSpace(&ScavengeObject);
1369   }
1370
1371   // Copy objects reachable from cells by scavenging cell values directly.
1372   HeapObjectIterator cell_iterator(cell_space_);
1373   for (HeapObject* heap_object = cell_iterator.Next();
1374        heap_object != NULL;
1375        heap_object = cell_iterator.Next()) {
1376     if (heap_object->IsJSGlobalPropertyCell()) {
1377       JSGlobalPropertyCell* cell = JSGlobalPropertyCell::cast(heap_object);
1378       Address value_address = cell->ValueAddress();
1379       scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(value_address));
1380     }
1381   }
1382
1383   // Copy objects reachable from the code flushing candidates list.
1384   MarkCompactCollector* collector = mark_compact_collector();
1385   if (collector->is_code_flushing_enabled()) {
1386     collector->code_flusher()->IteratePointersToFromSpace(&scavenge_visitor);
1387   }
1388
1389   // Scavenge the object reachable from the native contexts list directly.
1390   scavenge_visitor.VisitPointer(BitCast<Object**>(&native_contexts_list_));
1391
1392   new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
1393
1394   while (isolate()->global_handles()->IterateObjectGroups(
1395       &scavenge_visitor, &IsUnscavengedHeapObject)) {
1396     new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
1397   }
1398   isolate()->global_handles()->RemoveObjectGroups();
1399   isolate()->global_handles()->RemoveImplicitRefGroups();
1400
1401   isolate_->global_handles()->IdentifyNewSpaceWeakIndependentHandles(
1402       &IsUnscavengedHeapObject);
1403   isolate_->global_handles()->IterateNewSpaceWeakIndependentRoots(
1404       &scavenge_visitor);
1405   new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
1406
1407   UpdateNewSpaceReferencesInExternalStringTable(
1408       &UpdateNewSpaceReferenceInExternalStringTableEntry);
1409
1410   error_object_list_.UpdateReferencesInNewSpace(this);
1411
1412   promotion_queue_.Destroy();
1413
1414   if (!FLAG_watch_ic_patching) {
1415     isolate()->runtime_profiler()->UpdateSamplesAfterScavenge();
1416   }
1417   incremental_marking()->UpdateMarkingDequeAfterScavenge();
1418
1419   ScavengeWeakObjectRetainer weak_object_retainer(this);
1420   ProcessWeakReferences(&weak_object_retainer);
1421
1422   ASSERT(new_space_front == new_space_.top());
1423
1424   // Set age mark.
1425   new_space_.set_age_mark(new_space_.top());
1426
1427   new_space_.LowerInlineAllocationLimit(
1428       new_space_.inline_allocation_limit_step());
1429
1430   // Update how much has survived scavenge.
1431   IncrementYoungSurvivorsCounter(static_cast<int>(
1432       (PromotedSpaceSizeOfObjects() - survived_watermark) + new_space_.Size()));
1433
1434   LOG(isolate_, ResourceEvent("scavenge", "end"));
1435
1436   gc_state_ = NOT_IN_GC;
1437
1438   scavenges_since_last_idle_round_++;
1439 }
1440
1441
1442 String* Heap::UpdateNewSpaceReferenceInExternalStringTableEntry(Heap* heap,
1443                                                                 Object** p) {
1444   MapWord first_word = HeapObject::cast(*p)->map_word();
1445
1446   if (!first_word.IsForwardingAddress()) {
1447     // Unreachable external string can be finalized.
1448     heap->FinalizeExternalString(String::cast(*p));
1449     return NULL;
1450   }
1451
1452   // String is still reachable.
1453   return String::cast(first_word.ToForwardingAddress());
1454 }
1455
1456
1457 void Heap::UpdateNewSpaceReferencesInExternalStringTable(
1458     ExternalStringTableUpdaterCallback updater_func) {
1459 #ifdef VERIFY_HEAP
1460   if (FLAG_verify_heap) {
1461     external_string_table_.Verify();
1462   }
1463 #endif
1464
1465   if (external_string_table_.new_space_strings_.is_empty()) return;
1466
1467   Object** start = &external_string_table_.new_space_strings_[0];
1468   Object** end = start + external_string_table_.new_space_strings_.length();
1469   Object** last = start;
1470
1471   for (Object** p = start; p < end; ++p) {
1472     ASSERT(InFromSpace(*p));
1473     String* target = updater_func(this, p);
1474
1475     if (target == NULL) continue;
1476
1477     ASSERT(target->IsExternalString());
1478
1479     if (InNewSpace(target)) {
1480       // String is still in new space.  Update the table entry.
1481       *last = target;
1482       ++last;
1483     } else {
1484       // String got promoted.  Move it to the old string list.
1485       external_string_table_.AddOldString(target);
1486     }
1487   }
1488
1489   ASSERT(last <= end);
1490   external_string_table_.ShrinkNewStrings(static_cast<int>(last - start));
1491 }
1492
1493
1494 void Heap::UpdateReferencesInExternalStringTable(
1495     ExternalStringTableUpdaterCallback updater_func) {
1496
1497   // Update old space string references.
1498   if (external_string_table_.old_space_strings_.length() > 0) {
1499     Object** start = &external_string_table_.old_space_strings_[0];
1500     Object** end = start + external_string_table_.old_space_strings_.length();
1501     for (Object** p = start; p < end; ++p) *p = updater_func(this, p);
1502   }
1503
1504   UpdateNewSpaceReferencesInExternalStringTable(updater_func);
1505 }
1506
1507
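     // Walks a context's weak list of optimized JSFunctions, which are linked
     // through the next_function_link field.  Each element is offered to the
     // retainer; survivors are relinked and the new head of the list is
     // returned.  When record_slots is true, the rewritten link slots are also
     // reported to the mark-compact collector so compaction can update them.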
1508 static Object* ProcessFunctionWeakReferences(Heap* heap,
1509                                              Object* function,
1510                                              WeakObjectRetainer* retainer,
1511                                              bool record_slots) {
1512   Object* undefined = heap->undefined_value();
1513   Object* head = undefined;
1514   JSFunction* tail = NULL;
1515   Object* candidate = function;
1516   while (candidate != undefined) {
1517     // Check whether to keep the candidate in the list.
1518     JSFunction* candidate_function = reinterpret_cast<JSFunction*>(candidate);
1519     Object* retain = retainer->RetainAs(candidate);
1520     if (retain != NULL) {
1521       if (head == undefined) {
1522         // First element in the list.
1523         head = retain;
1524       } else {
1525         // Subsequent elements in the list.
1526         ASSERT(tail != NULL);
1527         tail->set_next_function_link(retain);
1528         if (record_slots) {
1529           Object** next_function =
1530               HeapObject::RawField(tail, JSFunction::kNextFunctionLinkOffset);
1531           heap->mark_compact_collector()->RecordSlot(
1532               next_function, next_function, retain);
1533         }
1534       }
1535       // Retained function is new tail.
1536       candidate_function = reinterpret_cast<JSFunction*>(retain);
1537       tail = candidate_function;
1538
1539       ASSERT(retain->IsUndefined() || retain->IsJSFunction());
1540
1541       if (retain == undefined) break;
1542     }
1543
1544     // Move to next element in the list.
1545     candidate = candidate_function->next_function_link();
1546   }
1547
1548   // Terminate the list if it contains one or more elements.
1549   if (tail != NULL) {
1550     tail->set_next_function_link(undefined);
1551   }
1552
1553   return head;
1554 }
1555
1556
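     // Applies the same retain-and-relink pattern to the global weak list of
     // native contexts, linked through Context::NEXT_CONTEXT_LINK.  For every
     // retained context the weak list of its optimized functions is processed
     // as well (see ProcessFunctionWeakReferences above).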
1557 void Heap::ProcessWeakReferences(WeakObjectRetainer* retainer) {
1558   Object* undefined = undefined_value();
1559   Object* head = undefined;
1560   Context* tail = NULL;
1561   Object* candidate = native_contexts_list_;
1562
1563   // We don't record weak slots during marking or scavenges.  Instead we
1564   // do it once when we complete a mark-compact cycle.  Note that the
1565   // write barrier has no effect if we are already in the middle of a
1566   // compacting mark-sweep cycle, so we have to record slots manually.
1567   bool record_slots =
1568       gc_state() == MARK_COMPACT &&
1569       mark_compact_collector()->is_compacting();
1570
1571   while (candidate != undefined) {
1572     // Check whether to keep the candidate in the list.
1573     Context* candidate_context = reinterpret_cast<Context*>(candidate);
1574     Object* retain = retainer->RetainAs(candidate);
1575     if (retain != NULL) {
1576       if (head == undefined) {
1577         // First element in the list.
1578         head = retain;
1579       } else {
1580         // Subsequent elements in the list.
1581         ASSERT(tail != NULL);
1582         tail->set_unchecked(this,
1583                             Context::NEXT_CONTEXT_LINK,
1584                             retain,
1585                             UPDATE_WRITE_BARRIER);
1586
1587         if (record_slots) {
1588           Object** next_context =
1589               HeapObject::RawField(
1590                   tail, FixedArray::SizeFor(Context::NEXT_CONTEXT_LINK));
1591           mark_compact_collector()->RecordSlot(
1592               next_context, next_context, retain);
1593         }
1594       }
1595       // Retained context is new tail.
1596       candidate_context = reinterpret_cast<Context*>(retain);
1597       tail = candidate_context;
1598
1599       if (retain == undefined) break;
1600
1601       // Process the weak list of optimized functions for the context.
1602       Object* function_list_head =
1603           ProcessFunctionWeakReferences(
1604               this,
1605               candidate_context->get(Context::OPTIMIZED_FUNCTIONS_LIST),
1606               retainer,
1607               record_slots);
1608       candidate_context->set_unchecked(this,
1609                                        Context::OPTIMIZED_FUNCTIONS_LIST,
1610                                        function_list_head,
1611                                        UPDATE_WRITE_BARRIER);
1612       if (record_slots) {
1613         Object** optimized_functions =
1614             HeapObject::RawField(
1615                 tail, FixedArray::SizeFor(Context::OPTIMIZED_FUNCTIONS_LIST));
1616         mark_compact_collector()->RecordSlot(
1617             optimized_functions, optimized_functions, function_list_head);
1618       }
1619     }
1620
1621     // Move to next element in the list.
1622     candidate = candidate_context->get(Context::NEXT_CONTEXT_LINK);
1623   }
1624
1625   // Terminate the list if it contains one or more elements.
1626   if (tail != NULL) {
1627     tail->set_unchecked(this,
1628                         Context::NEXT_CONTEXT_LINK,
1629                         Heap::undefined_value(),
1630                         UPDATE_WRITE_BARRIER);
1631   }
1632
1633   // Update the head of the list of contexts.
1634   native_contexts_list_ = head;
1635 }
1636
1637
1638 void Heap::VisitExternalResources(v8::ExternalResourceVisitor* visitor) {
1639   AssertNoAllocation no_allocation;
1640
1641   // Both the external string table and the string table may contain
1642   // external strings, but neither lists them exhaustively, nor is the
1643   // intersection set empty.  Therefore we iterate over the external string
1644   // table first, ignoring internalized strings, and then over the
1645   // internalized string table.
1646
1647   class ExternalStringTableVisitorAdapter : public ObjectVisitor {
1648    public:
1649     explicit ExternalStringTableVisitorAdapter(
1650         v8::ExternalResourceVisitor* visitor) : visitor_(visitor) {}
1651     virtual void VisitPointers(Object** start, Object** end) {
1652       for (Object** p = start; p < end; p++) {
1653         // Visit non-internalized external strings,
1654         // since internalized strings are listed in the string table.
1655         if (!(*p)->IsInternalizedString()) {
1656           ASSERT((*p)->IsExternalString());
1657           visitor_->VisitExternalString(Utils::ToLocal(
1658               Handle<String>(String::cast(*p))));
1659         }
1660       }
1661     }
1662    private:
1663     v8::ExternalResourceVisitor* visitor_;
1664   } external_string_table_visitor(visitor);
1665
1666   external_string_table_.Iterate(&external_string_table_visitor);
1667
1668   class StringTableVisitorAdapter : public ObjectVisitor {
1669    public:
1670     explicit StringTableVisitorAdapter(
1671         v8::ExternalResourceVisitor* visitor) : visitor_(visitor) {}
1672     virtual void VisitPointers(Object** start, Object** end) {
1673       for (Object** p = start; p < end; p++) {
1674         if ((*p)->IsExternalString()) {
1675           ASSERT((*p)->IsInternalizedString());
1676           visitor_->VisitExternalString(Utils::ToLocal(
1677               Handle<String>(String::cast(*p))));
1678         }
1679       }
1680     }
1681    private:
1682     v8::ExternalResourceVisitor* visitor_;
1683   } string_table_visitor(visitor);
1684
1685   string_table()->IterateElements(&string_table_visitor);
1686 }
1687
1688
1689 class NewSpaceScavenger : public StaticNewSpaceVisitor<NewSpaceScavenger> {
1690  public:
1691   static inline void VisitPointer(Heap* heap, Object** p) {
1692     Object* object = *p;
1693     if (!heap->InNewSpace(object)) return;
1694     Heap::ScavengeObject(reinterpret_cast<HeapObject**>(p),
1695                          reinterpret_cast<HeapObject*>(object));
1696   }
1697 };
1698
1699
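     // The copy loop of the scavenger: new_space_front is the scan pointer and
     // new_space_.top() the allocation pointer of to-space.  Visiting an object
     // between the two may copy further objects into to-space (advancing top)
     // or promote them into old space (filling the promotion queue).  Promoted
     // objects are drained from the queue and rescanned for pointers into the
     // from-space.  The loop ends once the scan pointer catches up with top.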
1700 Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
1701                          Address new_space_front) {
1702   do {
1703     SemiSpace::AssertValidRange(new_space_front, new_space_.top());
1704     // The addresses new_space_front and new_space_.top() define a
1705     // queue of unprocessed copied objects.  Process them until the
1706     // queue is empty.
1707     while (new_space_front != new_space_.top()) {
1708       if (!NewSpacePage::IsAtEnd(new_space_front)) {
1709         HeapObject* object = HeapObject::FromAddress(new_space_front);
1710         new_space_front +=
1711           NewSpaceScavenger::IterateBody(object->map(), object);
1712       } else {
1713         new_space_front =
1714             NewSpacePage::FromLimit(new_space_front)->next_page()->area_start();
1715       }
1716     }
1717
1718     // Promote and process all the to-be-promoted objects.
1719     {
1720       StoreBufferRebuildScope scope(this,
1721                                     store_buffer(),
1722                                     &ScavengeStoreBufferCallback);
1723       while (!promotion_queue()->is_empty()) {
1724         HeapObject* target;
1725         int size;
1726         promotion_queue()->remove(&target, &size);
1727
1728         // A promoted object might already have been partially visited
1729         // during old space pointer iteration.  Thus we search specifically
1730         // for pointers into the from-semispace instead of looking for
1731         // pointers into new space.
1732         ASSERT(!target->IsMap());
1733         IterateAndMarkPointersToFromSpace(target->address(),
1734                                           target->address() + size,
1735                                           &ScavengeObject);
1736       }
1737     }
1738
1739     // Take another spin if there are now unswept objects in new space
1740     // (there are currently no more unswept promoted objects).
1741   } while (new_space_front != new_space_.top());
1742
1743   return new_space_front;
1744 }
1745
1746
1747 STATIC_ASSERT((FixedDoubleArray::kHeaderSize & kDoubleAlignmentMask) == 0);
1748
1749
1750 INLINE(static HeapObject* EnsureDoubleAligned(Heap* heap,
1751                                               HeapObject* object,
1752                                               int size));
1753
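     // The caller allocates one extra pointer-sized word (see EvacuateObject
     // below).  If the raw address is not double aligned, a one-word filler is
     // written at the start and the object begins one word higher; otherwise
     // the spare word at the end of the allocation becomes the filler.
     // Illustration (assuming a 32-bit layout, kPointerSize == 4, doubles
     // aligned to 8): an allocation at 0x1004 yields a filler at 0x1004 and
     // the object at 0x1008; an allocation at 0x1008 keeps the object there
     // and fills the trailing 4 bytes.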
1754 static HeapObject* EnsureDoubleAligned(Heap* heap,
1755                                        HeapObject* object,
1756                                        int size) {
1757   if ((OffsetFrom(object->address()) & kDoubleAlignmentMask) != 0) {
1758     heap->CreateFillerObjectAt(object->address(), kPointerSize);
1759     return HeapObject::FromAddress(object->address() + kPointerSize);
1760   } else {
1761     heap->CreateFillerObjectAt(object->address() + size - kPointerSize,
1762                                kPointerSize);
1763     return object;
1764   }
1765 }
1766
1767
1768 enum LoggingAndProfiling {
1769   LOGGING_AND_PROFILING_ENABLED,
1770   LOGGING_AND_PROFILING_DISABLED
1771 };
1772
1773
1774 enum MarksHandling { TRANSFER_MARKS, IGNORE_MARKS };
1775
1776
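     // The template parameters bake the marks-handling and logging/profiling
     // behaviour into the generated code, so four specializations exist (see
     // InitializeScavengingVisitorsTables below) and the per-object scavenge
     // path needs no runtime checks for these modes.  Initialize() fills a
     // dispatch table indexed by visitor id; Heap::SelectScavengingVisitorsTable
     // copies the table matching the current heap state into
     // scavenging_visitors_table_.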
1777 template<MarksHandling marks_handling,
1778          LoggingAndProfiling logging_and_profiling_mode>
1779 class ScavengingVisitor : public StaticVisitorBase {
1780  public:
1781   static void Initialize() {
1782     table_.Register(kVisitSeqOneByteString, &EvacuateSeqOneByteString);
1783     table_.Register(kVisitSeqTwoByteString, &EvacuateSeqTwoByteString);
1784     table_.Register(kVisitShortcutCandidate, &EvacuateShortcutCandidate);
1785     table_.Register(kVisitByteArray, &EvacuateByteArray);
1786     table_.Register(kVisitFixedArray, &EvacuateFixedArray);
1787     table_.Register(kVisitFixedDoubleArray, &EvacuateFixedDoubleArray);
1788
1789     table_.Register(kVisitNativeContext,
1790                     &ObjectEvacuationStrategy<POINTER_OBJECT>::
1791                         template VisitSpecialized<Context::kSize>);
1792
1793     table_.Register(kVisitConsString,
1794                     &ObjectEvacuationStrategy<POINTER_OBJECT>::
1795                         template VisitSpecialized<ConsString::kSize>);
1796
1797     table_.Register(kVisitSlicedString,
1798                     &ObjectEvacuationStrategy<POINTER_OBJECT>::
1799                         template VisitSpecialized<SlicedString::kSize>);
1800
1801     table_.Register(kVisitSymbol,
1802                     &ObjectEvacuationStrategy<POINTER_OBJECT>::
1803                         template VisitSpecialized<Symbol::kSize>);
1804
1805     table_.Register(kVisitSharedFunctionInfo,
1806                     &ObjectEvacuationStrategy<POINTER_OBJECT>::
1807                         template VisitSpecialized<SharedFunctionInfo::kSize>);
1808
1809     table_.Register(kVisitJSWeakMap,
1810                     &ObjectEvacuationStrategy<POINTER_OBJECT>::
1811                     Visit);
1812
1813     table_.Register(kVisitJSRegExp,
1814                     &ObjectEvacuationStrategy<POINTER_OBJECT>::
1815                     Visit);
1816
1817     if (marks_handling == IGNORE_MARKS) {
1818       table_.Register(kVisitJSFunction,
1819                       &ObjectEvacuationStrategy<POINTER_OBJECT>::
1820                           template VisitSpecialized<JSFunction::kSize>);
1821     } else {
1822       table_.Register(kVisitJSFunction, &EvacuateJSFunction);
1823     }
1824
1825     table_.RegisterSpecializations<ObjectEvacuationStrategy<DATA_OBJECT>,
1826                                    kVisitDataObject,
1827                                    kVisitDataObjectGeneric>();
1828
1829     table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>,
1830                                    kVisitJSObject,
1831                                    kVisitJSObjectGeneric>();
1832
1833     table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>,
1834                                    kVisitStruct,
1835                                    kVisitStructGeneric>();
1836   }
1837
1838   static VisitorDispatchTable<ScavengingCallback>* GetTable() {
1839     return &table_;
1840   }
1841
1842  private:
1843   enum ObjectContents  { DATA_OBJECT, POINTER_OBJECT };
1844   enum SizeRestriction { SMALL, UNKNOWN_SIZE };
1845
1846   static void RecordCopiedObject(Heap* heap, HeapObject* obj) {
1847     bool should_record = false;
1848 #ifdef DEBUG
1849     should_record = FLAG_heap_stats;
1850 #endif
1851     should_record = should_record || FLAG_log_gc;
1852     if (should_record) {
1853       if (heap->new_space()->Contains(obj)) {
1854         heap->new_space()->RecordAllocation(obj);
1855       } else {
1856         heap->new_space()->RecordPromotion(obj);
1857       }
1858     }
1859   }
1860
1861   // Helper function used during evacuation to copy a source object to an
1862   // already-allocated target object and to install the forwarding pointer
1863   // in the source object's map word.
1864   INLINE(static void MigrateObject(Heap* heap,
1865                                    HeapObject* source,
1866                                    HeapObject* target,
1867                                    int size)) {
1868     // Copy the content of source to target.
1869     heap->CopyBlock(target->address(), source->address(), size);
1870
1871     // Set the forwarding address.
1872     source->set_map_word(MapWord::FromForwardingAddress(target));
1873
1874     if (logging_and_profiling_mode == LOGGING_AND_PROFILING_ENABLED) {
1875       // Update NewSpace stats if necessary.
1876       RecordCopiedObject(heap, target);
1877       HEAP_PROFILE(heap, ObjectMoveEvent(source->address(), target->address()));
1878       Isolate* isolate = heap->isolate();
1879       if (isolate->logger()->is_logging_code_events() ||
1880           isolate->cpu_profiler()->is_profiling()) {
1881         if (target->IsSharedFunctionInfo()) {
1882           PROFILE(isolate, SharedFunctionInfoMoveEvent(
1883               source->address(), target->address()));
1884         }
1885       }
1886     }
1887
1888     if (marks_handling == TRANSFER_MARKS) {
1889       if (Marking::TransferColor(source, target)) {
1890         MemoryChunk::IncrementLiveBytesFromGC(target->address(), size);
1891       }
1892     }
1893   }
1894
1895
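       // Generic evacuation path.  If the object should be promoted, space is
       // allocated in the old generation (the large object space for oversized
       // objects, old data space for pointer-free objects, old pointer space
       // otherwise); on success the object is migrated and, if it contains
       // pointers, queued for later rescanning.  Otherwise the object is
       // copied within new space; that allocation is taken with
       // ToObjectUnchecked(), i.e. it is assumed to succeed.  One extra word
       // is allocated when double alignment is required (EnsureDoubleAligned).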
1896   template<ObjectContents object_contents,
1897            SizeRestriction size_restriction,
1898            int alignment>
1899   static inline void EvacuateObject(Map* map,
1900                                     HeapObject** slot,
1901                                     HeapObject* object,
1902                                     int object_size) {
1903     SLOW_ASSERT((size_restriction != SMALL) ||
1904                 (object_size <= Page::kMaxNonCodeHeapObjectSize));
1905     SLOW_ASSERT(object->Size() == object_size);
1906
1907     int allocation_size = object_size;
1908     if (alignment != kObjectAlignment) {
1909       ASSERT(alignment == kDoubleAlignment);
1910       allocation_size += kPointerSize;
1911     }
1912
1913     Heap* heap = map->GetHeap();
1914     if (heap->ShouldBePromoted(object->address(), object_size)) {
1915       MaybeObject* maybe_result;
1916
1917       if ((size_restriction != SMALL) &&
1918           (allocation_size > Page::kMaxNonCodeHeapObjectSize)) {
1919         maybe_result = heap->lo_space()->AllocateRaw(allocation_size,
1920                                                      NOT_EXECUTABLE);
1921       } else {
1922         if (object_contents == DATA_OBJECT) {
1923           maybe_result = heap->old_data_space()->AllocateRaw(allocation_size);
1924         } else {
1925           maybe_result =
1926               heap->old_pointer_space()->AllocateRaw(allocation_size);
1927         }
1928       }
1929
1930       Object* result = NULL;  // Initialization to please compiler.
1931       if (maybe_result->ToObject(&result)) {
1932         HeapObject* target = HeapObject::cast(result);
1933
1934         if (alignment != kObjectAlignment) {
1935           target = EnsureDoubleAligned(heap, target, allocation_size);
1936         }
1937
1938         // Order is important: the slot might be inside the target if the
1939         // target was allocated over a dead object and the slot comes from
1940         // the store buffer.
1941         *slot = target;
1942         MigrateObject(heap, object, target, object_size);
1943
1944         if (object_contents == POINTER_OBJECT) {
1945           if (map->instance_type() == JS_FUNCTION_TYPE) {
1946             heap->promotion_queue()->insert(
1947                 target, JSFunction::kNonWeakFieldsEndOffset);
1948           } else {
1949             heap->promotion_queue()->insert(target, object_size);
1950           }
1951         }
1952
1953         heap->tracer()->increment_promoted_objects_size(object_size);
1954         return;
1955       }
1956     }
1957     MaybeObject* allocation = heap->new_space()->AllocateRaw(allocation_size);
1958     heap->promotion_queue()->SetNewLimit(heap->new_space()->top());
1959     Object* result = allocation->ToObjectUnchecked();
1960     HeapObject* target = HeapObject::cast(result);
1961
1962     if (alignment != kObjectAlignment) {
1963       target = EnsureDoubleAligned(heap, target, allocation_size);
1964     }
1965
1966     // Order is important: the slot might be inside the target if the
1967     // target was allocated over a dead object and the slot comes from
1968     // the store buffer.
1969     *slot = target;
1970     MigrateObject(heap, object, target, object_size);
1971     return;
1972   }
1973
1974
1975   static inline void EvacuateJSFunction(Map* map,
1976                                         HeapObject** slot,
1977                                         HeapObject* object) {
1978     ObjectEvacuationStrategy<POINTER_OBJECT>::
1979         template VisitSpecialized<JSFunction::kSize>(map, slot, object);
1980
1981     HeapObject* target = *slot;
1982     MarkBit mark_bit = Marking::MarkBitFrom(target);
1983     if (Marking::IsBlack(mark_bit)) {
1984       // This object is black and it might not be rescanned by the marker.
1985       // We should explicitly record the code entry slot for compaction
1986       // because promotion queue processing (IterateAndMarkPointersToFromSpace)
1987       // will miss it, as it is not HeapObject-tagged.
1988       Address code_entry_slot =
1989           target->address() + JSFunction::kCodeEntryOffset;
1990       Code* code = Code::cast(Code::GetObjectFromEntryAddress(code_entry_slot));
1991       map->GetHeap()->mark_compact_collector()->
1992           RecordCodeEntrySlot(code_entry_slot, code);
1993     }
1994   }
1995
1996
1997   static inline void EvacuateFixedArray(Map* map,
1998                                         HeapObject** slot,
1999                                         HeapObject* object) {
2000     int object_size = FixedArray::BodyDescriptor::SizeOf(map, object);
2001     EvacuateObject<POINTER_OBJECT, UNKNOWN_SIZE, kObjectAlignment>(map,
2002                                                  slot,
2003                                                  object,
2004                                                  object_size);
2005   }
2006
2007
2008   static inline void EvacuateFixedDoubleArray(Map* map,
2009                                               HeapObject** slot,
2010                                               HeapObject* object) {
2011     int length = reinterpret_cast<FixedDoubleArray*>(object)->length();
2012     int object_size = FixedDoubleArray::SizeFor(length);
2013     EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE, kDoubleAlignment>(
2014         map,
2015         slot,
2016         object,
2017         object_size);
2018   }
2019
2020
2021   static inline void EvacuateByteArray(Map* map,
2022                                        HeapObject** slot,
2023                                        HeapObject* object) {
2024     int object_size = reinterpret_cast<ByteArray*>(object)->ByteArraySize();
2025     EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE, kObjectAlignment>(
2026         map, slot, object, object_size);
2027   }
2028
2029
2030   static inline void EvacuateSeqOneByteString(Map* map,
2031                                             HeapObject** slot,
2032                                             HeapObject* object) {
2033     int object_size = SeqOneByteString::cast(object)->
2034         SeqOneByteStringSize(map->instance_type());
2035     EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE, kObjectAlignment>(
2036         map, slot, object, object_size);
2037   }
2038
2039
2040   static inline void EvacuateSeqTwoByteString(Map* map,
2041                                               HeapObject** slot,
2042                                               HeapObject* object) {
2043     int object_size = SeqTwoByteString::cast(object)->
2044         SeqTwoByteStringSize(map->instance_type());
2045     EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE, kObjectAlignment>(
2046         map, slot, object, object_size);
2047   }
2048
2049
2050   static inline bool IsShortcutCandidate(int type) {
2051     return ((type & kShortcutTypeMask) == kShortcutTypeTag);
2052   }
2053
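       // A shortcut candidate is a cons string.  When its second part is the
       // empty string, the slot can be redirected straight to the first part
       // and the wrapper is never copied.  The shortcut is only taken when
       // marks are ignored; with TRANSFER_MARKS (and during incremental
       // compaction, see SelectScavengingVisitorsTable) the candidate is
       // evacuated as an ordinary cons string.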
2054   static inline void EvacuateShortcutCandidate(Map* map,
2055                                                HeapObject** slot,
2056                                                HeapObject* object) {
2057     ASSERT(IsShortcutCandidate(map->instance_type()));
2058
2059     Heap* heap = map->GetHeap();
2060
2061     if (marks_handling == IGNORE_MARKS &&
2062         ConsString::cast(object)->unchecked_second() ==
2063         heap->empty_string()) {
2064       HeapObject* first =
2065           HeapObject::cast(ConsString::cast(object)->unchecked_first());
2066
2067       *slot = first;
2068
2069       if (!heap->InNewSpace(first)) {
2070         object->set_map_word(MapWord::FromForwardingAddress(first));
2071         return;
2072       }
2073
2074       MapWord first_word = first->map_word();
2075       if (first_word.IsForwardingAddress()) {
2076         HeapObject* target = first_word.ToForwardingAddress();
2077
2078         *slot = target;
2079         object->set_map_word(MapWord::FromForwardingAddress(target));
2080         return;
2081       }
2082
2083       heap->DoScavengeObject(first->map(), slot, first);
2084       object->set_map_word(MapWord::FromForwardingAddress(*slot));
2085       return;
2086     }
2087
2088     int object_size = ConsString::kSize;
2089     EvacuateObject<POINTER_OBJECT, SMALL, kObjectAlignment>(
2090         map, slot, object, object_size);
2091   }
2092
2093   template<ObjectContents object_contents>
2094   class ObjectEvacuationStrategy {
2095    public:
2096     template<int object_size>
2097     static inline void VisitSpecialized(Map* map,
2098                                         HeapObject** slot,
2099                                         HeapObject* object) {
2100       EvacuateObject<object_contents, SMALL, kObjectAlignment>(
2101           map, slot, object, object_size);
2102     }
2103
2104     static inline void Visit(Map* map,
2105                              HeapObject** slot,
2106                              HeapObject* object) {
2107       int object_size = map->instance_size();
2108       EvacuateObject<object_contents, SMALL, kObjectAlignment>(
2109           map, slot, object, object_size);
2110     }
2111   };
2112
2113   static VisitorDispatchTable<ScavengingCallback> table_;
2114 };
2115
2116
2117 template<MarksHandling marks_handling,
2118          LoggingAndProfiling logging_and_profiling_mode>
2119 VisitorDispatchTable<ScavengingCallback>
2120     ScavengingVisitor<marks_handling, logging_and_profiling_mode>::table_;
2121
2122
2123 static void InitializeScavengingVisitorsTables() {
2124   ScavengingVisitor<TRANSFER_MARKS,
2125                     LOGGING_AND_PROFILING_DISABLED>::Initialize();
2126   ScavengingVisitor<IGNORE_MARKS, LOGGING_AND_PROFILING_DISABLED>::Initialize();
2127   ScavengingVisitor<TRANSFER_MARKS,
2128                     LOGGING_AND_PROFILING_ENABLED>::Initialize();
2129   ScavengingVisitor<IGNORE_MARKS, LOGGING_AND_PROFILING_ENABLED>::Initialize();
2130 }
2131
2132
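     // Chooses the visitor table that matches the current incremental-marking
     // state and whether logging/profiling is active.  When incremental
     // compaction is in progress the shortcut-candidate entry is additionally
     // replaced with the plain cons-string visitor (see the comment below).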
2133 void Heap::SelectScavengingVisitorsTable() {
2134   bool logging_and_profiling =
2135       isolate()->logger()->is_logging() ||
2136       isolate()->cpu_profiler()->is_profiling() ||
2137       (isolate()->heap_profiler() != NULL &&
2138        isolate()->heap_profiler()->is_profiling());
2139
2140   if (!incremental_marking()->IsMarking()) {
2141     if (!logging_and_profiling) {
2142       scavenging_visitors_table_.CopyFrom(
2143           ScavengingVisitor<IGNORE_MARKS,
2144                             LOGGING_AND_PROFILING_DISABLED>::GetTable());
2145     } else {
2146       scavenging_visitors_table_.CopyFrom(
2147           ScavengingVisitor<IGNORE_MARKS,
2148                             LOGGING_AND_PROFILING_ENABLED>::GetTable());
2149     }
2150   } else {
2151     if (!logging_and_profiling) {
2152       scavenging_visitors_table_.CopyFrom(
2153           ScavengingVisitor<TRANSFER_MARKS,
2154                             LOGGING_AND_PROFILING_DISABLED>::GetTable());
2155     } else {
2156       scavenging_visitors_table_.CopyFrom(
2157           ScavengingVisitor<TRANSFER_MARKS,
2158                             LOGGING_AND_PROFILING_ENABLED>::GetTable());
2159     }
2160
2161     if (incremental_marking()->IsCompacting()) {
2162       // When compacting, forbid short-circuiting of cons strings.
2163       // The scavenging code relies on the fact that a new space object
2164       // can't be evacuated into an evacuation candidate, but
2165       // short-circuiting violates this assumption.
2166       scavenging_visitors_table_.Register(
2167           StaticVisitorBase::kVisitShortcutCandidate,
2168           scavenging_visitors_table_.GetVisitorById(
2169               StaticVisitorBase::kVisitConsString));
2170     }
2171   }
2172 }
2173
2174
2175 void Heap::ScavengeObjectSlow(HeapObject** p, HeapObject* object) {
2176   SLOW_ASSERT(HEAP->InFromSpace(object));
2177   MapWord first_word = object->map_word();
2178   SLOW_ASSERT(!first_word.IsForwardingAddress());
2179   Map* map = first_word.ToMap();
2180   map->GetHeap()->DoScavengeObject(map, p, object);
2181 }
2182
2183
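     // Bootstrap helper: allocates a map before roots such as the null value,
     // the empty fixed array and the empty descriptor array exist.  Only the
     // fields that do not require those roots are initialized here;
     // CreateInitialMaps() patches up prototype, constructor, code cache and
     // descriptors afterwards.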
2184 MaybeObject* Heap::AllocatePartialMap(InstanceType instance_type,
2185                                       int instance_size) {
2186   Object* result;
2187   MaybeObject* maybe_result = AllocateRawMap();
2188   if (!maybe_result->ToObject(&result)) return maybe_result;
2189
2190   // Map::cast cannot be used due to uninitialized map field.
2191   reinterpret_cast<Map*>(result)->set_map(raw_unchecked_meta_map());
2192   reinterpret_cast<Map*>(result)->set_instance_type(instance_type);
2193   reinterpret_cast<Map*>(result)->set_instance_size(instance_size);
2194   reinterpret_cast<Map*>(result)->set_visitor_id(
2195         StaticVisitorBase::GetVisitorId(instance_type, instance_size));
2196   reinterpret_cast<Map*>(result)->set_inobject_properties(0);
2197   reinterpret_cast<Map*>(result)->set_pre_allocated_property_fields(0);
2198   reinterpret_cast<Map*>(result)->set_unused_property_fields(0);
2199   reinterpret_cast<Map*>(result)->set_bit_field(0);
2200   reinterpret_cast<Map*>(result)->set_bit_field2(0);
2201   int bit_field3 = Map::EnumLengthBits::encode(Map::kInvalidEnumCache) |
2202                    Map::OwnsDescriptors::encode(true);
2203   reinterpret_cast<Map*>(result)->set_bit_field3(bit_field3);
2204   return result;
2205 }
2206
2207
2208 MaybeObject* Heap::AllocateMap(InstanceType instance_type,
2209                                int instance_size,
2210                                ElementsKind elements_kind) {
2211   Object* result;
2212   MaybeObject* maybe_result = AllocateRawMap();
2213   if (!maybe_result->To(&result)) return maybe_result;
2214
2215   Map* map = reinterpret_cast<Map*>(result);
2216   map->set_map_no_write_barrier(meta_map());
2217   map->set_instance_type(instance_type);
2218   map->set_visitor_id(
2219       StaticVisitorBase::GetVisitorId(instance_type, instance_size));
2220   map->set_prototype(null_value(), SKIP_WRITE_BARRIER);
2221   map->set_constructor(null_value(), SKIP_WRITE_BARRIER);
2222   map->set_instance_size(instance_size);
2223   map->set_inobject_properties(0);
2224   map->set_pre_allocated_property_fields(0);
2225   map->set_code_cache(empty_fixed_array(), SKIP_WRITE_BARRIER);
2226   map->set_dependent_code(DependentCode::cast(empty_fixed_array()),
2227                           SKIP_WRITE_BARRIER);
2228   map->init_back_pointer(undefined_value());
2229   map->set_unused_property_fields(0);
2230   map->set_instance_descriptors(empty_descriptor_array());
2231   map->set_bit_field(0);
2232   map->set_bit_field2(1 << Map::kIsExtensible);
2233   int bit_field3 = Map::EnumLengthBits::encode(Map::kInvalidEnumCache) |
2234                    Map::OwnsDescriptors::encode(true);
2235   map->set_bit_field3(bit_field3);
2236   map->set_elements_kind(elements_kind);
2237
2238   return map;
2239 }
2240
2241
2242 MaybeObject* Heap::AllocateCodeCache() {
2243   CodeCache* code_cache;
2244   { MaybeObject* maybe_code_cache = AllocateStruct(CODE_CACHE_TYPE);
2245     if (!maybe_code_cache->To(&code_cache)) return maybe_code_cache;
2246   }
2247   code_cache->set_default_cache(empty_fixed_array(), SKIP_WRITE_BARRIER);
2248   code_cache->set_normal_type_cache(undefined_value(), SKIP_WRITE_BARRIER);
2249   return code_cache;
2250 }
2251
2252
2253 MaybeObject* Heap::AllocatePolymorphicCodeCache() {
2254   return AllocateStruct(POLYMORPHIC_CODE_CACHE_TYPE);
2255 }
2256
2257
2258 MaybeObject* Heap::AllocateAccessorPair() {
2259   AccessorPair* accessors;
2260   { MaybeObject* maybe_accessors = AllocateStruct(ACCESSOR_PAIR_TYPE);
2261     if (!maybe_accessors->To(&accessors)) return maybe_accessors;
2262   }
2263   accessors->set_getter(the_hole_value(), SKIP_WRITE_BARRIER);
2264   accessors->set_setter(the_hole_value(), SKIP_WRITE_BARRIER);
2265   return accessors;
2266 }
2267
2268
2269 MaybeObject* Heap::AllocateTypeFeedbackInfo() {
2270   TypeFeedbackInfo* info;
2271   { MaybeObject* maybe_info = AllocateStruct(TYPE_FEEDBACK_INFO_TYPE);
2272     if (!maybe_info->To(&info)) return maybe_info;
2273   }
2274   info->initialize_storage();
2275   info->set_type_feedback_cells(TypeFeedbackCells::cast(empty_fixed_array()),
2276                                 SKIP_WRITE_BARRIER);
2277   return info;
2278 }
2279
2280
2281 MaybeObject* Heap::AllocateAliasedArgumentsEntry(int aliased_context_slot) {
2282   AliasedArgumentsEntry* entry;
2283   { MaybeObject* maybe_entry = AllocateStruct(ALIASED_ARGUMENTS_ENTRY_TYPE);
2284     if (!maybe_entry->To(&entry)) return maybe_entry;
2285   }
2286   entry->set_aliased_context_slot(aliased_context_slot);
2287   return entry;
2288 }
2289
2290
2291 const Heap::StringTypeTable Heap::string_type_table[] = {
2292 #define STRING_TYPE_ELEMENT(type, size, name, camel_name)                      \
2293   {type, size, k##camel_name##MapRootIndex},
2294   STRING_TYPE_LIST(STRING_TYPE_ELEMENT)
2295 #undef STRING_TYPE_ELEMENT
2296 };
2297
2298
2299 const Heap::ConstantStringTable Heap::constant_string_table[] = {
2300 #define CONSTANT_STRING_ELEMENT(name, contents)                                \
2301   {contents, k##name##RootIndex},
2302   INTERNALIZED_STRING_LIST(CONSTANT_STRING_ELEMENT)
2303 #undef CONSTANT_STRING_ELEMENT
2304 };
2305
2306
2307 const Heap::StructTable Heap::struct_table[] = {
2308 #define STRUCT_TABLE_ELEMENT(NAME, Name, name)                                 \
2309   { NAME##_TYPE, Name::kSize, k##Name##MapRootIndex },
2310   STRUCT_LIST(STRUCT_TABLE_ELEMENT)
2311 #undef STRUCT_TABLE_ELEMENT
2312 };
2313
2314
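     // Bootstrap ordering: the meta map is created first (every map's map is
     // the meta map, including its own), followed by the fixed array and
     // oddball maps, the empty fixed array, null, undefined and the empty
     // descriptor array.  The partially initialized maps are then completed,
     // after which AllocateMap() can be used for the remaining root maps.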
2315 bool Heap::CreateInitialMaps() {
2316   Object* obj;
2317   { MaybeObject* maybe_obj = AllocatePartialMap(MAP_TYPE, Map::kSize);
2318     if (!maybe_obj->ToObject(&obj)) return false;
2319   }
2320   // Map::cast cannot be used due to uninitialized map field.
2321   Map* new_meta_map = reinterpret_cast<Map*>(obj);
2322   set_meta_map(new_meta_map);
2323   new_meta_map->set_map(new_meta_map);
2324
2325   { MaybeObject* maybe_obj =
2326         AllocatePartialMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2327     if (!maybe_obj->ToObject(&obj)) return false;
2328   }
2329   set_fixed_array_map(Map::cast(obj));
2330
2331   { MaybeObject* maybe_obj = AllocatePartialMap(ODDBALL_TYPE, Oddball::kSize);
2332     if (!maybe_obj->ToObject(&obj)) return false;
2333   }
2334   set_oddball_map(Map::cast(obj));
2335
2336   // Allocate the empty fixed array.
2337   { MaybeObject* maybe_obj = AllocateEmptyFixedArray();
2338     if (!maybe_obj->ToObject(&obj)) return false;
2339   }
2340   set_empty_fixed_array(FixedArray::cast(obj));
2341
2342   { MaybeObject* maybe_obj = Allocate(oddball_map(), OLD_POINTER_SPACE);
2343     if (!maybe_obj->ToObject(&obj)) return false;
2344   }
2345   set_null_value(Oddball::cast(obj));
2346   Oddball::cast(obj)->set_kind(Oddball::kNull);
2347
2348   { MaybeObject* maybe_obj = Allocate(oddball_map(), OLD_POINTER_SPACE);
2349     if (!maybe_obj->ToObject(&obj)) return false;
2350   }
2351   set_undefined_value(Oddball::cast(obj));
2352   Oddball::cast(obj)->set_kind(Oddball::kUndefined);
2353   ASSERT(!InNewSpace(undefined_value()));
2354
2355   // Allocate the empty descriptor array.
2356   { MaybeObject* maybe_obj = AllocateEmptyFixedArray();
2357     if (!maybe_obj->ToObject(&obj)) return false;
2358   }
2359   set_empty_descriptor_array(DescriptorArray::cast(obj));
2360
2361   // Fix the instance_descriptors for the existing maps.
2362   meta_map()->set_code_cache(empty_fixed_array());
2363   meta_map()->set_dependent_code(DependentCode::cast(empty_fixed_array()));
2364   meta_map()->init_back_pointer(undefined_value());
2365   meta_map()->set_instance_descriptors(empty_descriptor_array());
2366
2367   fixed_array_map()->set_code_cache(empty_fixed_array());
2368   fixed_array_map()->set_dependent_code(
2369       DependentCode::cast(empty_fixed_array()));
2370   fixed_array_map()->init_back_pointer(undefined_value());
2371   fixed_array_map()->set_instance_descriptors(empty_descriptor_array());
2372
2373   oddball_map()->set_code_cache(empty_fixed_array());
2374   oddball_map()->set_dependent_code(DependentCode::cast(empty_fixed_array()));
2375   oddball_map()->init_back_pointer(undefined_value());
2376   oddball_map()->set_instance_descriptors(empty_descriptor_array());
2377
2378   // Fix prototype object for existing maps.
2379   meta_map()->set_prototype(null_value());
2380   meta_map()->set_constructor(null_value());
2381
2382   fixed_array_map()->set_prototype(null_value());
2383   fixed_array_map()->set_constructor(null_value());
2384
2385   oddball_map()->set_prototype(null_value());
2386   oddball_map()->set_constructor(null_value());
2387
2388   { MaybeObject* maybe_obj =
2389         AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2390     if (!maybe_obj->ToObject(&obj)) return false;
2391   }
2392   set_fixed_cow_array_map(Map::cast(obj));
2393   ASSERT(fixed_array_map() != fixed_cow_array_map());
2394
2395   { MaybeObject* maybe_obj =
2396         AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2397     if (!maybe_obj->ToObject(&obj)) return false;
2398   }
2399   set_scope_info_map(Map::cast(obj));
2400
2401   { MaybeObject* maybe_obj = AllocateMap(HEAP_NUMBER_TYPE, HeapNumber::kSize);
2402     if (!maybe_obj->ToObject(&obj)) return false;
2403   }
2404   set_heap_number_map(Map::cast(obj));
2405
2406   { MaybeObject* maybe_obj = AllocateMap(SYMBOL_TYPE, Symbol::kSize);
2407     if (!maybe_obj->ToObject(&obj)) return false;
2408   }
2409   set_symbol_map(Map::cast(obj));
2410
2411   { MaybeObject* maybe_obj = AllocateMap(FOREIGN_TYPE, Foreign::kSize);
2412     if (!maybe_obj->ToObject(&obj)) return false;
2413   }
2414   set_foreign_map(Map::cast(obj));
2415
2416   for (unsigned i = 0; i < ARRAY_SIZE(string_type_table); i++) {
2417     const StringTypeTable& entry = string_type_table[i];
2418     { MaybeObject* maybe_obj = AllocateMap(entry.type, entry.size);
2419       if (!maybe_obj->ToObject(&obj)) return false;
2420     }
2421     roots_[entry.index] = Map::cast(obj);
2422   }
2423
2424   { MaybeObject* maybe_obj = AllocateMap(STRING_TYPE, kVariableSizeSentinel);
2425     if (!maybe_obj->ToObject(&obj)) return false;
2426   }
2427   set_undetectable_string_map(Map::cast(obj));
2428   Map::cast(obj)->set_is_undetectable();
2429
2430   { MaybeObject* maybe_obj =
2431         AllocateMap(ASCII_STRING_TYPE, kVariableSizeSentinel);
2432     if (!maybe_obj->ToObject(&obj)) return false;
2433   }
2434   set_undetectable_ascii_string_map(Map::cast(obj));
2435   Map::cast(obj)->set_is_undetectable();
2436
2437   { MaybeObject* maybe_obj =
2438         AllocateMap(FIXED_DOUBLE_ARRAY_TYPE, kVariableSizeSentinel);
2439     if (!maybe_obj->ToObject(&obj)) return false;
2440   }
2441   set_fixed_double_array_map(Map::cast(obj));
2442
2443   { MaybeObject* maybe_obj =
2444         AllocateMap(BYTE_ARRAY_TYPE, kVariableSizeSentinel);
2445     if (!maybe_obj->ToObject(&obj)) return false;
2446   }
2447   set_byte_array_map(Map::cast(obj));
2448
2449   { MaybeObject* maybe_obj =
2450         AllocateMap(FREE_SPACE_TYPE, kVariableSizeSentinel);
2451     if (!maybe_obj->ToObject(&obj)) return false;
2452   }
2453   set_free_space_map(Map::cast(obj));
2454
2455   { MaybeObject* maybe_obj = AllocateByteArray(0, TENURED);
2456     if (!maybe_obj->ToObject(&obj)) return false;
2457   }
2458   set_empty_byte_array(ByteArray::cast(obj));
2459
2460   { MaybeObject* maybe_obj =
2461         AllocateMap(EXTERNAL_PIXEL_ARRAY_TYPE, ExternalArray::kAlignedSize);
2462     if (!maybe_obj->ToObject(&obj)) return false;
2463   }
2464   set_external_pixel_array_map(Map::cast(obj));
2465
2466   { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_BYTE_ARRAY_TYPE,
2467                                          ExternalArray::kAlignedSize);
2468     if (!maybe_obj->ToObject(&obj)) return false;
2469   }
2470   set_external_byte_array_map(Map::cast(obj));
2471
2472   { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE,
2473                                          ExternalArray::kAlignedSize);
2474     if (!maybe_obj->ToObject(&obj)) return false;
2475   }
2476   set_external_unsigned_byte_array_map(Map::cast(obj));
2477
2478   { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_SHORT_ARRAY_TYPE,
2479                                          ExternalArray::kAlignedSize);
2480     if (!maybe_obj->ToObject(&obj)) return false;
2481   }
2482   set_external_short_array_map(Map::cast(obj));
2483
2484   { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE,
2485                                          ExternalArray::kAlignedSize);
2486     if (!maybe_obj->ToObject(&obj)) return false;
2487   }
2488   set_external_unsigned_short_array_map(Map::cast(obj));
2489
2490   { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_INT_ARRAY_TYPE,
2491                                          ExternalArray::kAlignedSize);
2492     if (!maybe_obj->ToObject(&obj)) return false;
2493   }
2494   set_external_int_array_map(Map::cast(obj));
2495
2496   { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_UNSIGNED_INT_ARRAY_TYPE,
2497                                          ExternalArray::kAlignedSize);
2498     if (!maybe_obj->ToObject(&obj)) return false;
2499   }
2500   set_external_unsigned_int_array_map(Map::cast(obj));
2501
2502   { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_FLOAT_ARRAY_TYPE,
2503                                          ExternalArray::kAlignedSize);
2504     if (!maybe_obj->ToObject(&obj)) return false;
2505   }
2506   set_external_float_array_map(Map::cast(obj));
2507
2508   { MaybeObject* maybe_obj =
2509         AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2510     if (!maybe_obj->ToObject(&obj)) return false;
2511   }
2512   set_non_strict_arguments_elements_map(Map::cast(obj));
2513
2514   { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_DOUBLE_ARRAY_TYPE,
2515                                          ExternalArray::kAlignedSize);
2516     if (!maybe_obj->ToObject(&obj)) return false;
2517   }
2518   set_external_double_array_map(Map::cast(obj));
2519
2520   { MaybeObject* maybe_obj = AllocateMap(CODE_TYPE, kVariableSizeSentinel);
2521     if (!maybe_obj->ToObject(&obj)) return false;
2522   }
2523   set_code_map(Map::cast(obj));
2524
2525   { MaybeObject* maybe_obj = AllocateMap(JS_GLOBAL_PROPERTY_CELL_TYPE,
2526                                          JSGlobalPropertyCell::kSize);
2527     if (!maybe_obj->ToObject(&obj)) return false;
2528   }
2529   set_global_property_cell_map(Map::cast(obj));
2530
2531   { MaybeObject* maybe_obj = AllocateMap(FILLER_TYPE, kPointerSize);
2532     if (!maybe_obj->ToObject(&obj)) return false;
2533   }
2534   set_one_pointer_filler_map(Map::cast(obj));
2535
2536   { MaybeObject* maybe_obj = AllocateMap(FILLER_TYPE, 2 * kPointerSize);
2537     if (!maybe_obj->ToObject(&obj)) return false;
2538   }
2539   set_two_pointer_filler_map(Map::cast(obj));
2540
2541   for (unsigned i = 0; i < ARRAY_SIZE(struct_table); i++) {
2542     const StructTable& entry = struct_table[i];
2543     { MaybeObject* maybe_obj = AllocateMap(entry.type, entry.size);
2544       if (!maybe_obj->ToObject(&obj)) return false;
2545     }
2546     roots_[entry.index] = Map::cast(obj);
2547   }
2548
2549   { MaybeObject* maybe_obj =
2550         AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2551     if (!maybe_obj->ToObject(&obj)) return false;
2552   }
2553   set_hash_table_map(Map::cast(obj));
2554
2555   { MaybeObject* maybe_obj =
2556         AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2557     if (!maybe_obj->ToObject(&obj)) return false;
2558   }
2559   set_function_context_map(Map::cast(obj));
2560
2561   { MaybeObject* maybe_obj =
2562         AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2563     if (!maybe_obj->ToObject(&obj)) return false;
2564   }
2565   set_catch_context_map(Map::cast(obj));
2566
2567   { MaybeObject* maybe_obj =
2568         AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2569     if (!maybe_obj->ToObject(&obj)) return false;
2570   }
2571   set_with_context_map(Map::cast(obj));
2572
2573   { MaybeObject* maybe_obj =
2574         AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2575     if (!maybe_obj->ToObject(&obj)) return false;
2576   }
2577   set_block_context_map(Map::cast(obj));
2578
2579   { MaybeObject* maybe_obj =
2580         AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2581     if (!maybe_obj->ToObject(&obj)) return false;
2582   }
2583   set_module_context_map(Map::cast(obj));
2584
2585   { MaybeObject* maybe_obj =
2586         AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2587     if (!maybe_obj->ToObject(&obj)) return false;
2588   }
2589   set_global_context_map(Map::cast(obj));
2590
2591   { MaybeObject* maybe_obj =
2592         AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2593     if (!maybe_obj->ToObject(&obj)) return false;
2594   }
2595   Map* native_context_map = Map::cast(obj);
2596   native_context_map->set_dictionary_map(true);
2597   native_context_map->set_visitor_id(StaticVisitorBase::kVisitNativeContext);
2598   set_native_context_map(native_context_map);
2599
2600   { MaybeObject* maybe_obj = AllocateMap(SHARED_FUNCTION_INFO_TYPE,
2601                                          SharedFunctionInfo::kAlignedSize);
2602     if (!maybe_obj->ToObject(&obj)) return false;
2603   }
2604   set_shared_function_info_map(Map::cast(obj));
2605
2606   { MaybeObject* maybe_obj = AllocateMap(JS_MESSAGE_OBJECT_TYPE,
2607                                          JSMessageObject::kSize);
2608     if (!maybe_obj->ToObject(&obj)) return false;
2609   }
2610   set_message_object_map(Map::cast(obj));
2611
2612   Map* external_map;
2613   { MaybeObject* maybe_obj =
2614         AllocateMap(JS_OBJECT_TYPE, JSObject::kHeaderSize + kPointerSize);
2615     if (!maybe_obj->To(&external_map)) return false;
2616   }
2617   external_map->set_is_extensible(false);
2618   set_external_map(external_map);
2619
2620   ASSERT(!InNewSpace(empty_fixed_array()));
2621   return true;
2622 }
2623
2624
2625 MaybeObject* Heap::AllocateHeapNumber(double value, PretenureFlag pretenure) {
2626   // Statically ensure that it is safe to allocate heap numbers in paged
2627   // spaces.
2628   STATIC_ASSERT(HeapNumber::kSize <= Page::kNonCodeObjectAreaSize);
2629   AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
2630
2631   Object* result;
2632   { MaybeObject* maybe_result =
2633         AllocateRaw(HeapNumber::kSize, space, OLD_DATA_SPACE);
2634     if (!maybe_result->ToObject(&result)) return maybe_result;
2635   }
2636
2637   HeapObject::cast(result)->set_map_no_write_barrier(heap_number_map());
2638   HeapNumber::cast(result)->set_value(value);
2639   return result;
2640 }
2641
2642
2643 MaybeObject* Heap::AllocateHeapNumber(double value) {
2644   // Use the general version if we're forced to always allocate.
2645   if (always_allocate()) return AllocateHeapNumber(value, TENURED);
2646
2647   // This version of AllocateHeapNumber is optimized for
2648   // allocation in new space.
2649   STATIC_ASSERT(HeapNumber::kSize <= Page::kMaxNonCodeHeapObjectSize);
2650   ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
2651   Object* result;
2652   { MaybeObject* maybe_result = new_space_.AllocateRaw(HeapNumber::kSize);
2653     if (!maybe_result->ToObject(&result)) return maybe_result;
2654   }
2655   HeapObject::cast(result)->set_map_no_write_barrier(heap_number_map());
2656   HeapNumber::cast(result)->set_value(value);
2657   return result;
2658 }
2659
2660
2661 MaybeObject* Heap::AllocateJSGlobalPropertyCell(Object* value) {
2662   Object* result;
2663   { MaybeObject* maybe_result = AllocateRawCell();
2664     if (!maybe_result->ToObject(&result)) return maybe_result;
2665   }
2666   HeapObject::cast(result)->set_map_no_write_barrier(
2667       global_property_cell_map());
2668   JSGlobalPropertyCell::cast(result)->set_value(value);
2669   return result;
2670 }
2671
2672
2673 MaybeObject* Heap::CreateOddball(const char* to_string,
2674                                  Object* to_number,
2675                                  byte kind) {
2676   Object* result;
2677   { MaybeObject* maybe_result = Allocate(oddball_map(), OLD_POINTER_SPACE);
2678     if (!maybe_result->ToObject(&result)) return maybe_result;
2679   }
2680   return Oddball::cast(result)->Initialize(to_string, to_number, kind);
2681 }
2682
2683
2684 bool Heap::CreateApiObjects() {
2685   Object* obj;
2686
2687   { MaybeObject* maybe_obj = AllocateMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
2688     if (!maybe_obj->ToObject(&obj)) return false;
2689   }
2690   // Don't use Smi-only elements optimizations for objects with the neander
2691   // map. There are too many cases where element values are set directly,
2692   // with no bottleneck to trap the Smi-only -> fast elements transition,
2693   // and there appears to be no benefit in optimizing this case.
2694   Map* new_neander_map = Map::cast(obj);
2695   new_neander_map->set_elements_kind(TERMINAL_FAST_ELEMENTS_KIND);
2696   set_neander_map(new_neander_map);
2697
2698   { MaybeObject* maybe_obj = AllocateJSObjectFromMap(neander_map());
2699     if (!maybe_obj->ToObject(&obj)) return false;
2700   }
2701   Object* elements;
2702   { MaybeObject* maybe_elements = AllocateFixedArray(2);
2703     if (!maybe_elements->ToObject(&elements)) return false;
2704   }
2705   FixedArray::cast(elements)->set(0, Smi::FromInt(0));
2706   JSObject::cast(obj)->set_elements(FixedArray::cast(elements));
2707   set_message_listeners(JSObject::cast(obj));
2708
2709   return true;
2710 }
2711
2712
2713 void Heap::CreateJSEntryStub() {
2714   JSEntryStub stub;
2715   set_js_entry_code(*stub.GetCode(isolate()));
2716 }
2717
2718
2719 void Heap::CreateJSConstructEntryStub() {
2720   JSConstructEntryStub stub;
2721   set_js_construct_entry_code(*stub.GetCode(isolate()));
2722 }
2723
2724
2725 void Heap::CreateFixedStubs() {
2726   // Here we create roots for fixed stubs. They are needed at GC
2727   // for cooking and uncooking (check out frames.cc).
2728   // This eliminates the need to do a dictionary lookup in the
2729   // stub cache for these stubs.
2730   HandleScope scope(isolate());
2731   // gcc-4.4 has a problem generating correct code for the following snippet:
2732   // {  JSEntryStub stub;
2733   //    js_entry_code_ = *stub.GetCode();
2734   // }
2735   // {  JSConstructEntryStub stub;
2736   //    js_construct_entry_code_ = *stub.GetCode();
2737   // }
2738   // To work around the problem, make separate functions without inlining.
2739   Heap::CreateJSEntryStub();
2740   Heap::CreateJSConstructEntryStub();
2741
2742   // Create stubs that should be there, so we don't unexpectedly have to
2743   // create them if we need them during the creation of another stub.
2744   // Stub creation mixes raw pointers and handles in an unsafe manner so
2745   // we cannot create stubs while we are creating stubs.
2746   CodeStub::GenerateStubsAheadOfTime(isolate());
2747 }
2748
2749
2750 bool Heap::CreateInitialObjects() {
2751   Object* obj;
2752
2753   // The -0 value must be set before NumberFromDouble works.
2754   { MaybeObject* maybe_obj = AllocateHeapNumber(-0.0, TENURED);
2755     if (!maybe_obj->ToObject(&obj)) return false;
2756   }
2757   set_minus_zero_value(HeapNumber::cast(obj));
2758   ASSERT(std::signbit(minus_zero_value()->Number()) != 0);
2759
2760   { MaybeObject* maybe_obj = AllocateHeapNumber(OS::nan_value(), TENURED);
2761     if (!maybe_obj->ToObject(&obj)) return false;
2762   }
2763   set_nan_value(HeapNumber::cast(obj));
2764
2765   { MaybeObject* maybe_obj = AllocateHeapNumber(V8_INFINITY, TENURED);
2766     if (!maybe_obj->ToObject(&obj)) return false;
2767   }
2768   set_infinity_value(HeapNumber::cast(obj));
2769
2770   // The hole has not been created yet, but we want to put something
2771   // predictable in the gaps in the string table, so let's make that Smi zero.
2772   set_the_hole_value(reinterpret_cast<Oddball*>(Smi::FromInt(0)));
2773
2774   // Allocate initial string table.
2775   { MaybeObject* maybe_obj =
2776         StringTable::Allocate(this, kInitialStringTableSize);
2777     if (!maybe_obj->ToObject(&obj)) return false;
2778   }
2779   // Don't use set_string_table() due to asserts.
2780   roots_[kStringTableRootIndex] = obj;
2781
2782   // Finish initializing oddballs after creating the string table.
2783   { MaybeObject* maybe_obj =
2784         undefined_value()->Initialize("undefined",
2785                                       nan_value(),
2786                                       Oddball::kUndefined);
2787     if (!maybe_obj->ToObject(&obj)) return false;
2788   }
2789
2790   // Initialize the null_value.
2791   { MaybeObject* maybe_obj =
2792         null_value()->Initialize("null", Smi::FromInt(0), Oddball::kNull);
2793     if (!maybe_obj->ToObject(&obj)) return false;
2794   }
2795
2796   { MaybeObject* maybe_obj = CreateOddball("true",
2797                                            Smi::FromInt(1),
2798                                            Oddball::kTrue);
2799     if (!maybe_obj->ToObject(&obj)) return false;
2800   }
2801   set_true_value(Oddball::cast(obj));
2802
2803   { MaybeObject* maybe_obj = CreateOddball("false",
2804                                            Smi::FromInt(0),
2805                                            Oddball::kFalse);
2806     if (!maybe_obj->ToObject(&obj)) return false;
2807   }
2808   set_false_value(Oddball::cast(obj));
2809
2810   { MaybeObject* maybe_obj = CreateOddball("hole",
2811                                            Smi::FromInt(-1),
2812                                            Oddball::kTheHole);
2813     if (!maybe_obj->ToObject(&obj)) return false;
2814   }
2815   set_the_hole_value(Oddball::cast(obj));
2816
2817   { MaybeObject* maybe_obj = CreateOddball("arguments_marker",
2818                                            Smi::FromInt(-4),
2819                                            Oddball::kArgumentMarker);
2820     if (!maybe_obj->ToObject(&obj)) return false;
2821   }
2822   set_arguments_marker(Oddball::cast(obj));
2823
2824   { MaybeObject* maybe_obj = CreateOddball("no_interceptor_result_sentinel",
2825                                            Smi::FromInt(-2),
2826                                            Oddball::kOther);
2827     if (!maybe_obj->ToObject(&obj)) return false;
2828   }
2829   set_no_interceptor_result_sentinel(obj);
2830
2831   { MaybeObject* maybe_obj = CreateOddball("termination_exception",
2832                                            Smi::FromInt(-3),
2833                                            Oddball::kOther);
2834     if (!maybe_obj->ToObject(&obj)) return false;
2835   }
2836   set_termination_exception(obj);
2837
2838   for (unsigned i = 0; i < ARRAY_SIZE(constant_string_table); i++) {
2839     { MaybeObject* maybe_obj =
2840           InternalizeUtf8String(constant_string_table[i].contents);
2841       if (!maybe_obj->ToObject(&obj)) return false;
2842     }
2843     roots_[constant_string_table[i].index] = String::cast(obj);
2844   }
2845
2846   // Allocate the hidden string which is used to identify the hidden properties
2847   // in JSObjects. The hash code has a special value so that it will not match
2848   // the empty string when searching for the property. It cannot be part of the
2849   // loop above because it needs to be allocated manually with the special
2850   // hash code in place. The hash code for the hidden_string is zero to ensure
2851   // that it will always be the first entry in property descriptors.
2852   { MaybeObject* maybe_obj = AllocateOneByteInternalizedString(
2853       OneByteVector("", 0), String::kEmptyStringHash);
2854     if (!maybe_obj->ToObject(&obj)) return false;
2855   }
2856   hidden_string_ = String::cast(obj);
2857
2858   // Allocate the code_stubs dictionary. The initial size is set to avoid
2859   // expanding the dictionary during bootstrapping.
2860   { MaybeObject* maybe_obj = UnseededNumberDictionary::Allocate(this, 128);
2861     if (!maybe_obj->ToObject(&obj)) return false;
2862   }
2863   set_code_stubs(UnseededNumberDictionary::cast(obj));
2864
2865
2866   // Allocate the non_monomorphic_cache used in stub-cache.cc. The initial size
2867   // is set to avoid expanding the dictionary during bootstrapping.
2868   { MaybeObject* maybe_obj = UnseededNumberDictionary::Allocate(this, 64);
2869     if (!maybe_obj->ToObject(&obj)) return false;
2870   }
2871   set_non_monomorphic_cache(UnseededNumberDictionary::cast(obj));
2872
2873   { MaybeObject* maybe_obj = AllocatePolymorphicCodeCache();
2874     if (!maybe_obj->ToObject(&obj)) return false;
2875   }
2876   set_polymorphic_code_cache(PolymorphicCodeCache::cast(obj));
2877
2878   set_instanceof_cache_function(Smi::FromInt(0));
2879   set_instanceof_cache_map(Smi::FromInt(0));
2880   set_instanceof_cache_answer(Smi::FromInt(0));
2881
2882   CreateFixedStubs();
2883
2884   // Allocate the dictionary of intrinsic function names.
2885   { MaybeObject* maybe_obj =
2886         NameDictionary::Allocate(this, Runtime::kNumFunctions);
2887     if (!maybe_obj->ToObject(&obj)) return false;
2888   }
2889   { MaybeObject* maybe_obj = Runtime::InitializeIntrinsicFunctionNames(this,
2890                                                                        obj);
2891     if (!maybe_obj->ToObject(&obj)) return false;
2892   }
2893   set_intrinsic_function_names(NameDictionary::cast(obj));
2894
2895   { MaybeObject* maybe_obj = AllocateInitialNumberStringCache();
2896     if (!maybe_obj->ToObject(&obj)) return false;
2897   }
2898   set_number_string_cache(FixedArray::cast(obj));
2899
2900   // Allocate cache for single character one byte strings.
2901   { MaybeObject* maybe_obj =
2902         AllocateFixedArray(String::kMaxOneByteCharCode + 1, TENURED);
2903     if (!maybe_obj->ToObject(&obj)) return false;
2904   }
2905   set_single_character_string_cache(FixedArray::cast(obj));
2906
2907   // Allocate cache for string split.
2908   { MaybeObject* maybe_obj = AllocateFixedArray(
2909       RegExpResultsCache::kRegExpResultsCacheSize, TENURED);
2910     if (!maybe_obj->ToObject(&obj)) return false;
2911   }
2912   set_string_split_cache(FixedArray::cast(obj));
2913
2914   { MaybeObject* maybe_obj = AllocateFixedArray(
2915       RegExpResultsCache::kRegExpResultsCacheSize, TENURED);
2916     if (!maybe_obj->ToObject(&obj)) return false;
2917   }
2918   set_regexp_multiple_cache(FixedArray::cast(obj));
2919
2920   // Allocate cache for external strings pointing to native source code.
2921   { MaybeObject* maybe_obj = AllocateFixedArray(Natives::GetBuiltinsCount());
2922     if (!maybe_obj->ToObject(&obj)) return false;
2923   }
2924   set_natives_source_cache(FixedArray::cast(obj));
2925
2926   // Allocate object to hold object observation state.
2927   { MaybeObject* maybe_obj = AllocateMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
2928     if (!maybe_obj->ToObject(&obj)) return false;
2929   }
2930   { MaybeObject* maybe_obj = AllocateJSObjectFromMap(Map::cast(obj));
2931     if (!maybe_obj->ToObject(&obj)) return false;
2932   }
2933   set_observation_state(JSObject::cast(obj));
2934
2935   // Handling of script id generation is in FACTORY->NewScript.
2936   set_last_script_id(undefined_value());
2937
2938   // Initialize keyed lookup cache.
2939   isolate_->keyed_lookup_cache()->Clear();
2940
2941   // Initialize context slot cache.
2942   isolate_->context_slot_cache()->Clear();
2943
2944   // Initialize descriptor cache.
2945   isolate_->descriptor_lookup_cache()->Clear();
2946
2947   // Initialize compilation cache.
2948   isolate_->compilation_cache()->Clear();
2949
2950   return true;
2951 }
2952
2953
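// Roots in the list below may legitimately be written after the heap has been
// set up (stack limits, caches, deopt data); every other root is treated as
// immutable once initialization is complete.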
2954 bool Heap::RootCanBeWrittenAfterInitialization(Heap::RootListIndex root_index) {
2955   RootListIndex writable_roots[] = {
2956     kStoreBufferTopRootIndex,
2957     kStackLimitRootIndex,
2958     kNumberStringCacheRootIndex,
2959     kInstanceofCacheFunctionRootIndex,
2960     kInstanceofCacheMapRootIndex,
2961     kInstanceofCacheAnswerRootIndex,
2962     kCodeStubsRootIndex,
2963     kNonMonomorphicCacheRootIndex,
2964     kPolymorphicCodeCacheRootIndex,
2965     kLastScriptIdRootIndex,
2966     kEmptyScriptRootIndex,
2967     kRealStackLimitRootIndex,
2968     kArgumentsAdaptorDeoptPCOffsetRootIndex,
2969     kConstructStubDeoptPCOffsetRootIndex,
2970     kGetterStubDeoptPCOffsetRootIndex,
2971     kSetterStubDeoptPCOffsetRootIndex,
2972     kStringTableRootIndex,
2973   };
2974
2975   for (unsigned int i = 0; i < ARRAY_SIZE(writable_roots); i++) {
2976     if (root_index == writable_roots[i])
2977       return true;
2978   }
2979   return false;
2980 }
2981
2982
2983 Object* RegExpResultsCache::Lookup(Heap* heap,
2984                                    String* key_string,
2985                                    Object* key_pattern,
2986                                    ResultsCacheType type) {
2987   FixedArray* cache;
2988   if (!key_string->IsInternalizedString()) return Smi::FromInt(0);
2989   if (type == STRING_SPLIT_SUBSTRINGS) {
2990     ASSERT(key_pattern->IsString());
2991     if (!key_pattern->IsInternalizedString()) return Smi::FromInt(0);
2992     cache = heap->string_split_cache();
2993   } else {
2994     ASSERT(type == REGEXP_MULTIPLE_INDICES);
2995     ASSERT(key_pattern->IsFixedArray());
2996     cache = heap->regexp_multiple_cache();
2997   }
2998
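  // Each cache bucket spans kArrayEntriesPerCacheEntry consecutive slots
  // holding the key string, the key pattern and the cached result array.
  // Masking the hash rounds it down to the first slot of a bucket; on a miss
  // the following bucket is probed as well.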
2999   uint32_t hash = key_string->Hash();
3000   uint32_t index = ((hash & (kRegExpResultsCacheSize - 1)) &
3001       ~(kArrayEntriesPerCacheEntry - 1));
3002   if (cache->get(index + kStringOffset) == key_string &&
3003       cache->get(index + kPatternOffset) == key_pattern) {
3004     return cache->get(index + kArrayOffset);
3005   }
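  // Primary bucket missed; try the secondary bucket.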
3006   index =
3007       ((index + kArrayEntriesPerCacheEntry) & (kRegExpResultsCacheSize - 1));
3008   if (cache->get(index + kStringOffset) == key_string &&
3009       cache->get(index + kPatternOffset) == key_pattern) {
3010     return cache->get(index + kArrayOffset);
3011   }
3012   return Smi::FromInt(0);
3013 }
3014
3015
3016 void RegExpResultsCache::Enter(Heap* heap,
3017                                String* key_string,
3018                                Object* key_pattern,
3019                                FixedArray* value_array,
3020                                ResultsCacheType type) {
3021   FixedArray* cache;
3022   if (!key_string->IsInternalizedString()) return;
3023   if (type == STRING_SPLIT_SUBSTRINGS) {
3024     ASSERT(key_pattern->IsString());
3025     if (!key_pattern->IsInternalizedString()) return;
3026     cache = heap->string_split_cache();
3027   } else {
3028     ASSERT(type == REGEXP_MULTIPLE_INDICES);
3029     ASSERT(key_pattern->IsFixedArray());
3030     cache = heap->regexp_multiple_cache();
3031   }
3032
3033   uint32_t hash = key_string->Hash();
3034   uint32_t index = ((hash & (kRegExpResultsCacheSize - 1)) &
3035       ~(kArrayEntriesPerCacheEntry - 1));
3036   if (cache->get(index + kStringOffset) == Smi::FromInt(0)) {
3037     cache->set(index + kStringOffset, key_string);
3038     cache->set(index + kPatternOffset, key_pattern);
3039     cache->set(index + kArrayOffset, value_array);
3040   } else {
3041     uint32_t index2 =
3042         ((index + kArrayEntriesPerCacheEntry) & (kRegExpResultsCacheSize - 1));
3043     if (cache->get(index2 + kStringOffset) == Smi::FromInt(0)) {
3044       cache->set(index2 + kStringOffset, key_string);
3045       cache->set(index2 + kPatternOffset, key_pattern);
3046       cache->set(index2 + kArrayOffset, value_array);
3047     } else {
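      // Both buckets are occupied: clear the secondary bucket and install the
      // new entry in the primary one.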
3048       cache->set(index2 + kStringOffset, Smi::FromInt(0));
3049       cache->set(index2 + kPatternOffset, Smi::FromInt(0));
3050       cache->set(index2 + kArrayOffset, Smi::FromInt(0));
3051       cache->set(index + kStringOffset, key_string);
3052       cache->set(index + kPatternOffset, key_pattern);
3053       cache->set(index + kArrayOffset, value_array);
3054     }
3055   }
3056   // If the array is a reasonably short list of substrings, convert it into a
3057   // list of internalized strings.
3058   if (type == STRING_SPLIT_SUBSTRINGS && value_array->length() < 100) {
3059     for (int i = 0; i < value_array->length(); i++) {
3060       String* str = String::cast(value_array->get(i));
3061       Object* internalized_str;
3062       MaybeObject* maybe_string = heap->InternalizeString(str);
3063       if (maybe_string->ToObject(&internalized_str)) {
3064         value_array->set(i, internalized_str);
3065       }
3066     }
3067   }
3068   // Convert backing store to a copy-on-write array.
3069   value_array->set_map_no_write_barrier(heap->fixed_cow_array_map());
3070 }
3071
3072
3073 void RegExpResultsCache::Clear(FixedArray* cache) {
3074   for (int i = 0; i < kRegExpResultsCacheSize; i++) {
3075     cache->set(i, Smi::FromInt(0));
3076   }
3077 }
3078
3079
3080 MaybeObject* Heap::AllocateInitialNumberStringCache() {
3081   MaybeObject* maybe_obj =
3082       AllocateFixedArray(kInitialNumberStringCacheSize * 2, TENURED);
3083   return maybe_obj;
3084 }
3085
3086
3087 int Heap::FullSizeNumberStringCacheLength() {
3088   // Compute the size of the number string cache based on the max newspace size.
3089   // The number string cache has a minimum size based on twice the initial cache
3090   // size to ensure that it is bigger after being made 'full size'.
3091   int number_string_cache_size = max_semispace_size_ / 512;
3092   number_string_cache_size = Max(kInitialNumberStringCacheSize * 2,
3093                                  Min(0x4000, number_string_cache_size));
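  // For example, an 8 MB semispace gives 8 MB / 512 = 16384 (0x4000) entries,
  // which already hits the cap applied above.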
3094   // There is a string and a number per entry so the length is twice the number
3095   // of entries.
3096   return number_string_cache_size * 2;
3097 }
3098
3099
3100 void Heap::AllocateFullSizeNumberStringCache() {
3101   // The idea is to have a small number string cache in the snapshot to keep
3102   // boot-time memory usage down.  Expanding the cache to full size while the
3103   // snapshot is being created would defeat that purpose, hence the assert below.
3104   ASSERT(!Serializer::enabled() || FLAG_extra_code != NULL);
3105   MaybeObject* maybe_obj =
3106       AllocateFixedArray(FullSizeNumberStringCacheLength(), TENURED);
3107   Object* new_cache;
3108   if (maybe_obj->ToObject(&new_cache)) {
3109     // We don't bother to repopulate the cache with entries from the old cache.
3110     // It will be repopulated soon enough with new strings.
3111     set_number_string_cache(FixedArray::cast(new_cache));
3112   }
3113   // If allocation fails then we just return without doing anything.  It is only
3114   // a cache, so best effort is OK here.
3115 }
3116
3117
3118 void Heap::FlushNumberStringCache() {
3119   // Flush the number to string cache.
3120   int len = number_string_cache()->length();
3121   for (int i = 0; i < len; i++) {
3122     number_string_cache()->set_undefined(this, i);
3123   }
3124 }
3125
3126
3127 static inline int double_get_hash(double d) {
3128   DoubleRepresentation rep(d);
3129   return static_cast<int>(rep.bits) ^ static_cast<int>(rep.bits >> 32);
3130 }
3131
3132
3133 static inline int smi_get_hash(Smi* smi) {
3134   return smi->value();
3135 }
3136
3137
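// The number string cache is a flat FixedArray of (number, string) pairs:
// slot hash * 2 holds the key and slot hash * 2 + 1 the cached string.  Smis
// hash by value; heap numbers hash by xor-folding their 64-bit bit pattern
// (see double_get_hash above).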
3138 Object* Heap::GetNumberStringCache(Object* number) {
3139   int hash;
3140   int mask = (number_string_cache()->length() >> 1) - 1;
3141   if (number->IsSmi()) {
3142     hash = smi_get_hash(Smi::cast(number)) & mask;
3143   } else {
3144     hash = double_get_hash(number->Number()) & mask;
3145   }
3146   Object* key = number_string_cache()->get(hash * 2);
3147   if (key == number) {
3148     return String::cast(number_string_cache()->get(hash * 2 + 1));
3149   } else if (key->IsHeapNumber() &&
3150              number->IsHeapNumber() &&
3151              key->Number() == number->Number()) {
3152     return String::cast(number_string_cache()->get(hash * 2 + 1));
3153   }
3154   return undefined_value();
3155 }
3156
3157
3158 void Heap::SetNumberStringCache(Object* number, String* string) {
3159   int hash;
3160   int mask = (number_string_cache()->length() >> 1) - 1;
3161   if (number->IsSmi()) {
3162     hash = smi_get_hash(Smi::cast(number)) & mask;
3163   } else {
3164     hash = double_get_hash(number->Number()) & mask;
3165   }
3166   if (number_string_cache()->get(hash * 2) != undefined_value() &&
3167       number_string_cache()->length() != FullSizeNumberStringCacheLength()) {
3168     // The first time we have a hash collision, we move to the full sized
3169     // number string cache.
3170     AllocateFullSizeNumberStringCache();
3171     return;
3172   }
3173   number_string_cache()->set(hash * 2, number);
3174   number_string_cache()->set(hash * 2 + 1, string);
3175 }
3176
3177
3178 MaybeObject* Heap::NumberToString(Object* number,
3179                                   bool check_number_string_cache) {
3180   isolate_->counters()->number_to_string_runtime()->Increment();
3181   if (check_number_string_cache) {
3182     Object* cached = GetNumberStringCache(number);
3183     if (cached != undefined_value()) {
3184       return cached;
3185     }
3186   }
3187
3188   char arr[100];
3189   Vector<char> buffer(arr, ARRAY_SIZE(arr));
3190   const char* str;
3191   if (number->IsSmi()) {
3192     int num = Smi::cast(number)->value();
3193     str = IntToCString(num, buffer);
3194   } else {
3195     double num = HeapNumber::cast(number)->value();
3196     str = DoubleToCString(num, buffer);
3197   }
3198
3199   Object* js_string;
3200   MaybeObject* maybe_js_string = AllocateStringFromOneByte(CStrVector(str));
3201   if (maybe_js_string->ToObject(&js_string)) {
3202     SetNumberStringCache(number, String::cast(js_string));
3203   }
3204   return maybe_js_string;
3205 }
3206
3207
3208 MaybeObject* Heap::Uint32ToString(uint32_t value,
3209                                   bool check_number_string_cache) {
3210   Object* number;
3211   MaybeObject* maybe = NumberFromUint32(value);
3212   if (!maybe->To<Object>(&number)) return maybe;
3213   return NumberToString(number, check_number_string_cache);
3214 }
3215
3216
3217 Map* Heap::MapForExternalArrayType(ExternalArrayType array_type) {
3218   return Map::cast(roots_[RootIndexForExternalArrayType(array_type)]);
3219 }
3220
3221
3222 Heap::RootListIndex Heap::RootIndexForExternalArrayType(
3223     ExternalArrayType array_type) {
3224   switch (array_type) {
3225     case kExternalByteArray:
3226       return kExternalByteArrayMapRootIndex;
3227     case kExternalUnsignedByteArray:
3228       return kExternalUnsignedByteArrayMapRootIndex;
3229     case kExternalShortArray:
3230       return kExternalShortArrayMapRootIndex;
3231     case kExternalUnsignedShortArray:
3232       return kExternalUnsignedShortArrayMapRootIndex;
3233     case kExternalIntArray:
3234       return kExternalIntArrayMapRootIndex;
3235     case kExternalUnsignedIntArray:
3236       return kExternalUnsignedIntArrayMapRootIndex;
3237     case kExternalFloatArray:
3238       return kExternalFloatArrayMapRootIndex;
3239     case kExternalDoubleArray:
3240       return kExternalDoubleArrayMapRootIndex;
3241     case kExternalPixelArray:
3242       return kExternalPixelArrayMapRootIndex;
3243     default:
3244       UNREACHABLE();
3245       return kUndefinedValueRootIndex;
3246   }
3247 }
3248
3249
3250 MaybeObject* Heap::NumberFromDouble(double value, PretenureFlag pretenure) {
3251   // We need to distinguish the minus zero value and this cannot be
3252   // done after conversion to int. Doing this by comparing bit
3253   // patterns is faster than using fpclassify() et al.
3254   static const DoubleRepresentation minus_zero(-0.0);
3255
3256   DoubleRepresentation rep(value);
3257   if (rep.bits == minus_zero.bits) {
3258     return AllocateHeapNumber(-0.0, pretenure);
3259   }
3260
3261   int int_value = FastD2I(value);
3262   if (value == int_value && Smi::IsValid(int_value)) {
3263     return Smi::FromInt(int_value);
3264   }
3265
3266   // Materialize the value in the heap.
3267   return AllocateHeapNumber(value, pretenure);
3268 }
3269
3270
3271 MaybeObject* Heap::AllocateForeign(Address address, PretenureFlag pretenure) {
3272   // Statically ensure that it is safe to allocate foreigns in paged spaces.
3273   STATIC_ASSERT(Foreign::kSize <= Page::kMaxNonCodeHeapObjectSize);
3274   AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
3275   Foreign* result;
3276   MaybeObject* maybe_result = Allocate(foreign_map(), space);
3277   if (!maybe_result->To(&result)) return maybe_result;
3278   result->set_foreign_address(address);
3279   return result;
3280 }
3281
3282
3283 MaybeObject* Heap::AllocateSharedFunctionInfo(Object* name) {
3284   SharedFunctionInfo* share;
3285   MaybeObject* maybe = Allocate(shared_function_info_map(), OLD_POINTER_SPACE);
3286   if (!maybe->To<SharedFunctionInfo>(&share)) return maybe;
3287
3288   // Set pointer fields.
3289   share->set_name(name);
3290   Code* illegal = isolate_->builtins()->builtin(Builtins::kIllegal);
3291   share->set_code(illegal);
3292   share->ClearOptimizedCodeMap();
3293   share->set_scope_info(ScopeInfo::Empty(isolate_));
3294   Code* construct_stub =
3295       isolate_->builtins()->builtin(Builtins::kJSConstructStubGeneric);
3296   share->set_construct_stub(construct_stub);
3297   share->set_instance_class_name(Object_string());
3298   share->set_function_data(undefined_value(), SKIP_WRITE_BARRIER);
3299   share->set_script(undefined_value(), SKIP_WRITE_BARRIER);
3300   share->set_debug_info(undefined_value(), SKIP_WRITE_BARRIER);
3301   share->set_inferred_name(empty_string(), SKIP_WRITE_BARRIER);
3302   share->set_initial_map(undefined_value(), SKIP_WRITE_BARRIER);
3303   share->set_this_property_assignments(undefined_value(), SKIP_WRITE_BARRIER);
3304   share->set_ast_node_count(0);
3305   share->set_stress_deopt_counter(FLAG_deopt_every_n_times);
3306   share->set_counters(0);
3307
3308   // Set integer fields (smi or int, depending on the architecture).
3309   share->set_length(0);
3310   share->set_formal_parameter_count(0);
3311   share->set_expected_nof_properties(0);
3312   share->set_num_literals(0);
3313   share->set_start_position_and_type(0);
3314   share->set_end_position(0);
3315   share->set_function_token_position(0);
3316   // All compiler hints default to false or 0.
3317   share->set_compiler_hints(0);
3318   share->set_this_property_assignments_count(0);
3319   share->set_opt_count(0);
3320
3321   return share;
3322 }
3323
3324
3325 MaybeObject* Heap::AllocateJSMessageObject(String* type,
3326                                            JSArray* arguments,
3327                                            int start_position,
3328                                            int end_position,
3329                                            Object* script,
3330                                            Object* stack_trace,
3331                                            Object* stack_frames) {
3332   Object* result;
3333   { MaybeObject* maybe_result = Allocate(message_object_map(), NEW_SPACE);
3334     if (!maybe_result->ToObject(&result)) return maybe_result;
3335   }
3336   JSMessageObject* message = JSMessageObject::cast(result);
3337   message->set_properties(Heap::empty_fixed_array(), SKIP_WRITE_BARRIER);
3338   message->initialize_elements();
3339   message->set_elements(Heap::empty_fixed_array(), SKIP_WRITE_BARRIER);
3340   message->set_type(type);
3341   message->set_arguments(arguments);
3342   message->set_start_position(start_position);
3343   message->set_end_position(end_position);
3344   message->set_script(script);
3345   message->set_stack_trace(stack_trace);
3346   message->set_stack_frames(stack_frames);
3347   return result;
3348 }
3349
3350
3351
3352 // Returns true for a character in a range.  Both limits are inclusive.
3353 static inline bool Between(uint32_t character, uint32_t from, uint32_t to) {
3354   // This makes use of unsigned wraparound.
3355   return character - from <= to - from;
3356 }
3357
3358
3359 MUST_USE_RESULT static inline MaybeObject* MakeOrFindTwoCharacterString(
3360     Heap* heap,
3361     uint16_t c1,
3362     uint16_t c2) {
3363   String* result;
3364   // Numeric strings have a different hash algorithm not known by
3365   // LookupTwoCharsStringIfExists, so we skip this step for such strings.
3366   if ((!Between(c1, '0', '9') || !Between(c2, '0', '9')) &&
3367       heap->string_table()->LookupTwoCharsStringIfExists(c1, c2, &result)) {
3368     return result;
3369   // Now we know the length is 2, we might as well make use of that fact
3370   // when building the new string.
3371   } else if (static_cast<unsigned>(c1 | c2) <= String::kMaxOneByteCharCodeU) {
3372     // Both characters fit in a one-byte string.
3373     ASSERT(IsPowerOf2(String::kMaxOneByteCharCodeU + 1));  // For the c1 | c2 check.
3374     Object* result;
3375     { MaybeObject* maybe_result = heap->AllocateRawOneByteString(2);
3376       if (!maybe_result->ToObject(&result)) return maybe_result;
3377     }
3378     uint8_t* dest = SeqOneByteString::cast(result)->GetChars();
3379     dest[0] = static_cast<uint8_t>(c1);
3380     dest[1] = static_cast<uint8_t>(c2);
3381     return result;
3382   } else {
3383     Object* result;
3384     { MaybeObject* maybe_result = heap->AllocateRawTwoByteString(2);
3385       if (!maybe_result->ToObject(&result)) return maybe_result;
3386     }
3387     uc16* dest = SeqTwoByteString::cast(result)->GetChars();
3388     dest[0] = c1;
3389     dest[1] = c2;
3390     return result;
3391   }
3392 }
3393
3394
3395 MaybeObject* Heap::AllocateConsString(String* first, String* second) {
3396   int first_length = first->length();
3397   if (first_length == 0) {
3398     return second;
3399   }
3400
3401   int second_length = second->length();
3402   if (second_length == 0) {
3403     return first;
3404   }
3405
3406   int length = first_length + second_length;
3407
3408   // Optimization for two-character strings often used as keys in a
3409   // decompression dictionary.  Check whether we already have the string in
3410   // the string table to prevent creation of many unnecessary strings.
3411   if (length == 2) {
3412     uint16_t c1 = first->Get(0);
3413     uint16_t c2 = second->Get(0);
3414     return MakeOrFindTwoCharacterString(this, c1, c2);
3415   }
3416
3417   bool first_is_one_byte = first->IsOneByteRepresentation();
3418   bool second_is_one_byte = second->IsOneByteRepresentation();
3419   bool is_one_byte = first_is_one_byte && second_is_one_byte;
3420   // Make sure that an out of memory exception is thrown if the length
3421   // of the new cons string is too large.
3422   if (length > String::kMaxLength || length < 0) {
3423     isolate()->context()->mark_out_of_memory();
3424     return Failure::OutOfMemoryException(0x4);
3425   }
3426
3427   bool is_one_byte_data_in_two_byte_string = false;
3428   if (!is_one_byte) {
3429     // At least one of the strings uses two-byte representation so we
3430     // can't use the fast case code for short ASCII strings below, but
3431     // we can try to save memory if all chars actually fit in ASCII.
3432     is_one_byte_data_in_two_byte_string =
3433         first->HasOnlyOneByteChars() && second->HasOnlyOneByteChars();
3434     if (is_one_byte_data_in_two_byte_string) {
3435       isolate_->counters()->string_add_runtime_ext_to_ascii()->Increment();
3436     }
3437   }
3438
3439   // If the resulting string is small make a flat string.
3440   if (length < ConsString::kMinLength) {
3441     // Note that neither of the two inputs can be a slice because:
3442     STATIC_ASSERT(ConsString::kMinLength <= SlicedString::kMinLength);
3443     ASSERT(first->IsFlat());
3444     ASSERT(second->IsFlat());
3445     if (is_one_byte) {
3446       Object* result;
3447       { MaybeObject* maybe_result = AllocateRawOneByteString(length);
3448         if (!maybe_result->ToObject(&result)) return maybe_result;
3449       }
3450       // Copy the characters into the new object.
3451       uint8_t* dest = SeqOneByteString::cast(result)->GetChars();
3452       // Copy first part.
3453       const uint8_t* src;
3454       if (first->IsExternalString()) {
3455         src = ExternalAsciiString::cast(first)->GetChars();
3456       } else {
3457         src = SeqOneByteString::cast(first)->GetChars();
3458       }
3459       for (int i = 0; i < first_length; i++) *dest++ = src[i];
3460       // Copy second part.
3461       if (second->IsExternalString()) {
3462         src = ExternalAsciiString::cast(second)->GetChars();
3463       } else {
3464         src = SeqOneByteString::cast(second)->GetChars();
3465       }
3466       for (int i = 0; i < second_length; i++) *dest++ = src[i];
3467       return result;
3468     } else {
3469       if (is_one_byte_data_in_two_byte_string) {
3470         Object* result;
3471         { MaybeObject* maybe_result = AllocateRawOneByteString(length);
3472           if (!maybe_result->ToObject(&result)) return maybe_result;
3473         }
3474         // Copy the characters into the new object.
3475         uint8_t* dest = SeqOneByteString::cast(result)->GetChars();
3476         String::WriteToFlat(first, dest, 0, first_length);
3477         String::WriteToFlat(second, dest + first_length, 0, second_length);
3478         isolate_->counters()->string_add_runtime_ext_to_ascii()->Increment();
3479         return result;
3480       }
3481
3482       Object* result;
3483       { MaybeObject* maybe_result = AllocateRawTwoByteString(length);
3484         if (!maybe_result->ToObject(&result)) return maybe_result;
3485       }
3486       // Copy the characters into the new object.
3487       uc16* dest = SeqTwoByteString::cast(result)->GetChars();
3488       String::WriteToFlat(first, dest, 0, first_length);
3489       String::WriteToFlat(second, dest + first_length, 0, second_length);
3490       return result;
3491     }
3492   }
3493
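  // Pick the one-byte cons map whenever the payload is known to consist of
  // one-byte characters, even if an input currently uses a two-byte
  // representation.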
3494   Map* map = (is_one_byte || is_one_byte_data_in_two_byte_string) ?
3495       cons_ascii_string_map() : cons_string_map();
3496
3497   Object* result;
3498   { MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
3499     if (!maybe_result->ToObject(&result)) return maybe_result;
3500   }
3501
3502   AssertNoAllocation no_gc;
3503   ConsString* cons_string = ConsString::cast(result);
3504   WriteBarrierMode mode = cons_string->GetWriteBarrierMode(no_gc);
3505   cons_string->set_length(length);
3506   cons_string->set_hash_field(String::kEmptyHashField);
3507   cons_string->set_first(first, mode);
3508   cons_string->set_second(second, mode);
3509   return result;
3510 }
3511
3512
3513 MaybeObject* Heap::AllocateSubString(String* buffer,
3514                                      int start,
3515                                      int end,
3516                                      PretenureFlag pretenure) {
3517   int length = end - start;
3518   if (length <= 0) {
3519     return empty_string();
3520   } else if (length == 1) {
3521     return LookupSingleCharacterStringFromCode(buffer->Get(start));
3522   } else if (length == 2) {
3523     // Optimization for two-character strings often used as keys in a
3524     // decompression dictionary.  Check whether we already have the string
3525     // in the string table to prevent creation of many unnecessary strings.
3526     uint16_t c1 = buffer->Get(start);
3527     uint16_t c2 = buffer->Get(start + 1);
3528     return MakeOrFindTwoCharacterString(this, c1, c2);
3529   }
3530
3531   // Make an attempt to flatten the buffer to reduce access time.
3532   buffer = buffer->TryFlattenGetString();
3533
3534   if (!FLAG_string_slices ||
3535       !buffer->IsFlat() ||
3536       length < SlicedString::kMinLength ||
3537       pretenure == TENURED) {
3538     Object* result;
3539     // WriteToFlat takes care of the case when an indirect string has a
3540     // different encoding from its underlying string.  These encodings may
3541     // differ because of externalization.
3542     bool is_one_byte = buffer->IsOneByteRepresentation();
3543     { MaybeObject* maybe_result = is_one_byte
3544                                   ? AllocateRawOneByteString(length, pretenure)
3545                                   : AllocateRawTwoByteString(length, pretenure);
3546       if (!maybe_result->ToObject(&result)) return maybe_result;
3547     }
3548     String* string_result = String::cast(result);
3549     // Copy the characters into the new object.
3550     if (is_one_byte) {
3551       ASSERT(string_result->IsOneByteRepresentation());
3552       uint8_t* dest = SeqOneByteString::cast(string_result)->GetChars();
3553       String::WriteToFlat(buffer, dest, start, end);
3554     } else {
3555       ASSERT(string_result->IsTwoByteRepresentation());
3556       uc16* dest = SeqTwoByteString::cast(string_result)->GetChars();
3557       String::WriteToFlat(buffer, dest, start, end);
3558     }
3559     return result;
3560   }
3561
3562   ASSERT(buffer->IsFlat());
3563 #ifdef VERIFY_HEAP
3564   if (FLAG_verify_heap) {
3565     buffer->StringVerify();
3566   }
3567 #endif
3568
3569   Object* result;
3570   // When slicing an indirect string we use its encoding for a newly created
3571   // slice and don't check the encoding of the underlying string.  This is safe
3572   // even if the encodings are different because of externalization.  If an
3573   // indirect ASCII string is pointing to a two-byte string, the two-byte char
3574   // codes of the underlying string must still fit into ASCII (because
3575   // externalization must not change char codes).
3576   { Map* map = buffer->IsOneByteRepresentation()
3577                  ? sliced_ascii_string_map()
3578                  : sliced_string_map();
3579     MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
3580     if (!maybe_result->ToObject(&result)) return maybe_result;
3581   }
3582
3583   AssertNoAllocation no_gc;
3584   SlicedString* sliced_string = SlicedString::cast(result);
3585   sliced_string->set_length(length);
3586   sliced_string->set_hash_field(String::kEmptyHashField);
3587   if (buffer->IsConsString()) {
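    // The buffer was flattened above, so any cons string left here has an
    // empty second part and we can slice its first part directly.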
3588     ConsString* cons = ConsString::cast(buffer);
3589     ASSERT(cons->second()->length() == 0);
3590     sliced_string->set_parent(cons->first());
3591     sliced_string->set_offset(start);
3592   } else if (buffer->IsSlicedString()) {
3593     // Prevent nesting sliced strings.
3594     SlicedString* parent_slice = SlicedString::cast(buffer);
3595     sliced_string->set_parent(parent_slice->parent());
3596     sliced_string->set_offset(start + parent_slice->offset());
3597   } else {
3598     sliced_string->set_parent(buffer);
3599     sliced_string->set_offset(start);
3600   }
3601   ASSERT(sliced_string->parent()->IsSeqString() ||
3602          sliced_string->parent()->IsExternalString());
3603   return result;
3604 }
3605
3606
3607 MaybeObject* Heap::AllocateExternalStringFromAscii(
3608     const ExternalAsciiString::Resource* resource) {
3609   size_t length = resource->length();
3610   if (length > static_cast<size_t>(String::kMaxLength)) {
3611     isolate()->context()->mark_out_of_memory();
3612     return Failure::OutOfMemoryException(0x5);
3613   }
3614
3615   Map* map = external_ascii_string_map();
3616   Object* result;
3617   { MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
3618     if (!maybe_result->ToObject(&result)) return maybe_result;
3619   }
3620
3621   ExternalAsciiString* external_string = ExternalAsciiString::cast(result);
3622   external_string->set_length(static_cast<int>(length));
3623   external_string->set_hash_field(String::kEmptyHashField);
3624   external_string->set_resource(resource);
3625
3626   return result;
3627 }
3628
3629
3630 MaybeObject* Heap::AllocateExternalStringFromTwoByte(
3631     const ExternalTwoByteString::Resource* resource) {
3632   size_t length = resource->length();
3633   if (length > static_cast<size_t>(String::kMaxLength)) {
3634     isolate()->context()->mark_out_of_memory();
3635     return Failure::OutOfMemoryException(0x6);
3636   }
3637
3638   // For small strings we check whether the resource contains only
3639   // one-byte characters.  If so, we use a different string map.
3640   static const size_t kOneByteCheckLengthLimit = 32;
3641   bool is_one_byte = length <= kOneByteCheckLengthLimit &&
3642       String::IsOneByte(resource->data(), static_cast<int>(length));
3643   Map* map = is_one_byte ?
3644       external_string_with_one_byte_data_map() : external_string_map();
3645   Object* result;
3646   { MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
3647     if (!maybe_result->ToObject(&result)) return maybe_result;
3648   }
3649
3650   ExternalTwoByteString* external_string = ExternalTwoByteString::cast(result);
3651   external_string->set_length(static_cast<int>(length));
3652   external_string->set_hash_field(String::kEmptyHashField);
3653   external_string->set_resource(resource);
3654
3655   return result;
3656 }
3657
3658
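// Single-character strings in the one-byte range are internalized and cached;
// character codes outside that range always get a fresh two-byte string.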
3659 MaybeObject* Heap::LookupSingleCharacterStringFromCode(uint16_t code) {
3660   if (code <= String::kMaxOneByteCharCode) {
3661     Object* value = single_character_string_cache()->get(code);
3662     if (value != undefined_value()) return value;
3663
3664     uint8_t buffer[1];
3665     buffer[0] = static_cast<uint8_t>(code);
3666     Object* result;
3667     MaybeObject* maybe_result =
3668         InternalizeOneByteString(Vector<const uint8_t>(buffer, 1));
3669
3670     if (!maybe_result->ToObject(&result)) return maybe_result;
3671     single_character_string_cache()->set(code, result);
3672     return result;
3673   }
3674
3675   Object* result;
3676   { MaybeObject* maybe_result = AllocateRawTwoByteString(1);
3677     if (!maybe_result->ToObject(&result)) return maybe_result;
3678   }
3679   String* answer = String::cast(result);
3680   answer->Set(0, code);
3681   return answer;
3682 }
3683
3684
3685 MaybeObject* Heap::AllocateByteArray(int length, PretenureFlag pretenure) {
3686   if (length < 0 || length > ByteArray::kMaxLength) {
3687     return Failure::OutOfMemoryException(0x7);
3688   }
3689   if (pretenure == NOT_TENURED) {
3690     return AllocateByteArray(length);
3691   }
3692   int size = ByteArray::SizeFor(length);
3693   Object* result;
3694   { MaybeObject* maybe_result = (size <= Page::kMaxNonCodeHeapObjectSize)
3695                    ? old_data_space_->AllocateRaw(size)
3696                    : lo_space_->AllocateRaw(size, NOT_EXECUTABLE);
3697     if (!maybe_result->ToObject(&result)) return maybe_result;
3698   }
3699
3700   reinterpret_cast<ByteArray*>(result)->set_map_no_write_barrier(
3701       byte_array_map());
3702   reinterpret_cast<ByteArray*>(result)->set_length(length);
3703   return result;
3704 }
3705
3706
3707 MaybeObject* Heap::AllocateByteArray(int length) {
3708   if (length < 0 || length > ByteArray::kMaxLength) {
3709     return Failure::OutOfMemoryException(0x8);
3710   }
3711   int size = ByteArray::SizeFor(length);
3712   AllocationSpace space =
3713       (size > Page::kMaxNonCodeHeapObjectSize) ? LO_SPACE : NEW_SPACE;
3714   Object* result;
3715   { MaybeObject* maybe_result = AllocateRaw(size, space, OLD_DATA_SPACE);
3716     if (!maybe_result->ToObject(&result)) return maybe_result;
3717   }
3718
3719   reinterpret_cast<ByteArray*>(result)->set_map_no_write_barrier(
3720       byte_array_map());
3721   reinterpret_cast<ByteArray*>(result)->set_length(length);
3722   return result;
3723 }
3724
3725
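// Overwrite a dead region of the given size with a filler object so that the
// heap stays iterable; the filler map chosen depends only on the size.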
3726 void Heap::CreateFillerObjectAt(Address addr, int size) {
3727   if (size == 0) return;
3728   HeapObject* filler = HeapObject::FromAddress(addr);
3729   if (size == kPointerSize) {
3730     filler->set_map_no_write_barrier(one_pointer_filler_map());
3731   } else if (size == 2 * kPointerSize) {
3732     filler->set_map_no_write_barrier(two_pointer_filler_map());
3733   } else {
3734     filler->set_map_no_write_barrier(free_space_map());
3735     FreeSpace::cast(filler)->set_size(size);
3736   }
3737 }
3738
3739
3740 MaybeObject* Heap::AllocateExternalArray(int length,
3741                                          ExternalArrayType array_type,
3742                                          void* external_pointer,
3743                                          PretenureFlag pretenure) {
3744   AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
3745   Object* result;
3746   { MaybeObject* maybe_result = AllocateRaw(ExternalArray::kAlignedSize,
3747                                             space,
3748                                             OLD_DATA_SPACE);
3749     if (!maybe_result->ToObject(&result)) return maybe_result;
3750   }
3751
3752   reinterpret_cast<ExternalArray*>(result)->set_map_no_write_barrier(
3753       MapForExternalArrayType(array_type));
3754   reinterpret_cast<ExternalArray*>(result)->set_length(length);
3755   reinterpret_cast<ExternalArray*>(result)->set_external_pointer(
3756       external_pointer);
3757
3758   return result;
3759 }
3760
3761
3762 MaybeObject* Heap::CreateCode(const CodeDesc& desc,
3763                               Code::Flags flags,
3764                               Handle<Object> self_reference,
3765                               bool immovable,
3766                               bool crankshafted) {
3767   // Allocate the ByteArray before the Code object, so that we do not risk
3768   // leaving an uninitialized Code object behind (and breaking the heap).
3769   ByteArray* reloc_info;
3770   MaybeObject* maybe_reloc_info = AllocateByteArray(desc.reloc_size, TENURED);
3771   if (!maybe_reloc_info->To(&reloc_info)) return maybe_reloc_info;
3772
3773   // Compute size.
3774   int body_size = RoundUp(desc.instr_size, kObjectAlignment);
3775   int obj_size = Code::SizeFor(body_size);
3776   ASSERT(IsAligned(static_cast<intptr_t>(obj_size), kCodeAlignment));
3777   MaybeObject* maybe_result;
3778   // Large code objects and code objects which should stay at a fixed address
3779   // are allocated in large object space.
3780   HeapObject* result;
3781   bool force_lo_space = obj_size > code_space()->AreaSize();
3782   if (force_lo_space) {
3783     maybe_result = lo_space_->AllocateRaw(obj_size, EXECUTABLE);
3784   } else {
3785     maybe_result = code_space_->AllocateRaw(obj_size);
3786   }
3787   if (!maybe_result->To<HeapObject>(&result)) return maybe_result;
3788
3789   if (immovable && !force_lo_space &&
3790       // Objects on the first page of each space are never moved.
3791       !code_space_->FirstPage()->Contains(result->address())) {
3792     // Discard the first code allocation, which was on a page where it could be
3793     // moved.
3794     CreateFillerObjectAt(result->address(), obj_size);
3795     maybe_result = lo_space_->AllocateRaw(obj_size, EXECUTABLE);
3796     if (!maybe_result->To<HeapObject>(&result)) return maybe_result;
3797   }
3798
3799   // Initialize the object
3800   result->set_map_no_write_barrier(code_map());
3801   Code* code = Code::cast(result);
3802   ASSERT(!isolate_->code_range()->exists() ||
3803       isolate_->code_range()->contains(code->address()));
3804   code->set_instruction_size(desc.instr_size);
3805   code->set_relocation_info(reloc_info);
3806   code->set_flags(flags);
3807   if (code->is_call_stub() || code->is_keyed_call_stub()) {
3808     code->set_check_type(RECEIVER_MAP_CHECK);
3809   }
3810   code->set_is_crankshafted(crankshafted);
3811   code->set_deoptimization_data(empty_fixed_array(), SKIP_WRITE_BARRIER);
3812   code->InitializeTypeFeedbackInfoNoWriteBarrier(undefined_value());
3813   code->set_handler_table(empty_fixed_array(), SKIP_WRITE_BARRIER);
3814   code->set_gc_metadata(Smi::FromInt(0));
3815   code->set_ic_age(global_ic_age_);
3816   code->set_prologue_offset(kPrologueOffsetNotSet);
3817   if (code->kind() == Code::OPTIMIZED_FUNCTION) {
3818     code->set_marked_for_deoptimization(false);
3819   }
3820   // Allow self-references to the created code object by patching the handle
3821   // to point to the newly allocated Code object.
3822   if (!self_reference.is_null()) {
3823     *(self_reference.location()) = code;
3824   }
3825   // Migrate generated code.
3826   // The generated code can contain Object** values (typically from handles)
3827   // that are dereferenced during the copy to point directly to the actual heap
3828   // objects. These pointers can include references to the code object itself,
3829   // through the self_reference parameter.
3830   code->CopyFrom(desc);
3831
3832 #ifdef VERIFY_HEAP
3833   if (FLAG_verify_heap) {
3834     code->Verify();
3835   }
3836 #endif
3837   return code;
3838 }
3839
3840
3841 MaybeObject* Heap::CopyCode(Code* code) {
3842   // Allocate an object the same size as the code object.
3843   int obj_size = code->Size();
3844   MaybeObject* maybe_result;
3845   if (obj_size > code_space()->AreaSize()) {
3846     maybe_result = lo_space_->AllocateRaw(obj_size, EXECUTABLE);
3847   } else {
3848     maybe_result = code_space_->AllocateRaw(obj_size);
3849   }
3850
3851   Object* result;
3852   if (!maybe_result->ToObject(&result)) return maybe_result;
3853
3854   // Copy code object.
3855   Address old_addr = code->address();
3856   Address new_addr = reinterpret_cast<HeapObject*>(result)->address();
3857   CopyBlock(new_addr, old_addr, obj_size);
3858   // Relocate the copy.
3859   Code* new_code = Code::cast(result);
3860   ASSERT(!isolate_->code_range()->exists() ||
3861       isolate_->code_range()->contains(code->address()));
3862   new_code->Relocate(new_addr - old_addr);
3863   return new_code;
3864 }
3865
3866
3867 MaybeObject* Heap::CopyCode(Code* code, Vector<byte> reloc_info) {
3868   // Allocate the ByteArray before the Code object, so that we do not risk
3869   // leaving an uninitialized Code object behind (and breaking the heap).
3870   Object* reloc_info_array;
3871   { MaybeObject* maybe_reloc_info_array =
3872         AllocateByteArray(reloc_info.length(), TENURED);
3873     if (!maybe_reloc_info_array->ToObject(&reloc_info_array)) {
3874       return maybe_reloc_info_array;
3875     }
3876   }
3877
3878   int new_body_size = RoundUp(code->instruction_size(), kObjectAlignment);
3879
3880   int new_obj_size = Code::SizeFor(new_body_size);
3881
3882   Address old_addr = code->address();
3883
3884   size_t relocation_offset =
3885       static_cast<size_t>(code->instruction_end() - old_addr);
3886
3887   MaybeObject* maybe_result;
3888   if (new_obj_size > code_space()->AreaSize()) {
3889     maybe_result = lo_space_->AllocateRaw(new_obj_size, EXECUTABLE);
3890   } else {
3891     maybe_result = code_space_->AllocateRaw(new_obj_size);
3892   }
3893
3894   Object* result;
3895   if (!maybe_result->ToObject(&result)) return maybe_result;
3896
3897   // Copy code object.
3898   Address new_addr = reinterpret_cast<HeapObject*>(result)->address();
3899
3900   // Copy header and instructions.
3901   CopyBytes(new_addr, old_addr, relocation_offset);
3902
3903   Code* new_code = Code::cast(result);
3904   new_code->set_relocation_info(ByteArray::cast(reloc_info_array));
3905
3906   // Copy patched rinfo.
3907   CopyBytes(new_code->relocation_start(),
3908             reloc_info.start(),
3909             static_cast<size_t>(reloc_info.length()));
3910
3911   // Relocate the copy.
3912   ASSERT(!isolate_->code_range()->exists() ||
3913       isolate_->code_range()->contains(code->address()));
3914   new_code->Relocate(new_addr - old_addr);
3915
3916 #ifdef VERIFY_HEAP
3917   if (FLAG_verify_heap) {
3918     new_code->Verify();
3919   }
3920 #endif
3921   return new_code;
3922 }
3923
3924
3925 MaybeObject* Heap::AllocateWithAllocationSite(Map* map, AllocationSpace space,
3926     Handle<Object> allocation_site_info_payload) {
3927   ASSERT(gc_state_ == NOT_IN_GC);
3928   ASSERT(map->instance_type() != MAP_TYPE);
3929   // If allocation failures are disallowed, we may allocate in a different
3930   // space when new space is full and the object is not a large object.
3931   AllocationSpace retry_space =
3932       (space != NEW_SPACE) ? space : TargetSpaceId(map->instance_type());
3933   int size = map->instance_size() + AllocationSiteInfo::kSize;
3934   Object* result;
3935   MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
3936   if (!maybe_result->ToObject(&result)) return maybe_result;
3937   // No need for write barrier since object is white and map is in old space.
3938   HeapObject::cast(result)->set_map_no_write_barrier(map);
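  // The AllocationSiteInfo is laid out directly behind the object itself.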
3939   AllocationSiteInfo* alloc_info = reinterpret_cast<AllocationSiteInfo*>(
3940       reinterpret_cast<Address>(result) + map->instance_size());
3941   alloc_info->set_map_no_write_barrier(allocation_site_info_map());
3942   alloc_info->set_payload(*allocation_site_info_payload, SKIP_WRITE_BARRIER);
3943   return result;
3944 }
3945
3946
3947 MaybeObject* Heap::Allocate(Map* map, AllocationSpace space) {
3948   ASSERT(gc_state_ == NOT_IN_GC);
3949   ASSERT(map->instance_type() != MAP_TYPE);
3950   // If allocation failures are disallowed, we may allocate in a different
3951   // space when new space is full and the object is not a large object.
3952   AllocationSpace retry_space =
3953       (space != NEW_SPACE) ? space : TargetSpaceId(map->instance_type());
3954   int size = map->instance_size();
3955   Object* result;
3956   MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
3957   if (!maybe_result->ToObject(&result)) return maybe_result;
3958   // No need for write barrier since object is white and map is in old space.
3959   HeapObject::cast(result)->set_map_no_write_barrier(map);
3960   return result;
3961 }
3962
3963
3964 void Heap::InitializeFunction(JSFunction* function,
3965                               SharedFunctionInfo* shared,
3966                               Object* prototype) {
3967   ASSERT(!prototype->IsMap());
3968   function->initialize_properties();
3969   function->initialize_elements();
3970   function->set_shared(shared);
3971   function->set_code(shared->code());
3972   function->set_prototype_or_initial_map(prototype);
3973   function->set_context(undefined_value());
3974   function->set_literals_or_bindings(empty_fixed_array());
3975   function->set_next_function_link(undefined_value());
3976 }
3977
3978
3979 MaybeObject* Heap::AllocateFunctionPrototype(JSFunction* function) {
3980   // Make sure to use globals from the function's context, since the function
3981   // can be from a different context.
3982   Context* native_context = function->context()->native_context();
3983   bool needs_constructor_property;
3984   Map* new_map;
3985   if (function->shared()->is_generator()) {
3986     // Generator prototypes can share maps since they don't have "constructor"
3987     // properties.
3988     new_map = native_context->generator_object_prototype_map();
3989     needs_constructor_property = false;
3990   } else {
3991     // Each function prototype gets a fresh map to avoid unwanted sharing of
3992     // maps between prototypes of different constructors.
3993     JSFunction* object_function = native_context->object_function();
3994     ASSERT(object_function->has_initial_map());
3995     MaybeObject* maybe_map = object_function->initial_map()->Copy();
3996     if (!maybe_map->To(&new_map)) return maybe_map;
3997     needs_constructor_property = true;
3998   }
3999
4000   Object* prototype;
4001   MaybeObject* maybe_prototype = AllocateJSObjectFromMap(new_map);
4002   if (!maybe_prototype->ToObject(&prototype)) return maybe_prototype;
4003
4004   if (needs_constructor_property) {
4005     MaybeObject* maybe_failure =
4006         JSObject::cast(prototype)->SetLocalPropertyIgnoreAttributes(
4007             constructor_string(), function, DONT_ENUM);
4008     if (maybe_failure->IsFailure()) return maybe_failure;
4009   }
4010
4011   return prototype;
4012 }
4013
4014
4015 MaybeObject* Heap::AllocateFunction(Map* function_map,
4016                                     SharedFunctionInfo* shared,
4017                                     Object* prototype,
4018                                     PretenureFlag pretenure) {
4019   AllocationSpace space =
4020       (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
4021   Object* result;
4022   { MaybeObject* maybe_result = Allocate(function_map, space);
4023     if (!maybe_result->ToObject(&result)) return maybe_result;
4024   }
4025   InitializeFunction(JSFunction::cast(result), shared, prototype);
4026   return result;
4027 }
4028
4029
4030 MaybeObject* Heap::AllocateArgumentsObject(Object* callee, int length) {
4031   // To get fast allocation and map sharing for arguments objects we
4032   // allocate them based on an arguments boilerplate.
4033
4034   JSObject* boilerplate;
4035   int arguments_object_size;
4036   bool strict_mode_callee = callee->IsJSFunction() &&
4037       !JSFunction::cast(callee)->shared()->is_classic_mode();
4038   if (strict_mode_callee) {
4039     boilerplate =
4040         isolate()->context()->native_context()->
4041             strict_mode_arguments_boilerplate();
4042     arguments_object_size = kArgumentsObjectSizeStrict;
4043   } else {
4044     boilerplate =
4045         isolate()->context()->native_context()->arguments_boilerplate();
4046     arguments_object_size = kArgumentsObjectSize;
4047   }
4048
4049   // This calls Copy directly rather than using Heap::AllocateRaw so we
4050   // duplicate the check here.
4051   ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
4052
4053   // Check that the size of the boilerplate matches our
4054   // expectations. The ArgumentsAccessStub::GenerateNewObject relies
4055   // on the size being a known constant.
4056   ASSERT(arguments_object_size == boilerplate->map()->instance_size());
4057
4058   // Do the allocation.
4059   Object* result;
4060   { MaybeObject* maybe_result =
4061         AllocateRaw(arguments_object_size, NEW_SPACE, OLD_POINTER_SPACE);
4062     if (!maybe_result->ToObject(&result)) return maybe_result;
4063   }
4064
4065   // Copy the content. The arguments boilerplate doesn't have any
4066   // fields that point to new space so it's safe to skip the write
4067   // barrier here.
4068   CopyBlock(HeapObject::cast(result)->address(),
4069             boilerplate->address(),
4070             JSObject::kHeaderSize);
4071
4072   // Set the length property.
4073   JSObject::cast(result)->InObjectPropertyAtPut(kArgumentsLengthIndex,
4074                                                 Smi::FromInt(length),
4075                                                 SKIP_WRITE_BARRIER);
4076   // Set the callee property for non-strict mode arguments object only.
4077   if (!strict_mode_callee) {
4078     JSObject::cast(result)->InObjectPropertyAtPut(kArgumentsCalleeIndex,
4079                                                   callee);
4080   }
4081
4082   // Check the state of the object
4083   ASSERT(JSObject::cast(result)->HasFastProperties());
4084   ASSERT(JSObject::cast(result)->HasFastObjectElements());
4085
4086   return result;
4087 }
4088
4089
4090 static bool HasDuplicates(DescriptorArray* descriptors) {
4091   int count = descriptors->number_of_descriptors();
4092   if (count > 1) {
4093     Name* prev_key = descriptors->GetKey(0);
4094     for (int i = 1; i != count; i++) {
4095       Name* current_key = descriptors->GetKey(i);
4096       if (prev_key == current_key) return true;
4097       prev_key = current_key;
4098     }
4099   }
4100   return false;
4101 }
4102
4103
4104 MaybeObject* Heap::AllocateInitialMap(JSFunction* fun) {
4105   ASSERT(!fun->has_initial_map());
4106
4107   // First create a new map with the size and number of in-object properties
4108   // suggested by the function.
4109   InstanceType instance_type;
4110   int instance_size;
4111   int in_object_properties;
4112   if (fun->shared()->is_generator()) {
4113     instance_type = JS_GENERATOR_OBJECT_TYPE;
4114     instance_size = JSGeneratorObject::kSize;
4115     in_object_properties = 0;
4116   } else {
4117     instance_type = JS_OBJECT_TYPE;
4118     instance_size = fun->shared()->CalculateInstanceSize();
4119     in_object_properties = fun->shared()->CalculateInObjectProperties();
4120   }
4121   Map* map;
4122   MaybeObject* maybe_map = AllocateMap(instance_type, instance_size);
4123   if (!maybe_map->To(&map)) return maybe_map;
4124
4125   // Fetch or allocate prototype.
4126   Object* prototype;
4127   if (fun->has_instance_prototype()) {
4128     prototype = fun->instance_prototype();
4129   } else {
4130     MaybeObject* maybe_prototype = AllocateFunctionPrototype(fun);
4131     if (!maybe_prototype->To(&prototype)) return maybe_prototype;
4132   }
4133   map->set_inobject_properties(in_object_properties);
4134   map->set_unused_property_fields(in_object_properties);
4135   map->set_prototype(prototype);
4136   ASSERT(map->has_fast_object_elements());
4137
4138   // If the function has only simple this-property assignments, add field
4139   // descriptors for these to the initial map, as the object cannot be
4140   // constructed without these properties.  Guarded by
4141   // the inline_new flag so we only change the map if we generate a
4142   // specialized construct stub.
4143   ASSERT(in_object_properties <= Map::kMaxPreAllocatedPropertyFields);
4144   if (instance_type == JS_OBJECT_TYPE &&
4145       fun->shared()->CanGenerateInlineConstructor(prototype)) {
4146     int count = fun->shared()->this_property_assignments_count();
4147     if (count > in_object_properties) {
4148       // Inline constructor can only handle inobject properties.
4149       fun->shared()->ForbidInlineConstructor();
4150     } else {
4151       DescriptorArray* descriptors;
4152       MaybeObject* maybe_descriptors = DescriptorArray::Allocate(count);
4153       if (!maybe_descriptors->To(&descriptors)) return maybe_descriptors;
4154
4155       DescriptorArray::WhitenessWitness witness(descriptors);
4156       for (int i = 0; i < count; i++) {
4157         String* name = fun->shared()->GetThisPropertyAssignmentName(i);
4158         ASSERT(name->IsInternalizedString());
4159         FieldDescriptor field(name, i, NONE, i + 1);
4160         descriptors->Set(i, &field, witness);
4161       }
4162       descriptors->Sort();
4163
4164       // The descriptors may contain duplicates because the compiler does not
4165       // guarantee the uniqueness of property names (it would have required
4166       // quadratic time). Once the descriptors are sorted we can check for
4167       // duplicates in linear time.
4168       if (HasDuplicates(descriptors)) {
4169         fun->shared()->ForbidInlineConstructor();
4170       } else {
4171         map->InitializeDescriptors(descriptors);
4172         map->set_pre_allocated_property_fields(count);
4173         map->set_unused_property_fields(in_object_properties - count);
4174       }
4175     }
4176   }
4177
4178   if (instance_type == JS_OBJECT_TYPE) {
4179     fun->shared()->StartInobjectSlackTracking(map);
4180   }
4181
4182   return map;
4183 }
4184
4185
4186 void Heap::InitializeJSObjectFromMap(JSObject* obj,
4187                                      FixedArray* properties,
4188                                      Map* map) {
4189   obj->set_properties(properties);
4190   obj->initialize_elements();
4191   // TODO(1240798): Initialize the object's body using valid initial values
4192   // according to the object's initial map.  For example, if the map's
4193   // instance type is JS_ARRAY_TYPE, the length field should be initialized
4194   // to a number (e.g. Smi::FromInt(0)) and the elements initialized to a
4195   // fixed array (e.g. Heap::empty_fixed_array()).  Currently, the object
4196   // verification code has to cope with (temporarily) invalid objects.  See,
4197   // for example, JSArray::JSArrayVerify.
4198   Object* filler;
4199   // We cannot always fill with one_pointer_filler_map because objects
4200   // created from API functions expect their internal fields to be initialized
4201   // with undefined_value.
4202   // Pre-allocated fields need to be initialized with undefined_value as well
4203   // so that object accesses before the constructor completes (e.g. in the
4204   // debugger) will not cause a crash.
4205   if (map->constructor()->IsJSFunction() &&
4206       JSFunction::cast(map->constructor())->shared()->
4207           IsInobjectSlackTrackingInProgress()) {
4208     // We might want to shrink the object later.
4209     ASSERT(obj->GetInternalFieldCount() == 0);
4210     filler = Heap::one_pointer_filler_map();
4211   } else {
4212     filler = Heap::undefined_value();
4213   }
4214   obj->InitializeBody(map, Heap::undefined_value(), filler);
4215 }
4216
4217
4218 MaybeObject* Heap::AllocateJSObjectFromMap(Map* map, PretenureFlag pretenure) {
4219   // JSFunctions should be allocated using AllocateFunction to be
4220   // properly initialized.
4221   ASSERT(map->instance_type() != JS_FUNCTION_TYPE);
4222
4223   // Both types of global objects should be allocated using
4224   // AllocateGlobalObject to be properly initialized.
4225   ASSERT(map->instance_type() != JS_GLOBAL_OBJECT_TYPE);
4226   ASSERT(map->instance_type() != JS_BUILTINS_OBJECT_TYPE);
4227
4228   // Allocate the backing storage for the properties.
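  // The backing store covers the expected properties that will not fit into
  // the object itself.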
4229   int prop_size =
4230       map->pre_allocated_property_fields() +
4231       map->unused_property_fields() -
4232       map->inobject_properties();
4233   ASSERT(prop_size >= 0);
4234   Object* properties;
4235   { MaybeObject* maybe_properties = AllocateFixedArray(prop_size, pretenure);
4236     if (!maybe_properties->ToObject(&properties)) return maybe_properties;
4237   }
4238
4239   // Allocate the JSObject.
4240   AllocationSpace space =
4241       (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
4242   if (map->instance_size() > Page::kMaxNonCodeHeapObjectSize) space = LO_SPACE;
4243   Object* obj;
4244   MaybeObject* maybe_obj = Allocate(map, space);
4245   if (!maybe_obj->To(&obj)) return maybe_obj;
4246
4247   // Initialize the JSObject.
4248   InitializeJSObjectFromMap(JSObject::cast(obj),
4249                             FixedArray::cast(properties),
4250                             map);
4251   ASSERT(JSObject::cast(obj)->HasFastElements());
4252   return obj;
4253 }
4254
4255
4256 MaybeObject* Heap::AllocateJSObjectFromMapWithAllocationSite(Map* map,
4257     Handle<Object> allocation_site_info_payload) {
4258   // JSFunctions should be allocated using AllocateFunction to be
4259   // properly initialized.
4260   ASSERT(map->instance_type() != JS_FUNCTION_TYPE);
4261
4262   // Both types of global objects should be allocated using
4263   // AllocateGlobalObject to be properly initialized.
4264   ASSERT(map->instance_type() != JS_GLOBAL_OBJECT_TYPE);
4265   ASSERT(map->instance_type() != JS_BUILTINS_OBJECT_TYPE);
4266
4267   // Allocate the backing storage for the properties.
4268   int prop_size =
4269       map->pre_allocated_property_fields() +
4270       map->unused_property_fields() -
4271       map->inobject_properties();
4272   ASSERT(prop_size >= 0);
4273   Object* properties;
4274   { MaybeObject* maybe_properties = AllocateFixedArray(prop_size);
4275     if (!maybe_properties->ToObject(&properties)) return maybe_properties;
4276   }
4277
4278   // Allocate the JSObject.
4279   AllocationSpace space = NEW_SPACE;
4280   if (map->instance_size() > Page::kMaxNonCodeHeapObjectSize) space = LO_SPACE;
4281   Object* obj;
4282   MaybeObject* maybe_obj = AllocateWithAllocationSite(map, space,
4283       allocation_site_info_payload);
4284   if (!maybe_obj->To(&obj)) return maybe_obj;
4285
4286   // Initialize the JSObject.
4287   InitializeJSObjectFromMap(JSObject::cast(obj),
4288                             FixedArray::cast(properties),
4289                             map);
4290   ASSERT(JSObject::cast(obj)->HasFastElements());
4291   return obj;
4292 }
4293
4294
4295 MaybeObject* Heap::AllocateJSObject(JSFunction* constructor,
4296                                     PretenureFlag pretenure) {
4297   // Allocate the initial map if absent.
4298   if (!constructor->has_initial_map()) {
4299     Object* initial_map;
4300     { MaybeObject* maybe_initial_map = AllocateInitialMap(constructor);
4301       if (!maybe_initial_map->ToObject(&initial_map)) return maybe_initial_map;
4302     }
4303     constructor->set_initial_map(Map::cast(initial_map));
4304     Map::cast(initial_map)->set_constructor(constructor);
4305   }
4306   // Allocate the object based on the constructor's initial map.
4307   MaybeObject* result = AllocateJSObjectFromMap(
4308       constructor->initial_map(), pretenure);
4309 #ifdef DEBUG
4310   // Make sure result is NOT a global object if valid.
4311   Object* non_failure;
4312   ASSERT(!result->ToObject(&non_failure) || !non_failure->IsGlobalObject());
4313 #endif
4314   return result;
4315 }
4316
4317
4318 MaybeObject* Heap::AllocateJSObjectWithAllocationSite(JSFunction* constructor,
4319     Handle<Object> allocation_site_info_payload) {
4320   // Allocate the initial map if absent.
4321   if (!constructor->has_initial_map()) {
4322     Object* initial_map;
4323     { MaybeObject* maybe_initial_map = AllocateInitialMap(constructor);
4324       if (!maybe_initial_map->ToObject(&initial_map)) return maybe_initial_map;
4325     }
4326     constructor->set_initial_map(Map::cast(initial_map));
4327     Map::cast(initial_map)->set_constructor(constructor);
4328   }
4329   // Allocate the object based on the constructor's initial map, or on the
4330   // elements kind advice carried in the allocation site payload.
4331   Map* initial_map = constructor->initial_map();
4332
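  // The allocation_site_info_payload is a type feedback cell whose value is a
  // Smi-encoded ElementsKind observed for this allocation site; it can suggest
  // a more specific elements kind than the constructor's initial map provides.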
4333   JSGlobalPropertyCell* cell = JSGlobalPropertyCell::cast(
4334       *allocation_site_info_payload);
4335   Smi* smi = Smi::cast(cell->value());
4336   ElementsKind to_kind = static_cast<ElementsKind>(smi->value());
4337   AllocationSiteMode mode = TRACK_ALLOCATION_SITE;
4338   if (to_kind != initial_map->elements_kind()) {
4339     MaybeObject* maybe_new_map = constructor->GetElementsTransitionMap(
4340         isolate(), to_kind);
4341     if (!maybe_new_map->To(&initial_map)) return maybe_new_map;
4342     // Possibly alter the mode, since we found an updated elements kind
4343     // in the type info cell.
4344     mode = AllocationSiteInfo::GetMode(to_kind);
4345   }
4346
4347   MaybeObject* result;
4348   if (mode == TRACK_ALLOCATION_SITE) {
4349     result = AllocateJSObjectFromMapWithAllocationSite(initial_map,
4350         allocation_site_info_payload);
4351   } else {
4352     result = AllocateJSObjectFromMap(initial_map, NOT_TENURED);
4353   }
4354 #ifdef DEBUG
4355   // Make sure result is NOT a global object if valid.
4356   Object* non_failure;
4357   ASSERT(!result->ToObject(&non_failure) || !non_failure->IsGlobalObject());
4358 #endif
4359   return result;
4360 }
4361
4362
4363 MaybeObject* Heap::AllocateJSGeneratorObject(JSFunction *function) {
4364   ASSERT(function->shared()->is_generator());
4365   Map *map;
4366   if (function->has_initial_map()) {
4367     map = function->initial_map();
4368   } else {
4369     // Allocate the initial map if absent.
4370     MaybeObject* maybe_map = AllocateInitialMap(function);
4371     if (!maybe_map->To(&map)) return maybe_map;
4372     function->set_initial_map(map);
4373     map->set_constructor(function);
4374   }
4375   ASSERT(map->instance_type() == JS_GENERATOR_OBJECT_TYPE);
4376   return AllocateJSObjectFromMap(map);
4377 }
4378
4379
4380 MaybeObject* Heap::AllocateJSModule(Context* context, ScopeInfo* scope_info) {
4381   // Allocate a fresh map. Modules do not have a prototype.
4382   Map* map;
4383   MaybeObject* maybe_map = AllocateMap(JS_MODULE_TYPE, JSModule::kSize);
4384   if (!maybe_map->To(&map)) return maybe_map;
4385   // Allocate the object based on the map.
4386   JSModule* module;
4387   MaybeObject* maybe_module = AllocateJSObjectFromMap(map, TENURED);
4388   if (!maybe_module->To(&module)) return maybe_module;
4389   module->set_context(context);
4390   module->set_scope_info(scope_info);
4391   return module;
4392 }
4393
4394
4395 MaybeObject* Heap::AllocateJSArrayAndStorage(
4396     ElementsKind elements_kind,
4397     int length,
4398     int capacity,
4399     ArrayStorageAllocationMode mode,
4400     PretenureFlag pretenure) {
4401   MaybeObject* maybe_array = AllocateJSArray(elements_kind, pretenure);
4402   JSArray* array;
4403   if (!maybe_array->To(&array)) return maybe_array;
4404
4405   // TODO(mvstanton): this body of code duplicates AllocateJSArrayStorage
4406   // for performance reasons.
4407   ASSERT(capacity >= length);
4408
4409   if (capacity == 0) {
4410     array->set_length(Smi::FromInt(0));
4411     array->set_elements(empty_fixed_array());
4412     return array;
4413   }
4414
4415   FixedArrayBase* elms;
4416   MaybeObject* maybe_elms = NULL;
4417   if (IsFastDoubleElementsKind(elements_kind)) {
4418     if (mode == DONT_INITIALIZE_ARRAY_ELEMENTS) {
4419       maybe_elms = AllocateUninitializedFixedDoubleArray(capacity);
4420     } else {
4421       ASSERT(mode == INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
4422       maybe_elms = AllocateFixedDoubleArrayWithHoles(capacity);
4423     }
4424   } else {
4425     ASSERT(IsFastSmiOrObjectElementsKind(elements_kind));
4426     if (mode == DONT_INITIALIZE_ARRAY_ELEMENTS) {
4427       maybe_elms = AllocateUninitializedFixedArray(capacity);
4428     } else {
4429       ASSERT(mode == INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
4430       maybe_elms = AllocateFixedArrayWithHoles(capacity);
4431     }
4432   }
4433   if (!maybe_elms->To(&elms)) return maybe_elms;
4434
4435   array->set_elements(elms);
4436   array->set_length(Smi::FromInt(length));
4437   return array;
4438 }
4439
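// A usage sketch (assuming a Heap* heap in scope, and using FAST_ELEMENTS as
// a stand-in for whatever fast elements kind the caller needs): allocate a
// length-zero array whose backing store already has room for 16 hole-filled
// elements.
//
//   JSArray* array;
//   MaybeObject* maybe_array = heap->AllocateJSArrayAndStorage(
//       FAST_ELEMENTS, 0, 16, INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE,
//       NOT_TENURED);
//   if (!maybe_array->To(&array)) return maybe_array;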
4440
4441 MaybeObject* Heap::AllocateJSArrayAndStorageWithAllocationSite(
4442     ElementsKind elements_kind,
4443     int length,
4444     int capacity,
4445     Handle<Object> allocation_site_payload,
4446     ArrayStorageAllocationMode mode) {
4447   MaybeObject* maybe_array = AllocateJSArrayWithAllocationSite(elements_kind,
4448       allocation_site_payload);
4449   JSArray* array;
4450   if (!maybe_array->To(&array)) return maybe_array;
4451   return AllocateJSArrayStorage(array, length, capacity, mode);
4452 }
4453
4454
4455 MaybeObject* Heap::AllocateJSArrayStorage(
4456     JSArray* array,
4457     int length,
4458     int capacity,
4459     ArrayStorageAllocationMode mode) {
4460   ASSERT(capacity >= length);
4461
4462   if (capacity == 0) {
4463     array->set_length(Smi::FromInt(0));
4464     array->set_elements(empty_fixed_array());
4465     return array;
4466   }
4467
4468   FixedArrayBase* elms;
4469   MaybeObject* maybe_elms = NULL;
4470   ElementsKind elements_kind = array->GetElementsKind();
4471   if (IsFastDoubleElementsKind(elements_kind)) {
4472     if (mode == DONT_INITIALIZE_ARRAY_ELEMENTS) {
4473       maybe_elms = AllocateUninitializedFixedDoubleArray(capacity);
4474     } else {
4475       ASSERT(mode == INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
4476       maybe_elms = AllocateFixedDoubleArrayWithHoles(capacity);
4477     }
4478   } else {
4479     ASSERT(IsFastSmiOrObjectElementsKind(elements_kind));
4480     if (mode == DONT_INITIALIZE_ARRAY_ELEMENTS) {
4481       maybe_elms = AllocateUninitializedFixedArray(capacity);
4482     } else {
4483       ASSERT(mode == INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
4484       maybe_elms = AllocateFixedArrayWithHoles(capacity);
4485     }
4486   }
4487   if (!maybe_elms->To(&elms)) return maybe_elms;
4488
4489   array->set_elements(elms);
4490   array->set_length(Smi::FromInt(length));
4491   return array;
4492 }
4493
4494
4495 MaybeObject* Heap::AllocateJSArrayWithElements(
4496     FixedArrayBase* elements,
4497     ElementsKind elements_kind,
4498     int length,
4499     PretenureFlag pretenure) {
4500   MaybeObject* maybe_array = AllocateJSArray(elements_kind, pretenure);
4501   JSArray* array;
4502   if (!maybe_array->To(&array)) return maybe_array;
4503
4504   array->set_elements(elements);
4505   array->set_length(Smi::FromInt(length));
4506   array->ValidateElements();
4507   return array;
4508 }
4509
4510
4511 MaybeObject* Heap::AllocateJSProxy(Object* handler, Object* prototype) {
4512   // Allocate map.
4513   // TODO(rossberg): Once we optimize proxies, think about a scheme to share
4514   // maps. Will probably depend on the identity of the handler object, too.
4515   Map* map;
4516   MaybeObject* maybe_map_obj = AllocateMap(JS_PROXY_TYPE, JSProxy::kSize);
4517   if (!maybe_map_obj->To<Map>(&map)) return maybe_map_obj;
4518   map->set_prototype(prototype);
4519
4520   // Allocate the proxy object.
4521   JSProxy* result;
4522   MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
4523   if (!maybe_result->To<JSProxy>(&result)) return maybe_result;
4524   result->InitializeBody(map->instance_size(), Smi::FromInt(0));
4525   result->set_handler(handler);
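  // The identity hash is set up lazily; undefined_value() marks it as not yet
  // assigned, and the write barrier can be skipped because undefined is never
  // allocated in new space.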
4526   result->set_hash(undefined_value(), SKIP_WRITE_BARRIER);
4527   return result;
4528 }
4529
4530
4531 MaybeObject* Heap::AllocateJSFunctionProxy(Object* handler,
4532                                            Object* call_trap,
4533                                            Object* construct_trap,
4534                                            Object* prototype) {
4535   // Allocate map.
4536   // TODO(rossberg): Once we optimize proxies, think about a scheme to share
4537   // maps. Will probably depend on the identity of the handler object, too.
4538   Map* map;
4539   MaybeObject* maybe_map_obj =
4540       AllocateMap(JS_FUNCTION_PROXY_TYPE, JSFunctionProxy::kSize);
4541   if (!maybe_map_obj->To<Map>(&map)) return maybe_map_obj;
4542   map->set_prototype(prototype);
4543
4544   // Allocate the proxy object.
4545   JSFunctionProxy* result;
4546   MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
4547   if (!maybe_result->To<JSFunctionProxy>(&result)) return maybe_result;
4548   result->InitializeBody(map->instance_size(), Smi::FromInt(0));
4549   result->set_handler(handler);
4550   result->set_hash(undefined_value(), SKIP_WRITE_BARRIER);
4551   result->set_call_trap(call_trap);
4552   result->set_construct_trap(construct_trap);
4553   return result;
4554 }
4555
4556
4557 MaybeObject* Heap::AllocateGlobalObject(JSFunction* constructor) {
4558   ASSERT(constructor->has_initial_map());
4559   Map* map = constructor->initial_map();
4560   ASSERT(map->is_dictionary_map());
4561
4562   // Make sure no field properties are described in the initial map.
4563   // This guarantees that normalizing the properties does not
4564   // require us to change property values to JSGlobalPropertyCells.
4565   ASSERT(map->NextFreePropertyIndex() == 0);
4566
4567   // Make sure we don't have a ton of pre-allocated slots in the
4568   // global objects. They will be unused once we normalize the object.
4569   ASSERT(map->unused_property_fields() == 0);
4570   ASSERT(map->inobject_properties() == 0);
4571
4572   // Initial size of the backing store to avoid resizing the storage during
4573   // bootstrapping. The size differs between the JS global object and the
4574   // builtins object.
4575   int initial_size = map->instance_type() == JS_GLOBAL_OBJECT_TYPE ? 64 : 512;
4576
4577   // Allocate a dictionary object for backing storage.
4578   NameDictionary* dictionary;
4579   MaybeObject* maybe_dictionary =
4580       NameDictionary::Allocate(
4581           this,
4582           map->NumberOfOwnDescriptors() * 2 + initial_size);
4583   if (!maybe_dictionary->To(&dictionary)) return maybe_dictionary;
4584
4585   // The global object might be created from an object template with accessors.
4586   // Fill these accessors into the dictionary.
4587   DescriptorArray* descs = map->instance_descriptors();
4588   for (int i = 0; i < descs->number_of_descriptors(); i++) {
4589     PropertyDetails details = descs->GetDetails(i);
4590     ASSERT(details.type() == CALLBACKS);  // Only accessors are expected.
4591     PropertyDetails d = PropertyDetails(details.attributes(),
4592                                         CALLBACKS,
4593                                         details.descriptor_index());
4594     Object* value = descs->GetCallbacksObject(i);
4595     MaybeObject* maybe_value = AllocateJSGlobalPropertyCell(value);
4596     if (!maybe_value->ToObject(&value)) return maybe_value;
4597
4598     MaybeObject* maybe_added = dictionary->Add(descs->GetKey(i), value, d);
4599     if (!maybe_added->To(&dictionary)) return maybe_added;
4600   }
4601
4602   // Allocate the global object and initialize it with the backing store.
4603   JSObject* global;
4604   MaybeObject* maybe_global = Allocate(map, OLD_POINTER_SPACE);
4605   if (!maybe_global->To(&global)) return maybe_global;
4606
4607   InitializeJSObjectFromMap(global, dictionary, map);
4608
4609   // Create a new map for the global object.
4610   Map* new_map;
4611   MaybeObject* maybe_map = map->CopyDropDescriptors();
4612   if (!maybe_map->To(&new_map)) return maybe_map;
4613   new_map->set_dictionary_map(true);
4614
4615   // Set up the global object as a normalized object.
4616   global->set_map(new_map);
4617   global->set_properties(dictionary);
4618
4619   // Make sure result is a global object with properties in dictionary.
4620   ASSERT(global->IsGlobalObject());
4621   ASSERT(!global->HasFastProperties());
4622   return global;
4623 }
4624
4625
4626 MaybeObject* Heap::CopyJSObject(JSObject* source) {
4627   // Never used to copy functions.  If functions need to be copied we
4628   // have to be careful to clear the literals array.
4629   SLOW_ASSERT(!source->IsJSFunction());
4630
4631   // Make the clone.
4632   Map* map = source->map();
4633   int object_size = map->instance_size();
4634   Object* clone;
4635
4636   WriteBarrierMode wb_mode = UPDATE_WRITE_BARRIER;
4637
4638   // If we're forced to always allocate, we use the general allocation
4639   // functions which may leave us with an object in old space.
4640   if (always_allocate()) {
4641     { MaybeObject* maybe_clone =
4642           AllocateRaw(object_size, NEW_SPACE, OLD_POINTER_SPACE);
4643       if (!maybe_clone->ToObject(&clone)) return maybe_clone;
4644     }
4645     Address clone_address = HeapObject::cast(clone)->address();
4646     CopyBlock(clone_address,
4647               source->address(),
4648               object_size);
4649     // Update write barrier for all fields that lie beyond the header.
4650     RecordWrites(clone_address,
4651                  JSObject::kHeaderSize,
4652                  (object_size - JSObject::kHeaderSize) / kPointerSize);
4653   } else {
4654     wb_mode = SKIP_WRITE_BARRIER;
4655
4656     { MaybeObject* maybe_clone = new_space_.AllocateRaw(object_size);
4657       if (!maybe_clone->ToObject(&clone)) return maybe_clone;
4658     }
4659     SLOW_ASSERT(InNewSpace(clone));
4660     // Since we know the clone is allocated in new space, we can copy
4661     // the contents without worrying about updating the write barrier.
4662     CopyBlock(HeapObject::cast(clone)->address(),
4663               source->address(),
4664               object_size);
4665   }
4666
4667   SLOW_ASSERT(
4668       JSObject::cast(clone)->GetElementsKind() == source->GetElementsKind());
4669   FixedArrayBase* elements = FixedArrayBase::cast(source->elements());
4670   FixedArray* properties = FixedArray::cast(source->properties());
4671   // Update elements if necessary.
4672   if (elements->length() > 0) {
4673     Object* elem;
4674     { MaybeObject* maybe_elem;
4675       if (elements->map() == fixed_cow_array_map()) {
4676         maybe_elem = FixedArray::cast(elements);
4677       } else if (source->HasFastDoubleElements()) {
4678         maybe_elem = CopyFixedDoubleArray(FixedDoubleArray::cast(elements));
4679       } else {
4680         maybe_elem = CopyFixedArray(FixedArray::cast(elements));
4681       }
4682       if (!maybe_elem->ToObject(&elem)) return maybe_elem;
4683     }
4684     JSObject::cast(clone)->set_elements(FixedArrayBase::cast(elem), wb_mode);
4685   }
4686   // Update properties if necessary.
4687   if (properties->length() > 0) {
4688     Object* prop;
4689     { MaybeObject* maybe_prop = CopyFixedArray(properties);
4690       if (!maybe_prop->ToObject(&prop)) return maybe_prop;
4691     }
4692     JSObject::cast(clone)->set_properties(FixedArray::cast(prop), wb_mode);
4693   }
4694   // Return the new clone.
4695   return clone;
4696 }
4697
4698
4699 MaybeObject* Heap::CopyJSObjectWithAllocationSite(JSObject* source) {
4700   // Never used to copy functions.  If functions need to be copied we
4701   // have to be careful to clear the literals array.
4702   SLOW_ASSERT(!source->IsJSFunction());
4703
4704   // Make the clone.
4705   Map* map = source->map();
4706   int object_size = map->instance_size();
4707   Object* clone;
4708
4709   ASSERT(map->CanTrackAllocationSite());
4710   ASSERT(map->instance_type() == JS_ARRAY_TYPE);
4711   WriteBarrierMode wb_mode = UPDATE_WRITE_BARRIER;
4712
4713   // If we're forced to always allocate, we use the general allocation
4714   // functions which may leave us with an object in old space.
4715   int adjusted_object_size = object_size;
4716   if (always_allocate()) {
4717     // We'll only track the origin if we are certain to allocate in new space.
4718     const int kMinFreeNewSpaceAfterGC = InitialSemiSpaceSize() * 3/4;
4719     if ((object_size + AllocationSiteInfo::kSize) < kMinFreeNewSpaceAfterGC) {
4720       adjusted_object_size += AllocationSiteInfo::kSize;
4721     }
4722
4723     { MaybeObject* maybe_clone =
4724           AllocateRaw(adjusted_object_size, NEW_SPACE, OLD_POINTER_SPACE);
4725       if (!maybe_clone->ToObject(&clone)) return maybe_clone;
4726     }
4727     Address clone_address = HeapObject::cast(clone)->address();
4728     CopyBlock(clone_address,
4729               source->address(),
4730               object_size);
4731     // Update write barrier for all fields that lie beyond the header.
4732     int write_barrier_offset = adjusted_object_size > object_size
4733         ? JSArray::kSize + AllocationSiteInfo::kSize
4734         : JSObject::kHeaderSize;
4735     if (((object_size - write_barrier_offset) / kPointerSize) > 0) {
4736       RecordWrites(clone_address,
4737                    write_barrier_offset,
4738                    (object_size - write_barrier_offset) / kPointerSize);
4739     }
4740
4741     // Track allocation site information, if we failed to allocate it inline.
4742     if (InNewSpace(clone) &&
4743         adjusted_object_size == object_size) {
4744       MaybeObject* maybe_alloc_info =
4745           AllocateStruct(ALLOCATION_SITE_INFO_TYPE);
4746       AllocationSiteInfo* alloc_info;
4747       if (maybe_alloc_info->To(&alloc_info)) {
4748         alloc_info->set_map_no_write_barrier(allocation_site_info_map());
4749         alloc_info->set_payload(source, SKIP_WRITE_BARRIER);
4750       }
4751     }
4752   } else {
4753     wb_mode = SKIP_WRITE_BARRIER;
4754     adjusted_object_size += AllocationSiteInfo::kSize;
4755
4756     { MaybeObject* maybe_clone = new_space_.AllocateRaw(adjusted_object_size);
4757       if (!maybe_clone->ToObject(&clone)) return maybe_clone;
4758     }
4759     SLOW_ASSERT(InNewSpace(clone));
4760     // Since we know the clone is allocated in new space, we can copy
4761     // the contents without worrying about updating the write barrier.
4762     CopyBlock(HeapObject::cast(clone)->address(),
4763               source->address(),
4764               object_size);
4765   }
4766
4767   if (adjusted_object_size > object_size) {
4768     AllocationSiteInfo* alloc_info = reinterpret_cast<AllocationSiteInfo*>(
4769         reinterpret_cast<Address>(clone) + object_size);
4770     alloc_info->set_map_no_write_barrier(allocation_site_info_map());
4771     alloc_info->set_payload(source, SKIP_WRITE_BARRIER);
4772   }
4773
4774   SLOW_ASSERT(
4775       JSObject::cast(clone)->GetElementsKind() == source->GetElementsKind());
4776   FixedArrayBase* elements = FixedArrayBase::cast(source->elements());
4777   FixedArray* properties = FixedArray::cast(source->properties());
4778   // Update elements if necessary.
4779   if (elements->length() > 0) {
4780     Object* elem;
4781     { MaybeObject* maybe_elem;
4782       if (elements->map() == fixed_cow_array_map()) {
4783         maybe_elem = FixedArray::cast(elements);
4784       } else if (source->HasFastDoubleElements()) {
4785         maybe_elem = CopyFixedDoubleArray(FixedDoubleArray::cast(elements));
4786       } else {
4787         maybe_elem = CopyFixedArray(FixedArray::cast(elements));
4788       }
4789       if (!maybe_elem->ToObject(&elem)) return maybe_elem;
4790     }
4791     JSObject::cast(clone)->set_elements(FixedArrayBase::cast(elem), wb_mode);
4792   }
4793   // Update properties if necessary.
4794   if (properties->length() > 0) {
4795     Object* prop;
4796     { MaybeObject* maybe_prop = CopyFixedArray(properties);
4797       if (!maybe_prop->ToObject(&prop)) return maybe_prop;
4798     }
4799     JSObject::cast(clone)->set_properties(FixedArray::cast(prop), wb_mode);
4800   }
4801   // Return the new clone.
4802   return clone;
4803 }
4804
4805
4806 MaybeObject* Heap::ReinitializeJSReceiver(
4807     JSReceiver* object, InstanceType type, int size) {
4808   ASSERT(type >= FIRST_JS_OBJECT_TYPE);
4809
4810   // Allocate fresh map.
4811   // TODO(rossberg): Once we optimize proxies, cache these maps.
4812   Map* map;
4813   MaybeObject* maybe = AllocateMap(type, size);
4814   if (!maybe->To<Map>(&map)) return maybe;
4815
4816   // Check that the receiver has at least the size of the fresh object.
4817   int size_difference = object->map()->instance_size() - map->instance_size();
4818   ASSERT(size_difference >= 0);
4819
4820   map->set_prototype(object->map()->prototype());
4821
4822   // Allocate the backing storage for the properties.
4823   int prop_size = map->unused_property_fields() - map->inobject_properties();
4824   Object* properties;
4825   maybe = AllocateFixedArray(prop_size, TENURED);
4826   if (!maybe->ToObject(&properties)) return maybe;
4827
4828   // Functions require some allocation, which might fail here.
4829   SharedFunctionInfo* shared = NULL;
4830   if (type == JS_FUNCTION_TYPE) {
4831     String* name;
4832     maybe =
4833         InternalizeOneByteString(STATIC_ASCII_VECTOR("<freezing call trap>"));
4834     if (!maybe->To<String>(&name)) return maybe;
4835     maybe = AllocateSharedFunctionInfo(name);
4836     if (!maybe->To<SharedFunctionInfo>(&shared)) return maybe;
4837   }
4838
4839   // Because of possible retries of this function after failure,
4840   // we must NOT fail after this point, where we have changed the type!
4841
4842   // Reset the map for the object.
4843   object->set_map(map);
4844   JSObject* jsobj = JSObject::cast(object);
4845
4846   // Reinitialize the object from the constructor map.
4847   InitializeJSObjectFromMap(jsobj, FixedArray::cast(properties), map);
4848
4849   // Functions require some minimal initialization.
4850   if (type == JS_FUNCTION_TYPE) {
4851     map->set_function_with_prototype(true);
4852     InitializeFunction(JSFunction::cast(object), shared, the_hole_value());
4853     JSFunction::cast(object)->set_context(
4854         isolate()->context()->native_context());
4855   }
4856
4857   // Put in filler if the new object is smaller than the old.
4858   if (size_difference > 0) {
4859     CreateFillerObjectAt(
4860         object->address() + map->instance_size(), size_difference);
4861   }
4862
4863   return object;
4864 }
4865
4866
4867 MaybeObject* Heap::ReinitializeJSGlobalProxy(JSFunction* constructor,
4868                                              JSGlobalProxy* object) {
4869   ASSERT(constructor->has_initial_map());
4870   Map* map = constructor->initial_map();
4871
4872   // Check that the already allocated object has the same size and type as
4873   // objects allocated using the constructor.
4874   ASSERT(map->instance_size() == object->map()->instance_size());
4875   ASSERT(map->instance_type() == object->map()->instance_type());
4876
4877   // Allocate the backing storage for the properties.
4878   int prop_size = map->unused_property_fields() - map->inobject_properties();
4879   Object* properties;
4880   { MaybeObject* maybe_properties = AllocateFixedArray(prop_size, TENURED);
4881     if (!maybe_properties->ToObject(&properties)) return maybe_properties;
4882   }
4883
4884   // Reset the map for the object.
4885   object->set_map(constructor->initial_map());
4886
4887   // Reinitialize the object from the constructor map.
4888   InitializeJSObjectFromMap(object, FixedArray::cast(properties), map);
4889   return object;
4890 }
4891
4892
4893 MaybeObject* Heap::AllocateStringFromOneByte(Vector<const uint8_t> string,
4894                                            PretenureFlag pretenure) {
4895   int length = string.length();
4896   if (length == 1) {
4897     return Heap::LookupSingleCharacterStringFromCode(string[0]);
4898   }
4899   Object* result;
4900   { MaybeObject* maybe_result =
4901         AllocateRawOneByteString(string.length(), pretenure);
4902     if (!maybe_result->ToObject(&result)) return maybe_result;
4903   }
4904
4905   // Copy the characters into the new object.
4906   CopyChars(SeqOneByteString::cast(result)->GetChars(),
4907             string.start(),
4908             length);
4909   return result;
4910 }
4911
4912
4913 MaybeObject* Heap::AllocateStringFromUtf8Slow(Vector<const char> string,
4914                                               int non_ascii_start,
4915                                               PretenureFlag pretenure) {
4916   // Continue counting the number of characters in the UTF-8 string, starting
4917   // from the first non-ascii character or word.
4918   Access<UnicodeCache::Utf8Decoder>
4919       decoder(isolate_->unicode_cache()->utf8_decoder());
4920   decoder->Reset(string.start() + non_ascii_start,
4921                  string.length() - non_ascii_start);
4922   int utf16_length = decoder->Utf16Length();
4923   ASSERT(utf16_length > 0);
4924   // Allocate string.
4925   Object* result;
4926   {
4927     int chars = non_ascii_start + utf16_length;
4928     MaybeObject* maybe_result = AllocateRawTwoByteString(chars, pretenure);
4929     if (!maybe_result->ToObject(&result)) return maybe_result;
4930   }
4931   // Convert and copy the characters into the new object.
4932   SeqTwoByteString* twobyte = SeqTwoByteString::cast(result);
4933   // Copy ascii portion.
4934   uint16_t* data = twobyte->GetChars();
4935   if (non_ascii_start != 0) {
4936     const char* ascii_data = string.start();
4937     for (int i = 0; i < non_ascii_start; i++) {
4938       *data++ = *ascii_data++;
4939     }
4940   }
4941   // Now write the remainder.
4942   decoder->WriteUtf16(data, utf16_length);
4943   return result;
4944 }
4945
4946
4947 MaybeObject* Heap::AllocateStringFromTwoByte(Vector<const uc16> string,
4948                                              PretenureFlag pretenure) {
4949   // Check if the string can be represented with one-byte characters.
4950   Object* result;
4951   int length = string.length();
4952   const uc16* start = string.start();
4953
4954   if (String::IsOneByte(start, length)) {
4955     MaybeObject* maybe_result = AllocateRawOneByteString(length, pretenure);
4956     if (!maybe_result->ToObject(&result)) return maybe_result;
4957     CopyChars(SeqOneByteString::cast(result)->GetChars(), start, length);
4958   } else {  // It's not a one byte string.
4959     MaybeObject* maybe_result = AllocateRawTwoByteString(length, pretenure);
4960     if (!maybe_result->ToObject(&result)) return maybe_result;
4961     CopyChars(SeqTwoByteString::cast(result)->GetChars(), start, length);
4962   }
4963   return result;
4964 }
4965
4966
4967 Map* Heap::InternalizedStringMapForString(String* string) {
4968   // If the string is in new space it cannot be used as internalized.
4969   if (InNewSpace(string)) return NULL;
4970
4971   // Find the corresponding internalized string map.
4972   switch (string->map()->instance_type()) {
4973     case STRING_TYPE: return internalized_string_map();
4974     case ASCII_STRING_TYPE: return ascii_internalized_string_map();
4975     case CONS_STRING_TYPE: return cons_internalized_string_map();
4976     case CONS_ASCII_STRING_TYPE: return cons_ascii_internalized_string_map();
4977     case EXTERNAL_STRING_TYPE: return external_internalized_string_map();
4978     case EXTERNAL_ASCII_STRING_TYPE:
4979       return external_ascii_internalized_string_map();
4980     case EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE:
4981       return external_internalized_string_with_one_byte_data_map();
4982     case SHORT_EXTERNAL_STRING_TYPE:
4983       return short_external_internalized_string_map();
4984     case SHORT_EXTERNAL_ASCII_STRING_TYPE:
4985       return short_external_ascii_internalized_string_map();
4986     case SHORT_EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE:
4987       return short_external_internalized_string_with_one_byte_data_map();
4988     default: return NULL;  // No match found.
4989   }
4990 }
4991
4992
4993 static inline void WriteOneByteData(Vector<const char> vector,
4994                                     uint8_t* chars,
4995                                     int len) {
4996   // Only works for ascii.
4997   ASSERT(vector.length() == len);
4998   OS::MemCopy(chars, vector.start(), len);
4999 }
5000
5001 static inline void WriteTwoByteData(Vector<const char> vector,
5002                                     uint16_t* chars,
5003                                     int len) {
5004   const uint8_t* stream = reinterpret_cast<const uint8_t*>(vector.start());
5005   unsigned stream_length = vector.length();
5006   while (stream_length != 0) {
5007     unsigned consumed = 0;
5008     uint32_t c = unibrow::Utf8::ValueOf(stream, stream_length, &consumed);
5009     ASSERT(c != unibrow::Utf8::kBadChar);
5010     ASSERT(consumed <= stream_length);
5011     stream_length -= consumed;
5012     stream += consumed;
5013     if (c > unibrow::Utf16::kMaxNonSurrogateCharCode) {
5014       len -= 2;
5015       if (len < 0) break;
5016       *chars++ = unibrow::Utf16::LeadSurrogate(c);
5017       *chars++ = unibrow::Utf16::TrailSurrogate(c);
5018     } else {
5019       len -= 1;
5020       if (len < 0) break;
5021       *chars++ = c;
5022     }
5023   }
5024   ASSERT(stream_length == 0);
5025   ASSERT(len == 0);
5026 }
5027
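// Worked example for the surrogate split above: the UTF-8 byte sequence
// F0 90 90 80 decodes to U+10400, which is above kMaxNonSurrogateCharCode and
// is therefore written as the lead surrogate 0xD801 followed by the trail
// surrogate 0xDC00, consuming two of the "len" uint16_t slots.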
5028
5029 static inline void WriteOneByteData(String* s, uint8_t* chars, int len) {
5030   ASSERT(s->length() == len);
5031   String::WriteToFlat(s, chars, 0, len);
5032 }
5033
5034 static inline void WriteTwoByteData(String* s, uint16_t* chars, int len) {
5035   ASSERT(s->length() == len);
5036   String::WriteToFlat(s, chars, 0, len);
5037 }
5038
5039
5040 template<bool is_one_byte, typename T>
5041 MaybeObject* Heap::AllocateInternalizedStringImpl(
5042     T t, int chars, uint32_t hash_field) {
5043   ASSERT(chars >= 0);
5044   // Compute map and object size.
5045   int size;
5046   Map* map;
5047
5048   if (is_one_byte) {
5049     if (chars > SeqOneByteString::kMaxLength) {
5050       return Failure::OutOfMemoryException(0x9);
5051     }
5052     map = ascii_internalized_string_map();
5053     size = SeqOneByteString::SizeFor(chars);
5054   } else {
5055     if (chars > SeqTwoByteString::kMaxLength) {
5056       return Failure::OutOfMemoryException(0xa);
5057     }
5058     map = internalized_string_map();
5059     size = SeqTwoByteString::SizeFor(chars);
5060   }
5061
5062   // Allocate string.
5063   Object* result;
5064   { MaybeObject* maybe_result = (size > Page::kMaxNonCodeHeapObjectSize)
5065                    ? lo_space_->AllocateRaw(size, NOT_EXECUTABLE)
5066                    : old_data_space_->AllocateRaw(size);
5067     if (!maybe_result->ToObject(&result)) return maybe_result;
5068   }
5069
5070   reinterpret_cast<HeapObject*>(result)->set_map_no_write_barrier(map);
5071   // Set length and hash fields of the allocated string.
5072   String* answer = String::cast(result);
5073   answer->set_length(chars);
5074   answer->set_hash_field(hash_field);
5075
5076   ASSERT_EQ(size, answer->Size());
5077
5078   if (is_one_byte) {
5079     WriteOneByteData(t, SeqOneByteString::cast(answer)->GetChars(), chars);
5080   } else {
5081     WriteTwoByteData(t, SeqTwoByteString::cast(answer)->GetChars(), chars);
5082   }
5083   return answer;
5084 }
5085
5086
5087 // Need explicit instantiations.
5088 template
5089 MaybeObject* Heap::AllocateInternalizedStringImpl<true>(String*, int, uint32_t);
5090 template
5091 MaybeObject* Heap::AllocateInternalizedStringImpl<false>(
5092     String*, int, uint32_t);
5093 template
5094 MaybeObject* Heap::AllocateInternalizedStringImpl<false>(
5095     Vector<const char>, int, uint32_t);
5096
5097
5098 MaybeObject* Heap::AllocateRawOneByteString(int length,
5099                                             PretenureFlag pretenure) {
5100   if (length < 0 || length > SeqOneByteString::kMaxLength) {
5101     return Failure::OutOfMemoryException(0xb);
5102   }
5103
5104   int size = SeqOneByteString::SizeFor(length);
5105   ASSERT(size <= SeqOneByteString::kMaxSize);
5106
5107   AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
5108   AllocationSpace retry_space = OLD_DATA_SPACE;
5109
5110   if (space == NEW_SPACE) {
5111     if (size > kMaxObjectSizeInNewSpace) {
5112       // Allocate in large object space; the retry space will be ignored.
5113       space = LO_SPACE;
5114     } else if (size > Page::kMaxNonCodeHeapObjectSize) {
5115       // Allocate in new space, retry in large object space.
5116       retry_space = LO_SPACE;
5117     }
5118   } else if (space == OLD_DATA_SPACE &&
5119              size > Page::kMaxNonCodeHeapObjectSize) {
5120     space = LO_SPACE;
5121   }
5122   Object* result;
5123   { MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
5124     if (!maybe_result->ToObject(&result)) return maybe_result;
5125   }
5126
5127   // Partially initialize the object.
5128   HeapObject::cast(result)->set_map_no_write_barrier(ascii_string_map());
5129   String::cast(result)->set_length(length);
5130   String::cast(result)->set_hash_field(String::kEmptyHashField);
5131   ASSERT_EQ(size, HeapObject::cast(result)->Size());
5132
5133   return result;
5134 }
5135
5136
5137 MaybeObject* Heap::AllocateRawTwoByteString(int length,
5138                                             PretenureFlag pretenure) {
5139   if (length < 0 || length > SeqTwoByteString::kMaxLength) {
5140     return Failure::OutOfMemoryException(0xc);
5141   }
5142   int size = SeqTwoByteString::SizeFor(length);
5143   ASSERT(size <= SeqTwoByteString::kMaxSize);
5144   AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
5145   AllocationSpace retry_space = OLD_DATA_SPACE;
5146
5147   if (space == NEW_SPACE) {
5148     if (size > kMaxObjectSizeInNewSpace) {
5149       // Allocate in large object space; the retry space will be ignored.
5150       space = LO_SPACE;
5151     } else if (size > Page::kMaxNonCodeHeapObjectSize) {
5152       // Allocate in new space, retry in large object space.
5153       retry_space = LO_SPACE;
5154     }
5155   } else if (space == OLD_DATA_SPACE &&
5156              size > Page::kMaxNonCodeHeapObjectSize) {
5157     space = LO_SPACE;
5158   }
5159   Object* result;
5160   { MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
5161     if (!maybe_result->ToObject(&result)) return maybe_result;
5162   }
5163
5164   // Partially initialize the object.
5165   HeapObject::cast(result)->set_map_no_write_barrier(string_map());
5166   String::cast(result)->set_length(length);
5167   String::cast(result)->set_hash_field(String::kEmptyHashField);
5168   ASSERT_EQ(size, HeapObject::cast(result)->Size());
5169   return result;
5170 }
5171
5172
5173 MaybeObject* Heap::AllocateJSArray(
5174     ElementsKind elements_kind,
5175     PretenureFlag pretenure) {
5176   Context* native_context = isolate()->context()->native_context();
5177   JSFunction* array_function = native_context->array_function();
5178   Map* map = array_function->initial_map();
5179   Object* maybe_map_array = native_context->js_array_maps();
5180   if (!maybe_map_array->IsUndefined()) {
5181     Object* maybe_transitioned_map =
5182         FixedArray::cast(maybe_map_array)->get(elements_kind);
5183     if (!maybe_transitioned_map->IsUndefined()) {
5184       map = Map::cast(maybe_transitioned_map);
5185     }
5186   }
5187
5188   return AllocateJSObjectFromMap(map, pretenure);
5189 }
5190
5191
5192 MaybeObject* Heap::AllocateJSArrayWithAllocationSite(
5193     ElementsKind elements_kind,
5194     Handle<Object> allocation_site_info_payload) {
5195   Context* native_context = isolate()->context()->native_context();
5196   JSFunction* array_function = native_context->array_function();
5197   Map* map = array_function->initial_map();
5198   Object* maybe_map_array = native_context->js_array_maps();
5199   if (!maybe_map_array->IsUndefined()) {
5200     Object* maybe_transitioned_map =
5201         FixedArray::cast(maybe_map_array)->get(elements_kind);
5202     if (!maybe_transitioned_map->IsUndefined()) {
5203       map = Map::cast(maybe_transitioned_map);
5204     }
5205   }
5206   return AllocateJSObjectFromMapWithAllocationSite(map,
5207       allocation_site_info_payload);
5208 }
5209
5210
5211 MaybeObject* Heap::AllocateEmptyFixedArray() {
5212   int size = FixedArray::SizeFor(0);
5213   Object* result;
5214   { MaybeObject* maybe_result =
5215         AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE);
5216     if (!maybe_result->ToObject(&result)) return maybe_result;
5217   }
5218   // Initialize the object.
5219   reinterpret_cast<FixedArray*>(result)->set_map_no_write_barrier(
5220       fixed_array_map());
5221   reinterpret_cast<FixedArray*>(result)->set_length(0);
5222   return result;
5223 }
5224
5225
5226 MaybeObject* Heap::AllocateRawFixedArray(int length) {
5227   if (length < 0 || length > FixedArray::kMaxLength) {
5228     return Failure::OutOfMemoryException(0xd);
5229   }
5230   ASSERT(length > 0);
5231   // Use the general function if we're forced to always allocate.
5232   if (always_allocate()) return AllocateFixedArray(length, TENURED);
5233   // Allocate the raw data for a fixed array.
5234   int size = FixedArray::SizeFor(length);
5235   return size <= kMaxObjectSizeInNewSpace
5236       ? new_space_.AllocateRaw(size)
5237       : lo_space_->AllocateRaw(size, NOT_EXECUTABLE);
5238 }
5239
5240
5241 MaybeObject* Heap::CopyFixedArrayWithMap(FixedArray* src, Map* map) {
5242   int len = src->length();
5243   Object* obj;
5244   { MaybeObject* maybe_obj = AllocateRawFixedArray(len);
5245     if (!maybe_obj->ToObject(&obj)) return maybe_obj;
5246   }
5247   if (InNewSpace(obj)) {
5248     HeapObject* dst = HeapObject::cast(obj);
5249     dst->set_map_no_write_barrier(map);
5250     CopyBlock(dst->address() + kPointerSize,
5251               src->address() + kPointerSize,
5252               FixedArray::SizeFor(len) - kPointerSize);
5253     return obj;
5254   }
5255   HeapObject::cast(obj)->set_map_no_write_barrier(map);
5256   FixedArray* result = FixedArray::cast(obj);
5257   result->set_length(len);
5258
5259   // Copy the content.
5260   AssertNoAllocation no_gc;
5261   WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
5262   for (int i = 0; i < len; i++) result->set(i, src->get(i), mode);
5263   return result;
5264 }
5265
5266
5267 MaybeObject* Heap::CopyFixedDoubleArrayWithMap(FixedDoubleArray* src,
5268                                                Map* map) {
5269   int len = src->length();
5270   Object* obj;
5271   { MaybeObject* maybe_obj = AllocateRawFixedDoubleArray(len, NOT_TENURED);
5272     if (!maybe_obj->ToObject(&obj)) return maybe_obj;
5273   }
5274   HeapObject* dst = HeapObject::cast(obj);
5275   dst->set_map_no_write_barrier(map);
5276   CopyBlock(
5277       dst->address() + FixedDoubleArray::kLengthOffset,
5278       src->address() + FixedDoubleArray::kLengthOffset,
5279       FixedDoubleArray::SizeFor(len) - FixedDoubleArray::kLengthOffset);
5280   return obj;
5281 }
5282
5283
5284 MaybeObject* Heap::AllocateFixedArray(int length) {
5285   ASSERT(length >= 0);
5286   if (length == 0) return empty_fixed_array();
5287   Object* result;
5288   { MaybeObject* maybe_result = AllocateRawFixedArray(length);
5289     if (!maybe_result->ToObject(&result)) return maybe_result;
5290   }
5291   // Initialize header.
5292   FixedArray* array = reinterpret_cast<FixedArray*>(result);
5293   array->set_map_no_write_barrier(fixed_array_map());
5294   array->set_length(length);
5295   // Initialize body.
5296   ASSERT(!InNewSpace(undefined_value()));
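  // Since the filler value is not in new space (asserted above), the body can
  // be initialized without write barriers.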
5297   MemsetPointer(array->data_start(), undefined_value(), length);
5298   return result;
5299 }
5300
5301
5302 MaybeObject* Heap::AllocateRawFixedArray(int length, PretenureFlag pretenure) {
5303   if (length < 0 || length > FixedArray::kMaxLength) {
5304     return Failure::OutOfMemoryException(0xe);
5305   }
5306
5307   AllocationSpace space =
5308       (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
5309   int size = FixedArray::SizeFor(length);
5310   if (space == NEW_SPACE && size > kMaxObjectSizeInNewSpace) {
5311     // Too big for new space.
5312     space = LO_SPACE;
5313   } else if (space == OLD_POINTER_SPACE &&
5314              size > Page::kMaxNonCodeHeapObjectSize) {
5315     // Too big for old pointer space.
5316     space = LO_SPACE;
5317   }
5318
5319   AllocationSpace retry_space =
5320       (size <= Page::kMaxNonCodeHeapObjectSize) ? OLD_POINTER_SPACE : LO_SPACE;
5321
5322   return AllocateRaw(size, space, retry_space);
5323 }
5324
5325
5326 MUST_USE_RESULT static MaybeObject* AllocateFixedArrayWithFiller(
5327     Heap* heap,
5328     int length,
5329     PretenureFlag pretenure,
5330     Object* filler) {
5331   ASSERT(length >= 0);
5332   ASSERT(heap->empty_fixed_array()->IsFixedArray());
5333   if (length == 0) return heap->empty_fixed_array();
5334
5335   ASSERT(!heap->InNewSpace(filler));
5336   Object* result;
5337   { MaybeObject* maybe_result = heap->AllocateRawFixedArray(length, pretenure);
5338     if (!maybe_result->ToObject(&result)) return maybe_result;
5339   }
5340
5341   HeapObject::cast(result)->set_map_no_write_barrier(heap->fixed_array_map());
5342   FixedArray* array = FixedArray::cast(result);
5343   array->set_length(length);
5344   MemsetPointer(array->data_start(), filler, length);
5345   return array;
5346 }
5347
5348
5349 MaybeObject* Heap::AllocateFixedArray(int length, PretenureFlag pretenure) {
5350   return AllocateFixedArrayWithFiller(this,
5351                                       length,
5352                                       pretenure,
5353                                       undefined_value());
5354 }
5355
5356
5357 MaybeObject* Heap::AllocateFixedArrayWithHoles(int length,
5358                                                PretenureFlag pretenure) {
5359   return AllocateFixedArrayWithFiller(this,
5360                                       length,
5361                                       pretenure,
5362                                       the_hole_value());
5363 }
5364
5365
5366 MaybeObject* Heap::AllocateUninitializedFixedArray(int length) {
5367   if (length == 0) return empty_fixed_array();
5368
5369   Object* obj;
5370   { MaybeObject* maybe_obj = AllocateRawFixedArray(length);
5371     if (!maybe_obj->ToObject(&obj)) return maybe_obj;
5372   }
5373
5374   reinterpret_cast<FixedArray*>(obj)->set_map_no_write_barrier(
5375       fixed_array_map());
5376   FixedArray::cast(obj)->set_length(length);
5377   return obj;
5378 }
5379
5380
5381 MaybeObject* Heap::AllocateEmptyFixedDoubleArray() {
5382   int size = FixedDoubleArray::SizeFor(0);
5383   Object* result;
5384   { MaybeObject* maybe_result =
5385         AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE);
5386     if (!maybe_result->ToObject(&result)) return maybe_result;
5387   }
5388   // Initialize the object.
5389   reinterpret_cast<FixedDoubleArray*>(result)->set_map_no_write_barrier(
5390       fixed_double_array_map());
5391   reinterpret_cast<FixedDoubleArray*>(result)->set_length(0);
5392   return result;
5393 }
5394
5395
5396 MaybeObject* Heap::AllocateUninitializedFixedDoubleArray(
5397     int length,
5398     PretenureFlag pretenure) {
5399   if (length == 0) return empty_fixed_array();
5400
5401   Object* elements_object;
5402   MaybeObject* maybe_obj = AllocateRawFixedDoubleArray(length, pretenure);
5403   if (!maybe_obj->ToObject(&elements_object)) return maybe_obj;
5404   FixedDoubleArray* elements =
5405       reinterpret_cast<FixedDoubleArray*>(elements_object);
5406
5407   elements->set_map_no_write_barrier(fixed_double_array_map());
5408   elements->set_length(length);
5409   return elements;
5410 }
5411
5412
5413 MaybeObject* Heap::AllocateFixedDoubleArrayWithHoles(
5414     int length,
5415     PretenureFlag pretenure) {
5416   if (length == 0) return empty_fixed_array();
5417
5418   Object* elements_object;
5419   MaybeObject* maybe_obj = AllocateRawFixedDoubleArray(length, pretenure);
5420   if (!maybe_obj->ToObject(&elements_object)) return maybe_obj;
5421   FixedDoubleArray* elements =
5422       reinterpret_cast<FixedDoubleArray*>(elements_object);
5423
5424   for (int i = 0; i < length; ++i) {
5425     elements->set_the_hole(i);
5426   }
5427
5428   elements->set_map_no_write_barrier(fixed_double_array_map());
5429   elements->set_length(length);
5430   return elements;
5431 }
5432
5433
5434 MaybeObject* Heap::AllocateRawFixedDoubleArray(int length,
5435                                                PretenureFlag pretenure) {
5436   if (length < 0 || length > FixedDoubleArray::kMaxLength) {
5437     return Failure::OutOfMemoryException(0xf);
5438   }
5439
5440   AllocationSpace space =
5441       (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
5442   int size = FixedDoubleArray::SizeFor(length);
5443
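  // On 32-bit targets an extra pointer-sized slot is reserved so that the
  // allocation can be realigned to an 8-byte boundary by EnsureDoubleAligned
  // below.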
5444 #ifndef V8_HOST_ARCH_64_BIT
5445   size += kPointerSize;
5446 #endif
5447
5448   if (space == NEW_SPACE && size > kMaxObjectSizeInNewSpace) {
5449     // Too big for new space.
5450     space = LO_SPACE;
5451   } else if (space == OLD_DATA_SPACE &&
5452              size > Page::kMaxNonCodeHeapObjectSize) {
5453     // Too big for old data space.
5454     space = LO_SPACE;
5455   }
5456
5457   AllocationSpace retry_space =
5458       (size <= Page::kMaxNonCodeHeapObjectSize) ? OLD_DATA_SPACE : LO_SPACE;
5459
5460   HeapObject* object;
5461   { MaybeObject* maybe_object = AllocateRaw(size, space, retry_space);
5462     if (!maybe_object->To<HeapObject>(&object)) return maybe_object;
5463   }
5464
5465   return EnsureDoubleAligned(this, object, size);
5466 }
5467
5468
5469 MaybeObject* Heap::AllocateHashTable(int length, PretenureFlag pretenure) {
5470   Object* result;
5471   { MaybeObject* maybe_result = AllocateFixedArray(length, pretenure);
5472     if (!maybe_result->ToObject(&result)) return maybe_result;
5473   }
5474   reinterpret_cast<HeapObject*>(result)->set_map_no_write_barrier(
5475       hash_table_map());
5476   ASSERT(result->IsHashTable());
5477   return result;
5478 }
5479
5480
5481 MaybeObject* Heap::AllocateSymbol() {
5482   // Statically ensure that it is safe to allocate symbols in paged spaces.
5483   STATIC_ASSERT(Symbol::kSize <= Page::kNonCodeObjectAreaSize);
5484
5485   Object* result;
5486   MaybeObject* maybe =
5487       AllocateRaw(Symbol::kSize, OLD_POINTER_SPACE, OLD_POINTER_SPACE);
5488   if (!maybe->ToObject(&result)) return maybe;
5489
5490   HeapObject::cast(result)->set_map_no_write_barrier(symbol_map());
5491
5492   // Generate a random hash value.
5493   int hash;
5494   int attempts = 0;
5495   do {
5496     hash = V8::RandomPrivate(isolate()) & Name::kHashBitMask;
5497     attempts++;
5498   } while (hash == 0 && attempts < 30);
5499   if (hash == 0) hash = 1;  // never return 0
5500
5501   Symbol::cast(result)->set_hash_field(
5502       Name::kIsNotArrayIndexMask | (hash << Name::kHashShift));
5503   Symbol::cast(result)->set_name(undefined_value());
5504
5505   ASSERT(result->IsSymbol());
5506   return result;
5507 }
5508
5509
5510 MaybeObject* Heap::AllocateNativeContext() {
5511   Object* result;
5512   { MaybeObject* maybe_result =
5513         AllocateFixedArray(Context::NATIVE_CONTEXT_SLOTS);
5514     if (!maybe_result->ToObject(&result)) return maybe_result;
5515   }
5516   Context* context = reinterpret_cast<Context*>(result);
5517   context->set_map_no_write_barrier(native_context_map());
5518   context->set_js_array_maps(undefined_value());
5519   ASSERT(context->IsNativeContext());
5520   ASSERT(result->IsContext());
5521   return result;
5522 }
5523
5524
5525 MaybeObject* Heap::AllocateGlobalContext(JSFunction* function,
5526                                          ScopeInfo* scope_info) {
5527   Object* result;
5528   { MaybeObject* maybe_result =
5529         AllocateFixedArray(scope_info->ContextLength(), TENURED);
5530     if (!maybe_result->ToObject(&result)) return maybe_result;
5531   }
5532   Context* context = reinterpret_cast<Context*>(result);
5533   context->set_map_no_write_barrier(global_context_map());
5534   context->set_closure(function);
5535   context->set_previous(function->context());
5536   context->set_extension(scope_info);
5537   context->set_global_object(function->context()->global_object());
5538   ASSERT(context->IsGlobalContext());
5539   ASSERT(result->IsContext());
5540   return context;
5541 }
5542
5543
5544 MaybeObject* Heap::AllocateModuleContext(ScopeInfo* scope_info) {
5545   Object* result;
5546   { MaybeObject* maybe_result =
5547         AllocateFixedArray(scope_info->ContextLength(), TENURED);
5548     if (!maybe_result->ToObject(&result)) return maybe_result;
5549   }
5550   Context* context = reinterpret_cast<Context*>(result);
5551   context->set_map_no_write_barrier(module_context_map());
5552   // Instance link will be set later.
5553   context->set_extension(Smi::FromInt(0));
5554   return context;
5555 }
5556
5557
5558 MaybeObject* Heap::AllocateFunctionContext(int length, JSFunction* function) {
5559   ASSERT(length >= Context::MIN_CONTEXT_SLOTS);
5560   Object* result;
5561   { MaybeObject* maybe_result = AllocateFixedArray(length);
5562     if (!maybe_result->ToObject(&result)) return maybe_result;
5563   }
5564   Context* context = reinterpret_cast<Context*>(result);
5565   context->set_map_no_write_barrier(function_context_map());
5566   context->set_closure(function);
5567   context->set_previous(function->context());
5568   context->set_extension(Smi::FromInt(0));
5569   context->set_global_object(function->context()->global_object());
5570   return context;
5571 }
5572
5573
5574 MaybeObject* Heap::AllocateCatchContext(JSFunction* function,
5575                                         Context* previous,
5576                                         String* name,
5577                                         Object* thrown_object) {
5578   STATIC_ASSERT(Context::MIN_CONTEXT_SLOTS == Context::THROWN_OBJECT_INDEX);
5579   Object* result;
5580   { MaybeObject* maybe_result =
5581         AllocateFixedArray(Context::MIN_CONTEXT_SLOTS + 1);
5582     if (!maybe_result->ToObject(&result)) return maybe_result;
5583   }
5584   Context* context = reinterpret_cast<Context*>(result);
5585   context->set_map_no_write_barrier(catch_context_map());
5586   context->set_closure(function);
5587   context->set_previous(previous);
5588   context->set_extension(name);
5589   context->set_global_object(previous->global_object());
5590   context->set(Context::THROWN_OBJECT_INDEX, thrown_object);
5591   return context;
5592 }
5593
5594
5595 MaybeObject* Heap::AllocateWithContext(JSFunction* function,
5596                                        Context* previous,
5597                                        JSObject* extension) {
5598   Object* result;
5599   { MaybeObject* maybe_result = AllocateFixedArray(Context::MIN_CONTEXT_SLOTS);
5600     if (!maybe_result->ToObject(&result)) return maybe_result;
5601   }
5602   Context* context = reinterpret_cast<Context*>(result);
5603   context->set_map_no_write_barrier(with_context_map());
5604   context->set_closure(function);
5605   context->set_previous(previous);
5606   context->set_extension(extension);
5607   context->set_global_object(previous->global_object());
5608   return context;
5609 }
5610
5611
5612 MaybeObject* Heap::AllocateBlockContext(JSFunction* function,
5613                                         Context* previous,
5614                                         ScopeInfo* scope_info) {
5615   Object* result;
5616   { MaybeObject* maybe_result =
5617         AllocateFixedArrayWithHoles(scope_info->ContextLength());
5618     if (!maybe_result->ToObject(&result)) return maybe_result;
5619   }
5620   Context* context = reinterpret_cast<Context*>(result);
5621   context->set_map_no_write_barrier(block_context_map());
5622   context->set_closure(function);
5623   context->set_previous(previous);
5624   context->set_extension(scope_info);
5625   context->set_global_object(previous->global_object());
5626   return context;
5627 }
5628
5629
5630 MaybeObject* Heap::AllocateScopeInfo(int length) {
5631   FixedArray* scope_info;
5632   MaybeObject* maybe_scope_info = AllocateFixedArray(length, TENURED);
5633   if (!maybe_scope_info->To(&scope_info)) return maybe_scope_info;
5634   scope_info->set_map_no_write_barrier(scope_info_map());
5635   return scope_info;
5636 }
5637
5638
5639 MaybeObject* Heap::AllocateExternal(void* value) {
5640   Foreign* foreign;
5641   { MaybeObject* maybe_result = AllocateForeign(static_cast<Address>(value));
5642     if (!maybe_result->To(&foreign)) return maybe_result;
5643   }
5644   JSObject* external;
5645   { MaybeObject* maybe_result = AllocateJSObjectFromMap(external_map());
5646     if (!maybe_result->To(&external)) return maybe_result;
5647   }
5648   external->SetInternalField(0, foreign);
5649   return external;
5650 }
5651
5652
5653 MaybeObject* Heap::AllocateStruct(InstanceType type) {
5654   Map* map;
5655   switch (type) {
5656 #define MAKE_CASE(NAME, Name, name) \
5657     case NAME##_TYPE: map = name##_map(); break;
5658 STRUCT_LIST(MAKE_CASE)
5659 #undef MAKE_CASE
5660     default:
5661       UNREACHABLE();
5662       return Failure::InternalError();
5663   }
5664   int size = map->instance_size();
5665   AllocationSpace space =
5666       (size > Page::kMaxNonCodeHeapObjectSize) ? LO_SPACE : OLD_POINTER_SPACE;
5667   Object* result;
5668   { MaybeObject* maybe_result = Allocate(map, space);
5669     if (!maybe_result->ToObject(&result)) return maybe_result;
5670   }
5671   Struct::cast(result)->InitializeBody(size);
5672   return result;
5673 }
5674
5675
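// The heap can only be iterated safely once the old pointer and old data
// spaces have been swept precisely; conservatively swept pages may still
// contain regions that do not parse as valid objects. EnsureHeapIsIterable()
// forces a full GC with kMakeHeapIterableMask when this is not yet the case.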
5676 bool Heap::IsHeapIterable() {
5677   return (!old_pointer_space()->was_swept_conservatively() &&
5678           !old_data_space()->was_swept_conservatively());
5679 }
5680
5681
5682 void Heap::EnsureHeapIsIterable() {
5683   ASSERT(IsAllocationAllowed());
5684   if (!IsHeapIterable()) {
5685     CollectAllGarbage(kMakeHeapIterableMask, "Heap::EnsureHeapIsIterable");
5686   }
5687   ASSERT(IsHeapIterable());
5688 }
5689
5690
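// Performs one incremental marking step of the given size. If marking has
// completed, the round is finished with a full non-incremental GC; when no
// other GC has happened since the previous idle full GC, the mutator is
// assumed to be inactive, so the compilation cache is cleared and new space
// is shrunk and uncommitted as well.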
5691 void Heap::AdvanceIdleIncrementalMarking(intptr_t step_size) {
5692   incremental_marking()->Step(step_size,
5693                               IncrementalMarking::NO_GC_VIA_STACK_GUARD);
5694
5695   if (incremental_marking()->IsComplete()) {
5696     bool uncommit = false;
5697     if (gc_count_at_last_idle_gc_ == gc_count_) {
5698       // No GC since the last full GC; the mutator is probably not active.
5699       isolate_->compilation_cache()->Clear();
5700       uncommit = true;
5701     }
5702     CollectAllGarbage(kNoGCFlags, "idle notification: finalize incremental");
5703     gc_count_at_last_idle_gc_ = gc_count_;
5704     if (uncommit) {
5705       new_space_.Shrink();
5706       UncommitFromSpace();
5707     }
5708   }
5709 }
5710
5711
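// Called in response to an idle notification from the embedder. Returns true
// once the heap considers the current idle round finished, i.e. no further
// notifications are needed until real work has been done again. A minimal
// calling sketch, assuming the public v8::V8::IdleNotification(int hint)
// entry point of this V8 version (the surrounding loop is the embedder's):
//
//   // Run from the embedder's idle handler until V8 reports it is done.
//   while (!v8::V8::IdleNotification(100)) {
//     // More incremental GC work (marking or sweeping) is still pending.
//   }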
5712 bool Heap::IdleNotification(int hint) {
5713   // Hints greater than this value indicate that
5714   // the embedder is requesting a lot of GC work.
5715   const int kMaxHint = 1000;
5716   // The minimal hint that allows a full GC to be performed.
5717   const int kMinHintForFullGC = 100;
5718   intptr_t size_factor = Min(Max(hint, 20), kMaxHint) / 4;
5719   // The size factor is in range [5..250]. The numbers here are chosen from
5720   // experiments. If you change them, make sure to test with
5721   // chrome/performance_ui_tests --gtest_filter="GeneralMixMemoryTest.*".
5722   intptr_t step_size =
5723       size_factor * IncrementalMarking::kAllocatedThreshold;
5724
5725   if (contexts_disposed_ > 0) {
5726     if (hint >= kMaxHint) {
5727       // The embedder is requesting a lot of GC work after context
5728       // disposal, so we age inline caches to prevent them from keeping
5729       // objects from the old context alive.
5730       AgeInlineCaches();
5731     }
5732     int mark_sweep_time = Min(TimeMarkSweepWouldTakeInMs(), 1000);
5733     if (hint >= mark_sweep_time && !FLAG_expose_gc &&
5734         incremental_marking()->IsStopped()) {
5735       HistogramTimerScope scope(isolate_->counters()->gc_context());
5736       CollectAllGarbage(kReduceMemoryFootprintMask,
5737                         "idle notification: contexts disposed");
5738     } else {
5739       AdvanceIdleIncrementalMarking(step_size);
5740       contexts_disposed_ = 0;
5741     }
5742     // After context disposal there is likely a lot of garbage remaining; reset
5743     // the idle notification counters in order to trigger more incremental GCs
5744     // on subsequent idle notifications.
5745     StartIdleRound();
5746     return false;
5747   }
5748
5749   if (!FLAG_incremental_marking || FLAG_expose_gc || Serializer::enabled()) {
5750     return IdleGlobalGC();
5751   }
5752
5753   // By doing small chunks of GC work in each IdleNotification,
5754   // we perform a round of incremental GCs and after that wait until
5755   // the mutator creates enough garbage to justify a new round.
5756   // An incremental GC progresses as follows:
5757   // 1. many incremental marking steps,
5758   // 2. one old space mark-sweep-compact,
5759   // 3. many lazy sweep steps.
5760   // Use mark-sweep-compact events to count incremental GCs in a round.
5761
5762   if (incremental_marking()->IsStopped()) {
5763     if (!mark_compact_collector()->AreSweeperThreadsActivated() &&
5764         !IsSweepingComplete() &&
5765         !AdvanceSweepers(static_cast<int>(step_size))) {
5766       return false;
5767     }
5768   }
5769
5770   if (mark_sweeps_since_idle_round_started_ >= kMaxMarkSweepsInIdleRound) {
5771     if (EnoughGarbageSinceLastIdleRound()) {
5772       StartIdleRound();
5773     } else {
5774       return true;
5775     }
5776   }
5777
5778   int new_mark_sweeps = ms_count_ - ms_count_at_last_idle_notification_;
5779   mark_sweeps_since_idle_round_started_ += new_mark_sweeps;
5780   ms_count_at_last_idle_notification_ = ms_count_;
5781
5782   int remaining_mark_sweeps = kMaxMarkSweepsInIdleRound -
5783                               mark_sweeps_since_idle_round_started_;
5784
5785   if (remaining_mark_sweeps <= 0) {
5786     FinishIdleRound();
5787     return true;
5788   }
5789
5790   if (incremental_marking()->IsStopped()) {
5791     // If there are no more than two GCs left in this idle round and we are
5792     // allowed to do a full GC, then make those GCs full in order to compact
5793     // the code space.
5794     // TODO(ulan): Once we enable code compaction for incremental marking,
5795     // we can get rid of this special case and always start incremental marking.
5796     if (remaining_mark_sweeps <= 2 && hint >= kMinHintForFullGC) {
5797       CollectAllGarbage(kReduceMemoryFootprintMask,
5798                         "idle notification: finalize idle round");
5799     } else {
5800       incremental_marking()->Start();
5801     }
5802   }
5803   if (!incremental_marking()->IsStopped()) {
5804     AdvanceIdleIncrementalMarking(step_size);
5805   }
5806   return false;
5807 }
5808
5809
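// Legacy idle-time GC, used when incremental marking is disabled, the GC is
// exposed to JavaScript, or the serializer is enabled. It counts idle
// notifications and escalates: a scavenge after kIdlesBeforeScavenge
// notifications, full collections at kIdlesBeforeMarkSweep and
// kIdlesBeforeMarkCompact, after which the round is reported as finished.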
5810 bool Heap::IdleGlobalGC() {
5811   static const int kIdlesBeforeScavenge = 4;
5812   static const int kIdlesBeforeMarkSweep = 7;
5813   static const int kIdlesBeforeMarkCompact = 8;
5814   static const int kMaxIdleCount = kIdlesBeforeMarkCompact + 1;
5815   static const unsigned int kGCsBetweenCleanup = 4;
5816
5817   if (!last_idle_notification_gc_count_init_) {
5818     last_idle_notification_gc_count_ = gc_count_;
5819     last_idle_notification_gc_count_init_ = true;
5820   }
5821
5822   bool uncommit = true;
5823   bool finished = false;
5824
5825   // Reset the number of idle notifications received when a number of
5826   // GCs have taken place. This allows another round of cleanup based
5827   // on idle notifications if enough work has been carried out to
5828   // provoke a number of garbage collections.
5829   if (gc_count_ - last_idle_notification_gc_count_ < kGCsBetweenCleanup) {
5830     number_idle_notifications_ =
5831         Min(number_idle_notifications_ + 1, kMaxIdleCount);
5832   } else {
5833     number_idle_notifications_ = 0;
5834     last_idle_notification_gc_count_ = gc_count_;
5835   }
5836
5837   if (number_idle_notifications_ == kIdlesBeforeScavenge) {
5838     CollectGarbage(NEW_SPACE, "idle notification");
5839     new_space_.Shrink();
5840     last_idle_notification_gc_count_ = gc_count_;
5841   } else if (number_idle_notifications_ == kIdlesBeforeMarkSweep) {
5842     // Before doing the mark-sweep collections we clear the
5843     // compilation cache to avoid hanging on to source code and
5844     // generated code for cached functions.
5845     isolate_->compilation_cache()->Clear();
5846
5847     CollectAllGarbage(kReduceMemoryFootprintMask, "idle notification");
5848     new_space_.Shrink();
5849     last_idle_notification_gc_count_ = gc_count_;
5850
5851   } else if (number_idle_notifications_ == kIdlesBeforeMarkCompact) {
5852     CollectAllGarbage(kReduceMemoryFootprintMask, "idle notification");
5853     new_space_.Shrink();
5854     last_idle_notification_gc_count_ = gc_count_;
5855     number_idle_notifications_ = 0;
5856     finished = true;
5857   } else if (number_idle_notifications_ > kIdlesBeforeMarkCompact) {
5858     // If we have received more than kIdlesBeforeMarkCompact idle
5859     // notifications we do not perform any cleanup because we don't
5860     // expect to gain much by doing so.
5861     finished = true;
5862   }
5863
5864   if (uncommit) UncommitFromSpace();
5865
5866   return finished;
5867 }
5868
5869
5870 #ifdef DEBUG
5871
5872 void Heap::Print() {
5873   if (!HasBeenSetUp()) return;
5874   isolate()->PrintStack();
5875   AllSpaces spaces(this);
5876   for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
5877     space->Print();
5878   }
5879 }
5880
5881
5882 void Heap::ReportCodeStatistics(const char* title) {
5883   PrintF(">>>>>> Code Stats (%s) >>>>>>\n", title);
5884   PagedSpace::ResetCodeStatistics();
5885   // We do not look for code in new space, map space, or old space.  If code
5886   // somehow ends up in those spaces, we would miss it here.
5887   code_space_->CollectCodeStatistics();
5888   lo_space_->CollectCodeStatistics();
5889   PagedSpace::ReportCodeStatistics();
5890 }
5891
5892
5893 // This function expects that NewSpace's allocated objects histogram is
5894 // populated (via a call to CollectStatistics or else as a side effect of a
5895 // just-completed scavenge collection).
5896 void Heap::ReportHeapStatistics(const char* title) {
5897   USE(title);
5898   PrintF(">>>>>> =============== %s (%d) =============== >>>>>>\n",
5899          title, gc_count_);
5900   PrintF("old_gen_promotion_limit_ %" V8_PTR_PREFIX "d\n",
5901          old_gen_promotion_limit_);
5902   PrintF("old_gen_allocation_limit_ %" V8_PTR_PREFIX "d\n",
5903          old_gen_allocation_limit_);
5904   PrintF("old_gen_limit_factor_ %d\n", old_gen_limit_factor_);
5905
5906   PrintF("\n");
5907   PrintF("Number of handles : %d\n", HandleScope::NumberOfHandles(isolate_));
5908   isolate_->global_handles()->PrintStats();
5909   PrintF("\n");
5910
5911   PrintF("Heap statistics : ");
5912   isolate_->memory_allocator()->ReportStatistics();
5913   PrintF("To space : ");
5914   new_space_.ReportStatistics();
5915   PrintF("Old pointer space : ");
5916   old_pointer_space_->ReportStatistics();
5917   PrintF("Old data space : ");
5918   old_data_space_->ReportStatistics();
5919   PrintF("Code space : ");
5920   code_space_->ReportStatistics();
5921   PrintF("Map space : ");
5922   map_space_->ReportStatistics();
5923   PrintF("Cell space : ");
5924   cell_space_->ReportStatistics();
5925   PrintF("Large object space : ");
5926   lo_space_->ReportStatistics();
5927   PrintF(">>>>>> ========================================= >>>>>>\n");
5928 }
5929
5930 #endif  // DEBUG
5931
5932 bool Heap::Contains(HeapObject* value) {
5933   return Contains(value->address());
5934 }
5935
5936
5937 bool Heap::Contains(Address addr) {
5938   if (OS::IsOutsideAllocatedSpace(addr)) return false;
5939   return HasBeenSetUp() &&
5940     (new_space_.ToSpaceContains(addr) ||
5941      old_pointer_space_->Contains(addr) ||
5942      old_data_space_->Contains(addr) ||
5943      code_space_->Contains(addr) ||
5944      map_space_->Contains(addr) ||
5945      cell_space_->Contains(addr) ||
5946      lo_space_->SlowContains(addr));
5947 }
5948
5949
5950 bool Heap::InSpace(HeapObject* value, AllocationSpace space) {
5951   return InSpace(value->address(), space);
5952 }
5953
5954
5955 bool Heap::InSpace(Address addr, AllocationSpace space) {
5956   if (OS::IsOutsideAllocatedSpace(addr)) return false;
5957   if (!HasBeenSetUp()) return false;
5958
5959   switch (space) {
5960     case NEW_SPACE:
5961       return new_space_.ToSpaceContains(addr);
5962     case OLD_POINTER_SPACE:
5963       return old_pointer_space_->Contains(addr);
5964     case OLD_DATA_SPACE:
5965       return old_data_space_->Contains(addr);
5966     case CODE_SPACE:
5967       return code_space_->Contains(addr);
5968     case MAP_SPACE:
5969       return map_space_->Contains(addr);
5970     case CELL_SPACE:
5971       return cell_space_->Contains(addr);
5972     case LO_SPACE:
5973       return lo_space_->SlowContains(addr);
5974   }
5975
5976   return false;
5977 }
5978
5979
5980 #ifdef VERIFY_HEAP
5981 void Heap::Verify() {
5982   CHECK(HasBeenSetUp());
5983
5984   store_buffer()->Verify();
5985
5986   VerifyPointersVisitor visitor;
5987   IterateRoots(&visitor, VISIT_ONLY_STRONG);
5988
5989   new_space_.Verify();
5990
5991   old_pointer_space_->Verify(&visitor);
5992   map_space_->Verify(&visitor);
5993
5994   VerifyPointersVisitor no_dirty_regions_visitor;
5995   old_data_space_->Verify(&no_dirty_regions_visitor);
5996   code_space_->Verify(&no_dirty_regions_visitor);
5997   cell_space_->Verify(&no_dirty_regions_visitor);
5998
5999   lo_space_->Verify();
6000 }
6001 #endif
6002
6003
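// The Internalize* functions below look a string up in the string table and
// add it if necessary. Since the lookup may have to grow the table, the
// (possibly reallocated) table is written back into the root list directly.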
6004 MaybeObject* Heap::InternalizeUtf8String(Vector<const char> string) {
6005   Object* result = NULL;
6006   Object* new_table;
6007   { MaybeObject* maybe_new_table =
6008         string_table()->LookupUtf8String(string, &result);
6009     if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
6010   }
6011   // Can't use set_string_table because StringTable::cast knows that
6012   // StringTable is a singleton and checks for identity.
6013   roots_[kStringTableRootIndex] = new_table;
6014   ASSERT(result != NULL);
6015   return result;
6016 }
6017
6018
6019 MaybeObject* Heap::InternalizeOneByteString(Vector<const uint8_t> string) {
6020   Object* result = NULL;
6021   Object* new_table;
6022   { MaybeObject* maybe_new_table =
6023         string_table()->LookupOneByteString(string, &result);
6024     if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
6025   }
6026   // Can't use set_string_table because StringTable::cast knows that
6027   // StringTable is a singleton and checks for identity.
6028   roots_[kStringTableRootIndex] = new_table;
6029   ASSERT(result != NULL);
6030   return result;
6031 }
6032
6033
6034 MaybeObject* Heap::InternalizeOneByteString(Handle<SeqOneByteString> string,
6035                                      int from,
6036                                      int length) {
6037   Object* result = NULL;
6038   Object* new_table;
6039   { MaybeObject* maybe_new_table =
6040         string_table()->LookupSubStringOneByteString(string,
6041                                                    from,
6042                                                    length,
6043                                                    &result);
6044     if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
6045   }
6046   // Can't use set_string_table because StringTable::cast knows that
6047   // StringTable is a singleton and checks for identity.
6048   roots_[kStringTableRootIndex] = new_table;
6049   ASSERT(result != NULL);
6050   return result;
6051 }
6052
6053
6054 MaybeObject* Heap::InternalizeTwoByteString(Vector<const uc16> string) {
6055   Object* result = NULL;
6056   Object* new_table;
6057   { MaybeObject* maybe_new_table =
6058         string_table()->LookupTwoByteString(string, &result);
6059     if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
6060   }
6061   // Can't use set_string_table because StringTable::cast knows that
6062   // StringTable is a singleton and checks for identity.
6063   roots_[kStringTableRootIndex] = new_table;
6064   ASSERT(result != NULL);
6065   return result;
6066 }
6067
6068
6069 MaybeObject* Heap::InternalizeString(String* string) {
6070   if (string->IsInternalizedString()) return string;
6071   Object* result = NULL;
6072   Object* new_table;
6073   { MaybeObject* maybe_new_table =
6074         string_table()->LookupString(string, &result);
6075     if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
6076   }
6077   // Can't use set_string_table because StringTable::cast knows that
6078   // StringTable is a singleton and checks for identity.
6079   roots_[kStringTableRootIndex] = new_table;
6080   ASSERT(result != NULL);
6081   return result;
6082 }
6083
6084
6085 bool Heap::InternalizeStringIfExists(String* string, String** result) {
6086   if (string->IsInternalizedString()) {
6087     *result = string;
6088     return true;
6089   }
6090   return string_table()->LookupStringIfExists(string, result);
6091 }
6092
6093
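// Overwrites every word of from-space with kFromSpaceZapValue so that stale
// pointers into from-space are easy to recognize.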
6094 void Heap::ZapFromSpace() {
6095   NewSpacePageIterator it(new_space_.FromSpaceStart(),
6096                           new_space_.FromSpaceEnd());
6097   while (it.has_next()) {
6098     NewSpacePage* page = it.next();
6099     for (Address cursor = page->area_start(), limit = page->area_end();
6100          cursor < limit;
6101          cursor += kPointerSize) {
6102       Memory::Address_at(cursor) = kFromSpaceZapValue;
6103     }
6104   }
6105 }
6106
6107
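// Visits every pointer-sized slot in [start, end). Slots that point into
// from-space are handed to the callback (which is expected to update them)
// and, if the updated value is still in new space, re-entered into the store
// buffer. During incremental compaction, slots of black objects that point
// at evacuation candidates are recorded as well.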
6108 void Heap::IterateAndMarkPointersToFromSpace(Address start,
6109                                              Address end,
6110                                              ObjectSlotCallback callback) {
6111   Address slot_address = start;
6112
6113   // We are not collecting slots on new space objects during mutation,
6114   // thus we have to scan for pointers to evacuation candidates when we
6115   // promote objects. But we should not record any slots in non-black
6116   // objects. A grey object's slots would be rescanned anyway, and a
6117   // white object might not survive until the end of the collection, so
6118   // recording its slots would violate the invariant.
6119   bool record_slots = false;
6120   if (incremental_marking()->IsCompacting()) {
6121     MarkBit mark_bit = Marking::MarkBitFrom(HeapObject::FromAddress(start));
6122     record_slots = Marking::IsBlack(mark_bit);
6123   }
6124
6125   while (slot_address < end) {
6126     Object** slot = reinterpret_cast<Object**>(slot_address);
6127     Object* object = *slot;
6128     // If the store buffer becomes overfull we mark pages as being exempt from
6129     // the store buffer.  These pages are scanned to find pointers that point
6130     // to the new space.  In that case we may hit newly promoted objects and
6131     // fix the pointers before the promotion queue gets to them.  Thus the 'if'.
6132     if (object->IsHeapObject()) {
6133       if (Heap::InFromSpace(object)) {
6134         callback(reinterpret_cast<HeapObject**>(slot),
6135                  HeapObject::cast(object));
6136         Object* new_object = *slot;
6137         if (InNewSpace(new_object)) {
6138           SLOW_ASSERT(Heap::InToSpace(new_object));
6139           SLOW_ASSERT(new_object->IsHeapObject());
6140           store_buffer_.EnterDirectlyIntoStoreBuffer(
6141               reinterpret_cast<Address>(slot));
6142         }
6143         SLOW_ASSERT(!MarkCompactCollector::IsOnEvacuationCandidate(new_object));
6144       } else if (record_slots &&
6145                  MarkCompactCollector::IsOnEvacuationCandidate(object)) {
6146         mark_compact_collector()->RecordSlot(slot, slot, object);
6147       }
6148     }
6149     slot_address += kPointerSize;
6150   }
6151 }
6152
6153
6154 #ifdef DEBUG
6155 typedef bool (*CheckStoreBufferFilter)(Object** addr);
6156
6157
6158 bool IsAMapPointerAddress(Object** addr) {
6159   uintptr_t a = reinterpret_cast<uintptr_t>(addr);
6160   int mod = a % Map::kSize;
6161   return mod >= Map::kPointerFieldsBeginOffset &&
6162          mod < Map::kPointerFieldsEndOffset;
6163 }
6164
6165
6166 bool EverythingsAPointer(Object** addr) {
6167   return true;
6168 }
6169
6170
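// Walks the slots in [current, limit), skipping free-space blocks and the
// current linear allocation area, and checks that every slot accepted by the
// filter that holds a new-space pointer is present in the (sorted) store
// buffer. A missing entry is a fatal error.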
6171 static void CheckStoreBuffer(Heap* heap,
6172                              Object** current,
6173                              Object** limit,
6174                              Object**** store_buffer_position,
6175                              Object*** store_buffer_top,
6176                              CheckStoreBufferFilter filter,
6177                              Address special_garbage_start,
6178                              Address special_garbage_end) {
6179   Map* free_space_map = heap->free_space_map();
6180   for ( ; current < limit; current++) {
6181     Object* o = *current;
6182     Address current_address = reinterpret_cast<Address>(current);
6183     // Skip free space.
6184     if (o == free_space_map) {
6185       Address current_address = reinterpret_cast<Address>(current);
6186       FreeSpace* free_space =
6187           FreeSpace::cast(HeapObject::FromAddress(current_address));
6188       int skip = free_space->Size();
6189       ASSERT(current_address + skip <= reinterpret_cast<Address>(limit));
6190       ASSERT(skip > 0);
6191       current_address += skip - kPointerSize;
6192       current = reinterpret_cast<Object**>(current_address);
6193       continue;
6194     }
6195     // Skip the current linear allocation space between top and limit, which
6196     // is not marked with the free space map but can contain junk.
6197     if (current_address == special_garbage_start &&
6198         special_garbage_end != special_garbage_start) {
6199       current_address = special_garbage_end - kPointerSize;
6200       current = reinterpret_cast<Object**>(current_address);
6201       continue;
6202     }
6203     if (!(*filter)(current)) continue;
6204     ASSERT(current_address < special_garbage_start ||
6205            current_address >= special_garbage_end);
6206     ASSERT(reinterpret_cast<uintptr_t>(o) != kFreeListZapValue);
6207     // We have to check that the pointer does not point into new space
6208     // without trying to cast it to a heap object since the hash field of
6209     // a string can contain values like 1 and 3 which are tagged null
6210     // pointers.
6211     if (!heap->InNewSpace(o)) continue;
6212     while (**store_buffer_position < current &&
6213            *store_buffer_position < store_buffer_top) {
6214       (*store_buffer_position)++;
6215     }
6216     if (**store_buffer_position != current ||
6217         *store_buffer_position == store_buffer_top) {
6218       Object** obj_start = current;
6219       while (!(*obj_start)->IsMap()) obj_start--;
6220       UNREACHABLE();
6221     }
6222   }
6223 }
6224
6225
6226 // Check that the store buffer contains all intergenerational pointers by
6227 // scanning a page and ensuring that all pointers to young space are in the
6228 // store buffer.
6229 void Heap::OldPointerSpaceCheckStoreBuffer() {
6230   OldSpace* space = old_pointer_space();
6231   PageIterator pages(space);
6232
6233   store_buffer()->SortUniq();
6234
6235   while (pages.has_next()) {
6236     Page* page = pages.next();
6237     Object** current = reinterpret_cast<Object**>(page->area_start());
6238
6239     Address end = page->area_end();
6240
6241     Object*** store_buffer_position = store_buffer()->Start();
6242     Object*** store_buffer_top = store_buffer()->Top();
6243
6244     Object** limit = reinterpret_cast<Object**>(end);
6245     CheckStoreBuffer(this,
6246                      current,
6247                      limit,
6248                      &store_buffer_position,
6249                      store_buffer_top,
6250                      &EverythingsAPointer,
6251                      space->top(),
6252                      space->limit());
6253   }
6254 }
6255
6256
6257 void Heap::MapSpaceCheckStoreBuffer() {
6258   MapSpace* space = map_space();
6259   PageIterator pages(space);
6260
6261   store_buffer()->SortUniq();
6262
6263   while (pages.has_next()) {
6264     Page* page = pages.next();
6265     Object** current = reinterpret_cast<Object**>(page->area_start());
6266
6267     Address end = page->area_end();
6268
6269     Object*** store_buffer_position = store_buffer()->Start();
6270     Object*** store_buffer_top = store_buffer()->Top();
6271
6272     Object** limit = reinterpret_cast<Object**>(end);
6273     CheckStoreBuffer(this,
6274                      current,
6275                      limit,
6276                      &store_buffer_position,
6277                      store_buffer_top,
6278                      &IsAMapPointerAddress,
6279                      space->top(),
6280                      space->limit());
6281   }
6282 }
6283
6284
6285 void Heap::LargeObjectSpaceCheckStoreBuffer() {
6286   LargeObjectIterator it(lo_space());
6287   for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
6288     // We only have code, sequential strings, or fixed arrays in large
6289     // object space, and only fixed arrays can possibly contain pointers to
6290     // the young generation.
6291     if (object->IsFixedArray()) {
6292       Object*** store_buffer_position = store_buffer()->Start();
6293       Object*** store_buffer_top = store_buffer()->Top();
6294       Object** current = reinterpret_cast<Object**>(object->address());
6295       Object** limit =
6296           reinterpret_cast<Object**>(object->address() + object->Size());
6297       CheckStoreBuffer(this,
6298                        current,
6299                        limit,
6300                        &store_buffer_position,
6301                        store_buffer_top,
6302                        &EverythingsAPointer,
6303                        NULL,
6304                        NULL);
6305     }
6306   }
6307 }
6308 #endif
6309
6310
6311 void Heap::IterateRoots(ObjectVisitor* v, VisitMode mode) {
6312   IterateStrongRoots(v, mode);
6313   IterateWeakRoots(v, mode);
6314 }
6315
6316
6317 void Heap::IterateWeakRoots(ObjectVisitor* v, VisitMode mode) {
6318   v->VisitPointer(reinterpret_cast<Object**>(&roots_[kStringTableRootIndex]));
6319   v->Synchronize(VisitorSynchronization::kStringTable);
6320   if (mode != VISIT_ALL_IN_SCAVENGE &&
6321       mode != VISIT_ALL_IN_SWEEP_NEWSPACE) {
6322     // Scavenge collections have special processing for this.
6323     external_string_table_.Iterate(v);
6324     error_object_list_.Iterate(v);
6325   }
6326   v->Synchronize(VisitorSynchronization::kExternalStringsTable);
6327 }
6328
6329
6330 void Heap::IterateStrongRoots(ObjectVisitor* v, VisitMode mode) {
6331   v->VisitPointers(&roots_[0], &roots_[kStrongRootListLength]);
6332   v->Synchronize(VisitorSynchronization::kStrongRootList);
6333
6334   v->VisitPointer(BitCast<Object**>(&hidden_string_));
6335   v->Synchronize(VisitorSynchronization::kInternalizedString);
6336
6337   isolate_->bootstrapper()->Iterate(v);
6338   v->Synchronize(VisitorSynchronization::kBootstrapper);
6339   isolate_->Iterate(v);
6340   v->Synchronize(VisitorSynchronization::kTop);
6341   Relocatable::Iterate(v);
6342   v->Synchronize(VisitorSynchronization::kRelocatable);
6343
6344 #ifdef ENABLE_DEBUGGER_SUPPORT
6345   isolate_->debug()->Iterate(v);
6346   if (isolate_->deoptimizer_data() != NULL) {
6347     isolate_->deoptimizer_data()->Iterate(v);
6348   }
6349 #endif
6350   v->Synchronize(VisitorSynchronization::kDebug);
6351   isolate_->compilation_cache()->Iterate(v);
6352   v->Synchronize(VisitorSynchronization::kCompilationCache);
6353
6354   // Iterate over local handles in handle scopes.
6355   isolate_->handle_scope_implementer()->Iterate(v);
6356   isolate_->IterateDeferredHandles(v);
6357   v->Synchronize(VisitorSynchronization::kHandleScope);
6358
6359   // Iterate over the builtin code objects and code stubs in the
6360   // heap. Note that it is not necessary to iterate over code objects
6361   // on scavenge collections.
6362   if (mode != VISIT_ALL_IN_SCAVENGE) {
6363     isolate_->builtins()->IterateBuiltins(v);
6364   }
6365   v->Synchronize(VisitorSynchronization::kBuiltins);
6366
6367   // Iterate over global handles.
6368   switch (mode) {
6369     case VISIT_ONLY_STRONG:
6370       isolate_->global_handles()->IterateStrongRoots(v);
6371       break;
6372     case VISIT_ALL_IN_SCAVENGE:
6373       isolate_->global_handles()->IterateNewSpaceStrongAndDependentRoots(v);
6374       break;
6375     case VISIT_ALL_IN_SWEEP_NEWSPACE:
6376     case VISIT_ALL:
6377       isolate_->global_handles()->IterateAllRoots(v);
6378       break;
6379   }
6380   v->Synchronize(VisitorSynchronization::kGlobalHandles);
6381
6382   // Iterate over pointers being held by inactive threads.
6383   isolate_->thread_manager()->Iterate(v);
6384   v->Synchronize(VisitorSynchronization::kThreadManager);
6385
6386   // Iterate over the pointers the Serialization/Deserialization code is
6387   // holding.
6388   // During garbage collection this keeps the partial snapshot cache alive.
6389   // During deserialization of the startup snapshot this creates the partial
6390   // snapshot cache and deserializes the objects it refers to.  During
6391   // serialization this does nothing, since the partial snapshot cache is
6392   // empty.  However the next thing we do is create the partial snapshot,
6393   // filling up the partial snapshot cache with objects it needs as we go.
6394   SerializerDeserializer::Iterate(v);
6395   // We don't do a v->Synchronize call here, because in debug mode that will
6396   // output a flag to the snapshot.  However at this point the serializer and
6397   // deserializer are deliberately a little unsynchronized (see above) so the
6398   // checking of the sync flag in the snapshot would fail.
6399 }
6400
6401
6402 // TODO(1236194): Since the heap size is configurable on the command line
6403 // and through the API, we should gracefully handle the case that the heap
6404 // size is not big enough to fit all the initial objects.
6405 bool Heap::ConfigureHeap(int max_semispace_size,
6406                          intptr_t max_old_gen_size,
6407                          intptr_t max_executable_size) {
6408   if (HasBeenSetUp()) return false;
6409
6410   if (FLAG_stress_compaction) {
6411     // This will cause more frequent GCs when stressing.
6412     max_semispace_size_ = Page::kPageSize;
6413   }
6414
6415   if (max_semispace_size > 0) {
6416     if (max_semispace_size < Page::kPageSize) {
6417       max_semispace_size = Page::kPageSize;
6418       if (FLAG_trace_gc) {
6419         PrintPID("Max semispace size cannot be less than %dkbytes\n",
6420                  Page::kPageSize >> 10);
6421       }
6422     }
6423     max_semispace_size_ = max_semispace_size;
6424   }
6425
6426   if (Snapshot::IsEnabled()) {
6427     // If we are using a snapshot we always reserve the default amount
6428     // of memory for each semispace because code in the snapshot has
6429     // write-barrier code that relies on the size and alignment of new
6430     // space.  We therefore cannot use a larger max semispace size
6431     // than the default reserved semispace size.
6432     if (max_semispace_size_ > reserved_semispace_size_) {
6433       max_semispace_size_ = reserved_semispace_size_;
6434       if (FLAG_trace_gc) {
6435         PrintPID("Max semispace size cannot be more than %dkbytes\n",
6436                  reserved_semispace_size_ >> 10);
6437       }
6438     }
6439   } else {
6440     // If we are not using snapshots we reserve space for the actual
6441     // max semispace size.
6442     reserved_semispace_size_ = max_semispace_size_;
6443   }
6444
6445   if (max_old_gen_size > 0) max_old_generation_size_ = max_old_gen_size;
6446   if (max_executable_size > 0) {
6447     max_executable_size_ = RoundUp(max_executable_size, Page::kPageSize);
6448   }
6449
6450   // The max executable size must be less than or equal to the max old
6451   // generation size.
6452   if (max_executable_size_ > max_old_generation_size_) {
6453     max_executable_size_ = max_old_generation_size_;
6454   }
6455
6456   // The new space size must be a power of two to support single-bit testing
6457   // for containment.
6458   max_semispace_size_ = RoundUpToPowerOf2(max_semispace_size_);
6459   reserved_semispace_size_ = RoundUpToPowerOf2(reserved_semispace_size_);
6460   initial_semispace_size_ = Min(initial_semispace_size_, max_semispace_size_);
6461   external_allocation_limit_ = 16 * max_semispace_size_;
6462
6463   // The old generation is paged and needs at least one page for each space.
6464   int paged_space_count = LAST_PAGED_SPACE - FIRST_PAGED_SPACE + 1;
6465   max_old_generation_size_ = Max(static_cast<intptr_t>(paged_space_count *
6466                                                        Page::kPageSize),
6467                                  RoundUp(max_old_generation_size_,
6468                                          Page::kPageSize));
6469
6470   configured_ = true;
6471   return true;
6472 }
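
// A minimal sketch of how an embedder typically reaches ConfigureHeap,
// assuming the public v8::ResourceConstraints / v8::SetResourceConstraints
// API of this V8 version (concrete sizes elided):
//
//   v8::ResourceConstraints constraints;
//   constraints.set_max_young_space_size(...);
//   constraints.set_max_old_space_size(...);
//   constraints.set_max_executable_size(...);
//   v8::SetResourceConstraints(&constraints);
//
// These values end up in ConfigureHeap above; ConfigureHeapDefault below is
// used instead when the embedder configures nothing.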
6473
6474
6475 bool Heap::ConfigureHeapDefault() {
6476   return ConfigureHeap(static_cast<intptr_t>(FLAG_max_new_space_size / 2) * KB,
6477                        static_cast<intptr_t>(FLAG_max_old_space_size) * MB,
6478                        static_cast<intptr_t>(FLAG_max_executable_size) * MB);
6479 }
6480
6481
6482 void Heap::RecordStats(HeapStats* stats, bool take_snapshot) {
6483   *stats->start_marker = HeapStats::kStartMarker;
6484   *stats->end_marker = HeapStats::kEndMarker;
6485   *stats->new_space_size = new_space_.SizeAsInt();
6486   *stats->new_space_capacity = static_cast<int>(new_space_.Capacity());
6487   *stats->old_pointer_space_size = old_pointer_space_->SizeOfObjects();
6488   *stats->old_pointer_space_capacity = old_pointer_space_->Capacity();
6489   *stats->old_data_space_size = old_data_space_->SizeOfObjects();
6490   *stats->old_data_space_capacity = old_data_space_->Capacity();
6491   *stats->code_space_size = code_space_->SizeOfObjects();
6492   *stats->code_space_capacity = code_space_->Capacity();
6493   *stats->map_space_size = map_space_->SizeOfObjects();
6494   *stats->map_space_capacity = map_space_->Capacity();
6495   *stats->cell_space_size = cell_space_->SizeOfObjects();
6496   *stats->cell_space_capacity = cell_space_->Capacity();
6497   *stats->lo_space_size = lo_space_->Size();
6498   isolate_->global_handles()->RecordStats(stats);
6499   *stats->memory_allocator_size = isolate()->memory_allocator()->Size();
6500   *stats->memory_allocator_capacity =
6501       isolate()->memory_allocator()->Size() +
6502       isolate()->memory_allocator()->Available();
6503   *stats->os_error = OS::GetLastError();
6505   if (take_snapshot) {
6506     HeapIterator iterator(this);
6507     for (HeapObject* obj = iterator.next();
6508          obj != NULL;
6509          obj = iterator.next()) {
6510       InstanceType type = obj->map()->instance_type();
6511       ASSERT(0 <= type && type <= LAST_TYPE);
6512       stats->objects_per_type[type]++;
6513       stats->size_per_type[type] += obj->Size();
6514     }
6515   }
6516 }
6517
6518
6519 intptr_t Heap::PromotedSpaceSizeOfObjects() {
6520   return old_pointer_space_->SizeOfObjects()
6521       + old_data_space_->SizeOfObjects()
6522       + code_space_->SizeOfObjects()
6523       + map_space_->SizeOfObjects()
6524       + cell_space_->SizeOfObjects()
6525       + lo_space_->SizeOfObjects();
6526 }
6527
6528
6529 intptr_t Heap::PromotedExternalMemorySize() {
6530   if (amount_of_external_allocated_memory_
6531       <= amount_of_external_allocated_memory_at_last_global_gc_) return 0;
6532   return amount_of_external_allocated_memory_
6533       - amount_of_external_allocated_memory_at_last_global_gc_;
6534 }
6535
6536
6537 V8_DECLARE_ONCE(initialize_gc_once);
6538
6539 static void InitializeGCOnce() {
6540   InitializeScavengingVisitorsTables();
6541   NewSpaceScavenger::Initialize();
6542   MarkCompactCollector::Initialize();
6543 }
6544
6545 bool Heap::SetUp() {
6546 #ifdef DEBUG
6547   allocation_timeout_ = FLAG_gc_interval;
6548 #endif
6549
6550   // Initialize heap spaces and initial maps and objects. Whenever something
6551   // goes wrong, just return false. The caller should check the results and
6552   // call Heap::TearDown() to release allocated memory.
6553   //
6554   // If the heap is not yet configured (e.g. through the API), configure it.
6555   // Configuration is based on the flags new-space-size (really the semispace
6556   // size) and old-space-size if set or the initial values of semispace_size_
6557   // and old_generation_size_ otherwise.
6558   if (!configured_) {
6559     if (!ConfigureHeapDefault()) return false;
6560   }
6561
6562   CallOnce(&initialize_gc_once, &InitializeGCOnce);
6563
6564   MarkMapPointersAsEncoded(false);
6565
6566   // Set up memory allocator.
6567   if (!isolate_->memory_allocator()->SetUp(MaxReserved(), MaxExecutableSize()))
6568       return false;
6569
6570   // Set up new space.
6571   if (!new_space_.SetUp(reserved_semispace_size_, max_semispace_size_)) {
6572     return false;
6573   }
6574
6575   // Initialize old pointer space.
6576   old_pointer_space_ =
6577       new OldSpace(this,
6578                    max_old_generation_size_,
6579                    OLD_POINTER_SPACE,
6580                    NOT_EXECUTABLE);
6581   if (old_pointer_space_ == NULL) return false;
6582   if (!old_pointer_space_->SetUp()) return false;
6583
6584   // Initialize old data space.
6585   old_data_space_ =
6586       new OldSpace(this,
6587                    max_old_generation_size_,
6588                    OLD_DATA_SPACE,
6589                    NOT_EXECUTABLE);
6590   if (old_data_space_ == NULL) return false;
6591   if (!old_data_space_->SetUp()) return false;
6592
6593   // Initialize the code space, set its maximum capacity to the old
6594   // generation size. It needs executable memory.
6595   // On 64-bit platform(s), we put all code objects in a 2 GB range of
6596   // virtual address space, so that they can call each other with near calls.
6597   if (code_range_size_ > 0) {
6598     if (!isolate_->code_range()->SetUp(code_range_size_)) {
6599       return false;
6600     }
6601   }
6602
6603   code_space_ =
6604       new OldSpace(this, max_old_generation_size_, CODE_SPACE, EXECUTABLE);
6605   if (code_space_ == NULL) return false;
6606   if (!code_space_->SetUp()) return false;
6607
6608   // Initialize map space.
6609   map_space_ = new MapSpace(this, max_old_generation_size_, MAP_SPACE);
6610   if (map_space_ == NULL) return false;
6611   if (!map_space_->SetUp()) return false;
6612
6613   // Initialize global property cell space.
6614   cell_space_ = new CellSpace(this, max_old_generation_size_, CELL_SPACE);
6615   if (cell_space_ == NULL) return false;
6616   if (!cell_space_->SetUp()) return false;
6617
6618   // The large object space may contain code or data.  We set the memory
6619   // to be non-executable here for safety, but this means we need to enable it
6620   // explicitly when allocating large code objects.
6621   lo_space_ = new LargeObjectSpace(this, max_old_generation_size_, LO_SPACE);
6622   if (lo_space_ == NULL) return false;
6623   if (!lo_space_->SetUp()) return false;
6624
6625   // Set up the seed that is used to randomize the string hash function.
6626   ASSERT(hash_seed() == 0);
6627   if (FLAG_randomize_hashes) {
6628     if (FLAG_hash_seed == 0) {
6629       set_hash_seed(
6630           Smi::FromInt(V8::RandomPrivate(isolate()) & 0x3fffffff));
6631     } else {
6632       set_hash_seed(Smi::FromInt(FLAG_hash_seed));
6633     }
6634   }
6635
6636   LOG(isolate_, IntPtrTEvent("heap-capacity", Capacity()));
6637   LOG(isolate_, IntPtrTEvent("heap-available", Available()));
6638
6639   store_buffer()->SetUp();
6640
6641   if (FLAG_parallel_recompilation) relocation_mutex_ = OS::CreateMutex();
6642 #ifdef DEBUG
6643   relocation_mutex_locked_by_optimizer_thread_ = false;
6644 #endif  // DEBUG
6645
6646   return true;
6647 }
6648
6649 bool Heap::CreateHeapObjects() {
6650   // Create initial maps.
6651   if (!CreateInitialMaps()) return false;
6652   if (!CreateApiObjects()) return false;
6653
6654   // Create initial objects
6655   if (!CreateInitialObjects()) return false;
6656
6657   native_contexts_list_ = undefined_value();
6658   return true;
6659 }
6660
6661
6662 void Heap::SetStackLimits() {
6663   ASSERT(isolate_ != NULL);
6664   ASSERT(isolate_ == isolate());
6665   // On 64-bit machines, pointers are generally out of range of Smis.  We write
6666   // something that looks like an out-of-range Smi to the GC.
6667
6668   // Set up the special root array entries containing the stack limits.
6669   // These are actually addresses, but the tag makes the GC ignore it.
6670   roots_[kStackLimitRootIndex] =
6671       reinterpret_cast<Object*>(
6672           (isolate_->stack_guard()->jslimit() & ~kSmiTagMask) | kSmiTag);
6673   roots_[kRealStackLimitRootIndex] =
6674       reinterpret_cast<Object*>(
6675           (isolate_->stack_guard()->real_jslimit() & ~kSmiTagMask) | kSmiTag);
6676 }
6677
6678
6679 void Heap::TearDown() {
6680 #ifdef VERIFY_HEAP
6681   if (FLAG_verify_heap) {
6682     Verify();
6683   }
6684 #endif
6685
6686   if (FLAG_print_cumulative_gc_stat) {
6687     PrintF("\n");
6688     PrintF("gc_count=%d ", gc_count_);
6689     PrintF("mark_sweep_count=%d ", ms_count_);
6690     PrintF("max_gc_pause=%.1f ", get_max_gc_pause());
6691     PrintF("total_gc_time=%.1f ", total_gc_time_ms_);
6692     PrintF("min_in_mutator=%.1f ", get_min_in_mutator());
6693     PrintF("max_alive_after_gc=%" V8_PTR_PREFIX "d ",
6694            get_max_alive_after_gc());
6695     PrintF("total_marking_time=%.1f ", marking_time());
6696     PrintF("total_sweeping_time=%.1f ", sweeping_time());
6697     PrintF("\n\n");
6698   }
6699
6700   isolate_->global_handles()->TearDown();
6701
6702   external_string_table_.TearDown();
6703
6704   error_object_list_.TearDown();
6705
6706   new_space_.TearDown();
6707
6708   if (old_pointer_space_ != NULL) {
6709     old_pointer_space_->TearDown();
6710     delete old_pointer_space_;
6711     old_pointer_space_ = NULL;
6712   }
6713
6714   if (old_data_space_ != NULL) {
6715     old_data_space_->TearDown();
6716     delete old_data_space_;
6717     old_data_space_ = NULL;
6718   }
6719
6720   if (code_space_ != NULL) {
6721     code_space_->TearDown();
6722     delete code_space_;
6723     code_space_ = NULL;
6724   }
6725
6726   if (map_space_ != NULL) {
6727     map_space_->TearDown();
6728     delete map_space_;
6729     map_space_ = NULL;
6730   }
6731
6732   if (cell_space_ != NULL) {
6733     cell_space_->TearDown();
6734     delete cell_space_;
6735     cell_space_ = NULL;
6736   }
6737
6738   if (lo_space_ != NULL) {
6739     lo_space_->TearDown();
6740     delete lo_space_;
6741     lo_space_ = NULL;
6742   }
6743
6744   store_buffer()->TearDown();
6745   incremental_marking()->TearDown();
6746
6747   isolate_->memory_allocator()->TearDown();
6748
6749   delete relocation_mutex_;
6750 }
6751
6752
6753 void Heap::AddGCPrologueCallback(GCPrologueCallback callback, GCType gc_type) {
6754   ASSERT(callback != NULL);
6755   GCPrologueCallbackPair pair(callback, gc_type);
6756   ASSERT(!gc_prologue_callbacks_.Contains(pair));
6757   return gc_prologue_callbacks_.Add(pair);
6758 }
6759
6760
6761 void Heap::RemoveGCPrologueCallback(GCPrologueCallback callback) {
6762   ASSERT(callback != NULL);
6763   for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
6764     if (gc_prologue_callbacks_[i].callback == callback) {
6765       gc_prologue_callbacks_.Remove(i);
6766       return;
6767     }
6768   }
6769   UNREACHABLE();
6770 }
6771
6772
6773 void Heap::AddGCEpilogueCallback(GCEpilogueCallback callback, GCType gc_type) {
6774   ASSERT(callback != NULL);
6775   GCEpilogueCallbackPair pair(callback, gc_type);
6776   ASSERT(!gc_epilogue_callbacks_.Contains(pair));
6777   return gc_epilogue_callbacks_.Add(pair);
6778 }
6779
6780
6781 void Heap::RemoveGCEpilogueCallback(GCEpilogueCallback callback) {
6782   ASSERT(callback != NULL);
6783   for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
6784     if (gc_epilogue_callbacks_[i].callback == callback) {
6785       gc_epilogue_callbacks_.Remove(i);
6786       return;
6787     }
6788   }
6789   UNREACHABLE();
6790 }
6791
6792
6793 #ifdef DEBUG
6794
6795 class PrintHandleVisitor: public ObjectVisitor {
6796  public:
6797   void VisitPointers(Object** start, Object** end) {
6798     for (Object** p = start; p < end; p++)
6799       PrintF("  handle %p to %p\n",
6800              reinterpret_cast<void*>(p),
6801              reinterpret_cast<void*>(*p));
6802   }
6803 };
6804
6805 void Heap::PrintHandles() {
6806   PrintF("Handles:\n");
6807   PrintHandleVisitor v;
6808   isolate_->handle_scope_implementer()->Iterate(&v);
6809 }
6810
6811 #endif
6812
6813
6814 Space* AllSpaces::next() {
6815   switch (counter_++) {
6816     case NEW_SPACE:
6817       return heap_->new_space();
6818     case OLD_POINTER_SPACE:
6819       return heap_->old_pointer_space();
6820     case OLD_DATA_SPACE:
6821       return heap_->old_data_space();
6822     case CODE_SPACE:
6823       return heap_->code_space();
6824     case MAP_SPACE:
6825       return heap_->map_space();
6826     case CELL_SPACE:
6827       return heap_->cell_space();
6828     case LO_SPACE:
6829       return heap_->lo_space();
6830     default:
6831       return NULL;
6832   }
6833 }
6834
6835
6836 PagedSpace* PagedSpaces::next() {
6837   switch (counter_++) {
6838     case OLD_POINTER_SPACE:
6839       return heap_->old_pointer_space();
6840     case OLD_DATA_SPACE:
6841       return heap_->old_data_space();
6842     case CODE_SPACE:
6843       return heap_->code_space();
6844     case MAP_SPACE:
6845       return heap_->map_space();
6846     case CELL_SPACE:
6847       return heap_->cell_space();
6848     default:
6849       return NULL;
6850   }
6851 }
6852
6853
6855 OldSpace* OldSpaces::next() {
6856   switch (counter_++) {
6857     case OLD_POINTER_SPACE:
6858       return heap_->old_pointer_space();
6859     case OLD_DATA_SPACE:
6860       return heap_->old_data_space();
6861     case CODE_SPACE:
6862       return heap_->code_space();
6863     default:
6864       return NULL;
6865   }
6866 }
6867
6868
6869 SpaceIterator::SpaceIterator(Heap* heap)
6870     : heap_(heap),
6871       current_space_(FIRST_SPACE),
6872       iterator_(NULL),
6873       size_func_(NULL) {
6874 }
6875
6876
6877 SpaceIterator::SpaceIterator(Heap* heap, HeapObjectCallback size_func)
6878     : heap_(heap),
6879       current_space_(FIRST_SPACE),
6880       iterator_(NULL),
6881       size_func_(size_func) {
6882 }
6883
6884
6885 SpaceIterator::~SpaceIterator() {
6886   // Delete active iterator if any.
6887   delete iterator_;
6888 }
6889
6890
6891 bool SpaceIterator::has_next() {
6892   // Iterate until no more spaces.
6893   return current_space_ != LAST_SPACE;
6894 }
6895
6896
6897 ObjectIterator* SpaceIterator::next() {
6898   if (iterator_ != NULL) {
6899     delete iterator_;
6900     iterator_ = NULL;
6901     // Move to the next space
6902     current_space_++;
6903     if (current_space_ > LAST_SPACE) {
6904       return NULL;
6905     }
6906   }
6907
6908   // Return iterator for the new current space.
6909   return CreateIterator();
6910 }
6911
6912
6913 // Create an iterator for the space to iterate.
6914 ObjectIterator* SpaceIterator::CreateIterator() {
6915   ASSERT(iterator_ == NULL);
6916
6917   switch (current_space_) {
6918     case NEW_SPACE:
6919       iterator_ = new SemiSpaceIterator(heap_->new_space(), size_func_);
6920       break;
6921     case OLD_POINTER_SPACE:
6922       iterator_ =
6923           new HeapObjectIterator(heap_->old_pointer_space(), size_func_);
6924       break;
6925     case OLD_DATA_SPACE:
6926       iterator_ = new HeapObjectIterator(heap_->old_data_space(), size_func_);
6927       break;
6928     case CODE_SPACE:
6929       iterator_ = new HeapObjectIterator(heap_->code_space(), size_func_);
6930       break;
6931     case MAP_SPACE:
6932       iterator_ = new HeapObjectIterator(heap_->map_space(), size_func_);
6933       break;
6934     case CELL_SPACE:
6935       iterator_ = new HeapObjectIterator(heap_->cell_space(), size_func_);
6936       break;
6937     case LO_SPACE:
6938       iterator_ = new LargeObjectIterator(heap_->lo_space(), size_func_);
6939       break;
6940   }
6941
6942   // Return the newly allocated iterator.
6943   ASSERT(iterator_ != NULL);
6944   return iterator_;
6945 }
6946
6947
6948 class HeapObjectsFilter {
6949  public:
6950   virtual ~HeapObjectsFilter() {}
6951   virtual bool SkipObject(HeapObject* object) = 0;
6952 };
6953
6954
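// Filter that skips objects not reachable from the roots: the constructor
// marks everything reachable (reusing the mark-compact mark bits),
// SkipObject() then rejects unmarked objects, and the destructor clears the
// mark bits again. Allocation is disallowed while the filter is alive.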
6955 class UnreachableObjectsFilter : public HeapObjectsFilter {
6956  public:
6957   UnreachableObjectsFilter() {
6958     MarkReachableObjects();
6959   }
6960
6961   ~UnreachableObjectsFilter() {
6962     Isolate::Current()->heap()->mark_compact_collector()->ClearMarkbits();
6963   }
6964
6965   bool SkipObject(HeapObject* object) {
6966     MarkBit mark_bit = Marking::MarkBitFrom(object);
6967     return !mark_bit.Get();
6968   }
6969
6970  private:
6971   class MarkingVisitor : public ObjectVisitor {
6972    public:
6973     MarkingVisitor() : marking_stack_(10) {}
6974
6975     void VisitPointers(Object** start, Object** end) {
6976       for (Object** p = start; p < end; p++) {
6977         if (!(*p)->IsHeapObject()) continue;
6978         HeapObject* obj = HeapObject::cast(*p);
6979         MarkBit mark_bit = Marking::MarkBitFrom(obj);
6980         if (!mark_bit.Get()) {
6981           mark_bit.Set();
6982           marking_stack_.Add(obj);
6983         }
6984       }
6985     }
6986
6987     void TransitiveClosure() {
6988       while (!marking_stack_.is_empty()) {
6989         HeapObject* obj = marking_stack_.RemoveLast();
6990         obj->Iterate(this);
6991       }
6992     }
6993
6994    private:
6995     List<HeapObject*> marking_stack_;
6996   };
6997
6998   void MarkReachableObjects() {
6999     Heap* heap = Isolate::Current()->heap();
7000     MarkingVisitor visitor;
7001     heap->IterateRoots(&visitor, VISIT_ALL);
7002     visitor.TransitiveClosure();
7003   }
7004
7005   AssertNoAllocation no_alloc;
7006 };
7007
7008
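// HeapIterator visits every object in every space. The intended usage
// pattern is the one already used by Heap::RecordStats above:
//
//   HeapIterator iterator(heap);
//   for (HeapObject* obj = iterator.next();
//        obj != NULL;
//        obj = iterator.next()) {
//     // ... examine obj ...
//   }
//
// With kFilterUnreachable, objects not reachable from the roots are skipped.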
7009 HeapIterator::HeapIterator(Heap* heap)
7010     : heap_(heap),
7011       filtering_(HeapIterator::kNoFiltering),
7012       filter_(NULL) {
7013   Init();
7014 }
7015
7016
7017 HeapIterator::HeapIterator(Heap* heap,
7018                            HeapIterator::HeapObjectsFiltering filtering)
7019     : heap_(heap),
7020       filtering_(filtering),
7021       filter_(NULL) {
7022   Init();
7023 }
7024
7025
7026 HeapIterator::~HeapIterator() {
7027   Shutdown();
7028 }
7029
7030
7031 void HeapIterator::Init() {
7032   // Start the iteration.
7033   space_iterator_ = new SpaceIterator(heap_);
7034   switch (filtering_) {
7035     case kFilterUnreachable:
7036       filter_ = new UnreachableObjectsFilter;
7037       break;
7038     default:
7039       break;
7040   }
7041   object_iterator_ = space_iterator_->next();
7042 }
7043
7044
7045 void HeapIterator::Shutdown() {
7046 #ifdef DEBUG
7047   // Assert that in filtering mode we have iterated through all
7048   // objects. Otherwise, the heap will be left in an inconsistent state.
7049   if (filtering_ != kNoFiltering) {
7050     ASSERT(object_iterator_ == NULL);
7051   }
7052 #endif
7053   // Make sure the last iterator is deallocated.
7054   delete space_iterator_;
7055   space_iterator_ = NULL;
7056   object_iterator_ = NULL;
7057   delete filter_;
7058   filter_ = NULL;
7059 }
7060
7061
7062 HeapObject* HeapIterator::next() {
7063   if (filter_ == NULL) return NextObject();
7064
7065   HeapObject* obj = NextObject();
7066   while (obj != NULL && filter_->SkipObject(obj)) obj = NextObject();
7067   return obj;
7068 }
7069
7070
7071 HeapObject* HeapIterator::NextObject() {
7072   // No iterator means we are done.
7073   if (object_iterator_ == NULL) return NULL;
7074
7075   if (HeapObject* obj = object_iterator_->next_object()) {
7076     // If the current iterator has more objects we are fine.
7077     return obj;
7078   } else {
7079     // Go through the spaces looking for one that has objects.
7080     while (space_iterator_->has_next()) {
7081       object_iterator_ = space_iterator_->next();
7082       if (HeapObject* obj = object_iterator_->next_object()) {
7083         return obj;
7084       }
7085     }
7086   }
7087   // Done with the last space.
7088   object_iterator_ = NULL;
7089   return NULL;
7090 }
7091
7092
7093 void HeapIterator::reset() {
7094   // Restart the iterator.
7095   Shutdown();
7096   Init();
7097 }
7098
7099
7100 #ifdef DEBUG
7101
7102 Object* const PathTracer::kAnyGlobalObject = reinterpret_cast<Object*>(NULL);
7103
7104 class PathTracer::MarkVisitor: public ObjectVisitor {
7105  public:
7106   explicit MarkVisitor(PathTracer* tracer) : tracer_(tracer) {}
7107   void VisitPointers(Object** start, Object** end) {
7108     // Scan all HeapObject pointers in [start, end)
7109     for (Object** p = start; !tracer_->found() && (p < end); p++) {
7110       if ((*p)->IsHeapObject())
7111         tracer_->MarkRecursively(p, this);
7112     }
7113   }
7114
7115  private:
7116   PathTracer* tracer_;
7117 };
7118
7119
7120 class PathTracer::UnmarkVisitor: public ObjectVisitor {
7121  public:
7122   explicit UnmarkVisitor(PathTracer* tracer) : tracer_(tracer) {}
7123   void VisitPointers(Object** start, Object** end) {
7124     // Scan all HeapObject pointers in [start, end)
7125     for (Object** p = start; p < end; p++) {
7126       if ((*p)->IsHeapObject())
7127         tracer_->UnmarkRecursively(p, this);
7128     }
7129   }
7130
7131  private:
7132   PathTracer* tracer_;
7133 };
7134
7135
7136 void PathTracer::VisitPointers(Object** start, Object** end) {
7137   bool done = ((what_to_find_ == FIND_FIRST) && found_target_);
7138   // Visit all HeapObject pointers in [start, end)
7139   for (Object** p = start; !done && (p < end); p++) {
7140     if ((*p)->IsHeapObject()) {
7141       TracePathFrom(p);
7142       done = ((what_to_find_ == FIND_FIRST) && found_target_);
7143     }
7144   }
7145 }
7146
7147
7148 void PathTracer::Reset() {
7149   found_target_ = false;
7150   object_stack_.Clear();
7151 }
7152
7153
7154 void PathTracer::TracePathFrom(Object** root) {
7155   ASSERT((search_target_ == kAnyGlobalObject) ||
7156          search_target_->IsHeapObject());
7157   found_target_in_trace_ = false;
7158   Reset();
7159
7160   MarkVisitor mark_visitor(this);
7161   MarkRecursively(root, &mark_visitor);
7162
7163   UnmarkVisitor unmark_visitor(this);
7164   UnmarkRecursively(root, &unmark_visitor);
7165
7166   ProcessResults();
7167 }
7168
7169
7170 static bool SafeIsNativeContext(HeapObject* obj) {
7171   return obj->map() == obj->GetHeap()->raw_unchecked_native_context_map();
7172 }
7173
7174
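// Marks an object as visited by tagging its map word: the map pointer is
// replaced with (map address + kMarkTag), which no longer looks like a heap
// object. UnmarkRecursively below restores the original map pointer.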
7175 void PathTracer::MarkRecursively(Object** p, MarkVisitor* mark_visitor) {
7176   if (!(*p)->IsHeapObject()) return;
7177
7178   HeapObject* obj = HeapObject::cast(*p);
7179
7180   Object* map = obj->map();
7181
7182   if (!map->IsHeapObject()) return;  // visited before
7183
7184   if (found_target_in_trace_) return;  // stop if target found
7185   object_stack_.Add(obj);
7186   if (((search_target_ == kAnyGlobalObject) && obj->IsJSGlobalObject()) ||
7187       (obj == search_target_)) {
7188     found_target_in_trace_ = true;
7189     found_target_ = true;
7190     return;
7191   }
7192
7193   bool is_native_context = SafeIsNativeContext(obj);
7194
7195   // not visited yet
7196   Map* map_p = reinterpret_cast<Map*>(HeapObject::cast(map));
7197
7198   Address map_addr = map_p->address();
7199
7200   obj->set_map_no_write_barrier(reinterpret_cast<Map*>(map_addr + kMarkTag));
7201
7202   // Scan the object body.
7203   if (is_native_context && (visit_mode_ == VISIT_ONLY_STRONG)) {
7204     // This is specialized to scan Contexts properly.
7205     Object** start = reinterpret_cast<Object**>(obj->address() +
7206                                                 Context::kHeaderSize);
7207     Object** end = reinterpret_cast<Object**>(obj->address() +
7208         Context::kHeaderSize + Context::FIRST_WEAK_SLOT * kPointerSize);
7209     mark_visitor->VisitPointers(start, end);
7210   } else {
7211     obj->IterateBody(map_p->instance_type(),
7212                      obj->SizeFromMap(map_p),
7213                      mark_visitor);
7214   }
7215
7216   // Scan the map after the body because the body is a lot more interesting
7217   // when doing leak detection.
7218   MarkRecursively(&map, mark_visitor);
7219
7220   if (!found_target_in_trace_)  // don't pop if found the target
7221     object_stack_.RemoveLast();
7222 }
7223
7224
7225 void PathTracer::UnmarkRecursively(Object** p, UnmarkVisitor* unmark_visitor) {
7226   if (!(*p)->IsHeapObject()) return;
7227
7228   HeapObject* obj = HeapObject::cast(*p);
7229
7230   Object* map = obj->map();
7231
7232   if (map->IsHeapObject()) return;  // unmarked already
7233
7234   Address map_addr = reinterpret_cast<Address>(map);
7235
7236   map_addr -= kMarkTag;
7237
7238   ASSERT_TAG_ALIGNED(map_addr);
7239
7240   HeapObject* map_p = HeapObject::FromAddress(map_addr);
7241
7242   obj->set_map_no_write_barrier(reinterpret_cast<Map*>(map_p));
7243
7244   UnmarkRecursively(reinterpret_cast<Object**>(&map_p), unmark_visitor);
7245
7246   obj->IterateBody(Map::cast(map_p)->instance_type(),
7247                    obj->SizeFromMap(Map::cast(map_p)),
7248                    unmark_visitor);
7249 }
7250
7251
7252 void PathTracer::ProcessResults() {
7253   if (found_target_) {
7254     PrintF("=====================================\n");
7255     PrintF("====        Path to object       ====\n");
7256     PrintF("=====================================\n\n");
7257
7258     ASSERT(!object_stack_.is_empty());
7259     for (int i = 0; i < object_stack_.length(); i++) {
7260       if (i > 0) PrintF("\n     |\n     |\n     V\n\n");
7261       Object* obj = object_stack_[i];
7262       obj->Print();
7263     }
7264     PrintF("=====================================\n");
7265   }
7266 }
7267
7268
7269 // Triggers a depth-first traversal of reachable objects from the
7270 // given root object, finds a path to a specific heap object, and
7271 // prints it.
7272 void Heap::TracePathToObjectFrom(Object* target, Object* root) {
7273   PathTracer tracer(target, PathTracer::FIND_ALL, VISIT_ALL);
7274   tracer.VisitPointer(&root);
7275 }
7276
7277
7278 // Triggers a depth-first traversal of reachable objects from roots
7279 // and finds a path to a specific heap object and prints it.
7280 void Heap::TracePathToObject(Object* target) {
7281   PathTracer tracer(target, PathTracer::FIND_ALL, VISIT_ALL);
7282   IterateRoots(&tracer, VISIT_ONLY_STRONG);
7283 }
7284
7285
7286 // Triggers a depth-first traversal of reachable objects from roots
7287 // and finds a path to any global object and prints it. Useful for
7288 // determining the source for leaks of global objects.
7289 void Heap::TracePathToGlobal() {
7290   PathTracer tracer(PathTracer::kAnyGlobalObject,
7291                     PathTracer::FIND_ALL,
7292                     VISIT_ALL);
7293   IterateRoots(&tracer, VISIT_ONLY_STRONG);
7294 }
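// The tracing helpers above are intended for debugging only; a typical
// (hypothetical) use is to call them from a debugger while hunting a leak:
//   heap->TracePathToObject(suspected_object);  // path from the roots
//   heap->TracePathToGlobal();                  // why is a global still alive?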
7295 #endif
7296
7297
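// Sums, over all old spaces, the bytes that cannot currently be used for
// allocation: each space's Waste() plus its Available() bytes.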
7298 static intptr_t CountTotalHolesSize(Heap* heap) {
7299   intptr_t holes_size = 0;
7300   OldSpaces spaces(heap);
7301   for (OldSpace* space = spaces.next();
7302        space != NULL;
7303        space = spaces.next()) {
7304     holes_size += space->Waste() + space->Available();
7305   }
7306   return holes_size;
7307 }
7308
7309
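// The constructor captures heap state at the start of a GC: object and memory
// sizes, bytes allocated since the last GC, time spent in the mutator, hole
// sizes and incremental marking step statistics.  The destructor prints one
// trace line and updates cumulative statistics, depending on the tracing flags.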
7310 GCTracer::GCTracer(Heap* heap,
7311                    const char* gc_reason,
7312                    const char* collector_reason)
7313     : start_time_(0.0),
7314       start_object_size_(0),
7315       start_memory_size_(0),
7316       gc_count_(0),
7317       full_gc_count_(0),
7318       allocated_since_last_gc_(0),
7319       spent_in_mutator_(0),
7320       promoted_objects_size_(0),
7321       nodes_died_in_new_space_(0),
7322       nodes_copied_in_new_space_(0),
7323       nodes_promoted_(0),
7324       heap_(heap),
7325       gc_reason_(gc_reason),
7326       collector_reason_(collector_reason) {
7327   if (!FLAG_trace_gc && !FLAG_print_cumulative_gc_stat) return;
7328   start_time_ = OS::TimeCurrentMillis();
7329   start_object_size_ = heap_->SizeOfObjects();
7330   start_memory_size_ = heap_->isolate()->memory_allocator()->Size();
7331
7332   for (int i = 0; i < Scope::kNumberOfScopes; i++) {
7333     scopes_[i] = 0;
7334   }
7335
7336   in_free_list_or_wasted_before_gc_ = CountTotalHolesSize(heap);
7337
7338   allocated_since_last_gc_ =
7339       heap_->SizeOfObjects() - heap_->alive_after_last_gc_;
7340
7341   if (heap_->last_gc_end_timestamp_ > 0) {
7342     spent_in_mutator_ = Max(start_time_ - heap_->last_gc_end_timestamp_, 0.0);
7343   }
7344
7345   steps_count_ = heap_->incremental_marking()->steps_count();
7346   steps_took_ = heap_->incremental_marking()->steps_took();
7347   longest_step_ = heap_->incremental_marking()->longest_step();
7348   steps_count_since_last_gc_ =
7349       heap_->incremental_marking()->steps_count_since_last_gc();
7350   steps_took_since_last_gc_ =
7351       heap_->incremental_marking()->steps_took_since_last_gc();
7352 }
7353
7354
7355 GCTracer::~GCTracer() {
7356   // Print ONE trace line iff one of the tracing flags is set.
7357   if (!FLAG_trace_gc && !FLAG_print_cumulative_gc_stat) return;
7358
7359   bool first_gc = (heap_->last_gc_end_timestamp_ == 0);
7360
7361   heap_->alive_after_last_gc_ = heap_->SizeOfObjects();
7362   heap_->last_gc_end_timestamp_ = OS::TimeCurrentMillis();
7363
7364   double time = heap_->last_gc_end_timestamp_ - start_time_;
7365
7366   // Update cumulative GC statistics if required.
7367   if (FLAG_print_cumulative_gc_stat) {
7368     heap_->total_gc_time_ms_ += time;
7369     heap_->max_gc_pause_ = Max(heap_->max_gc_pause_, time);
7370     heap_->max_alive_after_gc_ = Max(heap_->max_alive_after_gc_,
7371                                      heap_->alive_after_last_gc_);
7372     if (!first_gc) {
7373       heap_->min_in_mutator_ = Min(heap_->min_in_mutator_,
7374                                    spent_in_mutator_);
7375     }
7376   } else if (FLAG_trace_gc_verbose) {
7377     heap_->total_gc_time_ms_ += time;
7378   }
7379
7380   if (collector_ == SCAVENGER && FLAG_trace_gc_ignore_scavenger) return;
7381
7382   heap_->AddMarkingTime(scopes_[Scope::MC_MARK]);
7383
7384   if (FLAG_print_cumulative_gc_stat && !FLAG_trace_gc) return;
7385   PrintPID("%8.0f ms: ", heap_->isolate()->time_millis_since_init());
7386
7387   if (!FLAG_trace_gc_nvp) {
7388     int external_time = static_cast<int>(scopes_[Scope::EXTERNAL]);
7389
7390     double end_memory_size_mb =
7391         static_cast<double>(heap_->isolate()->memory_allocator()->Size()) / MB;
7392
7393     PrintF("%s %.1f (%.1f) -> %.1f (%.1f) MB, ",
7394            CollectorString(),
7395            static_cast<double>(start_object_size_) / MB,
7396            static_cast<double>(start_memory_size_) / MB,
7397            SizeOfHeapObjects(),
7398            end_memory_size_mb);
7399
7400     if (external_time > 0) PrintF("%d / ", external_time);
7401     PrintF("%.1f ms", time);
7402     if (steps_count_ > 0) {
7403       if (collector_ == SCAVENGER) {
7404         PrintF(" (+ %.1f ms in %d steps since last GC)",
7405                steps_took_since_last_gc_,
7406                steps_count_since_last_gc_);
7407       } else {
7408         PrintF(" (+ %.1f ms in %d steps since start of marking, "
7409                    "biggest step %.1f ms)",
7410                steps_took_,
7411                steps_count_,
7412                longest_step_);
7413       }
7414     }
7415
7416     if (gc_reason_ != NULL) {
7417       PrintF(" [%s]", gc_reason_);
7418     }
7419
7420     if (collector_reason_ != NULL) {
7421       PrintF(" [%s]", collector_reason_);
7422     }
7423
7424     PrintF(".\n");
7425   } else {
7426     PrintF("pause=%.1f ", time);
7427     PrintF("mutator=%.1f ", spent_in_mutator_);
7428     PrintF("gc=");
7429     switch (collector_) {
7430       case SCAVENGER:
7431         PrintF("s");
7432         break;
7433       case MARK_COMPACTOR:
7434         PrintF("ms");
7435         break;
7436       default:
7437         UNREACHABLE();
7438     }
7439     PrintF(" ");
7440
7441     PrintF("external=%.1f ", scopes_[Scope::EXTERNAL]);
7442     PrintF("mark=%.1f ", scopes_[Scope::MC_MARK]);
7443     PrintF("sweep=%.1f ", scopes_[Scope::MC_SWEEP]);
7444     PrintF("sweepns=%.1f ", scopes_[Scope::MC_SWEEP_NEWSPACE]);
7445     PrintF("evacuate=%.1f ", scopes_[Scope::MC_EVACUATE_PAGES]);
7446     PrintF("new_new=%.1f ", scopes_[Scope::MC_UPDATE_NEW_TO_NEW_POINTERS]);
7447     PrintF("root_new=%.1f ", scopes_[Scope::MC_UPDATE_ROOT_TO_NEW_POINTERS]);
7448     PrintF("old_new=%.1f ", scopes_[Scope::MC_UPDATE_OLD_TO_NEW_POINTERS]);
7449     PrintF("compaction_ptrs=%.1f ",
7450         scopes_[Scope::MC_UPDATE_POINTERS_TO_EVACUATED]);
7451     PrintF("intracompaction_ptrs=%.1f ",
7452         scopes_[Scope::MC_UPDATE_POINTERS_BETWEEN_EVACUATED]);
7453     PrintF("misc_compaction=%.1f ", scopes_[Scope::MC_UPDATE_MISC_POINTERS]);
7454
7455     PrintF("total_size_before=%" V8_PTR_PREFIX "d ", start_object_size_);
7456     PrintF("total_size_after=%" V8_PTR_PREFIX "d ", heap_->SizeOfObjects());
7457     PrintF("holes_size_before=%" V8_PTR_PREFIX "d ",
7458            in_free_list_or_wasted_before_gc_);
7459     PrintF("holes_size_after=%" V8_PTR_PREFIX "d ", CountTotalHolesSize(heap_));
7460
7461     PrintF("allocated=%" V8_PTR_PREFIX "d ", allocated_since_last_gc_);
7462     PrintF("promoted=%" V8_PTR_PREFIX "d ", promoted_objects_size_);
7463     PrintF("nodes_died_in_new=%d ", nodes_died_in_new_space_);
7464     PrintF("nodes_copied_in_new=%d ", nodes_copied_in_new_space_);
7465     PrintF("nodes_promoted=%d ", nodes_promoted_);
7466
7467     if (collector_ == SCAVENGER) {
7468       PrintF("stepscount=%d ", steps_count_since_last_gc_);
7469       PrintF("stepstook=%.1f ", steps_took_since_last_gc_);
7470     } else {
7471       PrintF("stepscount=%d ", steps_count_);
7472       PrintF("stepstook=%.1f ", steps_took_);
7473       PrintF("longeststep=%.1f ", longest_step_);
7474     }
7475
7476     PrintF("\n");
7477   }
7478
7479   heap_->PrintShortHeapStatistics();
7480 }
7481
7482
7483 const char* GCTracer::CollectorString() {
7484   switch (collector_) {
7485     case SCAVENGER:
7486       return "Scavenge";
7487     case MARK_COMPACTOR:
7488       return "Mark-sweep";
7489   }
7490   return "Unknown GC";
7491 }
7492
7493
7494 int KeyedLookupCache::Hash(Map* map, Name* name) {
7495   // Uses only lower 32 bits if pointers are larger.
7496   uintptr_t addr_hash =
7497       static_cast<uint32_t>(reinterpret_cast<uintptr_t>(map)) >> kMapHashShift;
7498   return static_cast<uint32_t>((addr_hash ^ name->Hash()) & kCapacityMask);
7499 }
7500
7501
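// Probes the kEntriesPerBucket consecutive entries of the bucket selected by
// Hash(map, name) and returns the cached field offset, or kNotFound if the
// pair is not present.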
7502 int KeyedLookupCache::Lookup(Map* map, Name* name) {
7503   int index = (Hash(map, name) & kHashMask);
7504   for (int i = 0; i < kEntriesPerBucket; i++) {
7505     Key& key = keys_[index + i];
7506     if ((key.map == map) && key.name->Equals(name)) {
7507       return field_offsets_[index + i];
7508     }
7509   }
7510   return kNotFound;
7511 }
7512
7513
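// Inserts or refreshes an entry.  Names that are not unique are first replaced
// by their internalized version (or the update is skipped if none exists).
// A free slot in the bucket is reused if available; otherwise the bucket's
// entries are shifted down by one and the new entry takes position zero.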
7514 void KeyedLookupCache::Update(Map* map, Name* name, int field_offset) {
7515   if (!name->IsUniqueName()) {
7516     String* internalized_string;
7517     if (!HEAP->InternalizeStringIfExists(
7518             String::cast(name), &internalized_string)) {
7519       return;
7520     }
7521     name = internalized_string;
7522   }
7523   // This cache is cleared only between mark-compact passes, so we expect
7524   // it to contain only old-space names.
7525   ASSERT(!HEAP->InNewSpace(name));
7526
7527   int index = (Hash(map, name) & kHashMask);
7528   // After a GC there will be free slots, so we use them in order (this may
7529   // help to get the most frequently used one in position 0).
7530   for (int i = 0; i < kEntriesPerBucket; i++) {
7531     Key& key = keys_[index + i];
7532     Object* free_entry_indicator = NULL;
7533     if (key.map == free_entry_indicator) {
7534       key.map = map;
7535       key.name = name;
7536       field_offsets_[index + i] = field_offset;
7537       return;
7538     }
7539   }
7540   // No free entry was found in this bucket, so we shift all entries down by
7541   // one slot and put the new entry at position zero.
7542   for (int i = kEntriesPerBucket - 1; i > 0; i--) {
7543     Key& key = keys_[index + i];
7544     Key& key2 = keys_[index + i - 1];
7545     key = key2;
7546     field_offsets_[index + i] = field_offsets_[index + i - 1];
7547   }
7548
7549   // Write the new first entry.
7550   Key& key = keys_[index];
7551   key.map = map;
7552   key.name = name;
7553   field_offsets_[index] = field_offset;
7554 }
7555
7556
7557 void KeyedLookupCache::Clear() {
7558   for (int index = 0; index < kLength; index++) keys_[index].map = NULL;
7559 }
7560
7561
7562 void DescriptorLookupCache::Clear() {
7563   for (int index = 0; index < kLength; index++) keys_[index].source = NULL;
7564 }
7565
7566
7567 #ifdef DEBUG
7568 void Heap::GarbageCollectionGreedyCheck() {
7569   ASSERT(FLAG_gc_greedy);
7570   if (isolate_->bootstrapper()->IsActive()) return;
7571   if (disallow_allocation_failure()) return;
7572   CollectGarbage(NEW_SPACE);
7573 }
7574 #endif
7575
7576
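// Every entry starts out holding the bit pattern (0xffffffff, 0xffffffff),
// a NaN that the FPU never produces, so a freshly initialized entry cannot be
// mistaken for a cached result.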
7577 TranscendentalCache::SubCache::SubCache(Type t)
7578   : type_(t),
7579     isolate_(Isolate::Current()) {
7580   uint32_t in0 = 0xffffffffu;  // Bit-pattern for a NaN that isn't
7581   uint32_t in1 = 0xffffffffu;  // generated by the FPU.
7582   for (int i = 0; i < kCacheSize; i++) {
7583     elements_[i].in[0] = in0;
7584     elements_[i].in[1] = in1;
7585     elements_[i].output = NULL;
7586   }
7587 }
7588
7589
7590 void TranscendentalCache::Clear() {
7591   for (int i = 0; i < kNumberOfCaches; i++) {
7592     if (caches_[i] != NULL) {
7593       delete caches_[i];
7594       caches_[i] = NULL;
7595     }
7596   }
7597 }
7598
7599
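// Drops entries that have been cleared to the hole value and moves strings
// that are no longer in new space from new_space_strings_ to
// old_space_strings_; both lists are then compacted and trimmed.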
7600 void ExternalStringTable::CleanUp() {
7601   int last = 0;
7602   for (int i = 0; i < new_space_strings_.length(); ++i) {
7603     if (new_space_strings_[i] == heap_->the_hole_value()) {
7604       continue;
7605     }
7606     if (heap_->InNewSpace(new_space_strings_[i])) {
7607       new_space_strings_[last++] = new_space_strings_[i];
7608     } else {
7609       old_space_strings_.Add(new_space_strings_[i]);
7610     }
7611   }
7612   new_space_strings_.Rewind(last);
7613   new_space_strings_.Trim();
7614
7615   last = 0;
7616   for (int i = 0; i < old_space_strings_.length(); ++i) {
7617     if (old_space_strings_[i] == heap_->the_hole_value()) {
7618       continue;
7619     }
7620     ASSERT(!heap_->InNewSpace(old_space_strings_[i]));
7621     old_space_strings_[last++] = old_space_strings_[i];
7622   }
7623   old_space_strings_.Rewind(last);
7624   old_space_strings_.Trim();
7625 #ifdef VERIFY_HEAP
7626   if (FLAG_verify_heap) {
7627     Verify();
7628   }
7629 #endif
7630 }
7631
7632
7633 void ExternalStringTable::TearDown() {
7634   new_space_strings_.Free();
7635   old_space_strings_.Free();
7636 }
7637
7638
7639 // Update all references to objects that the GC has moved (forwarded).
7640 void ErrorObjectList::UpdateReferences() {
7641   for (int i = 0; i < list_.length(); i++) {
7642     HeapObject* object = HeapObject::cast(list_[i]);
7643     MapWord first_word = object->map_word();
7644     if (first_word.IsForwardingAddress()) {
7645       list_[i] = first_word.ToForwardingAddress();
7646     }
7647   }
7648 }
7649
7650
7651 // Unforwarded objects in new space are dead and removed from the list.
7652 void ErrorObjectList::UpdateReferencesInNewSpace(Heap* heap) {
7653   if (list_.is_empty()) return;
7654   if (!nested_) {
7655     int write_index = 0;
7656     for (int i = 0; i < list_.length(); i++) {
7657       MapWord first_word = HeapObject::cast(list_[i])->map_word();
7658       if (first_word.IsForwardingAddress()) {
7659         list_[write_index++] = first_word.ToForwardingAddress();
7660       }
7661     }
7662     list_.Rewind(write_index);
7663   } else {
7664     // If a GC is triggered during DeferredFormatStackTrace, we do not move
7665     // objects in the list; we just remove dead ones, so as not to confuse
7666     // the loop in DeferredFormatStackTrace.
7667     for (int i = 0; i < list_.length(); i++) {
7668       MapWord first_word = HeapObject::cast(list_[i])->map_word();
7669       list_[i] = first_word.IsForwardingAddress()
7670                      ? first_word.ToForwardingAddress()
7671                      : heap->the_hole_value();
7672     }
7673   }
7674 }
7675
7676
7677 void ErrorObjectList::DeferredFormatStackTrace(Isolate* isolate) {
7678   // If formatting the stack trace causes a GC, this method will be
7679   // recursively called.  In that case, skip the recursive call, since
7680   // the loop modifies the list while iterating over it.
7681   if (nested_ || list_.is_empty() || isolate->has_pending_exception()) return;
7682   nested_ = true;
7683   HandleScope scope(isolate);
7684   Handle<String> stack_key = isolate->factory()->stack_string();
7685   int write_index = 0;
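  // Format at most kBudgetPerGC stack traces per GC pause; objects skipped
  // because the budget ran out are kept in the list (via write_index) and
  // retried after a later GC.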
7686   int budget = kBudgetPerGC;
7687   for (int i = 0; i < list_.length(); i++) {
7688     Object* object = list_[i];
7689     JSFunction* getter_fun;
7690
7691     { AssertNoAllocation assert;
7692       // Skip possible holes in the list.
7693       if (object->IsTheHole()) continue;
7694       if (isolate->heap()->InNewSpace(object) || budget == 0) {
7695         list_[write_index++] = object;
7696         continue;
7697       }
7698
7699       // Check whether the stack property is backed by the original getter.
7700       LookupResult lookup(isolate);
7701       JSObject::cast(object)->LocalLookupRealNamedProperty(*stack_key, &lookup);
7702       if (!lookup.IsFound() || lookup.type() != CALLBACKS) continue;
7703       Object* callback = lookup.GetCallbackObject();
7704       if (!callback->IsAccessorPair()) continue;
7705       Object* getter_obj = AccessorPair::cast(callback)->getter();
7706       if (!getter_obj->IsJSFunction()) continue;
7707       getter_fun = JSFunction::cast(getter_obj);
7708       String* key = isolate->heap()->hidden_stack_trace_string();
7709       Object* value = getter_fun->GetHiddenProperty(key);
7710       if (key != value) continue;
7711     }
7712
7713     budget--;
7714     HandleScope scope(isolate);
7715     bool has_exception = false;
7716 #ifdef DEBUG
7717     Handle<Map> map(HeapObject::cast(object)->map(), isolate);
7718 #endif
7719     Handle<Object> object_handle(object, isolate);
7720     Handle<Object> getter_handle(getter_fun, isolate);
7721     Execution::Call(getter_handle, object_handle, 0, NULL, &has_exception);
7722     ASSERT(*map == HeapObject::cast(*object_handle)->map());
7723     if (has_exception) {
7724       // Hit an exception (most likely a stack overflow).
7725       // Wrap up this pass and retry after another GC.
7726       isolate->clear_pending_exception();
7727       // We use the handle since calling the getter might have caused a GC.
7728       list_[write_index++] = *object_handle;
7729       budget = 0;
7730     }
7731   }
7732   list_.Rewind(write_index);
7733   list_.Trim();
7734   nested_ = false;
7735 }
7736
7737
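// Error objects whose mark bit is not set are dead; they are replaced with
// the hole value so that DeferredFormatStackTrace skips them.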
7738 void ErrorObjectList::RemoveUnmarked(Heap* heap) {
7739   for (int i = 0; i < list_.length(); i++) {
7740     HeapObject* object = HeapObject::cast(list_[i]);
7741     if (!Marking::MarkBitFrom(object).Get()) {
7742       list_[i] = heap->the_hole_value();
7743     }
7744   }
7745 }
7746
7747
7748 void ErrorObjectList::TearDown() {
7749   list_.Free();
7750 }
7751
7752
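// Chunks are not released immediately; they are queued here and freed later
// by FreeQueuedChunks(), after the store buffer has been compacted and
// filtered of slots pointing into them.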
7753 void Heap::QueueMemoryChunkForFree(MemoryChunk* chunk) {
7754   chunk->set_next_chunk(chunks_queued_for_free_);
7755   chunks_queued_for_free_ = chunk;
7756 }
7757
7758
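// Releases all queued chunks.  Every chunk is flagged ABOUT_TO_BE_FREED
// (large chunks are additionally split into page-sized fake chunks, see the
// comment below), the store buffer is compacted and filtered against that
// flag, and only then is the memory handed back to the allocator.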
7759 void Heap::FreeQueuedChunks() {
7760   if (chunks_queued_for_free_ == NULL) return;
7761   MemoryChunk* next;
7762   MemoryChunk* chunk;
7763   for (chunk = chunks_queued_for_free_; chunk != NULL; chunk = next) {
7764     next = chunk->next_chunk();
7765     chunk->SetFlag(MemoryChunk::ABOUT_TO_BE_FREED);
7766
7767     if (chunk->owner()->identity() == LO_SPACE) {
7768       // StoreBuffer::Filter relies on MemoryChunk::FromAnyPointerAddress.
7769       // If FromAnyPointerAddress encounters a slot that belongs to a large
7770       // chunk queued for deletion, it will fail to find the chunk because it
7771       // tries to search the list of pages owned by the large object space,
7772       // and queued chunks have been detached from that list.  To work around
7773       // this we split the large chunk into normal kPageSize aligned pieces
7774       // and initialize the size, owner and flags fields of every piece.  If
7775       // FromAnyPointerAddress encounters a slot that belongs to one of these
7776       // smaller pieces, it will treat it as a slot on a normal Page.
7777       Address chunk_end = chunk->address() + chunk->size();
7778       MemoryChunk* inner = MemoryChunk::FromAddress(
7779           chunk->address() + Page::kPageSize);
7780       MemoryChunk* inner_last = MemoryChunk::FromAddress(chunk_end - 1);
7781       while (inner <= inner_last) {
7782         // The size of a large chunk is always a multiple of
7783         // OS::AllocateAlignment(), so there is always enough
7784         // space for a fake MemoryChunk header.
7785         Address area_end = Min(inner->address() + Page::kPageSize, chunk_end);
7786         // Guard against overflow.
7787         if (area_end < inner->address()) area_end = chunk_end;
7788         inner->SetArea(inner->address(), area_end);
7789         inner->set_size(Page::kPageSize);
7790         inner->set_owner(lo_space());
7791         inner->SetFlag(MemoryChunk::ABOUT_TO_BE_FREED);
7792         inner = MemoryChunk::FromAddress(
7793             inner->address() + Page::kPageSize);
7794       }
7795     }
7796   }
7797   isolate_->heap()->store_buffer()->Compact();
7798   isolate_->heap()->store_buffer()->Filter(MemoryChunk::ABOUT_TO_BE_FREED);
7799   for (chunk = chunks_queued_for_free_; chunk != NULL; chunk = next) {
7800     next = chunk->next_chunk();
7801     isolate_->memory_allocator()->Free(chunk);
7802   }
7803   chunks_queued_for_free_ = NULL;
7804 }
7805
7806
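// Keeps the addresses of the last kRememberedUnmappedPages unmapped pages in
// a small ring buffer; the addresses are XOR-tagged below so that they are
// recognizable in crash dumps.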
7807 void Heap::RememberUnmappedPage(Address page, bool compacted) {
7808   uintptr_t p = reinterpret_cast<uintptr_t>(page);
7809   // Tag the page pointer to make it findable in the dump file.
7810   if (compacted) {
7811     p ^= 0xc1ead & (Page::kPageSize - 1);  // Cleared.
7812   } else {
7813     p ^= 0x1d1ed & (Page::kPageSize - 1);  // I died.
7814   }
7815   remembered_unmapped_pages_[remembered_unmapped_pages_index_] =
7816       reinterpret_cast<Address>(p);
7817   remembered_unmapped_pages_index_++;
7818   remembered_unmapped_pages_index_ %= kRememberedUnmappedPages;
7819 }
7820
7821
7822 void Heap::ClearObjectStats(bool clear_last_time_stats) {
7823   memset(object_counts_, 0, sizeof(object_counts_));
7824   memset(object_sizes_, 0, sizeof(object_sizes_));
7825   if (clear_last_time_stats) {
7826     memset(object_counts_last_time_, 0, sizeof(object_counts_last_time_));
7827     memset(object_sizes_last_time_, 0, sizeof(object_sizes_last_time_));
7828   }
7829 }
7830
7831
7832 static LazyMutex checkpoint_object_stats_mutex = LAZY_MUTEX_INITIALIZER;
7833
7834
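// Publishes the object counts and sizes gathered since the last checkpoint:
// each counter is incremented by the current value and decremented by the
// value recorded at the previous checkpoint, after which the current values
// are snapshotted and cleared.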
7835 void Heap::CheckpointObjectStats() {
7836   ScopedLock lock(checkpoint_object_stats_mutex.Pointer());
7837   Counters* counters = isolate()->counters();
7838 #define ADJUST_LAST_TIME_OBJECT_COUNT(name)                                    \
7839   counters->count_of_##name()->Increment(                                      \
7840       static_cast<int>(object_counts_[name]));                                 \
7841   counters->count_of_##name()->Decrement(                                      \
7842       static_cast<int>(object_counts_last_time_[name]));                       \
7843   counters->size_of_##name()->Increment(                                       \
7844       static_cast<int>(object_sizes_[name]));                                  \
7845   counters->size_of_##name()->Decrement(                                       \
7846       static_cast<int>(object_sizes_last_time_[name]));
7847   INSTANCE_TYPE_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
7848 #undef ADJUST_LAST_TIME_OBJECT_COUNT
7849   int index;
7850 #define ADJUST_LAST_TIME_OBJECT_COUNT(name)               \
7851   index = FIRST_CODE_KIND_SUB_TYPE + Code::name;          \
7852   counters->count_of_CODE_TYPE_##name()->Increment(       \
7853       static_cast<int>(object_counts_[index]));           \
7854   counters->count_of_CODE_TYPE_##name()->Decrement(       \
7855       static_cast<int>(object_counts_last_time_[index])); \
7856   counters->size_of_CODE_TYPE_##name()->Increment(        \
7857       static_cast<int>(object_sizes_[index]));            \
7858   counters->size_of_CODE_TYPE_##name()->Decrement(        \
7859       static_cast<int>(object_sizes_last_time_[index]));
7860   CODE_KIND_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
7861 #undef ADJUST_LAST_TIME_OBJECT_COUNT
7862 #define ADJUST_LAST_TIME_OBJECT_COUNT(name)               \
7863   index = FIRST_FIXED_ARRAY_SUB_TYPE + name;              \
7864   counters->count_of_FIXED_ARRAY_##name()->Increment(     \
7865       static_cast<int>(object_counts_[index]));           \
7866   counters->count_of_FIXED_ARRAY_##name()->Decrement(     \
7867       static_cast<int>(object_counts_last_time_[index])); \
7868   counters->size_of_FIXED_ARRAY_##name()->Increment(      \
7869       static_cast<int>(object_sizes_[index]));            \
7870   counters->size_of_FIXED_ARRAY_##name()->Decrement(      \
7871       static_cast<int>(object_sizes_last_time_[index]));
7872   FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
7873 #undef ADJUST_LAST_TIME_OBJECT_COUNT
7874
7875   OS::MemCopy(object_counts_last_time_, object_counts_, sizeof(object_counts_));
7876   OS::MemCopy(object_sizes_last_time_, object_sizes_, sizeof(object_sizes_));
7877   ClearObjectStats();
7878 }
7879
7880
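// With parallel recompilation enabled, relocation must not race with the
// optimizing compiler thread, so the relocation mutex is acquired here (and,
// in debug builds, it is recorded whether the optimizer thread itself took
// the lock).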
7881 Heap::RelocationLock::RelocationLock(Heap* heap) : heap_(heap) {
7882   if (FLAG_parallel_recompilation) {
7883     heap_->relocation_mutex_->Lock();
7884 #ifdef DEBUG
7885     heap_->relocation_mutex_locked_by_optimizer_thread_ =
7886         heap_->isolate()->optimizing_compiler_thread()->IsOptimizerThread();
7887 #endif  // DEBUG
7888   }
7889 }
7890
7891 } }  // namespace v8::internal