v8: upgrade to v8 3.20.7
deps/v8/src/heap.cc  (platform/upstream/nodejs.git)
1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
4 // met:
5 //
6 //     * Redistributions of source code must retain the above copyright
7 //       notice, this list of conditions and the following disclaimer.
8 //     * Redistributions in binary form must reproduce the above
9 //       copyright notice, this list of conditions and the following
10 //       disclaimer in the documentation and/or other materials provided
11 //       with the distribution.
12 //     * Neither the name of Google Inc. nor the names of its
13 //       contributors may be used to endorse or promote products derived
14 //       from this software without specific prior written permission.
15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
28 #include "v8.h"
29
30 #include "accessors.h"
31 #include "api.h"
32 #include "bootstrapper.h"
33 #include "codegen.h"
34 #include "compilation-cache.h"
35 #include "cpu-profiler.h"
36 #include "debug.h"
37 #include "deoptimizer.h"
38 #include "global-handles.h"
39 #include "heap-profiler.h"
40 #include "incremental-marking.h"
41 #include "mark-compact.h"
42 #include "natives.h"
43 #include "objects-visiting.h"
44 #include "objects-visiting-inl.h"
45 #include "once.h"
46 #include "runtime-profiler.h"
47 #include "scopeinfo.h"
48 #include "snapshot.h"
49 #include "store-buffer.h"
50 #include "v8threads.h"
51 #include "v8utils.h"
52 #include "vm-state-inl.h"
53 #if V8_TARGET_ARCH_ARM && !V8_INTERPRETED_REGEXP
54 #include "regexp-macro-assembler.h"
55 #include "arm/regexp-macro-assembler-arm.h"
56 #endif
57 #if V8_TARGET_ARCH_MIPS && !V8_INTERPRETED_REGEXP
58 #include "regexp-macro-assembler.h"
59 #include "mips/regexp-macro-assembler-mips.h"
60 #endif
61
62 namespace v8 {
63 namespace internal {
64
65
66 Heap::Heap()
67     : isolate_(NULL),
68 // semispace_size_ should be a power of 2 and old_generation_size_ should be
69 // a multiple of Page::kPageSize.
70 #if V8_TARGET_ARCH_X64
71 #define LUMP_OF_MEMORY (2 * MB)
72       code_range_size_(512*MB),
73 #else
74 #define LUMP_OF_MEMORY MB
75       code_range_size_(0),
76 #endif
77 #if defined(ANDROID) || V8_TARGET_ARCH_MIPS
78       reserved_semispace_size_(4 * Max(LUMP_OF_MEMORY, Page::kPageSize)),
79       max_semispace_size_(4 * Max(LUMP_OF_MEMORY, Page::kPageSize)),
80       initial_semispace_size_(Page::kPageSize),
81       max_old_generation_size_(192*MB),
82       max_executable_size_(max_old_generation_size_),
83 #else
84       reserved_semispace_size_(8 * Max(LUMP_OF_MEMORY, Page::kPageSize)),
85       max_semispace_size_(8 * Max(LUMP_OF_MEMORY, Page::kPageSize)),
86       initial_semispace_size_(Page::kPageSize),
87       max_old_generation_size_(700ul * LUMP_OF_MEMORY),
88       max_executable_size_(256l * LUMP_OF_MEMORY),
89 #endif
90
91 // Variables set based on semispace_size_ and old_generation_size_ in
92 // ConfigureHeap (survived_since_last_expansion_, external_allocation_limit_).
93 // The young generation reservation will be 4 * reserved_semispace_size_ so
94 // that the young generation can be aligned to its size.
95       survived_since_last_expansion_(0),
96       sweep_generation_(0),
97       always_allocate_scope_depth_(0),
98       linear_allocation_scope_depth_(0),
99       contexts_disposed_(0),
100       global_ic_age_(0),
101       flush_monomorphic_ics_(false),
102       scan_on_scavenge_pages_(0),
103       new_space_(this),
104       old_pointer_space_(NULL),
105       old_data_space_(NULL),
106       code_space_(NULL),
107       map_space_(NULL),
108       cell_space_(NULL),
109       property_cell_space_(NULL),
110       lo_space_(NULL),
111       gc_state_(NOT_IN_GC),
112       gc_post_processing_depth_(0),
113       ms_count_(0),
114       gc_count_(0),
115       remembered_unmapped_pages_index_(0),
116       unflattened_strings_length_(0),
117 #ifdef DEBUG
118       allocation_timeout_(0),
119       disallow_allocation_failure_(false),
120 #endif  // DEBUG
121       new_space_high_promotion_mode_active_(false),
122       old_generation_allocation_limit_(kMinimumOldGenerationAllocationLimit),
123       size_of_old_gen_at_last_old_space_gc_(0),
124       external_allocation_limit_(0),
125       amount_of_external_allocated_memory_(0),
126       amount_of_external_allocated_memory_at_last_global_gc_(0),
127       old_gen_exhausted_(false),
128       store_buffer_rebuilder_(store_buffer()),
129       hidden_string_(NULL),
130       global_gc_prologue_callback_(NULL),
131       global_gc_epilogue_callback_(NULL),
132       gc_safe_size_of_old_object_(NULL),
133       total_regexp_code_generated_(0),
134       tracer_(NULL),
135       young_survivors_after_last_gc_(0),
136       high_survival_rate_period_length_(0),
137       low_survival_rate_period_length_(0),
138       survival_rate_(0),
139       previous_survival_rate_trend_(Heap::STABLE),
140       survival_rate_trend_(Heap::STABLE),
141       max_gc_pause_(0.0),
142       total_gc_time_ms_(0.0),
143       max_alive_after_gc_(0),
144       min_in_mutator_(kMaxInt),
145       alive_after_last_gc_(0),
146       last_gc_end_timestamp_(0.0),
147       marking_time_(0.0),
148       sweeping_time_(0.0),
149       store_buffer_(this),
150       marking_(this),
151       incremental_marking_(this),
152       number_idle_notifications_(0),
153       last_idle_notification_gc_count_(0),
154       last_idle_notification_gc_count_init_(false),
155       mark_sweeps_since_idle_round_started_(0),
156       gc_count_at_last_idle_gc_(0),
157       scavenges_since_last_idle_round_(kIdleScavengeThreshold),
158       gcs_since_last_deopt_(0),
159 #ifdef VERIFY_HEAP
160       no_weak_embedded_maps_verification_scope_depth_(0),
161 #endif
162       promotion_queue_(this),
163       configured_(false),
164       chunks_queued_for_free_(NULL),
165       relocation_mutex_(NULL) {
166   // Allow build-time customization of the max semispace size. Building
167   // V8 with snapshots and a non-default max semispace size is much
168   // easier if you can define it as part of the build environment.
169 #if defined(V8_MAX_SEMISPACE_SIZE)
170   max_semispace_size_ = reserved_semispace_size_ = V8_MAX_SEMISPACE_SIZE;
171 #endif
172
173   intptr_t max_virtual = OS::MaxVirtualMemory();
174
175   if (max_virtual > 0) {
176     if (code_range_size_ > 0) {
177       // Reserve no more than 1/8 of the memory for the code range.
178       code_range_size_ = Min(code_range_size_, max_virtual >> 3);
179     }
180   }
181
182   memset(roots_, 0, sizeof(roots_[0]) * kRootListLength);
183   native_contexts_list_ = NULL;
184   array_buffers_list_ = Smi::FromInt(0);
185   allocation_sites_list_ = Smi::FromInt(0);
186   mark_compact_collector_.heap_ = this;
187   external_string_table_.heap_ = this;
188   // Put a dummy entry in the remembered pages so we can find the list in
189   // the minidump even if there are no real unmapped pages.
190   RememberUnmappedPage(NULL, false);
191
192   ClearObjectStats(true);
193 }
194
195
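// Returns the combined capacity of the new space and all paged spaces
// (old pointer, old data, code, map, cell and property cell space).  The
// large object space is not included.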
196 intptr_t Heap::Capacity() {
197   if (!HasBeenSetUp()) return 0;
198
199   return new_space_.Capacity() +
200       old_pointer_space_->Capacity() +
201       old_data_space_->Capacity() +
202       code_space_->Capacity() +
203       map_space_->Capacity() +
204       cell_space_->Capacity() +
205       property_cell_space_->Capacity();
206 }
207
208
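// Returns the amount of memory committed for the heap: the committed
// memory of the new space and of every paged space, plus the size of the
// large object space.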
209 intptr_t Heap::CommittedMemory() {
210   if (!HasBeenSetUp()) return 0;
211
212   return new_space_.CommittedMemory() +
213       old_pointer_space_->CommittedMemory() +
214       old_data_space_->CommittedMemory() +
215       code_space_->CommittedMemory() +
216       map_space_->CommittedMemory() +
217       cell_space_->CommittedMemory() +
218       property_cell_space_->CommittedMemory() +
219       lo_space_->Size();
220 }
221
222
223 size_t Heap::CommittedPhysicalMemory() {
224   if (!HasBeenSetUp()) return 0;
225
226   return new_space_.CommittedPhysicalMemory() +
227       old_pointer_space_->CommittedPhysicalMemory() +
228       old_data_space_->CommittedPhysicalMemory() +
229       code_space_->CommittedPhysicalMemory() +
230       map_space_->CommittedPhysicalMemory() +
231       cell_space_->CommittedPhysicalMemory() +
232       property_cell_space_->CommittedPhysicalMemory() +
233       lo_space_->CommittedPhysicalMemory();
234 }
235
236
237 intptr_t Heap::CommittedMemoryExecutable() {
238   if (!HasBeenSetUp()) return 0;
239
240   return isolate()->memory_allocator()->SizeExecutable();
241 }
242
243
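// Returns the number of bytes still available for allocation in the new
// space and the paged spaces; the large object space is not included.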
244 intptr_t Heap::Available() {
245   if (!HasBeenSetUp()) return 0;
246
247   return new_space_.Available() +
248       old_pointer_space_->Available() +
249       old_data_space_->Available() +
250       code_space_->Available() +
251       map_space_->Available() +
252       cell_space_->Available() +
253       property_cell_space_->Available();
254 }
255
256
257 bool Heap::HasBeenSetUp() {
258   return old_pointer_space_ != NULL &&
259          old_data_space_ != NULL &&
260          code_space_ != NULL &&
261          map_space_ != NULL &&
262          cell_space_ != NULL &&
263          property_cell_space_ != NULL &&
264          lo_space_ != NULL;
265 }
266
267
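// Computes the size of an old-space object even while the collector has
// intrusively marked it, in which case the size cannot be read from the
// map in the usual way.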
268 int Heap::GcSafeSizeOfOldObject(HeapObject* object) {
269   if (IntrusiveMarking::IsMarked(object)) {
270     return IntrusiveMarking::SizeOfMarkedObject(object);
271   }
272   return object->SizeFromMap(object->map());
273 }
274
275
276 GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space,
277                                               const char** reason) {
278   // Is global GC requested?
279   if (space != NEW_SPACE) {
280     isolate_->counters()->gc_compactor_caused_by_request()->Increment();
281     *reason = "GC in old space requested";
282     return MARK_COMPACTOR;
283   }
284
285   if (FLAG_gc_global || (FLAG_stress_compaction && (gc_count_ & 1) != 0)) {
286     *reason = "GC in old space forced by flags";
287     return MARK_COMPACTOR;
288   }
289
290   // Is enough data promoted to justify a global GC?
291   if (OldGenerationAllocationLimitReached()) {
292     isolate_->counters()->gc_compactor_caused_by_promoted_data()->Increment();
293     *reason = "promotion limit reached";
294     return MARK_COMPACTOR;
295   }
296
297   // Have allocation in OLD and LO failed?
298   if (old_gen_exhausted_) {
299     isolate_->counters()->
300         gc_compactor_caused_by_oldspace_exhaustion()->Increment();
301     *reason = "old generations exhausted";
302     return MARK_COMPACTOR;
303   }
304
305   // Is there enough space left in OLD to guarantee that a scavenge can
306   // succeed?
307   //
308   // Note that MemoryAllocator->MaxAvailable() undercounts the memory available
309   // for object promotion. It counts only the bytes that the memory
310   // allocator has not yet allocated from the OS and assigned to any space,
311   // and does not count available bytes already in the old space or code
312   // space.  Undercounting is safe---we may get an unrequested full GC when
313   // a scavenge would have succeeded.
314   if (isolate_->memory_allocator()->MaxAvailable() <= new_space_.Size()) {
315     isolate_->counters()->
316         gc_compactor_caused_by_oldspace_exhaustion()->Increment();
317     *reason = "scavenge might not succeed";
318     return MARK_COMPACTOR;
319   }
320
321   // Default
322   *reason = NULL;
323   return SCAVENGER;
324 }
325
326
327 // TODO(1238405): Combine the infrastructure for --heap-stats and
328 // --log-gc to avoid the complicated preprocessor and flag testing.
329 void Heap::ReportStatisticsBeforeGC() {
330   // Heap::ReportHeapStatistics will also log NewSpace statistics when
331   // --log-gc is set.  The following logic is used to avoid double
332   // logging.
333 #ifdef DEBUG
334   if (FLAG_heap_stats || FLAG_log_gc) new_space_.CollectStatistics();
335   if (FLAG_heap_stats) {
336     ReportHeapStatistics("Before GC");
337   } else if (FLAG_log_gc) {
338     new_space_.ReportStatistics();
339   }
340   if (FLAG_heap_stats || FLAG_log_gc) new_space_.ClearHistograms();
341 #else
342   if (FLAG_log_gc) {
343     new_space_.CollectStatistics();
344     new_space_.ReportStatistics();
345     new_space_.ClearHistograms();
346   }
347 #endif  // DEBUG
348 }
349
350
351 void Heap::PrintShortHeapStatistics() {
352   if (!FLAG_trace_gc_verbose) return;
353   PrintPID("Memory allocator,   used: %6" V8_PTR_PREFIX "d KB"
354                ", available: %6" V8_PTR_PREFIX "d KB\n",
355            isolate_->memory_allocator()->Size() / KB,
356            isolate_->memory_allocator()->Available() / KB);
357   PrintPID("New space,          used: %6" V8_PTR_PREFIX "d KB"
358                ", available: %6" V8_PTR_PREFIX "d KB"
359                ", committed: %6" V8_PTR_PREFIX "d KB\n",
360            new_space_.Size() / KB,
361            new_space_.Available() / KB,
362            new_space_.CommittedMemory() / KB);
363   PrintPID("Old pointers,       used: %6" V8_PTR_PREFIX "d KB"
364                ", available: %6" V8_PTR_PREFIX "d KB"
365                ", committed: %6" V8_PTR_PREFIX "d KB\n",
366            old_pointer_space_->SizeOfObjects() / KB,
367            old_pointer_space_->Available() / KB,
368            old_pointer_space_->CommittedMemory() / KB);
369   PrintPID("Old data space,     used: %6" V8_PTR_PREFIX "d KB"
370                ", available: %6" V8_PTR_PREFIX "d KB"
371                ", committed: %6" V8_PTR_PREFIX "d KB\n",
372            old_data_space_->SizeOfObjects() / KB,
373            old_data_space_->Available() / KB,
374            old_data_space_->CommittedMemory() / KB);
375   PrintPID("Code space,         used: %6" V8_PTR_PREFIX "d KB"
376                ", available: %6" V8_PTR_PREFIX "d KB"
377                ", committed: %6" V8_PTR_PREFIX "d KB\n",
378            code_space_->SizeOfObjects() / KB,
379            code_space_->Available() / KB,
380            code_space_->CommittedMemory() / KB);
381   PrintPID("Map space,          used: %6" V8_PTR_PREFIX "d KB"
382                ", available: %6" V8_PTR_PREFIX "d KB"
383                ", committed: %6" V8_PTR_PREFIX "d KB\n",
384            map_space_->SizeOfObjects() / KB,
385            map_space_->Available() / KB,
386            map_space_->CommittedMemory() / KB);
387   PrintPID("Cell space,         used: %6" V8_PTR_PREFIX "d KB"
388                ", available: %6" V8_PTR_PREFIX "d KB"
389                ", committed: %6" V8_PTR_PREFIX "d KB\n",
390            cell_space_->SizeOfObjects() / KB,
391            cell_space_->Available() / KB,
392            cell_space_->CommittedMemory() / KB);
393   PrintPID("PropertyCell space, used: %6" V8_PTR_PREFIX "d KB"
394                ", available: %6" V8_PTR_PREFIX "d KB"
395                ", committed: %6" V8_PTR_PREFIX "d KB\n",
396            property_cell_space_->SizeOfObjects() / KB,
397            property_cell_space_->Available() / KB,
398            property_cell_space_->CommittedMemory() / KB);
399   PrintPID("Large object space, used: %6" V8_PTR_PREFIX "d KB"
400                ", available: %6" V8_PTR_PREFIX "d KB"
401                ", committed: %6" V8_PTR_PREFIX "d KB\n",
402            lo_space_->SizeOfObjects() / KB,
403            lo_space_->Available() / KB,
404            lo_space_->CommittedMemory() / KB);
405   PrintPID("All spaces,         used: %6" V8_PTR_PREFIX "d KB"
406                ", available: %6" V8_PTR_PREFIX "d KB"
407                ", committed: %6" V8_PTR_PREFIX "d KB\n",
408            this->SizeOfObjects() / KB,
409            this->Available() / KB,
410            this->CommittedMemory() / KB);
411   PrintPID("External memory reported: %6" V8_PTR_PREFIX "d KB\n",
412            amount_of_external_allocated_memory_ / KB);
413   PrintPID("Total time spent in GC  : %.1f ms\n", total_gc_time_ms_);
414 }
415
416
417 // TODO(1238405): Combine the infrastructure for --heap-stats and
418 // --log-gc to avoid the complicated preprocessor and flag testing.
419 void Heap::ReportStatisticsAfterGC() {
420   // As before the GC, we use the same complicated logic to ensure that
421   // NewSpace statistics are logged exactly once when --log-gc is turned on.
422 #if defined(DEBUG)
423   if (FLAG_heap_stats) {
424     new_space_.CollectStatistics();
425     ReportHeapStatistics("After GC");
426   } else if (FLAG_log_gc) {
427     new_space_.ReportStatistics();
428   }
429 #else
430   if (FLAG_log_gc) new_space_.ReportStatistics();
431 #endif  // DEBUG
432 }
433
434
435 void Heap::GarbageCollectionPrologue() {
436   {  AllowHeapAllocation for_the_first_part_of_prologue;
437     isolate_->transcendental_cache()->Clear();
438     ClearJSFunctionResultCaches();
439     gc_count_++;
440     unflattened_strings_length_ = 0;
441
442     if (FLAG_flush_code && FLAG_flush_code_incrementally) {
443       mark_compact_collector()->EnableCodeFlushing(true);
444     }
445
446 #ifdef VERIFY_HEAP
447     if (FLAG_verify_heap) {
448       Verify();
449     }
450 #endif
451   }
452
453 #ifdef DEBUG
454   ASSERT(!AllowHeapAllocation::IsAllowed() && gc_state_ == NOT_IN_GC);
455
456   if (FLAG_gc_verbose) Print();
457
458   ReportStatisticsBeforeGC();
459 #endif  // DEBUG
460
461   store_buffer()->GCPrologue();
462 }
463
464
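// Returns the total size of objects allocated in all spaces, including
// the large object space.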
465 intptr_t Heap::SizeOfObjects() {
466   intptr_t total = 0;
467   AllSpaces spaces(this);
468   for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
469     total += space->SizeOfObjects();
470   }
471   return total;
472 }
473
474
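// Asks every paged space to repair its free lists once booting is
// complete.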
475 void Heap::RepairFreeListsAfterBoot() {
476   PagedSpaces spaces(this);
477   for (PagedSpace* space = spaces.next();
478        space != NULL;
479        space = spaces.next()) {
480     space->RepairFreeListsAfterBoot();
481   }
482 }
483
484
485 void Heap::GarbageCollectionEpilogue() {
486   store_buffer()->GCEpilogue();
487
488   // In release mode, we only zap the from space under heap verification.
489   if (Heap::ShouldZapGarbage()) {
490     ZapFromSpace();
491   }
492
493 #ifdef VERIFY_HEAP
494   if (FLAG_verify_heap) {
495     Verify();
496   }
497 #endif
498
499   AllowHeapAllocation for_the_rest_of_the_epilogue;
500
501 #ifdef DEBUG
502   if (FLAG_print_global_handles) isolate_->global_handles()->Print();
503   if (FLAG_print_handles) PrintHandles();
504   if (FLAG_gc_verbose) Print();
505   if (FLAG_code_stats) ReportCodeStatistics("After GC");
506 #endif
507   if (FLAG_deopt_every_n_garbage_collections > 0) {
508     if (++gcs_since_last_deopt_ == FLAG_deopt_every_n_garbage_collections) {
509       Deoptimizer::DeoptimizeAll(isolate());
510       gcs_since_last_deopt_ = 0;
511     }
512   }
513
514   isolate_->counters()->alive_after_last_gc()->Set(
515       static_cast<int>(SizeOfObjects()));
516
517   isolate_->counters()->string_table_capacity()->Set(
518       string_table()->Capacity());
519   isolate_->counters()->number_of_symbols()->Set(
520       string_table()->NumberOfElements());
521
522   if (CommittedMemory() > 0) {
523     isolate_->counters()->external_fragmentation_total()->AddSample(
524         static_cast<int>(100 - (SizeOfObjects() * 100.0) / CommittedMemory()));
525
526     isolate_->counters()->heap_fraction_map_space()->AddSample(
527         static_cast<int>(
528             (map_space()->CommittedMemory() * 100.0) / CommittedMemory()));
529     isolate_->counters()->heap_fraction_cell_space()->AddSample(
530         static_cast<int>(
531             (cell_space()->CommittedMemory() * 100.0) / CommittedMemory()));
532     isolate_->counters()->heap_fraction_property_cell_space()->
533         AddSample(static_cast<int>(
534             (property_cell_space()->CommittedMemory() * 100.0) /
535             CommittedMemory()));
536
537     isolate_->counters()->heap_sample_total_committed()->AddSample(
538         static_cast<int>(CommittedMemory() / KB));
539     isolate_->counters()->heap_sample_total_used()->AddSample(
540         static_cast<int>(SizeOfObjects() / KB));
541     isolate_->counters()->heap_sample_map_space_committed()->AddSample(
542         static_cast<int>(map_space()->CommittedMemory() / KB));
543     isolate_->counters()->heap_sample_cell_space_committed()->AddSample(
544         static_cast<int>(cell_space()->CommittedMemory() / KB));
545     isolate_->counters()->
546         heap_sample_property_cell_space_committed()->
547             AddSample(static_cast<int>(
548                 property_cell_space()->CommittedMemory() / KB));
549   }
550
551 #define UPDATE_COUNTERS_FOR_SPACE(space)                                       \
552   isolate_->counters()->space##_bytes_available()->Set(                        \
553       static_cast<int>(space()->Available()));                                 \
554   isolate_->counters()->space##_bytes_committed()->Set(                        \
555       static_cast<int>(space()->CommittedMemory()));                           \
556   isolate_->counters()->space##_bytes_used()->Set(                             \
557       static_cast<int>(space()->SizeOfObjects()));
558 #define UPDATE_FRAGMENTATION_FOR_SPACE(space)                                  \
559   if (space()->CommittedMemory() > 0) {                                        \
560     isolate_->counters()->external_fragmentation_##space()->AddSample(         \
561         static_cast<int>(100 -                                                 \
562             (space()->SizeOfObjects() * 100.0) / space()->CommittedMemory())); \
563   }
564 #define UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(space)                     \
565   UPDATE_COUNTERS_FOR_SPACE(space)                                             \
566   UPDATE_FRAGMENTATION_FOR_SPACE(space)
567
568   UPDATE_COUNTERS_FOR_SPACE(new_space)
569   UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(old_pointer_space)
570   UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(old_data_space)
571   UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(code_space)
572   UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(map_space)
573   UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(cell_space)
574   UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(property_cell_space)
575   UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(lo_space)
576 #undef UPDATE_COUNTERS_FOR_SPACE
577 #undef UPDATE_FRAGMENTATION_FOR_SPACE
578 #undef UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE
579
580 #if defined(DEBUG)
581   ReportStatisticsAfterGC();
582 #endif  // DEBUG
583 #ifdef ENABLE_DEBUGGER_SUPPORT
584   isolate_->debug()->AfterGarbageCollection();
585 #endif  // ENABLE_DEBUGGER_SUPPORT
586
587   error_object_list_.DeferredFormatStackTrace(isolate());
588 }
589
590
591 void Heap::CollectAllGarbage(int flags, const char* gc_reason) {
592   // Since we are ignoring the return value, the exact choice of space does
593   // not matter, so long as we do not specify NEW_SPACE, which would not
594   // cause a full GC.
595   mark_compact_collector_.SetFlags(flags);
596   CollectGarbage(OLD_POINTER_SPACE, gc_reason);
597   mark_compact_collector_.SetFlags(kNoGCFlags);
598 }
599
600
601 void Heap::CollectAllAvailableGarbage(const char* gc_reason) {
602   // Since we are ignoring the return value, the exact choice of space does
603   // not matter, so long as we do not specify NEW_SPACE, which would not
604   // cause a full GC.
605   // Major GC would invoke weak handle callbacks on weakly reachable
606   // handles, but won't collect weakly reachable objects until next
607   // major GC.  Therefore if we collect aggressively and a weak handle
608   // callback has been invoked, we rerun major GC to release objects which
609   // become garbage.
610   // Note: as weak callbacks can execute arbitrary code, we cannot
611   // hope that eventually there will be no weak callback invocations.
612   // Therefore stop recollecting after several attempts.
613   mark_compact_collector()->SetFlags(kMakeHeapIterableMask |
614                                      kReduceMemoryFootprintMask);
615   isolate_->compilation_cache()->Clear();
616   const int kMaxNumberOfAttempts = 7;
617   const int kMinNumberOfAttempts = 2;
618   for (int attempt = 0; attempt < kMaxNumberOfAttempts; attempt++) {
619     if (!CollectGarbage(OLD_POINTER_SPACE, MARK_COMPACTOR, gc_reason, NULL) &&
620         attempt + 1 >= kMinNumberOfAttempts) {
621       break;
622     }
623   }
624   mark_compact_collector()->SetFlags(kNoGCFlags);
625   new_space_.Shrink();
626   UncommitFromSpace();
627   incremental_marking()->UncommitMarkingDeque();
628 }
629
630
631 bool Heap::CollectGarbage(AllocationSpace space,
632                           GarbageCollector collector,
633                           const char* gc_reason,
634                           const char* collector_reason) {
635   // The VM is in the GC state until exiting this function.
636   VMState<GC> state(isolate_);
637
638 #ifdef DEBUG
639   // Reset the allocation timeout to the GC interval, but make sure to
640   // allow at least a few allocations after a collection. The reason
641   // for this is that we have a lot of allocation sequences and we
642   // assume that a garbage collection will allow the subsequent
643   // allocation attempts to go through.
644   allocation_timeout_ = Max(6, FLAG_gc_interval);
645 #endif
646
647   if (collector == SCAVENGER && !incremental_marking()->IsStopped()) {
648     if (FLAG_trace_incremental_marking) {
649       PrintF("[IncrementalMarking] Scavenge during marking.\n");
650     }
651   }
652
653   if (collector == MARK_COMPACTOR &&
654       !mark_compact_collector()->abort_incremental_marking() &&
655       !incremental_marking()->IsStopped() &&
656       !incremental_marking()->should_hurry() &&
657       FLAG_incremental_marking_steps) {
658     // Make progress in incremental marking.
659     const intptr_t kStepSizeWhenDelayedByScavenge = 1 * MB;
660     incremental_marking()->Step(kStepSizeWhenDelayedByScavenge,
661                                 IncrementalMarking::NO_GC_VIA_STACK_GUARD);
662     if (!incremental_marking()->IsComplete()) {
663       if (FLAG_trace_incremental_marking) {
664         PrintF("[IncrementalMarking] Delaying MarkSweep.\n");
665       }
666       collector = SCAVENGER;
667       collector_reason = "incremental marking delaying mark-sweep";
668     }
669   }
670
671   bool next_gc_likely_to_collect_more = false;
672
673   { GCTracer tracer(this, gc_reason, collector_reason);
674     ASSERT(AllowHeapAllocation::IsAllowed());
675     DisallowHeapAllocation no_allocation_during_gc;
676     GarbageCollectionPrologue();
677     // The GC count was incremented in the prologue.  Tell the tracer about
678     // it.
679     tracer.set_gc_count(gc_count_);
680
681     // Tell the tracer which collector we've selected.
682     tracer.set_collector(collector);
683
684     {
685       HistogramTimerScope histogram_timer_scope(
686           (collector == SCAVENGER) ? isolate_->counters()->gc_scavenger()
687                                    : isolate_->counters()->gc_compactor());
688       next_gc_likely_to_collect_more =
689           PerformGarbageCollection(collector, &tracer);
690     }
691
692     GarbageCollectionEpilogue();
693   }
694
695   // Start incremental marking for the next cycle. The heap snapshot
696   // generator needs incremental marking to stay off after it aborted.
697   if (!mark_compact_collector()->abort_incremental_marking() &&
698       incremental_marking()->IsStopped() &&
699       incremental_marking()->WorthActivating() &&
700       NextGCIsLikelyToBeFull()) {
701     incremental_marking()->Start();
702   }
703
704   return next_gc_likely_to_collect_more;
705 }
706
707
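// Performs a scavenge of the new space, or a full mark-compact collection
// if incremental marking is currently in progress.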
708 void Heap::PerformScavenge() {
709   GCTracer tracer(this, NULL, NULL);
710   if (incremental_marking()->IsStopped()) {
711     PerformGarbageCollection(SCAVENGER, &tracer);
712   } else {
713     PerformGarbageCollection(MARK_COMPACTOR, &tracer);
714   }
715 }
716
717
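// Moves len elements of a FixedArray from src_index to dst_index, updating
// the write barrier for any moved pointers to new-space objects when the
// array itself is in old space.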
718 void Heap::MoveElements(FixedArray* array,
719                         int dst_index,
720                         int src_index,
721                         int len) {
722   if (len == 0) return;
723
724   ASSERT(array->map() != HEAP->fixed_cow_array_map());
725   Object** dst_objects = array->data_start() + dst_index;
726   OS::MemMove(dst_objects,
727               array->data_start() + src_index,
728               len * kPointerSize);
729   if (!InNewSpace(array)) {
730     for (int i = 0; i < len; i++) {
731       // TODO(hpayer): check store buffer for entries
732       if (InNewSpace(dst_objects[i])) {
733         RecordWrite(array->address(), array->OffsetOfElementAt(dst_index + i));
734       }
735     }
736   }
737   incremental_marking()->RecordWrites(array);
738 }
739
740
741 #ifdef VERIFY_HEAP
742 // Helper class for verifying the string table.
743 class StringTableVerifier : public ObjectVisitor {
744  public:
745   void VisitPointers(Object** start, Object** end) {
746     // Visit all HeapObject pointers in [start, end).
747     for (Object** p = start; p < end; p++) {
748       if ((*p)->IsHeapObject()) {
749         // Check that the string is actually internalized.
750         CHECK((*p)->IsTheHole() || (*p)->IsUndefined() ||
751               (*p)->IsInternalizedString());
752       }
753     }
754   }
755 };
756
757
758 static void VerifyStringTable() {
759   StringTableVerifier verifier;
760   HEAP->string_table()->IterateElements(&verifier);
761 }
762 #endif  // VERIFY_HEAP
763
764
765 static bool AbortIncrementalMarkingAndCollectGarbage(
766     Heap* heap,
767     AllocationSpace space,
768     const char* gc_reason = NULL) {
769   heap->mark_compact_collector()->SetFlags(Heap::kAbortIncrementalMarkingMask);
770   bool result = heap->CollectGarbage(space, gc_reason);
771   heap->mark_compact_collector()->SetFlags(Heap::kNoGCFlags);
772   return result;
773 }
774
775
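// Reserves sizes[space] bytes in the new space and in each paged space for
// the deserializer, collecting garbage and retrying until all reservations
// succeed or a fixed number of attempts has been exhausted.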
776 void Heap::ReserveSpace(
777     int *sizes,
778     Address *locations_out) {
779   bool gc_performed = true;
780   int counter = 0;
781   static const int kThreshold = 20;
782   while (gc_performed && counter++ < kThreshold) {
783     gc_performed = false;
784     ASSERT(NEW_SPACE == FIRST_PAGED_SPACE - 1);
785     for (int space = NEW_SPACE; space <= LAST_PAGED_SPACE; space++) {
786       if (sizes[space] != 0) {
787         MaybeObject* allocation;
788         if (space == NEW_SPACE) {
789           allocation = new_space()->AllocateRaw(sizes[space]);
790         } else {
791           allocation = paged_space(space)->AllocateRaw(sizes[space]);
792         }
793         FreeListNode* node;
794         if (!allocation->To<FreeListNode>(&node)) {
795           if (space == NEW_SPACE) {
796             Heap::CollectGarbage(NEW_SPACE,
797                                  "failed to reserve space in the new space");
798           } else {
799             AbortIncrementalMarkingAndCollectGarbage(
800                 this,
801                 static_cast<AllocationSpace>(space),
802                 "failed to reserve space in paged space");
803           }
804           gc_performed = true;
805           break;
806         } else {
807           // Mark with a free list node, in case we have a GC before
808           // deserializing.
809           node->set_size(this, sizes[space]);
810           locations_out[space] = node->address();
811         }
812       }
813     }
814   }
815
816   if (gc_performed) {
817     // Failed to reserve the space after several attempts.
818     V8::FatalProcessOutOfMemory("Heap::ReserveSpace");
819   }
820 }
821
822
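// Commits the from-space half of the new space if it is not committed yet;
// failure to commit is fatal.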
823 void Heap::EnsureFromSpaceIsCommitted() {
824   if (new_space_.CommitFromSpaceIfNeeded()) return;
825
826   // Committing memory to from space failed.
827   // Memory is exhausted and we will die.
828   V8::FatalProcessOutOfMemory("Committing semi space failed.");
829 }
830
831
832 void Heap::ClearJSFunctionResultCaches() {
833   if (isolate_->bootstrapper()->IsActive()) return;
834
835   Object* context = native_contexts_list_;
836   while (!context->IsUndefined()) {
837     // Get the caches for this context. GC can happen when the context
838     // is not fully initialized, so the caches can be undefined.
839     Object* caches_or_undefined =
840         Context::cast(context)->get(Context::JSFUNCTION_RESULT_CACHES_INDEX);
841     if (!caches_or_undefined->IsUndefined()) {
842       FixedArray* caches = FixedArray::cast(caches_or_undefined);
843       // Clear the caches:
844       int length = caches->length();
845       for (int i = 0; i < length; i++) {
846         JSFunctionResultCache::cast(caches->get(i))->Clear();
847       }
848     }
849     // Get the next context:
850     context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
851   }
852 }
853
854
855 void Heap::ClearNormalizedMapCaches() {
856   if (isolate_->bootstrapper()->IsActive() &&
857       !incremental_marking()->IsMarking()) {
858     return;
859   }
860
861   Object* context = native_contexts_list_;
862   while (!context->IsUndefined()) {
863     // GC can happen when the context is not fully initialized,
864     // so the cache can be undefined.
865     Object* cache =
866         Context::cast(context)->get(Context::NORMALIZED_MAP_CACHE_INDEX);
867     if (!cache->IsUndefined()) {
868       NormalizedMapCache::cast(cache)->Clear();
869     }
870     context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
871   }
872 }
873
874
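// Updates the young generation survival rate after a collection and
// classifies its trend as INCREASING, DECREASING or STABLE.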
875 void Heap::UpdateSurvivalRateTrend(int start_new_space_size) {
876   double survival_rate =
877       (static_cast<double>(young_survivors_after_last_gc_) * 100) /
878       start_new_space_size;
879
880   if (survival_rate > kYoungSurvivalRateHighThreshold) {
881     high_survival_rate_period_length_++;
882   } else {
883     high_survival_rate_period_length_ = 0;
884   }
885
886   if (survival_rate < kYoungSurvivalRateLowThreshold) {
887     low_survival_rate_period_length_++;
888   } else {
889     low_survival_rate_period_length_ = 0;
890   }
891
892   double survival_rate_diff = survival_rate_ - survival_rate;
893
894   if (survival_rate_diff > kYoungSurvivalRateAllowedDeviation) {
895     set_survival_rate_trend(DECREASING);
896   } else if (survival_rate_diff < -kYoungSurvivalRateAllowedDeviation) {
897     set_survival_rate_trend(INCREASING);
898   } else {
899     set_survival_rate_trend(STABLE);
900   }
901
902   survival_rate_ = survival_rate;
903 }
904
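// Runs one garbage collection cycle: calls the GC prologue callbacks,
// performs either a scavenge or a mark-compact, adjusts the new space
// promotion mode, runs post-GC processing of global handles and finally
// calls the GC epilogue callbacks.  Returns whether the next GC is likely
// to collect more garbage.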
905 bool Heap::PerformGarbageCollection(GarbageCollector collector,
906                                     GCTracer* tracer) {
907   bool next_gc_likely_to_collect_more = false;
908
909   if (collector != SCAVENGER) {
910     PROFILE(isolate_, CodeMovingGCEvent());
911   }
912
913 #ifdef VERIFY_HEAP
914   if (FLAG_verify_heap) {
915     VerifyStringTable();
916   }
917 #endif
918
919   GCType gc_type =
920       collector == MARK_COMPACTOR ? kGCTypeMarkSweepCompact : kGCTypeScavenge;
921
922   {
923     GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
924     VMState<EXTERNAL> state(isolate_);
925     CallGCPrologueCallbacks(gc_type, kNoGCCallbackFlags);
926   }
927
928   EnsureFromSpaceIsCommitted();
929
930   int start_new_space_size = Heap::new_space()->SizeAsInt();
931
932   if (IsHighSurvivalRate()) {
933     // We speed up the incremental marker if it is running so that it
934     // does not fall behind the rate of promotion, which would cause a
935     // constantly growing old space.
936     incremental_marking()->NotifyOfHighPromotionRate();
937   }
938
939   if (collector == MARK_COMPACTOR) {
940     // Perform mark-sweep with optional compaction.
941     MarkCompact(tracer);
942     sweep_generation_++;
943
944     UpdateSurvivalRateTrend(start_new_space_size);
945
946     size_of_old_gen_at_last_old_space_gc_ = PromotedSpaceSizeOfObjects();
947
948     old_generation_allocation_limit_ =
949         OldGenerationAllocationLimit(size_of_old_gen_at_last_old_space_gc_);
950
951     old_gen_exhausted_ = false;
952   } else {
953     tracer_ = tracer;
954     Scavenge();
955     tracer_ = NULL;
956
957     UpdateSurvivalRateTrend(start_new_space_size);
958   }
959
960   if (!new_space_high_promotion_mode_active_ &&
961       new_space_.Capacity() == new_space_.MaximumCapacity() &&
962       IsStableOrIncreasingSurvivalTrend() &&
963       IsHighSurvivalRate()) {
964     // Stable high survival rates even though young generation is at
965     // maximum capacity indicates that most objects will be promoted.
966     // To decrease scavenger pauses and final mark-sweep pauses, we
967     // have to limit maximal capacity of the young generation.
968     SetNewSpaceHighPromotionModeActive(true);
969     if (FLAG_trace_gc) {
970       PrintPID("Limited new space size due to high promotion rate: %d MB\n",
971                new_space_.InitialCapacity() / MB);
972     }
973     // Support for global pre-tenuring uses the high promotion mode as a
974     // heuristic indicator of whether to pretenure or not, we trigger
975     // deoptimization here to take advantage of pre-tenuring as soon as
976     // possible.
977     if (FLAG_pretenuring) {
978       isolate_->stack_guard()->FullDeopt();
979     }
980   } else if (new_space_high_promotion_mode_active_ &&
981       IsStableOrDecreasingSurvivalTrend() &&
982       IsLowSurvivalRate()) {
983     // Decreasing low survival rates might indicate that the above high
984     // promotion mode is over and we should allow the young generation
985     // to grow again.
986     SetNewSpaceHighPromotionModeActive(false);
987     if (FLAG_trace_gc) {
988       PrintPID("Unlimited new space size due to low promotion rate: %d MB\n",
989                new_space_.MaximumCapacity() / MB);
990     }
991     // Trigger deoptimization here to turn off pre-tenuring as soon as
992     // possible.
993     if (FLAG_pretenuring) {
994       isolate_->stack_guard()->FullDeopt();
995     }
996   }
997
998   if (new_space_high_promotion_mode_active_ &&
999       new_space_.Capacity() > new_space_.InitialCapacity()) {
1000     new_space_.Shrink();
1001   }
1002
1003   isolate_->counters()->objs_since_last_young()->Set(0);
1004
1005   // Callbacks that fire after this point might trigger nested GCs and
1006   // restart incremental marking, so the assertion can't be moved down.
1007   ASSERT(collector == SCAVENGER || incremental_marking()->IsStopped());
1008
1009   gc_post_processing_depth_++;
1010   { AllowHeapAllocation allow_allocation;
1011     GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
1012     next_gc_likely_to_collect_more =
1013         isolate_->global_handles()->PostGarbageCollectionProcessing(
1014             collector, tracer);
1015   }
1016   gc_post_processing_depth_--;
1017
1018   // Update relocatables.
1019   Relocatable::PostGarbageCollectionProcessing();
1020
1021   if (collector == MARK_COMPACTOR) {
1022     // Register the amount of external allocated memory.
1023     amount_of_external_allocated_memory_at_last_global_gc_ =
1024         amount_of_external_allocated_memory_;
1025   }
1026
1027   {
1028     GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
1029     VMState<EXTERNAL> state(isolate_);
1030     CallGCEpilogueCallbacks(gc_type);
1031   }
1032
1033 #ifdef VERIFY_HEAP
1034   if (FLAG_verify_heap) {
1035     VerifyStringTable();
1036   }
1037 #endif
1038
1039   return next_gc_likely_to_collect_more;
1040 }
1041
1042
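// Invokes the global GC prologue callback (for mark-compact collections
// only) followed by every registered prologue callback whose GC type
// filter matches gc_type.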
1043 void Heap::CallGCPrologueCallbacks(GCType gc_type, GCCallbackFlags flags) {
1044   if (gc_type == kGCTypeMarkSweepCompact && global_gc_prologue_callback_) {
1045     global_gc_prologue_callback_();
1046   }
1047   for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
1048     if (gc_type & gc_prologue_callbacks_[i].gc_type) {
1049       gc_prologue_callbacks_[i].callback(gc_type, flags);
1050     }
1051   }
1052 }
1053
1054
1055 void Heap::CallGCEpilogueCallbacks(GCType gc_type) {
1056   for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
1057     if (gc_type & gc_epilogue_callbacks_[i].gc_type) {
1058       gc_epilogue_callbacks_[i].callback(gc_type, kNoGCCallbackFlags);
1059     }
1060   }
1061   if (gc_type == kGCTypeMarkSweepCompact && global_gc_epilogue_callback_) {
1062     global_gc_epilogue_callback_();
1063   }
1064 }
1065
1066
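// Performs a full mark-sweep/mark-compact collection and resets per-cycle
// state such as the disposed context count afterwards.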
1067 void Heap::MarkCompact(GCTracer* tracer) {
1068   gc_state_ = MARK_COMPACT;
1069   LOG(isolate_, ResourceEvent("markcompact", "begin"));
1070
1071   mark_compact_collector_.Prepare(tracer);
1072
1073   ms_count_++;
1074   tracer->set_full_gc_count(ms_count_);
1075
1076   MarkCompactPrologue();
1077
1078   mark_compact_collector_.CollectGarbage();
1079
1080   LOG(isolate_, ResourceEvent("markcompact", "end"));
1081
1082   gc_state_ = NOT_IN_GC;
1083
1084   isolate_->counters()->objs_since_last_full()->Set(0);
1085
1086   contexts_disposed_ = 0;
1087
1088   flush_monomorphic_ics_ = false;
1089 }
1090
1091
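// Clears the various caches that must not keep otherwise unreachable
// objects alive across a full collection.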
1092 void Heap::MarkCompactPrologue() {
1093   // At any old GC clear the keyed lookup cache to enable collection of unused
1094   // maps.
1095   isolate_->keyed_lookup_cache()->Clear();
1096   isolate_->context_slot_cache()->Clear();
1097   isolate_->descriptor_lookup_cache()->Clear();
1098   RegExpResultsCache::Clear(string_split_cache());
1099   RegExpResultsCache::Clear(regexp_multiple_cache());
1100
1101   isolate_->compilation_cache()->MarkCompactPrologue();
1102
1103   CompletelyClearInstanceofCache();
1104
1105   FlushNumberStringCache();
1106   if (FLAG_cleanup_code_caches_at_gc) {
1107     polymorphic_code_cache()->set_cache(undefined_value());
1108   }
1109
1110   ClearNormalizedMapCaches();
1111 }
1112
1113
1114 // Helper class for copying HeapObjects
1115 class ScavengeVisitor: public ObjectVisitor {
1116  public:
1117   explicit ScavengeVisitor(Heap* heap) : heap_(heap) {}
1118
1119   void VisitPointer(Object** p) { ScavengePointer(p); }
1120
1121   void VisitPointers(Object** start, Object** end) {
1122     // Copy all HeapObject pointers in [start, end)
1123     for (Object** p = start; p < end; p++) ScavengePointer(p);
1124   }
1125
1126  private:
1127   void ScavengePointer(Object** p) {
1128     Object* object = *p;
1129     if (!heap_->InNewSpace(object)) return;
1130     Heap::ScavengeObject(reinterpret_cast<HeapObject**>(p),
1131                          reinterpret_cast<HeapObject*>(object));
1132   }
1133
1134   Heap* heap_;
1135 };
1136
1137
1138 #ifdef VERIFY_HEAP
1139 // Visitor class to verify pointers in code or data space do not point into
1140 // new space.
1141 class VerifyNonPointerSpacePointersVisitor: public ObjectVisitor {
1142  public:
1143   void VisitPointers(Object** start, Object**end) {
1144     for (Object** current = start; current < end; current++) {
1145       if ((*current)->IsHeapObject()) {
1146         CHECK(!HEAP->InNewSpace(HeapObject::cast(*current)));
1147       }
1148     }
1149   }
1150 };
1151
1152
1153 static void VerifyNonPointerSpacePointers() {
1154   // Verify that there are no pointers to new space in spaces where we
1155   // do not expect them.
1156   VerifyNonPointerSpacePointersVisitor v;
1157   HeapObjectIterator code_it(HEAP->code_space());
1158   for (HeapObject* object = code_it.Next();
1159        object != NULL; object = code_it.Next())
1160     object->Iterate(&v);
1161
1162   // The old data space is normally swept conservatively, in which case the
1163   // iterator does not work, so we normally skip the next bit.
1164   if (!HEAP->old_data_space()->was_swept_conservatively()) {
1165     HeapObjectIterator data_it(HEAP->old_data_space());
1166     for (HeapObject* object = data_it.Next();
1167          object != NULL; object = data_it.Next())
1168       object->Iterate(&v);
1169   }
1170 }
1171 #endif  // VERIFY_HEAP
1172
1173
1174 void Heap::CheckNewSpaceExpansionCriteria() {
1175   if (new_space_.Capacity() < new_space_.MaximumCapacity() &&
1176       survived_since_last_expansion_ > new_space_.Capacity() &&
1177       !new_space_high_promotion_mode_active_) {
1178     // Grow the size of new space if there is room to grow, enough data
1179     // has survived scavenge since the last expansion and we are not in
1180     // high promotion mode.
1181     new_space_.Grow();
1182     survived_since_last_expansion_ = 0;
1183   }
1184 }
1185
1186
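// Returns true if *p points into new space and the object has not been
// copied yet, i.e. its map word holds no forwarding address.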
1187 static bool IsUnscavengedHeapObject(Heap* heap, Object** p) {
1188   return heap->InNewSpace(*p) &&
1189       !HeapObject::cast(*p)->map_word().IsForwardingAddress();
1190 }
1191
1192
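// Forwards store buffer events that occur during a scavenge to the heap's
// StoreBufferRebuilder.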
1193 void Heap::ScavengeStoreBufferCallback(
1194     Heap* heap,
1195     MemoryChunk* page,
1196     StoreBufferEvent event) {
1197   heap->store_buffer_rebuilder_.Callback(page, event);
1198 }
1199
1200
1201 void StoreBufferRebuilder::Callback(MemoryChunk* page, StoreBufferEvent event) {
1202   if (event == kStoreBufferStartScanningPagesEvent) {
1203     start_of_current_page_ = NULL;
1204     current_page_ = NULL;
1205   } else if (event == kStoreBufferScanningPageEvent) {
1206     if (current_page_ != NULL) {
1207       // If this page already overflowed the store buffer during this iteration.
1208       if (current_page_->scan_on_scavenge()) {
1209         // Then we should wipe out the entries that have been added for it.
1210         store_buffer_->SetTop(start_of_current_page_);
1211       } else if (store_buffer_->Top() - start_of_current_page_ >=
1212                  (store_buffer_->Limit() - store_buffer_->Top()) >> 2) {
1213         // Did we find too many pointers in the previous page?  The heuristic is
1214         // that no page can take more than 1/5 the remaining slots in the store
1215         // buffer.
1216         current_page_->set_scan_on_scavenge(true);
1217         store_buffer_->SetTop(start_of_current_page_);
1218       } else {
1219         // In this case the page we scanned took a reasonable number of slots in
1220         // the store buffer.  It has now been rehabilitated and is no longer
1221         // marked scan_on_scavenge.
1222         ASSERT(!current_page_->scan_on_scavenge());
1223       }
1224     }
1225     start_of_current_page_ = store_buffer_->Top();
1226     current_page_ = page;
1227   } else if (event == kStoreBufferFullEvent) {
1228     // The current page overflowed the store buffer again.  Wipe out its entries
1229     // in the store buffer and mark it scan-on-scavenge again.  This may happen
1230     // several times while scanning.
1231     if (current_page_ == NULL) {
1232       // Store Buffer overflowed while scanning promoted objects.  These are not
1233       // in any particular page, though they are likely to be clustered by the
1234       // allocation routines.
1235       store_buffer_->EnsureSpace(StoreBuffer::kStoreBufferSize / 2);
1236     } else {
1237       // Store Buffer overflowed while scanning a particular old space page for
1238       // pointers to new space.
1239       ASSERT(current_page_ == page);
1240       ASSERT(page != NULL);
1241       current_page_->set_scan_on_scavenge(true);
1242       ASSERT(start_of_current_page_ != store_buffer_->Top());
1243       store_buffer_->SetTop(start_of_current_page_);
1244     }
1245   } else {
1246     UNREACHABLE();
1247   }
1248 }
1249
1250
1251 void PromotionQueue::Initialize() {
1252   // Assumes that a NewSpacePage exactly fits a number of promotion queue
1253   // entries (where each is a pair of intptr_t). This allows us to simplify
1254   // the test for when to switch pages.
1255   ASSERT((Page::kPageSize - MemoryChunk::kBodyOffset) % (2 * kPointerSize)
1256          == 0);
1257   limit_ = reinterpret_cast<intptr_t*>(heap_->new_space()->ToSpaceStart());
1258   front_ = rear_ =
1259       reinterpret_cast<intptr_t*>(heap_->new_space()->ToSpaceEnd());
1260   emergency_stack_ = NULL;
1261   guard_ = false;
1262 }
1263
1264
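// Copies the promotion queue entries that live on the page containing the
// queue's rear onto a newly allocated emergency stack, so that the page
// can be reused for allocation.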
1265 void PromotionQueue::RelocateQueueHead() {
1266   ASSERT(emergency_stack_ == NULL);
1267
1268   Page* p = Page::FromAllocationTop(reinterpret_cast<Address>(rear_));
1269   intptr_t* head_start = rear_;
1270   intptr_t* head_end =
1271       Min(front_, reinterpret_cast<intptr_t*>(p->area_end()));
1272
1273   int entries_count =
1274       static_cast<int>(head_end - head_start) / kEntrySizeInWords;
1275
1276   emergency_stack_ = new List<Entry>(2 * entries_count);
1277
1278   while (head_start != head_end) {
1279     int size = static_cast<int>(*(head_start++));
1280     HeapObject* obj = reinterpret_cast<HeapObject*>(*(head_start++));
1281     emergency_stack_->Add(Entry(obj, size));
1282   }
1283   rear_ = head_end;
1284 }
1285
1286
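// Weak object retainer used during scavenges: objects outside the from
// space are retained as-is, evacuated objects are replaced by their
// forwarding address, and unscavenged objects are dropped.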
1287 class ScavengeWeakObjectRetainer : public WeakObjectRetainer {
1288  public:
1289   explicit ScavengeWeakObjectRetainer(Heap* heap) : heap_(heap) { }
1290
1291   virtual Object* RetainAs(Object* object) {
1292     if (!heap_->InFromSpace(object)) {
1293       return object;
1294     }
1295
1296     MapWord map_word = HeapObject::cast(object)->map_word();
1297     if (map_word.IsForwardingAddress()) {
1298       return map_word.ToForwardingAddress();
1299     }
1300     return NULL;
1301   }
1302
1303  private:
1304   Heap* heap_;
1305 };
1306
1307
1308 void Heap::Scavenge() {
1309   RelocationLock relocation_lock(this);
1310
1311 #ifdef VERIFY_HEAP
1312   if (FLAG_verify_heap) VerifyNonPointerSpacePointers();
1313 #endif
1314
1315   gc_state_ = SCAVENGE;
1316
1317   // Implements Cheney's copying algorithm
1318   LOG(isolate_, ResourceEvent("scavenge", "begin"));
1319
1320   // Clear descriptor cache.
1321   isolate_->descriptor_lookup_cache()->Clear();
1322
1323   // Used for updating survived_since_last_expansion_ at function end.
1324   intptr_t survived_watermark = PromotedSpaceSizeOfObjects();
1325
1326   CheckNewSpaceExpansionCriteria();
1327
1328   SelectScavengingVisitorsTable();
1329
1330   incremental_marking()->PrepareForScavenge();
1331
1332   paged_space(OLD_DATA_SPACE)->EnsureSweeperProgress(new_space_.Size());
1333   paged_space(OLD_POINTER_SPACE)->EnsureSweeperProgress(new_space_.Size());
1334
1335   // Flip the semispaces.  After flipping, to space is empty, from space has
1336   // live objects.
1337   new_space_.Flip();
1338   new_space_.ResetAllocationInfo();
1339
1340   // We need to sweep newly copied objects which can be either in the
1341   // to space or promoted to the old generation.  For to-space
1342   // objects, we treat the bottom of the to space as a queue.  Newly
1343   // copied and unswept objects lie between a 'front' mark and the
1344   // allocation pointer.
1345   //
1346   // Promoted objects can go into various old-generation spaces, and
1347   // can be allocated internally in the spaces (from the free list).
1348   // We treat the top of the to space as a queue of addresses of
1349   // promoted objects.  The addresses of newly promoted and unswept
1350   // objects lie between a 'front' mark and a 'rear' mark that is
1351   // updated as a side effect of promoting an object.
1352   //
1353   // There is guaranteed to be enough room at the top of the to space
1354   // for the addresses of promoted objects: every object promoted
1355   // frees up its size in bytes from the top of the new space, and
1356   // objects are at least one pointer in size.
1357   Address new_space_front = new_space_.ToSpaceStart();
1358   promotion_queue_.Initialize();
1359
1360 #ifdef DEBUG
1361   store_buffer()->Clean();
1362 #endif
1363
1364   ScavengeVisitor scavenge_visitor(this);
1365   // Copy roots.
1366   IterateRoots(&scavenge_visitor, VISIT_ALL_IN_SCAVENGE);
1367
1368   // Copy objects reachable from the old generation.
1369   {
1370     StoreBufferRebuildScope scope(this,
1371                                   store_buffer(),
1372                                   &ScavengeStoreBufferCallback);
1373     store_buffer()->IteratePointersToNewSpace(&ScavengeObject);
1374   }
1375
1376   // Copy objects reachable from simple cells by scavenging cell values
1377   // directly.
1378   HeapObjectIterator cell_iterator(cell_space_);
1379   for (HeapObject* heap_object = cell_iterator.Next();
1380        heap_object != NULL;
1381        heap_object = cell_iterator.Next()) {
1382     if (heap_object->IsCell()) {
1383       Cell* cell = Cell::cast(heap_object);
1384       Address value_address = cell->ValueAddress();
1385       scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(value_address));
1386     }
1387   }
1388
1389   // Copy objects reachable from global property cells by scavenging global
1390   // property cell values directly.
1391   HeapObjectIterator js_global_property_cell_iterator(property_cell_space_);
1392   for (HeapObject* heap_object = js_global_property_cell_iterator.Next();
1393        heap_object != NULL;
1394        heap_object = js_global_property_cell_iterator.Next()) {
1395     if (heap_object->IsPropertyCell()) {
1396       PropertyCell* cell = PropertyCell::cast(heap_object);
1397       Address value_address = cell->ValueAddress();
1398       scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(value_address));
1399       Address type_address = cell->TypeAddress();
1400       scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(type_address));
1401     }
1402   }
1403
1404   // Copy objects reachable from the code flushing candidates list.
1405   MarkCompactCollector* collector = mark_compact_collector();
1406   if (collector->is_code_flushing_enabled()) {
1407     collector->code_flusher()->IteratePointersToFromSpace(&scavenge_visitor);
1408   }
1409
1410   // Scavenge object reachable from the native contexts list directly.
1411   scavenge_visitor.VisitPointer(BitCast<Object**>(&native_contexts_list_));
1412
1413   new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
1414
1415   while (isolate()->global_handles()->IterateObjectGroups(
1416       &scavenge_visitor, &IsUnscavengedHeapObject)) {
1417     new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
1418   }
1419   isolate()->global_handles()->RemoveObjectGroups();
1420   isolate()->global_handles()->RemoveImplicitRefGroups();
1421
1422   isolate_->global_handles()->IdentifyNewSpaceWeakIndependentHandles(
1423       &IsUnscavengedHeapObject);
1424   isolate_->global_handles()->IterateNewSpaceWeakIndependentRoots(
1425       &scavenge_visitor);
1426   new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
1427
1428   UpdateNewSpaceReferencesInExternalStringTable(
1429       &UpdateNewSpaceReferenceInExternalStringTableEntry);
1430
1431   error_object_list_.UpdateReferencesInNewSpace(this);
1432
1433   promotion_queue_.Destroy();
1434
1435   if (!FLAG_watch_ic_patching) {
1436     isolate()->runtime_profiler()->UpdateSamplesAfterScavenge();
1437   }
1438   incremental_marking()->UpdateMarkingDequeAfterScavenge();
1439
1440   ScavengeWeakObjectRetainer weak_object_retainer(this);
1441   ProcessWeakReferences(&weak_object_retainer);
1442
1443   ASSERT(new_space_front == new_space_.top());
1444
1445   // Set age mark.
1446   new_space_.set_age_mark(new_space_.top());
1447
1448   new_space_.LowerInlineAllocationLimit(
1449       new_space_.inline_allocation_limit_step());
1450
1451   // Update how much has survived scavenge.
1452   IncrementYoungSurvivorsCounter(static_cast<int>(
1453       (PromotedSpaceSizeOfObjects() - survived_watermark) + new_space_.Size()));
1454
1455   LOG(isolate_, ResourceEvent("scavenge", "end"));
1456
1457   gc_state_ = NOT_IN_GC;
1458
1459   scavenges_since_last_idle_round_++;
1460 }
1461
1462
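// Returns the post-scavenge location of the external string pointed to by
// p, or NULL if the string was unreachable and has been finalized.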
1463 String* Heap::UpdateNewSpaceReferenceInExternalStringTableEntry(Heap* heap,
1464                                                                 Object** p) {
1465   MapWord first_word = HeapObject::cast(*p)->map_word();
1466
1467   if (!first_word.IsForwardingAddress()) {
1468     // Unreachable external string can be finalized.
1469     heap->FinalizeExternalString(String::cast(*p));
1470     return NULL;
1471   }
1472
1473   // String is still reachable.
1474   return String::cast(first_word.ToForwardingAddress());
1475 }
1476
1477
1478 void Heap::UpdateNewSpaceReferencesInExternalStringTable(
1479     ExternalStringTableUpdaterCallback updater_func) {
1480 #ifdef VERIFY_HEAP
1481   if (FLAG_verify_heap) {
1482     external_string_table_.Verify();
1483   }
1484 #endif
1485
1486   if (external_string_table_.new_space_strings_.is_empty()) return;
1487
1488   Object** start = &external_string_table_.new_space_strings_[0];
1489   Object** end = start + external_string_table_.new_space_strings_.length();
1490   Object** last = start;
1491
1492   for (Object** p = start; p < end; ++p) {
1493     ASSERT(InFromSpace(*p));
1494     String* target = updater_func(this, p);
1495
1496     if (target == NULL) continue;
1497
1498     ASSERT(target->IsExternalString());
1499
1500     if (InNewSpace(target)) {
1501       // String is still in new space.  Update the table entry.
1502       *last = target;
1503       ++last;
1504     } else {
1505       // String got promoted.  Move it to the old string list.
1506       external_string_table_.AddOldString(target);
1507     }
1508   }
1509
1510   ASSERT(last <= end);
1511   external_string_table_.ShrinkNewStrings(static_cast<int>(last - start));
1512 }
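
// A worked example of the in-place compaction above (illustrative only, not
// part of the original source): if new_space_strings_ holds [A, B, C], where
// A survives in new space, B is unreachable and C got promoted, the loop
// writes A back at index 0, finalizes B (updater_func returns NULL), moves C
// to old_space_strings_, and finally shrinks the new-space list to length 1.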
1513
1514
1515 void Heap::UpdateReferencesInExternalStringTable(
1516     ExternalStringTableUpdaterCallback updater_func) {
1517
1518   // Update old space string references.
1519   if (external_string_table_.old_space_strings_.length() > 0) {
1520     Object** start = &external_string_table_.old_space_strings_[0];
1521     Object** end = start + external_string_table_.old_space_strings_.length();
1522     for (Object** p = start; p < end; ++p) *p = updater_func(this, p);
1523   }
1524
1525   UpdateNewSpaceReferencesInExternalStringTable(updater_func);
1526 }
1527
1528
1529 template <class T>
1530 struct WeakListVisitor;
1531
1532
1533 template <class T>
1534 static Object* VisitWeakList(Heap* heap,
1535                              Object* list,
1536                              WeakObjectRetainer* retainer,
1537                              bool record_slots) {
1538   Object* undefined = heap->undefined_value();
1539   Object* head = undefined;
1540   T* tail = NULL;
1541   MarkCompactCollector* collector = heap->mark_compact_collector();
1542   while (list != undefined) {
1543     // Check whether to keep the candidate in the list.
1544     T* candidate = reinterpret_cast<T*>(list);
1545     Object* retained = retainer->RetainAs(list);
1546     if (retained != NULL) {
1547       if (head == undefined) {
1548         // First element in the list.
1549         head = retained;
1550       } else {
1551         // Subsequent elements in the list.
1552         ASSERT(tail != NULL);
1553         WeakListVisitor<T>::SetWeakNext(tail, retained);
1554         if (record_slots) {
1555           Object** next_slot =
1556             HeapObject::RawField(tail, WeakListVisitor<T>::WeakNextOffset());
1557           collector->RecordSlot(next_slot, next_slot, retained);
1558         }
1559       }
1560       // Retained object is new tail.
1561       ASSERT(!retained->IsUndefined());
1562       candidate = reinterpret_cast<T*>(retained);
1563       tail = candidate;
1564
1565
1566       // tail is a live object, visit it.
1567       WeakListVisitor<T>::VisitLiveObject(
1568           heap, tail, retainer, record_slots);
1569     } else {
1570       WeakListVisitor<T>::VisitPhantomObject(heap, candidate);
1571     }
1572
1573     // Move to next element in the list.
1574     list = WeakListVisitor<T>::WeakNext(candidate);
1575   }
1576
1577   // Terminate the list if it contains one or more elements.
1578   if (tail != NULL) {
1579     WeakListVisitor<T>::SetWeakNext(tail, undefined);
1580   }
1581   return head;
1582 }
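
// A minimal sketch of how VisitWeakList is consumed; it mirrors
// Heap::ProcessNativeContexts further down in this file:
//
//   Object* head = VisitWeakList<Context>(
//       heap, heap->native_contexts_list(), retainer, record_slots);
//   // head is the first retained element, or undefined_value() if every
//   // element of the list was dropped by the retainer.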
1583
1584
1585 template<>
1586 struct WeakListVisitor<JSFunction> {
1587   static void SetWeakNext(JSFunction* function, Object* next) {
1588     function->set_next_function_link(next);
1589   }
1590
1591   static Object* WeakNext(JSFunction* function) {
1592     return function->next_function_link();
1593   }
1594
1595   static int WeakNextOffset() {
1596     return JSFunction::kNextFunctionLinkOffset;
1597   }
1598
1599   static void VisitLiveObject(Heap*, JSFunction*,
1600                               WeakObjectRetainer*, bool) {
1601   }
1602
1603   static void VisitPhantomObject(Heap*, JSFunction*) {
1604   }
1605 };
1606
1607
1608 template<>
1609 struct WeakListVisitor<Context> {
1610   static void SetWeakNext(Context* context, Object* next) {
1611     context->set(Context::NEXT_CONTEXT_LINK,
1612                  next,
1613                  UPDATE_WRITE_BARRIER);
1614   }
1615
1616   static Object* WeakNext(Context* context) {
1617     return context->get(Context::NEXT_CONTEXT_LINK);
1618   }
1619
1620   static void VisitLiveObject(Heap* heap,
1621                               Context* context,
1622                               WeakObjectRetainer* retainer,
1623                               bool record_slots) {
1624     // Process the weak list of optimized functions for the context.
1625     Object* function_list_head =
1626         VisitWeakList<JSFunction>(
1627             heap,
1628             context->get(Context::OPTIMIZED_FUNCTIONS_LIST),
1629             retainer,
1630             record_slots);
1631     context->set(Context::OPTIMIZED_FUNCTIONS_LIST,
1632                  function_list_head,
1633                  UPDATE_WRITE_BARRIER);
1634     if (record_slots) {
1635       Object** optimized_functions =
1636           HeapObject::RawField(
1637               context, FixedArray::SizeFor(Context::OPTIMIZED_FUNCTIONS_LIST));
1638       heap->mark_compact_collector()->RecordSlot(
1639           optimized_functions, optimized_functions, function_list_head);
1640     }
1641   }
1642
1643   static void VisitPhantomObject(Heap*, Context*) {
1644   }
1645
1646   static int WeakNextOffset() {
1647     return FixedArray::SizeFor(Context::NEXT_CONTEXT_LINK);
1648   }
1649 };
1650
1651
1652 void Heap::ProcessWeakReferences(WeakObjectRetainer* retainer) {
1653   // We don't record weak slots during marking or scavenges.
1654   // Instead we do it once when we complete the mark-compact cycle.
1655   // Note that the write barrier has no effect if we are already in the middle
1656   // of a compacting mark-sweep cycle, so we have to record slots manually.
1657   bool record_slots =
1658       gc_state() == MARK_COMPACT &&
1659       mark_compact_collector()->is_compacting();
1660   ProcessArrayBuffers(retainer, record_slots);
1661   ProcessNativeContexts(retainer, record_slots);
1662   ProcessAllocationSites(retainer, record_slots);
1663 }
1664
1665 void Heap::ProcessNativeContexts(WeakObjectRetainer* retainer,
1666                                  bool record_slots) {
1667   Object* head =
1668       VisitWeakList<Context>(
1669           this, native_contexts_list(), retainer, record_slots);
1670   // Update the head of the list of contexts.
1671   native_contexts_list_ = head;
1672 }
1673
1674
1675 template<>
1676 struct WeakListVisitor<JSArrayBufferView> {
1677   static void SetWeakNext(JSArrayBufferView* obj, Object* next) {
1678     obj->set_weak_next(next);
1679   }
1680
1681   static Object* WeakNext(JSArrayBufferView* obj) {
1682     return obj->weak_next();
1683   }
1684
1685   static void VisitLiveObject(Heap*,
1686                               JSArrayBufferView* obj,
1687                               WeakObjectRetainer* retainer,
1688                               bool record_slots) {}
1689
1690   static void VisitPhantomObject(Heap*, JSArrayBufferView*) {}
1691
1692   static int WeakNextOffset() {
1693     return JSArrayBufferView::kWeakNextOffset;
1694   }
1695 };
1696
1697
1698 template<>
1699 struct WeakListVisitor<JSArrayBuffer> {
1700   static void SetWeakNext(JSArrayBuffer* obj, Object* next) {
1701     obj->set_weak_next(next);
1702   }
1703
1704   static Object* WeakNext(JSArrayBuffer* obj) {
1705     return obj->weak_next();
1706   }
1707
1708   static void VisitLiveObject(Heap* heap,
1709                               JSArrayBuffer* array_buffer,
1710                               WeakObjectRetainer* retainer,
1711                               bool record_slots) {
1712     Object* typed_array_obj =
1713         VisitWeakList<JSArrayBufferView>(
1714             heap,
1715             array_buffer->weak_first_view(),
1716             retainer, record_slots);
1717     array_buffer->set_weak_first_view(typed_array_obj);
1718     if (typed_array_obj != heap->undefined_value() && record_slots) {
1719       Object** slot = HeapObject::RawField(
1720           array_buffer, JSArrayBuffer::kWeakFirstViewOffset);
1721       heap->mark_compact_collector()->RecordSlot(slot, slot, typed_array_obj);
1722     }
1723   }
1724
1725   static void VisitPhantomObject(Heap* heap, JSArrayBuffer* phantom) {
1726     Runtime::FreeArrayBuffer(heap->isolate(), phantom);
1727   }
1728
1729   static int WeakNextOffset() {
1730     return JSArrayBuffer::kWeakNextOffset;
1731   }
1732 };
1733
1734
1735 void Heap::ProcessArrayBuffers(WeakObjectRetainer* retainer,
1736                                bool record_slots) {
1737   Object* array_buffer_obj =
1738       VisitWeakList<JSArrayBuffer>(this,
1739                                    array_buffers_list(),
1740                                    retainer, record_slots);
1741   set_array_buffers_list(array_buffer_obj);
1742 }
1743
1744
1745 void Heap::TearDownArrayBuffers() {
1746   Object* undefined = undefined_value();
1747   for (Object* o = array_buffers_list(); o != undefined;) {
1748     JSArrayBuffer* buffer = JSArrayBuffer::cast(o);
1749     Runtime::FreeArrayBuffer(isolate(), buffer);
1750     o = buffer->weak_next();
1751   }
1752   array_buffers_list_ = undefined;
1753 }
1754
1755
1756 template<>
1757 struct WeakListVisitor<AllocationSite> {
1758   static void SetWeakNext(AllocationSite* obj, Object* next) {
1759     obj->set_weak_next(next);
1760   }
1761
1762   static Object* WeakNext(AllocationSite* obj) {
1763     return obj->weak_next();
1764   }
1765
1766   static void VisitLiveObject(Heap* heap,
1767                               AllocationSite* array_buffer,
1768                               WeakObjectRetainer* retainer,
1769                               bool record_slots) {}
1770
1771   static void VisitPhantomObject(Heap* heap, AllocationSite* phantom) {}
1772
1773   static int WeakNextOffset() {
1774     return AllocationSite::kWeakNextOffset;
1775   }
1776 };
1777
1778
1779 void Heap::ProcessAllocationSites(WeakObjectRetainer* retainer,
1780                                   bool record_slots) {
1781   Object* allocation_site_obj =
1782       VisitWeakList<AllocationSite>(this,
1783                                     allocation_sites_list(),
1784                                     retainer, record_slots);
1785   set_allocation_sites_list(allocation_site_obj);
1786 }
1787
1788
1789 void Heap::VisitExternalResources(v8::ExternalResourceVisitor* visitor) {
1790   DisallowHeapAllocation no_allocation;
1791
1792   // Both the external string table and the string table may contain
1793   // external strings, but neither lists them exhaustively, nor is the
1794   // intersection set empty.  Therefore we iterate over the external string
1795   // table first, ignoring internalized strings, and then over the
1796   // internalized string table.
1797
1798   class ExternalStringTableVisitorAdapter : public ObjectVisitor {
1799    public:
1800     explicit ExternalStringTableVisitorAdapter(
1801         v8::ExternalResourceVisitor* visitor) : visitor_(visitor) {}
1802     virtual void VisitPointers(Object** start, Object** end) {
1803       for (Object** p = start; p < end; p++) {
1804         // Visit non-internalized external strings,
1805         // since internalized strings are listed in the string table.
1806         if (!(*p)->IsInternalizedString()) {
1807           ASSERT((*p)->IsExternalString());
1808           visitor_->VisitExternalString(Utils::ToLocal(
1809               Handle<String>(String::cast(*p))));
1810         }
1811       }
1812     }
1813    private:
1814     v8::ExternalResourceVisitor* visitor_;
1815   } external_string_table_visitor(visitor);
1816
1817   external_string_table_.Iterate(&external_string_table_visitor);
1818
1819   class StringTableVisitorAdapter : public ObjectVisitor {
1820    public:
1821     explicit StringTableVisitorAdapter(
1822         v8::ExternalResourceVisitor* visitor) : visitor_(visitor) {}
1823     virtual void VisitPointers(Object** start, Object** end) {
1824       for (Object** p = start; p < end; p++) {
1825         if ((*p)->IsExternalString()) {
1826           ASSERT((*p)->IsInternalizedString());
1827           visitor_->VisitExternalString(Utils::ToLocal(
1828               Handle<String>(String::cast(*p))));
1829         }
1830       }
1831     }
1832    private:
1833     v8::ExternalResourceVisitor* visitor_;
1834   } string_table_visitor(visitor);
1835
1836   string_table()->IterateElements(&string_table_visitor);
1837 }
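
// A minimal sketch of a caller of VisitExternalResources, assuming the public
// v8::ExternalResourceVisitor interface from v8.h (the exact
// VisitExternalString signature shown here is an assumption):
//
//   class ExternalStringCounter : public v8::ExternalResourceVisitor {
//    public:
//     ExternalStringCounter() : count_(0) {}
//     virtual void VisitExternalString(v8::Handle<v8::String> string) {
//       count_++;  // One could also inspect the external resource here.
//     }
//     int count_;
//   };
//
//   ExternalStringCounter counter;
//   heap->VisitExternalResources(&counter);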
1838
1839
1840 class NewSpaceScavenger : public StaticNewSpaceVisitor<NewSpaceScavenger> {
1841  public:
1842   static inline void VisitPointer(Heap* heap, Object** p) {
1843     Object* object = *p;
1844     if (!heap->InNewSpace(object)) return;
1845     Heap::ScavengeObject(reinterpret_cast<HeapObject**>(p),
1846                          reinterpret_cast<HeapObject*>(object));
1847   }
1848 };
1849
1850
1851 Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
1852                          Address new_space_front) {
1853   do {
1854     SemiSpace::AssertValidRange(new_space_front, new_space_.top());
1855     // The addresses new_space_front and new_space_.top() define a
1856     // queue of unprocessed copied objects.  Process them until the
1857     // queue is empty.
1858     while (new_space_front != new_space_.top()) {
1859       if (!NewSpacePage::IsAtEnd(new_space_front)) {
1860         HeapObject* object = HeapObject::FromAddress(new_space_front);
1861         new_space_front +=
1862           NewSpaceScavenger::IterateBody(object->map(), object);
1863       } else {
1864         new_space_front =
1865             NewSpacePage::FromLimit(new_space_front)->next_page()->area_start();
1866       }
1867     }
1868
1869     // Promote and process all the to-be-promoted objects.
1870     {
1871       StoreBufferRebuildScope scope(this,
1872                                     store_buffer(),
1873                                     &ScavengeStoreBufferCallback);
1874       while (!promotion_queue()->is_empty()) {
1875         HeapObject* target;
1876         int size;
1877         promotion_queue()->remove(&target, &size);
1878
1879         // A promoted object might already be partially visited
1880         // during old-space pointer iteration. Thus we search specifically
1881         // for pointers into the from-semispace instead of looking for
1882         // pointers to new space.
1883         ASSERT(!target->IsMap());
1884         IterateAndMarkPointersToFromSpace(target->address(),
1885                                           target->address() + size,
1886                                           &ScavengeObject);
1887       }
1888     }
1889
1890     // Take another spin if there are now unswept objects in new space
1891     // (there are currently no more unswept promoted objects).
1892   } while (new_space_front != new_space_.top());
1893
1894   return new_space_front;
1895 }
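
// Illustrative note (not from the original source): new_space_front acts as
// the scan pointer of a Cheney-style breadth-first copy, trailing the
// allocation pointer new_space_.top(). Every object between the two pointers
// has been copied into to-space but not yet had its own fields scavenged, so
// the loop above terminates exactly when the scan pointer catches up with
// top().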
1896
1897
1898 STATIC_ASSERT((FixedDoubleArray::kHeaderSize & kDoubleAlignmentMask) == 0);
1899
1900
1901 INLINE(static HeapObject* EnsureDoubleAligned(Heap* heap,
1902                                               HeapObject* object,
1903                                               int size));
1904
1905 static HeapObject* EnsureDoubleAligned(Heap* heap,
1906                                        HeapObject* object,
1907                                        int size) {
1908   if ((OffsetFrom(object->address()) & kDoubleAlignmentMask) != 0) {
1909     heap->CreateFillerObjectAt(object->address(), kPointerSize);
1910     return HeapObject::FromAddress(object->address() + kPointerSize);
1911   } else {
1912     heap->CreateFillerObjectAt(object->address() + size - kPointerSize,
1913                                kPointerSize);
1914     return object;
1915   }
1916 }
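
// A worked example of the alignment fix-up above, assuming a 32-bit build
// (kPointerSize == 4, doubles aligned to 8 bytes); the addresses are made up:
//   - a 12-byte allocation at 0x1004 for an 8-byte double-aligned payload:
//     the start is misaligned, so a one-word filler is written at 0x1004 and
//     the payload is returned at 0x1008.
//   - the same 12-byte allocation at 0x1008: the start is already aligned, so
//     the spare word reserved by EvacuateObject becomes a one-word filler at
//     0x1008 + 12 - 4 = 0x1010, directly after the payload.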
1917
1918
1919 enum LoggingAndProfiling {
1920   LOGGING_AND_PROFILING_ENABLED,
1921   LOGGING_AND_PROFILING_DISABLED
1922 };
1923
1924
1925 enum MarksHandling { TRANSFER_MARKS, IGNORE_MARKS };
1926
1927
1928 template<MarksHandling marks_handling,
1929          LoggingAndProfiling logging_and_profiling_mode>
1930 class ScavengingVisitor : public StaticVisitorBase {
1931  public:
1932   static void Initialize() {
1933     table_.Register(kVisitSeqOneByteString, &EvacuateSeqOneByteString);
1934     table_.Register(kVisitSeqTwoByteString, &EvacuateSeqTwoByteString);
1935     table_.Register(kVisitShortcutCandidate, &EvacuateShortcutCandidate);
1936     table_.Register(kVisitByteArray, &EvacuateByteArray);
1937     table_.Register(kVisitFixedArray, &EvacuateFixedArray);
1938     table_.Register(kVisitFixedDoubleArray, &EvacuateFixedDoubleArray);
1939
1940     table_.Register(kVisitNativeContext,
1941                     &ObjectEvacuationStrategy<POINTER_OBJECT>::
1942                         template VisitSpecialized<Context::kSize>);
1943
1944     table_.Register(kVisitConsString,
1945                     &ObjectEvacuationStrategy<POINTER_OBJECT>::
1946                         template VisitSpecialized<ConsString::kSize>);
1947
1948     table_.Register(kVisitSlicedString,
1949                     &ObjectEvacuationStrategy<POINTER_OBJECT>::
1950                         template VisitSpecialized<SlicedString::kSize>);
1951
1952     table_.Register(kVisitSymbol,
1953                     &ObjectEvacuationStrategy<POINTER_OBJECT>::
1954                         template VisitSpecialized<Symbol::kSize>);
1955
1956     table_.Register(kVisitSharedFunctionInfo,
1957                     &ObjectEvacuationStrategy<POINTER_OBJECT>::
1958                         template VisitSpecialized<SharedFunctionInfo::kSize>);
1959
1960     table_.Register(kVisitJSWeakMap,
1961                     &ObjectEvacuationStrategy<POINTER_OBJECT>::
1962                     Visit);
1963
1964     table_.Register(kVisitJSWeakSet,
1965                     &ObjectEvacuationStrategy<POINTER_OBJECT>::
1966                     Visit);
1967
1968     table_.Register(kVisitJSArrayBuffer,
1969                     &ObjectEvacuationStrategy<POINTER_OBJECT>::
1970                     Visit);
1971
1972     table_.Register(kVisitJSTypedArray,
1973                     &ObjectEvacuationStrategy<POINTER_OBJECT>::
1974                     Visit);
1975
1976     table_.Register(kVisitJSDataView,
1977                     &ObjectEvacuationStrategy<POINTER_OBJECT>::
1978                     Visit);
1979
1980     table_.Register(kVisitJSRegExp,
1981                     &ObjectEvacuationStrategy<POINTER_OBJECT>::
1982                     Visit);
1983
1984     if (marks_handling == IGNORE_MARKS) {
1985       table_.Register(kVisitJSFunction,
1986                       &ObjectEvacuationStrategy<POINTER_OBJECT>::
1987                           template VisitSpecialized<JSFunction::kSize>);
1988     } else {
1989       table_.Register(kVisitJSFunction, &EvacuateJSFunction);
1990     }
1991
1992     table_.RegisterSpecializations<ObjectEvacuationStrategy<DATA_OBJECT>,
1993                                    kVisitDataObject,
1994                                    kVisitDataObjectGeneric>();
1995
1996     table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>,
1997                                    kVisitJSObject,
1998                                    kVisitJSObjectGeneric>();
1999
2000     table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>,
2001                                    kVisitStruct,
2002                                    kVisitStructGeneric>();
2003   }
2004
2005   static VisitorDispatchTable<ScavengingCallback>* GetTable() {
2006     return &table_;
2007   }
2008
2009  private:
2010   enum ObjectContents  { DATA_OBJECT, POINTER_OBJECT };
2011   enum SizeRestriction { SMALL, UNKNOWN_SIZE };
2012
2013   static void RecordCopiedObject(Heap* heap, HeapObject* obj) {
2014     bool should_record = false;
2015 #ifdef DEBUG
2016     should_record = FLAG_heap_stats;
2017 #endif
2018     should_record = should_record || FLAG_log_gc;
2019     if (should_record) {
2020       if (heap->new_space()->Contains(obj)) {
2021         heap->new_space()->RecordAllocation(obj);
2022       } else {
2023         heap->new_space()->RecordPromotion(obj);
2024       }
2025     }
2026   }
2027
2028   // Helper function used by EvacuateObject to copy a source object to an
2029   // allocated target object and update the forwarding pointer in the
2030   // source object.
2031   INLINE(static void MigrateObject(Heap* heap,
2032                                    HeapObject* source,
2033                                    HeapObject* target,
2034                                    int size)) {
2035     // Copy the content of source to target.
2036     heap->CopyBlock(target->address(), source->address(), size);
2037
2038     // Set the forwarding address.
2039     source->set_map_word(MapWord::FromForwardingAddress(target));
2040
2041     if (logging_and_profiling_mode == LOGGING_AND_PROFILING_ENABLED) {
2042       // Update NewSpace stats if necessary.
2043       RecordCopiedObject(heap, target);
2044       HEAP_PROFILE(heap, ObjectMoveEvent(source->address(), target->address()));
2045       Isolate* isolate = heap->isolate();
2046       if (isolate->logger()->is_logging_code_events() ||
2047           isolate->cpu_profiler()->is_profiling()) {
2048         if (target->IsSharedFunctionInfo()) {
2049           PROFILE(isolate, SharedFunctionInfoMoveEvent(
2050               source->address(), target->address()));
2051         }
2052       }
2053     }
2054
2055     if (marks_handling == TRANSFER_MARKS) {
2056       if (Marking::TransferColor(source, target)) {
2057         MemoryChunk::IncrementLiveBytesFromGC(target->address(), size);
2058       }
2059     }
2060   }
2061
2062
2063   template<ObjectContents object_contents,
2064            SizeRestriction size_restriction,
2065            int alignment>
2066   static inline void EvacuateObject(Map* map,
2067                                     HeapObject** slot,
2068                                     HeapObject* object,
2069                                     int object_size) {
2070     SLOW_ASSERT((size_restriction != SMALL) ||
2071                 (object_size <= Page::kMaxNonCodeHeapObjectSize));
2072     SLOW_ASSERT(object->Size() == object_size);
2073
2074     int allocation_size = object_size;
2075     if (alignment != kObjectAlignment) {
2076       ASSERT(alignment == kDoubleAlignment);
2077       allocation_size += kPointerSize;
2078     }
2079
2080     Heap* heap = map->GetHeap();
2081     if (heap->ShouldBePromoted(object->address(), object_size)) {
2082       MaybeObject* maybe_result;
2083
2084       if ((size_restriction != SMALL) &&
2085           (allocation_size > Page::kMaxNonCodeHeapObjectSize)) {
2086         maybe_result = heap->lo_space()->AllocateRaw(allocation_size,
2087                                                      NOT_EXECUTABLE);
2088       } else {
2089         if (object_contents == DATA_OBJECT) {
2090           maybe_result = heap->old_data_space()->AllocateRaw(allocation_size);
2091         } else {
2092           maybe_result =
2093               heap->old_pointer_space()->AllocateRaw(allocation_size);
2094         }
2095       }
2096
2097       Object* result = NULL;  // Initialization to please compiler.
2098       if (maybe_result->ToObject(&result)) {
2099         HeapObject* target = HeapObject::cast(result);
2100
2101         if (alignment != kObjectAlignment) {
2102           target = EnsureDoubleAligned(heap, target, allocation_size);
2103         }
2104
2105         // Order is important: slot might be inside of the target if target
2106         // was allocated over a dead object and slot comes from the store
2107         // buffer.
2108         *slot = target;
2109         MigrateObject(heap, object, target, object_size);
2110
2111         if (object_contents == POINTER_OBJECT) {
2112           if (map->instance_type() == JS_FUNCTION_TYPE) {
2113             heap->promotion_queue()->insert(
2114                 target, JSFunction::kNonWeakFieldsEndOffset);
2115           } else {
2116             heap->promotion_queue()->insert(target, object_size);
2117           }
2118         }
2119
2120         heap->tracer()->increment_promoted_objects_size(object_size);
2121         return;
2122       }
2123     }
2124     MaybeObject* allocation = heap->new_space()->AllocateRaw(allocation_size);
2125     heap->promotion_queue()->SetNewLimit(heap->new_space()->top());
2126     Object* result = allocation->ToObjectUnchecked();
2127     HeapObject* target = HeapObject::cast(result);
2128
2129     if (alignment != kObjectAlignment) {
2130       target = EnsureDoubleAligned(heap, target, allocation_size);
2131     }
2132
2133     // Order is important: slot might be inside of the target if target
2134     // was allocated over a dead object and slot comes from the store
2135     // buffer.
2136     *slot = target;
2137     MigrateObject(heap, object, target, object_size);
2138     return;
2139   }
2140
2141
2142   static inline void EvacuateJSFunction(Map* map,
2143                                         HeapObject** slot,
2144                                         HeapObject* object) {
2145     ObjectEvacuationStrategy<POINTER_OBJECT>::
2146         template VisitSpecialized<JSFunction::kSize>(map, slot, object);
2147
2148     HeapObject* target = *slot;
2149     MarkBit mark_bit = Marking::MarkBitFrom(target);
2150     if (Marking::IsBlack(mark_bit)) {
2151       // This object is black and it might not be rescanned by the marker.
2152       // We should explicitly record the code entry slot for compaction
2153       // because promotion queue processing (IterateAndMarkPointersToFromSpace)
2154       // will miss it, as it is not HeapObject-tagged.
2155       Address code_entry_slot =
2156           target->address() + JSFunction::kCodeEntryOffset;
2157       Code* code = Code::cast(Code::GetObjectFromEntryAddress(code_entry_slot));
2158       map->GetHeap()->mark_compact_collector()->
2159           RecordCodeEntrySlot(code_entry_slot, code);
2160     }
2161   }
2162
2163
2164   static inline void EvacuateFixedArray(Map* map,
2165                                         HeapObject** slot,
2166                                         HeapObject* object) {
2167     int object_size = FixedArray::BodyDescriptor::SizeOf(map, object);
2168     EvacuateObject<POINTER_OBJECT, UNKNOWN_SIZE, kObjectAlignment>(map,
2169                                                  slot,
2170                                                  object,
2171                                                  object_size);
2172   }
2173
2174
2175   static inline void EvacuateFixedDoubleArray(Map* map,
2176                                               HeapObject** slot,
2177                                               HeapObject* object) {
2178     int length = reinterpret_cast<FixedDoubleArray*>(object)->length();
2179     int object_size = FixedDoubleArray::SizeFor(length);
2180     EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE, kDoubleAlignment>(
2181         map,
2182         slot,
2183         object,
2184         object_size);
2185   }
2186
2187
2188   static inline void EvacuateByteArray(Map* map,
2189                                        HeapObject** slot,
2190                                        HeapObject* object) {
2191     int object_size = reinterpret_cast<ByteArray*>(object)->ByteArraySize();
2192     EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE, kObjectAlignment>(
2193         map, slot, object, object_size);
2194   }
2195
2196
2197   static inline void EvacuateSeqOneByteString(Map* map,
2198                                             HeapObject** slot,
2199                                             HeapObject* object) {
2200     int object_size = SeqOneByteString::cast(object)->
2201         SeqOneByteStringSize(map->instance_type());
2202     EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE, kObjectAlignment>(
2203         map, slot, object, object_size);
2204   }
2205
2206
2207   static inline void EvacuateSeqTwoByteString(Map* map,
2208                                               HeapObject** slot,
2209                                               HeapObject* object) {
2210     int object_size = SeqTwoByteString::cast(object)->
2211         SeqTwoByteStringSize(map->instance_type());
2212     EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE, kObjectAlignment>(
2213         map, slot, object, object_size);
2214   }
2215
2216
2217   static inline bool IsShortcutCandidate(int type) {
2218     return ((type & kShortcutTypeMask) == kShortcutTypeTag);
2219   }
2220
2221   static inline void EvacuateShortcutCandidate(Map* map,
2222                                                HeapObject** slot,
2223                                                HeapObject* object) {
2224     ASSERT(IsShortcutCandidate(map->instance_type()));
2225
2226     Heap* heap = map->GetHeap();
2227
2228     if (marks_handling == IGNORE_MARKS &&
2229         ConsString::cast(object)->unchecked_second() ==
2230         heap->empty_string()) {
2231       HeapObject* first =
2232           HeapObject::cast(ConsString::cast(object)->unchecked_first());
2233
2234       *slot = first;
2235
2236       if (!heap->InNewSpace(first)) {
2237         object->set_map_word(MapWord::FromForwardingAddress(first));
2238         return;
2239       }
2240
2241       MapWord first_word = first->map_word();
2242       if (first_word.IsForwardingAddress()) {
2243         HeapObject* target = first_word.ToForwardingAddress();
2244
2245         *slot = target;
2246         object->set_map_word(MapWord::FromForwardingAddress(target));
2247         return;
2248       }
2249
2250       heap->DoScavengeObject(first->map(), slot, first);
2251       object->set_map_word(MapWord::FromForwardingAddress(*slot));
2252       return;
2253     }
2254
2255     int object_size = ConsString::kSize;
2256     EvacuateObject<POINTER_OBJECT, SMALL, kObjectAlignment>(
2257         map, slot, object, object_size);
2258   }
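
  // Illustrative example of the shortcut above (not from the original
  // source): a ConsString whose second component is the empty string (for
  // example, one left behind by in-place flattening) is not copied at all
  // during the scavenge; the slot is rewritten to point at the first
  // component, and the cons wrapper's map word becomes a forwarding pointer
  // to that component.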
2259
2260   template<ObjectContents object_contents>
2261   class ObjectEvacuationStrategy {
2262    public:
2263     template<int object_size>
2264     static inline void VisitSpecialized(Map* map,
2265                                         HeapObject** slot,
2266                                         HeapObject* object) {
2267       EvacuateObject<object_contents, SMALL, kObjectAlignment>(
2268           map, slot, object, object_size);
2269     }
2270
2271     static inline void Visit(Map* map,
2272                              HeapObject** slot,
2273                              HeapObject* object) {
2274       int object_size = map->instance_size();
2275       EvacuateObject<object_contents, SMALL, kObjectAlignment>(
2276           map, slot, object, object_size);
2277     }
2278   };
2279
2280   static VisitorDispatchTable<ScavengingCallback> table_;
2281 };
2282
2283
2284 template<MarksHandling marks_handling,
2285          LoggingAndProfiling logging_and_profiling_mode>
2286 VisitorDispatchTable<ScavengingCallback>
2287     ScavengingVisitor<marks_handling, logging_and_profiling_mode>::table_;
2288
2289
2290 static void InitializeScavengingVisitorsTables() {
2291   ScavengingVisitor<TRANSFER_MARKS,
2292                     LOGGING_AND_PROFILING_DISABLED>::Initialize();
2293   ScavengingVisitor<IGNORE_MARKS, LOGGING_AND_PROFILING_DISABLED>::Initialize();
2294   ScavengingVisitor<TRANSFER_MARKS,
2295                     LOGGING_AND_PROFILING_ENABLED>::Initialize();
2296   ScavengingVisitor<IGNORE_MARKS, LOGGING_AND_PROFILING_ENABLED>::Initialize();
2297 }
2298
2299
2300 void Heap::SelectScavengingVisitorsTable() {
2301   bool logging_and_profiling =
2302       isolate()->logger()->is_logging() ||
2303       isolate()->cpu_profiler()->is_profiling() ||
2304       (isolate()->heap_profiler() != NULL &&
2305        isolate()->heap_profiler()->is_profiling());
2306
2307   if (!incremental_marking()->IsMarking()) {
2308     if (!logging_and_profiling) {
2309       scavenging_visitors_table_.CopyFrom(
2310           ScavengingVisitor<IGNORE_MARKS,
2311                             LOGGING_AND_PROFILING_DISABLED>::GetTable());
2312     } else {
2313       scavenging_visitors_table_.CopyFrom(
2314           ScavengingVisitor<IGNORE_MARKS,
2315                             LOGGING_AND_PROFILING_ENABLED>::GetTable());
2316     }
2317   } else {
2318     if (!logging_and_profiling) {
2319       scavenging_visitors_table_.CopyFrom(
2320           ScavengingVisitor<TRANSFER_MARKS,
2321                             LOGGING_AND_PROFILING_DISABLED>::GetTable());
2322     } else {
2323       scavenging_visitors_table_.CopyFrom(
2324           ScavengingVisitor<TRANSFER_MARKS,
2325                             LOGGING_AND_PROFILING_ENABLED>::GetTable());
2326     }
2327
2328     if (incremental_marking()->IsCompacting()) {
2329       // When compacting, forbid short-circuiting of cons strings.
2330       // The scavenging code relies on the fact that new space objects
2331       // can't be evacuated into an evacuation candidate, but
2332       // short-circuiting violates this assumption.
2333       scavenging_visitors_table_.Register(
2334           StaticVisitorBase::kVisitShortcutCandidate,
2335           scavenging_visitors_table_.GetVisitorById(
2336               StaticVisitorBase::kVisitConsString));
2337     }
2338   }
2339 }
2340
2341
2342 void Heap::ScavengeObjectSlow(HeapObject** p, HeapObject* object) {
2343   SLOW_ASSERT(HEAP->InFromSpace(object));
2344   MapWord first_word = object->map_word();
2345   SLOW_ASSERT(!first_word.IsForwardingAddress());
2346   Map* map = first_word.ToMap();
2347   map->GetHeap()->DoScavengeObject(map, p, object);
2348 }
2349
2350
2351 MaybeObject* Heap::AllocatePartialMap(InstanceType instance_type,
2352                                       int instance_size) {
2353   Object* result;
2354   MaybeObject* maybe_result = AllocateRawMap();
2355   if (!maybe_result->ToObject(&result)) return maybe_result;
2356
2357   // Map::cast cannot be used due to uninitialized map field.
2358   reinterpret_cast<Map*>(result)->set_map(raw_unchecked_meta_map());
2359   reinterpret_cast<Map*>(result)->set_instance_type(instance_type);
2360   reinterpret_cast<Map*>(result)->set_instance_size(instance_size);
2361   reinterpret_cast<Map*>(result)->set_visitor_id(
2362         StaticVisitorBase::GetVisitorId(instance_type, instance_size));
2363   reinterpret_cast<Map*>(result)->set_inobject_properties(0);
2364   reinterpret_cast<Map*>(result)->set_pre_allocated_property_fields(0);
2365   reinterpret_cast<Map*>(result)->set_unused_property_fields(0);
2366   reinterpret_cast<Map*>(result)->set_bit_field(0);
2367   reinterpret_cast<Map*>(result)->set_bit_field2(0);
2368   int bit_field3 = Map::EnumLengthBits::encode(Map::kInvalidEnumCache) |
2369                    Map::OwnsDescriptors::encode(true);
2370   reinterpret_cast<Map*>(result)->set_bit_field3(bit_field3);
2371   return result;
2372 }
2373
2374
2375 MaybeObject* Heap::AllocateMap(InstanceType instance_type,
2376                                int instance_size,
2377                                ElementsKind elements_kind) {
2378   Object* result;
2379   MaybeObject* maybe_result = AllocateRawMap();
2380   if (!maybe_result->To(&result)) return maybe_result;
2381
2382   Map* map = reinterpret_cast<Map*>(result);
2383   map->set_map_no_write_barrier(meta_map());
2384   map->set_instance_type(instance_type);
2385   map->set_visitor_id(
2386       StaticVisitorBase::GetVisitorId(instance_type, instance_size));
2387   map->set_prototype(null_value(), SKIP_WRITE_BARRIER);
2388   map->set_constructor(null_value(), SKIP_WRITE_BARRIER);
2389   map->set_instance_size(instance_size);
2390   map->set_inobject_properties(0);
2391   map->set_pre_allocated_property_fields(0);
2392   map->set_code_cache(empty_fixed_array(), SKIP_WRITE_BARRIER);
2393   map->set_dependent_code(DependentCode::cast(empty_fixed_array()),
2394                           SKIP_WRITE_BARRIER);
2395   map->init_back_pointer(undefined_value());
2396   map->set_unused_property_fields(0);
2397   map->set_instance_descriptors(empty_descriptor_array());
2398   map->set_bit_field(0);
2399   map->set_bit_field2(1 << Map::kIsExtensible);
2400   int bit_field3 = Map::EnumLengthBits::encode(Map::kInvalidEnumCache) |
2401                    Map::OwnsDescriptors::encode(true);
2402   map->set_bit_field3(bit_field3);
2403   map->set_elements_kind(elements_kind);
2404
2405   return map;
2406 }
2407
2408
2409 MaybeObject* Heap::AllocateCodeCache() {
2410   CodeCache* code_cache;
2411   { MaybeObject* maybe_code_cache = AllocateStruct(CODE_CACHE_TYPE);
2412     if (!maybe_code_cache->To(&code_cache)) return maybe_code_cache;
2413   }
2414   code_cache->set_default_cache(empty_fixed_array(), SKIP_WRITE_BARRIER);
2415   code_cache->set_normal_type_cache(undefined_value(), SKIP_WRITE_BARRIER);
2416   return code_cache;
2417 }
2418
2419
2420 MaybeObject* Heap::AllocatePolymorphicCodeCache() {
2421   return AllocateStruct(POLYMORPHIC_CODE_CACHE_TYPE);
2422 }
2423
2424
2425 MaybeObject* Heap::AllocateAccessorPair() {
2426   AccessorPair* accessors;
2427   { MaybeObject* maybe_accessors = AllocateStruct(ACCESSOR_PAIR_TYPE);
2428     if (!maybe_accessors->To(&accessors)) return maybe_accessors;
2429   }
2430   accessors->set_getter(the_hole_value(), SKIP_WRITE_BARRIER);
2431   accessors->set_setter(the_hole_value(), SKIP_WRITE_BARRIER);
2432   return accessors;
2433 }
2434
2435
2436 MaybeObject* Heap::AllocateTypeFeedbackInfo() {
2437   TypeFeedbackInfo* info;
2438   { MaybeObject* maybe_info = AllocateStruct(TYPE_FEEDBACK_INFO_TYPE);
2439     if (!maybe_info->To(&info)) return maybe_info;
2440   }
2441   info->initialize_storage();
2442   info->set_type_feedback_cells(TypeFeedbackCells::cast(empty_fixed_array()),
2443                                 SKIP_WRITE_BARRIER);
2444   return info;
2445 }
2446
2447
2448 MaybeObject* Heap::AllocateAliasedArgumentsEntry(int aliased_context_slot) {
2449   AliasedArgumentsEntry* entry;
2450   { MaybeObject* maybe_entry = AllocateStruct(ALIASED_ARGUMENTS_ENTRY_TYPE);
2451     if (!maybe_entry->To(&entry)) return maybe_entry;
2452   }
2453   entry->set_aliased_context_slot(aliased_context_slot);
2454   return entry;
2455 }
2456
2457
2458 const Heap::StringTypeTable Heap::string_type_table[] = {
2459 #define STRING_TYPE_ELEMENT(type, size, name, camel_name)                      \
2460   {type, size, k##camel_name##MapRootIndex},
2461   STRING_TYPE_LIST(STRING_TYPE_ELEMENT)
2462 #undef STRING_TYPE_ELEMENT
2463 };
2464
2465
2466 const Heap::ConstantStringTable Heap::constant_string_table[] = {
2467 #define CONSTANT_STRING_ELEMENT(name, contents)                                \
2468   {contents, k##name##RootIndex},
2469   INTERNALIZED_STRING_LIST(CONSTANT_STRING_ELEMENT)
2470 #undef CONSTANT_STRING_ELEMENT
2471 };
2472
2473
2474 const Heap::StructTable Heap::struct_table[] = {
2475 #define STRUCT_TABLE_ELEMENT(NAME, Name, name)                                 \
2476   { NAME##_TYPE, Name::kSize, k##Name##MapRootIndex },
2477   STRUCT_LIST(STRUCT_TABLE_ELEMENT)
2478 #undef STRUCT_TABLE_ELEMENT
2479 };
2480
2481
2482 bool Heap::CreateInitialMaps() {
2483   Object* obj;
2484   { MaybeObject* maybe_obj = AllocatePartialMap(MAP_TYPE, Map::kSize);
2485     if (!maybe_obj->ToObject(&obj)) return false;
2486   }
2487   // Map::cast cannot be used due to uninitialized map field.
2488   Map* new_meta_map = reinterpret_cast<Map*>(obj);
2489   set_meta_map(new_meta_map);
2490   new_meta_map->set_map(new_meta_map);
2491
2492   { MaybeObject* maybe_obj =
2493         AllocatePartialMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2494     if (!maybe_obj->ToObject(&obj)) return false;
2495   }
2496   set_fixed_array_map(Map::cast(obj));
2497
2498   { MaybeObject* maybe_obj = AllocatePartialMap(ODDBALL_TYPE, Oddball::kSize);
2499     if (!maybe_obj->ToObject(&obj)) return false;
2500   }
2501   set_oddball_map(Map::cast(obj));
2502
2503   // Allocate the empty array.
2504   { MaybeObject* maybe_obj = AllocateEmptyFixedArray();
2505     if (!maybe_obj->ToObject(&obj)) return false;
2506   }
2507   set_empty_fixed_array(FixedArray::cast(obj));
2508
2509   { MaybeObject* maybe_obj = Allocate(oddball_map(), OLD_POINTER_SPACE);
2510     if (!maybe_obj->ToObject(&obj)) return false;
2511   }
2512   set_null_value(Oddball::cast(obj));
2513   Oddball::cast(obj)->set_kind(Oddball::kNull);
2514
2515   { MaybeObject* maybe_obj = Allocate(oddball_map(), OLD_POINTER_SPACE);
2516     if (!maybe_obj->ToObject(&obj)) return false;
2517   }
2518   set_undefined_value(Oddball::cast(obj));
2519   Oddball::cast(obj)->set_kind(Oddball::kUndefined);
2520   ASSERT(!InNewSpace(undefined_value()));
2521
2522   // Allocate the empty descriptor array.
2523   { MaybeObject* maybe_obj = AllocateEmptyFixedArray();
2524     if (!maybe_obj->ToObject(&obj)) return false;
2525   }
2526   set_empty_descriptor_array(DescriptorArray::cast(obj));
2527
2528   // Fix the instance_descriptors for the existing maps.
2529   meta_map()->set_code_cache(empty_fixed_array());
2530   meta_map()->set_dependent_code(DependentCode::cast(empty_fixed_array()));
2531   meta_map()->init_back_pointer(undefined_value());
2532   meta_map()->set_instance_descriptors(empty_descriptor_array());
2533
2534   fixed_array_map()->set_code_cache(empty_fixed_array());
2535   fixed_array_map()->set_dependent_code(
2536       DependentCode::cast(empty_fixed_array()));
2537   fixed_array_map()->init_back_pointer(undefined_value());
2538   fixed_array_map()->set_instance_descriptors(empty_descriptor_array());
2539
2540   oddball_map()->set_code_cache(empty_fixed_array());
2541   oddball_map()->set_dependent_code(DependentCode::cast(empty_fixed_array()));
2542   oddball_map()->init_back_pointer(undefined_value());
2543   oddball_map()->set_instance_descriptors(empty_descriptor_array());
2544
2545   // Fix prototype object for existing maps.
2546   meta_map()->set_prototype(null_value());
2547   meta_map()->set_constructor(null_value());
2548
2549   fixed_array_map()->set_prototype(null_value());
2550   fixed_array_map()->set_constructor(null_value());
2551
2552   oddball_map()->set_prototype(null_value());
2553   oddball_map()->set_constructor(null_value());
2554
2555   { MaybeObject* maybe_obj =
2556         AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2557     if (!maybe_obj->ToObject(&obj)) return false;
2558   }
2559   set_fixed_cow_array_map(Map::cast(obj));
2560   ASSERT(fixed_array_map() != fixed_cow_array_map());
2561
2562   { MaybeObject* maybe_obj =
2563         AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2564     if (!maybe_obj->ToObject(&obj)) return false;
2565   }
2566   set_scope_info_map(Map::cast(obj));
2567
2568   { MaybeObject* maybe_obj = AllocateMap(HEAP_NUMBER_TYPE, HeapNumber::kSize);
2569     if (!maybe_obj->ToObject(&obj)) return false;
2570   }
2571   set_heap_number_map(Map::cast(obj));
2572
2573   { MaybeObject* maybe_obj = AllocateMap(SYMBOL_TYPE, Symbol::kSize);
2574     if (!maybe_obj->ToObject(&obj)) return false;
2575   }
2576   set_symbol_map(Map::cast(obj));
2577
2578   { MaybeObject* maybe_obj = AllocateMap(FOREIGN_TYPE, Foreign::kSize);
2579     if (!maybe_obj->ToObject(&obj)) return false;
2580   }
2581   set_foreign_map(Map::cast(obj));
2582
2583   for (unsigned i = 0; i < ARRAY_SIZE(string_type_table); i++) {
2584     const StringTypeTable& entry = string_type_table[i];
2585     { MaybeObject* maybe_obj = AllocateMap(entry.type, entry.size);
2586       if (!maybe_obj->ToObject(&obj)) return false;
2587     }
2588     roots_[entry.index] = Map::cast(obj);
2589   }
2590
2591   { MaybeObject* maybe_obj = AllocateMap(STRING_TYPE, kVariableSizeSentinel);
2592     if (!maybe_obj->ToObject(&obj)) return false;
2593   }
2594   set_undetectable_string_map(Map::cast(obj));
2595   Map::cast(obj)->set_is_undetectable();
2596
2597   { MaybeObject* maybe_obj =
2598         AllocateMap(ASCII_STRING_TYPE, kVariableSizeSentinel);
2599     if (!maybe_obj->ToObject(&obj)) return false;
2600   }
2601   set_undetectable_ascii_string_map(Map::cast(obj));
2602   Map::cast(obj)->set_is_undetectable();
2603
2604   { MaybeObject* maybe_obj =
2605         AllocateMap(FIXED_DOUBLE_ARRAY_TYPE, kVariableSizeSentinel);
2606     if (!maybe_obj->ToObject(&obj)) return false;
2607   }
2608   set_fixed_double_array_map(Map::cast(obj));
2609
2610   { MaybeObject* maybe_obj =
2611         AllocateMap(BYTE_ARRAY_TYPE, kVariableSizeSentinel);
2612     if (!maybe_obj->ToObject(&obj)) return false;
2613   }
2614   set_byte_array_map(Map::cast(obj));
2615
2616   { MaybeObject* maybe_obj =
2617         AllocateMap(FREE_SPACE_TYPE, kVariableSizeSentinel);
2618     if (!maybe_obj->ToObject(&obj)) return false;
2619   }
2620   set_free_space_map(Map::cast(obj));
2621
2622   { MaybeObject* maybe_obj = AllocateByteArray(0, TENURED);
2623     if (!maybe_obj->ToObject(&obj)) return false;
2624   }
2625   set_empty_byte_array(ByteArray::cast(obj));
2626
2627   { MaybeObject* maybe_obj =
2628         AllocateMap(EXTERNAL_PIXEL_ARRAY_TYPE, ExternalArray::kAlignedSize);
2629     if (!maybe_obj->ToObject(&obj)) return false;
2630   }
2631   set_external_pixel_array_map(Map::cast(obj));
2632
2633   { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_BYTE_ARRAY_TYPE,
2634                                          ExternalArray::kAlignedSize);
2635     if (!maybe_obj->ToObject(&obj)) return false;
2636   }
2637   set_external_byte_array_map(Map::cast(obj));
2638
2639   { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE,
2640                                          ExternalArray::kAlignedSize);
2641     if (!maybe_obj->ToObject(&obj)) return false;
2642   }
2643   set_external_unsigned_byte_array_map(Map::cast(obj));
2644
2645   { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_SHORT_ARRAY_TYPE,
2646                                          ExternalArray::kAlignedSize);
2647     if (!maybe_obj->ToObject(&obj)) return false;
2648   }
2649   set_external_short_array_map(Map::cast(obj));
2650
2651   { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE,
2652                                          ExternalArray::kAlignedSize);
2653     if (!maybe_obj->ToObject(&obj)) return false;
2654   }
2655   set_external_unsigned_short_array_map(Map::cast(obj));
2656
2657   { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_INT_ARRAY_TYPE,
2658                                          ExternalArray::kAlignedSize);
2659     if (!maybe_obj->ToObject(&obj)) return false;
2660   }
2661   set_external_int_array_map(Map::cast(obj));
2662
2663   { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_UNSIGNED_INT_ARRAY_TYPE,
2664                                          ExternalArray::kAlignedSize);
2665     if (!maybe_obj->ToObject(&obj)) return false;
2666   }
2667   set_external_unsigned_int_array_map(Map::cast(obj));
2668
2669   { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_FLOAT_ARRAY_TYPE,
2670                                          ExternalArray::kAlignedSize);
2671     if (!maybe_obj->ToObject(&obj)) return false;
2672   }
2673   set_external_float_array_map(Map::cast(obj));
2674
2675   { MaybeObject* maybe_obj =
2676         AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2677     if (!maybe_obj->ToObject(&obj)) return false;
2678   }
2679   set_non_strict_arguments_elements_map(Map::cast(obj));
2680
2681   { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_DOUBLE_ARRAY_TYPE,
2682                                          ExternalArray::kAlignedSize);
2683     if (!maybe_obj->ToObject(&obj)) return false;
2684   }
2685   set_external_double_array_map(Map::cast(obj));
2686
2687   { MaybeObject* maybe_obj = AllocateEmptyExternalArray(kExternalByteArray);
2688     if (!maybe_obj->ToObject(&obj)) return false;
2689   }
2690   set_empty_external_byte_array(ExternalArray::cast(obj));
2691
2692   { MaybeObject* maybe_obj =
2693         AllocateEmptyExternalArray(kExternalUnsignedByteArray);
2694     if (!maybe_obj->ToObject(&obj)) return false;
2695   }
2696   set_empty_external_unsigned_byte_array(ExternalArray::cast(obj));
2697
2698   { MaybeObject* maybe_obj = AllocateEmptyExternalArray(kExternalShortArray);
2699     if (!maybe_obj->ToObject(&obj)) return false;
2700   }
2701   set_empty_external_short_array(ExternalArray::cast(obj));
2702
2703   { MaybeObject* maybe_obj = AllocateEmptyExternalArray(
2704       kExternalUnsignedShortArray);
2705     if (!maybe_obj->ToObject(&obj)) return false;
2706   }
2707   set_empty_external_unsigned_short_array(ExternalArray::cast(obj));
2708
2709   { MaybeObject* maybe_obj = AllocateEmptyExternalArray(kExternalIntArray);
2710     if (!maybe_obj->ToObject(&obj)) return false;
2711   }
2712   set_empty_external_int_array(ExternalArray::cast(obj));
2713
2714   { MaybeObject* maybe_obj =
2715         AllocateEmptyExternalArray(kExternalUnsignedIntArray);
2716     if (!maybe_obj->ToObject(&obj)) return false;
2717   }
2718   set_empty_external_unsigned_int_array(ExternalArray::cast(obj));
2719
2720   { MaybeObject* maybe_obj = AllocateEmptyExternalArray(kExternalFloatArray);
2721     if (!maybe_obj->ToObject(&obj)) return false;
2722   }
2723   set_empty_external_float_array(ExternalArray::cast(obj));
2724
2725   { MaybeObject* maybe_obj = AllocateEmptyExternalArray(kExternalDoubleArray);
2726     if (!maybe_obj->ToObject(&obj)) return false;
2727   }
2728   set_empty_external_double_array(ExternalArray::cast(obj));
2729
2730   { MaybeObject* maybe_obj = AllocateEmptyExternalArray(kExternalPixelArray);
2731     if (!maybe_obj->ToObject(&obj)) return false;
2732   }
2733   set_empty_external_pixel_array(ExternalArray::cast(obj));
2734
2735   { MaybeObject* maybe_obj = AllocateMap(CODE_TYPE, kVariableSizeSentinel);
2736     if (!maybe_obj->ToObject(&obj)) return false;
2737   }
2738   set_code_map(Map::cast(obj));
2739
2740   { MaybeObject* maybe_obj = AllocateMap(CELL_TYPE, Cell::kSize);
2741     if (!maybe_obj->ToObject(&obj)) return false;
2742   }
2743   set_cell_map(Map::cast(obj));
2744
2745   { MaybeObject* maybe_obj = AllocateMap(PROPERTY_CELL_TYPE,
2746                                          PropertyCell::kSize);
2747     if (!maybe_obj->ToObject(&obj)) return false;
2748   }
2749   set_global_property_cell_map(Map::cast(obj));
2750
2751   { MaybeObject* maybe_obj = AllocateMap(FILLER_TYPE, kPointerSize);
2752     if (!maybe_obj->ToObject(&obj)) return false;
2753   }
2754   set_one_pointer_filler_map(Map::cast(obj));
2755
2756   { MaybeObject* maybe_obj = AllocateMap(FILLER_TYPE, 2 * kPointerSize);
2757     if (!maybe_obj->ToObject(&obj)) return false;
2758   }
2759   set_two_pointer_filler_map(Map::cast(obj));
2760
2761   for (unsigned i = 0; i < ARRAY_SIZE(struct_table); i++) {
2762     const StructTable& entry = struct_table[i];
2763     { MaybeObject* maybe_obj = AllocateMap(entry.type, entry.size);
2764       if (!maybe_obj->ToObject(&obj)) return false;
2765     }
2766     roots_[entry.index] = Map::cast(obj);
2767   }
2768
2769   { MaybeObject* maybe_obj =
2770         AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2771     if (!maybe_obj->ToObject(&obj)) return false;
2772   }
2773   set_hash_table_map(Map::cast(obj));
2774
2775   { MaybeObject* maybe_obj =
2776         AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2777     if (!maybe_obj->ToObject(&obj)) return false;
2778   }
2779   set_function_context_map(Map::cast(obj));
2780
2781   { MaybeObject* maybe_obj =
2782         AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2783     if (!maybe_obj->ToObject(&obj)) return false;
2784   }
2785   set_catch_context_map(Map::cast(obj));
2786
2787   { MaybeObject* maybe_obj =
2788         AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2789     if (!maybe_obj->ToObject(&obj)) return false;
2790   }
2791   set_with_context_map(Map::cast(obj));
2792
2793   { MaybeObject* maybe_obj =
2794         AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2795     if (!maybe_obj->ToObject(&obj)) return false;
2796   }
2797   set_block_context_map(Map::cast(obj));
2798
2799   { MaybeObject* maybe_obj =
2800         AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2801     if (!maybe_obj->ToObject(&obj)) return false;
2802   }
2803   set_module_context_map(Map::cast(obj));
2804
2805   { MaybeObject* maybe_obj =
2806         AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2807     if (!maybe_obj->ToObject(&obj)) return false;
2808   }
2809   set_global_context_map(Map::cast(obj));
2810
2811   { MaybeObject* maybe_obj =
2812         AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2813     if (!maybe_obj->ToObject(&obj)) return false;
2814   }
2815   Map* native_context_map = Map::cast(obj);
2816   native_context_map->set_dictionary_map(true);
2817   native_context_map->set_visitor_id(StaticVisitorBase::kVisitNativeContext);
2818   set_native_context_map(native_context_map);
2819
2820   { MaybeObject* maybe_obj = AllocateMap(SHARED_FUNCTION_INFO_TYPE,
2821                                          SharedFunctionInfo::kAlignedSize);
2822     if (!maybe_obj->ToObject(&obj)) return false;
2823   }
2824   set_shared_function_info_map(Map::cast(obj));
2825
2826   { MaybeObject* maybe_obj = AllocateMap(JS_MESSAGE_OBJECT_TYPE,
2827                                          JSMessageObject::kSize);
2828     if (!maybe_obj->ToObject(&obj)) return false;
2829   }
2830   set_message_object_map(Map::cast(obj));
2831
2832   Map* external_map;
2833   { MaybeObject* maybe_obj =
2834         AllocateMap(JS_OBJECT_TYPE, JSObject::kHeaderSize + kPointerSize);
2835     if (!maybe_obj->To(&external_map)) return false;
2836   }
2837   external_map->set_is_extensible(false);
2838   set_external_map(external_map);
2839
2840   ASSERT(!InNewSpace(empty_fixed_array()));
2841   return true;
2842 }
2843
2844
2845 MaybeObject* Heap::AllocateHeapNumber(double value, PretenureFlag pretenure) {
2846   // Statically ensure that it is safe to allocate heap numbers in paged
2847   // spaces.
2848   STATIC_ASSERT(HeapNumber::kSize <= Page::kNonCodeObjectAreaSize);
2849   AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
2850
2851   Object* result;
2852   { MaybeObject* maybe_result =
2853         AllocateRaw(HeapNumber::kSize, space, OLD_DATA_SPACE);
2854     if (!maybe_result->ToObject(&result)) return maybe_result;
2855   }
2856
2857   HeapObject::cast(result)->set_map_no_write_barrier(heap_number_map());
2858   HeapNumber::cast(result)->set_value(value);
2859   return result;
2860 }
2861
2862
2863 MaybeObject* Heap::AllocateHeapNumber(double value) {
2864   // Use the general version if we're forced to always allocate.
2865   if (always_allocate()) return AllocateHeapNumber(value, TENURED);
2866
2867   // This version of AllocateHeapNumber is optimized for
2868   // allocation in new space.
2869   STATIC_ASSERT(HeapNumber::kSize <= Page::kMaxNonCodeHeapObjectSize);
2870   Object* result;
2871   { MaybeObject* maybe_result = new_space_.AllocateRaw(HeapNumber::kSize);
2872     if (!maybe_result->ToObject(&result)) return maybe_result;
2873   }
2874   HeapObject::cast(result)->set_map_no_write_barrier(heap_number_map());
2875   HeapNumber::cast(result)->set_value(value);
2876   return result;
2877 }
2878
2879
2880 MaybeObject* Heap::AllocateCell(Object* value) {
2881   Object* result;
2882   { MaybeObject* maybe_result = AllocateRawCell();
2883     if (!maybe_result->ToObject(&result)) return maybe_result;
2884   }
2885   HeapObject::cast(result)->set_map_no_write_barrier(cell_map());
2886   Cell::cast(result)->set_value(value);
2887   return result;
2888 }
2889
2890
2891 MaybeObject* Heap::AllocatePropertyCell(Object* value) {
2892   Object* result;
2893   MaybeObject* maybe_result = AllocateRawPropertyCell();
2894   if (!maybe_result->ToObject(&result)) return maybe_result;
2895
2896   HeapObject::cast(result)->set_map_no_write_barrier(
2897       global_property_cell_map());
2898   PropertyCell* cell = PropertyCell::cast(result);
2899   cell->set_dependent_code(DependentCode::cast(empty_fixed_array()),
2900                            SKIP_WRITE_BARRIER);
2901   cell->set_value(value);
2902   cell->set_type(Type::None());
2903   maybe_result = cell->SetValueInferType(value);
2904   if (maybe_result->IsFailure()) return maybe_result;
2905   return result;
2906 }
2907
2908
2909 MaybeObject* Heap::AllocateBox(Object* value, PretenureFlag pretenure) {
2910   Box* result;
2911   MaybeObject* maybe_result = AllocateStruct(BOX_TYPE);
2912   if (!maybe_result->To(&result)) return maybe_result;
2913   result->set_value(value);
2914   return result;
2915 }
2916
2917
2918 MaybeObject* Heap::AllocateAllocationSite() {
2919   Object* result;
2920   MaybeObject* maybe_result = Allocate(allocation_site_map(),
2921                                        OLD_POINTER_SPACE);
2922   if (!maybe_result->ToObject(&result)) return maybe_result;
2923   AllocationSite* site = AllocationSite::cast(result);
2924   site->Initialize();
2925
2926   // Link the site into the weak list of allocation sites.
2927   site->set_weak_next(allocation_sites_list());
2928   set_allocation_sites_list(site);
2929   return result;
2930 }
2931
2932
2933 MaybeObject* Heap::CreateOddball(const char* to_string,
2934                                  Object* to_number,
2935                                  byte kind) {
2936   Object* result;
2937   { MaybeObject* maybe_result = Allocate(oddball_map(), OLD_POINTER_SPACE);
2938     if (!maybe_result->ToObject(&result)) return maybe_result;
2939   }
2940   return Oddball::cast(result)->Initialize(to_string, to_number, kind);
2941 }
2942
2943
2944 bool Heap::CreateApiObjects() {
2945   Object* obj;
2946
2947   { MaybeObject* maybe_obj = AllocateMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
2948     if (!maybe_obj->ToObject(&obj)) return false;
2949   }
2950   // Don't use Smi-only elements optimizations for objects with the neander
2951   // map. There are too many cases where element values are set directly, with
2952   // no bottleneck to trap the Smi-only -> fast elements transition, and there
2953   // appears to be no benefit in optimizing this case.
2954   Map* new_neander_map = Map::cast(obj);
2955   new_neander_map->set_elements_kind(TERMINAL_FAST_ELEMENTS_KIND);
2956   set_neander_map(new_neander_map);
2957
2958   { MaybeObject* maybe_obj = AllocateJSObjectFromMap(neander_map());
2959     if (!maybe_obj->ToObject(&obj)) return false;
2960   }
2961   Object* elements;
2962   { MaybeObject* maybe_elements = AllocateFixedArray(2);
2963     if (!maybe_elements->ToObject(&elements)) return false;
2964   }
2965   FixedArray::cast(elements)->set(0, Smi::FromInt(0));
2966   JSObject::cast(obj)->set_elements(FixedArray::cast(elements));
2967   set_message_listeners(JSObject::cast(obj));
2968
2969   return true;
2970 }
2971
2972
2973 void Heap::CreateJSEntryStub() {
2974   JSEntryStub stub;
2975   set_js_entry_code(*stub.GetCode(isolate()));
2976 }
2977
2978
2979 void Heap::CreateJSConstructEntryStub() {
2980   JSConstructEntryStub stub;
2981   set_js_construct_entry_code(*stub.GetCode(isolate()));
2982 }
2983
2984
2985 void Heap::CreateFixedStubs() {
2986   // Here we create roots for fixed stubs. They are needed at GC
2987   // for cooking and uncooking (check out frames.cc).
2988   // This eliminates the need for doing a dictionary lookup in the
2989   // stub cache for these stubs.
2990   HandleScope scope(isolate());
2991   // gcc-4.4 has a problem generating correct code for the following snippet:
2992   // {  JSEntryStub stub;
2993   //    js_entry_code_ = *stub.GetCode();
2994   // }
2995   // {  JSConstructEntryStub stub;
2996   //    js_construct_entry_code_ = *stub.GetCode();
2997   // }
2998   // To work around the problem, make separate functions without inlining.
2999   Heap::CreateJSEntryStub();
3000   Heap::CreateJSConstructEntryStub();
3001
3002   // Create stubs that should be there, so we don't unexpectedly have to
3003   // create them if we need them during the creation of another stub.
3004   // Stub creation mixes raw pointers and handles in an unsafe manner so
3005   // we cannot create stubs while we are creating stubs.
3006   CodeStub::GenerateStubsAheadOfTime(isolate());
3007 }
3008
3009
3010 bool Heap::CreateInitialObjects() {
3011   Object* obj;
3012
3013   // The -0 value must be set before NumberFromDouble works.
3014   { MaybeObject* maybe_obj = AllocateHeapNumber(-0.0, TENURED);
3015     if (!maybe_obj->ToObject(&obj)) return false;
3016   }
3017   set_minus_zero_value(HeapNumber::cast(obj));
3018   ASSERT(std::signbit(minus_zero_value()->Number()) != 0);
3019
3020   { MaybeObject* maybe_obj = AllocateHeapNumber(OS::nan_value(), TENURED);
3021     if (!maybe_obj->ToObject(&obj)) return false;
3022   }
3023   set_nan_value(HeapNumber::cast(obj));
3024
3025   { MaybeObject* maybe_obj = AllocateHeapNumber(V8_INFINITY, TENURED);
3026     if (!maybe_obj->ToObject(&obj)) return false;
3027   }
3028   set_infinity_value(HeapNumber::cast(obj));
3029
3030   // The hole has not been created yet, but we want to put something
3031   // predictable in the gaps in the string table, so let's make that Smi zero.
3032   set_the_hole_value(reinterpret_cast<Oddball*>(Smi::FromInt(0)));
3033
3034   // Allocate initial string table.
3035   { MaybeObject* maybe_obj =
3036         StringTable::Allocate(this, kInitialStringTableSize);
3037     if (!maybe_obj->ToObject(&obj)) return false;
3038   }
3039   // Don't use set_string_table() due to asserts.
3040   roots_[kStringTableRootIndex] = obj;
3041
3042   // Finish initializing oddballs after creating the string table.
3043   { MaybeObject* maybe_obj =
3044         undefined_value()->Initialize("undefined",
3045                                       nan_value(),
3046                                       Oddball::kUndefined);
3047     if (!maybe_obj->ToObject(&obj)) return false;
3048   }
3049
3050   // Initialize the null_value.
3051   { MaybeObject* maybe_obj =
3052         null_value()->Initialize("null", Smi::FromInt(0), Oddball::kNull);
3053     if (!maybe_obj->ToObject(&obj)) return false;
3054   }
3055
3056   { MaybeObject* maybe_obj = CreateOddball("true",
3057                                            Smi::FromInt(1),
3058                                            Oddball::kTrue);
3059     if (!maybe_obj->ToObject(&obj)) return false;
3060   }
3061   set_true_value(Oddball::cast(obj));
3062
3063   { MaybeObject* maybe_obj = CreateOddball("false",
3064                                            Smi::FromInt(0),
3065                                            Oddball::kFalse);
3066     if (!maybe_obj->ToObject(&obj)) return false;
3067   }
3068   set_false_value(Oddball::cast(obj));
3069
3070   { MaybeObject* maybe_obj = CreateOddball("hole",
3071                                            Smi::FromInt(-1),
3072                                            Oddball::kTheHole);
3073     if (!maybe_obj->ToObject(&obj)) return false;
3074   }
3075   set_the_hole_value(Oddball::cast(obj));
3076
3077   { MaybeObject* maybe_obj = CreateOddball("uninitialized",
3078                                            Smi::FromInt(-1),
3079                                            Oddball::kUninitialized);
3080     if (!maybe_obj->ToObject(&obj)) return false;
3081   }
3082   set_uninitialized_value(Oddball::cast(obj));
3083
3084   { MaybeObject* maybe_obj = CreateOddball("arguments_marker",
3085                                            Smi::FromInt(-4),
3086                                            Oddball::kArgumentMarker);
3087     if (!maybe_obj->ToObject(&obj)) return false;
3088   }
3089   set_arguments_marker(Oddball::cast(obj));
3090
3091   { MaybeObject* maybe_obj = CreateOddball("no_interceptor_result_sentinel",
3092                                            Smi::FromInt(-2),
3093                                            Oddball::kOther);
3094     if (!maybe_obj->ToObject(&obj)) return false;
3095   }
3096   set_no_interceptor_result_sentinel(obj);
3097
3098   { MaybeObject* maybe_obj = CreateOddball("termination_exception",
3099                                            Smi::FromInt(-3),
3100                                            Oddball::kOther);
3101     if (!maybe_obj->ToObject(&obj)) return false;
3102   }
3103   set_termination_exception(obj);
3104
3105   for (unsigned i = 0; i < ARRAY_SIZE(constant_string_table); i++) {
3106     { MaybeObject* maybe_obj =
3107           InternalizeUtf8String(constant_string_table[i].contents);
3108       if (!maybe_obj->ToObject(&obj)) return false;
3109     }
3110     roots_[constant_string_table[i].index] = String::cast(obj);
3111   }
3112
3113   // Allocate the hidden string which is used to identify the hidden properties
3114   // in JSObjects. The hash code has a special value so that it will not match
3115   // the empty string when searching for the property. It cannot be part of the
3116   // loop above because it needs to be allocated manually with the special
3117   // hash code in place. The hash code for the hidden_string is zero to ensure
3118   // that it will always be at the first entry in property descriptors.
3119   { MaybeObject* maybe_obj = AllocateOneByteInternalizedString(
3120       OneByteVector("", 0), String::kEmptyStringHash);
3121     if (!maybe_obj->ToObject(&obj)) return false;
3122   }
3123   hidden_string_ = String::cast(obj);
3124
3125   // Allocate the code_stubs dictionary. The initial size is set to avoid
3126   // expanding the dictionary during bootstrapping.
3127   { MaybeObject* maybe_obj = UnseededNumberDictionary::Allocate(this, 128);
3128     if (!maybe_obj->ToObject(&obj)) return false;
3129   }
3130   set_code_stubs(UnseededNumberDictionary::cast(obj));
3131
3132
3133   // Allocate the non_monomorphic_cache used in stub-cache.cc. The initial size
3134   // is set to avoid expanding the dictionary during bootstrapping.
3135   { MaybeObject* maybe_obj = UnseededNumberDictionary::Allocate(this, 64);
3136     if (!maybe_obj->ToObject(&obj)) return false;
3137   }
3138   set_non_monomorphic_cache(UnseededNumberDictionary::cast(obj));
3139
3140   { MaybeObject* maybe_obj = AllocatePolymorphicCodeCache();
3141     if (!maybe_obj->ToObject(&obj)) return false;
3142   }
3143   set_polymorphic_code_cache(PolymorphicCodeCache::cast(obj));
3144
3145   set_instanceof_cache_function(Smi::FromInt(0));
3146   set_instanceof_cache_map(Smi::FromInt(0));
3147   set_instanceof_cache_answer(Smi::FromInt(0));
3148
3149   CreateFixedStubs();
3150
3151   // Allocate the dictionary of intrinsic function names.
3152   { MaybeObject* maybe_obj =
3153         NameDictionary::Allocate(this, Runtime::kNumFunctions);
3154     if (!maybe_obj->ToObject(&obj)) return false;
3155   }
3156   { MaybeObject* maybe_obj = Runtime::InitializeIntrinsicFunctionNames(this,
3157                                                                        obj);
3158     if (!maybe_obj->ToObject(&obj)) return false;
3159   }
3160   set_intrinsic_function_names(NameDictionary::cast(obj));
3161
3162   { MaybeObject* maybe_obj = AllocateInitialNumberStringCache();
3163     if (!maybe_obj->ToObject(&obj)) return false;
3164   }
3165   set_number_string_cache(FixedArray::cast(obj));
3166
3167   // Allocate cache for single character one byte strings.
3168   { MaybeObject* maybe_obj =
3169         AllocateFixedArray(String::kMaxOneByteCharCode + 1, TENURED);
3170     if (!maybe_obj->ToObject(&obj)) return false;
3171   }
3172   set_single_character_string_cache(FixedArray::cast(obj));
3173
3174   // Allocate cache for string split.
3175   { MaybeObject* maybe_obj = AllocateFixedArray(
3176       RegExpResultsCache::kRegExpResultsCacheSize, TENURED);
3177     if (!maybe_obj->ToObject(&obj)) return false;
3178   }
3179   set_string_split_cache(FixedArray::cast(obj));
3180
3181   { MaybeObject* maybe_obj = AllocateFixedArray(
3182       RegExpResultsCache::kRegExpResultsCacheSize, TENURED);
3183     if (!maybe_obj->ToObject(&obj)) return false;
3184   }
3185   set_regexp_multiple_cache(FixedArray::cast(obj));
3186
3187   // Allocate cache for external strings pointing to native source code.
3188   { MaybeObject* maybe_obj = AllocateFixedArray(Natives::GetBuiltinsCount());
3189     if (!maybe_obj->ToObject(&obj)) return false;
3190   }
3191   set_natives_source_cache(FixedArray::cast(obj));
3192
3193   // Allocate object to hold object observation state.
3194   { MaybeObject* maybe_obj = AllocateMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
3195     if (!maybe_obj->ToObject(&obj)) return false;
3196   }
3197   { MaybeObject* maybe_obj = AllocateJSObjectFromMap(Map::cast(obj));
3198     if (!maybe_obj->ToObject(&obj)) return false;
3199   }
3200   set_observation_state(JSObject::cast(obj));
3201
3202   { MaybeObject* maybe_obj = AllocateSymbol();
3203     if (!maybe_obj->ToObject(&obj)) return false;
3204   }
3205   set_frozen_symbol(Symbol::cast(obj));
3206
3207   { MaybeObject* maybe_obj = SeededNumberDictionary::Allocate(this, 0, TENURED);
3208     if (!maybe_obj->ToObject(&obj)) return false;
3209   }
3210   SeededNumberDictionary::cast(obj)->set_requires_slow_elements();
3211   set_empty_slow_element_dictionary(SeededNumberDictionary::cast(obj));
3212
3213   { MaybeObject* maybe_obj = AllocateSymbol();
3214     if (!maybe_obj->ToObject(&obj)) return false;
3215   }
3216   set_observed_symbol(Symbol::cast(obj));
3217
3218   // Handling of script id generation is in Factory::NewScript.
3219   set_last_script_id(Smi::FromInt(v8::Script::kNoScriptId));
3220
3221   // Initialize keyed lookup cache.
3222   isolate_->keyed_lookup_cache()->Clear();
3223
3224   // Initialize context slot cache.
3225   isolate_->context_slot_cache()->Clear();
3226
3227   // Initialize descriptor cache.
3228   isolate_->descriptor_lookup_cache()->Clear();
3229
3230   // Initialize compilation cache.
3231   isolate_->compilation_cache()->Clear();
3232
3233   return true;
3234 }
3235
3236
3237 bool Heap::RootCanBeWrittenAfterInitialization(Heap::RootListIndex root_index) {
3238   RootListIndex writable_roots[] = {
3239     kStoreBufferTopRootIndex,
3240     kStackLimitRootIndex,
3241     kNumberStringCacheRootIndex,
3242     kInstanceofCacheFunctionRootIndex,
3243     kInstanceofCacheMapRootIndex,
3244     kInstanceofCacheAnswerRootIndex,
3245     kCodeStubsRootIndex,
3246     kNonMonomorphicCacheRootIndex,
3247     kPolymorphicCodeCacheRootIndex,
3248     kLastScriptIdRootIndex,
3249     kEmptyScriptRootIndex,
3250     kRealStackLimitRootIndex,
3251     kArgumentsAdaptorDeoptPCOffsetRootIndex,
3252     kConstructStubDeoptPCOffsetRootIndex,
3253     kGetterStubDeoptPCOffsetRootIndex,
3254     kSetterStubDeoptPCOffsetRootIndex,
3255     kStringTableRootIndex,
3256   };
3257
3258   for (unsigned int i = 0; i < ARRAY_SIZE(writable_roots); i++) {
3259     if (root_index == writable_roots[i])
3260       return true;
3261   }
3262   return false;
3263 }
3264
3265
3266 Object* RegExpResultsCache::Lookup(Heap* heap,
3267                                    String* key_string,
3268                                    Object* key_pattern,
3269                                    ResultsCacheType type) {
3270   FixedArray* cache;
3271   if (!key_string->IsInternalizedString()) return Smi::FromInt(0);
3272   if (type == STRING_SPLIT_SUBSTRINGS) {
3273     ASSERT(key_pattern->IsString());
3274     if (!key_pattern->IsInternalizedString()) return Smi::FromInt(0);
3275     cache = heap->string_split_cache();
3276   } else {
3277     ASSERT(type == REGEXP_MULTIPLE_INDICES);
3278     ASSERT(key_pattern->IsFixedArray());
3279     cache = heap->regexp_multiple_cache();
3280   }
3281
3282   uint32_t hash = key_string->Hash();
3283   uint32_t index = ((hash & (kRegExpResultsCacheSize - 1)) &
3284       ~(kArrayEntriesPerCacheEntry - 1));
3285   if (cache->get(index + kStringOffset) == key_string &&
3286       cache->get(index + kPatternOffset) == key_pattern) {
3287     return cache->get(index + kArrayOffset);
3288   }
3289   index =
3290       ((index + kArrayEntriesPerCacheEntry) & (kRegExpResultsCacheSize - 1));
3291   if (cache->get(index + kStringOffset) == key_string &&
3292       cache->get(index + kPatternOffset) == key_pattern) {
3293     return cache->get(index + kArrayOffset);
3294   }
3295   return Smi::FromInt(0);
3296 }
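// Illustrative sketch of the probing scheme above (the constants shown are
// assumed example values, not authoritative): the cache is a flat FixedArray
// used as a two-way set-associative table, where each logical entry occupies
// kArrayEntriesPerCacheEntry consecutive slots (key string, key pattern,
// results array).  Assuming kRegExpResultsCacheSize == 0x100 and
// kArrayEntriesPerCacheEntry == 4, a key hash of 0x2A7 probes
//   primary   bucket = (0x2A7 & 0xFF) & ~0x3 = 0xA4
//   secondary bucket = (0xA4 + 4) & 0xFF     = 0xA8
// so at most two buckets are inspected before giving up with Smi 0.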
3297
3298
3299 void RegExpResultsCache::Enter(Heap* heap,
3300                                String* key_string,
3301                                Object* key_pattern,
3302                                FixedArray* value_array,
3303                                ResultsCacheType type) {
3304   FixedArray* cache;
3305   if (!key_string->IsInternalizedString()) return;
3306   if (type == STRING_SPLIT_SUBSTRINGS) {
3307     ASSERT(key_pattern->IsString());
3308     if (!key_pattern->IsInternalizedString()) return;
3309     cache = heap->string_split_cache();
3310   } else {
3311     ASSERT(type == REGEXP_MULTIPLE_INDICES);
3312     ASSERT(key_pattern->IsFixedArray());
3313     cache = heap->regexp_multiple_cache();
3314   }
3315
3316   uint32_t hash = key_string->Hash();
3317   uint32_t index = ((hash & (kRegExpResultsCacheSize - 1)) &
3318       ~(kArrayEntriesPerCacheEntry - 1));
3319   if (cache->get(index + kStringOffset) == Smi::FromInt(0)) {
3320     cache->set(index + kStringOffset, key_string);
3321     cache->set(index + kPatternOffset, key_pattern);
3322     cache->set(index + kArrayOffset, value_array);
3323   } else {
3324     uint32_t index2 =
3325         ((index + kArrayEntriesPerCacheEntry) & (kRegExpResultsCacheSize - 1));
3326     if (cache->get(index2 + kStringOffset) == Smi::FromInt(0)) {
3327       cache->set(index2 + kStringOffset, key_string);
3328       cache->set(index2 + kPatternOffset, key_pattern);
3329       cache->set(index2 + kArrayOffset, value_array);
3330     } else {
3331       cache->set(index2 + kStringOffset, Smi::FromInt(0));
3332       cache->set(index2 + kPatternOffset, Smi::FromInt(0));
3333       cache->set(index2 + kArrayOffset, Smi::FromInt(0));
3334       cache->set(index + kStringOffset, key_string);
3335       cache->set(index + kPatternOffset, key_pattern);
3336       cache->set(index + kArrayOffset, value_array);
3337     }
3338   }
3339   // If the array is a reasonably short list of substrings, convert it into a
3340   // list of internalized strings.
3341   if (type == STRING_SPLIT_SUBSTRINGS && value_array->length() < 100) {
3342     for (int i = 0; i < value_array->length(); i++) {
3343       String* str = String::cast(value_array->get(i));
3344       Object* internalized_str;
3345       MaybeObject* maybe_string = heap->InternalizeString(str);
3346       if (maybe_string->ToObject(&internalized_str)) {
3347         value_array->set(i, internalized_str);
3348       }
3349     }
3350   }
3351   // Convert backing store to a copy-on-write array.
3352   value_array->set_map_no_write_barrier(heap->fixed_cow_array_map());
3353 }
3354
3355
3356 void RegExpResultsCache::Clear(FixedArray* cache) {
3357   for (int i = 0; i < kRegExpResultsCacheSize; i++) {
3358     cache->set(i, Smi::FromInt(0));
3359   }
3360 }
3361
3362
3363 MaybeObject* Heap::AllocateInitialNumberStringCache() {
3364   MaybeObject* maybe_obj =
3365       AllocateFixedArray(kInitialNumberStringCacheSize * 2, TENURED);
3366   return maybe_obj;
3367 }
3368
3369
3370 int Heap::FullSizeNumberStringCacheLength() {
3371   // Compute the size of the number string cache based on the max new space
3372   // size.  The cache has a minimum size of twice the initial cache size to
3373   // ensure that it is bigger after being made 'full size'.
3374   int number_string_cache_size = max_semispace_size_ / 512;
3375   number_string_cache_size = Max(kInitialNumberStringCacheSize * 2,
3376                                  Min(0x4000, number_string_cache_size));
3377   // There is a string and a number per entry so the length is twice the number
3378   // of entries.
3379   return number_string_cache_size * 2;
3380 }
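// Worked example for the computation above (sizes are illustrative, not
// authoritative): with an 8 MB max semispace the raw value is
// 8 MB / 512 = 16384 entries, which survives Min(0x4000, 16384) unchanged
// and, in this example, is not below kInitialNumberStringCacheSize * 2, so
// the returned length is 16384 * 2 = 32768 slots (one number slot and one
// string slot per entry).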
3381
3382
3383 void Heap::AllocateFullSizeNumberStringCache() {
3384   // The idea is to have a small number string cache in the snapshot to keep
3385   // boot-time memory usage down.  If the cache is already being expanded
3386   // while the snapshot is created, then that idea has not worked out.
3387   ASSERT(!Serializer::enabled() || FLAG_extra_code != NULL);
3388   MaybeObject* maybe_obj =
3389       AllocateFixedArray(FullSizeNumberStringCacheLength(), TENURED);
3390   Object* new_cache;
3391   if (maybe_obj->ToObject(&new_cache)) {
3392     // We don't bother to repopulate the cache with entries from the old cache.
3393     // It will be repopulated soon enough with new strings.
3394     set_number_string_cache(FixedArray::cast(new_cache));
3395   }
3396   // If allocation fails then we just return without doing anything.  It is only
3397   // a cache, so best effort is OK here.
3398 }
3399
3400
3401 void Heap::FlushNumberStringCache() {
3402   // Flush the number to string cache.
3403   int len = number_string_cache()->length();
3404   for (int i = 0; i < len; i++) {
3405     number_string_cache()->set_undefined(this, i);
3406   }
3407 }
3408
3409
3410 static inline int double_get_hash(double d) {
3411   DoubleRepresentation rep(d);
3412   return static_cast<int>(rep.bits) ^ static_cast<int>(rep.bits >> 32);
3413 }
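// For illustration: double_get_hash folds the 64-bit IEEE-754 bit pattern of
// the double into 32 bits by XOR-ing its halves.  For 1.0 (bit pattern
// 0x3FF0000000000000) this yields 0x00000000 ^ 0x3FF00000 = 0x3FF00000,
// which the callers below then mask down to the cache size.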
3414
3415
3416 static inline int smi_get_hash(Smi* smi) {
3417   return smi->value();
3418 }
3419
3420
3421 Object* Heap::GetNumberStringCache(Object* number) {
3422   int hash;
3423   int mask = (number_string_cache()->length() >> 1) - 1;
3424   if (number->IsSmi()) {
3425     hash = smi_get_hash(Smi::cast(number)) & mask;
3426   } else {
3427     hash = double_get_hash(number->Number()) & mask;
3428   }
3429   Object* key = number_string_cache()->get(hash * 2);
3430   if (key == number) {
3431     return String::cast(number_string_cache()->get(hash * 2 + 1));
3432   } else if (key->IsHeapNumber() &&
3433              number->IsHeapNumber() &&
3434              key->Number() == number->Number()) {
3435     return String::cast(number_string_cache()->get(hash * 2 + 1));
3436   }
3437   return undefined_value();
3438 }
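// Cache layout sketch (informal): number_string_cache() is a FixedArray of
// length 2 * N, with N a power of two, storing (key, value) pairs:
//   slot 2 * hash     -> the number used as the key (Smi or HeapNumber)
//   slot 2 * hash + 1 -> the cached string representation
// For a Smi key the hash is simply its value masked by N - 1, so a lookup or
// store touches exactly one pair; unused slots hold undefined.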
3439
3440
3441 void Heap::SetNumberStringCache(Object* number, String* string) {
3442   int hash;
3443   int mask = (number_string_cache()->length() >> 1) - 1;
3444   if (number->IsSmi()) {
3445     hash = smi_get_hash(Smi::cast(number)) & mask;
3446   } else {
3447     hash = double_get_hash(number->Number()) & mask;
3448   }
3449   if (number_string_cache()->get(hash * 2) != undefined_value() &&
3450       number_string_cache()->length() != FullSizeNumberStringCacheLength()) {
3451     // The first time we have a hash collision, we move to the full sized
3452     // number string cache.
3453     AllocateFullSizeNumberStringCache();
3454     return;
3455   }
3456   number_string_cache()->set(hash * 2, number);
3457   number_string_cache()->set(hash * 2 + 1, string);
3458 }
3459
3460
3461 MaybeObject* Heap::NumberToString(Object* number,
3462                                   bool check_number_string_cache,
3463                                   PretenureFlag pretenure) {
3464   isolate_->counters()->number_to_string_runtime()->Increment();
3465   if (check_number_string_cache) {
3466     Object* cached = GetNumberStringCache(number);
3467     if (cached != undefined_value()) {
3468       return cached;
3469     }
3470   }
3471
3472   char arr[100];
3473   Vector<char> buffer(arr, ARRAY_SIZE(arr));
3474   const char* str;
3475   if (number->IsSmi()) {
3476     int num = Smi::cast(number)->value();
3477     str = IntToCString(num, buffer);
3478   } else {
3479     double num = HeapNumber::cast(number)->value();
3480     str = DoubleToCString(num, buffer);
3481   }
3482
3483   Object* js_string;
3484   MaybeObject* maybe_js_string =
3485       AllocateStringFromOneByte(CStrVector(str), pretenure);
3486   if (maybe_js_string->ToObject(&js_string)) {
3487     SetNumberStringCache(number, String::cast(js_string));
3488   }
3489   return maybe_js_string;
3490 }
3491
3492
3493 MaybeObject* Heap::Uint32ToString(uint32_t value,
3494                                   bool check_number_string_cache) {
3495   Object* number;
3496   MaybeObject* maybe = NumberFromUint32(value);
3497   if (!maybe->To<Object>(&number)) return maybe;
3498   return NumberToString(number, check_number_string_cache);
3499 }
3500
3501
3502 Map* Heap::MapForExternalArrayType(ExternalArrayType array_type) {
3503   return Map::cast(roots_[RootIndexForExternalArrayType(array_type)]);
3504 }
3505
3506
3507 Heap::RootListIndex Heap::RootIndexForExternalArrayType(
3508     ExternalArrayType array_type) {
3509   switch (array_type) {
3510     case kExternalByteArray:
3511       return kExternalByteArrayMapRootIndex;
3512     case kExternalUnsignedByteArray:
3513       return kExternalUnsignedByteArrayMapRootIndex;
3514     case kExternalShortArray:
3515       return kExternalShortArrayMapRootIndex;
3516     case kExternalUnsignedShortArray:
3517       return kExternalUnsignedShortArrayMapRootIndex;
3518     case kExternalIntArray:
3519       return kExternalIntArrayMapRootIndex;
3520     case kExternalUnsignedIntArray:
3521       return kExternalUnsignedIntArrayMapRootIndex;
3522     case kExternalFloatArray:
3523       return kExternalFloatArrayMapRootIndex;
3524     case kExternalDoubleArray:
3525       return kExternalDoubleArrayMapRootIndex;
3526     case kExternalPixelArray:
3527       return kExternalPixelArrayMapRootIndex;
3528     default:
3529       UNREACHABLE();
3530       return kUndefinedValueRootIndex;
3531   }
3532 }
3533
3534 Heap::RootListIndex Heap::RootIndexForEmptyExternalArray(
3535     ElementsKind elementsKind) {
3536   switch (elementsKind) {
3537     case EXTERNAL_BYTE_ELEMENTS:
3538       return kEmptyExternalByteArrayRootIndex;
3539     case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
3540       return kEmptyExternalUnsignedByteArrayRootIndex;
3541     case EXTERNAL_SHORT_ELEMENTS:
3542       return kEmptyExternalShortArrayRootIndex;
3543     case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
3544       return kEmptyExternalUnsignedShortArrayRootIndex;
3545     case EXTERNAL_INT_ELEMENTS:
3546       return kEmptyExternalIntArrayRootIndex;
3547     case EXTERNAL_UNSIGNED_INT_ELEMENTS:
3548       return kEmptyExternalUnsignedIntArrayRootIndex;
3549     case EXTERNAL_FLOAT_ELEMENTS:
3550       return kEmptyExternalFloatArrayRootIndex;
3551     case EXTERNAL_DOUBLE_ELEMENTS:
3552       return kEmptyExternalDoubleArrayRootIndex;
3553     case EXTERNAL_PIXEL_ELEMENTS:
3554       return kEmptyExternalPixelArrayRootIndex;
3555     default:
3556       UNREACHABLE();
3557       return kUndefinedValueRootIndex;
3558   }
3559 }
3560
3561
3562 ExternalArray* Heap::EmptyExternalArrayForMap(Map* map) {
3563   return ExternalArray::cast(
3564       roots_[RootIndexForEmptyExternalArray(map->elements_kind())]);
3565 }
3566
3567
3568
3569
3570 MaybeObject* Heap::NumberFromDouble(double value, PretenureFlag pretenure) {
3571   // We need to distinguish the minus zero value and this cannot be
3572   // done after conversion to int. Doing this by comparing bit
3573   // patterns is faster than using fpclassify() et al.
3574   static const DoubleRepresentation minus_zero(-0.0);
3575
3576   DoubleRepresentation rep(value);
3577   if (rep.bits == minus_zero.bits) {
3578     return AllocateHeapNumber(-0.0, pretenure);
3579   }
3580
3581   int int_value = FastD2I(value);
3582   if (value == int_value && Smi::IsValid(int_value)) {
3583     return Smi::FromInt(int_value);
3584   }
3585
3586   // Materialize the value in the heap.
3587   return AllocateHeapNumber(value, pretenure);
3588 }
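// Examples of the three paths above (illustrative only):
//   -0.0 -> bit pattern 0x8000000000000000 matches minus_zero, so a
//           HeapNumber holding -0.0 is allocated (Smi 0 would drop the sign).
//   7.0  -> FastD2I gives 7, 7.0 == 7 and Smi::IsValid(7), so Smi 7 is
//           returned without any allocation.
//   0.5  -> FastD2I gives 0, but 0.5 != 0, so a HeapNumber is allocated.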
3589
3590
3591 MaybeObject* Heap::AllocateForeign(Address address, PretenureFlag pretenure) {
3592   // Statically ensure that it is safe to allocate foreigns in paged spaces.
3593   STATIC_ASSERT(Foreign::kSize <= Page::kMaxNonCodeHeapObjectSize);
3594   AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
3595   Foreign* result;
3596   MaybeObject* maybe_result = Allocate(foreign_map(), space);
3597   if (!maybe_result->To(&result)) return maybe_result;
3598   result->set_foreign_address(address);
3599   return result;
3600 }
3601
3602
3603 MaybeObject* Heap::AllocateSharedFunctionInfo(Object* name) {
3604   SharedFunctionInfo* share;
3605   MaybeObject* maybe = Allocate(shared_function_info_map(), OLD_POINTER_SPACE);
3606   if (!maybe->To<SharedFunctionInfo>(&share)) return maybe;
3607
3608   // Set pointer fields.
3609   share->set_name(name);
3610   Code* illegal = isolate_->builtins()->builtin(Builtins::kIllegal);
3611   share->set_code(illegal);
3612   share->set_optimized_code_map(Smi::FromInt(0));
3613   share->set_scope_info(ScopeInfo::Empty(isolate_));
3614   Code* construct_stub =
3615       isolate_->builtins()->builtin(Builtins::kJSConstructStubGeneric);
3616   share->set_construct_stub(construct_stub);
3617   share->set_instance_class_name(Object_string());
3618   share->set_function_data(undefined_value(), SKIP_WRITE_BARRIER);
3619   share->set_script(undefined_value(), SKIP_WRITE_BARRIER);
3620   share->set_debug_info(undefined_value(), SKIP_WRITE_BARRIER);
3621   share->set_inferred_name(empty_string(), SKIP_WRITE_BARRIER);
3622   share->set_initial_map(undefined_value(), SKIP_WRITE_BARRIER);
3623   share->set_ast_node_count(0);
3624   share->set_counters(0);
3625
3626   // Set integer fields (smi or int, depending on the architecture).
3627   share->set_length(0);
3628   share->set_formal_parameter_count(0);
3629   share->set_expected_nof_properties(0);
3630   share->set_num_literals(0);
3631   share->set_start_position_and_type(0);
3632   share->set_end_position(0);
3633   share->set_function_token_position(0);
3634   // All compiler hints default to false or 0.
3635   share->set_compiler_hints(0);
3636   share->set_opt_count(0);
3637
3638   return share;
3639 }
3640
3641
3642 MaybeObject* Heap::AllocateJSMessageObject(String* type,
3643                                            JSArray* arguments,
3644                                            int start_position,
3645                                            int end_position,
3646                                            Object* script,
3647                                            Object* stack_trace,
3648                                            Object* stack_frames) {
3649   Object* result;
3650   { MaybeObject* maybe_result = Allocate(message_object_map(), NEW_SPACE);
3651     if (!maybe_result->ToObject(&result)) return maybe_result;
3652   }
3653   JSMessageObject* message = JSMessageObject::cast(result);
3654   message->set_properties(Heap::empty_fixed_array(), SKIP_WRITE_BARRIER);
3655   message->initialize_elements();
3656   message->set_elements(Heap::empty_fixed_array(), SKIP_WRITE_BARRIER);
3657   message->set_type(type);
3658   message->set_arguments(arguments);
3659   message->set_start_position(start_position);
3660   message->set_end_position(end_position);
3661   message->set_script(script);
3662   message->set_stack_trace(stack_trace);
3663   message->set_stack_frames(stack_frames);
3664   return result;
3665 }
3666
3667
3668
3669 // Returns true for a character in a range.  Both limits are inclusive.
3670 static inline bool Between(uint32_t character, uint32_t from, uint32_t to) {
3671   // This makes use of unsigned wraparound.
3672   return character - from <= to - from;
3673 }
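// Example: Between('5', '0', '9') computes '5' - '0' = 5 <= 9 and returns
// true, while for a character below the range, e.g. '.', the unsigned
// subtraction '.' - '0' wraps around to a huge value and the comparison
// fails.  This is why a single unsigned compare covers both bounds.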
3674
3675
3676 MUST_USE_RESULT static inline MaybeObject* MakeOrFindTwoCharacterString(
3677     Heap* heap,
3678     uint16_t c1,
3679     uint16_t c2) {
3680   String* result;
3681   // Numeric strings have a different hash algorithm not known by
3682   // LookupTwoCharsStringIfExists, so we skip this step for such strings.
3683   if ((!Between(c1, '0', '9') || !Between(c2, '0', '9')) &&
3684       heap->string_table()->LookupTwoCharsStringIfExists(c1, c2, &result)) {
3685     return result;
3686   // Now that we know the length is 2, we might as well make use of that
3687   // fact when building the new string.
3688   } else if (static_cast<unsigned>(c1 | c2) <= String::kMaxOneByteCharCodeU) {
3689     // We can do this.
3690     ASSERT(IsPowerOf2(String::kMaxOneByteCharCodeU + 1));  // because of this.
3691     Object* result;
3692     { MaybeObject* maybe_result = heap->AllocateRawOneByteString(2);
3693       if (!maybe_result->ToObject(&result)) return maybe_result;
3694     }
3695     uint8_t* dest = SeqOneByteString::cast(result)->GetChars();
3696     dest[0] = static_cast<uint8_t>(c1);
3697     dest[1] = static_cast<uint8_t>(c2);
3698     return result;
3699   } else {
3700     Object* result;
3701     { MaybeObject* maybe_result = heap->AllocateRawTwoByteString(2);
3702       if (!maybe_result->ToObject(&result)) return maybe_result;
3703     }
3704     uc16* dest = SeqTwoByteString::cast(result)->GetChars();
3705     dest[0] = c1;
3706     dest[1] = c2;
3707     return result;
3708   }
3709 }
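// Note on the (c1 | c2) test above: because kMaxOneByteCharCodeU + 1 is a
// power of two (see the ASSERT), kMaxOneByteCharCodeU is an all-ones bit
// mask, so c1 | c2 <= mask holds exactly when both characters individually
// fit in one byte.  For example, assuming a mask of 0xFF, c1 = 0x41 ('A')
// and c2 = 0x7A ('z') give 0x7B <= 0xFF (one-byte path), whereas c2 = 0x100
// gives 0x141 > 0xFF (two-byte path).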
3710
3711
3712 MaybeObject* Heap::AllocateConsString(String* first, String* second) {
3713   int first_length = first->length();
3714   if (first_length == 0) {
3715     return second;
3716   }
3717
3718   int second_length = second->length();
3719   if (second_length == 0) {
3720     return first;
3721   }
3722
3723   int length = first_length + second_length;
3724
3725   // Optimization for two-character strings often used as keys in a
3726   // decompression dictionary.  Check whether we already have the string in
3727   // the string table to prevent creation of many unnecessary strings.
3728   if (length == 2) {
3729     uint16_t c1 = first->Get(0);
3730     uint16_t c2 = second->Get(0);
3731     return MakeOrFindTwoCharacterString(this, c1, c2);
3732   }
3733
3734   bool first_is_one_byte = first->IsOneByteRepresentation();
3735   bool second_is_one_byte = second->IsOneByteRepresentation();
3736   bool is_one_byte = first_is_one_byte && second_is_one_byte;
3737   // Make sure that an out of memory exception is thrown if the length
3738   // of the new cons string is too large.
3739   if (length > String::kMaxLength || length < 0) {
3740     isolate()->context()->mark_out_of_memory();
3741     return Failure::OutOfMemoryException(0x4);
3742   }
3743
3744   bool is_one_byte_data_in_two_byte_string = false;
3745   if (!is_one_byte) {
3746     // At least one of the strings uses two-byte representation so we
3747     // can't use the fast case code for short ASCII strings below, but
3748     // we can try to save memory if all chars actually fit in ASCII.
3749     is_one_byte_data_in_two_byte_string =
3750         first->HasOnlyOneByteChars() && second->HasOnlyOneByteChars();
3751     if (is_one_byte_data_in_two_byte_string) {
3752       isolate_->counters()->string_add_runtime_ext_to_ascii()->Increment();
3753     }
3754   }
3755
3756   // If the resulting string is small make a flat string.
3757   if (length < ConsString::kMinLength) {
3758     // Note that neither of the two inputs can be a slice because:
3759     STATIC_ASSERT(ConsString::kMinLength <= SlicedString::kMinLength);
3760     ASSERT(first->IsFlat());
3761     ASSERT(second->IsFlat());
3762     if (is_one_byte) {
3763       Object* result;
3764       { MaybeObject* maybe_result = AllocateRawOneByteString(length);
3765         if (!maybe_result->ToObject(&result)) return maybe_result;
3766       }
3767       // Copy the characters into the new object.
3768       uint8_t* dest = SeqOneByteString::cast(result)->GetChars();
3769       // Copy first part.
3770       const uint8_t* src;
3771       if (first->IsExternalString()) {
3772         src = ExternalAsciiString::cast(first)->GetChars();
3773       } else {
3774         src = SeqOneByteString::cast(first)->GetChars();
3775       }
3776       for (int i = 0; i < first_length; i++) *dest++ = src[i];
3777       // Copy second part.
3778       if (second->IsExternalString()) {
3779         src = ExternalAsciiString::cast(second)->GetChars();
3780       } else {
3781         src = SeqOneByteString::cast(second)->GetChars();
3782       }
3783       for (int i = 0; i < second_length; i++) *dest++ = src[i];
3784       return result;
3785     } else {
3786       if (is_one_byte_data_in_two_byte_string) {
3787         Object* result;
3788         { MaybeObject* maybe_result = AllocateRawOneByteString(length);
3789           if (!maybe_result->ToObject(&result)) return maybe_result;
3790         }
3791         // Copy the characters into the new object.
3792         uint8_t* dest = SeqOneByteString::cast(result)->GetChars();
3793         String::WriteToFlat(first, dest, 0, first_length);
3794         String::WriteToFlat(second, dest + first_length, 0, second_length);
3795         isolate_->counters()->string_add_runtime_ext_to_ascii()->Increment();
3796         return result;
3797       }
3798
3799       Object* result;
3800       { MaybeObject* maybe_result = AllocateRawTwoByteString(length);
3801         if (!maybe_result->ToObject(&result)) return maybe_result;
3802       }
3803       // Copy the characters into the new object.
3804       uc16* dest = SeqTwoByteString::cast(result)->GetChars();
3805       String::WriteToFlat(first, dest, 0, first_length);
3806       String::WriteToFlat(second, dest + first_length, 0, second_length);
3807       return result;
3808     }
3809   }
3810
3811   Map* map = (is_one_byte || is_one_byte_data_in_two_byte_string) ?
3812       cons_ascii_string_map() : cons_string_map();
3813
3814   Object* result;
3815   { MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
3816     if (!maybe_result->ToObject(&result)) return maybe_result;
3817   }
3818
3819   DisallowHeapAllocation no_gc;
3820   ConsString* cons_string = ConsString::cast(result);
3821   WriteBarrierMode mode = cons_string->GetWriteBarrierMode(no_gc);
3822   cons_string->set_length(length);
3823   cons_string->set_hash_field(String::kEmptyHashField);
3824   cons_string->set_first(first, mode);
3825   cons_string->set_second(second, mode);
3826   return result;
3827 }
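// Informal summary of the concatenation strategy above:
//   - either input is empty: return the other input unchanged.
//   - combined length == 2: reuse or build a two-character string.
//   - combined length < ConsString::kMinLength: copy both parts into a flat
//     sequential string (one-byte when all characters allow it).
//   - otherwise: allocate a ConsString that merely points at both halves.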
3828
3829
3830 MaybeObject* Heap::AllocateSubString(String* buffer,
3831                                      int start,
3832                                      int end,
3833                                      PretenureFlag pretenure) {
3834   int length = end - start;
3835   if (length <= 0) {
3836     return empty_string();
3837   } else if (length == 1) {
3838     return LookupSingleCharacterStringFromCode(buffer->Get(start));
3839   } else if (length == 2) {
3840     // Optimization for two-character strings often used as keys in a
3841     // decompression dictionary.  Check whether we already have the string in
3842     // the string table to prevent creation of many unnecessary strings.
3843     uint16_t c1 = buffer->Get(start);
3844     uint16_t c2 = buffer->Get(start + 1);
3845     return MakeOrFindTwoCharacterString(this, c1, c2);
3846   }
3847
3848   // Make an attempt to flatten the buffer to reduce access time.
3849   buffer = buffer->TryFlattenGetString();
3850
3851   if (!FLAG_string_slices ||
3852       !buffer->IsFlat() ||
3853       length < SlicedString::kMinLength ||
3854       pretenure == TENURED) {
3855     Object* result;
3856     // WriteToFlat takes care of the case when an indirect string has a
3857     // different encoding from its underlying string.  These encodings may
3858     // differ because of externalization.
3859     bool is_one_byte = buffer->IsOneByteRepresentation();
3860     { MaybeObject* maybe_result = is_one_byte
3861                                   ? AllocateRawOneByteString(length, pretenure)
3862                                   : AllocateRawTwoByteString(length, pretenure);
3863       if (!maybe_result->ToObject(&result)) return maybe_result;
3864     }
3865     String* string_result = String::cast(result);
3866     // Copy the characters into the new object.
3867     if (is_one_byte) {
3868       ASSERT(string_result->IsOneByteRepresentation());
3869       uint8_t* dest = SeqOneByteString::cast(string_result)->GetChars();
3870       String::WriteToFlat(buffer, dest, start, end);
3871     } else {
3872       ASSERT(string_result->IsTwoByteRepresentation());
3873       uc16* dest = SeqTwoByteString::cast(string_result)->GetChars();
3874       String::WriteToFlat(buffer, dest, start, end);
3875     }
3876     return result;
3877   }
3878
3879   ASSERT(buffer->IsFlat());
3880 #if VERIFY_HEAP
3881   if (FLAG_verify_heap) {
3882     buffer->StringVerify();
3883   }
3884 #endif
3885
3886   Object* result;
3887   // When slicing an indirect string we use its encoding for a newly created
3888   // slice and don't check the encoding of the underlying string.  This is safe
3889   // even if the encodings are different because of externalization.  If an
3890   // indirect ASCII string is pointing to a two-byte string, the two-byte char
3891   // codes of the underlying string must still fit into ASCII (because
3892   // externalization must not change char codes).
3893   { Map* map = buffer->IsOneByteRepresentation()
3894                  ? sliced_ascii_string_map()
3895                  : sliced_string_map();
3896     MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
3897     if (!maybe_result->ToObject(&result)) return maybe_result;
3898   }
3899
3900   DisallowHeapAllocation no_gc;
3901   SlicedString* sliced_string = SlicedString::cast(result);
3902   sliced_string->set_length(length);
3903   sliced_string->set_hash_field(String::kEmptyHashField);
3904   if (buffer->IsConsString()) {
3905     ConsString* cons = ConsString::cast(buffer);
3906     ASSERT(cons->second()->length() == 0);
3907     sliced_string->set_parent(cons->first());
3908     sliced_string->set_offset(start);
3909   } else if (buffer->IsSlicedString()) {
3910     // Prevent nesting sliced strings.
3911     SlicedString* parent_slice = SlicedString::cast(buffer);
3912     sliced_string->set_parent(parent_slice->parent());
3913     sliced_string->set_offset(start + parent_slice->offset());
3914   } else {
3915     sliced_string->set_parent(buffer);
3916     sliced_string->set_offset(start);
3917   }
3918   ASSERT(sliced_string->parent()->IsSeqString() ||
3919          sliced_string->parent()->IsExternalString());
3920   return result;
3921 }
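// Slicing sketch for the fast path above (taken only when FLAG_string_slices
// is on, the buffer is flat, the length is at least SlicedString::kMinLength
// and the result is not pretenured): a new slice always points at a
// sequential or external string, never at another indirect string, e.g.
//   s  = some flat sequential string
//   s1 = substring(s, 2, 40)   -> parent = s, offset = 2
//   s2 = substring(s1, 3, 30)  -> parent = s, offset = 2 + 3 = 5
// so resolving a slice never requires more than one parent hop.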
3922
3923
3924 MaybeObject* Heap::AllocateExternalStringFromAscii(
3925     const ExternalAsciiString::Resource* resource) {
3926   size_t length = resource->length();
3927   if (length > static_cast<size_t>(String::kMaxLength)) {
3928     isolate()->context()->mark_out_of_memory();
3929     return Failure::OutOfMemoryException(0x5);
3930   }
3931
3932   Map* map = external_ascii_string_map();
3933   Object* result;
3934   { MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
3935     if (!maybe_result->ToObject(&result)) return maybe_result;
3936   }
3937
3938   ExternalAsciiString* external_string = ExternalAsciiString::cast(result);
3939   external_string->set_length(static_cast<int>(length));
3940   external_string->set_hash_field(String::kEmptyHashField);
3941   external_string->set_resource(resource);
3942
3943   return result;
3944 }
3945
3946
3947 MaybeObject* Heap::AllocateExternalStringFromTwoByte(
3948     const ExternalTwoByteString::Resource* resource) {
3949   size_t length = resource->length();
3950   if (length > static_cast<size_t>(String::kMaxLength)) {
3951     isolate()->context()->mark_out_of_memory();
3952     return Failure::OutOfMemoryException(0x6);
3953   }
3954
3955   // For small strings we check whether the resource contains only
3956   // one byte characters.  If yes, we use a different string map.
3957   static const size_t kOneByteCheckLengthLimit = 32;
3958   bool is_one_byte = length <= kOneByteCheckLengthLimit &&
3959       String::IsOneByte(resource->data(), static_cast<int>(length));
3960   Map* map = is_one_byte ?
3961       external_string_with_one_byte_data_map() : external_string_map();
3962   Object* result;
3963   { MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
3964     if (!maybe_result->ToObject(&result)) return maybe_result;
3965   }
3966
3967   ExternalTwoByteString* external_string = ExternalTwoByteString::cast(result);
3968   external_string->set_length(static_cast<int>(length));
3969   external_string->set_hash_field(String::kEmptyHashField);
3970   external_string->set_resource(resource);
3971
3972   return result;
3973 }
3974
3975
3976 MaybeObject* Heap::LookupSingleCharacterStringFromCode(uint16_t code) {
3977   if (code <= String::kMaxOneByteCharCode) {
3978     Object* value = single_character_string_cache()->get(code);
3979     if (value != undefined_value()) return value;
3980
3981     uint8_t buffer[1];
3982     buffer[0] = static_cast<uint8_t>(code);
3983     Object* result;
3984     MaybeObject* maybe_result =
3985         InternalizeOneByteString(Vector<const uint8_t>(buffer, 1));
3986
3987     if (!maybe_result->ToObject(&result)) return maybe_result;
3988     single_character_string_cache()->set(code, result);
3989     return result;
3990   }
3991
3992   Object* result;
3993   { MaybeObject* maybe_result = AllocateRawTwoByteString(1);
3994     if (!maybe_result->ToObject(&result)) return maybe_result;
3995   }
3996   String* answer = String::cast(result);
3997   answer->Set(0, code);
3998   return answer;
3999 }
4000
4001
4002 MaybeObject* Heap::AllocateByteArray(int length, PretenureFlag pretenure) {
4003   if (length < 0 || length > ByteArray::kMaxLength) {
4004     return Failure::OutOfMemoryException(0x7);
4005   }
4006   if (pretenure == NOT_TENURED) {
4007     return AllocateByteArray(length);
4008   }
4009   int size = ByteArray::SizeFor(length);
4010   Object* result;
4011   { MaybeObject* maybe_result = (size <= Page::kMaxNonCodeHeapObjectSize)
4012                    ? old_data_space_->AllocateRaw(size)
4013                    : lo_space_->AllocateRaw(size, NOT_EXECUTABLE);
4014     if (!maybe_result->ToObject(&result)) return maybe_result;
4015   }
4016
4017   reinterpret_cast<ByteArray*>(result)->set_map_no_write_barrier(
4018       byte_array_map());
4019   reinterpret_cast<ByteArray*>(result)->set_length(length);
4020   return result;
4021 }
4022
4023
4024 MaybeObject* Heap::AllocateByteArray(int length) {
4025   if (length < 0 || length > ByteArray::kMaxLength) {
4026     return Failure::OutOfMemoryException(0x8);
4027   }
4028   int size = ByteArray::SizeFor(length);
4029   AllocationSpace space =
4030       (size > Page::kMaxNonCodeHeapObjectSize) ? LO_SPACE : NEW_SPACE;
4031   Object* result;
4032   { MaybeObject* maybe_result = AllocateRaw(size, space, OLD_DATA_SPACE);
4033     if (!maybe_result->ToObject(&result)) return maybe_result;
4034   }
4035
4036   reinterpret_cast<ByteArray*>(result)->set_map_no_write_barrier(
4037       byte_array_map());
4038   reinterpret_cast<ByteArray*>(result)->set_length(length);
4039   return result;
4040 }
4041
4042
4043 void Heap::CreateFillerObjectAt(Address addr, int size) {
4044   if (size == 0) return;
4045   HeapObject* filler = HeapObject::FromAddress(addr);
4046   if (size == kPointerSize) {
4047     filler->set_map_no_write_barrier(one_pointer_filler_map());
4048   } else if (size == 2 * kPointerSize) {
4049     filler->set_map_no_write_barrier(two_pointer_filler_map());
4050   } else {
4051     filler->set_map_no_write_barrier(free_space_map());
4052     FreeSpace::cast(filler)->set_size(size);
4053   }
4054 }
4055
4056
4057 MaybeObject* Heap::AllocateExternalArray(int length,
4058                                          ExternalArrayType array_type,
4059                                          void* external_pointer,
4060                                          PretenureFlag pretenure) {
4061   AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
4062   Object* result;
4063   { MaybeObject* maybe_result = AllocateRaw(ExternalArray::kAlignedSize,
4064                                             space,
4065                                             OLD_DATA_SPACE);
4066     if (!maybe_result->ToObject(&result)) return maybe_result;
4067   }
4068
4069   reinterpret_cast<ExternalArray*>(result)->set_map_no_write_barrier(
4070       MapForExternalArrayType(array_type));
4071   reinterpret_cast<ExternalArray*>(result)->set_length(length);
4072   reinterpret_cast<ExternalArray*>(result)->set_external_pointer(
4073       external_pointer);
4074
4075   return result;
4076 }
4077
4078
4079 MaybeObject* Heap::CreateCode(const CodeDesc& desc,
4080                               Code::Flags flags,
4081                               Handle<Object> self_reference,
4082                               bool immovable,
4083                               bool crankshafted) {
4084   // Allocate ByteArray before the Code object, so that we do not risk
4085   // leaving an uninitialized Code object (and breaking the heap).
4086   ByteArray* reloc_info;
4087   MaybeObject* maybe_reloc_info = AllocateByteArray(desc.reloc_size, TENURED);
4088   if (!maybe_reloc_info->To(&reloc_info)) return maybe_reloc_info;
4089
4090   // Compute size.
4091   int body_size = RoundUp(desc.instr_size, kObjectAlignment);
4092   int obj_size = Code::SizeFor(body_size);
4093   ASSERT(IsAligned(static_cast<intptr_t>(obj_size), kCodeAlignment));
4094   MaybeObject* maybe_result;
4095   // Large code objects and code objects which should stay at a fixed address
4096   // are allocated in large object space.
4097   HeapObject* result;
4098   bool force_lo_space = obj_size > code_space()->AreaSize();
4099   if (force_lo_space) {
4100     maybe_result = lo_space_->AllocateRaw(obj_size, EXECUTABLE);
4101   } else {
4102     maybe_result = code_space_->AllocateRaw(obj_size);
4103   }
4104   if (!maybe_result->To<HeapObject>(&result)) return maybe_result;
4105
4106   if (immovable && !force_lo_space &&
4107       // Objects on the first page of each space are never moved.
4108       !code_space_->FirstPage()->Contains(result->address())) {
4109     // Discard the first code allocation, which was on a page where it could be
4110     // moved.
4111     CreateFillerObjectAt(result->address(), obj_size);
4112     maybe_result = lo_space_->AllocateRaw(obj_size, EXECUTABLE);
4113     if (!maybe_result->To<HeapObject>(&result)) return maybe_result;
4114   }
4115
4116   // Initialize the object
4117   result->set_map_no_write_barrier(code_map());
4118   Code* code = Code::cast(result);
4119   ASSERT(!isolate_->code_range()->exists() ||
4120       isolate_->code_range()->contains(code->address()));
4121   code->set_instruction_size(desc.instr_size);
4122   code->set_relocation_info(reloc_info);
4123   code->set_flags(flags);
4124   if (code->is_call_stub() || code->is_keyed_call_stub()) {
4125     code->set_check_type(RECEIVER_MAP_CHECK);
4126   }
4127   code->set_is_crankshafted(crankshafted);
4128   code->set_deoptimization_data(empty_fixed_array(), SKIP_WRITE_BARRIER);
4129   code->InitializeTypeFeedbackInfoNoWriteBarrier(undefined_value());
4130   code->set_handler_table(empty_fixed_array(), SKIP_WRITE_BARRIER);
4131   code->set_gc_metadata(Smi::FromInt(0));
4132   code->set_ic_age(global_ic_age_);
4133   code->set_prologue_offset(kPrologueOffsetNotSet);
4134   if (code->kind() == Code::OPTIMIZED_FUNCTION) {
4135     code->set_marked_for_deoptimization(false);
4136   }
4137   // Allow self references to the created code object by patching the handle
4138   // to point to the newly allocated Code object.
4139   if (!self_reference.is_null()) {
4140     *(self_reference.location()) = code;
4141   }
4142   // Migrate generated code.
4143   // The generated code can contain Object** values (typically from handles)
4144   // that are dereferenced during the copy to point directly to the actual heap
4145   // objects. These pointers can include references to the code object itself,
4146   // through the self_reference parameter.
4147   code->CopyFrom(desc);
4148
4149 #ifdef VERIFY_HEAP
4150   if (FLAG_verify_heap) {
4151     code->Verify();
4152   }
4153 #endif
4154   return code;
4155 }
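// Placement summary for the allocation above (informal): code objects larger
// than a code-space page area always go to the large object space; an
// "immovable" request that happened to land on an ordinary, movable
// code-space page is re-allocated in the large object space and the first
// attempt is overwritten with a filler, since only objects on the first page
// of a space and large objects are guaranteed never to move.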
4156
4157
4158 MaybeObject* Heap::CopyCode(Code* code) {
4159   // Allocate an object the same size as the code object.
4160   int obj_size = code->Size();
4161   MaybeObject* maybe_result;
4162   if (obj_size > code_space()->AreaSize()) {
4163     maybe_result = lo_space_->AllocateRaw(obj_size, EXECUTABLE);
4164   } else {
4165     maybe_result = code_space_->AllocateRaw(obj_size);
4166   }
4167
4168   Object* result;
4169   if (!maybe_result->ToObject(&result)) return maybe_result;
4170
4171   // Copy code object.
4172   Address old_addr = code->address();
4173   Address new_addr = reinterpret_cast<HeapObject*>(result)->address();
4174   CopyBlock(new_addr, old_addr, obj_size);
4175   // Relocate the copy.
4176   Code* new_code = Code::cast(result);
4177   ASSERT(!isolate_->code_range()->exists() ||
4178       isolate_->code_range()->contains(code->address()));
4179   new_code->Relocate(new_addr - old_addr);
4180   return new_code;
4181 }
4182
4183
4184 MaybeObject* Heap::CopyCode(Code* code, Vector<byte> reloc_info) {
4185   // Allocate ByteArray before the Code object, so that we do not risk
4186   // leaving uninitialized Code object (and breaking the heap).
4187   Object* reloc_info_array;
4188   { MaybeObject* maybe_reloc_info_array =
4189         AllocateByteArray(reloc_info.length(), TENURED);
4190     if (!maybe_reloc_info_array->ToObject(&reloc_info_array)) {
4191       return maybe_reloc_info_array;
4192     }
4193   }
4194
4195   int new_body_size = RoundUp(code->instruction_size(), kObjectAlignment);
4196
4197   int new_obj_size = Code::SizeFor(new_body_size);
4198
4199   Address old_addr = code->address();
4200
4201   size_t relocation_offset =
4202       static_cast<size_t>(code->instruction_end() - old_addr);
4203
4204   MaybeObject* maybe_result;
4205   if (new_obj_size > code_space()->AreaSize()) {
4206     maybe_result = lo_space_->AllocateRaw(new_obj_size, EXECUTABLE);
4207   } else {
4208     maybe_result = code_space_->AllocateRaw(new_obj_size);
4209   }
4210
4211   Object* result;
4212   if (!maybe_result->ToObject(&result)) return maybe_result;
4213
4214   // Copy code object.
4215   Address new_addr = reinterpret_cast<HeapObject*>(result)->address();
4216
4217   // Copy header and instructions.
4218   CopyBytes(new_addr, old_addr, relocation_offset);
4219
4220   Code* new_code = Code::cast(result);
4221   new_code->set_relocation_info(ByteArray::cast(reloc_info_array));
4222
4223   // Copy patched rinfo.
4224   CopyBytes(new_code->relocation_start(),
4225             reloc_info.start(),
4226             static_cast<size_t>(reloc_info.length()));
4227
4228   // Relocate the copy.
4229   ASSERT(!isolate_->code_range()->exists() ||
4230       isolate_->code_range()->contains(code->address()));
4231   new_code->Relocate(new_addr - old_addr);
4232
4233 #ifdef VERIFY_HEAP
4234   if (FLAG_verify_heap) {
4235     code->Verify();
4236   }
4237 #endif
4238   return new_code;
4239 }
4240
4241
4242 MaybeObject* Heap::AllocateWithAllocationSite(Map* map, AllocationSpace space,
4243     Handle<AllocationSite> allocation_site) {
4244   ASSERT(gc_state_ == NOT_IN_GC);
4245   ASSERT(map->instance_type() != MAP_TYPE);
4246   // If allocation failures are disallowed, we may allocate in a different
4247   // space when new space is full and the object is not a large object.
4248   AllocationSpace retry_space =
4249       (space != NEW_SPACE) ? space : TargetSpaceId(map->instance_type());
4250   int size = map->instance_size() + AllocationMemento::kSize;
4251   Object* result;
4252   MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
4253   if (!maybe_result->ToObject(&result)) return maybe_result;
4254   // No need for write barrier since object is white and map is in old space.
4255   HeapObject::cast(result)->set_map_no_write_barrier(map);
4256   AllocationMemento* alloc_memento = reinterpret_cast<AllocationMemento*>(
4257       reinterpret_cast<Address>(result) + map->instance_size());
4258   alloc_memento->set_map_no_write_barrier(allocation_memento_map());
4259   alloc_memento->set_allocation_site(*allocation_site, SKIP_WRITE_BARRIER);
4260   return result;
4261 }
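// Resulting layout (sketch): the object and its AllocationMemento are carved
// out of one contiguous allocation, so the memento sits at a fixed offset
// from the object:
//
//   result                              result + map->instance_size()
//   |                                   |
//   [ object of map->instance_size()    ][ AllocationMemento::kSize        ]
//                                          map  -> allocation_memento_map()
//                                          site -> *allocation_site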
4262
4263
4264 MaybeObject* Heap::Allocate(Map* map, AllocationSpace space) {
4265   ASSERT(gc_state_ == NOT_IN_GC);
4266   ASSERT(map->instance_type() != MAP_TYPE);
4267   // If allocation failures are disallowed, we may allocate in a different
4268   // space when new space is full and the object is not a large object.
4269   AllocationSpace retry_space =
4270       (space != NEW_SPACE) ? space : TargetSpaceId(map->instance_type());
4271   int size = map->instance_size();
4272   Object* result;
4273   MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
4274   if (!maybe_result->ToObject(&result)) return maybe_result;
4275   // No need for write barrier since object is white and map is in old space.
4276   HeapObject::cast(result)->set_map_no_write_barrier(map);
4277   return result;
4278 }
4279
4280
4281 void Heap::InitializeFunction(JSFunction* function,
4282                               SharedFunctionInfo* shared,
4283                               Object* prototype) {
4284   ASSERT(!prototype->IsMap());
4285   function->initialize_properties();
4286   function->initialize_elements();
4287   function->set_shared(shared);
4288   function->set_code(shared->code());
4289   function->set_prototype_or_initial_map(prototype);
4290   function->set_context(undefined_value());
4291   function->set_literals_or_bindings(empty_fixed_array());
4292   function->set_next_function_link(undefined_value());
4293 }
4294
4295
4296 MaybeObject* Heap::AllocateFunctionPrototype(JSFunction* function) {
4297   // Make sure to use globals from the function's context, since the function
4298   // can be from a different context.
4299   Context* native_context = function->context()->native_context();
4300   Map* new_map;
4301   if (function->shared()->is_generator()) {
4302     // Generator prototypes can share maps since they don't have "constructor"
4303     // properties.
4304     new_map = native_context->generator_object_prototype_map();
4305   } else {
4306     // Each function prototype gets a fresh map to avoid unwanted sharing of
4307     // maps between prototypes of different constructors.
4308     JSFunction* object_function = native_context->object_function();
4309     ASSERT(object_function->has_initial_map());
4310     MaybeObject* maybe_map = object_function->initial_map()->Copy();
4311     if (!maybe_map->To(&new_map)) return maybe_map;
4312   }
4313
4314   Object* prototype;
4315   MaybeObject* maybe_prototype = AllocateJSObjectFromMap(new_map);
4316   if (!maybe_prototype->ToObject(&prototype)) return maybe_prototype;
4317
4318   if (!function->shared()->is_generator()) {
4319     MaybeObject* maybe_failure =
4320         JSObject::cast(prototype)->SetLocalPropertyIgnoreAttributes(
4321             constructor_string(), function, DONT_ENUM);
4322     if (maybe_failure->IsFailure()) return maybe_failure;
4323   }
4324
4325   return prototype;
4326 }
4327
4328
4329 MaybeObject* Heap::AllocateFunction(Map* function_map,
4330                                     SharedFunctionInfo* shared,
4331                                     Object* prototype,
4332                                     PretenureFlag pretenure) {
4333   AllocationSpace space =
4334       (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
4335   Object* result;
4336   { MaybeObject* maybe_result = Allocate(function_map, space);
4337     if (!maybe_result->ToObject(&result)) return maybe_result;
4338   }
4339   InitializeFunction(JSFunction::cast(result), shared, prototype);
4340   return result;
4341 }
4342
4343
4344 MaybeObject* Heap::AllocateArgumentsObject(Object* callee, int length) {
4345   // To get fast allocation and map sharing for arguments objects we
4346   // allocate them based on an arguments boilerplate.
4347
4348   JSObject* boilerplate;
4349   int arguments_object_size;
4350   bool strict_mode_callee = callee->IsJSFunction() &&
4351       !JSFunction::cast(callee)->shared()->is_classic_mode();
4352   if (strict_mode_callee) {
4353     boilerplate =
4354         isolate()->context()->native_context()->
4355             strict_mode_arguments_boilerplate();
4356     arguments_object_size = kArgumentsObjectSizeStrict;
4357   } else {
4358     boilerplate =
4359         isolate()->context()->native_context()->arguments_boilerplate();
4360     arguments_object_size = kArgumentsObjectSize;
4361   }
4362
4363   // This calls Copy directly rather than using Heap::AllocateRaw so we
4364   // duplicate the check here.
4365   ASSERT(AllowHeapAllocation::IsAllowed() && gc_state_ == NOT_IN_GC);
4366
4367   // Check that the size of the boilerplate matches our
4368   // expectations. The ArgumentsAccessStub::GenerateNewObject relies
4369   // on the size being a known constant.
4370   ASSERT(arguments_object_size == boilerplate->map()->instance_size());
4371
4372   // Do the allocation.
4373   Object* result;
4374   { MaybeObject* maybe_result =
4375         AllocateRaw(arguments_object_size, NEW_SPACE, OLD_POINTER_SPACE);
4376     if (!maybe_result->ToObject(&result)) return maybe_result;
4377   }
4378
4379   // Copy the content. The arguments boilerplate doesn't have any
4380   // fields that point to new space so it's safe to skip the write
4381   // barrier here.
4382   CopyBlock(HeapObject::cast(result)->address(),
4383             boilerplate->address(),
4384             JSObject::kHeaderSize);
4385
4386   // Set the length property.
4387   JSObject::cast(result)->InObjectPropertyAtPut(kArgumentsLengthIndex,
4388                                                 Smi::FromInt(length),
4389                                                 SKIP_WRITE_BARRIER);
4390   // Set the callee property for non-strict mode arguments objects only.
4391   if (!strict_mode_callee) {
4392     JSObject::cast(result)->InObjectPropertyAtPut(kArgumentsCalleeIndex,
4393                                                   callee);
4394   }
4395
4396   // Check the state of the object
4397   ASSERT(JSObject::cast(result)->HasFastProperties());
4398   ASSERT(JSObject::cast(result)->HasFastObjectElements());
4399
4400   return result;
4401 }
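// Note on the fast path above: only JSObject::kHeaderSize bytes are copied
// from the boilerplate, because the in-object slots (length always, callee
// only for non-strict callees) are written explicitly right after the copy,
// and the boilerplate holds no new-space pointers, so no write barrier is
// needed.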
4402
4403
4404 MaybeObject* Heap::AllocateInitialMap(JSFunction* fun) {
4405   ASSERT(!fun->has_initial_map());
4406
4407   // First create a new map with the size and number of in-object properties
4408   // suggested by the function.
4409   InstanceType instance_type;
4410   int instance_size;
4411   int in_object_properties;
4412   if (fun->shared()->is_generator()) {
4413     instance_type = JS_GENERATOR_OBJECT_TYPE;
4414     instance_size = JSGeneratorObject::kSize;
4415     in_object_properties = 0;
4416   } else {
4417     instance_type = JS_OBJECT_TYPE;
4418     instance_size = fun->shared()->CalculateInstanceSize();
4419     in_object_properties = fun->shared()->CalculateInObjectProperties();
4420   }
4421   Map* map;
4422   MaybeObject* maybe_map = AllocateMap(instance_type, instance_size);
4423   if (!maybe_map->To(&map)) return maybe_map;
4424
4425   // Fetch or allocate prototype.
4426   Object* prototype;
4427   if (fun->has_instance_prototype()) {
4428     prototype = fun->instance_prototype();
4429   } else {
4430     MaybeObject* maybe_prototype = AllocateFunctionPrototype(fun);
4431     if (!maybe_prototype->To(&prototype)) return maybe_prototype;
4432   }
4433   map->set_inobject_properties(in_object_properties);
4434   map->set_unused_property_fields(in_object_properties);
4435   map->set_prototype(prototype);
4436   ASSERT(map->has_fast_object_elements());
4437
4438   if (!fun->shared()->is_generator()) {
4439     fun->shared()->StartInobjectSlackTracking(map);
4440   }
4441
4442   return map;
4443 }
4444
4445
4446 void Heap::InitializeJSObjectFromMap(JSObject* obj,
4447                                      FixedArray* properties,
4448                                      Map* map) {
4449   obj->set_properties(properties);
4450   obj->initialize_elements();
4451   // TODO(1240798): Initialize the object's body using valid initial values
4452   // according to the object's initial map.  For example, if the map's
4453   // instance type is JS_ARRAY_TYPE, the length field should be initialized
4454   // to a number (e.g. Smi::FromInt(0)) and the elements initialized to a
4455   // fixed array (e.g. Heap::empty_fixed_array()).  Currently, the object
4456   // verification code has to cope with (temporarily) invalid objects.  See,
4457   // for example, JSArray::JSArrayVerify.
4458   Object* filler;
4459   // We cannot always fill with one_pointer_filler_map because objects
4460   // created from API functions expect their internal fields to be initialized
4461   // with undefined_value.
4462   // Pre-allocated fields need to be initialized with undefined_value as well
4463   // so that object accesses before the constructor completes (e.g. in the
4464   // debugger) will not cause a crash.
4465   if (map->constructor()->IsJSFunction() &&
4466       JSFunction::cast(map->constructor())->shared()->
4467           IsInobjectSlackTrackingInProgress()) {
4468     // We might want to shrink the object later.
4469     ASSERT(obj->GetInternalFieldCount() == 0);
4470     filler = Heap::one_pointer_filler_map();
4471   } else {
4472     filler = Heap::undefined_value();
4473   }
4474   obj->InitializeBody(map, Heap::undefined_value(), filler);
4475 }
4476
4477
4478 MaybeObject* Heap::AllocateJSObjectFromMap(Map* map, PretenureFlag pretenure) {
4479   // JSFunctions should be allocated using AllocateFunction to be
4480   // properly initialized.
4481   ASSERT(map->instance_type() != JS_FUNCTION_TYPE);
4482
4483   // Both types of global objects should be allocated using
4484   // AllocateGlobalObject to be properly initialized.
4485   ASSERT(map->instance_type() != JS_GLOBAL_OBJECT_TYPE);
4486   ASSERT(map->instance_type() != JS_BUILTINS_OBJECT_TYPE);
4487
4488   // Allocate the backing storage for the properties.
4489   int prop_size = map->InitialPropertiesLength();
4490   ASSERT(prop_size >= 0);
4491   Object* properties;
4492   { MaybeObject* maybe_properties = AllocateFixedArray(prop_size, pretenure);
4493     if (!maybe_properties->ToObject(&properties)) return maybe_properties;
4494   }
4495
4496   // Allocate the JSObject.
4497   AllocationSpace space =
4498       (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
4499   if (map->instance_size() > Page::kMaxNonCodeHeapObjectSize) space = LO_SPACE;
4500   Object* obj;
4501   MaybeObject* maybe_obj = Allocate(map, space);
4502   if (!maybe_obj->To(&obj)) return maybe_obj;
4503
4504   // Initialize the JSObject.
4505   InitializeJSObjectFromMap(JSObject::cast(obj),
4506                             FixedArray::cast(properties),
4507                             map);
4508   ASSERT(JSObject::cast(obj)->HasFastElements() ||
4509          JSObject::cast(obj)->HasExternalArrayElements());
4510   return obj;
4511 }
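// A minimal caller sketch, written as if inside another MaybeObject*-returning
// allocator (hypothetical; `map` is assumed to be a plain JSObject map, since
// function and global maps take the specialized paths asserted above). It
// mirrors how AllocateJSModule below uses this helper:
//
//   JSObject* object;
//   { MaybeObject* maybe = AllocateJSObjectFromMap(map, TENURED);
//     if (!maybe->To(&object)) return maybe;
//   }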
4512
4513
4514 MaybeObject* Heap::AllocateJSObjectFromMapWithAllocationSite(Map* map,
4515     Handle<AllocationSite> allocation_site) {
4516   // JSFunctions should be allocated using AllocateFunction to be
4517   // properly initialized.
4518   ASSERT(map->instance_type() != JS_FUNCTION_TYPE);
4519
4520   // Both types of global objects should be allocated using
4521   // AllocateGlobalObject to be properly initialized.
4522   ASSERT(map->instance_type() != JS_GLOBAL_OBJECT_TYPE);
4523   ASSERT(map->instance_type() != JS_BUILTINS_OBJECT_TYPE);
4524
4525   // Allocate the backing storage for the properties.
4526   int prop_size = map->InitialPropertiesLength();
4527   ASSERT(prop_size >= 0);
4528   Object* properties;
4529   { MaybeObject* maybe_properties = AllocateFixedArray(prop_size);
4530     if (!maybe_properties->ToObject(&properties)) return maybe_properties;
4531   }
4532
4533   // Allocate the JSObject.
4534   AllocationSpace space = NEW_SPACE;
4535   if (map->instance_size() > Page::kMaxNonCodeHeapObjectSize) space = LO_SPACE;
4536   Object* obj;
4537   MaybeObject* maybe_obj =
4538       AllocateWithAllocationSite(map, space, allocation_site);
4539   if (!maybe_obj->To(&obj)) return maybe_obj;
4540
4541   // Initialize the JSObject.
4542   InitializeJSObjectFromMap(JSObject::cast(obj),
4543                             FixedArray::cast(properties),
4544                             map);
4545   ASSERT(JSObject::cast(obj)->HasFastElements());
4546   return obj;
4547 }
4548
4549
4550 MaybeObject* Heap::AllocateJSObject(JSFunction* constructor,
4551                                     PretenureFlag pretenure) {
4552   // Allocate the initial map if absent.
4553   if (!constructor->has_initial_map()) {
4554     Object* initial_map;
4555     { MaybeObject* maybe_initial_map = AllocateInitialMap(constructor);
4556       if (!maybe_initial_map->ToObject(&initial_map)) return maybe_initial_map;
4557     }
4558     constructor->set_initial_map(Map::cast(initial_map));
4559     Map::cast(initial_map)->set_constructor(constructor);
4560   }
4561   // Allocate the object based on the constructor's initial map.
4562   MaybeObject* result = AllocateJSObjectFromMap(
4563       constructor->initial_map(), pretenure);
4564 #ifdef DEBUG
4565   // Make sure result is NOT a global object if valid.
4566   Object* non_failure;
4567   ASSERT(!result->ToObject(&non_failure) || !non_failure->IsGlobalObject());
4568 #endif
4569   return result;
4570 }
4571
4572
4573 MaybeObject* Heap::AllocateJSObjectWithAllocationSite(JSFunction* constructor,
4574     Handle<AllocationSite> allocation_site) {
4575   // Allocate the initial map if absent.
4576   if (!constructor->has_initial_map()) {
4577     Object* initial_map;
4578     { MaybeObject* maybe_initial_map = AllocateInitialMap(constructor);
4579       if (!maybe_initial_map->ToObject(&initial_map)) return maybe_initial_map;
4580     }
4581     constructor->set_initial_map(Map::cast(initial_map));
4582     Map::cast(initial_map)->set_constructor(constructor);
4583   }
4584   // Allocate the object based on the constructor's initial map, or on the
4585   // payload advice carried in the allocation site's transition info.
4586   Map* initial_map = constructor->initial_map();
4587
4588   Smi* smi = Smi::cast(allocation_site->transition_info());
4589   ElementsKind to_kind = static_cast<ElementsKind>(smi->value());
4590   AllocationSiteMode mode = TRACK_ALLOCATION_SITE;
4591   if (to_kind != initial_map->elements_kind()) {
4592     MaybeObject* maybe_new_map = initial_map->AsElementsKind(to_kind);
4593     if (!maybe_new_map->To(&initial_map)) return maybe_new_map;
4594     // Possibly alter the mode, since we found an updated elements kind
4595     // in the type info cell.
4596     mode = AllocationSite::GetMode(to_kind);
4597   }
4598
4599   MaybeObject* result;
4600   if (mode == TRACK_ALLOCATION_SITE) {
4601     result = AllocateJSObjectFromMapWithAllocationSite(initial_map,
4602         allocation_site);
4603   } else {
4604     result = AllocateJSObjectFromMap(initial_map, NOT_TENURED);
4605   }
4606 #ifdef DEBUG
4607   // Make sure result is NOT a global object if valid.
4608   Object* non_failure;
4609   ASSERT(!result->ToObject(&non_failure) || !non_failure->IsGlobalObject());
4610 #endif
4611   return result;
4612 }
4613
4614
4615 MaybeObject* Heap::AllocateJSGeneratorObject(JSFunction* function) {
4616   ASSERT(function->shared()->is_generator());
4617   Map* map;
4618   if (function->has_initial_map()) {
4619     map = function->initial_map();
4620   } else {
4621     // Allocate the initial map if absent.
4622     MaybeObject* maybe_map = AllocateInitialMap(function);
4623     if (!maybe_map->To(&map)) return maybe_map;
4624     function->set_initial_map(map);
4625     map->set_constructor(function);
4626   }
4627   ASSERT(map->instance_type() == JS_GENERATOR_OBJECT_TYPE);
4628   return AllocateJSObjectFromMap(map);
4629 }
4630
4631
4632 MaybeObject* Heap::AllocateJSModule(Context* context, ScopeInfo* scope_info) {
4633   // Allocate a fresh map. Modules do not have a prototype.
4634   Map* map;
4635   MaybeObject* maybe_map = AllocateMap(JS_MODULE_TYPE, JSModule::kSize);
4636   if (!maybe_map->To(&map)) return maybe_map;
4637   // Allocate the object based on the map.
4638   JSModule* module;
4639   MaybeObject* maybe_module = AllocateJSObjectFromMap(map, TENURED);
4640   if (!maybe_module->To(&module)) return maybe_module;
4641   module->set_context(context);
4642   module->set_scope_info(scope_info);
4643   return module;
4644 }
4645
4646
4647 MaybeObject* Heap::AllocateJSArrayAndStorage(
4648     ElementsKind elements_kind,
4649     int length,
4650     int capacity,
4651     ArrayStorageAllocationMode mode,
4652     PretenureFlag pretenure) {
4653   MaybeObject* maybe_array = AllocateJSArray(elements_kind, pretenure);
4654   JSArray* array;
4655   if (!maybe_array->To(&array)) return maybe_array;
4656
4657   // TODO(mvstanton): this body of code duplicates AllocateJSArrayStorage
4658   // for performance reasons.
4659   ASSERT(capacity >= length);
4660
4661   if (capacity == 0) {
4662     array->set_length(Smi::FromInt(0));
4663     array->set_elements(empty_fixed_array());
4664     return array;
4665   }
4666
4667   FixedArrayBase* elms;
4668   MaybeObject* maybe_elms = NULL;
4669   if (IsFastDoubleElementsKind(elements_kind)) {
4670     if (mode == DONT_INITIALIZE_ARRAY_ELEMENTS) {
4671       maybe_elms = AllocateUninitializedFixedDoubleArray(capacity);
4672     } else {
4673       ASSERT(mode == INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
4674       maybe_elms = AllocateFixedDoubleArrayWithHoles(capacity);
4675     }
4676   } else {
4677     ASSERT(IsFastSmiOrObjectElementsKind(elements_kind));
4678     if (mode == DONT_INITIALIZE_ARRAY_ELEMENTS) {
4679       maybe_elms = AllocateUninitializedFixedArray(capacity);
4680     } else {
4681       ASSERT(mode == INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
4682       maybe_elms = AllocateFixedArrayWithHoles(capacity);
4683     }
4684   }
4685   if (!maybe_elms->To(&elms)) return maybe_elms;
4686
4687   array->set_elements(elms);
4688   array->set_length(Smi::FromInt(length));
4689   return array;
4690 }
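// A hypothetical call illustrating the parameters (the FAST_ELEMENTS kind and
// the literal sizes are only examples): allocate a length-0 JSArray backed by
// a 16-element store that is pre-filled with the hole, so the array is safe
// to expose before it is populated:
//
//   MaybeObject* maybe_array = AllocateJSArrayAndStorage(
//       FAST_ELEMENTS, 0, 16, INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE,
//       NOT_TENURED);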
4691
4692
4693 MaybeObject* Heap::AllocateJSArrayAndStorageWithAllocationSite(
4694     ElementsKind elements_kind,
4695     int length,
4696     int capacity,
4697     Handle<AllocationSite> allocation_site,
4698     ArrayStorageAllocationMode mode) {
4699   MaybeObject* maybe_array = AllocateJSArrayWithAllocationSite(elements_kind,
4700       allocation_site);
4701   JSArray* array;
4702   if (!maybe_array->To(&array)) return maybe_array;
4703   return AllocateJSArrayStorage(array, length, capacity, mode);
4704 }
4705
4706
4707 MaybeObject* Heap::AllocateJSArrayStorage(
4708     JSArray* array,
4709     int length,
4710     int capacity,
4711     ArrayStorageAllocationMode mode) {
4712   ASSERT(capacity >= length);
4713
4714   if (capacity == 0) {
4715     array->set_length(Smi::FromInt(0));
4716     array->set_elements(empty_fixed_array());
4717     return array;
4718   }
4719
4720   FixedArrayBase* elms;
4721   MaybeObject* maybe_elms = NULL;
4722   ElementsKind elements_kind = array->GetElementsKind();
4723   if (IsFastDoubleElementsKind(elements_kind)) {
4724     if (mode == DONT_INITIALIZE_ARRAY_ELEMENTS) {
4725       maybe_elms = AllocateUninitializedFixedDoubleArray(capacity);
4726     } else {
4727       ASSERT(mode == INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
4728       maybe_elms = AllocateFixedDoubleArrayWithHoles(capacity);
4729     }
4730   } else {
4731     ASSERT(IsFastSmiOrObjectElementsKind(elements_kind));
4732     if (mode == DONT_INITIALIZE_ARRAY_ELEMENTS) {
4733       maybe_elms = AllocateUninitializedFixedArray(capacity);
4734     } else {
4735       ASSERT(mode == INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
4736       maybe_elms = AllocateFixedArrayWithHoles(capacity);
4737     }
4738   }
4739   if (!maybe_elms->To(&elms)) return maybe_elms;
4740
4741   array->set_elements(elms);
4742   array->set_length(Smi::FromInt(length));
4743   return array;
4744 }
4745
4746
4747 MaybeObject* Heap::AllocateJSArrayWithElements(
4748     FixedArrayBase* elements,
4749     ElementsKind elements_kind,
4750     int length,
4751     PretenureFlag pretenure) {
4752   MaybeObject* maybe_array = AllocateJSArray(elements_kind, pretenure);
4753   JSArray* array;
4754   if (!maybe_array->To(&array)) return maybe_array;
4755
4756   array->set_elements(elements);
4757   array->set_length(Smi::FromInt(length));
4758   array->ValidateElements();
4759   return array;
4760 }
4761
4762
4763 MaybeObject* Heap::AllocateJSProxy(Object* handler, Object* prototype) {
4764   // Allocate map.
4765   // TODO(rossberg): Once we optimize proxies, think about a scheme to share
4766   // maps. Will probably depend on the identity of the handler object, too.
4767   Map* map;
4768   MaybeObject* maybe_map_obj = AllocateMap(JS_PROXY_TYPE, JSProxy::kSize);
4769   if (!maybe_map_obj->To<Map>(&map)) return maybe_map_obj;
4770   map->set_prototype(prototype);
4771
4772   // Allocate the proxy object.
4773   JSProxy* result;
4774   MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
4775   if (!maybe_result->To<JSProxy>(&result)) return maybe_result;
4776   result->InitializeBody(map->instance_size(), Smi::FromInt(0));
4777   result->set_handler(handler);
4778   result->set_hash(undefined_value(), SKIP_WRITE_BARRIER);
4779   return result;
4780 }
4781
4782
4783 MaybeObject* Heap::AllocateJSFunctionProxy(Object* handler,
4784                                            Object* call_trap,
4785                                            Object* construct_trap,
4786                                            Object* prototype) {
4787   // Allocate map.
4788   // TODO(rossberg): Once we optimize proxies, think about a scheme to share
4789   // maps. Will probably depend on the identity of the handler object, too.
4790   Map* map;
4791   MaybeObject* maybe_map_obj =
4792       AllocateMap(JS_FUNCTION_PROXY_TYPE, JSFunctionProxy::kSize);
4793   if (!maybe_map_obj->To<Map>(&map)) return maybe_map_obj;
4794   map->set_prototype(prototype);
4795
4796   // Allocate the proxy object.
4797   JSFunctionProxy* result;
4798   MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
4799   if (!maybe_result->To<JSFunctionProxy>(&result)) return maybe_result;
4800   result->InitializeBody(map->instance_size(), Smi::FromInt(0));
4801   result->set_handler(handler);
4802   result->set_hash(undefined_value(), SKIP_WRITE_BARRIER);
4803   result->set_call_trap(call_trap);
4804   result->set_construct_trap(construct_trap);
4805   return result;
4806 }
4807
4808
4809 MaybeObject* Heap::AllocateGlobalObject(JSFunction* constructor) {
4810   ASSERT(constructor->has_initial_map());
4811   Map* map = constructor->initial_map();
4812   ASSERT(map->is_dictionary_map());
4813
4814   // Make sure no field properties are described in the initial map.
4815   // This guarantees us that normalizing the properties does not
4816   // require us to change property values to PropertyCells.
4817   ASSERT(map->NextFreePropertyIndex() == 0);
4818
4819   // Make sure we don't have a ton of pre-allocated slots in the
4820   // global objects. They will be unused once we normalize the object.
4821   ASSERT(map->unused_property_fields() == 0);
4822   ASSERT(map->inobject_properties() == 0);
4823
4824   // Initial size of the backing store to avoid resize of the storage during
4825   // bootstrapping. The size differs between the JS global object and the
4826   // builtins object.
4827   int initial_size = map->instance_type() == JS_GLOBAL_OBJECT_TYPE ? 64 : 512;
4828
4829   // Allocate a dictionary object for backing storage.
4830   NameDictionary* dictionary;
4831   MaybeObject* maybe_dictionary =
4832       NameDictionary::Allocate(
4833           this,
4834           map->NumberOfOwnDescriptors() * 2 + initial_size);
4835   if (!maybe_dictionary->To(&dictionary)) return maybe_dictionary;
4836
4837   // The global object might be created from an object template with accessors.
4838   // Fill these accessors into the dictionary.
4839   DescriptorArray* descs = map->instance_descriptors();
4840   for (int i = 0; i < map->NumberOfOwnDescriptors(); i++) {
4841     PropertyDetails details = descs->GetDetails(i);
4842     ASSERT(details.type() == CALLBACKS);  // Only accessors are expected.
4843     PropertyDetails d = PropertyDetails(details.attributes(), CALLBACKS, i + 1);
4844     Object* value = descs->GetCallbacksObject(i);
4845     MaybeObject* maybe_value = AllocatePropertyCell(value);
4846     if (!maybe_value->ToObject(&value)) return maybe_value;
4847
4848     MaybeObject* maybe_added = dictionary->Add(descs->GetKey(i), value, d);
4849     if (!maybe_added->To(&dictionary)) return maybe_added;
4850   }
4851
4852   // Allocate the global object and initialize it with the backing store.
4853   JSObject* global;
4854   MaybeObject* maybe_global = Allocate(map, OLD_POINTER_SPACE);
4855   if (!maybe_global->To(&global)) return maybe_global;
4856
4857   InitializeJSObjectFromMap(global, dictionary, map);
4858
4859   // Create a new map for the global object.
4860   Map* new_map;
4861   MaybeObject* maybe_map = map->CopyDropDescriptors();
4862   if (!maybe_map->To(&new_map)) return maybe_map;
4863   new_map->set_dictionary_map(true);
4864
4865   // Set up the global object as a normalized object.
4866   global->set_map(new_map);
4867   global->set_properties(dictionary);
4868
4869   // Make sure result is a global object with properties in dictionary.
4870   ASSERT(global->IsGlobalObject());
4871   ASSERT(!global->HasFastProperties());
4872   return global;
4873 }
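// Summary of the flow above: global objects are created already normalized --
// the accessors described by the initial map are boxed into PropertyCells and
// inserted into a NameDictionary, the map is copied with its descriptors
// dropped and flagged as a dictionary map, and the dictionary becomes the
// properties backing store.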
4874
4875
4876 MaybeObject* Heap::CopyJSObject(JSObject* source) {
4877   // Never used to copy functions.  If functions need to be copied we
4878   // have to be careful to clear the literals array.
4879   SLOW_ASSERT(!source->IsJSFunction());
4880
4881   // Make the clone.
4882   Map* map = source->map();
4883   int object_size = map->instance_size();
4884   Object* clone;
4885
4886   WriteBarrierMode wb_mode = UPDATE_WRITE_BARRIER;
4887
4888   // If we're forced to always allocate, we use the general allocation
4889   // functions which may leave us with an object in old space.
4890   if (always_allocate()) {
4891     { MaybeObject* maybe_clone =
4892           AllocateRaw(object_size, NEW_SPACE, OLD_POINTER_SPACE);
4893       if (!maybe_clone->ToObject(&clone)) return maybe_clone;
4894     }
4895     Address clone_address = HeapObject::cast(clone)->address();
4896     CopyBlock(clone_address,
4897               source->address(),
4898               object_size);
4899     // Update write barrier for all fields that lie beyond the header.
4900     RecordWrites(clone_address,
4901                  JSObject::kHeaderSize,
4902                  (object_size - JSObject::kHeaderSize) / kPointerSize);
4903   } else {
4904     wb_mode = SKIP_WRITE_BARRIER;
4905
4906     { MaybeObject* maybe_clone = new_space_.AllocateRaw(object_size);
4907       if (!maybe_clone->ToObject(&clone)) return maybe_clone;
4908     }
4909     SLOW_ASSERT(InNewSpace(clone));
4910     // Since we know the clone is allocated in new space, we can copy
4911     // the contents without worrying about updating the write barrier.
4912     CopyBlock(HeapObject::cast(clone)->address(),
4913               source->address(),
4914               object_size);
4915   }
4916
4917   SLOW_ASSERT(
4918       JSObject::cast(clone)->GetElementsKind() == source->GetElementsKind());
4919   FixedArrayBase* elements = FixedArrayBase::cast(source->elements());
4920   FixedArray* properties = FixedArray::cast(source->properties());
4921   // Update elements if necessary.
4922   if (elements->length() > 0) {
4923     Object* elem;
4924     { MaybeObject* maybe_elem;
4925       if (elements->map() == fixed_cow_array_map()) {
4926         maybe_elem = FixedArray::cast(elements);
4927       } else if (source->HasFastDoubleElements()) {
4928         maybe_elem = CopyFixedDoubleArray(FixedDoubleArray::cast(elements));
4929       } else {
4930         maybe_elem = CopyFixedArray(FixedArray::cast(elements));
4931       }
4932       if (!maybe_elem->ToObject(&elem)) return maybe_elem;
4933     }
4934     JSObject::cast(clone)->set_elements(FixedArrayBase::cast(elem), wb_mode);
4935   }
4936   // Update properties if necessary.
4937   if (properties->length() > 0) {
4938     Object* prop;
4939     { MaybeObject* maybe_prop = CopyFixedArray(properties);
4940       if (!maybe_prop->ToObject(&prop)) return maybe_prop;
4941     }
4942     JSObject::cast(clone)->set_properties(FixedArray::cast(prop), wb_mode);
4943   }
4944   // Return the new clone.
4945   return clone;
4946 }
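// Note on write barriers in the copy above: when the clone may land in old
// space (always_allocate()), RecordWrites covers every field past the header;
// when the clone is known to be in new space, the block copy needs no barrier
// and SKIP_WRITE_BARRIER is passed to the elements/properties updates that
// follow.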
4947
4948
4949 MaybeObject* Heap::CopyJSObjectWithAllocationSite(
4950     JSObject* source,
4951     AllocationSite* site) {
4952   // Never used to copy functions.  If functions need to be copied we
4953   // have to be careful to clear the literals array.
4954   SLOW_ASSERT(!source->IsJSFunction());
4955
4956   // Make the clone.
4957   Map* map = source->map();
4958   int object_size = map->instance_size();
4959   Object* clone;
4960
4961   ASSERT(map->CanTrackAllocationSite());
4962   ASSERT(map->instance_type() == JS_ARRAY_TYPE);
4963   WriteBarrierMode wb_mode = UPDATE_WRITE_BARRIER;
4964
4965   // If we're forced to always allocate, we use the general allocation
4966   // functions which may leave us with an object in old space.
4967   int adjusted_object_size = object_size;
4968   if (always_allocate()) {
4969     // We'll only track the origin if we are certain to allocate in new space.
4970     const int kMinFreeNewSpaceAfterGC = InitialSemiSpaceSize() * 3/4;
4971     if ((object_size + AllocationMemento::kSize) < kMinFreeNewSpaceAfterGC) {
4972       adjusted_object_size += AllocationMemento::kSize;
4973     }
4974
4975     { MaybeObject* maybe_clone =
4976           AllocateRaw(adjusted_object_size, NEW_SPACE, OLD_POINTER_SPACE);
4977       if (!maybe_clone->ToObject(&clone)) return maybe_clone;
4978     }
4979     Address clone_address = HeapObject::cast(clone)->address();
4980     CopyBlock(clone_address,
4981               source->address(),
4982               object_size);
4983     // Update write barrier for all fields that lie beyond the header.
4984     int write_barrier_offset = adjusted_object_size > object_size
4985         ? JSArray::kSize + AllocationMemento::kSize
4986         : JSObject::kHeaderSize;
4987     if (((object_size - write_barrier_offset) / kPointerSize) > 0) {
4988       RecordWrites(clone_address,
4989                    write_barrier_offset,
4990                    (object_size - write_barrier_offset) / kPointerSize);
4991     }
4992
4993     // Track allocation site information, if we failed to allocate it inline.
4994     if (InNewSpace(clone) &&
4995         adjusted_object_size == object_size) {
4996       MaybeObject* maybe_alloc_memento =
4997           AllocateStruct(ALLOCATION_MEMENTO_TYPE);
4998       AllocationMemento* alloc_memento;
4999       if (maybe_alloc_memento->To(&alloc_memento)) {
5000         alloc_memento->set_map_no_write_barrier(allocation_memento_map());
5001         alloc_memento->set_allocation_site(site, SKIP_WRITE_BARRIER);
5002       }
5003     }
5004   } else {
5005     wb_mode = SKIP_WRITE_BARRIER;
5006     adjusted_object_size += AllocationMemento::kSize;
5007
5008     { MaybeObject* maybe_clone = new_space_.AllocateRaw(adjusted_object_size);
5009       if (!maybe_clone->ToObject(&clone)) return maybe_clone;
5010     }
5011     SLOW_ASSERT(InNewSpace(clone));
5012     // Since we know the clone is allocated in new space, we can copy
5013     // the contents without worrying about updating the write barrier.
5014     CopyBlock(HeapObject::cast(clone)->address(),
5015               source->address(),
5016               object_size);
5017   }
5018
5019   if (adjusted_object_size > object_size) {
5020     AllocationMemento* alloc_memento = reinterpret_cast<AllocationMemento*>(
5021         reinterpret_cast<Address>(clone) + object_size);
5022     alloc_memento->set_map_no_write_barrier(allocation_memento_map());
5023     alloc_memento->set_allocation_site(site, SKIP_WRITE_BARRIER);
5024   }
5025
5026   SLOW_ASSERT(
5027       JSObject::cast(clone)->GetElementsKind() == source->GetElementsKind());
5028   FixedArrayBase* elements = FixedArrayBase::cast(source->elements());
5029   FixedArray* properties = FixedArray::cast(source->properties());
5030   // Update elements if necessary.
5031   if (elements->length() > 0) {
5032     Object* elem;
5033     { MaybeObject* maybe_elem;
5034       if (elements->map() == fixed_cow_array_map()) {
5035         maybe_elem = FixedArray::cast(elements);
5036       } else if (source->HasFastDoubleElements()) {
5037         maybe_elem = CopyFixedDoubleArray(FixedDoubleArray::cast(elements));
5038       } else {
5039         maybe_elem = CopyFixedArray(FixedArray::cast(elements));
5040       }
5041       if (!maybe_elem->ToObject(&elem)) return maybe_elem;
5042     }
5043     JSObject::cast(clone)->set_elements(FixedArrayBase::cast(elem), wb_mode);
5044   }
5045   // Update properties if necessary.
5046   if (properties->length() > 0) {
5047     Object* prop;
5048     { MaybeObject* maybe_prop = CopyFixedArray(properties);
5049       if (!maybe_prop->ToObject(&prop)) return maybe_prop;
5050     }
5051     JSObject::cast(clone)->set_properties(FixedArray::cast(prop), wb_mode);
5052   }
5053   // Return the new clone.
5054   return clone;
5055 }
5056
5057
5058 MaybeObject* Heap::ReinitializeJSReceiver(
5059     JSReceiver* object, InstanceType type, int size) {
5060   ASSERT(type >= FIRST_JS_OBJECT_TYPE);
5061
5062   // Allocate fresh map.
5063   // TODO(rossberg): Once we optimize proxies, cache these maps.
5064   Map* map;
5065   MaybeObject* maybe = AllocateMap(type, size);
5066   if (!maybe->To<Map>(&map)) return maybe;
5067
5068   // Check that the receiver has at least the size of the fresh object.
5069   int size_difference = object->map()->instance_size() - map->instance_size();
5070   ASSERT(size_difference >= 0);
5071
5072   map->set_prototype(object->map()->prototype());
5073
5074   // Allocate the backing storage for the properties.
5075   int prop_size = map->unused_property_fields() - map->inobject_properties();
5076   Object* properties;
5077   maybe = AllocateFixedArray(prop_size, TENURED);
5078   if (!maybe->ToObject(&properties)) return maybe;
5079
5080   // Functions require some allocation, which might fail here.
5081   SharedFunctionInfo* shared = NULL;
5082   if (type == JS_FUNCTION_TYPE) {
5083     String* name;
5084     maybe =
5085         InternalizeOneByteString(STATIC_ASCII_VECTOR("<freezing call trap>"));
5086     if (!maybe->To<String>(&name)) return maybe;
5087     maybe = AllocateSharedFunctionInfo(name);
5088     if (!maybe->To<SharedFunctionInfo>(&shared)) return maybe;
5089   }
5090
5091   // Because of possible retries of this function after failure,
5092   // we must NOT fail after this point, where we have changed the type!
5093
5094   // Reset the map for the object.
5095   object->set_map(map);
5096   JSObject* jsobj = JSObject::cast(object);
5097
5098   // Reinitialize the object from the constructor map.
5099   InitializeJSObjectFromMap(jsobj, FixedArray::cast(properties), map);
5100
5101   // Functions require some minimal initialization.
5102   if (type == JS_FUNCTION_TYPE) {
5103     map->set_function_with_prototype(true);
5104     InitializeFunction(JSFunction::cast(object), shared, the_hole_value());
5105     JSFunction::cast(object)->set_context(
5106         isolate()->context()->native_context());
5107   }
5108
5109   // Put in filler if the new object is smaller than the old.
5110   if (size_difference > 0) {
5111     CreateFillerObjectAt(
5112         object->address() + map->instance_size(), size_difference);
5113   }
5114
5115   return object;
5116 }
5117
5118
5119 MaybeObject* Heap::ReinitializeJSGlobalProxy(JSFunction* constructor,
5120                                              JSGlobalProxy* object) {
5121   ASSERT(constructor->has_initial_map());
5122   Map* map = constructor->initial_map();
5123
5124   // Check that the already allocated object has the same size and type as
5125   // objects allocated using the constructor.
5126   ASSERT(map->instance_size() == object->map()->instance_size());
5127   ASSERT(map->instance_type() == object->map()->instance_type());
5128
5129   // Allocate the backing storage for the properties.
5130   int prop_size = map->unused_property_fields() - map->inobject_properties();
5131   Object* properties;
5132   { MaybeObject* maybe_properties = AllocateFixedArray(prop_size, TENURED);
5133     if (!maybe_properties->ToObject(&properties)) return maybe_properties;
5134   }
5135
5136   // Reset the map for the object.
5137   object->set_map(constructor->initial_map());
5138
5139   // Reinitialize the object from the constructor map.
5140   InitializeJSObjectFromMap(object, FixedArray::cast(properties), map);
5141   return object;
5142 }
5143
5144
5145 MaybeObject* Heap::AllocateStringFromOneByte(Vector<const uint8_t> string,
5146                                            PretenureFlag pretenure) {
5147   int length = string.length();
5148   if (length == 1) {
5149     return Heap::LookupSingleCharacterStringFromCode(string[0]);
5150   }
5151   Object* result;
5152   { MaybeObject* maybe_result =
5153         AllocateRawOneByteString(string.length(), pretenure);
5154     if (!maybe_result->ToObject(&result)) return maybe_result;
5155   }
5156
5157   // Copy the characters into the new object.
5158   CopyChars(SeqOneByteString::cast(result)->GetChars(),
5159             string.start(),
5160             length);
5161   return result;
5162 }
5163
5164
5165 MaybeObject* Heap::AllocateStringFromUtf8Slow(Vector<const char> string,
5166                                               int non_ascii_start,
5167                                               PretenureFlag pretenure) {
5168   // Continue counting the number of characters in the UTF-8 string, starting
5169   // from the first non-ascii character or word.
5170   Access<UnicodeCache::Utf8Decoder>
5171       decoder(isolate_->unicode_cache()->utf8_decoder());
5172   decoder->Reset(string.start() + non_ascii_start,
5173                  string.length() - non_ascii_start);
5174   int utf16_length = decoder->Utf16Length();
5175   ASSERT(utf16_length > 0);
5176   // Allocate string.
5177   Object* result;
5178   {
5179     int chars = non_ascii_start + utf16_length;
5180     MaybeObject* maybe_result = AllocateRawTwoByteString(chars, pretenure);
5181     if (!maybe_result->ToObject(&result)) return maybe_result;
5182   }
5183   // Convert and copy the characters into the new object.
5184   SeqTwoByteString* twobyte = SeqTwoByteString::cast(result);
5185   // Copy ascii portion.
5186   uint16_t* data = twobyte->GetChars();
5187   if (non_ascii_start != 0) {
5188     const char* ascii_data = string.start();
5189     for (int i = 0; i < non_ascii_start; i++) {
5190       *data++ = *ascii_data++;
5191     }
5192   }
5193   // Now write the remainder.
5194   decoder->WriteUtf16(data, utf16_length);
5195   return result;
5196 }
5197
5198
5199 MaybeObject* Heap::AllocateStringFromTwoByte(Vector<const uc16> string,
5200                                              PretenureFlag pretenure) {
5201   // Check if the string is an ASCII string.
5202   Object* result;
5203   int length = string.length();
5204   const uc16* start = string.start();
5205
5206   if (String::IsOneByte(start, length)) {
5207     MaybeObject* maybe_result = AllocateRawOneByteString(length, pretenure);
5208     if (!maybe_result->ToObject(&result)) return maybe_result;
5209     CopyChars(SeqOneByteString::cast(result)->GetChars(), start, length);
5210   } else {  // It's not a one byte string.
5211     MaybeObject* maybe_result = AllocateRawTwoByteString(length, pretenure);
5212     if (!maybe_result->ToObject(&result)) return maybe_result;
5213     CopyChars(SeqTwoByteString::cast(result)->GetChars(), start, length);
5214   }
5215   return result;
5216 }
5217
5218
5219 Map* Heap::InternalizedStringMapForString(String* string) {
5220   // If the string is in new space it cannot be used as internalized.
5221   if (InNewSpace(string)) return NULL;
5222
5223   // Find the corresponding internalized string map for strings.
5224   switch (string->map()->instance_type()) {
5225     case STRING_TYPE: return internalized_string_map();
5226     case ASCII_STRING_TYPE: return ascii_internalized_string_map();
5227     case CONS_STRING_TYPE: return cons_internalized_string_map();
5228     case CONS_ASCII_STRING_TYPE: return cons_ascii_internalized_string_map();
5229     case EXTERNAL_STRING_TYPE: return external_internalized_string_map();
5230     case EXTERNAL_ASCII_STRING_TYPE:
5231       return external_ascii_internalized_string_map();
5232     case EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE:
5233       return external_internalized_string_with_one_byte_data_map();
5234     case SHORT_EXTERNAL_STRING_TYPE:
5235       return short_external_internalized_string_map();
5236     case SHORT_EXTERNAL_ASCII_STRING_TYPE:
5237       return short_external_ascii_internalized_string_map();
5238     case SHORT_EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE:
5239       return short_external_internalized_string_with_one_byte_data_map();
5240     default: return NULL;  // No match found.
5241   }
5242 }
5243
5244
5245 static inline void WriteOneByteData(Vector<const char> vector,
5246                                     uint8_t* chars,
5247                                     int len) {
5248   // Only works for ascii.
5249   ASSERT(vector.length() == len);
5250   OS::MemCopy(chars, vector.start(), len);
5251 }
5252
5253 static inline void WriteTwoByteData(Vector<const char> vector,
5254                                     uint16_t* chars,
5255                                     int len) {
5256   const uint8_t* stream = reinterpret_cast<const uint8_t*>(vector.start());
5257   unsigned stream_length = vector.length();
5258   while (stream_length != 0) {
5259     unsigned consumed = 0;
5260     uint32_t c = unibrow::Utf8::ValueOf(stream, stream_length, &consumed);
5261     ASSERT(c != unibrow::Utf8::kBadChar);
5262     ASSERT(consumed <= stream_length);
5263     stream_length -= consumed;
5264     stream += consumed;
5265     if (c > unibrow::Utf16::kMaxNonSurrogateCharCode) {
5266       len -= 2;
5267       if (len < 0) break;
5268       *chars++ = unibrow::Utf16::LeadSurrogate(c);
5269       *chars++ = unibrow::Utf16::TrailSurrogate(c);
5270     } else {
5271       len -= 1;
5272       if (len < 0) break;
5273       *chars++ = c;
5274     }
5275   }
5276   ASSERT(stream_length == 0);
5277   ASSERT(len == 0);
5278 }
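// Worked example for the loop above (the code point is arbitrary): the UTF-8
// bytes F0 9F 98 80 decode to U+1F600, which exceeds
// kMaxNonSurrogateCharCode and is therefore written as the surrogate pair
// 0xD83D 0xDE00, consuming two units of len; a code point such as U+00E9
// is written as a single unit.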
5279
5280
5281 static inline void WriteOneByteData(String* s, uint8_t* chars, int len) {
5282   ASSERT(s->length() == len);
5283   String::WriteToFlat(s, chars, 0, len);
5284 }
5285
5286
5287 static inline void WriteTwoByteData(String* s, uint16_t* chars, int len) {
5288   ASSERT(s->length() == len);
5289   String::WriteToFlat(s, chars, 0, len);
5290 }
5291
5292
5293 template<bool is_one_byte, typename T>
5294 MaybeObject* Heap::AllocateInternalizedStringImpl(
5295     T t, int chars, uint32_t hash_field) {
5296   ASSERT(chars >= 0);
5297   // Compute map and object size.
5298   int size;
5299   Map* map;
5300
5301   if (is_one_byte) {
5302     if (chars > SeqOneByteString::kMaxLength) {
5303       return Failure::OutOfMemoryException(0x9);
5304     }
5305     map = ascii_internalized_string_map();
5306     size = SeqOneByteString::SizeFor(chars);
5307   } else {
5308     if (chars > SeqTwoByteString::kMaxLength) {
5309       return Failure::OutOfMemoryException(0xa);
5310     }
5311     map = internalized_string_map();
5312     size = SeqTwoByteString::SizeFor(chars);
5313   }
5314
5315   // Allocate string.
5316   Object* result;
5317   { MaybeObject* maybe_result = (size > Page::kMaxNonCodeHeapObjectSize)
5318                    ? lo_space_->AllocateRaw(size, NOT_EXECUTABLE)
5319                    : old_data_space_->AllocateRaw(size);
5320     if (!maybe_result->ToObject(&result)) return maybe_result;
5321   }
5322
5323   reinterpret_cast<HeapObject*>(result)->set_map_no_write_barrier(map);
5324   // Set length and hash fields of the allocated string.
5325   String* answer = String::cast(result);
5326   answer->set_length(chars);
5327   answer->set_hash_field(hash_field);
5328
5329   ASSERT_EQ(size, answer->Size());
5330
5331   if (is_one_byte) {
5332     WriteOneByteData(t, SeqOneByteString::cast(answer)->GetChars(), chars);
5333   } else {
5334     WriteTwoByteData(t, SeqTwoByteString::cast(answer)->GetChars(), chars);
5335   }
5336   return answer;
5337 }
5338
5339
5340 // Need explicit instantiations.
5341 template
5342 MaybeObject* Heap::AllocateInternalizedStringImpl<true>(String*, int, uint32_t);
5343 template
5344 MaybeObject* Heap::AllocateInternalizedStringImpl<false>(
5345     String*, int, uint32_t);
5346 template
5347 MaybeObject* Heap::AllocateInternalizedStringImpl<false>(
5348     Vector<const char>, int, uint32_t);
5349
5350
5351 MaybeObject* Heap::AllocateRawOneByteString(int length,
5352                                             PretenureFlag pretenure) {
5353   if (length < 0 || length > SeqOneByteString::kMaxLength) {
5354     return Failure::OutOfMemoryException(0xb);
5355   }
5356
5357   int size = SeqOneByteString::SizeFor(length);
5358   ASSERT(size <= SeqOneByteString::kMaxSize);
5359
5360   AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
5361   AllocationSpace retry_space = OLD_DATA_SPACE;
5362
5363   if (space == NEW_SPACE) {
5364     if (size > kMaxObjectSizeInNewSpace) {
5365       // Allocate in large object space, retry space will be ignored.
5366       space = LO_SPACE;
5367     } else if (size > Page::kMaxNonCodeHeapObjectSize) {
5368       // Allocate in new space, retry in large object space.
5369       retry_space = LO_SPACE;
5370     }
5371   } else if (space == OLD_DATA_SPACE &&
5372              size > Page::kMaxNonCodeHeapObjectSize) {
5373     space = LO_SPACE;
5374   }
5375   Object* result;
5376   { MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
5377     if (!maybe_result->ToObject(&result)) return maybe_result;
5378   }
5379
5380   // Partially initialize the object.
5381   HeapObject::cast(result)->set_map_no_write_barrier(ascii_string_map());
5382   String::cast(result)->set_length(length);
5383   String::cast(result)->set_hash_field(String::kEmptyHashField);
5384   ASSERT_EQ(size, HeapObject::cast(result)->Size());
5385
5386   return result;
5387 }
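// Space selection above, spelled out: strings that fit stay in new space (or
// old data space when pretenured); anything too large for new space goes
// straight to large object space; the in-between case -- small enough for new
// space but larger than a page's non-code area -- is tried in new space first
// with large object space as the retry space.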
5388
5389
5390 MaybeObject* Heap::AllocateRawTwoByteString(int length,
5391                                             PretenureFlag pretenure) {
5392   if (length < 0 || length > SeqTwoByteString::kMaxLength) {
5393     return Failure::OutOfMemoryException(0xc);
5394   }
5395   int size = SeqTwoByteString::SizeFor(length);
5396   ASSERT(size <= SeqTwoByteString::kMaxSize);
5397   AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
5398   AllocationSpace retry_space = OLD_DATA_SPACE;
5399
5400   if (space == NEW_SPACE) {
5401     if (size > kMaxObjectSizeInNewSpace) {
5402       // Allocate in large object space, retry space will be ignored.
5403       space = LO_SPACE;
5404     } else if (size > Page::kMaxNonCodeHeapObjectSize) {
5405       // Allocate in new space, retry in large object space.
5406       retry_space = LO_SPACE;
5407     }
5408   } else if (space == OLD_DATA_SPACE &&
5409              size > Page::kMaxNonCodeHeapObjectSize) {
5410     space = LO_SPACE;
5411   }
5412   Object* result;
5413   { MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
5414     if (!maybe_result->ToObject(&result)) return maybe_result;
5415   }
5416
5417   // Partially initialize the object.
5418   HeapObject::cast(result)->set_map_no_write_barrier(string_map());
5419   String::cast(result)->set_length(length);
5420   String::cast(result)->set_hash_field(String::kEmptyHashField);
5421   ASSERT_EQ(size, HeapObject::cast(result)->Size());
5422   return result;
5423 }
5424
5425
5426 MaybeObject* Heap::AllocateJSArray(
5427     ElementsKind elements_kind,
5428     PretenureFlag pretenure) {
5429   Context* native_context = isolate()->context()->native_context();
5430   JSFunction* array_function = native_context->array_function();
5431   Map* map = array_function->initial_map();
5432   Map* transition_map = isolate()->get_initial_js_array_map(elements_kind);
5433   if (transition_map != NULL) map = transition_map;
5434   return AllocateJSObjectFromMap(map, pretenure);
5435 }
5436
5437
5438 MaybeObject* Heap::AllocateJSArrayWithAllocationSite(
5439     ElementsKind elements_kind,
5440     Handle<AllocationSite> allocation_site) {
5441   Context* native_context = isolate()->context()->native_context();
5442   JSFunction* array_function = native_context->array_function();
5443   Map* map = array_function->initial_map();
5444   Object* maybe_map_array = native_context->js_array_maps();
5445   if (!maybe_map_array->IsUndefined()) {
5446     Object* maybe_transitioned_map =
5447         FixedArray::cast(maybe_map_array)->get(elements_kind);
5448     if (!maybe_transitioned_map->IsUndefined()) {
5449       map = Map::cast(maybe_transitioned_map);
5450     }
5451   }
5452   return AllocateJSObjectFromMapWithAllocationSite(map, allocation_site);
5453 }
5454
5455
5456 MaybeObject* Heap::AllocateEmptyFixedArray() {
5457   int size = FixedArray::SizeFor(0);
5458   Object* result;
5459   { MaybeObject* maybe_result =
5460         AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE);
5461     if (!maybe_result->ToObject(&result)) return maybe_result;
5462   }
5463   // Initialize the object.
5464   reinterpret_cast<FixedArray*>(result)->set_map_no_write_barrier(
5465       fixed_array_map());
5466   reinterpret_cast<FixedArray*>(result)->set_length(0);
5467   return result;
5468 }
5469
5470
5471 MaybeObject* Heap::AllocateEmptyExternalArray(ExternalArrayType array_type) {
5472   return AllocateExternalArray(0, array_type, NULL, TENURED);
5473 }
5474
5475
5476 MaybeObject* Heap::AllocateRawFixedArray(int length) {
5477   if (length < 0 || length > FixedArray::kMaxLength) {
5478     return Failure::OutOfMemoryException(0xd);
5479   }
5480   ASSERT(length > 0);
5481   // Use the general function if we're forced to always allocate.
5482   if (always_allocate()) return AllocateFixedArray(length, TENURED);
5483   // Allocate the raw data for a fixed array.
5484   int size = FixedArray::SizeFor(length);
5485   return size <= kMaxObjectSizeInNewSpace
5486       ? new_space_.AllocateRaw(size)
5487       : lo_space_->AllocateRaw(size, NOT_EXECUTABLE);
5488 }
5489
5490
5491 MaybeObject* Heap::CopyFixedArrayWithMap(FixedArray* src, Map* map) {
5492   int len = src->length();
5493   Object* obj;
5494   { MaybeObject* maybe_obj = AllocateRawFixedArray(len);
5495     if (!maybe_obj->ToObject(&obj)) return maybe_obj;
5496   }
5497   if (InNewSpace(obj)) {
5498     HeapObject* dst = HeapObject::cast(obj);
5499     dst->set_map_no_write_barrier(map);
5500     CopyBlock(dst->address() + kPointerSize,
5501               src->address() + kPointerSize,
5502               FixedArray::SizeFor(len) - kPointerSize);
5503     return obj;
5504   }
5505   HeapObject::cast(obj)->set_map_no_write_barrier(map);
5506   FixedArray* result = FixedArray::cast(obj);
5507   result->set_length(len);
5508
5509   // Copy the content
5510   DisallowHeapAllocation no_gc;
5511   WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
5512   for (int i = 0; i < len; i++) result->set(i, src->get(i), mode);
5513   return result;
5514 }
5515
5516
5517 MaybeObject* Heap::CopyFixedDoubleArrayWithMap(FixedDoubleArray* src,
5518                                                Map* map) {
5519   int len = src->length();
5520   Object* obj;
5521   { MaybeObject* maybe_obj = AllocateRawFixedDoubleArray(len, NOT_TENURED);
5522     if (!maybe_obj->ToObject(&obj)) return maybe_obj;
5523   }
5524   HeapObject* dst = HeapObject::cast(obj);
5525   dst->set_map_no_write_barrier(map);
5526   CopyBlock(
5527       dst->address() + FixedDoubleArray::kLengthOffset,
5528       src->address() + FixedDoubleArray::kLengthOffset,
5529       FixedDoubleArray::SizeFor(len) - FixedDoubleArray::kLengthOffset);
5530   return obj;
5531 }
5532
5533
5534 MaybeObject* Heap::AllocateFixedArray(int length) {
5535   ASSERT(length >= 0);
5536   if (length == 0) return empty_fixed_array();
5537   Object* result;
5538   { MaybeObject* maybe_result = AllocateRawFixedArray(length);
5539     if (!maybe_result->ToObject(&result)) return maybe_result;
5540   }
5541   // Initialize header.
5542   FixedArray* array = reinterpret_cast<FixedArray*>(result);
5543   array->set_map_no_write_barrier(fixed_array_map());
5544   array->set_length(length);
5545   // Initialize body.
5546   ASSERT(!InNewSpace(undefined_value()));
5547   MemsetPointer(array->data_start(), undefined_value(), length);
5548   return result;
5549 }
5550
5551
5552 MaybeObject* Heap::AllocateRawFixedArray(int length, PretenureFlag pretenure) {
5553   if (length < 0 || length > FixedArray::kMaxLength) {
5554     return Failure::OutOfMemoryException(0xe);
5555   }
5556
5557   AllocationSpace space =
5558       (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
5559   int size = FixedArray::SizeFor(length);
5560   if (space == NEW_SPACE && size > kMaxObjectSizeInNewSpace) {
5561     // Too big for new space.
5562     space = LO_SPACE;
5563   } else if (space == OLD_POINTER_SPACE &&
5564              size > Page::kMaxNonCodeHeapObjectSize) {
5565     // Too big for old pointer space.
5566     space = LO_SPACE;
5567   }
5568
5569   AllocationSpace retry_space =
5570       (size <= Page::kMaxNonCodeHeapObjectSize) ? OLD_POINTER_SPACE : LO_SPACE;
5571
5572   return AllocateRaw(size, space, retry_space);
5573 }
5574
5575
5576 MUST_USE_RESULT static MaybeObject* AllocateFixedArrayWithFiller(
5577     Heap* heap,
5578     int length,
5579     PretenureFlag pretenure,
5580     Object* filler) {
5581   ASSERT(length >= 0);
5582   ASSERT(heap->empty_fixed_array()->IsFixedArray());
5583   if (length == 0) return heap->empty_fixed_array();
5584
5585   ASSERT(!heap->InNewSpace(filler));
5586   Object* result;
5587   { MaybeObject* maybe_result = heap->AllocateRawFixedArray(length, pretenure);
5588     if (!maybe_result->ToObject(&result)) return maybe_result;
5589   }
5590
5591   HeapObject::cast(result)->set_map_no_write_barrier(heap->fixed_array_map());
5592   FixedArray* array = FixedArray::cast(result);
5593   array->set_length(length);
5594   MemsetPointer(array->data_start(), filler, length);
5595   return array;
5596 }
5597
5598
5599 MaybeObject* Heap::AllocateFixedArray(int length, PretenureFlag pretenure) {
5600   return AllocateFixedArrayWithFiller(this,
5601                                       length,
5602                                       pretenure,
5603                                       undefined_value());
5604 }
5605
5606
5607 MaybeObject* Heap::AllocateFixedArrayWithHoles(int length,
5608                                                PretenureFlag pretenure) {
5609   return AllocateFixedArrayWithFiller(this,
5610                                       length,
5611                                       pretenure,
5612                                       the_hole_value());
5613 }
5614
5615
5616 MaybeObject* Heap::AllocateUninitializedFixedArray(int length) {
5617   if (length == 0) return empty_fixed_array();
5618
5619   Object* obj;
5620   { MaybeObject* maybe_obj = AllocateRawFixedArray(length);
5621     if (!maybe_obj->ToObject(&obj)) return maybe_obj;
5622   }
5623
5624   reinterpret_cast<FixedArray*>(obj)->set_map_no_write_barrier(
5625       fixed_array_map());
5626   FixedArray::cast(obj)->set_length(length);
5627   return obj;
5628 }
5629
5630
5631 MaybeObject* Heap::AllocateEmptyFixedDoubleArray() {
5632   int size = FixedDoubleArray::SizeFor(0);
5633   Object* result;
5634   { MaybeObject* maybe_result =
5635         AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE);
5636     if (!maybe_result->ToObject(&result)) return maybe_result;
5637   }
5638   // Initialize the object.
5639   reinterpret_cast<FixedDoubleArray*>(result)->set_map_no_write_barrier(
5640       fixed_double_array_map());
5641   reinterpret_cast<FixedDoubleArray*>(result)->set_length(0);
5642   return result;
5643 }
5644
5645
5646 MaybeObject* Heap::AllocateUninitializedFixedDoubleArray(
5647     int length,
5648     PretenureFlag pretenure) {
5649   if (length == 0) return empty_fixed_array();
5650
5651   Object* elements_object;
5652   MaybeObject* maybe_obj = AllocateRawFixedDoubleArray(length, pretenure);
5653   if (!maybe_obj->ToObject(&elements_object)) return maybe_obj;
5654   FixedDoubleArray* elements =
5655       reinterpret_cast<FixedDoubleArray*>(elements_object);
5656
5657   elements->set_map_no_write_barrier(fixed_double_array_map());
5658   elements->set_length(length);
5659   return elements;
5660 }
5661
5662
5663 MaybeObject* Heap::AllocateFixedDoubleArrayWithHoles(
5664     int length,
5665     PretenureFlag pretenure) {
5666   if (length == 0) return empty_fixed_array();
5667
5668   Object* elements_object;
5669   MaybeObject* maybe_obj = AllocateRawFixedDoubleArray(length, pretenure);
5670   if (!maybe_obj->ToObject(&elements_object)) return maybe_obj;
5671   FixedDoubleArray* elements =
5672       reinterpret_cast<FixedDoubleArray*>(elements_object);
5673
5674   for (int i = 0; i < length; ++i) {
5675     elements->set_the_hole(i);
5676   }
5677
5678   elements->set_map_no_write_barrier(fixed_double_array_map());
5679   elements->set_length(length);
5680   return elements;
5681 }
5682
5683
5684 MaybeObject* Heap::AllocateRawFixedDoubleArray(int length,
5685                                                PretenureFlag pretenure) {
5686   if (length < 0 || length > FixedDoubleArray::kMaxLength) {
5687     return Failure::OutOfMemoryException(0xf);
5688   }
5689
5690   AllocationSpace space =
5691       (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
5692   int size = FixedDoubleArray::SizeFor(length);
5693
5694 #ifndef V8_HOST_ARCH_64_BIT
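  // The extra pointer-size slot reserved here (32-bit hosts only) gives
  // EnsureDoubleAligned() below room to insert a one-word filler so that the
  // double payload ends up 8-byte aligned.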
5695   size += kPointerSize;
5696 #endif
5697
5698   if (space == NEW_SPACE && size > kMaxObjectSizeInNewSpace) {
5699     // Too big for new space.
5700     space = LO_SPACE;
5701   } else if (space == OLD_DATA_SPACE &&
5702              size > Page::kMaxNonCodeHeapObjectSize) {
5703     // Too big for old data space.
5704     space = LO_SPACE;
5705   }
5706
5707   AllocationSpace retry_space =
5708       (size <= Page::kMaxNonCodeHeapObjectSize) ? OLD_DATA_SPACE : LO_SPACE;
5709
5710   HeapObject* object;
5711   { MaybeObject* maybe_object = AllocateRaw(size, space, retry_space);
5712     if (!maybe_object->To<HeapObject>(&object)) return maybe_object;
5713   }
5714
5715   return EnsureDoubleAligned(this, object, size);
5716 }
5717
5718
5719 MaybeObject* Heap::AllocateHashTable(int length, PretenureFlag pretenure) {
5720   Object* result;
5721   { MaybeObject* maybe_result = AllocateFixedArray(length, pretenure);
5722     if (!maybe_result->ToObject(&result)) return maybe_result;
5723   }
5724   reinterpret_cast<HeapObject*>(result)->set_map_no_write_barrier(
5725       hash_table_map());
5726   ASSERT(result->IsHashTable());
5727   return result;
5728 }
5729
5730
5731 MaybeObject* Heap::AllocateSymbol() {
5732   // Statically ensure that it is safe to allocate symbols in paged spaces.
5733   STATIC_ASSERT(Symbol::kSize <= Page::kNonCodeObjectAreaSize);
5734
5735   Object* result;
5736   MaybeObject* maybe =
5737       AllocateRaw(Symbol::kSize, OLD_POINTER_SPACE, OLD_POINTER_SPACE);
5738   if (!maybe->ToObject(&result)) return maybe;
5739
5740   HeapObject::cast(result)->set_map_no_write_barrier(symbol_map());
5741
5742   // Generate a random hash value.
5743   int hash;
5744   int attempts = 0;
5745   do {
5746     hash = V8::RandomPrivate(isolate()) & Name::kHashBitMask;
5747     attempts++;
5748   } while (hash == 0 && attempts < 30);
5749   if (hash == 0) hash = 1;  // never return 0
5750
5751   Symbol::cast(result)->set_hash_field(
5752       Name::kIsNotArrayIndexMask | (hash << Name::kHashShift));
5753   Symbol::cast(result)->set_name(undefined_value());
5754
5755   ASSERT(result->IsSymbol());
5756   return result;
5757 }
5758
5759
5760 MaybeObject* Heap::AllocateNativeContext() {
5761   Object* result;
5762   { MaybeObject* maybe_result =
5763         AllocateFixedArray(Context::NATIVE_CONTEXT_SLOTS);
5764     if (!maybe_result->ToObject(&result)) return maybe_result;
5765   }
5766   Context* context = reinterpret_cast<Context*>(result);
5767   context->set_map_no_write_barrier(native_context_map());
5768   context->set_js_array_maps(undefined_value());
5769   ASSERT(context->IsNativeContext());
5770   ASSERT(result->IsContext());
5771   return result;
5772 }
5773
5774
5775 MaybeObject* Heap::AllocateGlobalContext(JSFunction* function,
5776                                          ScopeInfo* scope_info) {
5777   Object* result;
5778   { MaybeObject* maybe_result =
5779         AllocateFixedArray(scope_info->ContextLength(), TENURED);
5780     if (!maybe_result->ToObject(&result)) return maybe_result;
5781   }
5782   Context* context = reinterpret_cast<Context*>(result);
5783   context->set_map_no_write_barrier(global_context_map());
5784   context->set_closure(function);
5785   context->set_previous(function->context());
5786   context->set_extension(scope_info);
5787   context->set_global_object(function->context()->global_object());
5788   ASSERT(context->IsGlobalContext());
5789   ASSERT(result->IsContext());
5790   return context;
5791 }
5792
5793
5794 MaybeObject* Heap::AllocateModuleContext(ScopeInfo* scope_info) {
5795   Object* result;
5796   { MaybeObject* maybe_result =
5797         AllocateFixedArray(scope_info->ContextLength(), TENURED);
5798     if (!maybe_result->ToObject(&result)) return maybe_result;
5799   }
5800   Context* context = reinterpret_cast<Context*>(result);
5801   context->set_map_no_write_barrier(module_context_map());
5802   // Instance link will be set later.
5803   context->set_extension(Smi::FromInt(0));
5804   return context;
5805 }
5806
5807
5808 MaybeObject* Heap::AllocateFunctionContext(int length, JSFunction* function) {
5809   ASSERT(length >= Context::MIN_CONTEXT_SLOTS);
5810   Object* result;
5811   { MaybeObject* maybe_result = AllocateFixedArray(length);
5812     if (!maybe_result->ToObject(&result)) return maybe_result;
5813   }
5814   Context* context = reinterpret_cast<Context*>(result);
5815   context->set_map_no_write_barrier(function_context_map());
5816   context->set_closure(function);
5817   context->set_previous(function->context());
5818   context->set_extension(Smi::FromInt(0));
5819   context->set_global_object(function->context()->global_object());
5820   return context;
5821 }
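// Illustrative sketch (assumed local names "heap" and "closure"): the context
// allocators in this file all produce a fixed array whose map encodes the
// context kind and whose header slots (closure, previous, extension, global
// object) are filled in by the variant-specific code.  A caller would
// typically do:
//
//   Object* ctx;
//   { MaybeObject* maybe =
//         heap->AllocateFunctionContext(Context::MIN_CONTEXT_SLOTS, closure);
//     if (!maybe->ToObject(&ctx)) return maybe;
//   }
//   Context* context = Context::cast(ctx);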
5822
5823
5824 MaybeObject* Heap::AllocateCatchContext(JSFunction* function,
5825                                         Context* previous,
5826                                         String* name,
5827                                         Object* thrown_object) {
5828   STATIC_ASSERT(Context::MIN_CONTEXT_SLOTS == Context::THROWN_OBJECT_INDEX);
5829   Object* result;
5830   { MaybeObject* maybe_result =
5831         AllocateFixedArray(Context::MIN_CONTEXT_SLOTS + 1);
5832     if (!maybe_result->ToObject(&result)) return maybe_result;
5833   }
5834   Context* context = reinterpret_cast<Context*>(result);
5835   context->set_map_no_write_barrier(catch_context_map());
5836   context->set_closure(function);
5837   context->set_previous(previous);
5838   context->set_extension(name);
5839   context->set_global_object(previous->global_object());
5840   context->set(Context::THROWN_OBJECT_INDEX, thrown_object);
5841   return context;
5842 }
5843
5844
5845 MaybeObject* Heap::AllocateWithContext(JSFunction* function,
5846                                        Context* previous,
5847                                        JSReceiver* extension) {
5848   Object* result;
5849   { MaybeObject* maybe_result = AllocateFixedArray(Context::MIN_CONTEXT_SLOTS);
5850     if (!maybe_result->ToObject(&result)) return maybe_result;
5851   }
5852   Context* context = reinterpret_cast<Context*>(result);
5853   context->set_map_no_write_barrier(with_context_map());
5854   context->set_closure(function);
5855   context->set_previous(previous);
5856   context->set_extension(extension);
5857   context->set_global_object(previous->global_object());
5858   return context;
5859 }
5860
5861
5862 MaybeObject* Heap::AllocateBlockContext(JSFunction* function,
5863                                         Context* previous,
5864                                         ScopeInfo* scope_info) {
5865   Object* result;
5866   { MaybeObject* maybe_result =
5867         AllocateFixedArrayWithHoles(scope_info->ContextLength());
5868     if (!maybe_result->ToObject(&result)) return maybe_result;
5869   }
5870   Context* context = reinterpret_cast<Context*>(result);
5871   context->set_map_no_write_barrier(block_context_map());
5872   context->set_closure(function);
5873   context->set_previous(previous);
5874   context->set_extension(scope_info);
5875   context->set_global_object(previous->global_object());
5876   return context;
5877 }
5878
5879
5880 MaybeObject* Heap::AllocateScopeInfo(int length) {
5881   FixedArray* scope_info;
5882   MaybeObject* maybe_scope_info = AllocateFixedArray(length, TENURED);
5883   if (!maybe_scope_info->To(&scope_info)) return maybe_scope_info;
5884   scope_info->set_map_no_write_barrier(scope_info_map());
5885   return scope_info;
5886 }
5887
5888
5889 MaybeObject* Heap::AllocateExternal(void* value) {
5890   Foreign* foreign;
5891   { MaybeObject* maybe_result = AllocateForeign(static_cast<Address>(value));
5892     if (!maybe_result->To(&foreign)) return maybe_result;
5893   }
5894   JSObject* external;
5895   { MaybeObject* maybe_result = AllocateJSObjectFromMap(external_map());
5896     if (!maybe_result->To(&external)) return maybe_result;
5897   }
5898   external->SetInternalField(0, foreign);
5899   return external;
5900 }
5901
5902
5903 MaybeObject* Heap::AllocateStruct(InstanceType type) {
5904   Map* map;
5905   switch (type) {
5906 #define MAKE_CASE(NAME, Name, name) \
5907     case NAME##_TYPE: map = name##_map(); break;
5908 STRUCT_LIST(MAKE_CASE)
5909 #undef MAKE_CASE
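    // For each STRUCT_LIST entry the macro above expands to a case of the
    // form (shown here only as an illustration):
    //   case ACCESSOR_PAIR_TYPE: map = accessor_pair_map(); break;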
5910     default:
5911       UNREACHABLE();
5912       return Failure::InternalError();
5913   }
5914   int size = map->instance_size();
5915   AllocationSpace space =
5916       (size > Page::kMaxNonCodeHeapObjectSize) ? LO_SPACE : OLD_POINTER_SPACE;
5917   Object* result;
5918   { MaybeObject* maybe_result = Allocate(map, space);
5919     if (!maybe_result->ToObject(&result)) return maybe_result;
5920   }
5921   Struct::cast(result)->InitializeBody(size);
5922   return result;
5923 }
5924
5925
5926 bool Heap::IsHeapIterable() {
5927   return (!old_pointer_space()->was_swept_conservatively() &&
5928           !old_data_space()->was_swept_conservatively());
5929 }
5930
5931
5932 void Heap::EnsureHeapIsIterable() {
5933   ASSERT(AllowHeapAllocation::IsAllowed());
5934   if (!IsHeapIterable()) {
5935     CollectAllGarbage(kMakeHeapIterableMask, "Heap::EnsureHeapIsIterable");
5936   }
5937   ASSERT(IsHeapIterable());
5938 }
5939
5940
5941 void Heap::AdvanceIdleIncrementalMarking(intptr_t step_size) {
5942   incremental_marking()->Step(step_size,
5943                               IncrementalMarking::NO_GC_VIA_STACK_GUARD);
5944
5945   if (incremental_marking()->IsComplete()) {
5946     bool uncommit = false;
5947     if (gc_count_at_last_idle_gc_ == gc_count_) {
5948       // No GC since the last full GC, the mutator is probably not active.
5949       isolate_->compilation_cache()->Clear();
5950       uncommit = true;
5951     }
5952     CollectAllGarbage(kNoGCFlags, "idle notification: finalize incremental");
5953     mark_sweeps_since_idle_round_started_++;
5954     gc_count_at_last_idle_gc_ = gc_count_;
5955     if (uncommit) {
5956       new_space_.Shrink();
5957       UncommitFromSpace();
5958     }
5959   }
5960 }
5961
5962
5963 bool Heap::IdleNotification(int hint) {
5964   // Hints greater than this value indicate that
5965   // the embedder is requesting a lot of GC work.
5966   const int kMaxHint = 1000;
5967   const int kMinHintForIncrementalMarking = 10;
5968   // Minimal hint that allows a full GC.
5969   const int kMinHintForFullGC = 100;
5970   intptr_t size_factor = Min(Max(hint, 20), kMaxHint) / 4;
5971   // The size factor is in range [5..250]. The numbers here are chosen from
5972   // experiments. If you change them, make sure to test with
5973   // chrome/performance_ui_tests --gtest_filter="GeneralMixMemoryTest.*".
5974   intptr_t step_size =
5975       size_factor * IncrementalMarking::kAllocatedThreshold;
5976
5977   if (contexts_disposed_ > 0) {
5978     if (hint >= kMaxHint) {
5979       // The embedder is requesting a lot of GC work after context disposal,
5980       // we age inline caches so that they don't keep objects from
5981       // the old context alive.
5982       AgeInlineCaches();
5983     }
5984     int mark_sweep_time = Min(TimeMarkSweepWouldTakeInMs(), 1000);
5985     if (hint >= mark_sweep_time && !FLAG_expose_gc &&
5986         incremental_marking()->IsStopped()) {
5987       HistogramTimerScope scope(isolate_->counters()->gc_context());
5988       CollectAllGarbage(kReduceMemoryFootprintMask,
5989                         "idle notification: contexts disposed");
5990     } else {
5991       AdvanceIdleIncrementalMarking(step_size);
5992       contexts_disposed_ = 0;
5993     }
5994   // After context disposal there is likely a lot of garbage remaining, so
5995   // reset the idle notification counters in order to trigger more
5996   // incremental GCs on subsequent idle notifications.
5997     StartIdleRound();
5998     return false;
5999   }
6000
6001   if (!FLAG_incremental_marking || FLAG_expose_gc || Serializer::enabled()) {
6002     return IdleGlobalGC();
6003   }
6004
6005   // Each IdleNotification performs a small chunk of GC work; together these
6006   // chunks form a round of incremental GCs, after which we wait until the
6007   // mutator creates enough garbage to justify a new round.
6008   // An incremental GC progresses as follows:
6009   // 1. many incremental marking steps,
6010   // 2. one old space mark-sweep-compact,
6011   // 3. many lazy sweep steps.
6012   // Use mark-sweep-compact events to count incremental GCs in a round.
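  //
  // Usage sketch (assuming the public v8.h API of this version): an embedder
  // typically drives this from its event loop, e.g.
  //   while (still_idle && !v8::V8::IdleNotification(hint)) { /* keep going */ }
  // and stops the loop once IdleNotification() returns true.  "still_idle" and
  // "hint" are the editor's placeholder names.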
6013
6014   if (incremental_marking()->IsStopped()) {
6015     if (!mark_compact_collector()->AreSweeperThreadsActivated() &&
6016         !IsSweepingComplete() &&
6017         !AdvanceSweepers(static_cast<int>(step_size))) {
6018       return false;
6019     }
6020   }
6021
6022   if (mark_sweeps_since_idle_round_started_ >= kMaxMarkSweepsInIdleRound) {
6023     if (EnoughGarbageSinceLastIdleRound()) {
6024       StartIdleRound();
6025     } else {
6026       return true;
6027     }
6028   }
6029
6030   int remaining_mark_sweeps = kMaxMarkSweepsInIdleRound -
6031                               mark_sweeps_since_idle_round_started_;
6032
6033   if (incremental_marking()->IsStopped()) {
6034     // If there are no more than two GCs left in this idle round and we are
6035     // allowed to do a full GC, then make those GCs full in order to compact
6036     // the code space.
6037     // TODO(ulan): Once we enable code compaction for incremental marking,
6038     // we can get rid of this special case and always start incremental marking.
6039     if (remaining_mark_sweeps <= 2 && hint >= kMinHintForFullGC) {
6040       CollectAllGarbage(kReduceMemoryFootprintMask,
6041                         "idle notification: finalize idle round");
6042       mark_sweeps_since_idle_round_started_++;
6043     } else if (hint > kMinHintForIncrementalMarking) {
6044       incremental_marking()->Start();
6045     }
6046   }
6047   if (!incremental_marking()->IsStopped() &&
6048       hint > kMinHintForIncrementalMarking) {
6049     AdvanceIdleIncrementalMarking(step_size);
6050   }
6051
6052   if (mark_sweeps_since_idle_round_started_ >= kMaxMarkSweepsInIdleRound) {
6053     FinishIdleRound();
6054     return true;
6055   }
6056
6057   return false;
6058 }
6059
6060
6061 bool Heap::IdleGlobalGC() {
6062   static const int kIdlesBeforeScavenge = 4;
6063   static const int kIdlesBeforeMarkSweep = 7;
6064   static const int kIdlesBeforeMarkCompact = 8;
6065   static const int kMaxIdleCount = kIdlesBeforeMarkCompact + 1;
6066   static const unsigned int kGCsBetweenCleanup = 4;
6067
6068   if (!last_idle_notification_gc_count_init_) {
6069     last_idle_notification_gc_count_ = gc_count_;
6070     last_idle_notification_gc_count_init_ = true;
6071   }
6072
6073   bool uncommit = true;
6074   bool finished = false;
6075
6076   // Reset the number of idle notifications received when a number of
6077   // GCs have taken place. This allows another round of cleanup based
6078   // on idle notifications if enough work has been carried out to
6079   // provoke a number of garbage collections.
6080   if (gc_count_ - last_idle_notification_gc_count_ < kGCsBetweenCleanup) {
6081     number_idle_notifications_ =
6082         Min(number_idle_notifications_ + 1, kMaxIdleCount);
6083   } else {
6084     number_idle_notifications_ = 0;
6085     last_idle_notification_gc_count_ = gc_count_;
6086   }
6087
6088   if (number_idle_notifications_ == kIdlesBeforeScavenge) {
6089     CollectGarbage(NEW_SPACE, "idle notification");
6090     new_space_.Shrink();
6091     last_idle_notification_gc_count_ = gc_count_;
6092   } else if (number_idle_notifications_ == kIdlesBeforeMarkSweep) {
6093     // Before doing the mark-sweep collections we clear the
6094     // compilation cache to avoid hanging on to source code and
6095     // generated code for cached functions.
6096     isolate_->compilation_cache()->Clear();
6097
6098     CollectAllGarbage(kReduceMemoryFootprintMask, "idle notification");
6099     new_space_.Shrink();
6100     last_idle_notification_gc_count_ = gc_count_;
6101
6102   } else if (number_idle_notifications_ == kIdlesBeforeMarkCompact) {
6103     CollectAllGarbage(kReduceMemoryFootprintMask, "idle notification");
6104     new_space_.Shrink();
6105     last_idle_notification_gc_count_ = gc_count_;
6106     number_idle_notifications_ = 0;
6107     finished = true;
6108   } else if (number_idle_notifications_ > kIdlesBeforeMarkCompact) {
6109     // If we have received more than kIdlesBeforeMarkCompact idle
6110     // notifications we do not perform any cleanup because we don't
6111     // expect to gain much by doing so.
6112     finished = true;
6113   }
6114
6115   if (uncommit) UncommitFromSpace();
6116
6117   return finished;
6118 }
6119
6120
6121 #ifdef DEBUG
6122
6123 void Heap::Print() {
6124   if (!HasBeenSetUp()) return;
6125   isolate()->PrintStack(stdout);
6126   AllSpaces spaces(this);
6127   for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
6128     space->Print();
6129   }
6130 }
6131
6132
6133 void Heap::ReportCodeStatistics(const char* title) {
6134   PrintF(">>>>>> Code Stats (%s) >>>>>>\n", title);
6135   PagedSpace::ResetCodeStatistics();
6136   // We do not look for code in new space, map space, or old space.  If code
6137   // somehow ends up in those spaces, we would miss it here.
6138   code_space_->CollectCodeStatistics();
6139   lo_space_->CollectCodeStatistics();
6140   PagedSpace::ReportCodeStatistics();
6141 }
6142
6143
6144 // This function expects that NewSpace's allocated objects histogram is
6145 // populated (via a call to CollectStatistics or else as a side effect of a
6146 // just-completed scavenge collection).
6147 void Heap::ReportHeapStatistics(const char* title) {
6148   USE(title);
6149   PrintF(">>>>>> =============== %s (%d) =============== >>>>>>\n",
6150          title, gc_count_);
6151   PrintF("old_generation_allocation_limit_ %" V8_PTR_PREFIX "d\n",
6152          old_generation_allocation_limit_);
6153
6154   PrintF("\n");
6155   PrintF("Number of handles : %d\n", HandleScope::NumberOfHandles(isolate_));
6156   isolate_->global_handles()->PrintStats();
6157   PrintF("\n");
6158
6159   PrintF("Heap statistics : ");
6160   isolate_->memory_allocator()->ReportStatistics();
6161   PrintF("To space : ");
6162   new_space_.ReportStatistics();
6163   PrintF("Old pointer space : ");
6164   old_pointer_space_->ReportStatistics();
6165   PrintF("Old data space : ");
6166   old_data_space_->ReportStatistics();
6167   PrintF("Code space : ");
6168   code_space_->ReportStatistics();
6169   PrintF("Map space : ");
6170   map_space_->ReportStatistics();
6171   PrintF("Cell space : ");
6172   cell_space_->ReportStatistics();
6173   PrintF("PropertyCell space : ");
6174   property_cell_space_->ReportStatistics();
6175   PrintF("Large object space : ");
6176   lo_space_->ReportStatistics();
6177   PrintF(">>>>>> ========================================= >>>>>>\n");
6178 }
6179
6180 #endif  // DEBUG
6181
6182 bool Heap::Contains(HeapObject* value) {
6183   return Contains(value->address());
6184 }
6185
6186
6187 bool Heap::Contains(Address addr) {
6188   if (OS::IsOutsideAllocatedSpace(addr)) return false;
6189   return HasBeenSetUp() &&
6190     (new_space_.ToSpaceContains(addr) ||
6191      old_pointer_space_->Contains(addr) ||
6192      old_data_space_->Contains(addr) ||
6193      code_space_->Contains(addr) ||
6194      map_space_->Contains(addr) ||
6195      cell_space_->Contains(addr) ||
6196      property_cell_space_->Contains(addr) ||
6197      lo_space_->SlowContains(addr));
6198 }
6199
6200
6201 bool Heap::InSpace(HeapObject* value, AllocationSpace space) {
6202   return InSpace(value->address(), space);
6203 }
6204
6205
6206 bool Heap::InSpace(Address addr, AllocationSpace space) {
6207   if (OS::IsOutsideAllocatedSpace(addr)) return false;
6208   if (!HasBeenSetUp()) return false;
6209
6210   switch (space) {
6211     case NEW_SPACE:
6212       return new_space_.ToSpaceContains(addr);
6213     case OLD_POINTER_SPACE:
6214       return old_pointer_space_->Contains(addr);
6215     case OLD_DATA_SPACE:
6216       return old_data_space_->Contains(addr);
6217     case CODE_SPACE:
6218       return code_space_->Contains(addr);
6219     case MAP_SPACE:
6220       return map_space_->Contains(addr);
6221     case CELL_SPACE:
6222       return cell_space_->Contains(addr);
6223     case PROPERTY_CELL_SPACE:
6224       return property_cell_space_->Contains(addr);
6225     case LO_SPACE:
6226       return lo_space_->SlowContains(addr);
6227   }
6228
6229   return false;
6230 }
6231
6232
6233 #ifdef VERIFY_HEAP
6234 void Heap::Verify() {
6235   CHECK(HasBeenSetUp());
6236
6237   store_buffer()->Verify();
6238
6239   VerifyPointersVisitor visitor;
6240   IterateRoots(&visitor, VISIT_ONLY_STRONG);
6241
6242   new_space_.Verify();
6243
6244   old_pointer_space_->Verify(&visitor);
6245   map_space_->Verify(&visitor);
6246
6247   VerifyPointersVisitor no_dirty_regions_visitor;
6248   old_data_space_->Verify(&no_dirty_regions_visitor);
6249   code_space_->Verify(&no_dirty_regions_visitor);
6250   cell_space_->Verify(&no_dirty_regions_visitor);
6251   property_cell_space_->Verify(&no_dirty_regions_visitor);
6252
6253   lo_space_->Verify();
6254 }
6255 #endif
6256
6257
6258 MaybeObject* Heap::InternalizeUtf8String(Vector<const char> string) {
6259   Object* result = NULL;
6260   Object* new_table;
6261   { MaybeObject* maybe_new_table =
6262         string_table()->LookupUtf8String(string, &result);
6263     if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
6264   }
6265   // Can't use set_string_table because StringTable::cast knows that
6266   // StringTable is a singleton and checks for identity.
6267   roots_[kStringTableRootIndex] = new_table;
6268   ASSERT(result != NULL);
6269   return result;
6270 }
6271
6272
6273 MaybeObject* Heap::InternalizeOneByteString(Vector<const uint8_t> string) {
6274   Object* result = NULL;
6275   Object* new_table;
6276   { MaybeObject* maybe_new_table =
6277         string_table()->LookupOneByteString(string, &result);
6278     if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
6279   }
6280   // Can't use set_string_table because StringTable::cast knows that
6281   // StringTable is a singleton and checks for identity.
6282   roots_[kStringTableRootIndex] = new_table;
6283   ASSERT(result != NULL);
6284   return result;
6285 }
6286
6287
6288 MaybeObject* Heap::InternalizeOneByteString(Handle<SeqOneByteString> string,
6289                                      int from,
6290                                      int length) {
6291   Object* result = NULL;
6292   Object* new_table;
6293   { MaybeObject* maybe_new_table =
6294         string_table()->LookupSubStringOneByteString(string,
6295                                                    from,
6296                                                    length,
6297                                                    &result);
6298     if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
6299   }
6300   // Can't use set_string_table because StringTable::cast knows that
6301   // StringTable is a singleton and checks for identity.
6302   roots_[kStringTableRootIndex] = new_table;
6303   ASSERT(result != NULL);
6304   return result;
6305 }
6306
6307
6308 MaybeObject* Heap::InternalizeTwoByteString(Vector<const uc16> string) {
6309   Object* result = NULL;
6310   Object* new_table;
6311   { MaybeObject* maybe_new_table =
6312         string_table()->LookupTwoByteString(string, &result);
6313     if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
6314   }
6315   // Can't use set_string_table because StringTable::cast knows that
6316   // StringTable is a singleton and checks for identity.
6317   roots_[kStringTableRootIndex] = new_table;
6318   ASSERT(result != NULL);
6319   return result;
6320 }
6321
6322
6323 MaybeObject* Heap::InternalizeString(String* string) {
6324   if (string->IsInternalizedString()) return string;
6325   Object* result = NULL;
6326   Object* new_table;
6327   { MaybeObject* maybe_new_table =
6328         string_table()->LookupString(string, &result);
6329     if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
6330   }
6331   // Can't use set_string_table because StringTable::cast knows that
6332   // StringTable is a singleton and checks for identity.
6333   roots_[kStringTableRootIndex] = new_table;
6334   ASSERT(result != NULL);
6335   return result;
6336 }
6337
6338
6339 bool Heap::InternalizeStringIfExists(String* string, String** result) {
6340   if (string->IsInternalizedString()) {
6341     *result = string;
6342     return true;
6343   }
6344   return string_table()->LookupStringIfExists(string, result);
6345 }
6346
6347
6348 void Heap::ZapFromSpace() {
6349   NewSpacePageIterator it(new_space_.FromSpaceStart(),
6350                           new_space_.FromSpaceEnd());
6351   while (it.has_next()) {
6352     NewSpacePage* page = it.next();
6353     for (Address cursor = page->area_start(), limit = page->area_end();
6354          cursor < limit;
6355          cursor += kPointerSize) {
6356       Memory::Address_at(cursor) = kFromSpaceZapValue;
6357     }
6358   }
6359 }
6360
6361
6362 void Heap::IterateAndMarkPointersToFromSpace(Address start,
6363                                              Address end,
6364                                              ObjectSlotCallback callback) {
6365   Address slot_address = start;
6366
6367   // We are not collecting slots on new space objects during mutation,
6368   // thus we have to scan for pointers to evacuation candidates when we
6369   // promote objects. But we should not record any slots in non-black
6370   // objects: a grey object's slots would be rescanned anyway, and a
6371   // white object might not survive until the end of the collection, so
6372   // recording its slots would violate the invariant.
6373   bool record_slots = false;
6374   if (incremental_marking()->IsCompacting()) {
6375     MarkBit mark_bit = Marking::MarkBitFrom(HeapObject::FromAddress(start));
6376     record_slots = Marking::IsBlack(mark_bit);
6377   }
6378
6379   while (slot_address < end) {
6380     Object** slot = reinterpret_cast<Object**>(slot_address);
6381     Object* object = *slot;
6382     // If the store buffer becomes overfull we mark pages as being exempt from
6383     // the store buffer.  These pages are scanned to find pointers that point
6384     // to the new space.  In that case we may hit newly promoted objects and
6385     // fix the pointers before the promotion queue gets to them.  Thus the 'if'.
6386     if (object->IsHeapObject()) {
6387       if (Heap::InFromSpace(object)) {
6388         callback(reinterpret_cast<HeapObject**>(slot),
6389                  HeapObject::cast(object));
6390         Object* new_object = *slot;
6391         if (InNewSpace(new_object)) {
6392           SLOW_ASSERT(Heap::InToSpace(new_object));
6393           SLOW_ASSERT(new_object->IsHeapObject());
6394           store_buffer_.EnterDirectlyIntoStoreBuffer(
6395               reinterpret_cast<Address>(slot));
6396         }
6397         SLOW_ASSERT(!MarkCompactCollector::IsOnEvacuationCandidate(new_object));
6398       } else if (record_slots &&
6399                  MarkCompactCollector::IsOnEvacuationCandidate(object)) {
6400         mark_compact_collector()->RecordSlot(slot, slot, object);
6401       }
6402     }
6403     slot_address += kPointerSize;
6404   }
6405 }
6406
6407
6408 #ifdef DEBUG
6409 typedef bool (*CheckStoreBufferFilter)(Object** addr);
6410
6411
6412 bool IsAMapPointerAddress(Object** addr) {
6413   uintptr_t a = reinterpret_cast<uintptr_t>(addr);
6414   int mod = a % Map::kSize;
6415   return mod >= Map::kPointerFieldsBeginOffset &&
6416          mod < Map::kPointerFieldsEndOffset;
6417 }
6418
6419
6420 bool EverythingsAPointer(Object** addr) {
6421   return true;
6422 }
6423
6424
6425 static void CheckStoreBuffer(Heap* heap,
6426                              Object** current,
6427                              Object** limit,
6428                              Object**** store_buffer_position,
6429                              Object*** store_buffer_top,
6430                              CheckStoreBufferFilter filter,
6431                              Address special_garbage_start,
6432                              Address special_garbage_end) {
6433   Map* free_space_map = heap->free_space_map();
6434   for ( ; current < limit; current++) {
6435     Object* o = *current;
6436     Address current_address = reinterpret_cast<Address>(current);
6437     // Skip free space.
6438     if (o == free_space_map) {
6440       FreeSpace* free_space =
6441           FreeSpace::cast(HeapObject::FromAddress(current_address));
6442       int skip = free_space->Size();
6443       ASSERT(current_address + skip <= reinterpret_cast<Address>(limit));
6444       ASSERT(skip > 0);
6445       current_address += skip - kPointerSize;
6446       current = reinterpret_cast<Object**>(current_address);
6447       continue;
6448     }
6449     // Skip the current linear allocation space between top and limit, which
6450     // is not marked with the free space map but can contain junk.
6451     if (current_address == special_garbage_start &&
6452         special_garbage_end != special_garbage_start) {
6453       current_address = special_garbage_end - kPointerSize;
6454       current = reinterpret_cast<Object**>(current_address);
6455       continue;
6456     }
6457     if (!(*filter)(current)) continue;
6458     ASSERT(current_address < special_garbage_start ||
6459            current_address >= special_garbage_end);
6460     ASSERT(reinterpret_cast<uintptr_t>(o) != kFreeListZapValue);
6461     // We have to check that the pointer does not point into new space
6462     // without trying to cast it to a heap object since the hash field of
6463     // a string can contain values like 1 and 3 which are tagged null
6464     // pointers.
6465     if (!heap->InNewSpace(o)) continue;
6466     while (**store_buffer_position < current &&
6467            *store_buffer_position < store_buffer_top) {
6468       (*store_buffer_position)++;
6469     }
6470     if (**store_buffer_position != current ||
6471         *store_buffer_position == store_buffer_top) {
6472       Object** obj_start = current;
6473       while (!(*obj_start)->IsMap()) obj_start--;
6474       UNREACHABLE();
6475     }
6476   }
6477 }
6478
6479
6480 // Check that the store buffer contains all intergenerational pointers by
6481 // scanning a page and ensuring that all pointers to young space are in the
6482 // store buffer.
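// CheckStoreBuffer() above walks each candidate region word by word, skips
// free-space blocks and the current linear allocation area, and for every
// remaining word that passes the filter and points into new space it expects
// a matching entry in the sorted store buffer; a missing entry hits
// UNREACHABLE().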
6483 void Heap::OldPointerSpaceCheckStoreBuffer() {
6484   OldSpace* space = old_pointer_space();
6485   PageIterator pages(space);
6486
6487   store_buffer()->SortUniq();
6488
6489   while (pages.has_next()) {
6490     Page* page = pages.next();
6491     Object** current = reinterpret_cast<Object**>(page->area_start());
6492
6493     Address end = page->area_end();
6494
6495     Object*** store_buffer_position = store_buffer()->Start();
6496     Object*** store_buffer_top = store_buffer()->Top();
6497
6498     Object** limit = reinterpret_cast<Object**>(end);
6499     CheckStoreBuffer(this,
6500                      current,
6501                      limit,
6502                      &store_buffer_position,
6503                      store_buffer_top,
6504                      &EverythingsAPointer,
6505                      space->top(),
6506                      space->limit());
6507   }
6508 }
6509
6510
6511 void Heap::MapSpaceCheckStoreBuffer() {
6512   MapSpace* space = map_space();
6513   PageIterator pages(space);
6514
6515   store_buffer()->SortUniq();
6516
6517   while (pages.has_next()) {
6518     Page* page = pages.next();
6519     Object** current = reinterpret_cast<Object**>(page->area_start());
6520
6521     Address end = page->area_end();
6522
6523     Object*** store_buffer_position = store_buffer()->Start();
6524     Object*** store_buffer_top = store_buffer()->Top();
6525
6526     Object** limit = reinterpret_cast<Object**>(end);
6527     CheckStoreBuffer(this,
6528                      current,
6529                      limit,
6530                      &store_buffer_position,
6531                      store_buffer_top,
6532                      &IsAMapPointerAddress,
6533                      space->top(),
6534                      space->limit());
6535   }
6536 }
6537
6538
6539 void Heap::LargeObjectSpaceCheckStoreBuffer() {
6540   LargeObjectIterator it(lo_space());
6541   for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
6542     // We only have code, sequential strings, or fixed arrays in large
6543     // object space, and only fixed arrays can possibly contain pointers to
6544     // the young generation.
6545     if (object->IsFixedArray()) {
6546       Object*** store_buffer_position = store_buffer()->Start();
6547       Object*** store_buffer_top = store_buffer()->Top();
6548       Object** current = reinterpret_cast<Object**>(object->address());
6549       Object** limit =
6550           reinterpret_cast<Object**>(object->address() + object->Size());
6551       CheckStoreBuffer(this,
6552                        current,
6553                        limit,
6554                        &store_buffer_position,
6555                        store_buffer_top,
6556                        &EverythingsAPointer,
6557                        NULL,
6558                        NULL);
6559     }
6560   }
6561 }
6562 #endif
6563
6564
6565 void Heap::IterateRoots(ObjectVisitor* v, VisitMode mode) {
6566   IterateStrongRoots(v, mode);
6567   IterateWeakRoots(v, mode);
6568 }
6569
6570
6571 void Heap::IterateWeakRoots(ObjectVisitor* v, VisitMode mode) {
6572   v->VisitPointer(reinterpret_cast<Object**>(&roots_[kStringTableRootIndex]));
6573   v->Synchronize(VisitorSynchronization::kStringTable);
6574   if (mode != VISIT_ALL_IN_SCAVENGE &&
6575       mode != VISIT_ALL_IN_SWEEP_NEWSPACE) {
6576     // Scavenge collections have special processing for this.
6577     external_string_table_.Iterate(v);
6578     error_object_list_.Iterate(v);
6579   }
6580   v->Synchronize(VisitorSynchronization::kExternalStringsTable);
6581 }
6582
6583
6584 void Heap::IterateStrongRoots(ObjectVisitor* v, VisitMode mode) {
6585   v->VisitPointers(&roots_[0], &roots_[kStrongRootListLength]);
6586   v->Synchronize(VisitorSynchronization::kStrongRootList);
6587
6588   v->VisitPointer(BitCast<Object**>(&hidden_string_));
6589   v->Synchronize(VisitorSynchronization::kInternalizedString);
6590
6591   isolate_->bootstrapper()->Iterate(v);
6592   v->Synchronize(VisitorSynchronization::kBootstrapper);
6593   isolate_->Iterate(v);
6594   v->Synchronize(VisitorSynchronization::kTop);
6595   Relocatable::Iterate(v);
6596   v->Synchronize(VisitorSynchronization::kRelocatable);
6597
6598 #ifdef ENABLE_DEBUGGER_SUPPORT
6599   isolate_->debug()->Iterate(v);
6600   if (isolate_->deoptimizer_data() != NULL) {
6601     isolate_->deoptimizer_data()->Iterate(v);
6602   }
6603 #endif
6604   v->Synchronize(VisitorSynchronization::kDebug);
6605   isolate_->compilation_cache()->Iterate(v);
6606   v->Synchronize(VisitorSynchronization::kCompilationCache);
6607
6608   // Iterate over local handles in handle scopes.
6609   isolate_->handle_scope_implementer()->Iterate(v);
6610   isolate_->IterateDeferredHandles(v);
6611   v->Synchronize(VisitorSynchronization::kHandleScope);
6612
6613   // Iterate over the builtin code objects and code stubs in the
6614   // heap. Note that it is not necessary to iterate over code objects
6615   // on scavenge collections.
6616   if (mode != VISIT_ALL_IN_SCAVENGE) {
6617     isolate_->builtins()->IterateBuiltins(v);
6618   }
6619   v->Synchronize(VisitorSynchronization::kBuiltins);
6620
6621   // Iterate over global handles.
6622   switch (mode) {
6623     case VISIT_ONLY_STRONG:
6624       isolate_->global_handles()->IterateStrongRoots(v);
6625       break;
6626     case VISIT_ALL_IN_SCAVENGE:
6627       isolate_->global_handles()->IterateNewSpaceStrongAndDependentRoots(v);
6628       break;
6629     case VISIT_ALL_IN_SWEEP_NEWSPACE:
6630     case VISIT_ALL:
6631       isolate_->global_handles()->IterateAllRoots(v);
6632       break;
6633   }
6634   v->Synchronize(VisitorSynchronization::kGlobalHandles);
6635
6636   // Iterate over pointers being held by inactive threads.
6637   isolate_->thread_manager()->Iterate(v);
6638   v->Synchronize(VisitorSynchronization::kThreadManager);
6639
6640   // Iterate over the pointers the Serialization/Deserialization code is
6641   // holding.
6642   // During garbage collection this keeps the partial snapshot cache alive.
6643   // During deserialization of the startup snapshot this creates the partial
6644   // snapshot cache and deserializes the objects it refers to.  During
6645   // serialization this does nothing, since the partial snapshot cache is
6646   // empty.  However the next thing we do is create the partial snapshot,
6647   // filling up the partial snapshot cache with objects it needs as we go.
6648   SerializerDeserializer::Iterate(v);
6649   // We don't do a v->Synchronize call here, because in debug mode that will
6650   // output a flag to the snapshot.  However at this point the serializer and
6651   // deserializer are deliberately a little unsynchronized (see above) so the
6652   // checking of the sync flag in the snapshot would fail.
6653 }
6654
6655
6656 // TODO(1236194): Since the heap size is configurable on the command line
6657 // and through the API, we should gracefully handle the case that the heap
6658 // size is not big enough to fit all the initial objects.
6659 bool Heap::ConfigureHeap(int max_semispace_size,
6660                          intptr_t max_old_gen_size,
6661                          intptr_t max_executable_size) {
6662   if (HasBeenSetUp()) return false;
6663
6664   if (FLAG_stress_compaction) {
6665     // This will cause more frequent GCs when stressing.
6666     max_semispace_size_ = Page::kPageSize;
6667   }
6668
6669   if (max_semispace_size > 0) {
6670     if (max_semispace_size < Page::kPageSize) {
6671       max_semispace_size = Page::kPageSize;
6672       if (FLAG_trace_gc) {
6673         PrintPID("Max semispace size cannot be less than %dkbytes\n",
6674                  Page::kPageSize >> 10);
6675       }
6676     }
6677     max_semispace_size_ = max_semispace_size;
6678   }
6679
6680   if (Snapshot::IsEnabled()) {
6681     // If we are using a snapshot we always reserve the default amount
6682     // of memory for each semispace because code in the snapshot has
6683     // write-barrier code that relies on the size and alignment of new
6684     // space.  We therefore cannot use a larger max semispace size
6685     // than the default reserved semispace size.
6686     if (max_semispace_size_ > reserved_semispace_size_) {
6687       max_semispace_size_ = reserved_semispace_size_;
6688       if (FLAG_trace_gc) {
6689         PrintPID("Max semispace size cannot be more than %dkbytes\n",
6690                  reserved_semispace_size_ >> 10);
6691       }
6692     }
6693   } else {
6694     // If we are not using snapshots we reserve space for the actual
6695     // max semispace size.
6696     reserved_semispace_size_ = max_semispace_size_;
6697   }
6698
6699   if (max_old_gen_size > 0) max_old_generation_size_ = max_old_gen_size;
6700   if (max_executable_size > 0) {
6701     max_executable_size_ = RoundUp(max_executable_size, Page::kPageSize);
6702   }
6703
6704   // The max executable size must be less than or equal to the max old
6705   // generation size.
6706   if (max_executable_size_ > max_old_generation_size_) {
6707     max_executable_size_ = max_old_generation_size_;
6708   }
6709
6710   // The new space size must be a power of two to support single-bit testing
6711   // for containment.
6712   max_semispace_size_ = RoundUpToPowerOf2(max_semispace_size_);
6713   reserved_semispace_size_ = RoundUpToPowerOf2(reserved_semispace_size_);
6714   initial_semispace_size_ = Min(initial_semispace_size_, max_semispace_size_);
6715
6716   // The external allocation limit should be below 256 MB on all architectures
6717   // to avoid unnecessary low memory notifications, as that is the threshold
6718   // for some embedders.
6719   external_allocation_limit_ = 12 * max_semispace_size_;
6720   ASSERT(external_allocation_limit_ <= 256 * MB);
6721
6722   // The old generation is paged and needs at least one page for each space.
6723   int paged_space_count = LAST_PAGED_SPACE - FIRST_PAGED_SPACE + 1;
6724   max_old_generation_size_ = Max(static_cast<intptr_t>(paged_space_count *
6725                                                        Page::kPageSize),
6726                                  RoundUp(max_old_generation_size_,
6727                                          Page::kPageSize));
6728
6729   configured_ = true;
6730   return true;
6731 }
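// Usage sketch (an assumption to check against api.cc of this version, not a
// statement of the exact contract): embedders usually reach ConfigureHeap()
// through the public API before the heap is set up, roughly:
//
//   v8::ResourceConstraints constraints;
//   constraints.set_max_old_space_size(...);    // value/units per this v8.h
//   constraints.set_max_executable_size(...);
//   v8::SetResourceConstraints(&constraints);   // must run before heap setup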
6732
6733
6734 bool Heap::ConfigureHeapDefault() {
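  // FLAG_max_new_space_size is expressed in kBytes and covers both semispaces,
  // hence the division by two before converting to bytes.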
6735   return ConfigureHeap(static_cast<intptr_t>(FLAG_max_new_space_size / 2) * KB,
6736                        static_cast<intptr_t>(FLAG_max_old_space_size) * MB,
6737                        static_cast<intptr_t>(FLAG_max_executable_size) * MB);
6738 }
6739
6740
6741 void Heap::RecordStats(HeapStats* stats, bool take_snapshot) {
6742   *stats->start_marker = HeapStats::kStartMarker;
6743   *stats->end_marker = HeapStats::kEndMarker;
6744   *stats->new_space_size = new_space_.SizeAsInt();
6745   *stats->new_space_capacity = static_cast<int>(new_space_.Capacity());
6746   *stats->old_pointer_space_size = old_pointer_space_->SizeOfObjects();
6747   *stats->old_pointer_space_capacity = old_pointer_space_->Capacity();
6748   *stats->old_data_space_size = old_data_space_->SizeOfObjects();
6749   *stats->old_data_space_capacity = old_data_space_->Capacity();
6750   *stats->code_space_size = code_space_->SizeOfObjects();
6751   *stats->code_space_capacity = code_space_->Capacity();
6752   *stats->map_space_size = map_space_->SizeOfObjects();
6753   *stats->map_space_capacity = map_space_->Capacity();
6754   *stats->cell_space_size = cell_space_->SizeOfObjects();
6755   *stats->cell_space_capacity = cell_space_->Capacity();
6756   *stats->property_cell_space_size = property_cell_space_->SizeOfObjects();
6757   *stats->property_cell_space_capacity = property_cell_space_->Capacity();
6758   *stats->lo_space_size = lo_space_->Size();
6759   isolate_->global_handles()->RecordStats(stats);
6760   *stats->memory_allocator_size = isolate()->memory_allocator()->Size();
6761   *stats->memory_allocator_capacity =
6762       isolate()->memory_allocator()->Size() +
6763       isolate()->memory_allocator()->Available();
6764   *stats->os_error = OS::GetLastError();
6766   if (take_snapshot) {
6767     HeapIterator iterator(this);
6768     for (HeapObject* obj = iterator.next();
6769          obj != NULL;
6770          obj = iterator.next()) {
6771       InstanceType type = obj->map()->instance_type();
6772       ASSERT(0 <= type && type <= LAST_TYPE);
6773       stats->objects_per_type[type]++;
6774       stats->size_per_type[type] += obj->Size();
6775     }
6776   }
6777 }
6778
6779
6780 intptr_t Heap::PromotedSpaceSizeOfObjects() {
6781   return old_pointer_space_->SizeOfObjects()
6782       + old_data_space_->SizeOfObjects()
6783       + code_space_->SizeOfObjects()
6784       + map_space_->SizeOfObjects()
6785       + cell_space_->SizeOfObjects()
6786       + property_cell_space_->SizeOfObjects()
6787       + lo_space_->SizeOfObjects();
6788 }
6789
6790
6791 intptr_t Heap::PromotedExternalMemorySize() {
6792   if (amount_of_external_allocated_memory_
6793       <= amount_of_external_allocated_memory_at_last_global_gc_) return 0;
6794   return amount_of_external_allocated_memory_
6795       - amount_of_external_allocated_memory_at_last_global_gc_;
6796 }
6797
6798
6799 V8_DECLARE_ONCE(initialize_gc_once);
6800
6801 static void InitializeGCOnce() {
6802   InitializeScavengingVisitorsTables();
6803   NewSpaceScavenger::Initialize();
6804   MarkCompactCollector::Initialize();
6805 }
6806
6807
6808 bool Heap::SetUp() {
6809 #ifdef DEBUG
6810   allocation_timeout_ = FLAG_gc_interval;
6811 #endif
6812
6813   // Initialize heap spaces and initial maps and objects. Whenever something
6814   // goes wrong, just return false. The caller should check the results and
6815   // call Heap::TearDown() to release allocated memory.
6816   //
6817   // If the heap is not yet configured (e.g. through the API), configure it.
6818   // Configuration is based on the flags new-space-size (really the semispace
6819   // size) and old-space-size if set or the initial values of semispace_size_
6820   // and old_generation_size_ otherwise.
6821   if (!configured_) {
6822     if (!ConfigureHeapDefault()) return false;
6823   }
6824
6825   CallOnce(&initialize_gc_once, &InitializeGCOnce);
6826
6827   MarkMapPointersAsEncoded(false);
6828
6829   // Set up memory allocator.
6830   if (!isolate_->memory_allocator()->SetUp(MaxReserved(), MaxExecutableSize()))
6831       return false;
6832
6833   // Set up new space.
6834   if (!new_space_.SetUp(reserved_semispace_size_, max_semispace_size_)) {
6835     return false;
6836   }
6837
6838   // Initialize old pointer space.
6839   old_pointer_space_ =
6840       new OldSpace(this,
6841                    max_old_generation_size_,
6842                    OLD_POINTER_SPACE,
6843                    NOT_EXECUTABLE);
6844   if (old_pointer_space_ == NULL) return false;
6845   if (!old_pointer_space_->SetUp()) return false;
6846
6847   // Initialize old data space.
6848   old_data_space_ =
6849       new OldSpace(this,
6850                    max_old_generation_size_,
6851                    OLD_DATA_SPACE,
6852                    NOT_EXECUTABLE);
6853   if (old_data_space_ == NULL) return false;
6854   if (!old_data_space_->SetUp()) return false;
6855
6856   // Initialize the code space and set its maximum capacity to the old
6857   // generation size.  It needs executable memory.
6858   // On 64-bit platform(s), we put all code objects in a 2 GB range of
6859   // virtual address space, so that they can call each other with near calls.
6860   if (code_range_size_ > 0) {
6861     if (!isolate_->code_range()->SetUp(code_range_size_)) {
6862       return false;
6863     }
6864   }
6865
6866   code_space_ =
6867       new OldSpace(this, max_old_generation_size_, CODE_SPACE, EXECUTABLE);
6868   if (code_space_ == NULL) return false;
6869   if (!code_space_->SetUp()) return false;
6870
6871   // Initialize map space.
6872   map_space_ = new MapSpace(this, max_old_generation_size_, MAP_SPACE);
6873   if (map_space_ == NULL) return false;
6874   if (!map_space_->SetUp()) return false;
6875
6876   // Initialize simple cell space.
6877   cell_space_ = new CellSpace(this, max_old_generation_size_, CELL_SPACE);
6878   if (cell_space_ == NULL) return false;
6879   if (!cell_space_->SetUp()) return false;
6880
6881   // Initialize global property cell space.
6882   property_cell_space_ = new PropertyCellSpace(this, max_old_generation_size_,
6883                                                PROPERTY_CELL_SPACE);
6884   if (property_cell_space_ == NULL) return false;
6885   if (!property_cell_space_->SetUp()) return false;
6886
6887   // The large object space may contain code or data.  We set the memory
6888   // to be non-executable here for safety, but this means we need to enable it
6889   // explicitly when allocating large code objects.
6890   lo_space_ = new LargeObjectSpace(this, max_old_generation_size_, LO_SPACE);
6891   if (lo_space_ == NULL) return false;
6892   if (!lo_space_->SetUp()) return false;
6893
6894   // Set up the seed that is used to randomize the string hash function.
6895   ASSERT(hash_seed() == 0);
6896   if (FLAG_randomize_hashes) {
6897     if (FLAG_hash_seed == 0) {
6898       set_hash_seed(
6899           Smi::FromInt(V8::RandomPrivate(isolate()) & 0x3fffffff));
6900     } else {
6901       set_hash_seed(Smi::FromInt(FLAG_hash_seed));
6902     }
6903   }
6904
6905   LOG(isolate_, IntPtrTEvent("heap-capacity", Capacity()));
6906   LOG(isolate_, IntPtrTEvent("heap-available", Available()));
6907
6908   store_buffer()->SetUp();
6909
6910   if (FLAG_parallel_recompilation) relocation_mutex_ = OS::CreateMutex();
6911 #ifdef DEBUG
6912   relocation_mutex_locked_by_optimizer_thread_ = false;
6913 #endif  // DEBUG
6914
6915   return true;
6916 }
6917
6918
6919 bool Heap::CreateHeapObjects() {
6920   // Create initial maps.
6921   if (!CreateInitialMaps()) return false;
6922   if (!CreateApiObjects()) return false;
6923
6924   // Create initial objects
6925   if (!CreateInitialObjects()) return false;
6926
6927   native_contexts_list_ = undefined_value();
6928   array_buffers_list_ = undefined_value();
6929   allocation_sites_list_ = undefined_value();
6930   return true;
6931 }
6932
6933
6934 void Heap::SetStackLimits() {
6935   ASSERT(isolate_ != NULL);
6936   ASSERT(isolate_ == isolate());
6937   // On 64 bit machines, pointers are generally out of range of Smis.  We write
6938   // something that looks like an out of range Smi to the GC.
6939
6940   // Set up the special root array entries containing the stack limits.
6941   // These are actually addresses, but the tag makes the GC ignore them.
6942   roots_[kStackLimitRootIndex] =
6943       reinterpret_cast<Object*>(
6944           (isolate_->stack_guard()->jslimit() & ~kSmiTagMask) | kSmiTag);
6945   roots_[kRealStackLimitRootIndex] =
6946       reinterpret_cast<Object*>(
6947           (isolate_->stack_guard()->real_jslimit() & ~kSmiTagMask) | kSmiTag);
6948 }
6949
6950
6951 void Heap::TearDown() {
6952 #ifdef VERIFY_HEAP
6953   if (FLAG_verify_heap) {
6954     Verify();
6955   }
6956 #endif
6957
6958   if (FLAG_print_cumulative_gc_stat) {
6959     PrintF("\n");
6960     PrintF("gc_count=%d ", gc_count_);
6961     PrintF("mark_sweep_count=%d ", ms_count_);
6962     PrintF("max_gc_pause=%.1f ", get_max_gc_pause());
6963     PrintF("total_gc_time=%.1f ", total_gc_time_ms_);
6964     PrintF("min_in_mutator=%.1f ", get_min_in_mutator());
6965     PrintF("max_alive_after_gc=%" V8_PTR_PREFIX "d ",
6966            get_max_alive_after_gc());
6967     PrintF("total_marking_time=%.1f ", marking_time());
6968     PrintF("total_sweeping_time=%.1f ", sweeping_time());
6969     PrintF("\n\n");
6970   }
6971
6972   TearDownArrayBuffers();
6973
6974   isolate_->global_handles()->TearDown();
6975
6976   external_string_table_.TearDown();
6977
6978   error_object_list_.TearDown();
6979
6980   new_space_.TearDown();
6981
6982   if (old_pointer_space_ != NULL) {
6983     old_pointer_space_->TearDown();
6984     delete old_pointer_space_;
6985     old_pointer_space_ = NULL;
6986   }
6987
6988   if (old_data_space_ != NULL) {
6989     old_data_space_->TearDown();
6990     delete old_data_space_;
6991     old_data_space_ = NULL;
6992   }
6993
6994   if (code_space_ != NULL) {
6995     code_space_->TearDown();
6996     delete code_space_;
6997     code_space_ = NULL;
6998   }
6999
7000   if (map_space_ != NULL) {
7001     map_space_->TearDown();
7002     delete map_space_;
7003     map_space_ = NULL;
7004   }
7005
7006   if (cell_space_ != NULL) {
7007     cell_space_->TearDown();
7008     delete cell_space_;
7009     cell_space_ = NULL;
7010   }
7011
7012   if (property_cell_space_ != NULL) {
7013     property_cell_space_->TearDown();
7014     delete property_cell_space_;
7015     property_cell_space_ = NULL;
7016   }
7017
7018   if (lo_space_ != NULL) {
7019     lo_space_->TearDown();
7020     delete lo_space_;
7021     lo_space_ = NULL;
7022   }
7023
7024   store_buffer()->TearDown();
7025   incremental_marking()->TearDown();
7026
7027   isolate_->memory_allocator()->TearDown();
7028
7029   delete relocation_mutex_;
7030 }
7031
7032
7033 void Heap::AddGCPrologueCallback(GCPrologueCallback callback, GCType gc_type) {
7034   ASSERT(callback != NULL);
7035   GCPrologueCallbackPair pair(callback, gc_type);
7036   ASSERT(!gc_prologue_callbacks_.Contains(pair));
7037   return gc_prologue_callbacks_.Add(pair);
7038 }
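// Usage sketch (assuming the public v8.h API of this version): these hooks
// back v8::V8::AddGCPrologueCallback(), e.g.
//
//   static void OnGCPrologue(v8::GCType type, v8::GCCallbackFlags flags) {
//     // observe or log the impending collection
//   }
//   v8::V8::AddGCPrologueCallback(OnGCPrologue, v8::kGCTypeAll);
//
// The callback name above is the editor's example.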
7039
7040
7041 void Heap::RemoveGCPrologueCallback(GCPrologueCallback callback) {
7042   ASSERT(callback != NULL);
7043   for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
7044     if (gc_prologue_callbacks_[i].callback == callback) {
7045       gc_prologue_callbacks_.Remove(i);
7046       return;
7047     }
7048   }
7049   UNREACHABLE();
7050 }
7051
7052
7053 void Heap::AddGCEpilogueCallback(GCEpilogueCallback callback, GCType gc_type) {
7054   ASSERT(callback != NULL);
7055   GCEpilogueCallbackPair pair(callback, gc_type);
7056   ASSERT(!gc_epilogue_callbacks_.Contains(pair));
7057   return gc_epilogue_callbacks_.Add(pair);
7058 }
7059
7060
7061 void Heap::RemoveGCEpilogueCallback(GCEpilogueCallback callback) {
7062   ASSERT(callback != NULL);
7063   for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
7064     if (gc_epilogue_callbacks_[i].callback == callback) {
7065       gc_epilogue_callbacks_.Remove(i);
7066       return;
7067     }
7068   }
7069   UNREACHABLE();
7070 }
7071
7072
7073 #ifdef DEBUG
7074
7075 class PrintHandleVisitor: public ObjectVisitor {
7076  public:
7077   void VisitPointers(Object** start, Object** end) {
7078     for (Object** p = start; p < end; p++)
7079       PrintF("  handle %p to %p\n",
7080              reinterpret_cast<void*>(p),
7081              reinterpret_cast<void*>(*p));
7082   }
7083 };
7084
7085
7086 void Heap::PrintHandles() {
7087   PrintF("Handles:\n");
7088   PrintHandleVisitor v;
7089   isolate_->handle_scope_implementer()->Iterate(&v);
7090 }
7091
7092 #endif
7093
7094
7095 Space* AllSpaces::next() {
7096   switch (counter_++) {
7097     case NEW_SPACE:
7098       return heap_->new_space();
7099     case OLD_POINTER_SPACE:
7100       return heap_->old_pointer_space();
7101     case OLD_DATA_SPACE:
7102       return heap_->old_data_space();
7103     case CODE_SPACE:
7104       return heap_->code_space();
7105     case MAP_SPACE:
7106       return heap_->map_space();
7107     case CELL_SPACE:
7108       return heap_->cell_space();
7109     case PROPERTY_CELL_SPACE:
7110       return heap_->property_cell_space();
7111     case LO_SPACE:
7112       return heap_->lo_space();
7113     default:
7114       return NULL;
7115   }
7116 }
7117
7118
7119 PagedSpace* PagedSpaces::next() {
7120   switch (counter_++) {
7121     case OLD_POINTER_SPACE:
7122       return heap_->old_pointer_space();
7123     case OLD_DATA_SPACE:
7124       return heap_->old_data_space();
7125     case CODE_SPACE:
7126       return heap_->code_space();
7127     case MAP_SPACE:
7128       return heap_->map_space();
7129     case CELL_SPACE:
7130       return heap_->cell_space();
7131     case PROPERTY_CELL_SPACE:
7132       return heap_->property_cell_space();
7133     default:
7134       return NULL;
7135   }
7136 }
7137
7138
7140 OldSpace* OldSpaces::next() {
7141   switch (counter_++) {
7142     case OLD_POINTER_SPACE:
7143       return heap_->old_pointer_space();
7144     case OLD_DATA_SPACE:
7145       return heap_->old_data_space();
7146     case CODE_SPACE:
7147       return heap_->code_space();
7148     default:
7149       return NULL;
7150   }
7151 }
7152
7153
7154 SpaceIterator::SpaceIterator(Heap* heap)
7155     : heap_(heap),
7156       current_space_(FIRST_SPACE),
7157       iterator_(NULL),
7158       size_func_(NULL) {
7159 }
7160
7161
7162 SpaceIterator::SpaceIterator(Heap* heap, HeapObjectCallback size_func)
7163     : heap_(heap),
7164       current_space_(FIRST_SPACE),
7165       iterator_(NULL),
7166       size_func_(size_func) {
7167 }
7168
7169
7170 SpaceIterator::~SpaceIterator() {
7171   // Delete active iterator if any.
7172   delete iterator_;
7173 }
7174
7175
7176 bool SpaceIterator::has_next() {
7177   // Iterate until no more spaces.
7178   return current_space_ != LAST_SPACE;
7179 }
7180
7181
7182 ObjectIterator* SpaceIterator::next() {
7183   if (iterator_ != NULL) {
7184     delete iterator_;
7185     iterator_ = NULL;
7186     // Move to the next space
7187     current_space_++;
7188     if (current_space_ > LAST_SPACE) {
7189       return NULL;
7190     }
7191   }
7192
7193   // Return iterator for the new current space.
7194   return CreateIterator();
7195 }
7196
7197
7198 // Create an iterator for the space currently being iterated.
7199 ObjectIterator* SpaceIterator::CreateIterator() {
7200   ASSERT(iterator_ == NULL);
7201
7202   switch (current_space_) {
7203     case NEW_SPACE:
7204       iterator_ = new SemiSpaceIterator(heap_->new_space(), size_func_);
7205       break;
7206     case OLD_POINTER_SPACE:
7207       iterator_ =
7208           new HeapObjectIterator(heap_->old_pointer_space(), size_func_);
7209       break;
7210     case OLD_DATA_SPACE:
7211       iterator_ = new HeapObjectIterator(heap_->old_data_space(), size_func_);
7212       break;
7213     case CODE_SPACE:
7214       iterator_ = new HeapObjectIterator(heap_->code_space(), size_func_);
7215       break;
7216     case MAP_SPACE:
7217       iterator_ = new HeapObjectIterator(heap_->map_space(), size_func_);
7218       break;
7219     case CELL_SPACE:
7220       iterator_ = new HeapObjectIterator(heap_->cell_space(), size_func_);
7221       break;
7222     case PROPERTY_CELL_SPACE:
7223       iterator_ = new HeapObjectIterator(heap_->property_cell_space(),
7224                                          size_func_);
7225       break;
7226     case LO_SPACE:
7227       iterator_ = new LargeObjectIterator(heap_->lo_space(), size_func_);
7228       break;
7229   }
7230
7231   // Return the newly allocated iterator.
7232   ASSERT(iterator_ != NULL);
7233   return iterator_;
7234 }
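
// Illustrative sketch of driving a SpaceIterator directly (hedged; based only
// on the interface above, and it is the pattern HeapIterator below wraps):
//
//   SpaceIterator it(heap);
//   while (it.has_next()) {
//     ObjectIterator* objects = it.next();  // owned by the SpaceIterator
//     for (HeapObject* obj = objects->next_object();
//          obj != NULL;
//          obj = objects->next_object()) {
//       // ... visit obj ...
//     }
//   }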
7235
7236
7237 class HeapObjectsFilter {
7238  public:
7239   virtual ~HeapObjectsFilter() {}
7240   virtual bool SkipObject(HeapObject* object) = 0;
7241 };
7242
7243
7244 class UnreachableObjectsFilter : public HeapObjectsFilter {
7245  public:
7246   UnreachableObjectsFilter() {
7247     MarkReachableObjects();
7248   }
7249
7250   ~UnreachableObjectsFilter() {
7251     Isolate::Current()->heap()->mark_compact_collector()->ClearMarkbits();
7252   }
7253
7254   bool SkipObject(HeapObject* object) {
7255     MarkBit mark_bit = Marking::MarkBitFrom(object);
7256     return !mark_bit.Get();
7257   }
7258
7259  private:
7260   class MarkingVisitor : public ObjectVisitor {
7261    public:
7262     MarkingVisitor() : marking_stack_(10) {}
7263
7264     void VisitPointers(Object** start, Object** end) {
7265       for (Object** p = start; p < end; p++) {
7266         if (!(*p)->IsHeapObject()) continue;
7267         HeapObject* obj = HeapObject::cast(*p);
7268         MarkBit mark_bit = Marking::MarkBitFrom(obj);
7269         if (!mark_bit.Get()) {
7270           mark_bit.Set();
7271           marking_stack_.Add(obj);
7272         }
7273       }
7274     }
7275
7276     void TransitiveClosure() {
7277       while (!marking_stack_.is_empty()) {
7278         HeapObject* obj = marking_stack_.RemoveLast();
7279         obj->Iterate(this);
7280       }
7281     }
7282
7283    private:
7284     List<HeapObject*> marking_stack_;
7285   };
7286
7287   void MarkReachableObjects() {
7288     Heap* heap = Isolate::Current()->heap();
7289     MarkingVisitor visitor;
7290     heap->IterateRoots(&visitor, VISIT_ALL);
7291     visitor.TransitiveClosure();
7292   }
7293
7294   DisallowHeapAllocation no_allocation_;
7295 };
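
// Note (descriptive only): the filter above marks everything transitively
// reachable from the roots (VISIT_ALL), so SkipObject() returns true exactly
// for objects whose mark bit was never set, i.e. objects that are unreachable
// at the time the filtering HeapIterator below is constructed.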
7296
7297
7298 HeapIterator::HeapIterator(Heap* heap)
7299     : heap_(heap),
7300       filtering_(HeapIterator::kNoFiltering),
7301       filter_(NULL) {
7302   Init();
7303 }
7304
7305
7306 HeapIterator::HeapIterator(Heap* heap,
7307                            HeapIterator::HeapObjectsFiltering filtering)
7308     : heap_(heap),
7309       filtering_(filtering),
7310       filter_(NULL) {
7311   Init();
7312 }
7313
7314
7315 HeapIterator::~HeapIterator() {
7316   Shutdown();
7317 }
7318
7319
7320 void HeapIterator::Init() {
7321   // Start the iteration.
7322   space_iterator_ = new SpaceIterator(heap_);
7323   switch (filtering_) {
7324     case kFilterUnreachable:
7325       filter_ = new UnreachableObjectsFilter;
7326       break;
7327     default:
7328       break;
7329   }
7330   object_iterator_ = space_iterator_->next();
7331 }
7332
7333
7334 void HeapIterator::Shutdown() {
7335 #ifdef DEBUG
7336   // Assert that in filtering mode we have iterated through all
7337   // objects. Otherwise, the heap will be left in an inconsistent state.
7338   if (filtering_ != kNoFiltering) {
7339     ASSERT(object_iterator_ == NULL);
7340   }
7341 #endif
7342   // Make sure the last iterator is deallocated.
7343   delete space_iterator_;
7344   space_iterator_ = NULL;
7345   object_iterator_ = NULL;
7346   delete filter_;
7347   filter_ = NULL;
7348 }
7349
7350
7351 HeapObject* HeapIterator::next() {
7352   if (filter_ == NULL) return NextObject();
7353
7354   HeapObject* obj = NextObject();
7355   while (obj != NULL && filter_->SkipObject(obj)) obj = NextObject();
7356   return obj;
7357 }
7358
7359
7360 HeapObject* HeapIterator::NextObject() {
7361   // No iterator means we are done.
7362   if (object_iterator_ == NULL) return NULL;
7363
7364   if (HeapObject* obj = object_iterator_->next_object()) {
7365     // If the current iterator has more objects we are fine.
7366     return obj;
7367   } else {
7368     // Go through the spaces looking for one that has objects.
7369     while (space_iterator_->has_next()) {
7370       object_iterator_ = space_iterator_->next();
7371       if (HeapObject* obj = object_iterator_->next_object()) {
7372         return obj;
7373       }
7374     }
7375   }
7376   // Done with the last space.
7377   object_iterator_ = NULL;
7378   return NULL;
7379 }
7380
7381
7382 void HeapIterator::reset() {
7383   // Restart the iterator.
7384   Shutdown();
7385   Init();
7386 }
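
// Illustrative usage sketch of HeapIterator (hedged; based only on the
// interface implemented above):
//
//   HeapIterator iterator(heap, HeapIterator::kFilterUnreachable);
//   for (HeapObject* obj = iterator.next();
//        obj != NULL;
//        obj = iterator.next()) {
//     // ... inspect obj; unreachable objects are skipped by the filter ...
//   }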
7387
7388
7389 #ifdef DEBUG
7390
7391 Object* const PathTracer::kAnyGlobalObject = NULL;
7392
7393 class PathTracer::MarkVisitor: public ObjectVisitor {
7394  public:
7395   explicit MarkVisitor(PathTracer* tracer) : tracer_(tracer) {}
7396   void VisitPointers(Object** start, Object** end) {
7397     // Scan all HeapObject pointers in [start, end)
7398     for (Object** p = start; !tracer_->found() && (p < end); p++) {
7399       if ((*p)->IsHeapObject())
7400         tracer_->MarkRecursively(p, this);
7401     }
7402   }
7403
7404  private:
7405   PathTracer* tracer_;
7406 };
7407
7408
7409 class PathTracer::UnmarkVisitor: public ObjectVisitor {
7410  public:
7411   explicit UnmarkVisitor(PathTracer* tracer) : tracer_(tracer) {}
7412   void VisitPointers(Object** start, Object** end) {
7413     // Scan all HeapObject pointers in [start, end)
7414     for (Object** p = start; p < end; p++) {
7415       if ((*p)->IsHeapObject())
7416         tracer_->UnmarkRecursively(p, this);
7417     }
7418   }
7419
7420  private:
7421   PathTracer* tracer_;
7422 };
7423
7424
7425 void PathTracer::VisitPointers(Object** start, Object** end) {
7426   bool done = ((what_to_find_ == FIND_FIRST) && found_target_);
7427   // Visit all HeapObject pointers in [start, end)
7428   for (Object** p = start; !done && (p < end); p++) {
7429     if ((*p)->IsHeapObject()) {
7430       TracePathFrom(p);
7431       done = ((what_to_find_ == FIND_FIRST) && found_target_);
7432     }
7433   }
7434 }
7435
7436
7437 void PathTracer::Reset() {
7438   found_target_ = false;
7439   object_stack_.Clear();
7440 }
7441
7442
7443 void PathTracer::TracePathFrom(Object** root) {
7444   ASSERT((search_target_ == kAnyGlobalObject) ||
7445          search_target_->IsHeapObject());
7446   found_target_in_trace_ = false;
7447   Reset();
7448
7449   MarkVisitor mark_visitor(this);
7450   MarkRecursively(root, &mark_visitor);
7451
7452   UnmarkVisitor unmark_visitor(this);
7453   UnmarkRecursively(root, &unmark_visitor);
7454
7455   ProcessResults();
7456 }
7457
7458
7459 static bool SafeIsNativeContext(HeapObject* obj) {
7460   return obj->map() == obj->GetHeap()->raw_unchecked_native_context_map();
7461 }
7462
7463
7464 void PathTracer::MarkRecursively(Object** p, MarkVisitor* mark_visitor) {
7465   if (!(*p)->IsHeapObject()) return;
7466
7467   HeapObject* obj = HeapObject::cast(*p);
7468
7469   Object* map = obj->map();
7470
7471   if (!map->IsHeapObject()) return;  // visited before
7472
7473   if (found_target_in_trace_) return;  // stop if target found
7474   object_stack_.Add(obj);
7475   if (((search_target_ == kAnyGlobalObject) && obj->IsJSGlobalObject()) ||
7476       (obj == search_target_)) {
7477     found_target_in_trace_ = true;
7478     found_target_ = true;
7479     return;
7480   }
7481
7482   bool is_native_context = SafeIsNativeContext(obj);
7483
7484   // not visited yet
7485   Map* map_p = reinterpret_cast<Map*>(HeapObject::cast(map));
7486
7487   Address map_addr = map_p->address();
7488
7489   obj->set_map_no_write_barrier(reinterpret_cast<Map*>(map_addr + kMarkTag));
7490
7491   // Scan the object body.
7492   if (is_native_context && (visit_mode_ == VISIT_ONLY_STRONG)) {
7493     // This is specialized to scan Contexts properly.
7494     Object** start = reinterpret_cast<Object**>(obj->address() +
7495                                                 Context::kHeaderSize);
7496     Object** end = reinterpret_cast<Object**>(obj->address() +
7497         Context::kHeaderSize + Context::FIRST_WEAK_SLOT * kPointerSize);
7498     mark_visitor->VisitPointers(start, end);
7499   } else {
7500     obj->IterateBody(map_p->instance_type(),
7501                      obj->SizeFromMap(map_p),
7502                      mark_visitor);
7503   }
7504
7505   // Scan the map after the body because the body is a lot more interesting
7506   // when doing leak detection.
7507   MarkRecursively(&map, mark_visitor);
7508
7509   if (!found_target_in_trace_)  // don't pop if the target was found
7510     object_stack_.RemoveLast();
7511 }
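
// Note on the marking scheme above (descriptive only): a visited object is
// tagged by storing (map address + kMarkTag) in its map slot, so the slot no
// longer passes IsHeapObject() and acts as the "visited" flag checked at the
// top of MarkRecursively(). The object body is scanned before the map because
// the body is more interesting for leak detection. UnmarkRecursively() below
// subtracts kMarkTag again to restore the original map pointers.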
7512
7513
7514 void PathTracer::UnmarkRecursively(Object** p, UnmarkVisitor* unmark_visitor) {
7515   if (!(*p)->IsHeapObject()) return;
7516
7517   HeapObject* obj = HeapObject::cast(*p);
7518
7519   Object* map = obj->map();
7520
7521   if (map->IsHeapObject()) return;  // unmarked already
7522
7523   Address map_addr = reinterpret_cast<Address>(map);
7524
7525   map_addr -= kMarkTag;
7526
7527   ASSERT_TAG_ALIGNED(map_addr);
7528
7529   HeapObject* map_p = HeapObject::FromAddress(map_addr);
7530
7531   obj->set_map_no_write_barrier(reinterpret_cast<Map*>(map_p));
7532
7533   UnmarkRecursively(reinterpret_cast<Object**>(&map_p), unmark_visitor);
7534
7535   obj->IterateBody(Map::cast(map_p)->instance_type(),
7536                    obj->SizeFromMap(Map::cast(map_p)),
7537                    unmark_visitor);
7538 }
7539
7540
7541 void PathTracer::ProcessResults() {
7542   if (found_target_) {
7543     PrintF("=====================================\n");
7544     PrintF("====        Path to object       ====\n");
7545     PrintF("=====================================\n\n");
7546
7547     ASSERT(!object_stack_.is_empty());
7548     for (int i = 0; i < object_stack_.length(); i++) {
7549       if (i > 0) PrintF("\n     |\n     |\n     V\n\n");
7550       Object* obj = object_stack_[i];
7551       obj->Print();
7552     }
7553     PrintF("=====================================\n");
7554   }
7555 }
7556
7557
7558 // Triggers a depth-first traversal of the objects reachable from one
7559 // given root object, finds a path to a specific heap object, and
7560 // prints it.
7561 void Heap::TracePathToObjectFrom(Object* target, Object* root) {
7562   PathTracer tracer(target, PathTracer::FIND_ALL, VISIT_ALL);
7563   tracer.VisitPointer(&root);
7564 }
7565
7566
7567 // Triggers a depth-first traversal of the objects reachable from the roots,
7568 // finds a path to a specific heap object, and prints it.
7569 void Heap::TracePathToObject(Object* target) {
7570   PathTracer tracer(target, PathTracer::FIND_ALL, VISIT_ALL);
7571   IterateRoots(&tracer, VISIT_ONLY_STRONG);
7572 }
7573
7574
7575 // Triggers a depth-first traversal of the objects reachable from the roots,
7576 // finds a path to any global object, and prints it. Useful for
7577 // determining the source of leaks of global objects.
7578 void Heap::TracePathToGlobal() {
7579   PathTracer tracer(PathTracer::kAnyGlobalObject,
7580                     PathTracer::FIND_ALL,
7581                     VISIT_ALL);
7582   IterateRoots(&tracer, VISIT_ONLY_STRONG);
7583 }
7584 #endif
7585
7586
7587 static intptr_t CountTotalHolesSize(Heap* heap) {
7588   intptr_t holes_size = 0;
7589   OldSpaces spaces(heap);
7590   for (OldSpace* space = spaces.next();
7591        space != NULL;
7592        space = spaces.next()) {
7593     holes_size += space->Waste() + space->Available();
7594   }
7595   return holes_size;
7596 }
7597
7598
7599 GCTracer::GCTracer(Heap* heap,
7600                    const char* gc_reason,
7601                    const char* collector_reason)
7602     : start_time_(0.0),
7603       start_object_size_(0),
7604       start_memory_size_(0),
7605       gc_count_(0),
7606       full_gc_count_(0),
7607       allocated_since_last_gc_(0),
7608       spent_in_mutator_(0),
7609       promoted_objects_size_(0),
7610       nodes_died_in_new_space_(0),
7611       nodes_copied_in_new_space_(0),
7612       nodes_promoted_(0),
7613       heap_(heap),
7614       gc_reason_(gc_reason),
7615       collector_reason_(collector_reason) {
7616   if (!FLAG_trace_gc && !FLAG_print_cumulative_gc_stat) return;
7617   start_time_ = OS::TimeCurrentMillis();
7618   start_object_size_ = heap_->SizeOfObjects();
7619   start_memory_size_ = heap_->isolate()->memory_allocator()->Size();
7620
7621   for (int i = 0; i < Scope::kNumberOfScopes; i++) {
7622     scopes_[i] = 0;
7623   }
7624
7625   in_free_list_or_wasted_before_gc_ = CountTotalHolesSize(heap);
7626
7627   allocated_since_last_gc_ =
7628       heap_->SizeOfObjects() - heap_->alive_after_last_gc_;
7629
7630   if (heap_->last_gc_end_timestamp_ > 0) {
7631     spent_in_mutator_ = Max(start_time_ - heap_->last_gc_end_timestamp_, 0.0);
7632   }
7633
7634   steps_count_ = heap_->incremental_marking()->steps_count();
7635   steps_took_ = heap_->incremental_marking()->steps_took();
7636   longest_step_ = heap_->incremental_marking()->longest_step();
7637   steps_count_since_last_gc_ =
7638       heap_->incremental_marking()->steps_count_since_last_gc();
7639   steps_took_since_last_gc_ =
7640       heap_->incremental_marking()->steps_took_since_last_gc();
7641 }
7642
7643
7644 GCTracer::~GCTracer() {
7645   // Print ONE trace line iff a relevant tracing flag is set.
7646   if (!FLAG_trace_gc && !FLAG_print_cumulative_gc_stat) return;
7647
7648   bool first_gc = (heap_->last_gc_end_timestamp_ == 0);
7649
7650   heap_->alive_after_last_gc_ = heap_->SizeOfObjects();
7651   heap_->last_gc_end_timestamp_ = OS::TimeCurrentMillis();
7652
7653   double time = heap_->last_gc_end_timestamp_ - start_time_;
7654
7655   // Update cumulative GC statistics if required.
7656   if (FLAG_print_cumulative_gc_stat) {
7657     heap_->total_gc_time_ms_ += time;
7658     heap_->max_gc_pause_ = Max(heap_->max_gc_pause_, time);
7659     heap_->max_alive_after_gc_ = Max(heap_->max_alive_after_gc_,
7660                                      heap_->alive_after_last_gc_);
7661     if (!first_gc) {
7662       heap_->min_in_mutator_ = Min(heap_->min_in_mutator_,
7663                                    spent_in_mutator_);
7664     }
7665   } else if (FLAG_trace_gc_verbose) {
7666     heap_->total_gc_time_ms_ += time;
7667   }
7668
7669   if (collector_ == SCAVENGER && FLAG_trace_gc_ignore_scavenger) return;
7670
7671   heap_->AddMarkingTime(scopes_[Scope::MC_MARK]);
7672
7673   if (FLAG_print_cumulative_gc_stat && !FLAG_trace_gc) return;
7674   PrintPID("%8.0f ms: ", heap_->isolate()->time_millis_since_init());
7675
7676   if (!FLAG_trace_gc_nvp) {
7677     int external_time = static_cast<int>(scopes_[Scope::EXTERNAL]);
7678
7679     double end_memory_size_mb =
7680         static_cast<double>(heap_->isolate()->memory_allocator()->Size()) / MB;
7681
7682     PrintF("%s %.1f (%.1f) -> %.1f (%.1f) MB, ",
7683            CollectorString(),
7684            static_cast<double>(start_object_size_) / MB,
7685            static_cast<double>(start_memory_size_) / MB,
7686            SizeOfHeapObjects(),
7687            end_memory_size_mb);
7688
7689     if (external_time > 0) PrintF("%d / ", external_time);
7690     PrintF("%.1f ms", time);
7691     if (steps_count_ > 0) {
7692       if (collector_ == SCAVENGER) {
7693         PrintF(" (+ %.1f ms in %d steps since last GC)",
7694                steps_took_since_last_gc_,
7695                steps_count_since_last_gc_);
7696       } else {
7697         PrintF(" (+ %.1f ms in %d steps since start of marking, "
7698                    "biggest step %.1f ms)",
7699                steps_took_,
7700                steps_count_,
7701                longest_step_);
7702       }
7703     }
7704
7705     if (gc_reason_ != NULL) {
7706       PrintF(" [%s]", gc_reason_);
7707     }
7708
7709     if (collector_reason_ != NULL) {
7710       PrintF(" [%s]", collector_reason_);
7711     }
7712
7713     PrintF(".\n");
7714   } else {
7715     PrintF("pause=%.1f ", time);
7716     PrintF("mutator=%.1f ", spent_in_mutator_);
7717     PrintF("gc=");
7718     switch (collector_) {
7719       case SCAVENGER:
7720         PrintF("s");
7721         break;
7722       case MARK_COMPACTOR:
7723         PrintF("ms");
7724         break;
7725       default:
7726         UNREACHABLE();
7727     }
7728     PrintF(" ");
7729
7730     PrintF("external=%.1f ", scopes_[Scope::EXTERNAL]);
7731     PrintF("mark=%.1f ", scopes_[Scope::MC_MARK]);
7732     PrintF("sweep=%.1f ", scopes_[Scope::MC_SWEEP]);
7733     PrintF("sweepns=%.1f ", scopes_[Scope::MC_SWEEP_NEWSPACE]);
7734     PrintF("evacuate=%.1f ", scopes_[Scope::MC_EVACUATE_PAGES]);
7735     PrintF("new_new=%.1f ", scopes_[Scope::MC_UPDATE_NEW_TO_NEW_POINTERS]);
7736     PrintF("root_new=%.1f ", scopes_[Scope::MC_UPDATE_ROOT_TO_NEW_POINTERS]);
7737     PrintF("old_new=%.1f ", scopes_[Scope::MC_UPDATE_OLD_TO_NEW_POINTERS]);
7738     PrintF("compaction_ptrs=%.1f ",
7739         scopes_[Scope::MC_UPDATE_POINTERS_TO_EVACUATED]);
7740     PrintF("intracompaction_ptrs=%.1f ",
7741         scopes_[Scope::MC_UPDATE_POINTERS_BETWEEN_EVACUATED]);
7742     PrintF("misc_compaction=%.1f ", scopes_[Scope::MC_UPDATE_MISC_POINTERS]);
7743     PrintF("weakcollection_process=%.1f ",
7744         scopes_[Scope::MC_WEAKCOLLECTION_PROCESS]);
7745     PrintF("weakcollection_clear=%.1f ",
7746         scopes_[Scope::MC_WEAKCOLLECTION_CLEAR]);
7747
7748     PrintF("total_size_before=%" V8_PTR_PREFIX "d ", start_object_size_);
7749     PrintF("total_size_after=%" V8_PTR_PREFIX "d ", heap_->SizeOfObjects());
7750     PrintF("holes_size_before=%" V8_PTR_PREFIX "d ",
7751            in_free_list_or_wasted_before_gc_);
7752     PrintF("holes_size_after=%" V8_PTR_PREFIX "d ", CountTotalHolesSize(heap_));
7753
7754     PrintF("allocated=%" V8_PTR_PREFIX "d ", allocated_since_last_gc_);
7755     PrintF("promoted=%" V8_PTR_PREFIX "d ", promoted_objects_size_);
7756     PrintF("nodes_died_in_new=%d ", nodes_died_in_new_space_);
7757     PrintF("nodes_copied_in_new=%d ", nodes_copied_in_new_space_);
7758     PrintF("nodes_promoted=%d ", nodes_promoted_);
7759
7760     if (collector_ == SCAVENGER) {
7761       PrintF("stepscount=%d ", steps_count_since_last_gc_);
7762       PrintF("stepstook=%.1f ", steps_took_since_last_gc_);
7763     } else {
7764       PrintF("stepscount=%d ", steps_count_);
7765       PrintF("stepstook=%.1f ", steps_took_);
7766       PrintF("longeststep=%.1f ", longest_step_);
7767     }
7768
7769     PrintF("\n");
7770   }
7771
7772   heap_->PrintShortHeapStatistics();
7773 }
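
// Illustrative shape of the non-nvp trace line produced above (all values are
// invented; the PID/timestamp prefix printed by PrintPID is elided here):
//
//   ... ms: Scavenge 2.3 (38.5) -> 1.9 (39.5) MB, 1.0 ms [allocation failure].
//
// With --trace-gc-nvp the same information is emitted as key=value pairs
// (pause=, mutator=, gc=, external=, mark=, sweep=, ...), one GC per line.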
7774
7775
7776 const char* GCTracer::CollectorString() {
7777   switch (collector_) {
7778     case SCAVENGER:
7779       return "Scavenge";
7780     case MARK_COMPACTOR:
7781       return "Mark-sweep";
7782   }
7783   return "Unknown GC";
7784 }
7785
7786
7787 int KeyedLookupCache::Hash(Map* map, Name* name) {
7788   // Uses only lower 32 bits if pointers are larger.
7789   uintptr_t addr_hash =
7790       static_cast<uint32_t>(reinterpret_cast<uintptr_t>(map)) >> kMapHashShift;
7791   return static_cast<uint32_t>((addr_hash ^ name->Hash()) & kCapacityMask);
7792 }
7793
7794
7795 int KeyedLookupCache::Lookup(Map* map, Name* name) {
7796   int index = (Hash(map, name) & kHashMask);
7797   for (int i = 0; i < kEntriesPerBucket; i++) {
7798     Key& key = keys_[index + i];
7799     if ((key.map == map) && key.name->Equals(name)) {
7800       return field_offsets_[index + i];
7801     }
7802   }
7803   return kNotFound;
7804 }
7805
7806
7807 void KeyedLookupCache::Update(Map* map, Name* name, int field_offset) {
7808   if (!name->IsUniqueName()) {
7809     String* internalized_string;
7810     if (!HEAP->InternalizeStringIfExists(
7811             String::cast(name), &internalized_string)) {
7812       return;
7813     }
7814     name = internalized_string;
7815   }
7816   // This cache is cleared only between mark compact passes, so we expect the
7817   // cache to only contain old space names.
7818   ASSERT(!HEAP->InNewSpace(name));
7819
7820   int index = (Hash(map, name) & kHashMask);
7821   // After a GC there will be free slots, so we use them in order (this may
7822   // help to get the most frequently used one in position 0).
7823   for (int i = 0; i < kEntriesPerBucket; i++) {
7824     Key& key = keys_[index + i];
7825     Object* free_entry_indicator = NULL;
7826     if (key.map == free_entry_indicator) {
7827       key.map = map;
7828       key.name = name;
7829       field_offsets_[index + i] = field_offset;
7830       return;
7831     }
7832   }
7833   // No free entry found in this bucket, so we move them all down one and
7834   // put the new entry at position zero.
7835   for (int i = kEntriesPerBucket - 1; i > 0; i--) {
7836     Key& key = keys_[index + i];
7837     Key& key2 = keys_[index + i - 1];
7838     key = key2;
7839     field_offsets_[index + i] = field_offsets_[index + i - 1];
7840   }
7841
7842   // Write the new first entry.
7843   Key& key = keys_[index];
7844   key.map = map;
7845   key.name = name;
7846   field_offsets_[index] = field_offset;
7847 }
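
// Illustrative sketch of the intended caller protocol for this cache (hedged;
// the IC code that actually drives it lives outside this file):
//
//   int offset = keyed_lookup_cache->Lookup(map, name);
//   if (offset == KeyedLookupCache::kNotFound) {
//     offset = /* slow path: resolve the field offset for (map, name) */;
//     keyed_lookup_cache->Update(map, name, offset);
//   }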
7848
7849
7850 void KeyedLookupCache::Clear() {
7851   for (int index = 0; index < kLength; index++) keys_[index].map = NULL;
7852 }
7853
7854
7855 void DescriptorLookupCache::Clear() {
7856   for (int index = 0; index < kLength; index++) keys_[index].source = NULL;
7857 }
7858
7859
7860 #ifdef DEBUG
7861 void Heap::GarbageCollectionGreedyCheck() {
7862   ASSERT(FLAG_gc_greedy);
7863   if (isolate_->bootstrapper()->IsActive()) return;
7864   if (disallow_allocation_failure()) return;
7865   CollectGarbage(NEW_SPACE);
7866 }
7867 #endif
7868
7869
7870 TranscendentalCache::SubCache::SubCache(Type t)
7871   : type_(t),
7872     isolate_(Isolate::Current()) {
7873   uint32_t in0 = 0xffffffffu;  // Bit-pattern for a NaN that isn't
7874   uint32_t in1 = 0xffffffffu;  // generated by the FPU.
7875   for (int i = 0; i < kCacheSize; i++) {
7876     elements_[i].in[0] = in0;
7877     elements_[i].in[1] = in1;
7878     elements_[i].output = NULL;
7879   }
7880 }
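
// Note (descriptive only): 0xffffffff in both input halves is, per the comment
// above, a NaN bit pattern the FPU never produces, so a freshly initialized
// element can never match the split representation of a real double input and
// every lookup against an empty slot is a guaranteed miss.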
7881
7882
7883 void TranscendentalCache::Clear() {
7884   for (int i = 0; i < kNumberOfCaches; i++) {
7885     if (caches_[i] != NULL) {
7886       delete caches_[i];
7887       caches_[i] = NULL;
7888     }
7889   }
7890 }
7891
7892
7893 void ExternalStringTable::CleanUp() {
7894   int last = 0;
7895   for (int i = 0; i < new_space_strings_.length(); ++i) {
7896     if (new_space_strings_[i] == heap_->the_hole_value()) {
7897       continue;
7898     }
7899     if (heap_->InNewSpace(new_space_strings_[i])) {
7900       new_space_strings_[last++] = new_space_strings_[i];
7901     } else {
7902       old_space_strings_.Add(new_space_strings_[i]);
7903     }
7904   }
7905   new_space_strings_.Rewind(last);
7906   new_space_strings_.Trim();
7907
7908   last = 0;
7909   for (int i = 0; i < old_space_strings_.length(); ++i) {
7910     if (old_space_strings_[i] == heap_->the_hole_value()) {
7911       continue;
7912     }
7913     ASSERT(!heap_->InNewSpace(old_space_strings_[i]));
7914     old_space_strings_[last++] = old_space_strings_[i];
7915   }
7916   old_space_strings_.Rewind(last);
7917   old_space_strings_.Trim();
7918 #ifdef VERIFY_HEAP
7919   if (FLAG_verify_heap) {
7920     Verify();
7921   }
7922 #endif
7923 }
7924
7925
7926 void ExternalStringTable::TearDown() {
7927   new_space_strings_.Free();
7928   old_space_strings_.Free();
7929 }
7930
7931
7932 // Update all references.
7933 void ErrorObjectList::UpdateReferences() {
7934   for (int i = 0; i < list_.length(); i++) {
7935     HeapObject* object = HeapObject::cast(list_[i]);
7936     MapWord first_word = object->map_word();
7937     if (first_word.IsForwardingAddress()) {
7938       list_[i] = first_word.ToForwardingAddress();
7939     }
7940   }
7941 }
7942
7943
7944 // Unforwarded objects in new space are dead and removed from the list.
7945 void ErrorObjectList::UpdateReferencesInNewSpace(Heap* heap) {
7946   if (list_.is_empty()) return;
7947   if (!nested_) {
7948     int write_index = 0;
7949     for (int i = 0; i < list_.length(); i++) {
7950       MapWord first_word = HeapObject::cast(list_[i])->map_word();
7951       if (first_word.IsForwardingAddress()) {
7952         list_[write_index++] = first_word.ToForwardingAddress();
7953       }
7954     }
7955     list_.Rewind(write_index);
7956   } else {
7957     // If a GC is triggered during DeferredFormatStackTrace, we do not move
7958     // objects in the list, just remove dead ones, so as not to confuse the
7959     // loop in DeferredFormatStackTrace.
7960     for (int i = 0; i < list_.length(); i++) {
7961       MapWord first_word = HeapObject::cast(list_[i])->map_word();
7962       list_[i] = first_word.IsForwardingAddress()
7963                      ? first_word.ToForwardingAddress()
7964                      : heap->the_hole_value();
7965     }
7966   }
7967 }
7968
7969
7970 void ErrorObjectList::DeferredFormatStackTrace(Isolate* isolate) {
7971   // If formatting the stack trace causes a GC, this method will be
7972   // recursively called.  In that case, skip the recursive call, since
7973   // the loop modifies the list while iterating over it.
7974   if (nested_ || list_.is_empty() || isolate->has_pending_exception()) return;
7975   nested_ = true;
7976   HandleScope scope(isolate);
7977   Handle<String> stack_key = isolate->factory()->stack_string();
7978   int write_index = 0;
7979   int budget = kBudgetPerGC;
7980   for (int i = 0; i < list_.length(); i++) {
7981     Object* object = list_[i];
7982     JSFunction* getter_fun;
7983
7984     { DisallowHeapAllocation no_gc;
7985       // Skip possible holes in the list.
7986       if (object->IsTheHole()) continue;
7987       if (isolate->heap()->InNewSpace(object) || budget == 0) {
7988         list_[write_index++] = object;
7989         continue;
7990       }
7991
7992       // Check whether the stack property is backed by the original getter.
7993       LookupResult lookup(isolate);
7994       JSObject::cast(object)->LocalLookupRealNamedProperty(*stack_key, &lookup);
7995       if (!lookup.IsFound() || lookup.type() != CALLBACKS) continue;
7996       Object* callback = lookup.GetCallbackObject();
7997       if (!callback->IsAccessorPair()) continue;
7998       Object* getter_obj = AccessorPair::cast(callback)->getter();
7999       if (!getter_obj->IsJSFunction()) continue;
8000       getter_fun = JSFunction::cast(getter_obj);
8001       String* key = isolate->heap()->hidden_stack_trace_string();
8002       Object* value = getter_fun->GetHiddenProperty(key);
8003       if (key != value) continue;
8004     }
8005
8006     budget--;
8007     HandleScope scope(isolate);
8008     bool has_exception = false;
8009 #ifdef DEBUG
8010     Handle<Map> map(HeapObject::cast(object)->map(), isolate);
8011 #endif
8012     Handle<Object> object_handle(object, isolate);
8013     Handle<Object> getter_handle(getter_fun, isolate);
8014     Execution::Call(getter_handle, object_handle, 0, NULL, &has_exception);
8015     ASSERT(*map == HeapObject::cast(*object_handle)->map());
8016     if (has_exception) {
8017       // Hit an exception (most likely a stack overflow).
8018       // Wrap up this pass and retry after another GC.
8019       isolate->clear_pending_exception();
8020       // We use the handle since calling the getter might have caused a GC.
8021       list_[write_index++] = *object_handle;
8022       budget = 0;
8023     }
8024   }
8025   list_.Rewind(write_index);
8026   list_.Trim();
8027   nested_ = false;
8028 }
8029
8030
8031 void ErrorObjectList::RemoveUnmarked(Heap* heap) {
8032   for (int i = 0; i < list_.length(); i++) {
8033     HeapObject* object = HeapObject::cast(list_[i]);
8034     if (!Marking::MarkBitFrom(object).Get()) {
8035       list_[i] = heap->the_hole_value();
8036     }
8037   }
8038 }
8039
8040
8041 void ErrorObjectList::TearDown() {
8042   list_.Free();
8043 }
8044
8045
8046 void Heap::QueueMemoryChunkForFree(MemoryChunk* chunk) {
8047   chunk->set_next_chunk(chunks_queued_for_free_);
8048   chunks_queued_for_free_ = chunk;
8049 }
8050
8051
8052 void Heap::FreeQueuedChunks() {
8053   if (chunks_queued_for_free_ == NULL) return;
8054   MemoryChunk* next;
8055   MemoryChunk* chunk;
8056   for (chunk = chunks_queued_for_free_; chunk != NULL; chunk = next) {
8057     next = chunk->next_chunk();
8058     chunk->SetFlag(MemoryChunk::ABOUT_TO_BE_FREED);
8059
8060     if (chunk->owner()->identity() == LO_SPACE) {
8061       // StoreBuffer::Filter relies on MemoryChunk::FromAnyPointerAddress.
8062       // If FromAnyPointerAddress encounters a slot that belongs to a large
8063       // chunk queued for deletion it will fail to find the chunk because
8064       // it tries to perform a search in the list of pages owned by the large
8065       // object space, and queued chunks were detached from that list.
8066       // To work around this we split the large chunk into normal kPageSize
8067       // aligned pieces and initialize the size, owner and flags of every piece.
8068       // If FromAnyPointerAddress encounters a slot that belongs to one of
8069       // these smaller pieces it will treat it as a slot on a normal Page.
8070       Address chunk_end = chunk->address() + chunk->size();
8071       MemoryChunk* inner = MemoryChunk::FromAddress(
8072           chunk->address() + Page::kPageSize);
8073       MemoryChunk* inner_last = MemoryChunk::FromAddress(chunk_end - 1);
8074       while (inner <= inner_last) {
8075         // The size of a large chunk is always a multiple of
8076         // OS::AllocateAlignment(), so there is always
8077         // enough space for a fake MemoryChunk header.
8078         Address area_end = Min(inner->address() + Page::kPageSize, chunk_end);
8079         // Guard against overflow.
8080         if (area_end < inner->address()) area_end = chunk_end;
8081         inner->SetArea(inner->address(), area_end);
8082         inner->set_size(Page::kPageSize);
8083         inner->set_owner(lo_space());
8084         inner->SetFlag(MemoryChunk::ABOUT_TO_BE_FREED);
8085         inner = MemoryChunk::FromAddress(
8086             inner->address() + Page::kPageSize);
8087       }
8088     }
8089   }
8090   isolate_->heap()->store_buffer()->Compact();
8091   isolate_->heap()->store_buffer()->Filter(MemoryChunk::ABOUT_TO_BE_FREED);
8092   for (chunk = chunks_queued_for_free_; chunk != NULL; chunk = next) {
8093     next = chunk->next_chunk();
8094     isolate_->memory_allocator()->Free(chunk);
8095   }
8096   chunks_queued_for_free_ = NULL;
8097 }
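
// Illustrative layout of the workaround above (descriptive only): a queued
// large chunk is overlaid with fake page-sized MemoryChunk headers so that
// MemoryChunk::FromAnyPointerAddress() can resolve interior slots while the
// store buffer is filtered:
//
//   |<--------------------- large chunk --------------------->|
//   | real header/page  | fake header  | fake header  | ...   |
//   ^ chunk->address()  ^ +kPageSize   ^ +2*kPageSize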
8098
8099
8100 void Heap::RememberUnmappedPage(Address page, bool compacted) {
8101   uintptr_t p = reinterpret_cast<uintptr_t>(page);
8102   // Tag the page pointer to make it findable in the dump file.
8103   if (compacted) {
8104     p ^= 0xc1ead & (Page::kPageSize - 1);  // Cleared.
8105   } else {
8106     p ^= 0x1d1ed & (Page::kPageSize - 1);  // I died.
8107   }
8108   remembered_unmapped_pages_[remembered_unmapped_pages_index_] =
8109       reinterpret_cast<Address>(p);
8110   remembered_unmapped_pages_index_++;
8111   remembered_unmapped_pages_index_ %= kRememberedUnmappedPages;
8112 }
8113
8114
8115 void Heap::ClearObjectStats(bool clear_last_time_stats) {
8116   memset(object_counts_, 0, sizeof(object_counts_));
8117   memset(object_sizes_, 0, sizeof(object_sizes_));
8118   if (clear_last_time_stats) {
8119     memset(object_counts_last_time_, 0, sizeof(object_counts_last_time_));
8120     memset(object_sizes_last_time_, 0, sizeof(object_sizes_last_time_));
8121   }
8122 }
8123
8124
8125 static LazyMutex checkpoint_object_stats_mutex = LAZY_MUTEX_INITIALIZER;
8126
8127
8128 void Heap::CheckpointObjectStats() {
8129   ScopedLock lock(checkpoint_object_stats_mutex.Pointer());
8130   Counters* counters = isolate()->counters();
8131 #define ADJUST_LAST_TIME_OBJECT_COUNT(name)                                    \
8132   counters->count_of_##name()->Increment(                                      \
8133       static_cast<int>(object_counts_[name]));                                 \
8134   counters->count_of_##name()->Decrement(                                      \
8135       static_cast<int>(object_counts_last_time_[name]));                       \
8136   counters->size_of_##name()->Increment(                                       \
8137       static_cast<int>(object_sizes_[name]));                                  \
8138   counters->size_of_##name()->Decrement(                                       \
8139       static_cast<int>(object_sizes_last_time_[name]));
8140   INSTANCE_TYPE_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
8141 #undef ADJUST_LAST_TIME_OBJECT_COUNT
8142   int index;
8143 #define ADJUST_LAST_TIME_OBJECT_COUNT(name)               \
8144   index = FIRST_CODE_KIND_SUB_TYPE + Code::name;          \
8145   counters->count_of_CODE_TYPE_##name()->Increment(       \
8146       static_cast<int>(object_counts_[index]));           \
8147   counters->count_of_CODE_TYPE_##name()->Decrement(       \
8148       static_cast<int>(object_counts_last_time_[index])); \
8149   counters->size_of_CODE_TYPE_##name()->Increment(        \
8150       static_cast<int>(object_sizes_[index]));            \
8151   counters->size_of_CODE_TYPE_##name()->Decrement(        \
8152       static_cast<int>(object_sizes_last_time_[index]));
8153   CODE_KIND_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
8154 #undef ADJUST_LAST_TIME_OBJECT_COUNT
8155 #define ADJUST_LAST_TIME_OBJECT_COUNT(name)               \
8156   index = FIRST_FIXED_ARRAY_SUB_TYPE + name;              \
8157   counters->count_of_FIXED_ARRAY_##name()->Increment(     \
8158       static_cast<int>(object_counts_[index]));           \
8159   counters->count_of_FIXED_ARRAY_##name()->Decrement(     \
8160       static_cast<int>(object_counts_last_time_[index])); \
8161   counters->size_of_FIXED_ARRAY_##name()->Increment(      \
8162       static_cast<int>(object_sizes_[index]));            \
8163   counters->size_of_FIXED_ARRAY_##name()->Decrement(      \
8164       static_cast<int>(object_sizes_last_time_[index]));
8165   FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
8166 #undef ADJUST_LAST_TIME_OBJECT_COUNT
8167
8168   OS::MemCopy(object_counts_last_time_, object_counts_, sizeof(object_counts_));
8169   OS::MemCopy(object_sizes_last_time_, object_sizes_, sizeof(object_sizes_));
8170   ClearObjectStats();
8171 }
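
// Illustrative expansion of ADJUST_LAST_TIME_OBJECT_COUNT for a hypothetical
// instance type FOO_TYPE (the real names come from INSTANCE_TYPE_LIST):
//
//   counters->count_of_FOO_TYPE()->Increment(
//       static_cast<int>(object_counts_[FOO_TYPE]));
//   counters->count_of_FOO_TYPE()->Decrement(
//       static_cast<int>(object_counts_last_time_[FOO_TYPE]));
//   counters->size_of_FOO_TYPE()->Increment(
//       static_cast<int>(object_sizes_[FOO_TYPE]));
//   counters->size_of_FOO_TYPE()->Decrement(
//       static_cast<int>(object_sizes_last_time_[FOO_TYPE]));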
8172
8173
8174 Heap::RelocationLock::RelocationLock(Heap* heap) : heap_(heap) {
8175   if (FLAG_parallel_recompilation) {
8176     heap_->relocation_mutex_->Lock();
8177 #ifdef DEBUG
8178     heap_->relocation_mutex_locked_by_optimizer_thread_ =
8179         heap_->isolate()->optimizing_compiler_thread()->IsOptimizerThread();
8180 #endif  // DEBUG
8181   }
8182 }
8183
8184 } }  // namespace v8::internal