1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
4 // met:
5 //
6 //     * Redistributions of source code must retain the above copyright
7 //       notice, this list of conditions and the following disclaimer.
8 //     * Redistributions in binary form must reproduce the above
9 //       copyright notice, this list of conditions and the following
10 //       disclaimer in the documentation and/or other materials provided
11 //       with the distribution.
12 //     * Neither the name of Google Inc. nor the names of its
13 //       contributors may be used to endorse or promote products derived
14 //       from this software without specific prior written permission.
15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
28 #include "v8.h"
29
30 #include "accessors.h"
31 #include "api.h"
32 #include "bootstrapper.h"
33 #include "codegen.h"
34 #include "compilation-cache.h"
35 #include "debug.h"
36 #include "deoptimizer.h"
37 #include "global-handles.h"
38 #include "heap-profiler.h"
39 #include "incremental-marking.h"
40 #include "mark-compact.h"
41 #include "natives.h"
42 #include "objects-visiting.h"
43 #include "objects-visiting-inl.h"
44 #include "once.h"
45 #include "runtime-profiler.h"
46 #include "scopeinfo.h"
47 #include "snapshot.h"
48 #include "store-buffer.h"
49 #include "v8threads.h"
50 #include "v8utils.h"
51 #include "vm-state-inl.h"
52 #if V8_TARGET_ARCH_ARM && !V8_INTERPRETED_REGEXP
53 #include "regexp-macro-assembler.h"
54 #include "arm/regexp-macro-assembler-arm.h"
55 #endif
56 #if V8_TARGET_ARCH_MIPS && !V8_INTERPRETED_REGEXP
57 #include "regexp-macro-assembler.h"
58 #include "mips/regexp-macro-assembler-mips.h"
59 #endif
60
61 namespace v8 {
62 namespace internal {
63
64
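// A rough reading of the defaults below on x64 (where LUMP_OF_MEMORY is 2 MB
// and is at least Page::kPageSize): each semispace is reserved at
// 8 * 2 MB = 16 MB, so the new space spans up to 32 MB; the old generation is
// capped at 700 * 2 MB = 1400 MB, executable memory at 256 * 2 MB = 512 MB,
// and the code range at 512 MB. These are only compile-time defaults; they
// may be overridden below via V8_MAX_SEMISPACE_SIZE or reconfigured later
// (e.g. through resource constraints).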
65 Heap::Heap()
66     : isolate_(NULL),
67 // semispace_size_ should be a power of 2 and old_generation_size_ should be
68 // a multiple of Page::kPageSize.
69 #if defined(V8_TARGET_ARCH_X64)
70 #define LUMP_OF_MEMORY (2 * MB)
71       code_range_size_(512*MB),
72 #else
73 #define LUMP_OF_MEMORY MB
74       code_range_size_(0),
75 #endif
76 #if defined(ANDROID)
77       reserved_semispace_size_(4 * Max(LUMP_OF_MEMORY, Page::kPageSize)),
78       max_semispace_size_(4 * Max(LUMP_OF_MEMORY, Page::kPageSize)),
79       initial_semispace_size_(Page::kPageSize),
80       max_old_generation_size_(192*MB),
81       max_executable_size_(max_old_generation_size_),
82 #else
83       reserved_semispace_size_(8 * Max(LUMP_OF_MEMORY, Page::kPageSize)),
84       max_semispace_size_(8 * Max(LUMP_OF_MEMORY, Page::kPageSize)),
85       initial_semispace_size_(Page::kPageSize),
86       max_old_generation_size_(700ul * LUMP_OF_MEMORY),
87       max_executable_size_(256l * LUMP_OF_MEMORY),
88 #endif
89
90 // Variables set based on semispace_size_ and old_generation_size_ in
91 // ConfigureHeap (survived_since_last_expansion_, external_allocation_limit_).
92 // The reservation will be 4 * reserved_semispace_size_ to ensure that the
93 // young generation can be aligned to its size.
94       survived_since_last_expansion_(0),
95       sweep_generation_(0),
96       always_allocate_scope_depth_(0),
97       linear_allocation_scope_depth_(0),
98       contexts_disposed_(0),
99       global_ic_age_(0),
100       flush_monomorphic_ics_(false),
101       scan_on_scavenge_pages_(0),
102       new_space_(this),
103       old_pointer_space_(NULL),
104       old_data_space_(NULL),
105       code_space_(NULL),
106       map_space_(NULL),
107       cell_space_(NULL),
108       lo_space_(NULL),
109       gc_state_(NOT_IN_GC),
110       gc_post_processing_depth_(0),
111       ms_count_(0),
112       gc_count_(0),
113       remembered_unmapped_pages_index_(0),
114       unflattened_strings_length_(0),
115 #ifdef DEBUG
116       allocation_timeout_(0),
117       disallow_allocation_failure_(false),
118 #endif  // DEBUG
119       new_space_high_promotion_mode_active_(false),
120       old_generation_allocation_limit_(kMinimumOldGenerationAllocationLimit),
121       size_of_old_gen_at_last_old_space_gc_(0),
122       external_allocation_limit_(0),
123       amount_of_external_allocated_memory_(0),
124       amount_of_external_allocated_memory_at_last_global_gc_(0),
125       old_gen_exhausted_(false),
126       store_buffer_rebuilder_(store_buffer()),
127       hidden_string_(NULL),
128       global_gc_prologue_callback_(NULL),
129       global_gc_epilogue_callback_(NULL),
130       gc_safe_size_of_old_object_(NULL),
131       total_regexp_code_generated_(0),
132       tracer_(NULL),
133       young_survivors_after_last_gc_(0),
134       high_survival_rate_period_length_(0),
135       low_survival_rate_period_length_(0),
136       survival_rate_(0),
137       previous_survival_rate_trend_(Heap::STABLE),
138       survival_rate_trend_(Heap::STABLE),
139       max_gc_pause_(0.0),
140       total_gc_time_ms_(0.0),
141       max_alive_after_gc_(0),
142       min_in_mutator_(kMaxInt),
143       alive_after_last_gc_(0),
144       last_gc_end_timestamp_(0.0),
145       marking_time_(0.0),
146       sweeping_time_(0.0),
147       store_buffer_(this),
148       marking_(this),
149       incremental_marking_(this),
150       number_idle_notifications_(0),
151       last_idle_notification_gc_count_(0),
152       last_idle_notification_gc_count_init_(false),
153       mark_sweeps_since_idle_round_started_(0),
154       ms_count_at_last_idle_notification_(0),
155       gc_count_at_last_idle_gc_(0),
156       scavenges_since_last_idle_round_(kIdleScavengeThreshold),
157       gcs_since_last_deopt_(0),
158 #ifdef VERIFY_HEAP
159       no_weak_embedded_maps_verification_scope_depth_(0),
160 #endif
161       promotion_queue_(this),
162       configured_(false),
163       chunks_queued_for_free_(NULL),
164       relocation_mutex_(NULL) {
165   // Allow build-time customization of the max semispace size. Building
166   // V8 with snapshots and a non-default max semispace size is much
167   // easier if you can define it as part of the build environment.
168 #if defined(V8_MAX_SEMISPACE_SIZE)
169   max_semispace_size_ = reserved_semispace_size_ = V8_MAX_SEMISPACE_SIZE;
170 #endif
171
172   intptr_t max_virtual = OS::MaxVirtualMemory();
173
174   if (max_virtual > 0) {
175     if (code_range_size_ > 0) {
176       // Reserve no more than 1/8 of the memory for the code range.
177       code_range_size_ = Min(code_range_size_, max_virtual >> 3);
178     }
179   }
180
181   memset(roots_, 0, sizeof(roots_[0]) * kRootListLength);
182   native_contexts_list_ = NULL;
183   array_buffers_list_ = Smi::FromInt(0);
184   mark_compact_collector_.heap_ = this;
185   external_string_table_.heap_ = this;
186   // Put a dummy entry in the remembered pages so we can find the list in
187   // the minidump even if there are no real unmapped pages.
188   RememberUnmappedPage(NULL, false);
189
190   ClearObjectStats(true);
191 }
192
193
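// Note the accounting differences in the accessors below: Capacity() and
// Available() sum only the regular spaces and leave out the large object
// space, while CommittedMemory() and CommittedPhysicalMemory() include it as
// well.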
194 intptr_t Heap::Capacity() {
195   if (!HasBeenSetUp()) return 0;
196
197   return new_space_.Capacity() +
198       old_pointer_space_->Capacity() +
199       old_data_space_->Capacity() +
200       code_space_->Capacity() +
201       map_space_->Capacity() +
202       cell_space_->Capacity();
203 }
204
205
206 intptr_t Heap::CommittedMemory() {
207   if (!HasBeenSetUp()) return 0;
208
209   return new_space_.CommittedMemory() +
210       old_pointer_space_->CommittedMemory() +
211       old_data_space_->CommittedMemory() +
212       code_space_->CommittedMemory() +
213       map_space_->CommittedMemory() +
214       cell_space_->CommittedMemory() +
215       lo_space_->Size();
216 }
217
218
219 size_t Heap::CommittedPhysicalMemory() {
220   if (!HasBeenSetUp()) return 0;
221
222   return new_space_.CommittedPhysicalMemory() +
223       old_pointer_space_->CommittedPhysicalMemory() +
224       old_data_space_->CommittedPhysicalMemory() +
225       code_space_->CommittedPhysicalMemory() +
226       map_space_->CommittedPhysicalMemory() +
227       cell_space_->CommittedPhysicalMemory() +
228       lo_space_->CommittedPhysicalMemory();
229 }
230
231
232 intptr_t Heap::CommittedMemoryExecutable() {
233   if (!HasBeenSetUp()) return 0;
234
235   return isolate()->memory_allocator()->SizeExecutable();
236 }
237
238
239 intptr_t Heap::Available() {
240   if (!HasBeenSetUp()) return 0;
241
242   return new_space_.Available() +
243       old_pointer_space_->Available() +
244       old_data_space_->Available() +
245       code_space_->Available() +
246       map_space_->Available() +
247       cell_space_->Available();
248 }
249
250
251 bool Heap::HasBeenSetUp() {
252   return old_pointer_space_ != NULL &&
253          old_data_space_ != NULL &&
254          code_space_ != NULL &&
255          map_space_ != NULL &&
256          cell_space_ != NULL &&
257          lo_space_ != NULL;
258 }
259
260
261 int Heap::GcSafeSizeOfOldObject(HeapObject* object) {
262   if (IntrusiveMarking::IsMarked(object)) {
263     return IntrusiveMarking::SizeOfMarkedObject(object);
264   }
265   return object->SizeFromMap(object->map());
266 }
267
268
269 GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space,
270                                               const char** reason) {
271   // Is global GC requested?
272   if (space != NEW_SPACE) {
273     isolate_->counters()->gc_compactor_caused_by_request()->Increment();
274     *reason = "GC in old space requested";
275     return MARK_COMPACTOR;
276   }
277
278   if (FLAG_gc_global || (FLAG_stress_compaction && (gc_count_ & 1) != 0)) {
279     *reason = "GC in old space forced by flags";
280     return MARK_COMPACTOR;
281   }
282
283   // Is enough data promoted to justify a global GC?
284   if (OldGenerationAllocationLimitReached()) {
285     isolate_->counters()->gc_compactor_caused_by_promoted_data()->Increment();
286     *reason = "promotion limit reached";
287     return MARK_COMPACTOR;
288   }
289
290   // Have allocations in the OLD and LO spaces failed?
291   if (old_gen_exhausted_) {
292     isolate_->counters()->
293         gc_compactor_caused_by_oldspace_exhaustion()->Increment();
294     *reason = "old generations exhausted";
295     return MARK_COMPACTOR;
296   }
297
298   // Is there enough space left in OLD to guarantee that a scavenge can
299   // succeed?
300   //
301   // Note that MemoryAllocator->MaxAvailable() undercounts the memory available
302   // for object promotion. It counts only the bytes that the memory
303   // allocator has not yet allocated from the OS and assigned to any space,
304   // and does not count available bytes already in the old space or code
305   // space.  Undercounting is safe---we may get an unrequested full GC when
306   // a scavenge would have succeeded.
307   if (isolate_->memory_allocator()->MaxAvailable() <= new_space_.Size()) {
308     isolate_->counters()->
309         gc_compactor_caused_by_oldspace_exhaustion()->Increment();
310     *reason = "scavenge might not succeed";
311     return MARK_COMPACTOR;
312   }
313
314   // Default
315   *reason = NULL;
316   return SCAVENGER;
317 }
318
319
320 // TODO(1238405): Combine the infrastructure for --heap-stats and
321 // --log-gc to avoid the complicated preprocessor and flag testing.
322 void Heap::ReportStatisticsBeforeGC() {
323   // Heap::ReportHeapStatistics will also log NewSpace statistics when
324   // --log-gc is set.  The following logic is used to avoid
325   // double logging.
326 #ifdef DEBUG
327   if (FLAG_heap_stats || FLAG_log_gc) new_space_.CollectStatistics();
328   if (FLAG_heap_stats) {
329     ReportHeapStatistics("Before GC");
330   } else if (FLAG_log_gc) {
331     new_space_.ReportStatistics();
332   }
333   if (FLAG_heap_stats || FLAG_log_gc) new_space_.ClearHistograms();
334 #else
335   if (FLAG_log_gc) {
336     new_space_.CollectStatistics();
337     new_space_.ReportStatistics();
338     new_space_.ClearHistograms();
339   }
340 #endif  // DEBUG
341 }
342
343
344 void Heap::PrintShortHeapStatistics() {
345   if (!FLAG_trace_gc_verbose) return;
346   PrintPID("Memory allocator,   used: %6" V8_PTR_PREFIX "d KB"
347                ", available: %6" V8_PTR_PREFIX "d KB\n",
348            isolate_->memory_allocator()->Size() / KB,
349            isolate_->memory_allocator()->Available() / KB);
350   PrintPID("New space,          used: %6" V8_PTR_PREFIX "d KB"
351                ", available: %6" V8_PTR_PREFIX "d KB"
352                ", committed: %6" V8_PTR_PREFIX "d KB\n",
353            new_space_.Size() / KB,
354            new_space_.Available() / KB,
355            new_space_.CommittedMemory() / KB);
356   PrintPID("Old pointers,       used: %6" V8_PTR_PREFIX "d KB"
357                ", available: %6" V8_PTR_PREFIX "d KB"
358                ", committed: %6" V8_PTR_PREFIX "d KB\n",
359            old_pointer_space_->SizeOfObjects() / KB,
360            old_pointer_space_->Available() / KB,
361            old_pointer_space_->CommittedMemory() / KB);
362   PrintPID("Old data space,     used: %6" V8_PTR_PREFIX "d KB"
363                ", available: %6" V8_PTR_PREFIX "d KB"
364                ", committed: %6" V8_PTR_PREFIX "d KB\n",
365            old_data_space_->SizeOfObjects() / KB,
366            old_data_space_->Available() / KB,
367            old_data_space_->CommittedMemory() / KB);
368   PrintPID("Code space,         used: %6" V8_PTR_PREFIX "d KB"
369                ", available: %6" V8_PTR_PREFIX "d KB"
370                ", committed: %6" V8_PTR_PREFIX "d KB\n",
371            code_space_->SizeOfObjects() / KB,
372            code_space_->Available() / KB,
373            code_space_->CommittedMemory() / KB);
374   PrintPID("Map space,          used: %6" V8_PTR_PREFIX "d KB"
375                ", available: %6" V8_PTR_PREFIX "d KB"
376                ", committed: %6" V8_PTR_PREFIX "d KB\n",
377            map_space_->SizeOfObjects() / KB,
378            map_space_->Available() / KB,
379            map_space_->CommittedMemory() / KB);
380   PrintPID("Cell space,         used: %6" V8_PTR_PREFIX "d KB"
381                ", available: %6" V8_PTR_PREFIX "d KB"
382                ", committed: %6" V8_PTR_PREFIX "d KB\n",
383            cell_space_->SizeOfObjects() / KB,
384            cell_space_->Available() / KB,
385            cell_space_->CommittedMemory() / KB);
386   PrintPID("Large object space, used: %6" V8_PTR_PREFIX "d KB"
387                ", available: %6" V8_PTR_PREFIX "d KB"
388                ", committed: %6" V8_PTR_PREFIX "d KB\n",
389            lo_space_->SizeOfObjects() / KB,
390            lo_space_->Available() / KB,
391            lo_space_->CommittedMemory() / KB);
392   PrintPID("All spaces,         used: %6" V8_PTR_PREFIX "d KB"
393                ", available: %6" V8_PTR_PREFIX "d KB"
394                ", committed: %6" V8_PTR_PREFIX "d KB\n",
395            this->SizeOfObjects() / KB,
396            this->Available() / KB,
397            this->CommittedMemory() / KB);
398   PrintPID("Total time spent in GC  : %.1f ms\n", total_gc_time_ms_);
399 }
400
401
402 // TODO(1238405): Combine the infrastructure for --heap-stats and
403 // --log-gc to avoid the complicated preprocessor and flag testing.
404 void Heap::ReportStatisticsAfterGC() {
405   // As before GC, we use some complicated logic to ensure that
406   // NewSpace statistics are logged exactly once when --log-gc is turned on.
407 #if defined(DEBUG)
408   if (FLAG_heap_stats) {
409     new_space_.CollectStatistics();
410     ReportHeapStatistics("After GC");
411   } else if (FLAG_log_gc) {
412     new_space_.ReportStatistics();
413   }
414 #else
415   if (FLAG_log_gc) new_space_.ReportStatistics();
416 #endif  // DEBUG
417 }
418
419
420 void Heap::GarbageCollectionPrologue() {
421   {  AllowHeapAllocation for_the_first_part_of_prologue;
422     isolate_->transcendental_cache()->Clear();
423     ClearJSFunctionResultCaches();
424     gc_count_++;
425     unflattened_strings_length_ = 0;
426
427     if (FLAG_flush_code && FLAG_flush_code_incrementally) {
428       mark_compact_collector()->EnableCodeFlushing(true);
429     }
430
431 #ifdef VERIFY_HEAP
432     if (FLAG_verify_heap) {
433       Verify();
434     }
435 #endif
436   }
437
438 #ifdef DEBUG
439   ASSERT(!AllowHeapAllocation::IsAllowed() && gc_state_ == NOT_IN_GC);
440
441   if (FLAG_gc_verbose) Print();
442
443   ReportStatisticsBeforeGC();
444 #endif  // DEBUG
445
446   store_buffer()->GCPrologue();
447 }
448
449
450 intptr_t Heap::SizeOfObjects() {
451   intptr_t total = 0;
452   AllSpaces spaces(this);
453   for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
454     total += space->SizeOfObjects();
455   }
456   return total;
457 }
458
459
460 void Heap::RepairFreeListsAfterBoot() {
461   PagedSpaces spaces(this);
462   for (PagedSpace* space = spaces.next();
463        space != NULL;
464        space = spaces.next()) {
465     space->RepairFreeListsAfterBoot();
466   }
467 }
468
469
470 void Heap::GarbageCollectionEpilogue() {
471   store_buffer()->GCEpilogue();
472
473   // In release mode, we only zap the from space under heap verification.
474   if (Heap::ShouldZapGarbage()) {
475     ZapFromSpace();
476   }
477
478 #ifdef VERIFY_HEAP
479   if (FLAG_verify_heap) {
480     Verify();
481   }
482 #endif
483
484   AllowHeapAllocation for_the_rest_of_the_epilogue;
485
486 #ifdef DEBUG
487   if (FLAG_print_global_handles) isolate_->global_handles()->Print();
488   if (FLAG_print_handles) PrintHandles();
489   if (FLAG_gc_verbose) Print();
490   if (FLAG_code_stats) ReportCodeStatistics("After GC");
491 #endif
492   if (FLAG_deopt_every_n_garbage_collections > 0) {
493     if (++gcs_since_last_deopt_ == FLAG_deopt_every_n_garbage_collections) {
494       Deoptimizer::DeoptimizeAll(isolate());
495       gcs_since_last_deopt_ = 0;
496     }
497   }
498
499   isolate_->counters()->alive_after_last_gc()->Set(
500       static_cast<int>(SizeOfObjects()));
501
502   isolate_->counters()->string_table_capacity()->Set(
503       string_table()->Capacity());
504   isolate_->counters()->number_of_symbols()->Set(
505       string_table()->NumberOfElements());
506
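  // The fragmentation samples below are the percentage of committed memory
  // that is not occupied by live objects. For example, 60 MB of live objects
  // in 100 MB of committed memory yields an external_fragmentation_total
  // sample of 100 - 60 = 40.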
507   if (CommittedMemory() > 0) {
508     isolate_->counters()->external_fragmentation_total()->AddSample(
509         static_cast<int>(100 - (SizeOfObjects() * 100.0) / CommittedMemory()));
510
511     isolate_->counters()->heap_fraction_map_space()->AddSample(
512         static_cast<int>(
513             (map_space()->CommittedMemory() * 100.0) / CommittedMemory()));
514     isolate_->counters()->heap_fraction_cell_space()->AddSample(
515         static_cast<int>(
516             (cell_space()->CommittedMemory() * 100.0) / CommittedMemory()));
517
518     isolate_->counters()->heap_sample_total_committed()->AddSample(
519         static_cast<int>(CommittedMemory() / KB));
520     isolate_->counters()->heap_sample_total_used()->AddSample(
521         static_cast<int>(SizeOfObjects() / KB));
522     isolate_->counters()->heap_sample_map_space_committed()->AddSample(
523         static_cast<int>(map_space()->CommittedMemory() / KB));
524     isolate_->counters()->heap_sample_cell_space_committed()->AddSample(
525         static_cast<int>(cell_space()->CommittedMemory() / KB));
526   }
527
528 #define UPDATE_COUNTERS_FOR_SPACE(space)                                       \
529   isolate_->counters()->space##_bytes_available()->Set(                        \
530       static_cast<int>(space()->Available()));                                 \
531   isolate_->counters()->space##_bytes_committed()->Set(                        \
532       static_cast<int>(space()->CommittedMemory()));                           \
533   isolate_->counters()->space##_bytes_used()->Set(                             \
534       static_cast<int>(space()->SizeOfObjects()));
535 #define UPDATE_FRAGMENTATION_FOR_SPACE(space)                                  \
536   if (space()->CommittedMemory() > 0) {                                        \
537     isolate_->counters()->external_fragmentation_##space()->AddSample(         \
538         static_cast<int>(100 -                                                 \
539             (space()->SizeOfObjects() * 100.0) / space()->CommittedMemory())); \
540   }
541 #define UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(space)                     \
542   UPDATE_COUNTERS_FOR_SPACE(space)                                             \
543   UPDATE_FRAGMENTATION_FOR_SPACE(space)
544
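  // For illustration, UPDATE_COUNTERS_FOR_SPACE(new_space) expands to three
  // counter updates of roughly this shape:
  //   isolate_->counters()->new_space_bytes_available()->Set(
  //       static_cast<int>(new_space()->Available()));
  // UPDATE_FRAGMENTATION_FOR_SPACE additionally records an
  // external_fragmentation_<space> sample whenever the space has committed
  // memory.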
545   UPDATE_COUNTERS_FOR_SPACE(new_space)
546   UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(old_pointer_space)
547   UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(old_data_space)
548   UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(code_space)
549   UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(map_space)
550   UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(cell_space)
551   UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(lo_space)
552 #undef UPDATE_COUNTERS_FOR_SPACE
553 #undef UPDATE_FRAGMENTATION_FOR_SPACE
554 #undef UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE
555
556 #if defined(DEBUG)
557   ReportStatisticsAfterGC();
558 #endif  // DEBUG
559 #ifdef ENABLE_DEBUGGER_SUPPORT
560   isolate_->debug()->AfterGarbageCollection();
561 #endif  // ENABLE_DEBUGGER_SUPPORT
562
563   error_object_list_.DeferredFormatStackTrace(isolate());
564 }
565
566
567 void Heap::CollectAllGarbage(int flags, const char* gc_reason) {
568   // Since we are ignoring the return value, the exact choice of space does
569   // not matter, so long as we do not specify NEW_SPACE, which would not
570   // cause a full GC.
571   mark_compact_collector_.SetFlags(flags);
572   CollectGarbage(OLD_POINTER_SPACE, gc_reason);
573   mark_compact_collector_.SetFlags(kNoGCFlags);
574 }
575
576
577 void Heap::CollectAllAvailableGarbage(const char* gc_reason) {
578   // Since we are ignoring the return value, the exact choice of space does
579   // not matter, so long as we do not specify NEW_SPACE, which would not
580   // cause a full GC.
581   // A major GC would invoke weak handle callbacks on weakly reachable
582   // handles, but won't collect weakly reachable objects until the next
583   // major GC.  Therefore, if we collect aggressively and a weak handle
584   // callback has been invoked, we rerun the major GC to release objects
585   // which become garbage.
586   // Note: as weak callbacks can execute arbitrary code, we cannot
587   // hope that eventually there will be no weak callback invocations.
588   // Therefore stop recollecting after several attempts.
589   mark_compact_collector()->SetFlags(kMakeHeapIterableMask |
590                                      kReduceMemoryFootprintMask);
591   isolate_->compilation_cache()->Clear();
592   const int kMaxNumberOfAttempts = 7;
593   for (int attempt = 0; attempt < kMaxNumberOfAttempts; attempt++) {
594     if (!CollectGarbage(OLD_POINTER_SPACE, MARK_COMPACTOR, gc_reason, NULL)) {
595       break;
596     }
597   }
598   mark_compact_collector()->SetFlags(kNoGCFlags);
599   new_space_.Shrink();
600   UncommitFromSpace();
601   incremental_marking()->UncommitMarkingDeque();
602 }
603
604
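// Performs a single garbage collection in the given space with the given
// collector. If incremental marking is in progress but not yet complete, a
// requested mark-compact may be downgraded to a scavenge (see below). The
// return value is the "next GC likely to collect more" hint produced by
// global handle post-processing; CollectAllAvailableGarbage() above uses it
// to decide whether another round is worthwhile.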
605 bool Heap::CollectGarbage(AllocationSpace space,
606                           GarbageCollector collector,
607                           const char* gc_reason,
608                           const char* collector_reason) {
609   // The VM is in the GC state until exiting this function.
610   VMState<GC> state(isolate_);
611
612 #ifdef DEBUG
613   // Reset the allocation timeout to the GC interval, but make sure to
614   // allow at least a few allocations after a collection. The reason
615   // for this is that we have a lot of allocation sequences and we
616   // assume that a garbage collection will allow the subsequent
617   // allocation attempts to go through.
618   allocation_timeout_ = Max(6, FLAG_gc_interval);
619 #endif
620
621   if (collector == SCAVENGER && !incremental_marking()->IsStopped()) {
622     if (FLAG_trace_incremental_marking) {
623       PrintF("[IncrementalMarking] Scavenge during marking.\n");
624     }
625   }
626
627   if (collector == MARK_COMPACTOR &&
628       !mark_compact_collector()->abort_incremental_marking() &&
629       !incremental_marking()->IsStopped() &&
630       !incremental_marking()->should_hurry() &&
631       FLAG_incremental_marking_steps) {
632     // Make progress in incremental marking.
633     const intptr_t kStepSizeWhenDelayedByScavenge = 1 * MB;
634     incremental_marking()->Step(kStepSizeWhenDelayedByScavenge,
635                                 IncrementalMarking::NO_GC_VIA_STACK_GUARD);
636     if (!incremental_marking()->IsComplete()) {
637       if (FLAG_trace_incremental_marking) {
638         PrintF("[IncrementalMarking] Delaying MarkSweep.\n");
639       }
640       collector = SCAVENGER;
641       collector_reason = "incremental marking delaying mark-sweep";
642     }
643   }
644
645   bool next_gc_likely_to_collect_more = false;
646
647   { GCTracer tracer(this, gc_reason, collector_reason);
648     ASSERT(AllowHeapAllocation::IsAllowed());
649     DisallowHeapAllocation no_allocation_during_gc;
650     GarbageCollectionPrologue();
651     // The GC count was incremented in the prologue.  Tell the tracer about
652     // it.
653     tracer.set_gc_count(gc_count_);
654
655     // Tell the tracer which collector we've selected.
656     tracer.set_collector(collector);
657
658     {
659       HistogramTimerScope histogram_timer_scope(
660           (collector == SCAVENGER) ? isolate_->counters()->gc_scavenger()
661                                    : isolate_->counters()->gc_compactor());
662       next_gc_likely_to_collect_more =
663           PerformGarbageCollection(collector, &tracer);
664     }
665
666     GarbageCollectionEpilogue();
667   }
668
669   // Start incremental marking for the next cycle. The heap snapshot
670   // generator, after aborting incremental marking, needs it to stay off.
671   if (!mark_compact_collector()->abort_incremental_marking() &&
672       incremental_marking()->IsStopped() &&
673       incremental_marking()->WorthActivating() &&
674       NextGCIsLikelyToBeFull()) {
675     incremental_marking()->Start();
676   }
677
678   return next_gc_likely_to_collect_more;
679 }
680
681
682 void Heap::PerformScavenge() {
683   GCTracer tracer(this, NULL, NULL);
684   if (incremental_marking()->IsStopped()) {
685     PerformGarbageCollection(SCAVENGER, &tracer);
686   } else {
687     PerformGarbageCollection(MARK_COMPACTOR, &tracer);
688   }
689 }
690
691
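// Moves len elements of array from src_index to dst_index with an
// overlap-safe OS::MemMove, emits write-barrier records for moved slots that
// point into new space when the array itself is outside new space, and
// notifies the incremental marker of the writes to the array.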
692 void Heap::MoveElements(FixedArray* array,
693                         int dst_index,
694                         int src_index,
695                         int len) {
696   if (len == 0) return;
697
698   ASSERT(array->map() != HEAP->fixed_cow_array_map());
699   Object** dst_objects = array->data_start() + dst_index;
700   OS::MemMove(dst_objects,
701               array->data_start() + src_index,
702               len * kPointerSize);
703   if (!InNewSpace(array)) {
704     for (int i = 0; i < len; i++) {
705       // TODO(hpayer): check store buffer for entries
706       if (InNewSpace(dst_objects[i])) {
707         RecordWrite(array->address(), array->OffsetOfElementAt(dst_index + i));
708       }
709     }
710   }
711   incremental_marking()->RecordWrites(array);
712 }
713
714
715 #ifdef VERIFY_HEAP
716 // Helper class for verifying the string table.
717 class StringTableVerifier : public ObjectVisitor {
718  public:
719   void VisitPointers(Object** start, Object** end) {
720     // Visit all HeapObject pointers in [start, end).
721     for (Object** p = start; p < end; p++) {
722       if ((*p)->IsHeapObject()) {
723         // Check that the string is actually internalized.
724         CHECK((*p)->IsTheHole() || (*p)->IsUndefined() ||
725               (*p)->IsInternalizedString());
726       }
727     }
728   }
729 };
730
731
732 static void VerifyStringTable() {
733   StringTableVerifier verifier;
734   HEAP->string_table()->IterateElements(&verifier);
735 }
736 #endif  // VERIFY_HEAP
737
738
739 static bool AbortIncrementalMarkingAndCollectGarbage(
740     Heap* heap,
741     AllocationSpace space,
742     const char* gc_reason = NULL) {
743   heap->mark_compact_collector()->SetFlags(Heap::kAbortIncrementalMarkingMask);
744   bool result = heap->CollectGarbage(space, gc_reason);
745   heap->mark_compact_collector()->SetFlags(Heap::kNoGCFlags);
746   return result;
747 }
748
749
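// Reserves sizes[space] bytes in each space, retrying with garbage
// collections for up to kThreshold rounds. Each successful reservation is
// formatted as a free list node so that it remains valid if a GC happens
// before deserialization, and its address is reported in
// locations_out[space].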
750 void Heap::ReserveSpace(
751     int *sizes,
752     Address *locations_out) {
753   bool gc_performed = true;
754   int counter = 0;
755   static const int kThreshold = 20;
756   while (gc_performed && counter++ < kThreshold) {
757     gc_performed = false;
758     ASSERT(NEW_SPACE == FIRST_PAGED_SPACE - 1);
759     for (int space = NEW_SPACE; space <= LAST_PAGED_SPACE; space++) {
760       if (sizes[space] != 0) {
761         MaybeObject* allocation;
762         if (space == NEW_SPACE) {
763           allocation = new_space()->AllocateRaw(sizes[space]);
764         } else {
765           allocation = paged_space(space)->AllocateRaw(sizes[space]);
766         }
767         FreeListNode* node;
768         if (!allocation->To<FreeListNode>(&node)) {
769           if (space == NEW_SPACE) {
770             Heap::CollectGarbage(NEW_SPACE,
771                                  "failed to reserve space in the new space");
772           } else {
773             AbortIncrementalMarkingAndCollectGarbage(
774                 this,
775                 static_cast<AllocationSpace>(space),
776                 "failed to reserve space in paged space");
777           }
778           gc_performed = true;
779           break;
780         } else {
781           // Mark with a free list node, in case we have a GC before
782           // deserializing.
783           node->set_size(this, sizes[space]);
784           locations_out[space] = node->address();
785         }
786       }
787     }
788   }
789
790   if (gc_performed) {
791     // Failed to reserve the space after several attempts.
792     V8::FatalProcessOutOfMemory("Heap::ReserveSpace");
793   }
794 }
795
796
797 void Heap::EnsureFromSpaceIsCommitted() {
798   if (new_space_.CommitFromSpaceIfNeeded()) return;
799
800   // Committing memory to from space failed.
801   // Memory is exhausted and we will die.
802   V8::FatalProcessOutOfMemory("Committing semi space failed.");
803 }
804
805
806 void Heap::ClearJSFunctionResultCaches() {
807   if (isolate_->bootstrapper()->IsActive()) return;
808
809   Object* context = native_contexts_list_;
810   while (!context->IsUndefined()) {
811     // Get the caches for this context. GC can happen when the context
812     // is not fully initialized, so the caches can be undefined.
813     Object* caches_or_undefined =
814         Context::cast(context)->get(Context::JSFUNCTION_RESULT_CACHES_INDEX);
815     if (!caches_or_undefined->IsUndefined()) {
816       FixedArray* caches = FixedArray::cast(caches_or_undefined);
817       // Clear the caches:
818       int length = caches->length();
819       for (int i = 0; i < length; i++) {
820         JSFunctionResultCache::cast(caches->get(i))->Clear();
821       }
822     }
823     // Get the next context:
824     context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
825   }
826 }
827
828
829 void Heap::ClearNormalizedMapCaches() {
830   if (isolate_->bootstrapper()->IsActive() &&
831       !incremental_marking()->IsMarking()) {
832     return;
833   }
834
835   Object* context = native_contexts_list_;
836   while (!context->IsUndefined()) {
837     // GC can happen when the context is not fully initialized,
838     // so the cache can be undefined.
839     Object* cache =
840         Context::cast(context)->get(Context::NORMALIZED_MAP_CACHE_INDEX);
841     if (!cache->IsUndefined()) {
842       NormalizedMapCache::cast(cache)->Clear();
843     }
844     context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
845   }
846 }
847
848
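// Updates the young generation survival statistics after a GC. The survival
// rate is the percentage of the new space (as sized at the start of the GC)
// that survived; for example, 2 MB of survivors out of an 8 MB new space
// gives a rate of 25. Runs of high or low rates and the change relative to
// the previous rate (measured against kYoungSurvivalRateAllowedDeviation)
// drive the promotion mode heuristics in PerformGarbageCollection().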
849 void Heap::UpdateSurvivalRateTrend(int start_new_space_size) {
850   double survival_rate =
851       (static_cast<double>(young_survivors_after_last_gc_) * 100) /
852       start_new_space_size;
853
854   if (survival_rate > kYoungSurvivalRateHighThreshold) {
855     high_survival_rate_period_length_++;
856   } else {
857     high_survival_rate_period_length_ = 0;
858   }
859
860   if (survival_rate < kYoungSurvivalRateLowThreshold) {
861     low_survival_rate_period_length_++;
862   } else {
863     low_survival_rate_period_length_ = 0;
864   }
865
866   double survival_rate_diff = survival_rate_ - survival_rate;
867
868   if (survival_rate_diff > kYoungSurvivalRateAllowedDeviation) {
869     set_survival_rate_trend(DECREASING);
870   } else if (survival_rate_diff < -kYoungSurvivalRateAllowedDeviation) {
871     set_survival_rate_trend(INCREASING);
872   } else {
873     set_survival_rate_trend(STABLE);
874   }
875
876   survival_rate_ = survival_rate;
877 }
878
879 bool Heap::PerformGarbageCollection(GarbageCollector collector,
880                                     GCTracer* tracer) {
881   bool next_gc_likely_to_collect_more = false;
882
883   if (collector != SCAVENGER) {
884     PROFILE(isolate_, CodeMovingGCEvent());
885   }
886
887 #ifdef VERIFY_HEAP
888   if (FLAG_verify_heap) {
889     VerifyStringTable();
890   }
891 #endif
892
893   GCType gc_type =
894       collector == MARK_COMPACTOR ? kGCTypeMarkSweepCompact : kGCTypeScavenge;
895
896   {
897     GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
898     VMState<EXTERNAL> state(isolate_);
899     CallGCPrologueCallbacks(gc_type, kNoGCCallbackFlags);
900   }
901
902   EnsureFromSpaceIsCommitted();
903
904   int start_new_space_size = Heap::new_space()->SizeAsInt();
905
906   if (IsHighSurvivalRate()) {
907     // We speed up the incremental marker if it is running so that it
908     // does not fall behind the rate of promotion, which would cause a
909     // constantly growing old space.
910     incremental_marking()->NotifyOfHighPromotionRate();
911   }
912
913   if (collector == MARK_COMPACTOR) {
914     // Perform mark-sweep with optional compaction.
915     MarkCompact(tracer);
916     sweep_generation_++;
917
918     UpdateSurvivalRateTrend(start_new_space_size);
919
920     size_of_old_gen_at_last_old_space_gc_ = PromotedSpaceSizeOfObjects();
921
922     old_generation_allocation_limit_ =
923         OldGenerationAllocationLimit(size_of_old_gen_at_last_old_space_gc_);
924
925     old_gen_exhausted_ = false;
926   } else {
927     tracer_ = tracer;
928     Scavenge();
929     tracer_ = NULL;
930
931     UpdateSurvivalRateTrend(start_new_space_size);
932   }
933
934   if (!new_space_high_promotion_mode_active_ &&
935       new_space_.Capacity() == new_space_.MaximumCapacity() &&
936       IsStableOrIncreasingSurvivalTrend() &&
937       IsHighSurvivalRate()) {
938     // Stable high survival rates even though the young generation is at
939     // maximum capacity indicate that most objects will be promoted.
940     // To decrease scavenger pauses and final mark-sweep pauses, we
941     // have to limit the maximal capacity of the young generation.
942     SetNewSpaceHighPromotionModeActive(true);
943     if (FLAG_trace_gc) {
944       PrintPID("Limited new space size due to high promotion rate: %d MB\n",
945                new_space_.InitialCapacity() / MB);
946     }
947     // Support for global pre-tenuring uses the high promotion mode as a
948     // heuristic indicator of whether to pretenure or not, so we trigger
949     // deoptimization here to take advantage of pre-tenuring as soon as
950     // possible.
951     if (FLAG_pretenuring) {
952       isolate_->stack_guard()->FullDeopt();
953     }
954   } else if (new_space_high_promotion_mode_active_ &&
955       IsStableOrDecreasingSurvivalTrend() &&
956       IsLowSurvivalRate()) {
957     // Decreasing low survival rates might indicate that the above high
958     // promotion mode is over and we should allow the young generation
959     // to grow again.
960     SetNewSpaceHighPromotionModeActive(false);
961     if (FLAG_trace_gc) {
962       PrintPID("Unlimited new space size due to low promotion rate: %d MB\n",
963                new_space_.MaximumCapacity() / MB);
964     }
965     // Trigger deoptimization here to turn off pre-tenuring as soon as
966     // possible.
967     if (FLAG_pretenuring) {
968       isolate_->stack_guard()->FullDeopt();
969     }
970   }
971
972   if (new_space_high_promotion_mode_active_ &&
973       new_space_.Capacity() > new_space_.InitialCapacity()) {
974     new_space_.Shrink();
975   }
976
977   isolate_->counters()->objs_since_last_young()->Set(0);
978
979   // Callbacks that fire after this point might trigger nested GCs and
980   // restart incremental marking, so the assertion can't be moved down.
981   ASSERT(collector == SCAVENGER || incremental_marking()->IsStopped());
982
983   gc_post_processing_depth_++;
984   { AllowHeapAllocation allow_allocation;
985     GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
986     next_gc_likely_to_collect_more =
987         isolate_->global_handles()->PostGarbageCollectionProcessing(
988             collector, tracer);
989   }
990   gc_post_processing_depth_--;
991
992   // Update relocatables.
993   Relocatable::PostGarbageCollectionProcessing();
994
995   if (collector == MARK_COMPACTOR) {
996     // Register the amount of external allocated memory.
997     amount_of_external_allocated_memory_at_last_global_gc_ =
998         amount_of_external_allocated_memory_;
999   }
1000
1001   {
1002     GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
1003     VMState<EXTERNAL> state(isolate_);
1004     CallGCEpilogueCallbacks(gc_type);
1005   }
1006
1007 #ifdef VERIFY_HEAP
1008   if (FLAG_verify_heap) {
1009     VerifyStringTable();
1010   }
1011 #endif
1012
1013   return next_gc_likely_to_collect_more;
1014 }
1015
1016
1017 void Heap::CallGCPrologueCallbacks(GCType gc_type, GCCallbackFlags flags) {
1018   if (gc_type == kGCTypeMarkSweepCompact && global_gc_prologue_callback_) {
1019     global_gc_prologue_callback_();
1020   }
1021   for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
1022     if (gc_type & gc_prologue_callbacks_[i].gc_type) {
1023       gc_prologue_callbacks_[i].callback(gc_type, flags);
1024     }
1025   }
1026 }
1027
1028
1029 void Heap::CallGCEpilogueCallbacks(GCType gc_type) {
1030   for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
1031     if (gc_type & gc_epilogue_callbacks_[i].gc_type) {
1032       gc_epilogue_callbacks_[i].callback(gc_type, kNoGCCallbackFlags);
1033     }
1034   }
1035   if (gc_type == kGCTypeMarkSweepCompact && global_gc_epilogue_callback_) {
1036     global_gc_epilogue_callback_();
1037   }
1038 }
1039
1040
1041 void Heap::MarkCompact(GCTracer* tracer) {
1042   gc_state_ = MARK_COMPACT;
1043   LOG(isolate_, ResourceEvent("markcompact", "begin"));
1044
1045   mark_compact_collector_.Prepare(tracer);
1046
1047   ms_count_++;
1048   tracer->set_full_gc_count(ms_count_);
1049
1050   MarkCompactPrologue();
1051
1052   mark_compact_collector_.CollectGarbage();
1053
1054   LOG(isolate_, ResourceEvent("markcompact", "end"));
1055
1056   gc_state_ = NOT_IN_GC;
1057
1058   isolate_->counters()->objs_since_last_full()->Set(0);
1059
1060   contexts_disposed_ = 0;
1061
1062   flush_monomorphic_ics_ = false;
1063 }
1064
1065
1066 void Heap::MarkCompactPrologue() {
1067   // At any old GC clear the keyed lookup cache to enable collection of unused
1068   // maps.
1069   isolate_->keyed_lookup_cache()->Clear();
1070   isolate_->context_slot_cache()->Clear();
1071   isolate_->descriptor_lookup_cache()->Clear();
1072   RegExpResultsCache::Clear(string_split_cache());
1073   RegExpResultsCache::Clear(regexp_multiple_cache());
1074
1075   isolate_->compilation_cache()->MarkCompactPrologue();
1076
1077   CompletelyClearInstanceofCache();
1078
1079   FlushNumberStringCache();
1080   if (FLAG_cleanup_code_caches_at_gc) {
1081     polymorphic_code_cache()->set_cache(undefined_value());
1082   }
1083
1084   ClearNormalizedMapCaches();
1085 }
1086
1087
1088 Object* Heap::FindCodeObject(Address a) {
1089   return isolate()->inner_pointer_to_code_cache()->
1090       GcSafeFindCodeForInnerPointer(a);
1091 }
1092
1093
1094 // Helper class for copying HeapObjects
1095 class ScavengeVisitor: public ObjectVisitor {
1096  public:
1097   explicit ScavengeVisitor(Heap* heap) : heap_(heap) {}
1098
1099   void VisitPointer(Object** p) { ScavengePointer(p); }
1100
1101   void VisitPointers(Object** start, Object** end) {
1102     // Copy all HeapObject pointers in [start, end)
1103     for (Object** p = start; p < end; p++) ScavengePointer(p);
1104   }
1105
1106  private:
1107   void ScavengePointer(Object** p) {
1108     Object* object = *p;
1109     if (!heap_->InNewSpace(object)) return;
1110     Heap::ScavengeObject(reinterpret_cast<HeapObject**>(p),
1111                          reinterpret_cast<HeapObject*>(object));
1112   }
1113
1114   Heap* heap_;
1115 };
1116
1117
1118 #ifdef VERIFY_HEAP
1119 // Visitor class to verify pointers in code or data space do not point into
1120 // new space.
1121 class VerifyNonPointerSpacePointersVisitor: public ObjectVisitor {
1122  public:
1123   void VisitPointers(Object** start, Object** end) {
1124     for (Object** current = start; current < end; current++) {
1125       if ((*current)->IsHeapObject()) {
1126         CHECK(!HEAP->InNewSpace(HeapObject::cast(*current)));
1127       }
1128     }
1129   }
1130 };
1131
1132
1133 static void VerifyNonPointerSpacePointers() {
1134   // Verify that there are no pointers to new space in spaces where we
1135   // do not expect them.
1136   VerifyNonPointerSpacePointersVisitor v;
1137   HeapObjectIterator code_it(HEAP->code_space());
1138   for (HeapObject* object = code_it.Next();
1139        object != NULL; object = code_it.Next())
1140     object->Iterate(&v);
1141
1142   // The old data space is normally swept conservatively, in which case the
1143   // iterator does not work, so we normally skip the next bit.
1144   if (!HEAP->old_data_space()->was_swept_conservatively()) {
1145     HeapObjectIterator data_it(HEAP->old_data_space());
1146     for (HeapObject* object = data_it.Next();
1147          object != NULL; object = data_it.Next())
1148       object->Iterate(&v);
1149   }
1150 }
1151 #endif  // VERIFY_HEAP
1152
1153
1154 void Heap::CheckNewSpaceExpansionCriteria() {
1155   if (new_space_.Capacity() < new_space_.MaximumCapacity() &&
1156       survived_since_last_expansion_ > new_space_.Capacity() &&
1157       !new_space_high_promotion_mode_active_) {
1158     // Grow the size of new space if there is room to grow, enough data
1159     // has survived scavenge since the last expansion and we are not in
1160     // high promotion mode.
1161     new_space_.Grow();
1162     survived_since_last_expansion_ = 0;
1163   }
1164 }
1165
1166
1167 static bool IsUnscavengedHeapObject(Heap* heap, Object** p) {
1168   return heap->InNewSpace(*p) &&
1169       !HeapObject::cast(*p)->map_word().IsForwardingAddress();
1170 }
1171
1172
1173 void Heap::ScavengeStoreBufferCallback(
1174     Heap* heap,
1175     MemoryChunk* page,
1176     StoreBufferEvent event) {
1177   heap->store_buffer_rebuilder_.Callback(page, event);
1178 }
1179
1180
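// A note on the "1/5" heuristic below: if a scanned page contributed X
// entries and R slots remained in the store buffer when scanning of that page
// began, then the check
//   Top() - start_of_current_page_ >= (Limit() - Top()) >> 2
// is (up to integer rounding) X >= (R - X) / 4, which is equivalent to
// X >= R / 5, i.e. the page consumed at least a fifth of the then-remaining
// slots.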
1181 void StoreBufferRebuilder::Callback(MemoryChunk* page, StoreBufferEvent event) {
1182   if (event == kStoreBufferStartScanningPagesEvent) {
1183     start_of_current_page_ = NULL;
1184     current_page_ = NULL;
1185   } else if (event == kStoreBufferScanningPageEvent) {
1186     if (current_page_ != NULL) {
1187       // If this page already overflowed the store buffer during this iteration.
1188       if (current_page_->scan_on_scavenge()) {
1189         // Then we should wipe out the entries that have been added for it.
1190         store_buffer_->SetTop(start_of_current_page_);
1191       } else if (store_buffer_->Top() - start_of_current_page_ >=
1192                  (store_buffer_->Limit() - store_buffer_->Top()) >> 2) {
1193         // Did we find too many pointers in the previous page?  The heuristic is
1194         // that no page can take more than 1/5 of the remaining slots in the store
1195         // buffer.
1196         current_page_->set_scan_on_scavenge(true);
1197         store_buffer_->SetTop(start_of_current_page_);
1198       } else {
1199         // In this case the page we scanned took a reasonable number of slots in
1200         // the store buffer.  It has now been rehabilitated and is no longer
1201         // marked scan_on_scavenge.
1202         ASSERT(!current_page_->scan_on_scavenge());
1203       }
1204     }
1205     start_of_current_page_ = store_buffer_->Top();
1206     current_page_ = page;
1207   } else if (event == kStoreBufferFullEvent) {
1208     // The current page overflowed the store buffer again.  Wipe out its entries
1209     // in the store buffer and mark it scan-on-scavenge again.  This may happen
1210     // several times while scanning.
1211     if (current_page_ == NULL) {
1212       // Store Buffer overflowed while scanning promoted objects.  These are not
1213       // in any particular page, though they are likely to be clustered by the
1214       // allocation routines.
1215       store_buffer_->EnsureSpace(StoreBuffer::kStoreBufferSize);
1216     } else {
1217       // Store Buffer overflowed while scanning a particular old space page for
1218       // pointers to new space.
1219       ASSERT(current_page_ == page);
1220       ASSERT(page != NULL);
1221       current_page_->set_scan_on_scavenge(true);
1222       ASSERT(start_of_current_page_ != store_buffer_->Top());
1223       store_buffer_->SetTop(start_of_current_page_);
1224     }
1225   } else {
1226     UNREACHABLE();
1227   }
1228 }
1229
1230
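// The promotion queue lives in the unused top end of to-space: front_ and
// rear_ start at ToSpaceEnd() and the queue grows downwards towards limit_ at
// ToSpaceStart(). As RelocateQueueHead() below shows, each entry is a pair of
// words, a size followed by the promoted object's address, and the head can
// be moved into the emergency_stack_ list when necessary.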
1231 void PromotionQueue::Initialize() {
1232   // Assumes that a NewSpacePage exactly fits a number of promotion queue
1233   // entries (where each is a pair of intptr_t). This allows us to simplify
1234   // the test for when to switch pages.
1235   ASSERT((Page::kPageSize - MemoryChunk::kBodyOffset) % (2 * kPointerSize)
1236          == 0);
1237   limit_ = reinterpret_cast<intptr_t*>(heap_->new_space()->ToSpaceStart());
1238   front_ = rear_ =
1239       reinterpret_cast<intptr_t*>(heap_->new_space()->ToSpaceEnd());
1240   emergency_stack_ = NULL;
1241   guard_ = false;
1242 }
1243
1244
1245 void PromotionQueue::RelocateQueueHead() {
1246   ASSERT(emergency_stack_ == NULL);
1247
1248   Page* p = Page::FromAllocationTop(reinterpret_cast<Address>(rear_));
1249   intptr_t* head_start = rear_;
1250   intptr_t* head_end =
1251       Min(front_, reinterpret_cast<intptr_t*>(p->area_end()));
1252
1253   int entries_count =
1254       static_cast<int>(head_end - head_start) / kEntrySizeInWords;
1255
1256   emergency_stack_ = new List<Entry>(2 * entries_count);
1257
1258   while (head_start != head_end) {
1259     int size = static_cast<int>(*(head_start++));
1260     HeapObject* obj = reinterpret_cast<HeapObject*>(*(head_start++));
1261     emergency_stack_->Add(Entry(obj, size));
1262   }
1263   rear_ = head_end;
1264 }
1265
1266
1267 class ScavengeWeakObjectRetainer : public WeakObjectRetainer {
1268  public:
1269   explicit ScavengeWeakObjectRetainer(Heap* heap) : heap_(heap) { }
1270
1271   virtual Object* RetainAs(Object* object) {
1272     if (!heap_->InFromSpace(object)) {
1273       return object;
1274     }
1275
1276     MapWord map_word = HeapObject::cast(object)->map_word();
1277     if (map_word.IsForwardingAddress()) {
1278       return map_word.ToForwardingAddress();
1279     }
1280     return NULL;
1281   }
1282
1283  private:
1284   Heap* heap_;
1285 };
1286
1287
1288 void Heap::Scavenge() {
1289   RelocationLock relocation_lock(this);
1290
1291 #ifdef VERIFY_HEAP
1292   if (FLAG_verify_heap) VerifyNonPointerSpacePointers();
1293 #endif
1294
1295   gc_state_ = SCAVENGE;
1296
1297   // Implements Cheney's copying algorithm
1298   LOG(isolate_, ResourceEvent("scavenge", "begin"));
1299
1300   // Clear descriptor cache.
1301   isolate_->descriptor_lookup_cache()->Clear();
1302
1303   // Used for updating survived_since_last_expansion_ at function end.
1304   intptr_t survived_watermark = PromotedSpaceSizeOfObjects();
1305
1306   CheckNewSpaceExpansionCriteria();
1307
1308   SelectScavengingVisitorsTable();
1309
1310   incremental_marking()->PrepareForScavenge();
1311
1312   paged_space(OLD_DATA_SPACE)->EnsureSweeperProgress(new_space_.Size());
1313   paged_space(OLD_POINTER_SPACE)->EnsureSweeperProgress(new_space_.Size());
1314
1315   // Flip the semispaces.  After flipping, to space is empty, from space has
1316   // live objects.
1317   new_space_.Flip();
1318   new_space_.ResetAllocationInfo();
1319
1320   // We need to sweep newly copied objects which can be either in the
1321   // to space or promoted to the old generation.  For to-space
1322   // objects, we treat the bottom of the to space as a queue.  Newly
1323   // copied and unswept objects lie between a 'front' mark and the
1324   // allocation pointer.
1325   //
1326   // Promoted objects can go into various old-generation spaces, and
1327   // can be allocated internally in the spaces (from the free list).
1328   // We treat the top of the to space as a queue of addresses of
1329   // promoted objects.  The addresses of newly promoted and unswept
1330   // objects lie between a 'front' mark and a 'rear' mark that is
1331   // updated as a side effect of promoting an object.
1332   //
1333   // There is guaranteed to be enough room at the top of the to space
1334   // for the addresses of promoted objects: every object promoted
1335   // frees up its size in bytes from the top of the new space, and
1336   // objects are at least one pointer in size.
1337   Address new_space_front = new_space_.ToSpaceStart();
1338   promotion_queue_.Initialize();
1339
1340 #ifdef DEBUG
1341   store_buffer()->Clean();
1342 #endif
1343
1344   ScavengeVisitor scavenge_visitor(this);
1345   // Copy roots.
1346   IterateRoots(&scavenge_visitor, VISIT_ALL_IN_SCAVENGE);
1347
1348   // Copy objects reachable from the old generation.
1349   {
1350     StoreBufferRebuildScope scope(this,
1351                                   store_buffer(),
1352                                   &ScavengeStoreBufferCallback);
1353     store_buffer()->IteratePointersToNewSpace(&ScavengeObject);
1354   }
1355
1356   // Copy objects reachable from cells by scavenging cell values directly.
1357   HeapObjectIterator cell_iterator(cell_space_);
1358   for (HeapObject* heap_object = cell_iterator.Next();
1359        heap_object != NULL;
1360        heap_object = cell_iterator.Next()) {
1361     if (heap_object->IsJSGlobalPropertyCell()) {
1362       JSGlobalPropertyCell* cell = JSGlobalPropertyCell::cast(heap_object);
1363       Address value_address = cell->ValueAddress();
1364       scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(value_address));
1365     }
1366   }
1367
1368   // Copy objects reachable from the code flushing candidates list.
1369   MarkCompactCollector* collector = mark_compact_collector();
1370   if (collector->is_code_flushing_enabled()) {
1371     collector->code_flusher()->IteratePointersToFromSpace(&scavenge_visitor);
1372   }
1373
1374   // Scavenge objects reachable from the native contexts list directly.
1375   scavenge_visitor.VisitPointer(BitCast<Object**>(&native_contexts_list_));
1376
1377   new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
1378
1379   while (isolate()->global_handles()->IterateObjectGroups(
1380       &scavenge_visitor, &IsUnscavengedHeapObject)) {
1381     new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
1382   }
1383   isolate()->global_handles()->RemoveObjectGroups();
1384   isolate()->global_handles()->RemoveImplicitRefGroups();
1385
1386   isolate_->global_handles()->IdentifyNewSpaceWeakIndependentHandles(
1387       &IsUnscavengedHeapObject);
1388   isolate_->global_handles()->IterateNewSpaceWeakIndependentRoots(
1389       &scavenge_visitor);
1390   new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
1391
1392   UpdateNewSpaceReferencesInExternalStringTable(
1393       &UpdateNewSpaceReferenceInExternalStringTableEntry);
1394
1395   error_object_list_.UpdateReferencesInNewSpace(this);
1396
1397   promotion_queue_.Destroy();
1398
1399   if (!FLAG_watch_ic_patching) {
1400     isolate()->runtime_profiler()->UpdateSamplesAfterScavenge();
1401   }
1402   incremental_marking()->UpdateMarkingDequeAfterScavenge();
1403
1404   ScavengeWeakObjectRetainer weak_object_retainer(this);
1405   ProcessWeakReferences(&weak_object_retainer);
1406
1407   ASSERT(new_space_front == new_space_.top());
1408
1409   // Set age mark.
1410   new_space_.set_age_mark(new_space_.top());
1411
1412   new_space_.LowerInlineAllocationLimit(
1413       new_space_.inline_allocation_limit_step());
1414
1415   // Update how much has survived scavenge.
1416   IncrementYoungSurvivorsCounter(static_cast<int>(
1417       (PromotedSpaceSizeOfObjects() - survived_watermark) + new_space_.Size()));
1418
1419   LOG(isolate_, ResourceEvent("scavenge", "end"));
1420
1421   gc_state_ = NOT_IN_GC;
1422
1423   scavenges_since_last_idle_round_++;
1424 }
1425
1426
1427 String* Heap::UpdateNewSpaceReferenceInExternalStringTableEntry(Heap* heap,
1428                                                                 Object** p) {
1429   MapWord first_word = HeapObject::cast(*p)->map_word();
1430
1431   if (!first_word.IsForwardingAddress()) {
1432     // Unreachable external string can be finalized.
1433     heap->FinalizeExternalString(String::cast(*p));
1434     return NULL;
1435   }
1436
1437   // String is still reachable.
1438   return String::cast(first_word.ToForwardingAddress());
1439 }
1440
1441
1442 void Heap::UpdateNewSpaceReferencesInExternalStringTable(
1443     ExternalStringTableUpdaterCallback updater_func) {
1444 #ifdef VERIFY_HEAP
1445   if (FLAG_verify_heap) {
1446     external_string_table_.Verify();
1447   }
1448 #endif
1449
1450   if (external_string_table_.new_space_strings_.is_empty()) return;
1451
1452   Object** start = &external_string_table_.new_space_strings_[0];
1453   Object** end = start + external_string_table_.new_space_strings_.length();
1454   Object** last = start;
1455
1456   for (Object** p = start; p < end; ++p) {
1457     ASSERT(InFromSpace(*p));
1458     String* target = updater_func(this, p);
1459
1460     if (target == NULL) continue;
1461
1462     ASSERT(target->IsExternalString());
1463
1464     if (InNewSpace(target)) {
1465       // String is still in new space.  Update the table entry.
1466       *last = target;
1467       ++last;
1468     } else {
1469       // String got promoted.  Move it to the old string list.
1470       external_string_table_.AddOldString(target);
1471     }
1472   }
1473
1474   ASSERT(last <= end);
1475   external_string_table_.ShrinkNewStrings(static_cast<int>(last - start));
1476 }
1477
1478
1479 void Heap::UpdateReferencesInExternalStringTable(
1480     ExternalStringTableUpdaterCallback updater_func) {
1481
1482   // Update old space string references.
1483   if (external_string_table_.old_space_strings_.length() > 0) {
1484     Object** start = &external_string_table_.old_space_strings_[0];
1485     Object** end = start + external_string_table_.old_space_strings_.length();
1486     for (Object** p = start; p < end; ++p) *p = updater_func(this, p);
1487   }
1488
1489   UpdateNewSpaceReferencesInExternalStringTable(updater_func);
1490 }
1491
1492
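// Walks the weak list of functions threaded through
// JSFunction::kNextFunctionLinkOffset, asking the retainer whether to keep
// each element. Survivors are re-linked into a new list (recording the
// updated slots while a compacting collection is in progress), dropped
// elements are skipped, and the pruned head is returned.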
1493 static Object* ProcessFunctionWeakReferences(Heap* heap,
1494                                              Object* function,
1495                                              WeakObjectRetainer* retainer,
1496                                              bool record_slots) {
1497   Object* undefined = heap->undefined_value();
1498   Object* head = undefined;
1499   JSFunction* tail = NULL;
1500   Object* candidate = function;
1501   while (candidate != undefined) {
1502     // Check whether to keep the candidate in the list.
1503     JSFunction* candidate_function = reinterpret_cast<JSFunction*>(candidate);
1504     Object* retain = retainer->RetainAs(candidate);
1505     if (retain != NULL) {
1506       if (head == undefined) {
1507         // First element in the list.
1508         head = retain;
1509       } else {
1510         // Subsequent elements in the list.
1511         ASSERT(tail != NULL);
1512         tail->set_next_function_link(retain);
1513         if (record_slots) {
1514           Object** next_function =
1515               HeapObject::RawField(tail, JSFunction::kNextFunctionLinkOffset);
1516           heap->mark_compact_collector()->RecordSlot(
1517               next_function, next_function, retain);
1518         }
1519       }
1520       // Retained function is new tail.
1521       candidate_function = reinterpret_cast<JSFunction*>(retain);
1522       tail = candidate_function;
1523
1524       ASSERT(retain->IsUndefined() || retain->IsJSFunction());
1525
1526       if (retain == undefined) break;
1527     }
1528
1529     // Move to next element in the list.
1530     candidate = candidate_function->next_function_link();
1531   }
1532
1533   // Terminate the list if there are one or more elements.
1534   if (tail != NULL) {
1535     tail->set_next_function_link(undefined);
1536   }
1537
1538   return head;
1539 }
1540
1541
1542 void Heap::ProcessWeakReferences(WeakObjectRetainer* retainer) {
1543   // We don't record weak slots during marking or scavenges.
1544   // Instead we do it once when we complete the mark-compact cycle.
1545   // Note that the write barrier has no effect if we are already in the middle
1546   // of a compacting mark-sweep cycle, so we have to record slots manually.
1547   bool record_slots =
1548       gc_state() == MARK_COMPACT &&
1549       mark_compact_collector()->is_compacting();
1550   ProcessArrayBuffers(retainer, record_slots);
1551   ProcessNativeContexts(retainer, record_slots);
1552 }
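// Illustrative sketch (not part of the original source) of the retainer
// contract relied on above: RetainAs() returns NULL to drop an object from a
// weak list, or the (possibly relocated) object to keep it.  A hypothetical
// scavenge-time retainer, assuming a heap_ member, could look like:
//
//   class ExampleWeakObjectRetainer : public WeakObjectRetainer {
//    public:
//     virtual Object* RetainAs(Object* object) {
//       if (!heap_->InFromSpace(object)) return object;  // Unaffected.
//       MapWord map_word = HeapObject::cast(object)->map_word();
//       if (map_word.IsForwardingAddress()) {
//         return map_word.ToForwardingAddress();  // Survived the scavenge.
//       }
//       return NULL;  // Dead: drop it from the weak list.
//     }
//   };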
1553
1554 void Heap::ProcessNativeContexts(WeakObjectRetainer* retainer,
1555                                  bool record_slots) {
1556   Object* undefined = undefined_value();
1557   Object* head = undefined;
1558   Context* tail = NULL;
1559   Object* candidate = native_contexts_list_;
1560
1561   while (candidate != undefined) {
1562     // Check whether to keep the candidate in the list.
1563     Context* candidate_context = reinterpret_cast<Context*>(candidate);
1564     Object* retain = retainer->RetainAs(candidate);
1565     if (retain != NULL) {
1566       if (head == undefined) {
1567         // First element in the list.
1568         head = retain;
1569       } else {
1570         // Subsequent elements in the list.
1571         ASSERT(tail != NULL);
1572         tail->set_unchecked(this,
1573                             Context::NEXT_CONTEXT_LINK,
1574                             retain,
1575                             UPDATE_WRITE_BARRIER);
1576
1577         if (record_slots) {
1578           Object** next_context =
1579               HeapObject::RawField(
1580                   tail, FixedArray::SizeFor(Context::NEXT_CONTEXT_LINK));
1581           mark_compact_collector()->RecordSlot(
1582               next_context, next_context, retain);
1583         }
1584       }
1585       // Retained context is new tail.
1586       candidate_context = reinterpret_cast<Context*>(retain);
1587       tail = candidate_context;
1588
1589       if (retain == undefined) break;
1590
1591       // Process the weak list of optimized functions for the context.
1592       Object* function_list_head =
1593           ProcessFunctionWeakReferences(
1594               this,
1595               candidate_context->get(Context::OPTIMIZED_FUNCTIONS_LIST),
1596               retainer,
1597               record_slots);
1598       candidate_context->set_unchecked(this,
1599                                        Context::OPTIMIZED_FUNCTIONS_LIST,
1600                                        function_list_head,
1601                                        UPDATE_WRITE_BARRIER);
1602       if (record_slots) {
1603         Object** optimized_functions =
1604             HeapObject::RawField(
1605                 tail, FixedArray::SizeFor(Context::OPTIMIZED_FUNCTIONS_LIST));
1606         mark_compact_collector()->RecordSlot(
1607             optimized_functions, optimized_functions, function_list_head);
1608       }
1609     }
1610
1611     // Move to next element in the list.
1612     candidate = candidate_context->get(Context::NEXT_CONTEXT_LINK);
1613   }
1614
1615   // Terminate the list if there are one or more elements.
1616   if (tail != NULL) {
1617     tail->set_unchecked(this,
1618                         Context::NEXT_CONTEXT_LINK,
1619                         Heap::undefined_value(),
1620                         UPDATE_WRITE_BARRIER);
1621   }
1622
1623   // Update the head of the list of contexts.
1624   native_contexts_list_ = head;
1625 }
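// For reference, the weak lists processed above are chained as follows:
//
//   native_contexts_list_
//     -> Context --(NEXT_CONTEXT_LINK)--> Context --> ... --> undefined
//          |
//          +--(OPTIMIZED_FUNCTIONS_LIST)--> JSFunction
//               --(next_function_link)--> JSFunction --> ... --> undefined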
1626
1627
1628 template <class T>
1629 struct WeakListVisitor;
1630
1631
1632 template <class T>
1633 static Object* VisitWeakList(Object* list,
1634                              MarkCompactCollector* collector,
1635                              WeakObjectRetainer* retainer, bool record_slots) {
1636   Object* head = Smi::FromInt(0);
1637   T* tail = NULL;
1638   while (list != Smi::FromInt(0)) {
1639     Object* retained = retainer->RetainAs(list);
1640     if (retained != NULL) {
1641       if (head == Smi::FromInt(0)) {
1642         head = retained;
1643       } else {
1644         ASSERT(tail != NULL);
1645         WeakListVisitor<T>::set_weak_next(tail, retained);
1646         if (record_slots) {
1647           Object** next_slot =
1648             HeapObject::RawField(tail, WeakListVisitor<T>::kWeakNextOffset);
1649           collector->RecordSlot(next_slot, next_slot, retained);
1650         }
1651       }
1652       tail = reinterpret_cast<T*>(retained);
1653       WeakListVisitor<T>::VisitLiveObject(
1654           tail, collector, retainer, record_slots);
1655     }
1656     list = WeakListVisitor<T>::get_weak_next(reinterpret_cast<T*>(list));
1657   }
1658   if (tail != NULL) {
1659     WeakListVisitor<T>::set_weak_next(tail, Smi::FromInt(0));
1660   }
1661   return head;
1662 }
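// A WeakListVisitor<T> specialization supplies the pieces VisitWeakList<T>
// needs for a given weak list element type: get_weak_next()/set_weak_next()
// to read and write the link field, kWeakNextOffset so the link slot can be
// reported to the collector, and VisitLiveObject() for per-element work on
// retained elements (e.g. JSArrayBuffer below recurses into its typed arrays).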
1663
1664
1665 template<>
1666 struct WeakListVisitor<JSTypedArray> {
1667   static void set_weak_next(JSTypedArray* obj, Object* next) {
1668     obj->set_weak_next(next);
1669   }
1670
1671   static Object* get_weak_next(JSTypedArray* obj) {
1672     return obj->weak_next();
1673   }
1674
1675   static void VisitLiveObject(JSTypedArray* obj,
1676                               MarkCompactCollector* collector,
1677                               WeakObjectRetainer* retainer,
1678                               bool record_slots) {}
1679
1680   static const int kWeakNextOffset = JSTypedArray::kWeakNextOffset;
1681 };
1682
1683
1684 template<>
1685 struct WeakListVisitor<JSArrayBuffer> {
1686   static void set_weak_next(JSArrayBuffer* obj, Object* next) {
1687     obj->set_weak_next(next);
1688   }
1689
1690   static Object* get_weak_next(JSArrayBuffer* obj) {
1691     return obj->weak_next();
1692   }
1693
1694   static void VisitLiveObject(JSArrayBuffer* array_buffer,
1695                               MarkCompactCollector* collector,
1696                               WeakObjectRetainer* retainer,
1697                               bool record_slots) {
1698     Object* typed_array_obj =
1699         VisitWeakList<JSTypedArray>(array_buffer->weak_first_array(),
1700                                     collector, retainer, record_slots);
1701     array_buffer->set_weak_first_array(typed_array_obj);
1702     if (typed_array_obj != Smi::FromInt(0) && record_slots) {
1703       Object** slot = HeapObject::RawField(
1704           array_buffer, JSArrayBuffer::kWeakFirstArrayOffset);
1705       collector->RecordSlot(slot, slot, typed_array_obj);
1706     }
1707   }
1708
1709   static const int kWeakNextOffset = JSArrayBuffer::kWeakNextOffset;
1710 };
1711
1712
1713 void Heap::ProcessArrayBuffers(WeakObjectRetainer* retainer,
1714                                bool record_slots) {
1715   Object* array_buffer_obj =
1716       VisitWeakList<JSArrayBuffer>(array_buffers_list(),
1717                                    mark_compact_collector(),
1718                                    retainer, record_slots);
1719   set_array_buffers_list(array_buffer_obj);
1720 }
1721
1722
1723 void Heap::VisitExternalResources(v8::ExternalResourceVisitor* visitor) {
1724   DisallowHeapAllocation no_allocation;
1725
1726   // Both the external string table and the string table may contain
1727   // external strings, but neither lists them exhaustively, nor is the
1728   // intersection set empty.  Therefore we iterate over the external string
1729   // table first, ignoring internalized strings, and then over the
1730   // internalized string table.
1731
1732   class ExternalStringTableVisitorAdapter : public ObjectVisitor {
1733    public:
1734     explicit ExternalStringTableVisitorAdapter(
1735         v8::ExternalResourceVisitor* visitor) : visitor_(visitor) {}
1736     virtual void VisitPointers(Object** start, Object** end) {
1737       for (Object** p = start; p < end; p++) {
1738         // Visit non-internalized external strings,
1739         // since internalized strings are listed in the string table.
1740         if (!(*p)->IsInternalizedString()) {
1741           ASSERT((*p)->IsExternalString());
1742           visitor_->VisitExternalString(Utils::ToLocal(
1743               Handle<String>(String::cast(*p))));
1744         }
1745       }
1746     }
1747    private:
1748     v8::ExternalResourceVisitor* visitor_;
1749   } external_string_table_visitor(visitor);
1750
1751   external_string_table_.Iterate(&external_string_table_visitor);
1752
1753   class StringTableVisitorAdapter : public ObjectVisitor {
1754    public:
1755     explicit StringTableVisitorAdapter(
1756         v8::ExternalResourceVisitor* visitor) : visitor_(visitor) {}
1757     virtual void VisitPointers(Object** start, Object** end) {
1758       for (Object** p = start; p < end; p++) {
1759         if ((*p)->IsExternalString()) {
1760           ASSERT((*p)->IsInternalizedString());
1761           visitor_->VisitExternalString(Utils::ToLocal(
1762               Handle<String>(String::cast(*p))));
1763         }
1764       }
1765     }
1766    private:
1767     v8::ExternalResourceVisitor* visitor_;
1768   } string_table_visitor(visitor);
1769
1770   string_table()->IterateElements(&string_table_visitor);
1771 }
1772
1773
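// Static visitor used while scanning the unprocessed region of new space:
// every pointer that still points into new space is passed to
// Heap::ScavengeObject, which either follows an existing forwarding address
// or evacuates the object and updates the slot.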
1774 class NewSpaceScavenger : public StaticNewSpaceVisitor<NewSpaceScavenger> {
1775  public:
1776   static inline void VisitPointer(Heap* heap, Object** p) {
1777     Object* object = *p;
1778     if (!heap->InNewSpace(object)) return;
1779     Heap::ScavengeObject(reinterpret_cast<HeapObject**>(p),
1780                          reinterpret_cast<HeapObject*>(object));
1781   }
1782 };
1783
1784
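// Core of the scavenge: alternates between (1) scanning the still-unprocessed
// region of to-space [new_space_front, new_space_.top()) with the
// NewSpaceScavenger and (2) draining the promotion queue, re-scanning promoted
// objects for pointers back into from-space, until both are empty.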
1785 Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
1786                          Address new_space_front) {
1787   do {
1788     SemiSpace::AssertValidRange(new_space_front, new_space_.top());
1789     // The addresses new_space_front and new_space_.top() define a
1790     // queue of unprocessed copied objects.  Process them until the
1791     // queue is empty.
1792     while (new_space_front != new_space_.top()) {
1793       if (!NewSpacePage::IsAtEnd(new_space_front)) {
1794         HeapObject* object = HeapObject::FromAddress(new_space_front);
1795         new_space_front +=
1796           NewSpaceScavenger::IterateBody(object->map(), object);
1797       } else {
1798         new_space_front =
1799             NewSpacePage::FromLimit(new_space_front)->next_page()->area_start();
1800       }
1801     }
1802
1803     // Promote and process all the to-be-promoted objects.
1804     {
1805       StoreBufferRebuildScope scope(this,
1806                                     store_buffer(),
1807                                     &ScavengeStoreBufferCallback);
1808       while (!promotion_queue()->is_empty()) {
1809         HeapObject* target;
1810         int size;
1811         promotion_queue()->remove(&target, &size);
1812
1813         // A promoted object might already be partially visited
1814         // during old space pointer iteration. Thus we search specifically
1815         // for pointers to the from semispace instead of looking for
1816         // pointers to new space.
1817         ASSERT(!target->IsMap());
1818         IterateAndMarkPointersToFromSpace(target->address(),
1819                                           target->address() + size,
1820                                           &ScavengeObject);
1821       }
1822     }
1823
1824     // Take another spin if there are now unswept objects in new space
1825     // (there are currently no more unswept promoted objects).
1826   } while (new_space_front != new_space_.top());
1827
1828   return new_space_front;
1829 }
1830
1831
1832 STATIC_ASSERT((FixedDoubleArray::kHeaderSize & kDoubleAlignmentMask) == 0);
1833
1834
1835 INLINE(static HeapObject* EnsureDoubleAligned(Heap* heap,
1836                                               HeapObject* object,
1837                                               int size));
1838
1839 static HeapObject* EnsureDoubleAligned(Heap* heap,
1840                                        HeapObject* object,
1841                                        int size) {
1842   if ((OffsetFrom(object->address()) & kDoubleAlignmentMask) != 0) {
1843     heap->CreateFillerObjectAt(object->address(), kPointerSize);
1844     return HeapObject::FromAddress(object->address() + kPointerSize);
1845   } else {
1846     heap->CreateFillerObjectAt(object->address() + size - kPointerSize,
1847                                kPointerSize);
1848     return object;
1849   }
1850 }
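// Worked example (assuming a 32-bit target where kPointerSize == 4 and
// doubles need 8-byte alignment): callers request object_size + kPointerSize
// bytes (see EvacuateObject below).  If the raw allocation is misaligned, a
// one-word filler is placed at the start and the object begins one word
// later, now 8-byte aligned; if it is already aligned, the spare word at the
// end becomes the filler instead.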
1851
1852
1853 enum LoggingAndProfiling {
1854   LOGGING_AND_PROFILING_ENABLED,
1855   LOGGING_AND_PROFILING_DISABLED
1856 };
1857
1858
1859 enum MarksHandling { TRANSFER_MARKS, IGNORE_MARKS };
1860
1861
1862 template<MarksHandling marks_handling,
1863          LoggingAndProfiling logging_and_profiling_mode>
1864 class ScavengingVisitor : public StaticVisitorBase {
1865  public:
1866   static void Initialize() {
1867     table_.Register(kVisitSeqOneByteString, &EvacuateSeqOneByteString);
1868     table_.Register(kVisitSeqTwoByteString, &EvacuateSeqTwoByteString);
1869     table_.Register(kVisitShortcutCandidate, &EvacuateShortcutCandidate);
1870     table_.Register(kVisitByteArray, &EvacuateByteArray);
1871     table_.Register(kVisitFixedArray, &EvacuateFixedArray);
1872     table_.Register(kVisitFixedDoubleArray, &EvacuateFixedDoubleArray);
1873
1874     table_.Register(kVisitNativeContext,
1875                     &ObjectEvacuationStrategy<POINTER_OBJECT>::
1876                         template VisitSpecialized<Context::kSize>);
1877
1878     table_.Register(kVisitConsString,
1879                     &ObjectEvacuationStrategy<POINTER_OBJECT>::
1880                         template VisitSpecialized<ConsString::kSize>);
1881
1882     table_.Register(kVisitSlicedString,
1883                     &ObjectEvacuationStrategy<POINTER_OBJECT>::
1884                         template VisitSpecialized<SlicedString::kSize>);
1885
1886     table_.Register(kVisitSymbol,
1887                     &ObjectEvacuationStrategy<POINTER_OBJECT>::
1888                         template VisitSpecialized<Symbol::kSize>);
1889
1890     table_.Register(kVisitSharedFunctionInfo,
1891                     &ObjectEvacuationStrategy<POINTER_OBJECT>::
1892                         template VisitSpecialized<SharedFunctionInfo::kSize>);
1893
1894     table_.Register(kVisitJSWeakMap,
1895                     &ObjectEvacuationStrategy<POINTER_OBJECT>::
1896                     Visit);
1897
1898     table_.Register(kVisitJSArrayBuffer,
1899                     &ObjectEvacuationStrategy<POINTER_OBJECT>::
1900                     Visit);
1901
1902     table_.Register(kVisitJSTypedArray,
1903                     &ObjectEvacuationStrategy<POINTER_OBJECT>::
1904                     Visit);
1905
1906     table_.Register(kVisitJSRegExp,
1907                     &ObjectEvacuationStrategy<POINTER_OBJECT>::
1908                     Visit);
1909
1910     if (marks_handling == IGNORE_MARKS) {
1911       table_.Register(kVisitJSFunction,
1912                       &ObjectEvacuationStrategy<POINTER_OBJECT>::
1913                           template VisitSpecialized<JSFunction::kSize>);
1914     } else {
1915       table_.Register(kVisitJSFunction, &EvacuateJSFunction);
1916     }
1917
1918     table_.RegisterSpecializations<ObjectEvacuationStrategy<DATA_OBJECT>,
1919                                    kVisitDataObject,
1920                                    kVisitDataObjectGeneric>();
1921
1922     table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>,
1923                                    kVisitJSObject,
1924                                    kVisitJSObjectGeneric>();
1925
1926     table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>,
1927                                    kVisitStruct,
1928                                    kVisitStructGeneric>();
1929   }
1930
1931   static VisitorDispatchTable<ScavengingCallback>* GetTable() {
1932     return &table_;
1933   }
1934
1935  private:
1936   enum ObjectContents  { DATA_OBJECT, POINTER_OBJECT };
1937   enum SizeRestriction { SMALL, UNKNOWN_SIZE };
1938
1939   static void RecordCopiedObject(Heap* heap, HeapObject* obj) {
1940     bool should_record = false;
1941 #ifdef DEBUG
1942     should_record = FLAG_heap_stats;
1943 #endif
1944     should_record = should_record || FLAG_log_gc;
1945     if (should_record) {
1946       if (heap->new_space()->Contains(obj)) {
1947         heap->new_space()->RecordAllocation(obj);
1948       } else {
1949         heap->new_space()->RecordPromotion(obj);
1950       }
1951     }
1952   }
1953
1954   // Helper function used during evacuation to copy a source object to an
1955   // allocated target object and update the forwarding pointer in the source
1956   // object.
1957   INLINE(static void MigrateObject(Heap* heap,
1958                                    HeapObject* source,
1959                                    HeapObject* target,
1960                                    int size)) {
1961     // Copy the content of source to target.
1962     heap->CopyBlock(target->address(), source->address(), size);
1963
1964     // Set the forwarding address.
1965     source->set_map_word(MapWord::FromForwardingAddress(target));
1966
1967     if (logging_and_profiling_mode == LOGGING_AND_PROFILING_ENABLED) {
1968       // Update NewSpace stats if necessary.
1969       RecordCopiedObject(heap, target);
1970       HEAP_PROFILE(heap, ObjectMoveEvent(source->address(), target->address()));
1971       Isolate* isolate = heap->isolate();
1972       if (isolate->logger()->is_logging_code_events() ||
1973           isolate->cpu_profiler()->is_profiling()) {
1974         if (target->IsSharedFunctionInfo()) {
1975           PROFILE(isolate, SharedFunctionInfoMoveEvent(
1976               source->address(), target->address()));
1977         }
1978       }
1979     }
1980
1981     if (marks_handling == TRANSFER_MARKS) {
1982       if (Marking::TransferColor(source, target)) {
1983         MemoryChunk::IncrementLiveBytesFromGC(target->address(), size);
1984       }
1985     }
1986   }
1987
1988
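  // Copies or promotes a single object during scavenge.  Objects that
  // ShouldBePromoted() approves are allocated in old space (old data space for
  // data objects, old pointer space otherwise, or the large object space for
  // oversized objects without a size restriction) and, if they contain
  // pointers, queued for later re-scanning.  Otherwise the object is copied
  // within new space, where the allocation is assumed to succeed because
  // to-space has room for everything in from-space.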
1989   template<ObjectContents object_contents,
1990            SizeRestriction size_restriction,
1991            int alignment>
1992   static inline void EvacuateObject(Map* map,
1993                                     HeapObject** slot,
1994                                     HeapObject* object,
1995                                     int object_size) {
1996     SLOW_ASSERT((size_restriction != SMALL) ||
1997                 (object_size <= Page::kMaxNonCodeHeapObjectSize));
1998     SLOW_ASSERT(object->Size() == object_size);
1999
2000     int allocation_size = object_size;
2001     if (alignment != kObjectAlignment) {
2002       ASSERT(alignment == kDoubleAlignment);
2003       allocation_size += kPointerSize;
2004     }
2005
2006     Heap* heap = map->GetHeap();
2007     if (heap->ShouldBePromoted(object->address(), object_size)) {
2008       MaybeObject* maybe_result;
2009
2010       if ((size_restriction != SMALL) &&
2011           (allocation_size > Page::kMaxNonCodeHeapObjectSize)) {
2012         maybe_result = heap->lo_space()->AllocateRaw(allocation_size,
2013                                                      NOT_EXECUTABLE);
2014       } else {
2015         if (object_contents == DATA_OBJECT) {
2016           maybe_result = heap->old_data_space()->AllocateRaw(allocation_size);
2017         } else {
2018           maybe_result =
2019               heap->old_pointer_space()->AllocateRaw(allocation_size);
2020         }
2021       }
2022
2023       Object* result = NULL;  // Initialization to please compiler.
2024       if (maybe_result->ToObject(&result)) {
2025         HeapObject* target = HeapObject::cast(result);
2026
2027         if (alignment != kObjectAlignment) {
2028           target = EnsureDoubleAligned(heap, target, allocation_size);
2029         }
2030
2031         // Order is important: slot might be inside of the target if target
2032         // was allocated over a dead object and slot comes from the store
2033         // buffer.
2034         *slot = target;
2035         MigrateObject(heap, object, target, object_size);
2036
2037         if (object_contents == POINTER_OBJECT) {
2038           if (map->instance_type() == JS_FUNCTION_TYPE) {
2039             heap->promotion_queue()->insert(
2040                 target, JSFunction::kNonWeakFieldsEndOffset);
2041           } else {
2042             heap->promotion_queue()->insert(target, object_size);
2043           }
2044         }
2045
2046         heap->tracer()->increment_promoted_objects_size(object_size);
2047         return;
2048       }
2049     }
2050     MaybeObject* allocation = heap->new_space()->AllocateRaw(allocation_size);
2051     heap->promotion_queue()->SetNewLimit(heap->new_space()->top());
2052     Object* result = allocation->ToObjectUnchecked();
2053     HeapObject* target = HeapObject::cast(result);
2054
2055     if (alignment != kObjectAlignment) {
2056       target = EnsureDoubleAligned(heap, target, allocation_size);
2057     }
2058
2059     // Order is important: slot might be inside of the target if target
2060     // was allocated over a dead object and slot comes from the store
2061     // buffer.
2062     *slot = target;
2063     MigrateObject(heap, object, target, object_size);
2064     return;
2065   }
2066
2067
2068   static inline void EvacuateJSFunction(Map* map,
2069                                         HeapObject** slot,
2070                                         HeapObject* object) {
2071     ObjectEvacuationStrategy<POINTER_OBJECT>::
2072         template VisitSpecialized<JSFunction::kSize>(map, slot, object);
2073
2074     HeapObject* target = *slot;
2075     MarkBit mark_bit = Marking::MarkBitFrom(target);
2076     if (Marking::IsBlack(mark_bit)) {
2077       // This object is black and it might not be rescanned by the marker.
2078       // We should explicitly record the code entry slot for compaction because
2079       // promotion queue processing (IterateAndMarkPointersToFromSpace) will
2080       // miss it as it is not HeapObject-tagged.
2081       Address code_entry_slot =
2082           target->address() + JSFunction::kCodeEntryOffset;
2083       Code* code = Code::cast(Code::GetObjectFromEntryAddress(code_entry_slot));
2084       map->GetHeap()->mark_compact_collector()->
2085           RecordCodeEntrySlot(code_entry_slot, code);
2086     }
2087   }
2088
2089
2090   static inline void EvacuateFixedArray(Map* map,
2091                                         HeapObject** slot,
2092                                         HeapObject* object) {
2093     int object_size = FixedArray::BodyDescriptor::SizeOf(map, object);
2094     EvacuateObject<POINTER_OBJECT, UNKNOWN_SIZE, kObjectAlignment>(map,
2095                                                  slot,
2096                                                  object,
2097                                                  object_size);
2098   }
2099
2100
2101   static inline void EvacuateFixedDoubleArray(Map* map,
2102                                               HeapObject** slot,
2103                                               HeapObject* object) {
2104     int length = reinterpret_cast<FixedDoubleArray*>(object)->length();
2105     int object_size = FixedDoubleArray::SizeFor(length);
2106     EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE, kDoubleAlignment>(
2107         map,
2108         slot,
2109         object,
2110         object_size);
2111   }
2112
2113
2114   static inline void EvacuateByteArray(Map* map,
2115                                        HeapObject** slot,
2116                                        HeapObject* object) {
2117     int object_size = reinterpret_cast<ByteArray*>(object)->ByteArraySize();
2118     EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE, kObjectAlignment>(
2119         map, slot, object, object_size);
2120   }
2121
2122
2123   static inline void EvacuateSeqOneByteString(Map* map,
2124                                             HeapObject** slot,
2125                                             HeapObject* object) {
2126     int object_size = SeqOneByteString::cast(object)->
2127         SeqOneByteStringSize(map->instance_type());
2128     EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE, kObjectAlignment>(
2129         map, slot, object, object_size);
2130   }
2131
2132
2133   static inline void EvacuateSeqTwoByteString(Map* map,
2134                                               HeapObject** slot,
2135                                               HeapObject* object) {
2136     int object_size = SeqTwoByteString::cast(object)->
2137         SeqTwoByteStringSize(map->instance_type());
2138     EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE, kObjectAlignment>(
2139         map, slot, object, object_size);
2140   }
2141
2142
2143   static inline bool IsShortcutCandidate(int type) {
2144     return ((type & kShortcutTypeMask) == kShortcutTypeTag);
2145   }
2146
2147   static inline void EvacuateShortcutCandidate(Map* map,
2148                                                HeapObject** slot,
2149                                                HeapObject* object) {
2150     ASSERT(IsShortcutCandidate(map->instance_type()));
2151
2152     Heap* heap = map->GetHeap();
2153
2154     if (marks_handling == IGNORE_MARKS &&
2155         ConsString::cast(object)->unchecked_second() ==
2156         heap->empty_string()) {
2157       HeapObject* first =
2158           HeapObject::cast(ConsString::cast(object)->unchecked_first());
2159
2160       *slot = first;
2161
2162       if (!heap->InNewSpace(first)) {
2163         object->set_map_word(MapWord::FromForwardingAddress(first));
2164         return;
2165       }
2166
2167       MapWord first_word = first->map_word();
2168       if (first_word.IsForwardingAddress()) {
2169         HeapObject* target = first_word.ToForwardingAddress();
2170
2171         *slot = target;
2172         object->set_map_word(MapWord::FromForwardingAddress(target));
2173         return;
2174       }
2175
2176       heap->DoScavengeObject(first->map(), slot, first);
2177       object->set_map_word(MapWord::FromForwardingAddress(*slot));
2178       return;
2179     }
2180
2181     int object_size = ConsString::kSize;
2182     EvacuateObject<POINTER_OBJECT, SMALL, kObjectAlignment>(
2183         map, slot, object, object_size);
2184   }
2185
2186   template<ObjectContents object_contents>
2187   class ObjectEvacuationStrategy {
2188    public:
2189     template<int object_size>
2190     static inline void VisitSpecialized(Map* map,
2191                                         HeapObject** slot,
2192                                         HeapObject* object) {
2193       EvacuateObject<object_contents, SMALL, kObjectAlignment>(
2194           map, slot, object, object_size);
2195     }
2196
2197     static inline void Visit(Map* map,
2198                              HeapObject** slot,
2199                              HeapObject* object) {
2200       int object_size = map->instance_size();
2201       EvacuateObject<object_contents, SMALL, kObjectAlignment>(
2202           map, slot, object, object_size);
2203     }
2204   };
2205
2206   static VisitorDispatchTable<ScavengingCallback> table_;
2207 };
2208
2209
2210 template<MarksHandling marks_handling,
2211          LoggingAndProfiling logging_and_profiling_mode>
2212 VisitorDispatchTable<ScavengingCallback>
2213     ScavengingVisitor<marks_handling, logging_and_profiling_mode>::table_;
2214
2215
2216 static void InitializeScavengingVisitorsTables() {
2217   ScavengingVisitor<TRANSFER_MARKS,
2218                     LOGGING_AND_PROFILING_DISABLED>::Initialize();
2219   ScavengingVisitor<IGNORE_MARKS, LOGGING_AND_PROFILING_DISABLED>::Initialize();
2220   ScavengingVisitor<TRANSFER_MARKS,
2221                     LOGGING_AND_PROFILING_ENABLED>::Initialize();
2222   ScavengingVisitor<IGNORE_MARKS, LOGGING_AND_PROFILING_ENABLED>::Initialize();
2223 }
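// All four <MarksHandling, LoggingAndProfiling> combinations are initialized
// eagerly here so that SelectScavengingVisitorsTable() below only has to copy
// the prebuilt dispatch table matching the current incremental-marking and
// profiling state.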
2224
2225
2226 void Heap::SelectScavengingVisitorsTable() {
2227   bool logging_and_profiling =
2228       isolate()->logger()->is_logging() ||
2229       isolate()->cpu_profiler()->is_profiling() ||
2230       (isolate()->heap_profiler() != NULL &&
2231        isolate()->heap_profiler()->is_profiling());
2232
2233   if (!incremental_marking()->IsMarking()) {
2234     if (!logging_and_profiling) {
2235       scavenging_visitors_table_.CopyFrom(
2236           ScavengingVisitor<IGNORE_MARKS,
2237                             LOGGING_AND_PROFILING_DISABLED>::GetTable());
2238     } else {
2239       scavenging_visitors_table_.CopyFrom(
2240           ScavengingVisitor<IGNORE_MARKS,
2241                             LOGGING_AND_PROFILING_ENABLED>::GetTable());
2242     }
2243   } else {
2244     if (!logging_and_profiling) {
2245       scavenging_visitors_table_.CopyFrom(
2246           ScavengingVisitor<TRANSFER_MARKS,
2247                             LOGGING_AND_PROFILING_DISABLED>::GetTable());
2248     } else {
2249       scavenging_visitors_table_.CopyFrom(
2250           ScavengingVisitor<TRANSFER_MARKS,
2251                             LOGGING_AND_PROFILING_ENABLED>::GetTable());
2252     }
2253
2254     if (incremental_marking()->IsCompacting()) {
2255       // When compacting, forbid short-circuiting of cons strings.
2256       // Scavenging code relies on the fact that new space objects
2257       // can't be evacuated into evacuation candidates, but
2258       // short-circuiting violates this assumption.
2259       scavenging_visitors_table_.Register(
2260           StaticVisitorBase::kVisitShortcutCandidate,
2261           scavenging_visitors_table_.GetVisitorById(
2262               StaticVisitorBase::kVisitConsString));
2263     }
2264   }
2265 }
2266
2267
2268 void Heap::ScavengeObjectSlow(HeapObject** p, HeapObject* object) {
2269   SLOW_ASSERT(HEAP->InFromSpace(object));
2270   MapWord first_word = object->map_word();
2271   SLOW_ASSERT(!first_word.IsForwardingAddress());
2272   Map* map = first_word.ToMap();
2273   map->GetHeap()->DoScavengeObject(map, p, object);
2274 }
2275
2276
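// Allocates a "partial" map during bootstrapping: only the fields that do not
// depend on other roots are initialized here (the meta map, instance type and
// size, visitor id, bit fields); prototype, constructor, code cache, back
// pointer and descriptors are filled in later by CreateInitialMaps() once
// those roots exist.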
2277 MaybeObject* Heap::AllocatePartialMap(InstanceType instance_type,
2278                                       int instance_size) {
2279   Object* result;
2280   MaybeObject* maybe_result = AllocateRawMap();
2281   if (!maybe_result->ToObject(&result)) return maybe_result;
2282
2283   // Map::cast cannot be used due to uninitialized map field.
2284   reinterpret_cast<Map*>(result)->set_map(raw_unchecked_meta_map());
2285   reinterpret_cast<Map*>(result)->set_instance_type(instance_type);
2286   reinterpret_cast<Map*>(result)->set_instance_size(instance_size);
2287   reinterpret_cast<Map*>(result)->set_visitor_id(
2288         StaticVisitorBase::GetVisitorId(instance_type, instance_size));
2289   reinterpret_cast<Map*>(result)->set_inobject_properties(0);
2290   reinterpret_cast<Map*>(result)->set_pre_allocated_property_fields(0);
2291   reinterpret_cast<Map*>(result)->set_unused_property_fields(0);
2292   reinterpret_cast<Map*>(result)->set_bit_field(0);
2293   reinterpret_cast<Map*>(result)->set_bit_field2(0);
2294   int bit_field3 = Map::EnumLengthBits::encode(Map::kInvalidEnumCache) |
2295                    Map::OwnsDescriptors::encode(true);
2296   reinterpret_cast<Map*>(result)->set_bit_field3(bit_field3);
2297   return result;
2298 }
2299
2300
2301 MaybeObject* Heap::AllocateMap(InstanceType instance_type,
2302                                int instance_size,
2303                                ElementsKind elements_kind) {
2304   Object* result;
2305   MaybeObject* maybe_result = AllocateRawMap();
2306   if (!maybe_result->To(&result)) return maybe_result;
2307
2308   Map* map = reinterpret_cast<Map*>(result);
2309   map->set_map_no_write_barrier(meta_map());
2310   map->set_instance_type(instance_type);
2311   map->set_visitor_id(
2312       StaticVisitorBase::GetVisitorId(instance_type, instance_size));
2313   map->set_prototype(null_value(), SKIP_WRITE_BARRIER);
2314   map->set_constructor(null_value(), SKIP_WRITE_BARRIER);
2315   map->set_instance_size(instance_size);
2316   map->set_inobject_properties(0);
2317   map->set_pre_allocated_property_fields(0);
2318   map->set_code_cache(empty_fixed_array(), SKIP_WRITE_BARRIER);
2319   map->set_dependent_code(DependentCode::cast(empty_fixed_array()),
2320                           SKIP_WRITE_BARRIER);
2321   map->init_back_pointer(undefined_value());
2322   map->set_unused_property_fields(0);
2323   map->set_instance_descriptors(empty_descriptor_array());
2324   map->set_bit_field(0);
2325   map->set_bit_field2(1 << Map::kIsExtensible);
2326   int bit_field3 = Map::EnumLengthBits::encode(Map::kInvalidEnumCache) |
2327                    Map::OwnsDescriptors::encode(true);
2328   map->set_bit_field3(bit_field3);
2329   map->set_elements_kind(elements_kind);
2330
2331   return map;
2332 }
2333
2334
2335 MaybeObject* Heap::AllocateCodeCache() {
2336   CodeCache* code_cache;
2337   { MaybeObject* maybe_code_cache = AllocateStruct(CODE_CACHE_TYPE);
2338     if (!maybe_code_cache->To(&code_cache)) return maybe_code_cache;
2339   }
2340   code_cache->set_default_cache(empty_fixed_array(), SKIP_WRITE_BARRIER);
2341   code_cache->set_normal_type_cache(undefined_value(), SKIP_WRITE_BARRIER);
2342   return code_cache;
2343 }
2344
2345
2346 MaybeObject* Heap::AllocatePolymorphicCodeCache() {
2347   return AllocateStruct(POLYMORPHIC_CODE_CACHE_TYPE);
2348 }
2349
2350
2351 MaybeObject* Heap::AllocateAccessorPair() {
2352   AccessorPair* accessors;
2353   { MaybeObject* maybe_accessors = AllocateStruct(ACCESSOR_PAIR_TYPE);
2354     if (!maybe_accessors->To(&accessors)) return maybe_accessors;
2355   }
2356   accessors->set_getter(the_hole_value(), SKIP_WRITE_BARRIER);
2357   accessors->set_setter(the_hole_value(), SKIP_WRITE_BARRIER);
2358   return accessors;
2359 }
2360
2361
2362 MaybeObject* Heap::AllocateTypeFeedbackInfo() {
2363   TypeFeedbackInfo* info;
2364   { MaybeObject* maybe_info = AllocateStruct(TYPE_FEEDBACK_INFO_TYPE);
2365     if (!maybe_info->To(&info)) return maybe_info;
2366   }
2367   info->initialize_storage();
2368   info->set_type_feedback_cells(TypeFeedbackCells::cast(empty_fixed_array()),
2369                                 SKIP_WRITE_BARRIER);
2370   return info;
2371 }
2372
2373
2374 MaybeObject* Heap::AllocateAliasedArgumentsEntry(int aliased_context_slot) {
2375   AliasedArgumentsEntry* entry;
2376   { MaybeObject* maybe_entry = AllocateStruct(ALIASED_ARGUMENTS_ENTRY_TYPE);
2377     if (!maybe_entry->To(&entry)) return maybe_entry;
2378   }
2379   entry->set_aliased_context_slot(aliased_context_slot);
2380   return entry;
2381 }
2382
2383
2384 const Heap::StringTypeTable Heap::string_type_table[] = {
2385 #define STRING_TYPE_ELEMENT(type, size, name, camel_name)                      \
2386   {type, size, k##camel_name##MapRootIndex},
2387   STRING_TYPE_LIST(STRING_TYPE_ELEMENT)
2388 #undef STRING_TYPE_ELEMENT
2389 };
2390
2391
2392 const Heap::ConstantStringTable Heap::constant_string_table[] = {
2393 #define CONSTANT_STRING_ELEMENT(name, contents)                                \
2394   {contents, k##name##RootIndex},
2395   INTERNALIZED_STRING_LIST(CONSTANT_STRING_ELEMENT)
2396 #undef CONSTANT_STRING_ELEMENT
2397 };
2398
2399
2400 const Heap::StructTable Heap::struct_table[] = {
2401 #define STRUCT_TABLE_ELEMENT(NAME, Name, name)                                 \
2402   { NAME##_TYPE, Name::kSize, k##Name##MapRootIndex },
2403   STRUCT_LIST(STRUCT_TABLE_ELEMENT)
2404 #undef STRUCT_TABLE_ELEMENT
2405 };
2406
2407
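// Bootstraps the map hierarchy: first allocates partial maps for the meta map,
// fixed arrays and oddballs (enough to allocate the empty fixed array,
// null/undefined and the empty descriptor array), then patches those partial
// maps up and allocates the remaining initial maps with the full AllocateMap().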
2408 bool Heap::CreateInitialMaps() {
2409   Object* obj;
2410   { MaybeObject* maybe_obj = AllocatePartialMap(MAP_TYPE, Map::kSize);
2411     if (!maybe_obj->ToObject(&obj)) return false;
2412   }
2413   // Map::cast cannot be used due to uninitialized map field.
2414   Map* new_meta_map = reinterpret_cast<Map*>(obj);
2415   set_meta_map(new_meta_map);
2416   new_meta_map->set_map(new_meta_map);
2417
2418   { MaybeObject* maybe_obj =
2419         AllocatePartialMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2420     if (!maybe_obj->ToObject(&obj)) return false;
2421   }
2422   set_fixed_array_map(Map::cast(obj));
2423
2424   { MaybeObject* maybe_obj = AllocatePartialMap(ODDBALL_TYPE, Oddball::kSize);
2425     if (!maybe_obj->ToObject(&obj)) return false;
2426   }
2427   set_oddball_map(Map::cast(obj));
2428
2429   // Allocate the empty array.
2430   { MaybeObject* maybe_obj = AllocateEmptyFixedArray();
2431     if (!maybe_obj->ToObject(&obj)) return false;
2432   }
2433   set_empty_fixed_array(FixedArray::cast(obj));
2434
2435   { MaybeObject* maybe_obj = Allocate(oddball_map(), OLD_POINTER_SPACE);
2436     if (!maybe_obj->ToObject(&obj)) return false;
2437   }
2438   set_null_value(Oddball::cast(obj));
2439   Oddball::cast(obj)->set_kind(Oddball::kNull);
2440
2441   { MaybeObject* maybe_obj = Allocate(oddball_map(), OLD_POINTER_SPACE);
2442     if (!maybe_obj->ToObject(&obj)) return false;
2443   }
2444   set_undefined_value(Oddball::cast(obj));
2445   Oddball::cast(obj)->set_kind(Oddball::kUndefined);
2446   ASSERT(!InNewSpace(undefined_value()));
2447
2448   // Allocate the empty descriptor array.
2449   { MaybeObject* maybe_obj = AllocateEmptyFixedArray();
2450     if (!maybe_obj->ToObject(&obj)) return false;
2451   }
2452   set_empty_descriptor_array(DescriptorArray::cast(obj));
2453
2454   // Fix the instance_descriptors for the existing maps.
2455   meta_map()->set_code_cache(empty_fixed_array());
2456   meta_map()->set_dependent_code(DependentCode::cast(empty_fixed_array()));
2457   meta_map()->init_back_pointer(undefined_value());
2458   meta_map()->set_instance_descriptors(empty_descriptor_array());
2459
2460   fixed_array_map()->set_code_cache(empty_fixed_array());
2461   fixed_array_map()->set_dependent_code(
2462       DependentCode::cast(empty_fixed_array()));
2463   fixed_array_map()->init_back_pointer(undefined_value());
2464   fixed_array_map()->set_instance_descriptors(empty_descriptor_array());
2465
2466   oddball_map()->set_code_cache(empty_fixed_array());
2467   oddball_map()->set_dependent_code(DependentCode::cast(empty_fixed_array()));
2468   oddball_map()->init_back_pointer(undefined_value());
2469   oddball_map()->set_instance_descriptors(empty_descriptor_array());
2470
2471   // Fix prototype object for existing maps.
2472   meta_map()->set_prototype(null_value());
2473   meta_map()->set_constructor(null_value());
2474
2475   fixed_array_map()->set_prototype(null_value());
2476   fixed_array_map()->set_constructor(null_value());
2477
2478   oddball_map()->set_prototype(null_value());
2479   oddball_map()->set_constructor(null_value());
2480
2481   { MaybeObject* maybe_obj =
2482         AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2483     if (!maybe_obj->ToObject(&obj)) return false;
2484   }
2485   set_fixed_cow_array_map(Map::cast(obj));
2486   ASSERT(fixed_array_map() != fixed_cow_array_map());
2487
2488   { MaybeObject* maybe_obj =
2489         AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2490     if (!maybe_obj->ToObject(&obj)) return false;
2491   }
2492   set_scope_info_map(Map::cast(obj));
2493
2494   { MaybeObject* maybe_obj = AllocateMap(HEAP_NUMBER_TYPE, HeapNumber::kSize);
2495     if (!maybe_obj->ToObject(&obj)) return false;
2496   }
2497   set_heap_number_map(Map::cast(obj));
2498
2499   { MaybeObject* maybe_obj = AllocateMap(SYMBOL_TYPE, Symbol::kSize);
2500     if (!maybe_obj->ToObject(&obj)) return false;
2501   }
2502   set_symbol_map(Map::cast(obj));
2503
2504   { MaybeObject* maybe_obj = AllocateMap(FOREIGN_TYPE, Foreign::kSize);
2505     if (!maybe_obj->ToObject(&obj)) return false;
2506   }
2507   set_foreign_map(Map::cast(obj));
2508
2509   for (unsigned i = 0; i < ARRAY_SIZE(string_type_table); i++) {
2510     const StringTypeTable& entry = string_type_table[i];
2511     { MaybeObject* maybe_obj = AllocateMap(entry.type, entry.size);
2512       if (!maybe_obj->ToObject(&obj)) return false;
2513     }
2514     roots_[entry.index] = Map::cast(obj);
2515   }
2516
2517   { MaybeObject* maybe_obj = AllocateMap(STRING_TYPE, kVariableSizeSentinel);
2518     if (!maybe_obj->ToObject(&obj)) return false;
2519   }
2520   set_undetectable_string_map(Map::cast(obj));
2521   Map::cast(obj)->set_is_undetectable();
2522
2523   { MaybeObject* maybe_obj =
2524         AllocateMap(ASCII_STRING_TYPE, kVariableSizeSentinel);
2525     if (!maybe_obj->ToObject(&obj)) return false;
2526   }
2527   set_undetectable_ascii_string_map(Map::cast(obj));
2528   Map::cast(obj)->set_is_undetectable();
2529
2530   { MaybeObject* maybe_obj =
2531         AllocateMap(FIXED_DOUBLE_ARRAY_TYPE, kVariableSizeSentinel);
2532     if (!maybe_obj->ToObject(&obj)) return false;
2533   }
2534   set_fixed_double_array_map(Map::cast(obj));
2535
2536   { MaybeObject* maybe_obj =
2537         AllocateMap(BYTE_ARRAY_TYPE, kVariableSizeSentinel);
2538     if (!maybe_obj->ToObject(&obj)) return false;
2539   }
2540   set_byte_array_map(Map::cast(obj));
2541
2542   { MaybeObject* maybe_obj =
2543         AllocateMap(FREE_SPACE_TYPE, kVariableSizeSentinel);
2544     if (!maybe_obj->ToObject(&obj)) return false;
2545   }
2546   set_free_space_map(Map::cast(obj));
2547
2548   { MaybeObject* maybe_obj = AllocateByteArray(0, TENURED);
2549     if (!maybe_obj->ToObject(&obj)) return false;
2550   }
2551   set_empty_byte_array(ByteArray::cast(obj));
2552
2553   { MaybeObject* maybe_obj =
2554         AllocateMap(EXTERNAL_PIXEL_ARRAY_TYPE, ExternalArray::kAlignedSize);
2555     if (!maybe_obj->ToObject(&obj)) return false;
2556   }
2557   set_external_pixel_array_map(Map::cast(obj));
2558
2559   { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_BYTE_ARRAY_TYPE,
2560                                          ExternalArray::kAlignedSize);
2561     if (!maybe_obj->ToObject(&obj)) return false;
2562   }
2563   set_external_byte_array_map(Map::cast(obj));
2564
2565   { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE,
2566                                          ExternalArray::kAlignedSize);
2567     if (!maybe_obj->ToObject(&obj)) return false;
2568   }
2569   set_external_unsigned_byte_array_map(Map::cast(obj));
2570
2571   { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_SHORT_ARRAY_TYPE,
2572                                          ExternalArray::kAlignedSize);
2573     if (!maybe_obj->ToObject(&obj)) return false;
2574   }
2575   set_external_short_array_map(Map::cast(obj));
2576
2577   { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE,
2578                                          ExternalArray::kAlignedSize);
2579     if (!maybe_obj->ToObject(&obj)) return false;
2580   }
2581   set_external_unsigned_short_array_map(Map::cast(obj));
2582
2583   { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_INT_ARRAY_TYPE,
2584                                          ExternalArray::kAlignedSize);
2585     if (!maybe_obj->ToObject(&obj)) return false;
2586   }
2587   set_external_int_array_map(Map::cast(obj));
2588
2589   { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_UNSIGNED_INT_ARRAY_TYPE,
2590                                          ExternalArray::kAlignedSize);
2591     if (!maybe_obj->ToObject(&obj)) return false;
2592   }
2593   set_external_unsigned_int_array_map(Map::cast(obj));
2594
2595   { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_FLOAT_ARRAY_TYPE,
2596                                          ExternalArray::kAlignedSize);
2597     if (!maybe_obj->ToObject(&obj)) return false;
2598   }
2599   set_external_float_array_map(Map::cast(obj));
2600
2601   { MaybeObject* maybe_obj =
2602         AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2603     if (!maybe_obj->ToObject(&obj)) return false;
2604   }
2605   set_non_strict_arguments_elements_map(Map::cast(obj));
2606
2607   { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_DOUBLE_ARRAY_TYPE,
2608                                          ExternalArray::kAlignedSize);
2609     if (!maybe_obj->ToObject(&obj)) return false;
2610   }
2611   set_external_double_array_map(Map::cast(obj));
2612
2613   { MaybeObject* maybe_obj = AllocateEmptyExternalArray(kExternalByteArray);
2614     if (!maybe_obj->ToObject(&obj)) return false;
2615   }
2616   set_empty_external_byte_array(ExternalArray::cast(obj));
2617
2618   { MaybeObject* maybe_obj =
2619         AllocateEmptyExternalArray(kExternalUnsignedByteArray);
2620     if (!maybe_obj->ToObject(&obj)) return false;
2621   }
2622   set_empty_external_unsigned_byte_array(ExternalArray::cast(obj));
2623
2624   { MaybeObject* maybe_obj = AllocateEmptyExternalArray(kExternalShortArray);
2625     if (!maybe_obj->ToObject(&obj)) return false;
2626   }
2627   set_empty_external_short_array(ExternalArray::cast(obj));
2628
2629   { MaybeObject* maybe_obj = AllocateEmptyExternalArray(
2630       kExternalUnsignedShortArray);
2631     if (!maybe_obj->ToObject(&obj)) return false;
2632   }
2633   set_empty_external_unsigned_short_array(ExternalArray::cast(obj));
2634
2635   { MaybeObject* maybe_obj = AllocateEmptyExternalArray(kExternalIntArray);
2636     if (!maybe_obj->ToObject(&obj)) return false;
2637   }
2638   set_empty_external_int_array(ExternalArray::cast(obj));
2639
2640   { MaybeObject* maybe_obj =
2641         AllocateEmptyExternalArray(kExternalUnsignedIntArray);
2642     if (!maybe_obj->ToObject(&obj)) return false;
2643   }
2644   set_empty_external_unsigned_int_array(ExternalArray::cast(obj));
2645
2646   { MaybeObject* maybe_obj = AllocateEmptyExternalArray(kExternalFloatArray);
2647     if (!maybe_obj->ToObject(&obj)) return false;
2648   }
2649   set_empty_external_float_array(ExternalArray::cast(obj));
2650
2651   { MaybeObject* maybe_obj = AllocateEmptyExternalArray(kExternalDoubleArray);
2652     if (!maybe_obj->ToObject(&obj)) return false;
2653   }
2654   set_empty_external_double_array(ExternalArray::cast(obj));
2655
2656   { MaybeObject* maybe_obj = AllocateEmptyExternalArray(kExternalPixelArray);
2657     if (!maybe_obj->ToObject(&obj)) return false;
2658   }
2659   set_empty_external_pixel_array(ExternalArray::cast(obj));
2660
2661   { MaybeObject* maybe_obj = AllocateMap(CODE_TYPE, kVariableSizeSentinel);
2662     if (!maybe_obj->ToObject(&obj)) return false;
2663   }
2664   set_code_map(Map::cast(obj));
2665
2666   { MaybeObject* maybe_obj = AllocateMap(JS_GLOBAL_PROPERTY_CELL_TYPE,
2667                                          JSGlobalPropertyCell::kSize);
2668     if (!maybe_obj->ToObject(&obj)) return false;
2669   }
2670   set_global_property_cell_map(Map::cast(obj));
2671
2672   { MaybeObject* maybe_obj = AllocateMap(FILLER_TYPE, kPointerSize);
2673     if (!maybe_obj->ToObject(&obj)) return false;
2674   }
2675   set_one_pointer_filler_map(Map::cast(obj));
2676
2677   { MaybeObject* maybe_obj = AllocateMap(FILLER_TYPE, 2 * kPointerSize);
2678     if (!maybe_obj->ToObject(&obj)) return false;
2679   }
2680   set_two_pointer_filler_map(Map::cast(obj));
2681
2682   for (unsigned i = 0; i < ARRAY_SIZE(struct_table); i++) {
2683     const StructTable& entry = struct_table[i];
2684     { MaybeObject* maybe_obj = AllocateMap(entry.type, entry.size);
2685       if (!maybe_obj->ToObject(&obj)) return false;
2686     }
2687     roots_[entry.index] = Map::cast(obj);
2688   }
2689
2690   { MaybeObject* maybe_obj =
2691         AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2692     if (!maybe_obj->ToObject(&obj)) return false;
2693   }
2694   set_hash_table_map(Map::cast(obj));
2695
2696   { MaybeObject* maybe_obj =
2697         AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2698     if (!maybe_obj->ToObject(&obj)) return false;
2699   }
2700   set_function_context_map(Map::cast(obj));
2701
2702   { MaybeObject* maybe_obj =
2703         AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2704     if (!maybe_obj->ToObject(&obj)) return false;
2705   }
2706   set_catch_context_map(Map::cast(obj));
2707
2708   { MaybeObject* maybe_obj =
2709         AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2710     if (!maybe_obj->ToObject(&obj)) return false;
2711   }
2712   set_with_context_map(Map::cast(obj));
2713
2714   { MaybeObject* maybe_obj =
2715         AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2716     if (!maybe_obj->ToObject(&obj)) return false;
2717   }
2718   set_block_context_map(Map::cast(obj));
2719
2720   { MaybeObject* maybe_obj =
2721         AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2722     if (!maybe_obj->ToObject(&obj)) return false;
2723   }
2724   set_module_context_map(Map::cast(obj));
2725
2726   { MaybeObject* maybe_obj =
2727         AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2728     if (!maybe_obj->ToObject(&obj)) return false;
2729   }
2730   set_global_context_map(Map::cast(obj));
2731
2732   { MaybeObject* maybe_obj =
2733         AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2734     if (!maybe_obj->ToObject(&obj)) return false;
2735   }
2736   Map* native_context_map = Map::cast(obj);
2737   native_context_map->set_dictionary_map(true);
2738   native_context_map->set_visitor_id(StaticVisitorBase::kVisitNativeContext);
2739   set_native_context_map(native_context_map);
2740
2741   { MaybeObject* maybe_obj = AllocateMap(SHARED_FUNCTION_INFO_TYPE,
2742                                          SharedFunctionInfo::kAlignedSize);
2743     if (!maybe_obj->ToObject(&obj)) return false;
2744   }
2745   set_shared_function_info_map(Map::cast(obj));
2746
2747   { MaybeObject* maybe_obj = AllocateMap(JS_MESSAGE_OBJECT_TYPE,
2748                                          JSMessageObject::kSize);
2749     if (!maybe_obj->ToObject(&obj)) return false;
2750   }
2751   set_message_object_map(Map::cast(obj));
2752
2753   Map* external_map;
2754   { MaybeObject* maybe_obj =
2755         AllocateMap(JS_OBJECT_TYPE, JSObject::kHeaderSize + kPointerSize);
2756     if (!maybe_obj->To(&external_map)) return false;
2757   }
2758   external_map->set_is_extensible(false);
2759   set_external_map(external_map);
2760
2761   ASSERT(!InNewSpace(empty_fixed_array()));
2762   return true;
2763 }
2764
2765
2766 MaybeObject* Heap::AllocateHeapNumber(double value, PretenureFlag pretenure) {
2767   // Statically ensure that it is safe to allocate heap numbers in paged
2768   // spaces.
2769   STATIC_ASSERT(HeapNumber::kSize <= Page::kNonCodeObjectAreaSize);
2770   AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
2771
2772   Object* result;
2773   { MaybeObject* maybe_result =
2774         AllocateRaw(HeapNumber::kSize, space, OLD_DATA_SPACE);
2775     if (!maybe_result->ToObject(&result)) return maybe_result;
2776   }
2777
2778   HeapObject::cast(result)->set_map_no_write_barrier(heap_number_map());
2779   HeapNumber::cast(result)->set_value(value);
2780   return result;
2781 }
2782
2783
2784 MaybeObject* Heap::AllocateHeapNumber(double value) {
2785   // Use the general version if we're forced to always allocate.
2786   if (always_allocate()) return AllocateHeapNumber(value, TENURED);
2787
2788   // This version of AllocateHeapNumber is optimized for
2789   // allocation in new space.
2790   STATIC_ASSERT(HeapNumber::kSize <= Page::kMaxNonCodeHeapObjectSize);
2791   Object* result;
2792   { MaybeObject* maybe_result = new_space_.AllocateRaw(HeapNumber::kSize);
2793     if (!maybe_result->ToObject(&result)) return maybe_result;
2794   }
2795   HeapObject::cast(result)->set_map_no_write_barrier(heap_number_map());
2796   HeapNumber::cast(result)->set_value(value);
2797   return result;
2798 }
2799
2800
2801 MaybeObject* Heap::AllocateJSGlobalPropertyCell(Object* value) {
2802   Object* result;
2803   { MaybeObject* maybe_result = AllocateRawCell();
2804     if (!maybe_result->ToObject(&result)) return maybe_result;
2805   }
2806   HeapObject::cast(result)->set_map_no_write_barrier(
2807       global_property_cell_map());
2808   JSGlobalPropertyCell::cast(result)->set_value(value);
2809   return result;
2810 }
2811
2812
2813 MaybeObject* Heap::AllocateBox(Object* value, PretenureFlag pretenure) {
2814   Box* result;
2815   MaybeObject* maybe_result = AllocateStruct(BOX_TYPE);
2816   if (!maybe_result->To(&result)) return maybe_result;
2817   result->set_value(value);
2818   return result;
2819 }
2820
2821
2822 MaybeObject* Heap::CreateOddball(const char* to_string,
2823                                  Object* to_number,
2824                                  byte kind) {
2825   Object* result;
2826   { MaybeObject* maybe_result = Allocate(oddball_map(), OLD_POINTER_SPACE);
2827     if (!maybe_result->ToObject(&result)) return maybe_result;
2828   }
2829   return Oddball::cast(result)->Initialize(to_string, to_number, kind);
2830 }
2831
2832
2833 bool Heap::CreateApiObjects() {
2834   Object* obj;
2835
2836   { MaybeObject* maybe_obj = AllocateMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
2837     if (!maybe_obj->ToObject(&obj)) return false;
2838   }
2839   // Don't use Smi-only elements optimizations for objects with the neander
2840   // map. There are too many cases where element values are set directly, with
2841   // no bottleneck to trap the Smi-only -> fast elements transition, and there
2842   // appears to be no benefit in optimizing this case.
2843   Map* new_neander_map = Map::cast(obj);
2844   new_neander_map->set_elements_kind(TERMINAL_FAST_ELEMENTS_KIND);
2845   set_neander_map(new_neander_map);
2846
2847   { MaybeObject* maybe_obj = AllocateJSObjectFromMap(neander_map());
2848     if (!maybe_obj->ToObject(&obj)) return false;
2849   }
2850   Object* elements;
2851   { MaybeObject* maybe_elements = AllocateFixedArray(2);
2852     if (!maybe_elements->ToObject(&elements)) return false;
2853   }
2854   FixedArray::cast(elements)->set(0, Smi::FromInt(0));
2855   JSObject::cast(obj)->set_elements(FixedArray::cast(elements));
2856   set_message_listeners(JSObject::cast(obj));
2857
2858   return true;
2859 }
2860
2861
2862 void Heap::CreateJSEntryStub() {
2863   JSEntryStub stub;
2864   set_js_entry_code(*stub.GetCode(isolate()));
2865 }
2866
2867
2868 void Heap::CreateJSConstructEntryStub() {
2869   JSConstructEntryStub stub;
2870   set_js_construct_entry_code(*stub.GetCode(isolate()));
2871 }
2872
2873
2874 void Heap::CreateFixedStubs() {
2875   // Here we create roots for fixed stubs. They are needed at GC
2876   // for cooking and uncooking (check out frames.cc).
2877   // This eliminates the need for doing a dictionary lookup in the
2878   // stub cache for these stubs.
2879   HandleScope scope(isolate());
2880   // gcc-4.4 has a problem generating correct code for the following snippet:
2881   // {  JSEntryStub stub;
2882   //    js_entry_code_ = *stub.GetCode();
2883   // }
2884   // {  JSConstructEntryStub stub;
2885   //    js_construct_entry_code_ = *stub.GetCode();
2886   // }
2887   // To work around the problem, make separate functions without inlining.
2888   Heap::CreateJSEntryStub();
2889   Heap::CreateJSConstructEntryStub();
2890
2891   // Create stubs that should be there, so we don't unexpectedly have to
2892   // create them if we need them during the creation of another stub.
2893   // Stub creation mixes raw pointers and handles in an unsafe manner so
2894   // we cannot create stubs while we are creating stubs.
2895   CodeStub::GenerateStubsAheadOfTime(isolate());
2896 }
2897
2898
2899 bool Heap::CreateInitialObjects() {
2900   Object* obj;
2901
2902   // The -0 value must be set before NumberFromDouble works.
2903   { MaybeObject* maybe_obj = AllocateHeapNumber(-0.0, TENURED);
2904     if (!maybe_obj->ToObject(&obj)) return false;
2905   }
2906   set_minus_zero_value(HeapNumber::cast(obj));
2907   ASSERT(std::signbit(minus_zero_value()->Number()) != 0);
2908
2909   { MaybeObject* maybe_obj = AllocateHeapNumber(OS::nan_value(), TENURED);
2910     if (!maybe_obj->ToObject(&obj)) return false;
2911   }
2912   set_nan_value(HeapNumber::cast(obj));
2913
2914   { MaybeObject* maybe_obj = AllocateHeapNumber(V8_INFINITY, TENURED);
2915     if (!maybe_obj->ToObject(&obj)) return false;
2916   }
2917   set_infinity_value(HeapNumber::cast(obj));
2918
2919   // The hole has not been created yet, but we want to put something
2920   // predictable in the gaps in the string table, so let's make that Smi zero.
2921   set_the_hole_value(reinterpret_cast<Oddball*>(Smi::FromInt(0)));
2922
2923   // Allocate initial string table.
2924   { MaybeObject* maybe_obj =
2925         StringTable::Allocate(this, kInitialStringTableSize);
2926     if (!maybe_obj->ToObject(&obj)) return false;
2927   }
2928   // Don't use set_string_table() due to asserts.
2929   roots_[kStringTableRootIndex] = obj;
2930
2931   // Finish initializing oddballs after creating the string table.
2932   { MaybeObject* maybe_obj =
2933         undefined_value()->Initialize("undefined",
2934                                       nan_value(),
2935                                       Oddball::kUndefined);
2936     if (!maybe_obj->ToObject(&obj)) return false;
2937   }
2938
2939   // Initialize the null_value.
2940   { MaybeObject* maybe_obj =
2941         null_value()->Initialize("null", Smi::FromInt(0), Oddball::kNull);
2942     if (!maybe_obj->ToObject(&obj)) return false;
2943   }
2944
2945   { MaybeObject* maybe_obj = CreateOddball("true",
2946                                            Smi::FromInt(1),
2947                                            Oddball::kTrue);
2948     if (!maybe_obj->ToObject(&obj)) return false;
2949   }
2950   set_true_value(Oddball::cast(obj));
2951
2952   { MaybeObject* maybe_obj = CreateOddball("false",
2953                                            Smi::FromInt(0),
2954                                            Oddball::kFalse);
2955     if (!maybe_obj->ToObject(&obj)) return false;
2956   }
2957   set_false_value(Oddball::cast(obj));
2958
2959   { MaybeObject* maybe_obj = CreateOddball("hole",
2960                                            Smi::FromInt(-1),
2961                                            Oddball::kTheHole);
2962     if (!maybe_obj->ToObject(&obj)) return false;
2963   }
2964   set_the_hole_value(Oddball::cast(obj));
2965
2966   { MaybeObject* maybe_obj = CreateOddball("uninitialized",
2967                                            Smi::FromInt(-1),
2968                                            Oddball::kUninitialized);
2969     if (!maybe_obj->ToObject(&obj)) return false;
2970   }
2971   set_uninitialized_value(Oddball::cast(obj));
2972
2973   { MaybeObject* maybe_obj = CreateOddball("arguments_marker",
2974                                            Smi::FromInt(-4),
2975                                            Oddball::kArgumentMarker);
2976     if (!maybe_obj->ToObject(&obj)) return false;
2977   }
2978   set_arguments_marker(Oddball::cast(obj));
2979
2980   { MaybeObject* maybe_obj = CreateOddball("no_interceptor_result_sentinel",
2981                                            Smi::FromInt(-2),
2982                                            Oddball::kOther);
2983     if (!maybe_obj->ToObject(&obj)) return false;
2984   }
2985   set_no_interceptor_result_sentinel(obj);
2986
2987   { MaybeObject* maybe_obj = CreateOddball("termination_exception",
2988                                            Smi::FromInt(-3),
2989                                            Oddball::kOther);
2990     if (!maybe_obj->ToObject(&obj)) return false;
2991   }
2992   set_termination_exception(obj);
2993
2994   for (unsigned i = 0; i < ARRAY_SIZE(constant_string_table); i++) {
2995     { MaybeObject* maybe_obj =
2996           InternalizeUtf8String(constant_string_table[i].contents);
2997       if (!maybe_obj->ToObject(&obj)) return false;
2998     }
2999     roots_[constant_string_table[i].index] = String::cast(obj);
3000   }
3001
3002   // Allocate the hidden string which is used to identify the hidden properties
3003   // in JSObjects. The hash code has a special value so that it will not match
3004   // the empty string when searching for the property. It cannot be part of the
3005   // loop above because it needs to be allocated manually with the special
3006   // hash code in place. The hash code for the hidden_string is zero to ensure
3007   // that it will always be at the first entry in property descriptors.
3008   { MaybeObject* maybe_obj = AllocateOneByteInternalizedString(
3009       OneByteVector("", 0), String::kEmptyStringHash);
3010     if (!maybe_obj->ToObject(&obj)) return false;
3011   }
3012   hidden_string_ = String::cast(obj);
3013
3014   // Allocate the code_stubs dictionary. The initial size is set to avoid
3015   // expanding the dictionary during bootstrapping.
3016   { MaybeObject* maybe_obj = UnseededNumberDictionary::Allocate(this, 128);
3017     if (!maybe_obj->ToObject(&obj)) return false;
3018   }
3019   set_code_stubs(UnseededNumberDictionary::cast(obj));
3020
3021
3022   // Allocate the non_monomorphic_cache used in stub-cache.cc. The initial size
3023   // is set to avoid expanding the dictionary during bootstrapping.
3024   { MaybeObject* maybe_obj = UnseededNumberDictionary::Allocate(this, 64);
3025     if (!maybe_obj->ToObject(&obj)) return false;
3026   }
3027   set_non_monomorphic_cache(UnseededNumberDictionary::cast(obj));
3028
3029   { MaybeObject* maybe_obj = AllocatePolymorphicCodeCache();
3030     if (!maybe_obj->ToObject(&obj)) return false;
3031   }
3032   set_polymorphic_code_cache(PolymorphicCodeCache::cast(obj));
3033
3034   set_instanceof_cache_function(Smi::FromInt(0));
3035   set_instanceof_cache_map(Smi::FromInt(0));
3036   set_instanceof_cache_answer(Smi::FromInt(0));
3037
3038   CreateFixedStubs();
3039
3040   // Allocate the dictionary of intrinsic function names.
3041   { MaybeObject* maybe_obj =
3042         NameDictionary::Allocate(this, Runtime::kNumFunctions);
3043     if (!maybe_obj->ToObject(&obj)) return false;
3044   }
3045   { MaybeObject* maybe_obj = Runtime::InitializeIntrinsicFunctionNames(this,
3046                                                                        obj);
3047     if (!maybe_obj->ToObject(&obj)) return false;
3048   }
3049   set_intrinsic_function_names(NameDictionary::cast(obj));
3050
3051   { MaybeObject* maybe_obj = AllocateInitialNumberStringCache();
3052     if (!maybe_obj->ToObject(&obj)) return false;
3053   }
3054   set_number_string_cache(FixedArray::cast(obj));
3055
3056   // Allocate cache for single character one byte strings.
3057   { MaybeObject* maybe_obj =
3058         AllocateFixedArray(String::kMaxOneByteCharCode + 1, TENURED);
3059     if (!maybe_obj->ToObject(&obj)) return false;
3060   }
3061   set_single_character_string_cache(FixedArray::cast(obj));
3062
3063   // Allocate cache for string split.
3064   { MaybeObject* maybe_obj = AllocateFixedArray(
3065       RegExpResultsCache::kRegExpResultsCacheSize, TENURED);
3066     if (!maybe_obj->ToObject(&obj)) return false;
3067   }
3068   set_string_split_cache(FixedArray::cast(obj));
3069
3070   { MaybeObject* maybe_obj = AllocateFixedArray(
3071       RegExpResultsCache::kRegExpResultsCacheSize, TENURED);
3072     if (!maybe_obj->ToObject(&obj)) return false;
3073   }
3074   set_regexp_multiple_cache(FixedArray::cast(obj));
3075
3076   // Allocate cache for external strings pointing to native source code.
3077   { MaybeObject* maybe_obj = AllocateFixedArray(Natives::GetBuiltinsCount());
3078     if (!maybe_obj->ToObject(&obj)) return false;
3079   }
3080   set_natives_source_cache(FixedArray::cast(obj));
3081
3082   // Allocate object to hold object observation state.
3083   { MaybeObject* maybe_obj = AllocateMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
3084     if (!maybe_obj->ToObject(&obj)) return false;
3085   }
3086   { MaybeObject* maybe_obj = AllocateJSObjectFromMap(Map::cast(obj));
3087     if (!maybe_obj->ToObject(&obj)) return false;
3088   }
3089   set_observation_state(JSObject::cast(obj));
3090
3091   { MaybeObject* maybe_obj = AllocateSymbol();
3092     if (!maybe_obj->ToObject(&obj)) return false;
3093   }
3094   set_frozen_symbol(Symbol::cast(obj));
3095
3096   { MaybeObject* maybe_obj = SeededNumberDictionary::Allocate(this, 0, TENURED);
3097     if (!maybe_obj->ToObject(&obj)) return false;
3098   }
3099   SeededNumberDictionary::cast(obj)->set_requires_slow_elements();
3100   set_empty_slow_element_dictionary(SeededNumberDictionary::cast(obj));
3101
3102   // Handling of script id generation is in Factory::NewScript.
3103   set_last_script_id(undefined_value());
3104
3105   // Initialize keyed lookup cache.
3106   isolate_->keyed_lookup_cache()->Clear();
3107
3108   // Initialize context slot cache.
3109   isolate_->context_slot_cache()->Clear();
3110
3111   // Initialize descriptor cache.
3112   isolate_->descriptor_lookup_cache()->Clear();
3113
3114   // Initialize compilation cache.
3115   isolate_->compilation_cache()->Clear();
3116
3117   return true;
3118 }
3119
3120
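// Roots not in the short list below are treated as constant once the heap has
// been initialized (they may, for instance, be embedded directly in generated
// code), so only the whitelisted entries may legitimately change afterwards.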
3121 bool Heap::RootCanBeWrittenAfterInitialization(Heap::RootListIndex root_index) {
3122   RootListIndex writable_roots[] = {
3123     kStoreBufferTopRootIndex,
3124     kStackLimitRootIndex,
3125     kNumberStringCacheRootIndex,
3126     kInstanceofCacheFunctionRootIndex,
3127     kInstanceofCacheMapRootIndex,
3128     kInstanceofCacheAnswerRootIndex,
3129     kCodeStubsRootIndex,
3130     kNonMonomorphicCacheRootIndex,
3131     kPolymorphicCodeCacheRootIndex,
3132     kLastScriptIdRootIndex,
3133     kEmptyScriptRootIndex,
3134     kRealStackLimitRootIndex,
3135     kArgumentsAdaptorDeoptPCOffsetRootIndex,
3136     kConstructStubDeoptPCOffsetRootIndex,
3137     kGetterStubDeoptPCOffsetRootIndex,
3138     kSetterStubDeoptPCOffsetRootIndex,
3139     kStringTableRootIndex,
3140   };
3141
3142   for (unsigned int i = 0; i < ARRAY_SIZE(writable_roots); i++) {
3143     if (root_index == writable_roots[i])
3144       return true;
3145   }
3146   return false;
3147 }
3148
3149
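// The results cache is a flat FixedArray used as a small hash table. Each
// entry spans kArrayEntriesPerCacheEntry consecutive slots holding the key
// string (kStringOffset), the key pattern (kPatternOffset) and the cached
// results array (kArrayOffset). The primary entry index is the string hash
// masked to the cache size and rounded down to an entry boundary; both
// Lookup() and Enter() also probe one secondary entry (the next one, wrapping
// around), making the cache effectively two-way associative. If both entries
// are taken, Enter() clears the secondary entry and overwrites the primary.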
3150 Object* RegExpResultsCache::Lookup(Heap* heap,
3151                                    String* key_string,
3152                                    Object* key_pattern,
3153                                    ResultsCacheType type) {
3154   FixedArray* cache;
3155   if (!key_string->IsInternalizedString()) return Smi::FromInt(0);
3156   if (type == STRING_SPLIT_SUBSTRINGS) {
3157     ASSERT(key_pattern->IsString());
3158     if (!key_pattern->IsInternalizedString()) return Smi::FromInt(0);
3159     cache = heap->string_split_cache();
3160   } else {
3161     ASSERT(type == REGEXP_MULTIPLE_INDICES);
3162     ASSERT(key_pattern->IsFixedArray());
3163     cache = heap->regexp_multiple_cache();
3164   }
3165
3166   uint32_t hash = key_string->Hash();
3167   uint32_t index = ((hash & (kRegExpResultsCacheSize - 1)) &
3168       ~(kArrayEntriesPerCacheEntry - 1));
3169   if (cache->get(index + kStringOffset) == key_string &&
3170       cache->get(index + kPatternOffset) == key_pattern) {
3171     return cache->get(index + kArrayOffset);
3172   }
3173   index =
3174       ((index + kArrayEntriesPerCacheEntry) & (kRegExpResultsCacheSize - 1));
3175   if (cache->get(index + kStringOffset) == key_string &&
3176       cache->get(index + kPatternOffset) == key_pattern) {
3177     return cache->get(index + kArrayOffset);
3178   }
3179   return Smi::FromInt(0);
3180 }
3181
3182
3183 void RegExpResultsCache::Enter(Heap* heap,
3184                                String* key_string,
3185                                Object* key_pattern,
3186                                FixedArray* value_array,
3187                                ResultsCacheType type) {
3188   FixedArray* cache;
3189   if (!key_string->IsInternalizedString()) return;
3190   if (type == STRING_SPLIT_SUBSTRINGS) {
3191     ASSERT(key_pattern->IsString());
3192     if (!key_pattern->IsInternalizedString()) return;
3193     cache = heap->string_split_cache();
3194   } else {
3195     ASSERT(type == REGEXP_MULTIPLE_INDICES);
3196     ASSERT(key_pattern->IsFixedArray());
3197     cache = heap->regexp_multiple_cache();
3198   }
3199
3200   uint32_t hash = key_string->Hash();
3201   uint32_t index = ((hash & (kRegExpResultsCacheSize - 1)) &
3202       ~(kArrayEntriesPerCacheEntry - 1));
3203   if (cache->get(index + kStringOffset) == Smi::FromInt(0)) {
3204     cache->set(index + kStringOffset, key_string);
3205     cache->set(index + kPatternOffset, key_pattern);
3206     cache->set(index + kArrayOffset, value_array);
3207   } else {
3208     uint32_t index2 =
3209         ((index + kArrayEntriesPerCacheEntry) & (kRegExpResultsCacheSize - 1));
3210     if (cache->get(index2 + kStringOffset) == Smi::FromInt(0)) {
3211       cache->set(index2 + kStringOffset, key_string);
3212       cache->set(index2 + kPatternOffset, key_pattern);
3213       cache->set(index2 + kArrayOffset, value_array);
3214     } else {
3215       cache->set(index2 + kStringOffset, Smi::FromInt(0));
3216       cache->set(index2 + kPatternOffset, Smi::FromInt(0));
3217       cache->set(index2 + kArrayOffset, Smi::FromInt(0));
3218       cache->set(index + kStringOffset, key_string);
3219       cache->set(index + kPatternOffset, key_pattern);
3220       cache->set(index + kArrayOffset, value_array);
3221     }
3222   }
3223   // If the array is a reasonably short list of substrings, convert it into a
3224   // list of internalized strings.
3225   if (type == STRING_SPLIT_SUBSTRINGS && value_array->length() < 100) {
3226     for (int i = 0; i < value_array->length(); i++) {
3227       String* str = String::cast(value_array->get(i));
3228       Object* internalized_str;
3229       MaybeObject* maybe_string = heap->InternalizeString(str);
3230       if (maybe_string->ToObject(&internalized_str)) {
3231         value_array->set(i, internalized_str);
3232       }
3233     }
3234   }
3235   // Convert backing store to a copy-on-write array.
3236   value_array->set_map_no_write_barrier(heap->fixed_cow_array_map());
3237 }
3238
3239
3240 void RegExpResultsCache::Clear(FixedArray* cache) {
3241   for (int i = 0; i < kRegExpResultsCacheSize; i++) {
3242     cache->set(i, Smi::FromInt(0));
3243   }
3244 }
3245
3246
3247 MaybeObject* Heap::AllocateInitialNumberStringCache() {
3248   MaybeObject* maybe_obj =
3249       AllocateFixedArray(kInitialNumberStringCacheSize * 2, TENURED);
3250   return maybe_obj;
3251 }
3252
3253
3254 int Heap::FullSizeNumberStringCacheLength() {
3255   // Compute the size of the number string cache based on the max newspace size.
3256   // The number string cache has a minimum size based on twice the initial cache
3257   // size to ensure that it is bigger after being made 'full size'.
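  // For example, assuming an 8 MB semispace, this yields 8 MB / 512 = 16384
  // entries; the clamp below leaves that at 0x4000 (assuming the minimum of
  // twice the initial cache size is smaller), so the returned FixedArray
  // length would be 0x8000.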
3258   int number_string_cache_size = max_semispace_size_ / 512;
3259   number_string_cache_size = Max(kInitialNumberStringCacheSize * 2,
3260                                  Min(0x4000, number_string_cache_size));
3261   // There is a string and a number per entry so the length is twice the number
3262   // of entries.
3263   return number_string_cache_size * 2;
3264 }
3265
3266
3267 void Heap::AllocateFullSizeNumberStringCache() {
3268   // The idea is to have a small number string cache in the snapshot to keep
3269   // boot-time memory usage down.  If the cache has already been expanded while
3270   // creating the snapshot, that defeats the purpose.
3271   ASSERT(!Serializer::enabled() || FLAG_extra_code != NULL);
3272   MaybeObject* maybe_obj =
3273       AllocateFixedArray(FullSizeNumberStringCacheLength(), TENURED);
3274   Object* new_cache;
3275   if (maybe_obj->ToObject(&new_cache)) {
3276     // We don't bother to repopulate the cache with entries from the old cache.
3277     // It will be repopulated soon enough with new strings.
3278     set_number_string_cache(FixedArray::cast(new_cache));
3279   }
3280   // If allocation fails then we just return without doing anything.  It is only
3281   // a cache, so best effort is OK here.
3282 }
3283
3284
3285 void Heap::FlushNumberStringCache() {
3286   // Flush the number to string cache.
3287   int len = number_string_cache()->length();
3288   for (int i = 0; i < len; i++) {
3289     number_string_cache()->set_undefined(this, i);
3290   }
3291 }
3292
3293
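// Hash functions for the number string cache: a double is hashed by folding
// its 64-bit IEEE-754 bit pattern into 32 bits (low word XOR high word), so
// e.g. +0.0 hashes to 0 while -0.0, which differs only in the sign bit, hashes
// to a value with only the top bit set. A Smi simply hashes to its value.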
3294 static inline int double_get_hash(double d) {
3295   DoubleRepresentation rep(d);
3296   return static_cast<int>(rep.bits) ^ static_cast<int>(rep.bits >> 32);
3297 }
3298
3299
3300 static inline int smi_get_hash(Smi* smi) {
3301   return smi->value();
3302 }
3303
3304
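// The number string cache is a FixedArray of (number, string) pairs: entry h
// occupies slots 2 * h and 2 * h + 1, where h = hash & (length / 2 - 1). On
// the first collision at the initial size, SetNumberStringCache() below grows
// the cache to full size (see AllocateFullSizeNumberStringCache()); once full
// sized, colliding entries are simply overwritten.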
3305 Object* Heap::GetNumberStringCache(Object* number) {
3306   int hash;
3307   int mask = (number_string_cache()->length() >> 1) - 1;
3308   if (number->IsSmi()) {
3309     hash = smi_get_hash(Smi::cast(number)) & mask;
3310   } else {
3311     hash = double_get_hash(number->Number()) & mask;
3312   }
3313   Object* key = number_string_cache()->get(hash * 2);
3314   if (key == number) {
3315     return String::cast(number_string_cache()->get(hash * 2 + 1));
3316   } else if (key->IsHeapNumber() &&
3317              number->IsHeapNumber() &&
3318              key->Number() == number->Number()) {
3319     return String::cast(number_string_cache()->get(hash * 2 + 1));
3320   }
3321   return undefined_value();
3322 }
3323
3324
3325 void Heap::SetNumberStringCache(Object* number, String* string) {
3326   int hash;
3327   int mask = (number_string_cache()->length() >> 1) - 1;
3328   if (number->IsSmi()) {
3329     hash = smi_get_hash(Smi::cast(number)) & mask;
3330   } else {
3331     hash = double_get_hash(number->Number()) & mask;
3332   }
3333   if (number_string_cache()->get(hash * 2) != undefined_value() &&
3334       number_string_cache()->length() != FullSizeNumberStringCacheLength()) {
3335     // The first time we have a hash collision, we move to the full sized
3336     // number string cache.
3337     AllocateFullSizeNumberStringCache();
3338     return;
3339   }
3340   number_string_cache()->set(hash * 2, number);
3341   number_string_cache()->set(hash * 2 + 1, string);
3342 }
3343
3344
3345 MaybeObject* Heap::NumberToString(Object* number,
3346                                   bool check_number_string_cache,
3347                                   PretenureFlag pretenure) {
3348   isolate_->counters()->number_to_string_runtime()->Increment();
3349   if (check_number_string_cache) {
3350     Object* cached = GetNumberStringCache(number);
3351     if (cached != undefined_value()) {
3352       return cached;
3353     }
3354   }
3355
3356   char arr[100];
3357   Vector<char> buffer(arr, ARRAY_SIZE(arr));
3358   const char* str;
3359   if (number->IsSmi()) {
3360     int num = Smi::cast(number)->value();
3361     str = IntToCString(num, buffer);
3362   } else {
3363     double num = HeapNumber::cast(number)->value();
3364     str = DoubleToCString(num, buffer);
3365   }
3366
3367   Object* js_string;
3368   MaybeObject* maybe_js_string =
3369       AllocateStringFromOneByte(CStrVector(str), pretenure);
3370   if (maybe_js_string->ToObject(&js_string)) {
3371     SetNumberStringCache(number, String::cast(js_string));
3372   }
3373   return maybe_js_string;
3374 }
3375
3376
3377 MaybeObject* Heap::Uint32ToString(uint32_t value,
3378                                   bool check_number_string_cache) {
3379   Object* number;
3380   MaybeObject* maybe = NumberFromUint32(value);
3381   if (!maybe->To<Object>(&number)) return maybe;
3382   return NumberToString(number, check_number_string_cache);
3383 }
3384
3385
3386 Map* Heap::MapForExternalArrayType(ExternalArrayType array_type) {
3387   return Map::cast(roots_[RootIndexForExternalArrayType(array_type)]);
3388 }
3389
3390
3391 Heap::RootListIndex Heap::RootIndexForExternalArrayType(
3392     ExternalArrayType array_type) {
3393   switch (array_type) {
3394     case kExternalByteArray:
3395       return kExternalByteArrayMapRootIndex;
3396     case kExternalUnsignedByteArray:
3397       return kExternalUnsignedByteArrayMapRootIndex;
3398     case kExternalShortArray:
3399       return kExternalShortArrayMapRootIndex;
3400     case kExternalUnsignedShortArray:
3401       return kExternalUnsignedShortArrayMapRootIndex;
3402     case kExternalIntArray:
3403       return kExternalIntArrayMapRootIndex;
3404     case kExternalUnsignedIntArray:
3405       return kExternalUnsignedIntArrayMapRootIndex;
3406     case kExternalFloatArray:
3407       return kExternalFloatArrayMapRootIndex;
3408     case kExternalDoubleArray:
3409       return kExternalDoubleArrayMapRootIndex;
3410     case kExternalPixelArray:
3411       return kExternalPixelArrayMapRootIndex;
3412     default:
3413       UNREACHABLE();
3414       return kUndefinedValueRootIndex;
3415   }
3416 }
3417
3418 Heap::RootListIndex Heap::RootIndexForEmptyExternalArray(
3419     ElementsKind elementsKind) {
3420   switch (elementsKind) {
3421     case EXTERNAL_BYTE_ELEMENTS:
3422       return kEmptyExternalByteArrayRootIndex;
3423     case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
3424       return kEmptyExternalUnsignedByteArrayRootIndex;
3425     case EXTERNAL_SHORT_ELEMENTS:
3426       return kEmptyExternalShortArrayRootIndex;
3427     case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
3428       return kEmptyExternalUnsignedShortArrayRootIndex;
3429     case EXTERNAL_INT_ELEMENTS:
3430       return kEmptyExternalIntArrayRootIndex;
3431     case EXTERNAL_UNSIGNED_INT_ELEMENTS:
3432       return kEmptyExternalUnsignedIntArrayRootIndex;
3433     case EXTERNAL_FLOAT_ELEMENTS:
3434       return kEmptyExternalFloatArrayRootIndex;
3435     case EXTERNAL_DOUBLE_ELEMENTS:
3436       return kEmptyExternalDoubleArrayRootIndex;
3437     case EXTERNAL_PIXEL_ELEMENTS:
3438       return kEmptyExternalPixelArrayRootIndex;
3439     default:
3440       UNREACHABLE();
3441       return kUndefinedValueRootIndex;
3442   }
3443 }
3444
3445 ExternalArray* Heap::EmptyExternalArrayForMap(Map* map) {
3446   return ExternalArray::cast(
3447       roots_[RootIndexForEmptyExternalArray(map->elements_kind())]);
3448 }
3449
3450
3453 MaybeObject* Heap::NumberFromDouble(double value, PretenureFlag pretenure) {
3454   // We need to distinguish the minus zero value and this cannot be
3455   // done after conversion to int. Doing this by comparing bit
3456   // patterns is faster than using fpclassify() et al.
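  // (+0.0 and -0.0 compare equal as doubles but differ in the sign bit, i.e.
  // bit patterns 0x0000000000000000 vs. 0x8000000000000000; converting -0.0
  // to a Smi would silently drop the sign.)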
3457   static const DoubleRepresentation minus_zero(-0.0);
3458
3459   DoubleRepresentation rep(value);
3460   if (rep.bits == minus_zero.bits) {
3461     return AllocateHeapNumber(-0.0, pretenure);
3462   }
3463
3464   int int_value = FastD2I(value);
3465   if (value == int_value && Smi::IsValid(int_value)) {
3466     return Smi::FromInt(int_value);
3467   }
3468
3469   // Materialize the value in the heap.
3470   return AllocateHeapNumber(value, pretenure);
3471 }
3472
3473
3474 MaybeObject* Heap::AllocateForeign(Address address, PretenureFlag pretenure) {
3475   // Statically ensure that it is safe to allocate foreigns in paged spaces.
3476   STATIC_ASSERT(Foreign::kSize <= Page::kMaxNonCodeHeapObjectSize);
3477   AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
3478   Foreign* result;
3479   MaybeObject* maybe_result = Allocate(foreign_map(), space);
3480   if (!maybe_result->To(&result)) return maybe_result;
3481   result->set_foreign_address(address);
3482   return result;
3483 }
3484
3485
3486 MaybeObject* Heap::AllocateSharedFunctionInfo(Object* name) {
3487   SharedFunctionInfo* share;
3488   MaybeObject* maybe = Allocate(shared_function_info_map(), OLD_POINTER_SPACE);
3489   if (!maybe->To<SharedFunctionInfo>(&share)) return maybe;
3490
3491   // Set pointer fields.
3492   share->set_name(name);
3493   Code* illegal = isolate_->builtins()->builtin(Builtins::kIllegal);
3494   share->set_code(illegal);
3495   share->set_optimized_code_map(Smi::FromInt(0));
3496   share->set_scope_info(ScopeInfo::Empty(isolate_));
3497   Code* construct_stub =
3498       isolate_->builtins()->builtin(Builtins::kJSConstructStubGeneric);
3499   share->set_construct_stub(construct_stub);
3500   share->set_instance_class_name(Object_string());
3501   share->set_function_data(undefined_value(), SKIP_WRITE_BARRIER);
3502   share->set_script(undefined_value(), SKIP_WRITE_BARRIER);
3503   share->set_debug_info(undefined_value(), SKIP_WRITE_BARRIER);
3504   share->set_inferred_name(empty_string(), SKIP_WRITE_BARRIER);
3505   share->set_initial_map(undefined_value(), SKIP_WRITE_BARRIER);
3506   share->set_ast_node_count(0);
3507   share->set_stress_deopt_counter(FLAG_deopt_every_n_times);
3508   share->set_counters(0);
3509
3510   // Set integer fields (smi or int, depending on the architecture).
3511   share->set_length(0);
3512   share->set_formal_parameter_count(0);
3513   share->set_expected_nof_properties(0);
3514   share->set_num_literals(0);
3515   share->set_start_position_and_type(0);
3516   share->set_end_position(0);
3517   share->set_function_token_position(0);
3518   // All compiler hints default to false or 0.
3519   share->set_compiler_hints(0);
3520   share->set_opt_count(0);
3521
3522   return share;
3523 }
3524
3525
3526 MaybeObject* Heap::AllocateJSMessageObject(String* type,
3527                                            JSArray* arguments,
3528                                            int start_position,
3529                                            int end_position,
3530                                            Object* script,
3531                                            Object* stack_trace,
3532                                            Object* stack_frames) {
3533   Object* result;
3534   { MaybeObject* maybe_result = Allocate(message_object_map(), NEW_SPACE);
3535     if (!maybe_result->ToObject(&result)) return maybe_result;
3536   }
3537   JSMessageObject* message = JSMessageObject::cast(result);
3538   message->set_properties(Heap::empty_fixed_array(), SKIP_WRITE_BARRIER);
3539   message->initialize_elements();
3540   message->set_elements(Heap::empty_fixed_array(), SKIP_WRITE_BARRIER);
3541   message->set_type(type);
3542   message->set_arguments(arguments);
3543   message->set_start_position(start_position);
3544   message->set_end_position(end_position);
3545   message->set_script(script);
3546   message->set_stack_trace(stack_trace);
3547   message->set_stack_frames(stack_frames);
3548   return result;
3549 }
3550
3551
3552
3553 // Returns true for a character in a range.  Both limits are inclusive.
3554 static inline bool Between(uint32_t character, uint32_t from, uint32_t to) {
3555   // This makes use of unsigned wraparound.
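  // For example, Between('5', '0', '9') computes 5 <= 9 (true), while
  // Between('/', '0', '9') wraps around to 0xFFFFFFFF <= 9 (false), so no
  // separate lower-bound check is needed.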
3556   return character - from <= to - from;
3557 }
3558
3559
3560 MUST_USE_RESULT static inline MaybeObject* MakeOrFindTwoCharacterString(
3561     Heap* heap,
3562     uint16_t c1,
3563     uint16_t c2) {
3564   String* result;
3565   // Numeric strings have a different hash algorithm not known by
3566   // LookupTwoCharsStringIfExists, so we skip this step for such strings.
3567   if ((!Between(c1, '0', '9') || !Between(c2, '0', '9')) &&
3568       heap->string_table()->LookupTwoCharsStringIfExists(c1, c2, &result)) {
3569     return result;
3570   // Now that we know the length is 2, we might as well make use of that fact
3571   // when building the new string.
3572   } else if (static_cast<unsigned>(c1 | c2) <= String::kMaxOneByteCharCodeU) {
3573     // Both chars fit into a one-byte string.  The '|' test above is valid
3574     ASSERT(IsPowerOf2(String::kMaxOneByteCharCodeU + 1));  // because of this.
3575     Object* result;
3576     { MaybeObject* maybe_result = heap->AllocateRawOneByteString(2);
3577       if (!maybe_result->ToObject(&result)) return maybe_result;
3578     }
3579     uint8_t* dest = SeqOneByteString::cast(result)->GetChars();
3580     dest[0] = static_cast<uint8_t>(c1);
3581     dest[1] = static_cast<uint8_t>(c2);
3582     return result;
3583   } else {
3584     Object* result;
3585     { MaybeObject* maybe_result = heap->AllocateRawTwoByteString(2);
3586       if (!maybe_result->ToObject(&result)) return maybe_result;
3587     }
3588     uc16* dest = SeqTwoByteString::cast(result)->GetChars();
3589     dest[0] = c1;
3590     dest[1] = c2;
3591     return result;
3592   }
3593 }
3594
3595
3596 MaybeObject* Heap::AllocateConsString(String* first, String* second) {
3597   int first_length = first->length();
3598   if (first_length == 0) {
3599     return second;
3600   }
3601
3602   int second_length = second->length();
3603   if (second_length == 0) {
3604     return first;
3605   }
3606
3607   int length = first_length + second_length;
3608
3609   // Optimization for two-character strings often used as keys in a decompression
3610   // dictionary.  Check whether we already have the string in the string
3611   // table to prevent creation of many unnecessary strings.
3612   if (length == 2) {
3613     uint16_t c1 = first->Get(0);
3614     uint16_t c2 = second->Get(0);
3615     return MakeOrFindTwoCharacterString(this, c1, c2);
3616   }
3617
3618   bool first_is_one_byte = first->IsOneByteRepresentation();
3619   bool second_is_one_byte = second->IsOneByteRepresentation();
3620   bool is_one_byte = first_is_one_byte && second_is_one_byte;
3621   // Make sure that an out of memory exception is thrown if the length
3622   // of the new cons string is too large.
3623   if (length > String::kMaxLength || length < 0) {
3624     isolate()->context()->mark_out_of_memory();
3625     return Failure::OutOfMemoryException(0x4);
3626   }
3627
3628   bool is_one_byte_data_in_two_byte_string = false;
3629   if (!is_one_byte) {
3630     // At least one of the strings uses two-byte representation so we
3631     // can't use the fast case code for short ASCII strings below, but
3632     // we can try to save memory if all chars actually fit in ASCII.
3633     is_one_byte_data_in_two_byte_string =
3634         first->HasOnlyOneByteChars() && second->HasOnlyOneByteChars();
3635     if (is_one_byte_data_in_two_byte_string) {
3636       isolate_->counters()->string_add_runtime_ext_to_ascii()->Increment();
3637     }
3638   }
3639
3640   // If the resulting string is small, make a flat string.
3641   if (length < ConsString::kMinLength) {
3642     // Note that neither of the two inputs can be a slice because:
3643     STATIC_ASSERT(ConsString::kMinLength <= SlicedString::kMinLength);
3644     ASSERT(first->IsFlat());
3645     ASSERT(second->IsFlat());
3646     if (is_one_byte) {
3647       Object* result;
3648       { MaybeObject* maybe_result = AllocateRawOneByteString(length);
3649         if (!maybe_result->ToObject(&result)) return maybe_result;
3650       }
3651       // Copy the characters into the new object.
3652       uint8_t* dest = SeqOneByteString::cast(result)->GetChars();
3653       // Copy first part.
3654       const uint8_t* src;
3655       if (first->IsExternalString()) {
3656         src = ExternalAsciiString::cast(first)->GetChars();
3657       } else {
3658         src = SeqOneByteString::cast(first)->GetChars();
3659       }
3660       for (int i = 0; i < first_length; i++) *dest++ = src[i];
3661       // Copy second part.
3662       if (second->IsExternalString()) {
3663         src = ExternalAsciiString::cast(second)->GetChars();
3664       } else {
3665         src = SeqOneByteString::cast(second)->GetChars();
3666       }
3667       for (int i = 0; i < second_length; i++) *dest++ = src[i];
3668       return result;
3669     } else {
3670       if (is_one_byte_data_in_two_byte_string) {
3671         Object* result;
3672         { MaybeObject* maybe_result = AllocateRawOneByteString(length);
3673           if (!maybe_result->ToObject(&result)) return maybe_result;
3674         }
3675         // Copy the characters into the new object.
3676         uint8_t* dest = SeqOneByteString::cast(result)->GetChars();
3677         String::WriteToFlat(first, dest, 0, first_length);
3678         String::WriteToFlat(second, dest + first_length, 0, second_length);
3679         isolate_->counters()->string_add_runtime_ext_to_ascii()->Increment();
3680         return result;
3681       }
3682
3683       Object* result;
3684       { MaybeObject* maybe_result = AllocateRawTwoByteString(length);
3685         if (!maybe_result->ToObject(&result)) return maybe_result;
3686       }
3687       // Copy the characters into the new object.
3688       uc16* dest = SeqTwoByteString::cast(result)->GetChars();
3689       String::WriteToFlat(first, dest, 0, first_length);
3690       String::WriteToFlat(second, dest + first_length, 0, second_length);
3691       return result;
3692     }
3693   }
3694
3695   Map* map = (is_one_byte || is_one_byte_data_in_two_byte_string) ?
3696       cons_ascii_string_map() : cons_string_map();
3697
3698   Object* result;
3699   { MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
3700     if (!maybe_result->ToObject(&result)) return maybe_result;
3701   }
3702
3703   DisallowHeapAllocation no_gc;
3704   ConsString* cons_string = ConsString::cast(result);
3705   WriteBarrierMode mode = cons_string->GetWriteBarrierMode(no_gc);
3706   cons_string->set_length(length);
3707   cons_string->set_hash_field(String::kEmptyHashField);
3708   cons_string->set_first(first, mode);
3709   cons_string->set_second(second, mode);
3710   return result;
3711 }
3712
3713
3714 MaybeObject* Heap::AllocateSubString(String* buffer,
3715                                      int start,
3716                                      int end,
3717                                      PretenureFlag pretenure) {
3718   int length = end - start;
3719   if (length <= 0) {
3720     return empty_string();
3721   } else if (length == 1) {
3722     return LookupSingleCharacterStringFromCode(buffer->Get(start));
3723   } else if (length == 2) {
3724     // Optimization for two-character strings often used as keys in a decompression
3725     // dictionary.  Check whether we already have the string in the string
3726     // table to prevent creation of many unnecessary strings.
3727     uint16_t c1 = buffer->Get(start);
3728     uint16_t c2 = buffer->Get(start + 1);
3729     return MakeOrFindTwoCharacterString(this, c1, c2);
3730   }
3731
3732   // Make an attempt to flatten the buffer to reduce access time.
3733   buffer = buffer->TryFlattenGetString();
3734
3735   if (!FLAG_string_slices ||
3736       !buffer->IsFlat() ||
3737       length < SlicedString::kMinLength ||
3738       pretenure == TENURED) {
3739     Object* result;
3740     // WriteToFlat takes care of the case when an indirect string has a
3741     // different encoding from its underlying string.  These encodings may
3742     // differ because of externalization.
3743     bool is_one_byte = buffer->IsOneByteRepresentation();
3744     { MaybeObject* maybe_result = is_one_byte
3745                                   ? AllocateRawOneByteString(length, pretenure)
3746                                   : AllocateRawTwoByteString(length, pretenure);
3747       if (!maybe_result->ToObject(&result)) return maybe_result;
3748     }
3749     String* string_result = String::cast(result);
3750     // Copy the characters into the new object.
3751     if (is_one_byte) {
3752       ASSERT(string_result->IsOneByteRepresentation());
3753       uint8_t* dest = SeqOneByteString::cast(string_result)->GetChars();
3754       String::WriteToFlat(buffer, dest, start, end);
3755     } else {
3756       ASSERT(string_result->IsTwoByteRepresentation());
3757       uc16* dest = SeqTwoByteString::cast(string_result)->GetChars();
3758       String::WriteToFlat(buffer, dest, start, end);
3759     }
3760     return result;
3761   }
3762
3763   ASSERT(buffer->IsFlat());
3764 #ifdef VERIFY_HEAP
3765   if (FLAG_verify_heap) {
3766     buffer->StringVerify();
3767   }
3768 #endif
3769
3770   Object* result;
3771   // When slicing an indirect string we use its encoding for a newly created
3772   // slice and don't check the encoding of the underlying string.  This is safe
3773   // even if the encodings are different because of externalization.  If an
3774   // indirect ASCII string is pointing to a two-byte string, the two-byte char
3775   // codes of the underlying string must still fit into ASCII (because
3776   // externalization must not change char codes).
3777   { Map* map = buffer->IsOneByteRepresentation()
3778                  ? sliced_ascii_string_map()
3779                  : sliced_string_map();
3780     MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
3781     if (!maybe_result->ToObject(&result)) return maybe_result;
3782   }
3783
3784   DisallowHeapAllocation no_gc;
3785   SlicedString* sliced_string = SlicedString::cast(result);
3786   sliced_string->set_length(length);
3787   sliced_string->set_hash_field(String::kEmptyHashField);
3788   if (buffer->IsConsString()) {
3789     ConsString* cons = ConsString::cast(buffer);
3790     ASSERT(cons->second()->length() == 0);
3791     sliced_string->set_parent(cons->first());
3792     sliced_string->set_offset(start);
3793   } else if (buffer->IsSlicedString()) {
3794     // Prevent nesting sliced strings.
3795     SlicedString* parent_slice = SlicedString::cast(buffer);
3796     sliced_string->set_parent(parent_slice->parent());
3797     sliced_string->set_offset(start + parent_slice->offset());
3798   } else {
3799     sliced_string->set_parent(buffer);
3800     sliced_string->set_offset(start);
3801   }
3802   ASSERT(sliced_string->parent()->IsSeqString() ||
3803          sliced_string->parent()->IsExternalString());
3804   return result;
3805 }
3806
3807
3808 MaybeObject* Heap::AllocateExternalStringFromAscii(
3809     const ExternalAsciiString::Resource* resource) {
3810   size_t length = resource->length();
3811   if (length > static_cast<size_t>(String::kMaxLength)) {
3812     isolate()->context()->mark_out_of_memory();
3813     return Failure::OutOfMemoryException(0x5);
3814   }
3815
3816   Map* map = external_ascii_string_map();
3817   Object* result;
3818   { MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
3819     if (!maybe_result->ToObject(&result)) return maybe_result;
3820   }
3821
3822   ExternalAsciiString* external_string = ExternalAsciiString::cast(result);
3823   external_string->set_length(static_cast<int>(length));
3824   external_string->set_hash_field(String::kEmptyHashField);
3825   external_string->set_resource(resource);
3826
3827   return result;
3828 }
3829
3830
3831 MaybeObject* Heap::AllocateExternalStringFromTwoByte(
3832     const ExternalTwoByteString::Resource* resource) {
3833   size_t length = resource->length();
3834   if (length > static_cast<size_t>(String::kMaxLength)) {
3835     isolate()->context()->mark_out_of_memory();
3836     return Failure::OutOfMemoryException(0x6);
3837   }
3838
3839   // For small strings we check whether the resource contains only
3840   // one-byte characters.  If so, we use a different string map.
3841   static const size_t kOneByteCheckLengthLimit = 32;
3842   bool is_one_byte = length <= kOneByteCheckLengthLimit &&
3843       String::IsOneByte(resource->data(), static_cast<int>(length));
3844   Map* map = is_one_byte ?
3845       external_string_with_one_byte_data_map() : external_string_map();
3846   Object* result;
3847   { MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
3848     if (!maybe_result->ToObject(&result)) return maybe_result;
3849   }
3850
3851   ExternalTwoByteString* external_string = ExternalTwoByteString::cast(result);
3852   external_string->set_length(static_cast<int>(length));
3853   external_string->set_hash_field(String::kEmptyHashField);
3854   external_string->set_resource(resource);
3855
3856   return result;
3857 }
3858
3859
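// Codes up to String::kMaxOneByteCharCode are served from (and added to) the
// single character string cache as internalized one-byte strings; larger
// codes allocate a fresh, uncached two-byte string on every call.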
3860 MaybeObject* Heap::LookupSingleCharacterStringFromCode(uint16_t code) {
3861   if (code <= String::kMaxOneByteCharCode) {
3862     Object* value = single_character_string_cache()->get(code);
3863     if (value != undefined_value()) return value;
3864
3865     uint8_t buffer[1];
3866     buffer[0] = static_cast<uint8_t>(code);
3867     Object* result;
3868     MaybeObject* maybe_result =
3869         InternalizeOneByteString(Vector<const uint8_t>(buffer, 1));
3870
3871     if (!maybe_result->ToObject(&result)) return maybe_result;
3872     single_character_string_cache()->set(code, result);
3873     return result;
3874   }
3875
3876   Object* result;
3877   { MaybeObject* maybe_result = AllocateRawTwoByteString(1);
3878     if (!maybe_result->ToObject(&result)) return maybe_result;
3879   }
3880   String* answer = String::cast(result);
3881   answer->Set(0, code);
3882   return answer;
3883 }
3884
3885
3886 MaybeObject* Heap::AllocateByteArray(int length, PretenureFlag pretenure) {
3887   if (length < 0 || length > ByteArray::kMaxLength) {
3888     return Failure::OutOfMemoryException(0x7);
3889   }
3890   if (pretenure == NOT_TENURED) {
3891     return AllocateByteArray(length);
3892   }
3893   int size = ByteArray::SizeFor(length);
3894   Object* result;
3895   { MaybeObject* maybe_result = (size <= Page::kMaxNonCodeHeapObjectSize)
3896                    ? old_data_space_->AllocateRaw(size)
3897                    : lo_space_->AllocateRaw(size, NOT_EXECUTABLE);
3898     if (!maybe_result->ToObject(&result)) return maybe_result;
3899   }
3900
3901   reinterpret_cast<ByteArray*>(result)->set_map_no_write_barrier(
3902       byte_array_map());
3903   reinterpret_cast<ByteArray*>(result)->set_length(length);
3904   return result;
3905 }
3906
3907
3908 MaybeObject* Heap::AllocateByteArray(int length) {
3909   if (length < 0 || length > ByteArray::kMaxLength) {
3910     return Failure::OutOfMemoryException(0x8);
3911   }
3912   int size = ByteArray::SizeFor(length);
3913   AllocationSpace space =
3914       (size > Page::kMaxNonCodeHeapObjectSize) ? LO_SPACE : NEW_SPACE;
3915   Object* result;
3916   { MaybeObject* maybe_result = AllocateRaw(size, space, OLD_DATA_SPACE);
3917     if (!maybe_result->ToObject(&result)) return maybe_result;
3918   }
3919
3920   reinterpret_cast<ByteArray*>(result)->set_map_no_write_barrier(
3921       byte_array_map());
3922   reinterpret_cast<ByteArray*>(result)->set_length(length);
3923   return result;
3924 }
3925
3926
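// Fillers keep the heap iterable: a gap left behind by a discarded allocation
// (for example in CreateCode() below) is covered by a dummy object whose map
// encodes its size. One-word and two-word gaps use dedicated filler maps;
// anything larger becomes a FreeSpace object with an explicit size field.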
3927 void Heap::CreateFillerObjectAt(Address addr, int size) {
3928   if (size == 0) return;
3929   HeapObject* filler = HeapObject::FromAddress(addr);
3930   if (size == kPointerSize) {
3931     filler->set_map_no_write_barrier(one_pointer_filler_map());
3932   } else if (size == 2 * kPointerSize) {
3933     filler->set_map_no_write_barrier(two_pointer_filler_map());
3934   } else {
3935     filler->set_map_no_write_barrier(free_space_map());
3936     FreeSpace::cast(filler)->set_size(size);
3937   }
3938 }
3939
3940
3941 MaybeObject* Heap::AllocateExternalArray(int length,
3942                                          ExternalArrayType array_type,
3943                                          void* external_pointer,
3944                                          PretenureFlag pretenure) {
3945   AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
3946   Object* result;
3947   { MaybeObject* maybe_result = AllocateRaw(ExternalArray::kAlignedSize,
3948                                             space,
3949                                             OLD_DATA_SPACE);
3950     if (!maybe_result->ToObject(&result)) return maybe_result;
3951   }
3952
3953   reinterpret_cast<ExternalArray*>(result)->set_map_no_write_barrier(
3954       MapForExternalArrayType(array_type));
3955   reinterpret_cast<ExternalArray*>(result)->set_length(length);
3956   reinterpret_cast<ExternalArray*>(result)->set_external_pointer(
3957       external_pointer);
3958
3959   return result;
3960 }
3961
3962
3963 MaybeObject* Heap::CreateCode(const CodeDesc& desc,
3964                               Code::Flags flags,
3965                               Handle<Object> self_reference,
3966                               bool immovable,
3967                               bool crankshafted) {
3968   // Allocate ByteArray before the Code object, so that we do not risk
3969   // leaving an uninitialized Code object (and breaking the heap).
3970   ByteArray* reloc_info;
3971   MaybeObject* maybe_reloc_info = AllocateByteArray(desc.reloc_size, TENURED);
3972   if (!maybe_reloc_info->To(&reloc_info)) return maybe_reloc_info;
3973
3974   // Compute size.
3975   int body_size = RoundUp(desc.instr_size, kObjectAlignment);
3976   int obj_size = Code::SizeFor(body_size);
3977   ASSERT(IsAligned(static_cast<intptr_t>(obj_size), kCodeAlignment));
3978   MaybeObject* maybe_result;
3979   // Large code objects and code objects which should stay at a fixed address
3980   // are allocated in large object space.
3981   HeapObject* result;
3982   bool force_lo_space = obj_size > code_space()->AreaSize();
3983   if (force_lo_space) {
3984     maybe_result = lo_space_->AllocateRaw(obj_size, EXECUTABLE);
3985   } else {
3986     maybe_result = code_space_->AllocateRaw(obj_size);
3987   }
3988   if (!maybe_result->To<HeapObject>(&result)) return maybe_result;
3989
3990   if (immovable && !force_lo_space &&
3991       // Objects on the first page of each space are never moved.
3992       !code_space_->FirstPage()->Contains(result->address())) {
3993     // Discard the first code allocation, which was on a page where it could be
3994     // moved.
3995     CreateFillerObjectAt(result->address(), obj_size);
3996     maybe_result = lo_space_->AllocateRaw(obj_size, EXECUTABLE);
3997     if (!maybe_result->To<HeapObject>(&result)) return maybe_result;
3998   }
3999
4000   // Initialize the object
4001   result->set_map_no_write_barrier(code_map());
4002   Code* code = Code::cast(result);
4003   ASSERT(!isolate_->code_range()->exists() ||
4004       isolate_->code_range()->contains(code->address()));
4005   code->set_instruction_size(desc.instr_size);
4006   code->set_relocation_info(reloc_info);
4007   code->set_flags(flags);
4008   if (code->is_call_stub() || code->is_keyed_call_stub()) {
4009     code->set_check_type(RECEIVER_MAP_CHECK);
4010   }
4011   code->set_is_crankshafted(crankshafted);
4012   code->set_deoptimization_data(empty_fixed_array(), SKIP_WRITE_BARRIER);
4013   code->InitializeTypeFeedbackInfoNoWriteBarrier(undefined_value());
4014   code->set_handler_table(empty_fixed_array(), SKIP_WRITE_BARRIER);
4015   code->set_gc_metadata(Smi::FromInt(0));
4016   code->set_ic_age(global_ic_age_);
4017   code->set_prologue_offset(kPrologueOffsetNotSet);
4018   if (code->kind() == Code::OPTIMIZED_FUNCTION) {
4019     code->set_marked_for_deoptimization(false);
4020   }
4021   // Allow self-references to the created code object by patching the handle to
4022   // point to the newly allocated Code object.
4023   if (!self_reference.is_null()) {
4024     *(self_reference.location()) = code;
4025   }
4026   // Migrate generated code.
4027   // The generated code can contain Object** values (typically from handles)
4028   // that are dereferenced during the copy to point directly to the actual heap
4029   // objects. These pointers can include references to the code object itself,
4030   // through the self_reference parameter.
4031   code->CopyFrom(desc);
4032
4033 #ifdef VERIFY_HEAP
4034   if (FLAG_verify_heap) {
4035     code->Verify();
4036   }
4037 #endif
4038   return code;
4039 }
4040
4041
4042 MaybeObject* Heap::CopyCode(Code* code) {
4043   // Allocate an object the same size as the code object.
4044   int obj_size = code->Size();
4045   MaybeObject* maybe_result;
4046   if (obj_size > code_space()->AreaSize()) {
4047     maybe_result = lo_space_->AllocateRaw(obj_size, EXECUTABLE);
4048   } else {
4049     maybe_result = code_space_->AllocateRaw(obj_size);
4050   }
4051
4052   Object* result;
4053   if (!maybe_result->ToObject(&result)) return maybe_result;
4054
4055   // Copy code object.
4056   Address old_addr = code->address();
4057   Address new_addr = reinterpret_cast<HeapObject*>(result)->address();
4058   CopyBlock(new_addr, old_addr, obj_size);
4059   // Relocate the copy.
4060   Code* new_code = Code::cast(result);
4061   ASSERT(!isolate_->code_range()->exists() ||
4062       isolate_->code_range()->contains(code->address()));
4063   new_code->Relocate(new_addr - old_addr);
4064   return new_code;
4065 }
4066
4067
4068 MaybeObject* Heap::CopyCode(Code* code, Vector<byte> reloc_info) {
4069   // Allocate ByteArray before the Code object, so that we do not risk
4070   // leaving an uninitialized Code object (and breaking the heap).
4071   Object* reloc_info_array;
4072   { MaybeObject* maybe_reloc_info_array =
4073         AllocateByteArray(reloc_info.length(), TENURED);
4074     if (!maybe_reloc_info_array->ToObject(&reloc_info_array)) {
4075       return maybe_reloc_info_array;
4076     }
4077   }
4078
4079   int new_body_size = RoundUp(code->instruction_size(), kObjectAlignment);
4080
4081   int new_obj_size = Code::SizeFor(new_body_size);
4082
4083   Address old_addr = code->address();
4084
4085   size_t relocation_offset =
4086       static_cast<size_t>(code->instruction_end() - old_addr);
4087
4088   MaybeObject* maybe_result;
4089   if (new_obj_size > code_space()->AreaSize()) {
4090     maybe_result = lo_space_->AllocateRaw(new_obj_size, EXECUTABLE);
4091   } else {
4092     maybe_result = code_space_->AllocateRaw(new_obj_size);
4093   }
4094
4095   Object* result;
4096   if (!maybe_result->ToObject(&result)) return maybe_result;
4097
4098   // Copy code object.
4099   Address new_addr = reinterpret_cast<HeapObject*>(result)->address();
4100
4101   // Copy header and instructions.
4102   CopyBytes(new_addr, old_addr, relocation_offset);
4103
4104   Code* new_code = Code::cast(result);
4105   new_code->set_relocation_info(ByteArray::cast(reloc_info_array));
4106
4107   // Copy patched rinfo.
4108   CopyBytes(new_code->relocation_start(),
4109             reloc_info.start(),
4110             static_cast<size_t>(reloc_info.length()));
4111
4112   // Relocate the copy.
4113   ASSERT(!isolate_->code_range()->exists() ||
4114       isolate_->code_range()->contains(code->address()));
4115   new_code->Relocate(new_addr - old_addr);
4116
4117 #ifdef VERIFY_HEAP
4118   if (FLAG_verify_heap) {
4119     code->Verify();
4120   }
4121 #endif
4122   return new_code;
4123 }
4124
4125
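// Allocates the object and its AllocationSiteInfo as one contiguous block:
// the info record is placed directly after the map->instance_size() bytes of
// the object proper, which is why a single AllocateRaw() call of
// instance_size + AllocationSiteInfo::kSize suffices and the info pointer is
// computed with plain address arithmetic below.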
4126 MaybeObject* Heap::AllocateWithAllocationSite(Map* map, AllocationSpace space,
4127     Handle<Object> allocation_site_info_payload) {
4128   ASSERT(gc_state_ == NOT_IN_GC);
4129   ASSERT(map->instance_type() != MAP_TYPE);
4130   // If allocation failures are disallowed, we may allocate in a different
4131   // space when new space is full and the object is not a large object.
4132   AllocationSpace retry_space =
4133       (space != NEW_SPACE) ? space : TargetSpaceId(map->instance_type());
4134   int size = map->instance_size() + AllocationSiteInfo::kSize;
4135   Object* result;
4136   MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
4137   if (!maybe_result->ToObject(&result)) return maybe_result;
4138   // No need for write barrier since object is white and map is in old space.
4139   HeapObject::cast(result)->set_map_no_write_barrier(map);
4140   AllocationSiteInfo* alloc_info = reinterpret_cast<AllocationSiteInfo*>(
4141       reinterpret_cast<Address>(result) + map->instance_size());
4142   alloc_info->set_map_no_write_barrier(allocation_site_info_map());
4143   alloc_info->set_payload(*allocation_site_info_payload, SKIP_WRITE_BARRIER);
4144   return result;
4145 }
4146
4147
4148 MaybeObject* Heap::Allocate(Map* map, AllocationSpace space) {
4149   ASSERT(gc_state_ == NOT_IN_GC);
4150   ASSERT(map->instance_type() != MAP_TYPE);
4151   // If allocation failures are disallowed, we may allocate in a different
4152   // space when new space is full and the object is not a large object.
4153   AllocationSpace retry_space =
4154       (space != NEW_SPACE) ? space : TargetSpaceId(map->instance_type());
4155   int size = map->instance_size();
4156   Object* result;
4157   MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
4158   if (!maybe_result->ToObject(&result)) return maybe_result;
4159   // No need for write barrier since object is white and map is in old space.
4160   HeapObject::cast(result)->set_map_no_write_barrier(map);
4161   return result;
4162 }
4163
4164
4165 void Heap::InitializeFunction(JSFunction* function,
4166                               SharedFunctionInfo* shared,
4167                               Object* prototype) {
4168   ASSERT(!prototype->IsMap());
4169   function->initialize_properties();
4170   function->initialize_elements();
4171   function->set_shared(shared);
4172   function->set_code(shared->code());
4173   function->set_prototype_or_initial_map(prototype);
4174   function->set_context(undefined_value());
4175   function->set_literals_or_bindings(empty_fixed_array());
4176   function->set_next_function_link(undefined_value());
4177 }
4178
4179
4180 MaybeObject* Heap::AllocateFunctionPrototype(JSFunction* function) {
4181   // Make sure to use globals from the function's context, since the function
4182   // can be from a different context.
4183   Context* native_context = function->context()->native_context();
4184   Map* new_map;
4185   if (function->shared()->is_generator()) {
4186     // Generator prototypes can share maps since they don't have "constructor"
4187     // properties.
4188     new_map = native_context->generator_object_prototype_map();
4189   } else {
4190     // Each function prototype gets a fresh map to avoid unwanted sharing of
4191     // maps between prototypes of different constructors.
4192     JSFunction* object_function = native_context->object_function();
4193     ASSERT(object_function->has_initial_map());
4194     MaybeObject* maybe_map = object_function->initial_map()->Copy();
4195     if (!maybe_map->To(&new_map)) return maybe_map;
4196   }
4197
4198   Object* prototype;
4199   MaybeObject* maybe_prototype = AllocateJSObjectFromMap(new_map);
4200   if (!maybe_prototype->ToObject(&prototype)) return maybe_prototype;
4201
4202   if (!function->shared()->is_generator()) {
4203     MaybeObject* maybe_failure =
4204         JSObject::cast(prototype)->SetLocalPropertyIgnoreAttributes(
4205             constructor_string(), function, DONT_ENUM);
4206     if (maybe_failure->IsFailure()) return maybe_failure;
4207   }
4208
4209   return prototype;
4210 }
4211
4212
4213 MaybeObject* Heap::AllocateFunction(Map* function_map,
4214                                     SharedFunctionInfo* shared,
4215                                     Object* prototype,
4216                                     PretenureFlag pretenure) {
4217   AllocationSpace space =
4218       (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
4219   Object* result;
4220   { MaybeObject* maybe_result = Allocate(function_map, space);
4221     if (!maybe_result->ToObject(&result)) return maybe_result;
4222   }
4223   InitializeFunction(JSFunction::cast(result), shared, prototype);
4224   return result;
4225 }
4226
4227
4228 MaybeObject* Heap::AllocateArgumentsObject(Object* callee, int length) {
4229   // To get fast allocation and map sharing for arguments objects we
4230   // allocate them based on an arguments boilerplate.
4231
4232   JSObject* boilerplate;
4233   int arguments_object_size;
4234   bool strict_mode_callee = callee->IsJSFunction() &&
4235       !JSFunction::cast(callee)->shared()->is_classic_mode();
4236   if (strict_mode_callee) {
4237     boilerplate =
4238         isolate()->context()->native_context()->
4239             strict_mode_arguments_boilerplate();
4240     arguments_object_size = kArgumentsObjectSizeStrict;
4241   } else {
4242     boilerplate =
4243         isolate()->context()->native_context()->arguments_boilerplate();
4244     arguments_object_size = kArgumentsObjectSize;
4245   }
4246
4247   // This calls Copy directly rather than using Heap::AllocateRaw so we
4248   // duplicate the check here.
4249   ASSERT(AllowHeapAllocation::IsAllowed() && gc_state_ == NOT_IN_GC);
4250
4251   // Check that the size of the boilerplate matches our
4252   // expectations. The ArgumentsAccessStub::GenerateNewObject relies
4253   // on the size being a known constant.
4254   ASSERT(arguments_object_size == boilerplate->map()->instance_size());
4255
4256   // Do the allocation.
4257   Object* result;
4258   { MaybeObject* maybe_result =
4259         AllocateRaw(arguments_object_size, NEW_SPACE, OLD_POINTER_SPACE);
4260     if (!maybe_result->ToObject(&result)) return maybe_result;
4261   }
4262
4263   // Copy the content. The arguments boilerplate doesn't have any
4264   // fields that point to new space so it's safe to skip the write
4265   // barrier here.
4266   CopyBlock(HeapObject::cast(result)->address(),
4267             boilerplate->address(),
4268             JSObject::kHeaderSize);
4269
4270   // Set the length property.
4271   JSObject::cast(result)->InObjectPropertyAtPut(kArgumentsLengthIndex,
4272                                                 Smi::FromInt(length),
4273                                                 SKIP_WRITE_BARRIER);
4274   // Set the callee property for the non-strict mode arguments object only.
4275   if (!strict_mode_callee) {
4276     JSObject::cast(result)->InObjectPropertyAtPut(kArgumentsCalleeIndex,
4277                                                   callee);
4278   }
4279
4280   // Check the state of the object.
4281   ASSERT(JSObject::cast(result)->HasFastProperties());
4282   ASSERT(JSObject::cast(result)->HasFastObjectElements());
4283
4284   return result;
4285 }
4286
4287
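// Creates the initial map for |fun|, using the instance size and number of
// in-object properties suggested by its SharedFunctionInfo, and fetches or
// allocates the prototype. Starts in-object slack tracking for non-generator
// functions.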
4288 MaybeObject* Heap::AllocateInitialMap(JSFunction* fun) {
4289   ASSERT(!fun->has_initial_map());
4290
4291   // First create a new map with the size and number of in-object properties
4292   // suggested by the function.
4293   InstanceType instance_type;
4294   int instance_size;
4295   int in_object_properties;
4296   if (fun->shared()->is_generator()) {
4297     instance_type = JS_GENERATOR_OBJECT_TYPE;
4298     instance_size = JSGeneratorObject::kSize;
4299     in_object_properties = 0;
4300   } else {
4301     instance_type = JS_OBJECT_TYPE;
4302     instance_size = fun->shared()->CalculateInstanceSize();
4303     in_object_properties = fun->shared()->CalculateInObjectProperties();
4304   }
4305   Map* map;
4306   MaybeObject* maybe_map = AllocateMap(instance_type, instance_size);
4307   if (!maybe_map->To(&map)) return maybe_map;
4308
4309   // Fetch or allocate prototype.
4310   Object* prototype;
4311   if (fun->has_instance_prototype()) {
4312     prototype = fun->instance_prototype();
4313   } else {
4314     MaybeObject* maybe_prototype = AllocateFunctionPrototype(fun);
4315     if (!maybe_prototype->To(&prototype)) return maybe_prototype;
4316   }
4317   map->set_inobject_properties(in_object_properties);
4318   map->set_unused_property_fields(in_object_properties);
4319   map->set_prototype(prototype);
4320   ASSERT(map->has_fast_object_elements());
4321
4322   if (!fun->shared()->is_generator()) {
4323     fun->shared()->StartInobjectSlackTracking(map);
4324   }
4325
4326   return map;
4327 }
4328
4329
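// Initializes the properties backing store, the elements and the in-object
// fields of a freshly allocated JSObject as described by its map.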
4330 void Heap::InitializeJSObjectFromMap(JSObject* obj,
4331                                      FixedArray* properties,
4332                                      Map* map) {
4333   obj->set_properties(properties);
4334   obj->initialize_elements();
4335   // TODO(1240798): Initialize the object's body using valid initial values
4336   // according to the object's initial map.  For example, if the map's
4337   // instance type is JS_ARRAY_TYPE, the length field should be initialized
4338   // to a number (e.g. Smi::FromInt(0)) and the elements initialized to a
4339   // fixed array (e.g. Heap::empty_fixed_array()).  Currently, the object
4340   // verification code has to cope with (temporarily) invalid objects.  See,
4341   // for example, JSArray::JSArrayVerify.
4342   Object* filler;
4343   // We cannot always fill with one_pointer_filler_map because objects
4344   // created from API functions expect their internal fields to be initialized
4345   // with undefined_value.
4346   // Pre-allocated fields need to be initialized with undefined_value as well
4347   // so that object accesses before the constructor completes (e.g. in the
4348   // debugger) will not cause a crash.
4349   if (map->constructor()->IsJSFunction() &&
4350       JSFunction::cast(map->constructor())->shared()->
4351           IsInobjectSlackTrackingInProgress()) {
4352     // We might want to shrink the object later.
4353     ASSERT(obj->GetInternalFieldCount() == 0);
4354     filler = Heap::one_pointer_filler_map();
4355   } else {
4356     filler = Heap::undefined_value();
4357   }
4358   obj->InitializeBody(map, Heap::undefined_value(), filler);
4359 }
4360
4361
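// Allocates a JSObject based on |map| together with its properties backing
// store, falling back to large object space for oversized instances.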
4362 MaybeObject* Heap::AllocateJSObjectFromMap(Map* map, PretenureFlag pretenure) {
4363   // JSFunctions should be allocated using AllocateFunction to be
4364   // properly initialized.
4365   ASSERT(map->instance_type() != JS_FUNCTION_TYPE);
4366
4367   // Both types of global objects should be allocated using
4368   // AllocateGlobalObject to be properly initialized.
4369   ASSERT(map->instance_type() != JS_GLOBAL_OBJECT_TYPE);
4370   ASSERT(map->instance_type() != JS_BUILTINS_OBJECT_TYPE);
4371
4372   // Allocate the backing storage for the properties.
4373   int prop_size = map->InitialPropertiesLength();
4374   ASSERT(prop_size >= 0);
4375   Object* properties;
4376   { MaybeObject* maybe_properties = AllocateFixedArray(prop_size, pretenure);
4377     if (!maybe_properties->ToObject(&properties)) return maybe_properties;
4378   }
4379
4380   // Allocate the JSObject.
4381   AllocationSpace space =
4382       (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
4383   if (map->instance_size() > Page::kMaxNonCodeHeapObjectSize) space = LO_SPACE;
4384   Object* obj;
4385   MaybeObject* maybe_obj = Allocate(map, space);
4386   if (!maybe_obj->To(&obj)) return maybe_obj;
4387
4388   // Initialize the JSObject.
4389   InitializeJSObjectFromMap(JSObject::cast(obj),
4390                             FixedArray::cast(properties),
4391                             map);
4392   ASSERT(JSObject::cast(obj)->HasFastElements() ||
4393          JSObject::cast(obj)->HasExternalArrayElements());
4394   return obj;
4395 }
4396
4397
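// Variant of AllocateJSObjectFromMap that allocates the object together with
// allocation site tracking information (see AllocateWithAllocationSite).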
4398 MaybeObject* Heap::AllocateJSObjectFromMapWithAllocationSite(Map* map,
4399     Handle<Object> allocation_site_info_payload) {
4400   // JSFunctions should be allocated using AllocateFunction to be
4401   // properly initialized.
4402   ASSERT(map->instance_type() != JS_FUNCTION_TYPE);
4403
4404   // Both types of global objects should be allocated using
4405   // AllocateGlobalObject to be properly initialized.
4406   ASSERT(map->instance_type() != JS_GLOBAL_OBJECT_TYPE);
4407   ASSERT(map->instance_type() != JS_BUILTINS_OBJECT_TYPE);
4408
4409   // Allocate the backing storage for the properties.
4410   int prop_size = map->InitialPropertiesLength();
4411   ASSERT(prop_size >= 0);
4412   Object* properties;
4413   { MaybeObject* maybe_properties = AllocateFixedArray(prop_size);
4414     if (!maybe_properties->ToObject(&properties)) return maybe_properties;
4415   }
4416
4417   // Allocate the JSObject.
4418   AllocationSpace space = NEW_SPACE;
4419   if (map->instance_size() > Page::kMaxNonCodeHeapObjectSize) space = LO_SPACE;
4420   Object* obj;
4421   MaybeObject* maybe_obj = AllocateWithAllocationSite(map, space,
4422       allocation_site_info_payload);
4423   if (!maybe_obj->To(&obj)) return maybe_obj;
4424
4425   // Initialize the JSObject.
4426   InitializeJSObjectFromMap(JSObject::cast(obj),
4427                             FixedArray::cast(properties),
4428                             map);
4429   ASSERT(JSObject::cast(obj)->HasFastElements());
4430   return obj;
4431 }
4432
4433
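// Allocates a JSObject from the constructor's initial map, creating that
// initial map first if the constructor does not have one yet.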
4434 MaybeObject* Heap::AllocateJSObject(JSFunction* constructor,
4435                                     PretenureFlag pretenure) {
4436   // Allocate the initial map if absent.
4437   if (!constructor->has_initial_map()) {
4438     Object* initial_map;
4439     { MaybeObject* maybe_initial_map = AllocateInitialMap(constructor);
4440       if (!maybe_initial_map->ToObject(&initial_map)) return maybe_initial_map;
4441     }
4442     constructor->set_initial_map(Map::cast(initial_map));
4443     Map::cast(initial_map)->set_constructor(constructor);
4444   }
4445   // Allocate the object based on the constructor's initial map.
4446   MaybeObject* result = AllocateJSObjectFromMap(
4447       constructor->initial_map(), pretenure);
4448 #ifdef DEBUG
4449   // Make sure result is NOT a global object if valid.
4450   Object* non_failure;
4451   ASSERT(!result->ToObject(&non_failure) || !non_failure->IsGlobalObject());
4452 #endif
4453   return result;
4454 }
4455
4456
4457 MaybeObject* Heap::AllocateJSObjectWithAllocationSite(JSFunction* constructor,
4458     Handle<Object> allocation_site_info_payload) {
4459   // Allocate the initial map if absent.
4460   if (!constructor->has_initial_map()) {
4461     Object* initial_map;
4462     { MaybeObject* maybe_initial_map = AllocateInitialMap(constructor);
4463       if (!maybe_initial_map->ToObject(&initial_map)) return maybe_initial_map;
4464     }
4465     constructor->set_initial_map(Map::cast(initial_map));
4466     Map::cast(initial_map)->set_constructor(constructor);
4467   }
4468   // Allocate the object based on the constructor's initial map, or on the
4469   // advice carried in the allocation site payload.
4470   Map* initial_map = constructor->initial_map();
4471
4472   JSGlobalPropertyCell* cell = JSGlobalPropertyCell::cast(
4473       *allocation_site_info_payload);
4474   Smi* smi = Smi::cast(cell->value());
4475   ElementsKind to_kind = static_cast<ElementsKind>(smi->value());
4476   AllocationSiteMode mode = TRACK_ALLOCATION_SITE;
4477   if (to_kind != initial_map->elements_kind()) {
4478     MaybeObject* maybe_new_map = initial_map->AsElementsKind(to_kind);
4479     if (!maybe_new_map->To(&initial_map)) return maybe_new_map;
4480     // Possibly alter the mode, since we found an updated elements kind
4481     // in the type info cell.
4482     mode = AllocationSiteInfo::GetMode(to_kind);
4483   }
4484
4485   MaybeObject* result;
4486   if (mode == TRACK_ALLOCATION_SITE) {
4487     result = AllocateJSObjectFromMapWithAllocationSite(initial_map,
4488         allocation_site_info_payload);
4489   } else {
4490     result = AllocateJSObjectFromMap(initial_map, NOT_TENURED);
4491   }
4492 #ifdef DEBUG
4493   // Make sure result is NOT a global object if valid.
4494   Object* non_failure;
4495   ASSERT(!result->ToObject(&non_failure) || !non_failure->IsGlobalObject());
4496 #endif
4497   return result;
4498 }
4499
4500
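// Allocates a generator object from the generator function's initial map,
// allocating the initial map on first use.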
4501 MaybeObject* Heap::AllocateJSGeneratorObject(JSFunction *function) {
4502   ASSERT(function->shared()->is_generator());
4503   Map *map;
4504   if (function->has_initial_map()) {
4505     map = function->initial_map();
4506   } else {
4507     // Allocate the initial map if absent.
4508     MaybeObject* maybe_map = AllocateInitialMap(function);
4509     if (!maybe_map->To(&map)) return maybe_map;
4510     function->set_initial_map(map);
4511     map->set_constructor(function);
4512   }
4513   ASSERT(map->instance_type() == JS_GENERATOR_OBJECT_TYPE);
4514   return AllocateJSObjectFromMap(map);
4515 }
4516
4517
4518 MaybeObject* Heap::AllocateJSModule(Context* context, ScopeInfo* scope_info) {
4519   // Allocate a fresh map. Modules do not have a prototype.
4520   Map* map;
4521   MaybeObject* maybe_map = AllocateMap(JS_MODULE_TYPE, JSModule::kSize);
4522   if (!maybe_map->To(&map)) return maybe_map;
4523   // Allocate the object based on the map.
4524   JSModule* module;
4525   MaybeObject* maybe_module = AllocateJSObjectFromMap(map, TENURED);
4526   if (!maybe_module->To(&module)) return maybe_module;
4527   module->set_context(context);
4528   module->set_scope_info(scope_info);
4529   return module;
4530 }
4531
4532
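// Allocates a JSArray plus a backing store of |capacity| elements that is
// either left uninitialized or filled with the hole, depending on |mode|.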
4533 MaybeObject* Heap::AllocateJSArrayAndStorage(
4534     ElementsKind elements_kind,
4535     int length,
4536     int capacity,
4537     ArrayStorageAllocationMode mode,
4538     PretenureFlag pretenure) {
4539   MaybeObject* maybe_array = AllocateJSArray(elements_kind, pretenure);
4540   JSArray* array;
4541   if (!maybe_array->To(&array)) return maybe_array;
4542
4543   // TODO(mvstanton): this body of code duplicates AllocateJSArrayStorage
4544   // for performance reasons.
4545   ASSERT(capacity >= length);
4546
4547   if (capacity == 0) {
4548     array->set_length(Smi::FromInt(0));
4549     array->set_elements(empty_fixed_array());
4550     return array;
4551   }
4552
4553   FixedArrayBase* elms;
4554   MaybeObject* maybe_elms = NULL;
4555   if (IsFastDoubleElementsKind(elements_kind)) {
4556     if (mode == DONT_INITIALIZE_ARRAY_ELEMENTS) {
4557       maybe_elms = AllocateUninitializedFixedDoubleArray(capacity);
4558     } else {
4559       ASSERT(mode == INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
4560       maybe_elms = AllocateFixedDoubleArrayWithHoles(capacity);
4561     }
4562   } else {
4563     ASSERT(IsFastSmiOrObjectElementsKind(elements_kind));
4564     if (mode == DONT_INITIALIZE_ARRAY_ELEMENTS) {
4565       maybe_elms = AllocateUninitializedFixedArray(capacity);
4566     } else {
4567       ASSERT(mode == INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
4568       maybe_elms = AllocateFixedArrayWithHoles(capacity);
4569     }
4570   }
4571   if (!maybe_elms->To(&elms)) return maybe_elms;
4572
4573   array->set_elements(elms);
4574   array->set_length(Smi::FromInt(length));
4575   return array;
4576 }
4577
4578
4579 MaybeObject* Heap::AllocateJSArrayAndStorageWithAllocationSite(
4580     ElementsKind elements_kind,
4581     int length,
4582     int capacity,
4583     Handle<Object> allocation_site_payload,
4584     ArrayStorageAllocationMode mode) {
4585   MaybeObject* maybe_array = AllocateJSArrayWithAllocationSite(elements_kind,
4586       allocation_site_payload);
4587   JSArray* array;
4588   if (!maybe_array->To(&array)) return maybe_array;
4589   return AllocateJSArrayStorage(array, length, capacity, mode);
4590 }
4591
4592
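// Allocates and installs the elements backing store for an already allocated
// JSArray.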
4593 MaybeObject* Heap::AllocateJSArrayStorage(
4594     JSArray* array,
4595     int length,
4596     int capacity,
4597     ArrayStorageAllocationMode mode) {
4598   ASSERT(capacity >= length);
4599
4600   if (capacity == 0) {
4601     array->set_length(Smi::FromInt(0));
4602     array->set_elements(empty_fixed_array());
4603     return array;
4604   }
4605
4606   FixedArrayBase* elms;
4607   MaybeObject* maybe_elms = NULL;
4608   ElementsKind elements_kind = array->GetElementsKind();
4609   if (IsFastDoubleElementsKind(elements_kind)) {
4610     if (mode == DONT_INITIALIZE_ARRAY_ELEMENTS) {
4611       maybe_elms = AllocateUninitializedFixedDoubleArray(capacity);
4612     } else {
4613       ASSERT(mode == INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
4614       maybe_elms = AllocateFixedDoubleArrayWithHoles(capacity);
4615     }
4616   } else {
4617     ASSERT(IsFastSmiOrObjectElementsKind(elements_kind));
4618     if (mode == DONT_INITIALIZE_ARRAY_ELEMENTS) {
4619       maybe_elms = AllocateUninitializedFixedArray(capacity);
4620     } else {
4621       ASSERT(mode == INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
4622       maybe_elms = AllocateFixedArrayWithHoles(capacity);
4623     }
4624   }
4625   if (!maybe_elms->To(&elms)) return maybe_elms;
4626
4627   array->set_elements(elms);
4628   array->set_length(Smi::FromInt(length));
4629   return array;
4630 }
4631
4632
4633 MaybeObject* Heap::AllocateJSArrayWithElements(
4634     FixedArrayBase* elements,
4635     ElementsKind elements_kind,
4636     int length,
4637     PretenureFlag pretenure) {
4638   MaybeObject* maybe_array = AllocateJSArray(elements_kind, pretenure);
4639   JSArray* array;
4640   if (!maybe_array->To(&array)) return maybe_array;
4641
4642   array->set_elements(elements);
4643   array->set_length(Smi::FromInt(length));
4644   array->ValidateElements();
4645   return array;
4646 }
4647
4648
4649 MaybeObject* Heap::AllocateJSProxy(Object* handler, Object* prototype) {
4650   // Allocate map.
4651   // TODO(rossberg): Once we optimize proxies, think about a scheme to share
4652   // maps. Will probably depend on the identity of the handler object, too.
4653   Map* map;
4654   MaybeObject* maybe_map_obj = AllocateMap(JS_PROXY_TYPE, JSProxy::kSize);
4655   if (!maybe_map_obj->To<Map>(&map)) return maybe_map_obj;
4656   map->set_prototype(prototype);
4657
4658   // Allocate the proxy object.
4659   JSProxy* result;
4660   MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
4661   if (!maybe_result->To<JSProxy>(&result)) return maybe_result;
4662   result->InitializeBody(map->instance_size(), Smi::FromInt(0));
4663   result->set_handler(handler);
4664   result->set_hash(undefined_value(), SKIP_WRITE_BARRIER);
4665   return result;
4666 }
4667
4668
4669 MaybeObject* Heap::AllocateJSFunctionProxy(Object* handler,
4670                                            Object* call_trap,
4671                                            Object* construct_trap,
4672                                            Object* prototype) {
4673   // Allocate map.
4674   // TODO(rossberg): Once we optimize proxies, think about a scheme to share
4675   // maps. Will probably depend on the identity of the handler object, too.
4676   Map* map;
4677   MaybeObject* maybe_map_obj =
4678       AllocateMap(JS_FUNCTION_PROXY_TYPE, JSFunctionProxy::kSize);
4679   if (!maybe_map_obj->To<Map>(&map)) return maybe_map_obj;
4680   map->set_prototype(prototype);
4681
4682   // Allocate the proxy object.
4683   JSFunctionProxy* result;
4684   MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
4685   if (!maybe_result->To<JSFunctionProxy>(&result)) return maybe_result;
4686   result->InitializeBody(map->instance_size(), Smi::FromInt(0));
4687   result->set_handler(handler);
4688   result->set_hash(undefined_value(), SKIP_WRITE_BARRIER);
4689   result->set_call_trap(call_trap);
4690   result->set_construct_trap(construct_trap);
4691   return result;
4692 }
4693
4694
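// Allocates a global object in dictionary mode, moving the accessors
// described by the initial map's descriptors into a NameDictionary backing
// store and installing a copy of the map with dictionary_map set.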
4695 MaybeObject* Heap::AllocateGlobalObject(JSFunction* constructor) {
4696   ASSERT(constructor->has_initial_map());
4697   Map* map = constructor->initial_map();
4698   ASSERT(map->is_dictionary_map());
4699
4700   // Make sure no field properties are described in the initial map.
4701   // This guarantees that normalizing the properties does not
4702   // require us to change property values to JSGlobalPropertyCells.
4703   ASSERT(map->NextFreePropertyIndex() == 0);
4704
4705   // Make sure we don't have a ton of pre-allocated slots in the
4706   // global objects. They will be unused once we normalize the object.
4707   ASSERT(map->unused_property_fields() == 0);
4708   ASSERT(map->inobject_properties() == 0);
4709
4710   // Initial size of the backing store to avoid resizing the storage during
4711   // bootstrapping. The size differs between the JS global object and the
4712   // builtins object.
4713   int initial_size = map->instance_type() == JS_GLOBAL_OBJECT_TYPE ? 64 : 512;
4714
4715   // Allocate a dictionary object for backing storage.
4716   NameDictionary* dictionary;
4717   MaybeObject* maybe_dictionary =
4718       NameDictionary::Allocate(
4719           this,
4720           map->NumberOfOwnDescriptors() * 2 + initial_size);
4721   if (!maybe_dictionary->To(&dictionary)) return maybe_dictionary;
4722
4723   // The global object might be created from an object template with accessors.
4724   // Fill these accessors into the dictionary.
4725   DescriptorArray* descs = map->instance_descriptors();
4726   for (int i = 0; i < map->NumberOfOwnDescriptors(); i++) {
4727     PropertyDetails details = descs->GetDetails(i);
4728     ASSERT(details.type() == CALLBACKS);  // Only accessors are expected.
4729     PropertyDetails d = PropertyDetails(details.attributes(), CALLBACKS, i + 1);
4730     Object* value = descs->GetCallbacksObject(i);
4731     MaybeObject* maybe_value = AllocateJSGlobalPropertyCell(value);
4732     if (!maybe_value->ToObject(&value)) return maybe_value;
4733
4734     MaybeObject* maybe_added = dictionary->Add(descs->GetKey(i), value, d);
4735     if (!maybe_added->To(&dictionary)) return maybe_added;
4736   }
4737
4738   // Allocate the global object and initialize it with the backing store.
4739   JSObject* global;
4740   MaybeObject* maybe_global = Allocate(map, OLD_POINTER_SPACE);
4741   if (!maybe_global->To(&global)) return maybe_global;
4742
4743   InitializeJSObjectFromMap(global, dictionary, map);
4744
4745   // Create a new map for the global object.
4746   Map* new_map;
4747   MaybeObject* maybe_map = map->CopyDropDescriptors();
4748   if (!maybe_map->To(&new_map)) return maybe_map;
4749   new_map->set_dictionary_map(true);
4750
4751   // Set up the global object as a normalized object.
4752   global->set_map(new_map);
4753   global->set_properties(dictionary);
4754
4755   // Make sure result is a global object with properties in dictionary.
4756   ASSERT(global->IsGlobalObject());
4757   ASSERT(!global->HasFastProperties());
4758   return global;
4759 }
4760
4761
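// Makes a shallow clone of |source|: the object block is copied verbatim and
// the elements and properties backing stores are duplicated (copy-on-write
// elements are shared).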
4762 MaybeObject* Heap::CopyJSObject(JSObject* source) {
4763   // Never used to copy functions.  If functions need to be copied we
4764   // have to be careful to clear the literals array.
4765   SLOW_ASSERT(!source->IsJSFunction());
4766
4767   // Make the clone.
4768   Map* map = source->map();
4769   int object_size = map->instance_size();
4770   Object* clone;
4771
4772   WriteBarrierMode wb_mode = UPDATE_WRITE_BARRIER;
4773
4774   // If we're forced to always allocate, we use the general allocation
4775   // functions which may leave us with an object in old space.
4776   if (always_allocate()) {
4777     { MaybeObject* maybe_clone =
4778           AllocateRaw(object_size, NEW_SPACE, OLD_POINTER_SPACE);
4779       if (!maybe_clone->ToObject(&clone)) return maybe_clone;
4780     }
4781     Address clone_address = HeapObject::cast(clone)->address();
4782     CopyBlock(clone_address,
4783               source->address(),
4784               object_size);
4785     // Update write barrier for all fields that lie beyond the header.
4786     RecordWrites(clone_address,
4787                  JSObject::kHeaderSize,
4788                  (object_size - JSObject::kHeaderSize) / kPointerSize);
4789   } else {
4790     wb_mode = SKIP_WRITE_BARRIER;
4791
4792     { MaybeObject* maybe_clone = new_space_.AllocateRaw(object_size);
4793       if (!maybe_clone->ToObject(&clone)) return maybe_clone;
4794     }
4795     SLOW_ASSERT(InNewSpace(clone));
4796     // Since we know the clone is allocated in new space, we can copy
4797     // the contents without worrying about updating the write barrier.
4798     CopyBlock(HeapObject::cast(clone)->address(),
4799               source->address(),
4800               object_size);
4801   }
4802
4803   SLOW_ASSERT(
4804       JSObject::cast(clone)->GetElementsKind() == source->GetElementsKind());
4805   FixedArrayBase* elements = FixedArrayBase::cast(source->elements());
4806   FixedArray* properties = FixedArray::cast(source->properties());
4807   // Update elements if necessary.
4808   if (elements->length() > 0) {
4809     Object* elem;
4810     { MaybeObject* maybe_elem;
4811       if (elements->map() == fixed_cow_array_map()) {
4812         maybe_elem = FixedArray::cast(elements);
4813       } else if (source->HasFastDoubleElements()) {
4814         maybe_elem = CopyFixedDoubleArray(FixedDoubleArray::cast(elements));
4815       } else {
4816         maybe_elem = CopyFixedArray(FixedArray::cast(elements));
4817       }
4818       if (!maybe_elem->ToObject(&elem)) return maybe_elem;
4819     }
4820     JSObject::cast(clone)->set_elements(FixedArrayBase::cast(elem), wb_mode);
4821   }
4822   // Update properties if necessary.
4823   if (properties->length() > 0) {
4824     Object* prop;
4825     { MaybeObject* maybe_prop = CopyFixedArray(properties);
4826       if (!maybe_prop->ToObject(&prop)) return maybe_prop;
4827     }
4828     JSObject::cast(clone)->set_properties(FixedArray::cast(prop), wb_mode);
4829   }
4830   // Return the new clone.
4831   return clone;
4832 }
4833
4834
4835 MaybeObject* Heap::CopyJSObjectWithAllocationSite(JSObject* source) {
4836   // Never used to copy functions.  If functions need to be copied we
4837   // have to be careful to clear the literals array.
4838   SLOW_ASSERT(!source->IsJSFunction());
4839
4840   // Make the clone.
4841   Map* map = source->map();
4842   int object_size = map->instance_size();
4843   Object* clone;
4844
4845   ASSERT(map->CanTrackAllocationSite());
4846   ASSERT(map->instance_type() == JS_ARRAY_TYPE);
4847   WriteBarrierMode wb_mode = UPDATE_WRITE_BARRIER;
4848
4849   // If we're forced to always allocate, we use the general allocation
4850   // functions which may leave us with an object in old space.
4851   int adjusted_object_size = object_size;
4852   if (always_allocate()) {
4853     // We'll only track the origin if we are certain to allocate in new space.
4854     const int kMinFreeNewSpaceAfterGC = InitialSemiSpaceSize() * 3/4;
4855     if ((object_size + AllocationSiteInfo::kSize) < kMinFreeNewSpaceAfterGC) {
4856       adjusted_object_size += AllocationSiteInfo::kSize;
4857     }
4858
4859     { MaybeObject* maybe_clone =
4860           AllocateRaw(adjusted_object_size, NEW_SPACE, OLD_POINTER_SPACE);
4861       if (!maybe_clone->ToObject(&clone)) return maybe_clone;
4862     }
4863     Address clone_address = HeapObject::cast(clone)->address();
4864     CopyBlock(clone_address,
4865               source->address(),
4866               object_size);
4867     // Update write barrier for all fields that lie beyond the header.
4868     int write_barrier_offset = adjusted_object_size > object_size
4869         ? JSArray::kSize + AllocationSiteInfo::kSize
4870         : JSObject::kHeaderSize;
4871     if (((object_size - write_barrier_offset) / kPointerSize) > 0) {
4872       RecordWrites(clone_address,
4873                    write_barrier_offset,
4874                    (object_size - write_barrier_offset) / kPointerSize);
4875     }
4876
4877     // Track allocation site information if we failed to allocate it inline.
4878     if (InNewSpace(clone) &&
4879         adjusted_object_size == object_size) {
4880       MaybeObject* maybe_alloc_info =
4881           AllocateStruct(ALLOCATION_SITE_INFO_TYPE);
4882       AllocationSiteInfo* alloc_info;
4883       if (maybe_alloc_info->To(&alloc_info)) {
4884         alloc_info->set_map_no_write_barrier(allocation_site_info_map());
4885         alloc_info->set_payload(source, SKIP_WRITE_BARRIER);
4886       }
4887     }
4888   } else {
4889     wb_mode = SKIP_WRITE_BARRIER;
4890     adjusted_object_size += AllocationSiteInfo::kSize;
4891
4892     { MaybeObject* maybe_clone = new_space_.AllocateRaw(adjusted_object_size);
4893       if (!maybe_clone->ToObject(&clone)) return maybe_clone;
4894     }
4895     SLOW_ASSERT(InNewSpace(clone));
4896     // Since we know the clone is allocated in new space, we can copy
4897     // the contents without worrying about updating the write barrier.
4898     CopyBlock(HeapObject::cast(clone)->address(),
4899               source->address(),
4900               object_size);
4901   }
4902
4903   if (adjusted_object_size > object_size) {
4904     AllocationSiteInfo* alloc_info = reinterpret_cast<AllocationSiteInfo*>(
4905         reinterpret_cast<Address>(clone) + object_size);
4906     alloc_info->set_map_no_write_barrier(allocation_site_info_map());
4907     alloc_info->set_payload(source, SKIP_WRITE_BARRIER);
4908   }
4909
4910   SLOW_ASSERT(
4911       JSObject::cast(clone)->GetElementsKind() == source->GetElementsKind());
4912   FixedArrayBase* elements = FixedArrayBase::cast(source->elements());
4913   FixedArray* properties = FixedArray::cast(source->properties());
4914   // Update elements if necessary.
4915   if (elements->length() > 0) {
4916     Object* elem;
4917     { MaybeObject* maybe_elem;
4918       if (elements->map() == fixed_cow_array_map()) {
4919         maybe_elem = FixedArray::cast(elements);
4920       } else if (source->HasFastDoubleElements()) {
4921         maybe_elem = CopyFixedDoubleArray(FixedDoubleArray::cast(elements));
4922       } else {
4923         maybe_elem = CopyFixedArray(FixedArray::cast(elements));
4924       }
4925       if (!maybe_elem->ToObject(&elem)) return maybe_elem;
4926     }
4927     JSObject::cast(clone)->set_elements(FixedArrayBase::cast(elem), wb_mode);
4928   }
4929   // Update properties if necessary.
4930   if (properties->length() > 0) {
4931     Object* prop;
4932     { MaybeObject* maybe_prop = CopyFixedArray(properties);
4933       if (!maybe_prop->ToObject(&prop)) return maybe_prop;
4934     }
4935     JSObject::cast(clone)->set_properties(FixedArray::cast(prop), wb_mode);
4936   }
4937   // Return the new clone.
4938   return clone;
4939 }
4940
4941
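// Converts an existing JSReceiver (e.g. a proxy being fixed) in place into an
// object of the given instance type, padding any leftover space with a filler.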
4942 MaybeObject* Heap::ReinitializeJSReceiver(
4943     JSReceiver* object, InstanceType type, int size) {
4944   ASSERT(type >= FIRST_JS_OBJECT_TYPE);
4945
4946   // Allocate fresh map.
4947   // TODO(rossberg): Once we optimize proxies, cache these maps.
4948   Map* map;
4949   MaybeObject* maybe = AllocateMap(type, size);
4950   if (!maybe->To<Map>(&map)) return maybe;
4951
4952   // Check that the receiver has at least the size of the fresh object.
4953   int size_difference = object->map()->instance_size() - map->instance_size();
4954   ASSERT(size_difference >= 0);
4955
4956   map->set_prototype(object->map()->prototype());
4957
4958   // Allocate the backing storage for the properties.
4959   int prop_size = map->unused_property_fields() - map->inobject_properties();
4960   Object* properties;
4961   maybe = AllocateFixedArray(prop_size, TENURED);
4962   if (!maybe->ToObject(&properties)) return maybe;
4963
4964   // Functions require some allocation, which might fail here.
4965   SharedFunctionInfo* shared = NULL;
4966   if (type == JS_FUNCTION_TYPE) {
4967     String* name;
4968     maybe =
4969         InternalizeOneByteString(STATIC_ASCII_VECTOR("<freezing call trap>"));
4970     if (!maybe->To<String>(&name)) return maybe;
4971     maybe = AllocateSharedFunctionInfo(name);
4972     if (!maybe->To<SharedFunctionInfo>(&shared)) return maybe;
4973   }
4974
4975   // Because of possible retries of this function after failure,
4976   // we must NOT fail after this point, where we have changed the type!
4977
4978   // Reset the map for the object.
4979   object->set_map(map);
4980   JSObject* jsobj = JSObject::cast(object);
4981
4982   // Reinitialize the object from the constructor map.
4983   InitializeJSObjectFromMap(jsobj, FixedArray::cast(properties), map);
4984
4985   // Functions require some minimal initialization.
4986   if (type == JS_FUNCTION_TYPE) {
4987     map->set_function_with_prototype(true);
4988     InitializeFunction(JSFunction::cast(object), shared, the_hole_value());
4989     JSFunction::cast(object)->set_context(
4990         isolate()->context()->native_context());
4991   }
4992
4993   // Put in filler if the new object is smaller than the old.
4994   if (size_difference > 0) {
4995     CreateFillerObjectAt(
4996         object->address() + map->instance_size(), size_difference);
4997   }
4998
4999   return object;
5000 }
5001
5002
5003 MaybeObject* Heap::ReinitializeJSGlobalProxy(JSFunction* constructor,
5004                                              JSGlobalProxy* object) {
5005   ASSERT(constructor->has_initial_map());
5006   Map* map = constructor->initial_map();
5007
5008   // Check that the already allocated object has the same size and type as
5009   // objects allocated using the constructor.
5010   ASSERT(map->instance_size() == object->map()->instance_size());
5011   ASSERT(map->instance_type() == object->map()->instance_type());
5012
5013   // Allocate the backing storage for the properties.
5014   int prop_size = map->unused_property_fields() - map->inobject_properties();
5015   Object* properties;
5016   { MaybeObject* maybe_properties = AllocateFixedArray(prop_size, TENURED);
5017     if (!maybe_properties->ToObject(&properties)) return maybe_properties;
5018   }
5019
5020   // Reset the map for the object.
5021   object->set_map(constructor->initial_map());
5022
5023   // Reinitialize the object from the constructor map.
5024   InitializeJSObjectFromMap(object, FixedArray::cast(properties), map);
5025   return object;
5026 }
5027
5028
5029 MaybeObject* Heap::AllocateStringFromOneByte(Vector<const uint8_t> string,
5030                                            PretenureFlag pretenure) {
5031   int length = string.length();
5032   if (length == 1) {
5033     return Heap::LookupSingleCharacterStringFromCode(string[0]);
5034   }
5035   Object* result;
5036   { MaybeObject* maybe_result =
5037         AllocateRawOneByteString(string.length(), pretenure);
5038     if (!maybe_result->ToObject(&result)) return maybe_result;
5039   }
5040
5041   // Copy the characters into the new object.
5042   CopyChars(SeqOneByteString::cast(result)->GetChars(),
5043             string.start(),
5044             length);
5045   return result;
5046 }
5047
5048
5049 MaybeObject* Heap::AllocateStringFromUtf8Slow(Vector<const char> string,
5050                                               int non_ascii_start,
5051                                               PretenureFlag pretenure) {
5052   // Continue counting the number of characters in the UTF-8 string, starting
5053   // from the first non-ascii character or word.
5054   Access<UnicodeCache::Utf8Decoder>
5055       decoder(isolate_->unicode_cache()->utf8_decoder());
5056   decoder->Reset(string.start() + non_ascii_start,
5057                  string.length() - non_ascii_start);
5058   int utf16_length = decoder->Utf16Length();
5059   ASSERT(utf16_length > 0);
5060   // Allocate string.
5061   Object* result;
5062   {
5063     int chars = non_ascii_start + utf16_length;
5064     MaybeObject* maybe_result = AllocateRawTwoByteString(chars, pretenure);
5065     if (!maybe_result->ToObject(&result)) return maybe_result;
5066   }
5067   // Convert and copy the characters into the new object.
5068   SeqTwoByteString* twobyte = SeqTwoByteString::cast(result);
5069   // Copy the ASCII portion.
5070   uint16_t* data = twobyte->GetChars();
5071   if (non_ascii_start != 0) {
5072     const char* ascii_data = string.start();
5073     for (int i = 0; i < non_ascii_start; i++) {
5074       *data++ = *ascii_data++;
5075     }
5076   }
5077   // Now write the remainder.
5078   decoder->WriteUtf16(data, utf16_length);
5079   return result;
5080 }
5081
5082
5083 MaybeObject* Heap::AllocateStringFromTwoByte(Vector<const uc16> string,
5084                                              PretenureFlag pretenure) {
5085   // Check if the string can be represented as a one-byte string.
5086   Object* result;
5087   int length = string.length();
5088   const uc16* start = string.start();
5089
5090   if (String::IsOneByte(start, length)) {
5091     MaybeObject* maybe_result = AllocateRawOneByteString(length, pretenure);
5092     if (!maybe_result->ToObject(&result)) return maybe_result;
5093     CopyChars(SeqOneByteString::cast(result)->GetChars(), start, length);
5094   } else {  // It's not a one byte string.
5095     MaybeObject* maybe_result = AllocateRawTwoByteString(length, pretenure);
5096     if (!maybe_result->ToObject(&result)) return maybe_result;
5097     CopyChars(SeqTwoByteString::cast(result)->GetChars(), start, length);
5098   }
5099   return result;
5100 }
5101
5102
5103 Map* Heap::InternalizedStringMapForString(String* string) {
5104   // Strings in new space cannot be used as internalized strings.
5105   if (InNewSpace(string)) return NULL;
5106
5107   // Find the corresponding internalized string map for strings.
5108   switch (string->map()->instance_type()) {
5109     case STRING_TYPE: return internalized_string_map();
5110     case ASCII_STRING_TYPE: return ascii_internalized_string_map();
5111     case CONS_STRING_TYPE: return cons_internalized_string_map();
5112     case CONS_ASCII_STRING_TYPE: return cons_ascii_internalized_string_map();
5113     case EXTERNAL_STRING_TYPE: return external_internalized_string_map();
5114     case EXTERNAL_ASCII_STRING_TYPE:
5115       return external_ascii_internalized_string_map();
5116     case EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE:
5117       return external_internalized_string_with_one_byte_data_map();
5118     case SHORT_EXTERNAL_STRING_TYPE:
5119       return short_external_internalized_string_map();
5120     case SHORT_EXTERNAL_ASCII_STRING_TYPE:
5121       return short_external_ascii_internalized_string_map();
5122     case SHORT_EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE:
5123       return short_external_internalized_string_with_one_byte_data_map();
5124     default: return NULL;  // No match found.
5125   }
5126 }
5127
5128
5129 static inline void WriteOneByteData(Vector<const char> vector,
5130                                     uint8_t* chars,
5131                                     int len) {
5132   // Only works for ASCII data.
5133   ASSERT(vector.length() == len);
5134   OS::MemCopy(chars, vector.start(), len);
5135 }
5136
5137 static inline void WriteTwoByteData(Vector<const char> vector,
5138                                     uint16_t* chars,
5139                                     int len) {
5140   const uint8_t* stream = reinterpret_cast<const uint8_t*>(vector.start());
5141   unsigned stream_length = vector.length();
5142   while (stream_length != 0) {
5143     unsigned consumed = 0;
5144     uint32_t c = unibrow::Utf8::ValueOf(stream, stream_length, &consumed);
5145     ASSERT(c != unibrow::Utf8::kBadChar);
5146     ASSERT(consumed <= stream_length);
5147     stream_length -= consumed;
5148     stream += consumed;
5149     if (c > unibrow::Utf16::kMaxNonSurrogateCharCode) {
5150       len -= 2;
5151       if (len < 0) break;
5152       *chars++ = unibrow::Utf16::LeadSurrogate(c);
5153       *chars++ = unibrow::Utf16::TrailSurrogate(c);
5154     } else {
5155       len -= 1;
5156       if (len < 0) break;
5157       *chars++ = c;
5158     }
5159   }
5160   ASSERT(stream_length == 0);
5161   ASSERT(len == 0);
5162 }
5163
5164
5165 static inline void WriteOneByteData(String* s, uint8_t* chars, int len) {
5166   ASSERT(s->length() == len);
5167   String::WriteToFlat(s, chars, 0, len);
5168 }
5169
5170 static inline void WriteTwoByteData(String* s, uint16_t* chars, int len) {
5171   ASSERT(s->length() == len);
5172   String::WriteToFlat(s, chars, 0, len);
5173 }
5174
5175
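// Allocates a sequential internalized string of |chars| characters directly
// in old data space (or large object space) and fills it from |t|, which is
// either a String or a character vector.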
5176 template<bool is_one_byte, typename T>
5177 MaybeObject* Heap::AllocateInternalizedStringImpl(
5178     T t, int chars, uint32_t hash_field) {
5179   ASSERT(chars >= 0);
5180   // Compute map and object size.
5181   int size;
5182   Map* map;
5183
5184   if (is_one_byte) {
5185     if (chars > SeqOneByteString::kMaxLength) {
5186       return Failure::OutOfMemoryException(0x9);
5187     }
5188     map = ascii_internalized_string_map();
5189     size = SeqOneByteString::SizeFor(chars);
5190   } else {
5191     if (chars > SeqTwoByteString::kMaxLength) {
5192       return Failure::OutOfMemoryException(0xa);
5193     }
5194     map = internalized_string_map();
5195     size = SeqTwoByteString::SizeFor(chars);
5196   }
5197
5198   // Allocate string.
5199   Object* result;
5200   { MaybeObject* maybe_result = (size > Page::kMaxNonCodeHeapObjectSize)
5201                    ? lo_space_->AllocateRaw(size, NOT_EXECUTABLE)
5202                    : old_data_space_->AllocateRaw(size);
5203     if (!maybe_result->ToObject(&result)) return maybe_result;
5204   }
5205
5206   reinterpret_cast<HeapObject*>(result)->set_map_no_write_barrier(map);
5207   // Set length and hash fields of the allocated string.
5208   String* answer = String::cast(result);
5209   answer->set_length(chars);
5210   answer->set_hash_field(hash_field);
5211
5212   ASSERT_EQ(size, answer->Size());
5213
5214   if (is_one_byte) {
5215     WriteOneByteData(t, SeqOneByteString::cast(answer)->GetChars(), chars);
5216   } else {
5217     WriteTwoByteData(t, SeqTwoByteString::cast(answer)->GetChars(), chars);
5218   }
5219   return answer;
5220 }
5221
5222
5223 // Need explicit instantiations.
5224 template
5225 MaybeObject* Heap::AllocateInternalizedStringImpl<true>(String*, int, uint32_t);
5226 template
5227 MaybeObject* Heap::AllocateInternalizedStringImpl<false>(
5228     String*, int, uint32_t);
5229 template
5230 MaybeObject* Heap::AllocateInternalizedStringImpl<false>(
5231     Vector<const char>, int, uint32_t);
5232
5233
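// Allocates an uninitialized sequential one-byte string, setting only its
// map, length and hash field.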
5234 MaybeObject* Heap::AllocateRawOneByteString(int length,
5235                                             PretenureFlag pretenure) {
5236   if (length < 0 || length > SeqOneByteString::kMaxLength) {
5237     return Failure::OutOfMemoryException(0xb);
5238   }
5239
5240   int size = SeqOneByteString::SizeFor(length);
5241   ASSERT(size <= SeqOneByteString::kMaxSize);
5242
5243   AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
5244   AllocationSpace retry_space = OLD_DATA_SPACE;
5245
5246   if (space == NEW_SPACE) {
5247     if (size > kMaxObjectSizeInNewSpace) {
5248       // Allocate in large object space; the retry space will be ignored.
5249       space = LO_SPACE;
5250     } else if (size > Page::kMaxNonCodeHeapObjectSize) {
5251       // Allocate in new space, retry in large object space.
5252       retry_space = LO_SPACE;
5253     }
5254   } else if (space == OLD_DATA_SPACE &&
5255              size > Page::kMaxNonCodeHeapObjectSize) {
5256     space = LO_SPACE;
5257   }
5258   Object* result;
5259   { MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
5260     if (!maybe_result->ToObject(&result)) return maybe_result;
5261   }
5262
5263   // Partially initialize the object.
5264   HeapObject::cast(result)->set_map_no_write_barrier(ascii_string_map());
5265   String::cast(result)->set_length(length);
5266   String::cast(result)->set_hash_field(String::kEmptyHashField);
5267   ASSERT_EQ(size, HeapObject::cast(result)->Size());
5268
5269   return result;
5270 }
5271
5272
5273 MaybeObject* Heap::AllocateRawTwoByteString(int length,
5274                                             PretenureFlag pretenure) {
5275   if (length < 0 || length > SeqTwoByteString::kMaxLength) {
5276     return Failure::OutOfMemoryException(0xc);
5277   }
5278   int size = SeqTwoByteString::SizeFor(length);
5279   ASSERT(size <= SeqTwoByteString::kMaxSize);
5280   AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
5281   AllocationSpace retry_space = OLD_DATA_SPACE;
5282
5283   if (space == NEW_SPACE) {
5284     if (size > kMaxObjectSizeInNewSpace) {
5285       // Allocate in large object space; the retry space will be ignored.
5286       space = LO_SPACE;
5287     } else if (size > Page::kMaxNonCodeHeapObjectSize) {
5288       // Allocate in new space, retry in large object space.
5289       retry_space = LO_SPACE;
5290     }
5291   } else if (space == OLD_DATA_SPACE &&
5292              size > Page::kMaxNonCodeHeapObjectSize) {
5293     space = LO_SPACE;
5294   }
5295   Object* result;
5296   { MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
5297     if (!maybe_result->ToObject(&result)) return maybe_result;
5298   }
5299
5300   // Partially initialize the object.
5301   HeapObject::cast(result)->set_map_no_write_barrier(string_map());
5302   String::cast(result)->set_length(length);
5303   String::cast(result)->set_hash_field(String::kEmptyHashField);
5304   ASSERT_EQ(size, HeapObject::cast(result)->Size());
5305   return result;
5306 }
5307
5308
5309 MaybeObject* Heap::AllocateJSArray(
5310     ElementsKind elements_kind,
5311     PretenureFlag pretenure) {
5312   Context* native_context = isolate()->context()->native_context();
5313   JSFunction* array_function = native_context->array_function();
5314   Map* map = array_function->initial_map();
5315   Map* transition_map = isolate()->get_initial_js_array_map(elements_kind);
5316   if (transition_map != NULL) map = transition_map;
5317   return AllocateJSObjectFromMap(map, pretenure);
5318 }
5319
5320
5321 MaybeObject* Heap::AllocateJSArrayWithAllocationSite(
5322     ElementsKind elements_kind,
5323     Handle<Object> allocation_site_info_payload) {
5324   Context* native_context = isolate()->context()->native_context();
5325   JSFunction* array_function = native_context->array_function();
5326   Map* map = array_function->initial_map();
5327   Object* maybe_map_array = native_context->js_array_maps();
5328   if (!maybe_map_array->IsUndefined()) {
5329     Object* maybe_transitioned_map =
5330         FixedArray::cast(maybe_map_array)->get(elements_kind);
5331     if (!maybe_transitioned_map->IsUndefined()) {
5332       map = Map::cast(maybe_transitioned_map);
5333     }
5334   }
5335   return AllocateJSObjectFromMapWithAllocationSite(map,
5336       allocation_site_info_payload);
5337 }
5338
5339
5340 MaybeObject* Heap::AllocateEmptyFixedArray() {
5341   int size = FixedArray::SizeFor(0);
5342   Object* result;
5343   { MaybeObject* maybe_result =
5344         AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE);
5345     if (!maybe_result->ToObject(&result)) return maybe_result;
5346   }
5347   // Initialize the object.
5348   reinterpret_cast<FixedArray*>(result)->set_map_no_write_barrier(
5349       fixed_array_map());
5350   reinterpret_cast<FixedArray*>(result)->set_length(0);
5351   return result;
5352 }
5353
5354 MaybeObject* Heap::AllocateEmptyExternalArray(ExternalArrayType array_type) {
5355   return AllocateExternalArray(0, array_type, NULL, TENURED);
5356 }
5357
5358
5359 MaybeObject* Heap::AllocateRawFixedArray(int length) {
5360   if (length < 0 || length > FixedArray::kMaxLength) {
5361     return Failure::OutOfMemoryException(0xd);
5362   }
5363   ASSERT(length > 0);
5364   // Use the general function if we're forced to always allocate.
5365   if (always_allocate()) return AllocateFixedArray(length, TENURED);
5366   // Allocate the raw data for a fixed array.
5367   int size = FixedArray::SizeFor(length);
5368   return size <= kMaxObjectSizeInNewSpace
5369       ? new_space_.AllocateRaw(size)
5370       : lo_space_->AllocateRaw(size, NOT_EXECUTABLE);
5371 }
5372
5373
5374 MaybeObject* Heap::CopyFixedArrayWithMap(FixedArray* src, Map* map) {
5375   int len = src->length();
5376   Object* obj;
5377   { MaybeObject* maybe_obj = AllocateRawFixedArray(len);
5378     if (!maybe_obj->ToObject(&obj)) return maybe_obj;
5379   }
5380   if (InNewSpace(obj)) {
5381     HeapObject* dst = HeapObject::cast(obj);
5382     dst->set_map_no_write_barrier(map);
5383     CopyBlock(dst->address() + kPointerSize,
5384               src->address() + kPointerSize,
5385               FixedArray::SizeFor(len) - kPointerSize);
5386     return obj;
5387   }
5388   HeapObject::cast(obj)->set_map_no_write_barrier(map);
5389   FixedArray* result = FixedArray::cast(obj);
5390   result->set_length(len);
5391
5392   // Copy the content
5393   DisallowHeapAllocation no_gc;
5394   WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
5395   for (int i = 0; i < len; i++) result->set(i, src->get(i), mode);
5396   return result;
5397 }
5398
5399
5400 MaybeObject* Heap::CopyFixedDoubleArrayWithMap(FixedDoubleArray* src,
5401                                                Map* map) {
5402   int len = src->length();
5403   Object* obj;
5404   { MaybeObject* maybe_obj = AllocateRawFixedDoubleArray(len, NOT_TENURED);
5405     if (!maybe_obj->ToObject(&obj)) return maybe_obj;
5406   }
5407   HeapObject* dst = HeapObject::cast(obj);
5408   dst->set_map_no_write_barrier(map);
5409   CopyBlock(
5410       dst->address() + FixedDoubleArray::kLengthOffset,
5411       src->address() + FixedDoubleArray::kLengthOffset,
5412       FixedDoubleArray::SizeFor(len) - FixedDoubleArray::kLengthOffset);
5413   return obj;
5414 }
5415
5416
5417 MaybeObject* Heap::AllocateFixedArray(int length) {
5418   ASSERT(length >= 0);
5419   if (length == 0) return empty_fixed_array();
5420   Object* result;
5421   { MaybeObject* maybe_result = AllocateRawFixedArray(length);
5422     if (!maybe_result->ToObject(&result)) return maybe_result;
5423   }
5424   // Initialize header.
5425   FixedArray* array = reinterpret_cast<FixedArray*>(result);
5426   array->set_map_no_write_barrier(fixed_array_map());
5427   array->set_length(length);
5428   // Initialize body.
5429   ASSERT(!InNewSpace(undefined_value()));
5430   MemsetPointer(array->data_start(), undefined_value(), length);
5431   return result;
5432 }
5433
5434
5435 MaybeObject* Heap::AllocateRawFixedArray(int length, PretenureFlag pretenure) {
5436   if (length < 0 || length > FixedArray::kMaxLength) {
5437     return Failure::OutOfMemoryException(0xe);
5438   }
5439
5440   AllocationSpace space =
5441       (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
5442   int size = FixedArray::SizeFor(length);
5443   if (space == NEW_SPACE && size > kMaxObjectSizeInNewSpace) {
5444     // Too big for new space.
5445     space = LO_SPACE;
5446   } else if (space == OLD_POINTER_SPACE &&
5447              size > Page::kMaxNonCodeHeapObjectSize) {
5448     // Too big for old pointer space.
5449     space = LO_SPACE;
5450   }
5451
5452   AllocationSpace retry_space =
5453       (size <= Page::kMaxNonCodeHeapObjectSize) ? OLD_POINTER_SPACE : LO_SPACE;
5454
5455   return AllocateRaw(size, space, retry_space);
5456 }
5457
5458
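// Shared helper for AllocateFixedArray and AllocateFixedArrayWithHoles:
// allocates a fixed array and initializes every element with |filler|.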
5459 MUST_USE_RESULT static MaybeObject* AllocateFixedArrayWithFiller(
5460     Heap* heap,
5461     int length,
5462     PretenureFlag pretenure,
5463     Object* filler) {
5464   ASSERT(length >= 0);
5465   ASSERT(heap->empty_fixed_array()->IsFixedArray());
5466   if (length == 0) return heap->empty_fixed_array();
5467
5468   ASSERT(!heap->InNewSpace(filler));
5469   Object* result;
5470   { MaybeObject* maybe_result = heap->AllocateRawFixedArray(length, pretenure);
5471     if (!maybe_result->ToObject(&result)) return maybe_result;
5472   }
5473
5474   HeapObject::cast(result)->set_map_no_write_barrier(heap->fixed_array_map());
5475   FixedArray* array = FixedArray::cast(result);
5476   array->set_length(length);
5477   MemsetPointer(array->data_start(), filler, length);
5478   return array;
5479 }
5480
5481
5482 MaybeObject* Heap::AllocateFixedArray(int length, PretenureFlag pretenure) {
5483   return AllocateFixedArrayWithFiller(this,
5484                                       length,
5485                                       pretenure,
5486                                       undefined_value());
5487 }
5488
5489
5490 MaybeObject* Heap::AllocateFixedArrayWithHoles(int length,
5491                                                PretenureFlag pretenure) {
5492   return AllocateFixedArrayWithFiller(this,
5493                                       length,
5494                                       pretenure,
5495                                       the_hole_value());
5496 }
5497
5498
5499 MaybeObject* Heap::AllocateUninitializedFixedArray(int length) {
5500   if (length == 0) return empty_fixed_array();
5501
5502   Object* obj;
5503   { MaybeObject* maybe_obj = AllocateRawFixedArray(length);
5504     if (!maybe_obj->ToObject(&obj)) return maybe_obj;
5505   }
5506
5507   reinterpret_cast<FixedArray*>(obj)->set_map_no_write_barrier(
5508       fixed_array_map());
5509   FixedArray::cast(obj)->set_length(length);
5510   return obj;
5511 }
5512
5513
5514 MaybeObject* Heap::AllocateEmptyFixedDoubleArray() {
5515   int size = FixedDoubleArray::SizeFor(0);
5516   Object* result;
5517   { MaybeObject* maybe_result =
5518         AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE);
5519     if (!maybe_result->ToObject(&result)) return maybe_result;
5520   }
5521   // Initialize the object.
5522   reinterpret_cast<FixedDoubleArray*>(result)->set_map_no_write_barrier(
5523       fixed_double_array_map());
5524   reinterpret_cast<FixedDoubleArray*>(result)->set_length(0);
5525   return result;
5526 }
5527
5528
5529 MaybeObject* Heap::AllocateUninitializedFixedDoubleArray(
5530     int length,
5531     PretenureFlag pretenure) {
5532   if (length == 0) return empty_fixed_array();
5533
5534   Object* elements_object;
5535   MaybeObject* maybe_obj = AllocateRawFixedDoubleArray(length, pretenure);
5536   if (!maybe_obj->ToObject(&elements_object)) return maybe_obj;
5537   FixedDoubleArray* elements =
5538       reinterpret_cast<FixedDoubleArray*>(elements_object);
5539
5540   elements->set_map_no_write_barrier(fixed_double_array_map());
5541   elements->set_length(length);
5542   return elements;
5543 }
5544
5545
5546 MaybeObject* Heap::AllocateFixedDoubleArrayWithHoles(
5547     int length,
5548     PretenureFlag pretenure) {
5549   if (length == 0) return empty_fixed_array();
5550
5551   Object* elements_object;
5552   MaybeObject* maybe_obj = AllocateRawFixedDoubleArray(length, pretenure);
5553   if (!maybe_obj->ToObject(&elements_object)) return maybe_obj;
5554   FixedDoubleArray* elements =
5555       reinterpret_cast<FixedDoubleArray*>(elements_object);
5556
5557   for (int i = 0; i < length; ++i) {
5558     elements->set_the_hole(i);
5559   }
5560
5561   elements->set_map_no_write_barrier(fixed_double_array_map());
5562   elements->set_length(length);
5563   return elements;
5564 }
5565
5566
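// Allocates the raw storage for a FixedDoubleArray, reserving one extra
// pointer on 32-bit platforms so that the result can be double aligned.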
5567 MaybeObject* Heap::AllocateRawFixedDoubleArray(int length,
5568                                                PretenureFlag pretenure) {
5569   if (length < 0 || length > FixedDoubleArray::kMaxLength) {
5570     return Failure::OutOfMemoryException(0xf);
5571   }
5572
5573   AllocationSpace space =
5574       (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
5575   int size = FixedDoubleArray::SizeFor(length);
5576
5577 #ifndef V8_HOST_ARCH_64_BIT
5578   size += kPointerSize;
5579 #endif
5580
5581   if (space == NEW_SPACE && size > kMaxObjectSizeInNewSpace) {
5582     // Too big for new space.
5583     space = LO_SPACE;
5584   } else if (space == OLD_DATA_SPACE &&
5585              size > Page::kMaxNonCodeHeapObjectSize) {
5586     // Too big for old data space.
5587     space = LO_SPACE;
5588   }
5589
5590   AllocationSpace retry_space =
5591       (size <= Page::kMaxNonCodeHeapObjectSize) ? OLD_DATA_SPACE : LO_SPACE;
5592
5593   HeapObject* object;
5594   { MaybeObject* maybe_object = AllocateRaw(size, space, retry_space);
5595     if (!maybe_object->To<HeapObject>(&object)) return maybe_object;
5596   }
5597
5598   return EnsureDoubleAligned(this, object, size);
5599 }
5600
5601
5602 MaybeObject* Heap::AllocateHashTable(int length, PretenureFlag pretenure) {
5603   Object* result;
5604   { MaybeObject* maybe_result = AllocateFixedArray(length, pretenure);
5605     if (!maybe_result->ToObject(&result)) return maybe_result;
5606   }
5607   reinterpret_cast<HeapObject*>(result)->set_map_no_write_barrier(
5608       hash_table_map());
5609   ASSERT(result->IsHashTable());
5610   return result;
5611 }
5612
5613
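// Allocates a Symbol in old pointer space and assigns it a random non-zero
// hash value.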
5614 MaybeObject* Heap::AllocateSymbol() {
5615   // Statically ensure that it is safe to allocate symbols in paged spaces.
5616   STATIC_ASSERT(Symbol::kSize <= Page::kNonCodeObjectAreaSize);
5617
5618   Object* result;
5619   MaybeObject* maybe =
5620       AllocateRaw(Symbol::kSize, OLD_POINTER_SPACE, OLD_POINTER_SPACE);
5621   if (!maybe->ToObject(&result)) return maybe;
5622
5623   HeapObject::cast(result)->set_map_no_write_barrier(symbol_map());
5624
5625   // Generate a random hash value.
5626   int hash;
5627   int attempts = 0;
5628   do {
5629     hash = V8::RandomPrivate(isolate()) & Name::kHashBitMask;
5630     attempts++;
5631   } while (hash == 0 && attempts < 30);
5632   if (hash == 0) hash = 1;  // never return 0
5633
5634   Symbol::cast(result)->set_hash_field(
5635       Name::kIsNotArrayIndexMask | (hash << Name::kHashShift));
5636   Symbol::cast(result)->set_name(undefined_value());
5637
5638   ASSERT(result->IsSymbol());
5639   return result;
5640 }
5641
5642
5643 MaybeObject* Heap::AllocateNativeContext() {
5644   Object* result;
5645   { MaybeObject* maybe_result =
5646         AllocateFixedArray(Context::NATIVE_CONTEXT_SLOTS);
5647     if (!maybe_result->ToObject(&result)) return maybe_result;
5648   }
5649   Context* context = reinterpret_cast<Context*>(result);
5650   context->set_map_no_write_barrier(native_context_map());
5651   context->set_js_array_maps(undefined_value());
5652   ASSERT(context->IsNativeContext());
5653   ASSERT(result->IsContext());
5654   return result;
5655 }
5656
5657
5658 MaybeObject* Heap::AllocateGlobalContext(JSFunction* function,
5659                                          ScopeInfo* scope_info) {
5660   Object* result;
5661   { MaybeObject* maybe_result =
5662         AllocateFixedArray(scope_info->ContextLength(), TENURED);
5663     if (!maybe_result->ToObject(&result)) return maybe_result;
5664   }
5665   Context* context = reinterpret_cast<Context*>(result);
5666   context->set_map_no_write_barrier(global_context_map());
5667   context->set_closure(function);
5668   context->set_previous(function->context());
5669   context->set_extension(scope_info);
5670   context->set_global_object(function->context()->global_object());
5671   ASSERT(context->IsGlobalContext());
5672   ASSERT(result->IsContext());
5673   return context;
5674 }
5675
5676
5677 MaybeObject* Heap::AllocateModuleContext(ScopeInfo* scope_info) {
5678   Object* result;
5679   { MaybeObject* maybe_result =
5680         AllocateFixedArray(scope_info->ContextLength(), TENURED);
5681     if (!maybe_result->ToObject(&result)) return maybe_result;
5682   }
5683   Context* context = reinterpret_cast<Context*>(result);
5684   context->set_map_no_write_barrier(module_context_map());
5685   // Instance link will be set later.
5686   context->set_extension(Smi::FromInt(0));
5687   return context;
5688 }
5689
5690
5691 MaybeObject* Heap::AllocateFunctionContext(int length, JSFunction* function) {
5692   ASSERT(length >= Context::MIN_CONTEXT_SLOTS);
5693   Object* result;
5694   { MaybeObject* maybe_result = AllocateFixedArray(length);
5695     if (!maybe_result->ToObject(&result)) return maybe_result;
5696   }
5697   Context* context = reinterpret_cast<Context*>(result);
5698   context->set_map_no_write_barrier(function_context_map());
5699   context->set_closure(function);
5700   context->set_previous(function->context());
5701   context->set_extension(Smi::FromInt(0));
5702   context->set_global_object(function->context()->global_object());
5703   return context;
5704 }
5705
5706
5707 MaybeObject* Heap::AllocateCatchContext(JSFunction* function,
5708                                         Context* previous,
5709                                         String* name,
5710                                         Object* thrown_object) {
5711   STATIC_ASSERT(Context::MIN_CONTEXT_SLOTS == Context::THROWN_OBJECT_INDEX);
5712   Object* result;
5713   { MaybeObject* maybe_result =
5714         AllocateFixedArray(Context::MIN_CONTEXT_SLOTS + 1);
5715     if (!maybe_result->ToObject(&result)) return maybe_result;
5716   }
5717   Context* context = reinterpret_cast<Context*>(result);
5718   context->set_map_no_write_barrier(catch_context_map());
5719   context->set_closure(function);
5720   context->set_previous(previous);
5721   context->set_extension(name);
5722   context->set_global_object(previous->global_object());
5723   context->set(Context::THROWN_OBJECT_INDEX, thrown_object);
5724   return context;
5725 }
5726
5727
5728 MaybeObject* Heap::AllocateWithContext(JSFunction* function,
5729                                        Context* previous,
5730                                        JSObject* extension) {
5731   Object* result;
5732   { MaybeObject* maybe_result = AllocateFixedArray(Context::MIN_CONTEXT_SLOTS);
5733     if (!maybe_result->ToObject(&result)) return maybe_result;
5734   }
5735   Context* context = reinterpret_cast<Context*>(result);
5736   context->set_map_no_write_barrier(with_context_map());
5737   context->set_closure(function);
5738   context->set_previous(previous);
5739   context->set_extension(extension);
5740   context->set_global_object(previous->global_object());
5741   return context;
5742 }
5743
5744
5745 MaybeObject* Heap::AllocateBlockContext(JSFunction* function,
5746                                         Context* previous,
5747                                         ScopeInfo* scope_info) {
5748   Object* result;
5749   { MaybeObject* maybe_result =
5750         AllocateFixedArrayWithHoles(scope_info->ContextLength());
5751     if (!maybe_result->ToObject(&result)) return maybe_result;
5752   }
5753   Context* context = reinterpret_cast<Context*>(result);
5754   context->set_map_no_write_barrier(block_context_map());
5755   context->set_closure(function);
5756   context->set_previous(previous);
5757   context->set_extension(scope_info);
5758   context->set_global_object(previous->global_object());
5759   return context;
5760 }
5761
5762
5763 MaybeObject* Heap::AllocateScopeInfo(int length) {
5764   FixedArray* scope_info;
5765   MaybeObject* maybe_scope_info = AllocateFixedArray(length, TENURED);
5766   if (!maybe_scope_info->To(&scope_info)) return maybe_scope_info;
5767   scope_info->set_map_no_write_barrier(scope_info_map());
5768   return scope_info;
5769 }
5770
5771
5772 MaybeObject* Heap::AllocateExternal(void* value) {
5773   Foreign* foreign;
5774   { MaybeObject* maybe_result = AllocateForeign(static_cast<Address>(value));
5775     if (!maybe_result->To(&foreign)) return maybe_result;
5776   }
5777   JSObject* external;
5778   { MaybeObject* maybe_result = AllocateJSObjectFromMap(external_map());
5779     if (!maybe_result->To(&external)) return maybe_result;
5780   }
5781   external->SetInternalField(0, foreign);
5782   return external;
5783 }
5784
5785
5786 MaybeObject* Heap::AllocateStruct(InstanceType type) {
5787   Map* map;
5788   switch (type) {
5789 #define MAKE_CASE(NAME, Name, name) \
5790     case NAME##_TYPE: map = name##_map(); break;
5791 STRUCT_LIST(MAKE_CASE)
5792 #undef MAKE_CASE
5793     default:
5794       UNREACHABLE();
5795       return Failure::InternalError();
5796   }
5797   int size = map->instance_size();
5798   AllocationSpace space =
5799       (size > Page::kMaxNonCodeHeapObjectSize) ? LO_SPACE : OLD_POINTER_SPACE;
5800   Object* result;
5801   { MaybeObject* maybe_result = Allocate(map, space);
5802     if (!maybe_result->ToObject(&result)) return maybe_result;
5803   }
5804   Struct::cast(result)->InitializeBody(size);
5805   return result;
5806 }
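
// For reference, one MAKE_CASE expansion in comment form.  Assuming the
// ACCESSOR_INFO entry of STRUCT_LIST in objects.h (spelled
// V(ACCESSOR_INFO, AccessorInfo, accessor_info)), the preprocessor produces:
//
//   case ACCESSOR_INFO_TYPE: map = accessor_info_map(); break;
//
// i.e. every struct type listed there is mapped onto its dedicated map root.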
5807
5808
5809 bool Heap::IsHeapIterable() {
5810   return (!old_pointer_space()->was_swept_conservatively() &&
5811           !old_data_space()->was_swept_conservatively());
5812 }
5813
5814
5815 void Heap::EnsureHeapIsIterable() {
5816   ASSERT(AllowHeapAllocation::IsAllowed());
5817   if (!IsHeapIterable()) {
5818     CollectAllGarbage(kMakeHeapIterableMask, "Heap::EnsureHeapIsIterable");
5819   }
5820   ASSERT(IsHeapIterable());
5821 }
5822
5823
5824 void Heap::AdvanceIdleIncrementalMarking(intptr_t step_size) {
5825   incremental_marking()->Step(step_size,
5826                               IncrementalMarking::NO_GC_VIA_STACK_GUARD);
5827
5828   if (incremental_marking()->IsComplete()) {
5829     bool uncommit = false;
5830     if (gc_count_at_last_idle_gc_ == gc_count_) {
5831       // No GC since the last full GC, the mutator is probably not active.
5832       isolate_->compilation_cache()->Clear();
5833       uncommit = true;
5834     }
5835     CollectAllGarbage(kNoGCFlags, "idle notification: finalize incremental");
5836     gc_count_at_last_idle_gc_ = gc_count_;
5837     if (uncommit) {
5838       new_space_.Shrink();
5839       UncommitFromSpace();
5840     }
5841   }
5842 }
5843
5844
5845 bool Heap::IdleNotification(int hint) {
5846   // Hints greater than this value indicate that
5847   // the embedder is requesting a lot of GC work.
5848   const int kMaxHint = 1000;
5849   // Minimal hint that allows to do full GC.
5850   // The smallest hint that still allows a full GC to be performed.
5851   intptr_t size_factor = Min(Max(hint, 20), kMaxHint) / 4;
5852   // The size factor is in range [5..250]. The numbers here are chosen from
5853   // experiments. If you change them, make sure to test with
5854   // chrome/performance_ui_tests --gtest_filter="GeneralMixMemoryTest.*"
5855   intptr_t step_size =
5856       size_factor * IncrementalMarking::kAllocatedThreshold;
5857
5858   if (contexts_disposed_ > 0) {
5859     if (hint >= kMaxHint) {
5860       // The embedder is requesting a lot of GC work after context disposal,
5861       // so we age inline caches so that they don't keep objects from
5862       // the old context alive.
5863       AgeInlineCaches();
5864     }
5865     int mark_sweep_time = Min(TimeMarkSweepWouldTakeInMs(), 1000);
5866     if (hint >= mark_sweep_time && !FLAG_expose_gc &&
5867         incremental_marking()->IsStopped()) {
5868       HistogramTimerScope scope(isolate_->counters()->gc_context());
5869       CollectAllGarbage(kReduceMemoryFootprintMask,
5870                         "idle notification: contexts disposed");
5871     } else {
5872       AdvanceIdleIncrementalMarking(step_size);
5873       contexts_disposed_ = 0;
5874     }
5875     // After context disposal there is likely a lot of garbage remaining, so reset
5876     // the idle notification counters in order to trigger more incremental GCs
5877     // on subsequent idle notifications.
5878     StartIdleRound();
5879     return false;
5880   }
5881
5882   if (!FLAG_incremental_marking || FLAG_expose_gc || Serializer::enabled()) {
5883     return IdleGlobalGC();
5884   }
5885
5886   // By doing small chunks of GC work in each IdleNotification we
5887   // perform a round of incremental GCs and after that wait until
5888   // the mutator creates enough garbage to justify a new round.
5889   // An incremental GC progresses as follows:
5890   // 1. many incremental marking steps,
5891   // 2. one old space mark-sweep-compact,
5892   // 3. many lazy sweep steps.
5893   // Use mark-sweep-compact events to count incremental GCs in a round.
5894
5895   if (incremental_marking()->IsStopped()) {
5896     if (!mark_compact_collector()->AreSweeperThreadsActivated() &&
5897         !IsSweepingComplete() &&
5898         !AdvanceSweepers(static_cast<int>(step_size))) {
5899       return false;
5900     }
5901   }
5902
5903   if (mark_sweeps_since_idle_round_started_ >= kMaxMarkSweepsInIdleRound) {
5904     if (EnoughGarbageSinceLastIdleRound()) {
5905       StartIdleRound();
5906     } else {
5907       return true;
5908     }
5909   }
5910
5911   int new_mark_sweeps = ms_count_ - ms_count_at_last_idle_notification_;
5912   mark_sweeps_since_idle_round_started_ += new_mark_sweeps;
5913   ms_count_at_last_idle_notification_ = ms_count_;
5914
5915   int remaining_mark_sweeps = kMaxMarkSweepsInIdleRound -
5916                               mark_sweeps_since_idle_round_started_;
5917
5918   if (remaining_mark_sweeps <= 0) {
5919     FinishIdleRound();
5920     return true;
5921   }
5922
5923   if (incremental_marking()->IsStopped()) {
5924     // If there are no more than two GCs left in this idle round and we are
5925     // allowed to do a full GC, then make those GCs full in order to compact
5926     // the code space.
5927     // TODO(ulan): Once we enable code compaction for incremental marking,
5928     // we can get rid of this special case and always start incremental marking.
5929     if (remaining_mark_sweeps <= 2 && hint >= kMinHintForFullGC) {
5930       CollectAllGarbage(kReduceMemoryFootprintMask,
5931                         "idle notification: finalize idle round");
5932     } else {
5933       incremental_marking()->Start();
5934     }
5935   }
5936   if (!incremental_marking()->IsStopped()) {
5937     AdvanceIdleIncrementalMarking(step_size);
5938   }
5939   return false;
5940 }
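
// Worked example for the step size computation above (numbers illustrative):
// a hint of 400 is clamped to [20..1000] and divided by 4, giving a size
// factor of 100, so each idle step feeds
// 100 * IncrementalMarking::kAllocatedThreshold bytes of "virtual" allocation
// into IncrementalMarking::Step(), which translates directly into marking
// work done during the idle pause.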
5941
5942
5943 bool Heap::IdleGlobalGC() {
5944   static const int kIdlesBeforeScavenge = 4;
5945   static const int kIdlesBeforeMarkSweep = 7;
5946   static const int kIdlesBeforeMarkCompact = 8;
5947   static const int kMaxIdleCount = kIdlesBeforeMarkCompact + 1;
5948   static const unsigned int kGCsBetweenCleanup = 4;
5949
5950   if (!last_idle_notification_gc_count_init_) {
5951     last_idle_notification_gc_count_ = gc_count_;
5952     last_idle_notification_gc_count_init_ = true;
5953   }
5954
5955   bool uncommit = true;
5956   bool finished = false;
5957
5958   // Reset the number of idle notifications received when a number of
5959   // GCs have taken place. This allows another round of cleanup based
5960   // on idle notifications if enough work has been carried out to
5961   // provoke a number of garbage collections.
5962   if (gc_count_ - last_idle_notification_gc_count_ < kGCsBetweenCleanup) {
5963     number_idle_notifications_ =
5964         Min(number_idle_notifications_ + 1, kMaxIdleCount);
5965   } else {
5966     number_idle_notifications_ = 0;
5967     last_idle_notification_gc_count_ = gc_count_;
5968   }
5969
5970   if (number_idle_notifications_ == kIdlesBeforeScavenge) {
5971     CollectGarbage(NEW_SPACE, "idle notification");
5972     new_space_.Shrink();
5973     last_idle_notification_gc_count_ = gc_count_;
5974   } else if (number_idle_notifications_ == kIdlesBeforeMarkSweep) {
5975     // Before doing the mark-sweep collections we clear the
5976     // compilation cache to avoid hanging on to source code and
5977     // generated code for cached functions.
5978     isolate_->compilation_cache()->Clear();
5979
5980     CollectAllGarbage(kReduceMemoryFootprintMask, "idle notification");
5981     new_space_.Shrink();
5982     last_idle_notification_gc_count_ = gc_count_;
5983
5984   } else if (number_idle_notifications_ == kIdlesBeforeMarkCompact) {
5985     CollectAllGarbage(kReduceMemoryFootprintMask, "idle notification");
5986     new_space_.Shrink();
5987     last_idle_notification_gc_count_ = gc_count_;
5988     number_idle_notifications_ = 0;
5989     finished = true;
5990   } else if (number_idle_notifications_ > kIdlesBeforeMarkCompact) {
5991     // If we have received more than kIdlesBeforeMarkCompact idle
5992     // notifications we do not perform any cleanup because we don't
5993     // expect to gain much by doing so.
5994     finished = true;
5995   }
5996
5997   if (uncommit) UncommitFromSpace();
5998
5999   return finished;
6000 }
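
// Worked progression for the constants above (illustrative): with no real
// GCs in between, the 4th consecutive idle notification triggers a new-space
// scavenge, the 7th clears the compilation cache and runs a full collection,
// and the 8th runs one more full collection and finishes the round.  At
// least kGCsBetweenCleanup (4) ordinary GCs since the last idle-triggered
// one reset the counter and allow a new round to start.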
6001
6002
6003 #ifdef DEBUG
6004
6005 void Heap::Print() {
6006   if (!HasBeenSetUp()) return;
6007   isolate()->PrintStack(stdout);
6008   AllSpaces spaces(this);
6009   for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
6010     space->Print();
6011   }
6012 }
6013
6014
6015 void Heap::ReportCodeStatistics(const char* title) {
6016   PrintF(">>>>>> Code Stats (%s) >>>>>>\n", title);
6017   PagedSpace::ResetCodeStatistics();
6018   // We do not look for code in new space, map space, or old space.  If code
6019   // somehow ends up in those spaces, we would miss it here.
6020   code_space_->CollectCodeStatistics();
6021   lo_space_->CollectCodeStatistics();
6022   PagedSpace::ReportCodeStatistics();
6023 }
6024
6025
6026 // This function expects that NewSpace's allocated objects histogram is
6027 // populated (via a call to CollectStatistics or else as a side effect of a
6028 // just-completed scavenge collection).
6029 void Heap::ReportHeapStatistics(const char* title) {
6030   USE(title);
6031   PrintF(">>>>>> =============== %s (%d) =============== >>>>>>\n",
6032          title, gc_count_);
6033   PrintF("old_generation_allocation_limit_ %" V8_PTR_PREFIX "d\n",
6034          old_generation_allocation_limit_);
6035
6036   PrintF("\n");
6037   PrintF("Number of handles : %d\n", HandleScope::NumberOfHandles(isolate_));
6038   isolate_->global_handles()->PrintStats();
6039   PrintF("\n");
6040
6041   PrintF("Heap statistics : ");
6042   isolate_->memory_allocator()->ReportStatistics();
6043   PrintF("To space : ");
6044   new_space_.ReportStatistics();
6045   PrintF("Old pointer space : ");
6046   old_pointer_space_->ReportStatistics();
6047   PrintF("Old data space : ");
6048   old_data_space_->ReportStatistics();
6049   PrintF("Code space : ");
6050   code_space_->ReportStatistics();
6051   PrintF("Map space : ");
6052   map_space_->ReportStatistics();
6053   PrintF("Cell space : ");
6054   cell_space_->ReportStatistics();
6055   PrintF("Large object space : ");
6056   lo_space_->ReportStatistics();
6057   PrintF(">>>>>> ========================================= >>>>>>\n");
6058 }
6059
6060 #endif  // DEBUG
6061
6062 bool Heap::Contains(HeapObject* value) {
6063   return Contains(value->address());
6064 }
6065
6066
6067 bool Heap::Contains(Address addr) {
6068   if (OS::IsOutsideAllocatedSpace(addr)) return false;
6069   return HasBeenSetUp() &&
6070     (new_space_.ToSpaceContains(addr) ||
6071      old_pointer_space_->Contains(addr) ||
6072      old_data_space_->Contains(addr) ||
6073      code_space_->Contains(addr) ||
6074      map_space_->Contains(addr) ||
6075      cell_space_->Contains(addr) ||
6076      lo_space_->SlowContains(addr));
6077 }
6078
6079
6080 bool Heap::InSpace(HeapObject* value, AllocationSpace space) {
6081   return InSpace(value->address(), space);
6082 }
6083
6084
6085 bool Heap::InSpace(Address addr, AllocationSpace space) {
6086   if (OS::IsOutsideAllocatedSpace(addr)) return false;
6087   if (!HasBeenSetUp()) return false;
6088
6089   switch (space) {
6090     case NEW_SPACE:
6091       return new_space_.ToSpaceContains(addr);
6092     case OLD_POINTER_SPACE:
6093       return old_pointer_space_->Contains(addr);
6094     case OLD_DATA_SPACE:
6095       return old_data_space_->Contains(addr);
6096     case CODE_SPACE:
6097       return code_space_->Contains(addr);
6098     case MAP_SPACE:
6099       return map_space_->Contains(addr);
6100     case CELL_SPACE:
6101       return cell_space_->Contains(addr);
6102     case LO_SPACE:
6103       return lo_space_->SlowContains(addr);
6104   }
6105
6106   return false;
6107 }
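
// Hedged usage sketch (hypothetical caller, not code from this file): these
// predicates are mostly used in assertions that pin down where an object
// must live, e.g.
//
//   ASSERT(heap->InSpace(code_object, CODE_SPACE) ||
//          heap->InSpace(code_object, LO_SPACE));
//
// since large code objects are allocated in the large object space rather
// than the code space.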
6108
6109
6110 #ifdef VERIFY_HEAP
6111 void Heap::Verify() {
6112   CHECK(HasBeenSetUp());
6113
6114   store_buffer()->Verify();
6115
6116   VerifyPointersVisitor visitor;
6117   IterateRoots(&visitor, VISIT_ONLY_STRONG);
6118
6119   new_space_.Verify();
6120
6121   old_pointer_space_->Verify(&visitor);
6122   map_space_->Verify(&visitor);
6123
6124   VerifyPointersVisitor no_dirty_regions_visitor;
6125   old_data_space_->Verify(&no_dirty_regions_visitor);
6126   code_space_->Verify(&no_dirty_regions_visitor);
6127   cell_space_->Verify(&no_dirty_regions_visitor);
6128
6129   lo_space_->Verify();
6130 }
6131 #endif
6132
6133
6134 MaybeObject* Heap::InternalizeUtf8String(Vector<const char> string) {
6135   Object* result = NULL;
6136   Object* new_table;
6137   { MaybeObject* maybe_new_table =
6138         string_table()->LookupUtf8String(string, &result);
6139     if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
6140   }
6141   // Can't use set_string_table because StringTable::cast knows that
6142   // StringTable is a singleton and checks for identity.
6143   roots_[kStringTableRootIndex] = new_table;
6144   ASSERT(result != NULL);
6145   return result;
6146 }
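
// Hedged caller sketch (hypothetical helper, not code from this file): the
// usual pattern for internalizing a C string and propagating failure is
//
//   Object* name;
//   { MaybeObject* maybe = heap->InternalizeUtf8String(CStrVector("length"));
//     if (!maybe->ToObject(&name)) return maybe;
//   }
//
// where CStrVector() is the existing helper that wraps a NUL-terminated
// string in a Vector<const char>.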
6147
6148
6149 MaybeObject* Heap::InternalizeOneByteString(Vector<const uint8_t> string) {
6150   Object* result = NULL;
6151   Object* new_table;
6152   { MaybeObject* maybe_new_table =
6153         string_table()->LookupOneByteString(string, &result);
6154     if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
6155   }
6156   // Can't use set_string_table because StringTable::cast knows that
6157   // StringTable is a singleton and checks for identity.
6158   roots_[kStringTableRootIndex] = new_table;
6159   ASSERT(result != NULL);
6160   return result;
6161 }
6162
6163
6164 MaybeObject* Heap::InternalizeOneByteString(Handle<SeqOneByteString> string,
6165                                      int from,
6166                                      int length) {
6167   Object* result = NULL;
6168   Object* new_table;
6169   { MaybeObject* maybe_new_table =
6170         string_table()->LookupSubStringOneByteString(string,
6171                                                    from,
6172                                                    length,
6173                                                    &result);
6174     if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
6175   }
6176   // Can't use set_string_table because StringTable::cast knows that
6177   // StringTable is a singleton and checks for identity.
6178   roots_[kStringTableRootIndex] = new_table;
6179   ASSERT(result != NULL);
6180   return result;
6181 }
6182
6183
6184 MaybeObject* Heap::InternalizeTwoByteString(Vector<const uc16> string) {
6185   Object* result = NULL;
6186   Object* new_table;
6187   { MaybeObject* maybe_new_table =
6188         string_table()->LookupTwoByteString(string, &result);
6189     if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
6190   }
6191   // Can't use set_string_table because StringTable::cast knows that
6192   // StringTable is a singleton and checks for identity.
6193   roots_[kStringTableRootIndex] = new_table;
6194   ASSERT(result != NULL);
6195   return result;
6196 }
6197
6198
6199 MaybeObject* Heap::InternalizeString(String* string) {
6200   if (string->IsInternalizedString()) return string;
6201   Object* result = NULL;
6202   Object* new_table;
6203   { MaybeObject* maybe_new_table =
6204         string_table()->LookupString(string, &result);
6205     if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
6206   }
6207   // Can't use set_string_table because StringTable::cast knows that
6208   // StringTable is a singleton and checks for identity.
6209   roots_[kStringTableRootIndex] = new_table;
6210   ASSERT(result != NULL);
6211   return result;
6212 }
6213
6214
6215 bool Heap::InternalizeStringIfExists(String* string, String** result) {
6216   if (string->IsInternalizedString()) {
6217     *result = string;
6218     return true;
6219   }
6220   return string_table()->LookupStringIfExists(string, result);
6221 }
6222
6223
6224 void Heap::ZapFromSpace() {
6225   NewSpacePageIterator it(new_space_.FromSpaceStart(),
6226                           new_space_.FromSpaceEnd());
6227   while (it.has_next()) {
6228     NewSpacePage* page = it.next();
6229     for (Address cursor = page->area_start(), limit = page->area_end();
6230          cursor < limit;
6231          cursor += kPointerSize) {
6232       Memory::Address_at(cursor) = kFromSpaceZapValue;
6233     }
6234   }
6235 }
6236
6237
6238 void Heap::IterateAndMarkPointersToFromSpace(Address start,
6239                                              Address end,
6240                                              ObjectSlotCallback callback) {
6241   Address slot_address = start;
6242
6243   // We are not collecting slots on new space objects during mutation,
6244   // thus we have to scan for pointers to evacuation candidates when we
6245   // promote objects. But we should not record any slots in non-black
6246   // objects. A grey object's slots would be rescanned anyway.
6247   // A white object might not survive until the end of the collection, so
6248   // it would be a violation of the invariant to record its slots.
6249   bool record_slots = false;
6250   if (incremental_marking()->IsCompacting()) {
6251     MarkBit mark_bit = Marking::MarkBitFrom(HeapObject::FromAddress(start));
6252     record_slots = Marking::IsBlack(mark_bit);
6253   }
6254
6255   while (slot_address < end) {
6256     Object** slot = reinterpret_cast<Object**>(slot_address);
6257     Object* object = *slot;
6258     // If the store buffer becomes overfull we mark pages as being exempt from
6259     // the store buffer.  These pages are scanned to find pointers that point
6260     // to the new space.  In that case we may hit newly promoted objects and
6261     // fix the pointers before the promotion queue gets to them.  Thus the 'if'.
6262     if (object->IsHeapObject()) {
6263       if (Heap::InFromSpace(object)) {
6264         callback(reinterpret_cast<HeapObject**>(slot),
6265                  HeapObject::cast(object));
6266         Object* new_object = *slot;
6267         if (InNewSpace(new_object)) {
6268           SLOW_ASSERT(Heap::InToSpace(new_object));
6269           SLOW_ASSERT(new_object->IsHeapObject());
6270           store_buffer_.EnterDirectlyIntoStoreBuffer(
6271               reinterpret_cast<Address>(slot));
6272         }
6273         SLOW_ASSERT(!MarkCompactCollector::IsOnEvacuationCandidate(new_object));
6274       } else if (record_slots &&
6275                  MarkCompactCollector::IsOnEvacuationCandidate(object)) {
6276         mark_compact_collector()->RecordSlot(slot, slot, object);
6277       }
6278     }
6279     slot_address += kPointerSize;
6280   }
6281 }
6282
6283
6284 #ifdef DEBUG
6285 typedef bool (*CheckStoreBufferFilter)(Object** addr);
6286
6287
6288 bool IsAMapPointerAddress(Object** addr) {
6289   uintptr_t a = reinterpret_cast<uintptr_t>(addr);
6290   int mod = a % Map::kSize;
6291   return mod >= Map::kPointerFieldsBeginOffset &&
6292          mod < Map::kPointerFieldsEndOffset;
6293 }
6294
6295
6296 bool EverythingsAPointer(Object** addr) {
6297   return true;
6298 }
6299
6300
6301 static void CheckStoreBuffer(Heap* heap,
6302                              Object** current,
6303                              Object** limit,
6304                              Object**** store_buffer_position,
6305                              Object*** store_buffer_top,
6306                              CheckStoreBufferFilter filter,
6307                              Address special_garbage_start,
6308                              Address special_garbage_end) {
6309   Map* free_space_map = heap->free_space_map();
6310   for ( ; current < limit; current++) {
6311     Object* o = *current;
6312     Address current_address = reinterpret_cast<Address>(current);
6313     // Skip free space.
6314     if (o == free_space_map) {
6316       FreeSpace* free_space =
6317           FreeSpace::cast(HeapObject::FromAddress(current_address));
6318       int skip = free_space->Size();
6319       ASSERT(current_address + skip <= reinterpret_cast<Address>(limit));
6320       ASSERT(skip > 0);
6321       current_address += skip - kPointerSize;
6322       current = reinterpret_cast<Object**>(current_address);
6323       continue;
6324     }
6325     // Skip the current linear allocation space between top and limit, which is
6326     // not marked with the free space map but can contain junk.
6327     if (current_address == special_garbage_start &&
6328         special_garbage_end != special_garbage_start) {
6329       current_address = special_garbage_end - kPointerSize;
6330       current = reinterpret_cast<Object**>(current_address);
6331       continue;
6332     }
6333     if (!(*filter)(current)) continue;
6334     ASSERT(current_address < special_garbage_start ||
6335            current_address >= special_garbage_end);
6336     ASSERT(reinterpret_cast<uintptr_t>(o) != kFreeListZapValue);
6337     // We have to check that the pointer does not point into new space
6338     // without trying to cast it to a heap object since the hash field of
6339     // a string can contain values like 1 and 3 which are tagged null
6340     // pointers.
6341     if (!heap->InNewSpace(o)) continue;
6342     while (**store_buffer_position < current &&
6343            *store_buffer_position < store_buffer_top) {
6344       (*store_buffer_position)++;
6345     }
6346     if (**store_buffer_position != current ||
6347         *store_buffer_position == store_buffer_top) {
6348       Object** obj_start = current;
6349       while (!(*obj_start)->IsMap()) obj_start--;
6350       UNREACHABLE();
6351     }
6352   }
6353 }
6354
6355
6356 // Check that the store buffer contains all intergenerational pointers by
6357 // scanning a page and ensuring that all pointers to young space are in the
6358 // store buffer.
6359 void Heap::OldPointerSpaceCheckStoreBuffer() {
6360   OldSpace* space = old_pointer_space();
6361   PageIterator pages(space);
6362
6363   store_buffer()->SortUniq();
6364
6365   while (pages.has_next()) {
6366     Page* page = pages.next();
6367     Object** current = reinterpret_cast<Object**>(page->area_start());
6368
6369     Address end = page->area_end();
6370
6371     Object*** store_buffer_position = store_buffer()->Start();
6372     Object*** store_buffer_top = store_buffer()->Top();
6373
6374     Object** limit = reinterpret_cast<Object**>(end);
6375     CheckStoreBuffer(this,
6376                      current,
6377                      limit,
6378                      &store_buffer_position,
6379                      store_buffer_top,
6380                      &EverythingsAPointer,
6381                      space->top(),
6382                      space->limit());
6383   }
6384 }
6385
6386
6387 void Heap::MapSpaceCheckStoreBuffer() {
6388   MapSpace* space = map_space();
6389   PageIterator pages(space);
6390
6391   store_buffer()->SortUniq();
6392
6393   while (pages.has_next()) {
6394     Page* page = pages.next();
6395     Object** current = reinterpret_cast<Object**>(page->area_start());
6396
6397     Address end = page->area_end();
6398
6399     Object*** store_buffer_position = store_buffer()->Start();
6400     Object*** store_buffer_top = store_buffer()->Top();
6401
6402     Object** limit = reinterpret_cast<Object**>(end);
6403     CheckStoreBuffer(this,
6404                      current,
6405                      limit,
6406                      &store_buffer_position,
6407                      store_buffer_top,
6408                      &IsAMapPointerAddress,
6409                      space->top(),
6410                      space->limit());
6411   }
6412 }
6413
6414
6415 void Heap::LargeObjectSpaceCheckStoreBuffer() {
6416   LargeObjectIterator it(lo_space());
6417   for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
6418     // We only have code, sequential strings, or fixed arrays in large
6419     // object space, and only fixed arrays can possibly contain pointers to
6420     // the young generation.
6421     if (object->IsFixedArray()) {
6422       Object*** store_buffer_position = store_buffer()->Start();
6423       Object*** store_buffer_top = store_buffer()->Top();
6424       Object** current = reinterpret_cast<Object**>(object->address());
6425       Object** limit =
6426           reinterpret_cast<Object**>(object->address() + object->Size());
6427       CheckStoreBuffer(this,
6428                        current,
6429                        limit,
6430                        &store_buffer_position,
6431                        store_buffer_top,
6432                        &EverythingsAPointer,
6433                        NULL,
6434                        NULL);
6435     }
6436   }
6437 }
6438 #endif
6439
6440
6441 void Heap::IterateRoots(ObjectVisitor* v, VisitMode mode) {
6442   IterateStrongRoots(v, mode);
6443   IterateWeakRoots(v, mode);
6444 }
6445
6446
6447 void Heap::IterateWeakRoots(ObjectVisitor* v, VisitMode mode) {
6448   v->VisitPointer(reinterpret_cast<Object**>(&roots_[kStringTableRootIndex]));
6449   v->Synchronize(VisitorSynchronization::kStringTable);
6450   if (mode != VISIT_ALL_IN_SCAVENGE &&
6451       mode != VISIT_ALL_IN_SWEEP_NEWSPACE) {
6452     // Scavenge collections have special processing for this.
6453     external_string_table_.Iterate(v);
6454     error_object_list_.Iterate(v);
6455   }
6456   v->Synchronize(VisitorSynchronization::kExternalStringsTable);
6457 }
6458
6459
6460 void Heap::IterateStrongRoots(ObjectVisitor* v, VisitMode mode) {
6461   v->VisitPointers(&roots_[0], &roots_[kStrongRootListLength]);
6462   v->Synchronize(VisitorSynchronization::kStrongRootList);
6463
6464   v->VisitPointer(BitCast<Object**>(&hidden_string_));
6465   v->Synchronize(VisitorSynchronization::kInternalizedString);
6466
6467   isolate_->bootstrapper()->Iterate(v);
6468   v->Synchronize(VisitorSynchronization::kBootstrapper);
6469   isolate_->Iterate(v);
6470   v->Synchronize(VisitorSynchronization::kTop);
6471   Relocatable::Iterate(v);
6472   v->Synchronize(VisitorSynchronization::kRelocatable);
6473
6474 #ifdef ENABLE_DEBUGGER_SUPPORT
6475   isolate_->debug()->Iterate(v);
6476   if (isolate_->deoptimizer_data() != NULL) {
6477     isolate_->deoptimizer_data()->Iterate(v);
6478   }
6479 #endif
6480   v->Synchronize(VisitorSynchronization::kDebug);
6481   isolate_->compilation_cache()->Iterate(v);
6482   v->Synchronize(VisitorSynchronization::kCompilationCache);
6483
6484   // Iterate over local handles in handle scopes.
6485   isolate_->handle_scope_implementer()->Iterate(v);
6486   isolate_->IterateDeferredHandles(v);
6487   v->Synchronize(VisitorSynchronization::kHandleScope);
6488
6489   // Iterate over the builtin code objects and code stubs in the
6490   // heap. Note that it is not necessary to iterate over code objects
6491   // on scavenge collections.
6492   if (mode != VISIT_ALL_IN_SCAVENGE) {
6493     isolate_->builtins()->IterateBuiltins(v);
6494   }
6495   v->Synchronize(VisitorSynchronization::kBuiltins);
6496
6497   // Iterate over global handles.
6498   switch (mode) {
6499     case VISIT_ONLY_STRONG:
6500       isolate_->global_handles()->IterateStrongRoots(v);
6501       break;
6502     case VISIT_ALL_IN_SCAVENGE:
6503       isolate_->global_handles()->IterateNewSpaceStrongAndDependentRoots(v);
6504       break;
6505     case VISIT_ALL_IN_SWEEP_NEWSPACE:
6506     case VISIT_ALL:
6507       isolate_->global_handles()->IterateAllRoots(v);
6508       break;
6509   }
6510   v->Synchronize(VisitorSynchronization::kGlobalHandles);
6511
6512   // Iterate over pointers being held by inactive threads.
6513   isolate_->thread_manager()->Iterate(v);
6514   v->Synchronize(VisitorSynchronization::kThreadManager);
6515
6516   // Iterate over the pointers the Serialization/Deserialization code is
6517   // holding.
6518   // During garbage collection this keeps the partial snapshot cache alive.
6519   // During deserialization of the startup snapshot this creates the partial
6520   // snapshot cache and deserializes the objects it refers to.  During
6521   // serialization this does nothing, since the partial snapshot cache is
6522   // empty.  However the next thing we do is create the partial snapshot,
6523   // filling up the partial snapshot cache with objects it needs as we go.
6524   SerializerDeserializer::Iterate(v);
6525   // We don't do a v->Synchronize call here, because in debug mode that will
6526   // output a flag to the snapshot.  However at this point the serializer and
6527   // deserializer are deliberately a little unsynchronized (see above) so the
6528   // checking of the sync flag in the snapshot would fail.
6529 }
6530
6531
6532 // TODO(1236194): Since the heap size is configurable on the command line
6533 // and through the API, we should gracefully handle the case that the heap
6534 // size is not big enough to fit all the initial objects.
6535 bool Heap::ConfigureHeap(int max_semispace_size,
6536                          intptr_t max_old_gen_size,
6537                          intptr_t max_executable_size) {
6538   if (HasBeenSetUp()) return false;
6539
6540   if (FLAG_stress_compaction) {
6541     // This will cause more frequent GCs when stressing.
6542     max_semispace_size_ = Page::kPageSize;
6543   }
6544
6545   if (max_semispace_size > 0) {
6546     if (max_semispace_size < Page::kPageSize) {
6547       max_semispace_size = Page::kPageSize;
6548       if (FLAG_trace_gc) {
6549         PrintPID("Max semispace size cannot be less than %dkbytes\n",
6550                  Page::kPageSize >> 10);
6551       }
6552     }
6553     max_semispace_size_ = max_semispace_size;
6554   }
6555
6556   if (Snapshot::IsEnabled()) {
6557     // If we are using a snapshot we always reserve the default amount
6558     // of memory for each semispace because code in the snapshot has
6559     // write-barrier code that relies on the size and alignment of new
6560     // space.  We therefore cannot use a larger max semispace size
6561     // than the default reserved semispace size.
6562     if (max_semispace_size_ > reserved_semispace_size_) {
6563       max_semispace_size_ = reserved_semispace_size_;
6564       if (FLAG_trace_gc) {
6565         PrintPID("Max semispace size cannot be more than %dkbytes\n",
6566                  reserved_semispace_size_ >> 10);
6567       }
6568     }
6569   } else {
6570     // If we are not using snapshots we reserve space for the actual
6571     // max semispace size.
6572     reserved_semispace_size_ = max_semispace_size_;
6573   }
6574
6575   if (max_old_gen_size > 0) max_old_generation_size_ = max_old_gen_size;
6576   if (max_executable_size > 0) {
6577     max_executable_size_ = RoundUp(max_executable_size, Page::kPageSize);
6578   }
6579
6580   // The max executable size must be less than or equal to the max old
6581   // generation size.
6582   if (max_executable_size_ > max_old_generation_size_) {
6583     max_executable_size_ = max_old_generation_size_;
6584   }
6585
6586   // The new space size must be a power of two to support single-bit testing
6587   // for containment.
6588   max_semispace_size_ = RoundUpToPowerOf2(max_semispace_size_);
6589   reserved_semispace_size_ = RoundUpToPowerOf2(reserved_semispace_size_);
6590   initial_semispace_size_ = Min(initial_semispace_size_, max_semispace_size_);
6591   external_allocation_limit_ = 16 * max_semispace_size_;
6592
6593   // The old generation is paged and needs at least one page for each space.
6594   int paged_space_count = LAST_PAGED_SPACE - FIRST_PAGED_SPACE + 1;
6595   max_old_generation_size_ = Max(static_cast<intptr_t>(paged_space_count *
6596                                                        Page::kPageSize),
6597                                  RoundUp(max_old_generation_size_,
6598                                          Page::kPageSize));
6599
6600   configured_ = true;
6601   return true;
6602 }
6603
6604
6605 bool Heap::ConfigureHeapDefault() {
6606   return ConfigureHeap(static_cast<intptr_t>(FLAG_max_new_space_size / 2) * KB,
6607                        static_cast<intptr_t>(FLAG_max_old_space_size) * MB,
6608                        static_cast<intptr_t>(FLAG_max_executable_size) * MB);
6609 }
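
// Worked example of the default path above (flag units as used here):
// --max_new_space_size is given in KB and covers both semispaces, so it is
// halved to obtain the per-semispace maximum; --max_old_space_size and
// --max_executable_size are given in MB.  Passing --max_old_space_size=256
// therefore reaches ConfigureHeap() as 256 * MB, is rounded up to a multiple
// of Page::kPageSize, and caps the executable size if that was configured
// larger.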
6610
6611
6612 void Heap::RecordStats(HeapStats* stats, bool take_snapshot) {
6613   *stats->start_marker = HeapStats::kStartMarker;
6614   *stats->end_marker = HeapStats::kEndMarker;
6615   *stats->new_space_size = new_space_.SizeAsInt();
6616   *stats->new_space_capacity = static_cast<int>(new_space_.Capacity());
6617   *stats->old_pointer_space_size = old_pointer_space_->SizeOfObjects();
6618   *stats->old_pointer_space_capacity = old_pointer_space_->Capacity();
6619   *stats->old_data_space_size = old_data_space_->SizeOfObjects();
6620   *stats->old_data_space_capacity = old_data_space_->Capacity();
6621   *stats->code_space_size = code_space_->SizeOfObjects();
6622   *stats->code_space_capacity = code_space_->Capacity();
6623   *stats->map_space_size = map_space_->SizeOfObjects();
6624   *stats->map_space_capacity = map_space_->Capacity();
6625   *stats->cell_space_size = cell_space_->SizeOfObjects();
6626   *stats->cell_space_capacity = cell_space_->Capacity();
6627   *stats->lo_space_size = lo_space_->Size();
6628   isolate_->global_handles()->RecordStats(stats);
6629   *stats->memory_allocator_size = isolate()->memory_allocator()->Size();
6630   *stats->memory_allocator_capacity =
6631       isolate()->memory_allocator()->Size() +
6632       isolate()->memory_allocator()->Available();
6633   *stats->os_error = OS::GetLastError();
6635   if (take_snapshot) {
6636     HeapIterator iterator(this);
6637     for (HeapObject* obj = iterator.next();
6638          obj != NULL;
6639          obj = iterator.next()) {
6640       InstanceType type = obj->map()->instance_type();
6641       ASSERT(0 <= type && type <= LAST_TYPE);
6642       stats->objects_per_type[type]++;
6643       stats->size_per_type[type] += obj->Size();
6644     }
6645   }
6646 }
6647
6648
6649 intptr_t Heap::PromotedSpaceSizeOfObjects() {
6650   return old_pointer_space_->SizeOfObjects()
6651       + old_data_space_->SizeOfObjects()
6652       + code_space_->SizeOfObjects()
6653       + map_space_->SizeOfObjects()
6654       + cell_space_->SizeOfObjects()
6655       + lo_space_->SizeOfObjects();
6656 }
6657
6658
6659 intptr_t Heap::PromotedExternalMemorySize() {
6660   if (amount_of_external_allocated_memory_
6661       <= amount_of_external_allocated_memory_at_last_global_gc_) return 0;
6662   return amount_of_external_allocated_memory_
6663       - amount_of_external_allocated_memory_at_last_global_gc_;
6664 }
6665
6666
6667 V8_DECLARE_ONCE(initialize_gc_once);
6668
6669 static void InitializeGCOnce() {
6670   InitializeScavengingVisitorsTables();
6671   NewSpaceScavenger::Initialize();
6672   MarkCompactCollector::Initialize();
6673 }
6674
6675 bool Heap::SetUp() {
6676 #ifdef DEBUG
6677   allocation_timeout_ = FLAG_gc_interval;
6678 #endif
6679
6680   // Initialize heap spaces and initial maps and objects. Whenever something
6681   // goes wrong, just return false. The caller should check the results and
6682   // call Heap::TearDown() to release allocated memory.
6683   //
6684   // If the heap is not yet configured (e.g. through the API), configure it.
6685   // Configuration is based on the flags new-space-size (really the semispace
6686   // size) and old-space-size if set or the initial values of semispace_size_
6687   // and old_generation_size_ otherwise.
6688   if (!configured_) {
6689     if (!ConfigureHeapDefault()) return false;
6690   }
6691
6692   CallOnce(&initialize_gc_once, &InitializeGCOnce);
6693
6694   MarkMapPointersAsEncoded(false);
6695
6696   // Set up memory allocator.
6697   if (!isolate_->memory_allocator()->SetUp(MaxReserved(), MaxExecutableSize()))
6698       return false;
6699
6700   // Set up new space.
6701   if (!new_space_.SetUp(reserved_semispace_size_, max_semispace_size_)) {
6702     return false;
6703   }
6704
6705   // Initialize old pointer space.
6706   old_pointer_space_ =
6707       new OldSpace(this,
6708                    max_old_generation_size_,
6709                    OLD_POINTER_SPACE,
6710                    NOT_EXECUTABLE);
6711   if (old_pointer_space_ == NULL) return false;
6712   if (!old_pointer_space_->SetUp()) return false;
6713
6714   // Initialize old data space.
6715   old_data_space_ =
6716       new OldSpace(this,
6717                    max_old_generation_size_,
6718                    OLD_DATA_SPACE,
6719                    NOT_EXECUTABLE);
6720   if (old_data_space_ == NULL) return false;
6721   if (!old_data_space_->SetUp()) return false;
6722
6723   // Initialize the code space, set its maximum capacity to the old
6724   // generation size. It needs executable memory.
6725   // On 64-bit platform(s), we put all code objects in a 2 GB range of
6726   // virtual address space, so that they can call each other with near calls.
6727   if (code_range_size_ > 0) {
6728     if (!isolate_->code_range()->SetUp(code_range_size_)) {
6729       return false;
6730     }
6731   }
6732
6733   code_space_ =
6734       new OldSpace(this, max_old_generation_size_, CODE_SPACE, EXECUTABLE);
6735   if (code_space_ == NULL) return false;
6736   if (!code_space_->SetUp()) return false;
6737
6738   // Initialize map space.
6739   map_space_ = new MapSpace(this, max_old_generation_size_, MAP_SPACE);
6740   if (map_space_ == NULL) return false;
6741   if (!map_space_->SetUp()) return false;
6742
6743   // Initialize global property cell space.
6744   cell_space_ = new CellSpace(this, max_old_generation_size_, CELL_SPACE);
6745   if (cell_space_ == NULL) return false;
6746   if (!cell_space_->SetUp()) return false;
6747
6748   // The large object space may contain code or data.  We set the memory
6749   // to be non-executable here for safety, but this means we need to enable it
6750   // explicitly when allocating large code objects.
6751   lo_space_ = new LargeObjectSpace(this, max_old_generation_size_, LO_SPACE);
6752   if (lo_space_ == NULL) return false;
6753   if (!lo_space_->SetUp()) return false;
6754
6755   // Set up the seed that is used to randomize the string hash function.
6756   ASSERT(hash_seed() == 0);
6757   if (FLAG_randomize_hashes) {
6758     if (FLAG_hash_seed == 0) {
6759       set_hash_seed(
6760           Smi::FromInt(V8::RandomPrivate(isolate()) & 0x3fffffff));
6761     } else {
6762       set_hash_seed(Smi::FromInt(FLAG_hash_seed));
6763     }
6764   }
6765
6766   LOG(isolate_, IntPtrTEvent("heap-capacity", Capacity()));
6767   LOG(isolate_, IntPtrTEvent("heap-available", Available()));
6768
6769   store_buffer()->SetUp();
6770
6771   if (FLAG_parallel_recompilation) relocation_mutex_ = OS::CreateMutex();
6772 #ifdef DEBUG
6773   relocation_mutex_locked_by_optimizer_thread_ = false;
6774 #endif  // DEBUG
6775
6776   return true;
6777 }
6778
6779 bool Heap::CreateHeapObjects() {
6780   // Create initial maps.
6781   if (!CreateInitialMaps()) return false;
6782   if (!CreateApiObjects()) return false;
6783
6784   // Create initial objects
6785   if (!CreateInitialObjects()) return false;
6786
6787   native_contexts_list_ = undefined_value();
6788   return true;
6789 }
6790
6791
6792 void Heap::SetStackLimits() {
6793   ASSERT(isolate_ != NULL);
6794   ASSERT(isolate_ == isolate());
6795   // On 64 bit machines, pointers are generally out of range of Smis.  We write
6796   // something that looks like an out of range Smi to the GC.
6797
6798   // Set up the special root array entries containing the stack limits.
6799   // These are actually addresses, but the tag makes the GC ignore it.
6800   roots_[kStackLimitRootIndex] =
6801       reinterpret_cast<Object*>(
6802           (isolate_->stack_guard()->jslimit() & ~kSmiTagMask) | kSmiTag);
6803   roots_[kRealStackLimitRootIndex] =
6804       reinterpret_cast<Object*>(
6805           (isolate_->stack_guard()->real_jslimit() & ~kSmiTagMask) | kSmiTag);
6806 }
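
// Worked example of the tagging trick above (illustrative 32-bit figures,
// using the existing kSmiTag == 0 and kSmiTagMask == 1): a hypothetical
// jslimit of 0xbfff1235 is stored as (0xbfff1235 & ~1) | 0 == 0xbfff1234.
// The cleared low bit makes the value look like a small integer, so the GC
// never tries to dereference it as a heap pointer even though it is really
// an address.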
6807
6808
6809 void Heap::TearDown() {
6810 #ifdef VERIFY_HEAP
6811   if (FLAG_verify_heap) {
6812     Verify();
6813   }
6814 #endif
6815
6816   if (FLAG_print_cumulative_gc_stat) {
6817     PrintF("\n");
6818     PrintF("gc_count=%d ", gc_count_);
6819     PrintF("mark_sweep_count=%d ", ms_count_);
6820     PrintF("max_gc_pause=%.1f ", get_max_gc_pause());
6821     PrintF("total_gc_time=%.1f ", total_gc_time_ms_);
6822     PrintF("min_in_mutator=%.1f ", get_min_in_mutator());
6823     PrintF("max_alive_after_gc=%" V8_PTR_PREFIX "d ",
6824            get_max_alive_after_gc());
6825     PrintF("total_marking_time=%.1f ", marking_time());
6826     PrintF("total_sweeping_time=%.1f ", sweeping_time());
6827     PrintF("\n\n");
6828   }
6829
6830   isolate_->global_handles()->TearDown();
6831
6832   external_string_table_.TearDown();
6833
6834   error_object_list_.TearDown();
6835
6836   new_space_.TearDown();
6837
6838   if (old_pointer_space_ != NULL) {
6839     old_pointer_space_->TearDown();
6840     delete old_pointer_space_;
6841     old_pointer_space_ = NULL;
6842   }
6843
6844   if (old_data_space_ != NULL) {
6845     old_data_space_->TearDown();
6846     delete old_data_space_;
6847     old_data_space_ = NULL;
6848   }
6849
6850   if (code_space_ != NULL) {
6851     code_space_->TearDown();
6852     delete code_space_;
6853     code_space_ = NULL;
6854   }
6855
6856   if (map_space_ != NULL) {
6857     map_space_->TearDown();
6858     delete map_space_;
6859     map_space_ = NULL;
6860   }
6861
6862   if (cell_space_ != NULL) {
6863     cell_space_->TearDown();
6864     delete cell_space_;
6865     cell_space_ = NULL;
6866   }
6867
6868   if (lo_space_ != NULL) {
6869     lo_space_->TearDown();
6870     delete lo_space_;
6871     lo_space_ = NULL;
6872   }
6873
6874   store_buffer()->TearDown();
6875   incremental_marking()->TearDown();
6876
6877   isolate_->memory_allocator()->TearDown();
6878
6879   delete relocation_mutex_;
6880 }
6881
6882
6883 void Heap::AddGCPrologueCallback(GCPrologueCallback callback, GCType gc_type) {
6884   ASSERT(callback != NULL);
6885   GCPrologueCallbackPair pair(callback, gc_type);
6886   ASSERT(!gc_prologue_callbacks_.Contains(pair));
6887   return gc_prologue_callbacks_.Add(pair);
6888 }
6889
6890
6891 void Heap::RemoveGCPrologueCallback(GCPrologueCallback callback) {
6892   ASSERT(callback != NULL);
6893   for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
6894     if (gc_prologue_callbacks_[i].callback == callback) {
6895       gc_prologue_callbacks_.Remove(i);
6896       return;
6897     }
6898   }
6899   UNREACHABLE();
6900 }
6901
6902
6903 void Heap::AddGCEpilogueCallback(GCEpilogueCallback callback, GCType gc_type) {
6904   ASSERT(callback != NULL);
6905   GCEpilogueCallbackPair pair(callback, gc_type);
6906   ASSERT(!gc_epilogue_callbacks_.Contains(pair));
6907   return gc_epilogue_callbacks_.Add(pair);
6908 }
6909
6910
6911 void Heap::RemoveGCEpilogueCallback(GCEpilogueCallback callback) {
6912   ASSERT(callback != NULL);
6913   for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
6914     if (gc_epilogue_callbacks_[i].callback == callback) {
6915       gc_epilogue_callbacks_.Remove(i);
6916       return;
6917     }
6918   }
6919   UNREACHABLE();
6920 }
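
// Hedged usage sketch for the callback registries above.  Embedders normally
// go through the public wrappers in v8.h rather than calling Heap directly;
// assuming the v8::V8::AddGCPrologueCallback() interface of this release,
// registration looks roughly like:
//
//   static void OnGCPrologue(v8::GCType type, v8::GCCallbackFlags flags) {
//     // e.g. record a timestamp before every mark-sweep.
//   }
//   ...
//   v8::V8::AddGCPrologueCallback(OnGCPrologue, v8::kGCTypeMarkSweepCompact);
//
// The gc_type argument is the filter checked when the callbacks are fired.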
6921
6922
6923 #ifdef DEBUG
6924
6925 class PrintHandleVisitor: public ObjectVisitor {
6926  public:
6927   void VisitPointers(Object** start, Object** end) {
6928     for (Object** p = start; p < end; p++)
6929       PrintF("  handle %p to %p\n",
6930              reinterpret_cast<void*>(p),
6931              reinterpret_cast<void*>(*p));
6932   }
6933 };
6934
6935 void Heap::PrintHandles() {
6936   PrintF("Handles:\n");
6937   PrintHandleVisitor v;
6938   isolate_->handle_scope_implementer()->Iterate(&v);
6939 }
6940
6941 #endif
6942
6943
6944 Space* AllSpaces::next() {
6945   switch (counter_++) {
6946     case NEW_SPACE:
6947       return heap_->new_space();
6948     case OLD_POINTER_SPACE:
6949       return heap_->old_pointer_space();
6950     case OLD_DATA_SPACE:
6951       return heap_->old_data_space();
6952     case CODE_SPACE:
6953       return heap_->code_space();
6954     case MAP_SPACE:
6955       return heap_->map_space();
6956     case CELL_SPACE:
6957       return heap_->cell_space();
6958     case LO_SPACE:
6959       return heap_->lo_space();
6960     default:
6961       return NULL;
6962   }
6963 }
6964
6965
6966 PagedSpace* PagedSpaces::next() {
6967   switch (counter_++) {
6968     case OLD_POINTER_SPACE:
6969       return heap_->old_pointer_space();
6970     case OLD_DATA_SPACE:
6971       return heap_->old_data_space();
6972     case CODE_SPACE:
6973       return heap_->code_space();
6974     case MAP_SPACE:
6975       return heap_->map_space();
6976     case CELL_SPACE:
6977       return heap_->cell_space();
6978     default:
6979       return NULL;
6980   }
6981 }
6982
6983
6984
6985 OldSpace* OldSpaces::next() {
6986   switch (counter_++) {
6987     case OLD_POINTER_SPACE:
6988       return heap_->old_pointer_space();
6989     case OLD_DATA_SPACE:
6990       return heap_->old_data_space();
6991     case CODE_SPACE:
6992       return heap_->code_space();
6993     default:
6994       return NULL;
6995   }
6996 }
6997
6998
6999 SpaceIterator::SpaceIterator(Heap* heap)
7000     : heap_(heap),
7001       current_space_(FIRST_SPACE),
7002       iterator_(NULL),
7003       size_func_(NULL) {
7004 }
7005
7006
7007 SpaceIterator::SpaceIterator(Heap* heap, HeapObjectCallback size_func)
7008     : heap_(heap),
7009       current_space_(FIRST_SPACE),
7010       iterator_(NULL),
7011       size_func_(size_func) {
7012 }
7013
7014
7015 SpaceIterator::~SpaceIterator() {
7016   // Delete active iterator if any.
7017   delete iterator_;
7018 }
7019
7020
7021 bool SpaceIterator::has_next() {
7022   // Iterate until no more spaces.
7023   return current_space_ != LAST_SPACE;
7024 }
7025
7026
7027 ObjectIterator* SpaceIterator::next() {
7028   if (iterator_ != NULL) {
7029     delete iterator_;
7030     iterator_ = NULL;
7031     // Move to the next space
7032     current_space_++;
7033     if (current_space_ > LAST_SPACE) {
7034       return NULL;
7035     }
7036   }
7037
7038   // Return iterator for the new current space.
7039   return CreateIterator();
7040 }
7041
7042
7043 // Create an iterator for the space to iterate.
7044 ObjectIterator* SpaceIterator::CreateIterator() {
7045   ASSERT(iterator_ == NULL);
7046
7047   switch (current_space_) {
7048     case NEW_SPACE:
7049       iterator_ = new SemiSpaceIterator(heap_->new_space(), size_func_);
7050       break;
7051     case OLD_POINTER_SPACE:
7052       iterator_ =
7053           new HeapObjectIterator(heap_->old_pointer_space(), size_func_);
7054       break;
7055     case OLD_DATA_SPACE:
7056       iterator_ = new HeapObjectIterator(heap_->old_data_space(), size_func_);
7057       break;
7058     case CODE_SPACE:
7059       iterator_ = new HeapObjectIterator(heap_->code_space(), size_func_);
7060       break;
7061     case MAP_SPACE:
7062       iterator_ = new HeapObjectIterator(heap_->map_space(), size_func_);
7063       break;
7064     case CELL_SPACE:
7065       iterator_ = new HeapObjectIterator(heap_->cell_space(), size_func_);
7066       break;
7067     case LO_SPACE:
7068       iterator_ = new LargeObjectIterator(heap_->lo_space(), size_func_);
7069       break;
7070   }
7071
7072   // Return the newly allocated iterator.
7073   ASSERT(iterator_ != NULL);
7074   return iterator_;
7075 }
7076
7077
7078 class HeapObjectsFilter {
7079  public:
7080   virtual ~HeapObjectsFilter() {}
7081   virtual bool SkipObject(HeapObject* object) = 0;
7082 };
7083
7084
7085 class UnreachableObjectsFilter : public HeapObjectsFilter {
7086  public:
7087   UnreachableObjectsFilter() {
7088     MarkReachableObjects();
7089   }
7090
7091   ~UnreachableObjectsFilter() {
7092     Isolate::Current()->heap()->mark_compact_collector()->ClearMarkbits();
7093   }
7094
7095   bool SkipObject(HeapObject* object) {
7096     MarkBit mark_bit = Marking::MarkBitFrom(object);
7097     return !mark_bit.Get();
7098   }
7099
7100  private:
7101   class MarkingVisitor : public ObjectVisitor {
7102    public:
7103     MarkingVisitor() : marking_stack_(10) {}
7104
7105     void VisitPointers(Object** start, Object** end) {
7106       for (Object** p = start; p < end; p++) {
7107         if (!(*p)->IsHeapObject()) continue;
7108         HeapObject* obj = HeapObject::cast(*p);
7109         MarkBit mark_bit = Marking::MarkBitFrom(obj);
7110         if (!mark_bit.Get()) {
7111           mark_bit.Set();
7112           marking_stack_.Add(obj);
7113         }
7114       }
7115     }
7116
7117     void TransitiveClosure() {
7118       while (!marking_stack_.is_empty()) {
7119         HeapObject* obj = marking_stack_.RemoveLast();
7120         obj->Iterate(this);
7121       }
7122     }
7123
7124    private:
7125     List<HeapObject*> marking_stack_;
7126   };
7127
7128   void MarkReachableObjects() {
7129     Heap* heap = Isolate::Current()->heap();
7130     MarkingVisitor visitor;
7131     heap->IterateRoots(&visitor, VISIT_ALL);
7132     visitor.TransitiveClosure();
7133   }
7134
7135   DisallowHeapAllocation no_allocation_;
7136 };
7137
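
// Illustrative use of the filter above together with the HeapIterator
// defined below (hypothetical caller, mirroring the iteration pattern used
// elsewhere in this file):
//
//   HeapIterator iterator(heap, HeapIterator::kFilterUnreachable);
//   for (HeapObject* obj = iterator.next();
//        obj != NULL;
//        obj = iterator.next()) {
//     // Only objects reachable from the roots are visited here.
//   }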
7138
7139 HeapIterator::HeapIterator(Heap* heap)
7140     : heap_(heap),
7141       filtering_(HeapIterator::kNoFiltering),
7142       filter_(NULL) {
7143   Init();
7144 }
7145
7146
7147 HeapIterator::HeapIterator(Heap* heap,
7148                            HeapIterator::HeapObjectsFiltering filtering)
7149     : heap_(heap),
7150       filtering_(filtering),
7151       filter_(NULL) {
7152   Init();
7153 }
7154
7155
7156 HeapIterator::~HeapIterator() {
7157   Shutdown();
7158 }
7159
7160
7161 void HeapIterator::Init() {
7162   // Start the iteration.
7163   space_iterator_ = new SpaceIterator(heap_);
7164   switch (filtering_) {
7165     case kFilterUnreachable:
7166       filter_ = new UnreachableObjectsFilter;
7167       break;
7168     default:
7169       break;
7170   }
7171   object_iterator_ = space_iterator_->next();
7172 }
7173
7174
7175 void HeapIterator::Shutdown() {
7176 #ifdef DEBUG
7177   // Assert that in filtering mode we have iterated through all
7178   // objects. Otherwise, heap will be left in an inconsistent state.
7179   if (filtering_ != kNoFiltering) {
7180     ASSERT(object_iterator_ == NULL);
7181   }
7182 #endif
7183   // Make sure the last iterator is deallocated.
7184   delete space_iterator_;
7185   space_iterator_ = NULL;
7186   object_iterator_ = NULL;
7187   delete filter_;
7188   filter_ = NULL;
7189 }
7190
7191
7192 HeapObject* HeapIterator::next() {
7193   if (filter_ == NULL) return NextObject();
7194
7195   HeapObject* obj = NextObject();
7196   while (obj != NULL && filter_->SkipObject(obj)) obj = NextObject();
7197   return obj;
7198 }
7199
7200
7201 HeapObject* HeapIterator::NextObject() {
7202   // No iterator means we are done.
7203   if (object_iterator_ == NULL) return NULL;
7204
7205   if (HeapObject* obj = object_iterator_->next_object()) {
7206     // If the current iterator has more objects we are fine.
7207     return obj;
7208   } else {
7209     // Go through the spaces looking for one that has objects.
7210     while (space_iterator_->has_next()) {
7211       object_iterator_ = space_iterator_->next();
7212       if (HeapObject* obj = object_iterator_->next_object()) {
7213         return obj;
7214       }
7215     }
7216   }
7217   // Done with the last space.
7218   object_iterator_ = NULL;
7219   return NULL;
7220 }
7221
7222
7223 void HeapIterator::reset() {
7224   // Restart the iterator.
7225   Shutdown();
7226   Init();
7227 }
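// Illustrative usage of HeapIterator (a sketch for reference only, not
// invoked from this file): walking the heap with the unreachable-objects
// filter enabled.
//
//   HeapIterator iterator(heap, HeapIterator::kFilterUnreachable);
//   for (HeapObject* obj = iterator.next();
//        obj != NULL;
//        obj = iterator.next()) {
//     // Only objects reachable from the roots are returned here.
//   }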
7228
7229
7230 #ifdef DEBUG
7231
7232 Object* const PathTracer::kAnyGlobalObject = reinterpret_cast<Object*>(NULL);
7233
7234 class PathTracer::MarkVisitor: public ObjectVisitor {
7235  public:
7236   explicit MarkVisitor(PathTracer* tracer) : tracer_(tracer) {}
7237   void VisitPointers(Object** start, Object** end) {
7238     // Scan all HeapObject pointers in [start, end)
7239     for (Object** p = start; !tracer_->found() && (p < end); p++) {
7240       if ((*p)->IsHeapObject())
7241         tracer_->MarkRecursively(p, this);
7242     }
7243   }
7244
7245  private:
7246   PathTracer* tracer_;
7247 };
7248
7249
7250 class PathTracer::UnmarkVisitor: public ObjectVisitor {
7251  public:
7252   explicit UnmarkVisitor(PathTracer* tracer) : tracer_(tracer) {}
7253   void VisitPointers(Object** start, Object** end) {
7254     // Scan all HeapObject pointers in [start, end)
7255     for (Object** p = start; p < end; p++) {
7256       if ((*p)->IsHeapObject())
7257         tracer_->UnmarkRecursively(p, this);
7258     }
7259   }
7260
7261  private:
7262   PathTracer* tracer_;
7263 };
7264
7265
7266 void PathTracer::VisitPointers(Object** start, Object** end) {
7267   bool done = ((what_to_find_ == FIND_FIRST) && found_target_);
7268   // Visit all HeapObject pointers in [start, end)
7269   for (Object** p = start; !done && (p < end); p++) {
7270     if ((*p)->IsHeapObject()) {
7271       TracePathFrom(p);
7272       done = ((what_to_find_ == FIND_FIRST) && found_target_);
7273     }
7274   }
7275 }
7276
7277
7278 void PathTracer::Reset() {
7279   found_target_ = false;
7280   object_stack_.Clear();
7281 }
7282
7283
7284 void PathTracer::TracePathFrom(Object** root) {
7285   ASSERT((search_target_ == kAnyGlobalObject) ||
7286          search_target_->IsHeapObject());
7287   found_target_in_trace_ = false;
7288   Reset();
7289
7290   MarkVisitor mark_visitor(this);
7291   MarkRecursively(root, &mark_visitor);
7292
7293   UnmarkVisitor unmark_visitor(this);
7294   UnmarkRecursively(root, &unmark_visitor);
7295
7296   ProcessResults();
7297 }
7298
7299
7300 static bool SafeIsNativeContext(HeapObject* obj) {
7301   return obj->map() == obj->GetHeap()->raw_unchecked_native_context_map();
7302 }
7303
7304
7305 void PathTracer::MarkRecursively(Object** p, MarkVisitor* mark_visitor) {
7306   if (!(*p)->IsHeapObject()) return;
7307
7308   HeapObject* obj = HeapObject::cast(*p);
7309
7310   Object* map = obj->map();
7311
7312   if (!map->IsHeapObject()) return;  // visited before
7313
7314   if (found_target_in_trace_) return;  // stop if target found
7315   object_stack_.Add(obj);
7316   if (((search_target_ == kAnyGlobalObject) && obj->IsJSGlobalObject()) ||
7317       (obj == search_target_)) {
7318     found_target_in_trace_ = true;
7319     found_target_ = true;
7320     return;
7321   }
7322
7323   bool is_native_context = SafeIsNativeContext(obj);
7324
7325   // not visited yet
7326   Map* map_p = reinterpret_cast<Map*>(HeapObject::cast(map));
7327
7328   Address map_addr = map_p->address();
7329
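  // The object is marked by overwriting its map word with the map's address
  // plus kMarkTag, so a marked object's map() no longer passes the
  // IsHeapObject() check above ("visited before").  UnmarkRecursively()
  // restores the original map by subtracting kMarkTag again.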
7330   obj->set_map_no_write_barrier(reinterpret_cast<Map*>(map_addr + kMarkTag));
7331
7332   // Scan the object body.
7333   if (is_native_context && (visit_mode_ == VISIT_ONLY_STRONG)) {
7334     // This is specialized to scan Contexts properly.
7335     Object** start = reinterpret_cast<Object**>(obj->address() +
7336                                                 Context::kHeaderSize);
7337     Object** end = reinterpret_cast<Object**>(obj->address() +
7338         Context::kHeaderSize + Context::FIRST_WEAK_SLOT * kPointerSize);
7339     mark_visitor->VisitPointers(start, end);
7340   } else {
7341     obj->IterateBody(map_p->instance_type(),
7342                      obj->SizeFromMap(map_p),
7343                      mark_visitor);
7344   }
7345
7346   // Scan the map after the body because the body is a lot more interesting
7347   // when doing leak detection.
7348   MarkRecursively(&map, mark_visitor);
7349
7350   if (!found_target_in_trace_)  // don't pop if found the target
7351     object_stack_.RemoveLast();
7352 }
7353
7354
7355 void PathTracer::UnmarkRecursively(Object** p, UnmarkVisitor* unmark_visitor) {
7356   if (!(*p)->IsHeapObject()) return;
7357
7358   HeapObject* obj = HeapObject::cast(*p);
7359
7360   Object* map = obj->map();
7361
7362   if (map->IsHeapObject()) return;  // unmarked already
7363
7364   Address map_addr = reinterpret_cast<Address>(map);
7365
7366   map_addr -= kMarkTag;
7367
7368   ASSERT_TAG_ALIGNED(map_addr);
7369
7370   HeapObject* map_p = HeapObject::FromAddress(map_addr);
7371
7372   obj->set_map_no_write_barrier(reinterpret_cast<Map*>(map_p));
7373
7374   UnmarkRecursively(reinterpret_cast<Object**>(&map_p), unmark_visitor);
7375
7376   obj->IterateBody(Map::cast(map_p)->instance_type(),
7377                    obj->SizeFromMap(Map::cast(map_p)),
7378                    unmark_visitor);
7379 }
7380
7381
7382 void PathTracer::ProcessResults() {
7383   if (found_target_) {
7384     PrintF("=====================================\n");
7385     PrintF("====        Path to object       ====\n");
7386     PrintF("=====================================\n\n");
7387
7388     ASSERT(!object_stack_.is_empty());
7389     for (int i = 0; i < object_stack_.length(); i++) {
7390       if (i > 0) PrintF("\n     |\n     |\n     V\n\n");
7391       Object* obj = object_stack_[i];
7392       obj->Print();
7393     }
7394     PrintF("=====================================\n");
7395   }
7396 }
7397
7398
7399 // Triggers a depth-first traversal of reachable objects from one
7400 // given root object and finds a path to a specific heap object and
7401 // prints it.
7402 void Heap::TracePathToObjectFrom(Object* target, Object* root) {
7403   PathTracer tracer(target, PathTracer::FIND_ALL, VISIT_ALL);
7404   tracer.VisitPointer(&root);
7405 }
7406
7407
7408 // Triggers a depth-first traversal of reachable objects from roots
7409 // and finds a path to a specific heap object and prints it.
7410 void Heap::TracePathToObject(Object* target) {
7411   PathTracer tracer(target, PathTracer::FIND_ALL, VISIT_ALL);
7412   IterateRoots(&tracer, VISIT_ONLY_STRONG);
7413 }
7414
7415
7416 // Triggers a depth-first traversal of reachable objects from roots
7417 // and finds a path to any global object and prints it. Useful for
7418 // determining the source for leaks of global objects.
7419 void Heap::TracePathToGlobal() {
7420   PathTracer tracer(PathTracer::kAnyGlobalObject,
7421                     PathTracer::FIND_ALL,
7422                     VISIT_ALL);
7423   IterateRoots(&tracer, VISIT_ONLY_STRONG);
7424 }
7425 #endif
7426
7427
7428 static intptr_t CountTotalHolesSize(Heap* heap) {
7429   intptr_t holes_size = 0;
7430   OldSpaces spaces(heap);
7431   for (OldSpace* space = spaces.next();
7432        space != NULL;
7433        space = spaces.next()) {
7434     holes_size += space->Waste() + space->Available();
7435   }
7436   return holes_size;
7437 }
7438
7439
7440 GCTracer::GCTracer(Heap* heap,
7441                    const char* gc_reason,
7442                    const char* collector_reason)
7443     : start_time_(0.0),
7444       start_object_size_(0),
7445       start_memory_size_(0),
7446       gc_count_(0),
7447       full_gc_count_(0),
7448       allocated_since_last_gc_(0),
7449       spent_in_mutator_(0),
7450       promoted_objects_size_(0),
7451       nodes_died_in_new_space_(0),
7452       nodes_copied_in_new_space_(0),
7453       nodes_promoted_(0),
7454       heap_(heap),
7455       gc_reason_(gc_reason),
7456       collector_reason_(collector_reason) {
7457   if (!FLAG_trace_gc && !FLAG_print_cumulative_gc_stat) return;
7458   start_time_ = OS::TimeCurrentMillis();
7459   start_object_size_ = heap_->SizeOfObjects();
7460   start_memory_size_ = heap_->isolate()->memory_allocator()->Size();
7461
7462   for (int i = 0; i < Scope::kNumberOfScopes; i++) {
7463     scopes_[i] = 0;
7464   }
7465
7466   in_free_list_or_wasted_before_gc_ = CountTotalHolesSize(heap);
7467
7468   allocated_since_last_gc_ =
7469       heap_->SizeOfObjects() - heap_->alive_after_last_gc_;
7470
7471   if (heap_->last_gc_end_timestamp_ > 0) {
7472     spent_in_mutator_ = Max(start_time_ - heap_->last_gc_end_timestamp_, 0.0);
7473   }
7474
7475   steps_count_ = heap_->incremental_marking()->steps_count();
7476   steps_took_ = heap_->incremental_marking()->steps_took();
7477   longest_step_ = heap_->incremental_marking()->longest_step();
7478   steps_count_since_last_gc_ =
7479       heap_->incremental_marking()->steps_count_since_last_gc();
7480   steps_took_since_last_gc_ =
7481       heap_->incremental_marking()->steps_took_since_last_gc();
7482 }
7483
7484
7485 GCTracer::~GCTracer() {
7486   // Printf ONE line iff flag is set.
7487   if (!FLAG_trace_gc && !FLAG_print_cumulative_gc_stat) return;
7488
7489   bool first_gc = (heap_->last_gc_end_timestamp_ == 0);
7490
7491   heap_->alive_after_last_gc_ = heap_->SizeOfObjects();
7492   heap_->last_gc_end_timestamp_ = OS::TimeCurrentMillis();
7493
7494   double time = heap_->last_gc_end_timestamp_ - start_time_;
7495
7496   // Update cumulative GC statistics if required.
7497   if (FLAG_print_cumulative_gc_stat) {
7498     heap_->total_gc_time_ms_ += time;
7499     heap_->max_gc_pause_ = Max(heap_->max_gc_pause_, time);
7500     heap_->max_alive_after_gc_ = Max(heap_->max_alive_after_gc_,
7501                                      heap_->alive_after_last_gc_);
7502     if (!first_gc) {
7503       heap_->min_in_mutator_ = Min(heap_->min_in_mutator_,
7504                                    spent_in_mutator_);
7505     }
7506   } else if (FLAG_trace_gc_verbose) {
7507     heap_->total_gc_time_ms_ += time;
7508   }
7509
7510   if (collector_ == SCAVENGER && FLAG_trace_gc_ignore_scavenger) return;
7511
7512   heap_->AddMarkingTime(scopes_[Scope::MC_MARK]);
7513
7514   if (FLAG_print_cumulative_gc_stat && !FLAG_trace_gc) return;
7515   PrintPID("%8.0f ms: ", heap_->isolate()->time_millis_since_init());
7516
7517   if (!FLAG_trace_gc_nvp) {
7518     int external_time = static_cast<int>(scopes_[Scope::EXTERNAL]);
7519
7520     double end_memory_size_mb =
7521         static_cast<double>(heap_->isolate()->memory_allocator()->Size()) / MB;
7522
7523     PrintF("%s %.1f (%.1f) -> %.1f (%.1f) MB, ",
7524            CollectorString(),
7525            static_cast<double>(start_object_size_) / MB,
7526            static_cast<double>(start_memory_size_) / MB,
7527            SizeOfHeapObjects(),
7528            end_memory_size_mb);
7529
7530     if (external_time > 0) PrintF("%d / ", external_time);
7531     PrintF("%.1f ms", time);
7532     if (steps_count_ > 0) {
7533       if (collector_ == SCAVENGER) {
7534         PrintF(" (+ %.1f ms in %d steps since last GC)",
7535                steps_took_since_last_gc_,
7536                steps_count_since_last_gc_);
7537       } else {
7538         PrintF(" (+ %.1f ms in %d steps since start of marking, "
7539                    "biggest step %.1f ms)",
7540                steps_took_,
7541                steps_count_,
7542                longest_step_);
7543       }
7544     }
7545
7546     if (gc_reason_ != NULL) {
7547       PrintF(" [%s]", gc_reason_);
7548     }
7549
7550     if (collector_reason_ != NULL) {
7551       PrintF(" [%s]", collector_reason_);
7552     }
7553
7554     PrintF(".\n");
7555   } else {
7556     PrintF("pause=%.1f ", time);
7557     PrintF("mutator=%.1f ", spent_in_mutator_);
7558     PrintF("gc=");
7559     switch (collector_) {
7560       case SCAVENGER:
7561         PrintF("s");
7562         break;
7563       case MARK_COMPACTOR:
7564         PrintF("ms");
7565         break;
7566       default:
7567         UNREACHABLE();
7568     }
7569     PrintF(" ");
7570
7571     PrintF("external=%.1f ", scopes_[Scope::EXTERNAL]);
7572     PrintF("mark=%.1f ", scopes_[Scope::MC_MARK]);
7573     PrintF("sweep=%.1f ", scopes_[Scope::MC_SWEEP]);
7574     PrintF("sweepns=%.1f ", scopes_[Scope::MC_SWEEP_NEWSPACE]);
7575     PrintF("evacuate=%.1f ", scopes_[Scope::MC_EVACUATE_PAGES]);
7576     PrintF("new_new=%.1f ", scopes_[Scope::MC_UPDATE_NEW_TO_NEW_POINTERS]);
7577     PrintF("root_new=%.1f ", scopes_[Scope::MC_UPDATE_ROOT_TO_NEW_POINTERS]);
7578     PrintF("old_new=%.1f ", scopes_[Scope::MC_UPDATE_OLD_TO_NEW_POINTERS]);
7579     PrintF("compaction_ptrs=%.1f ",
7580         scopes_[Scope::MC_UPDATE_POINTERS_TO_EVACUATED]);
7581     PrintF("intracompaction_ptrs=%.1f ",
7582         scopes_[Scope::MC_UPDATE_POINTERS_BETWEEN_EVACUATED]);
7583     PrintF("misc_compaction=%.1f ", scopes_[Scope::MC_UPDATE_MISC_POINTERS]);
7584     PrintF("weakmap_process=%.1f ", scopes_[Scope::MC_WEAKMAP_PROCESS]);
7585     PrintF("weakmap_clear=%.1f ", scopes_[Scope::MC_WEAKMAP_CLEAR]);
7586
7587     PrintF("total_size_before=%" V8_PTR_PREFIX "d ", start_object_size_);
7588     PrintF("total_size_after=%" V8_PTR_PREFIX "d ", heap_->SizeOfObjects());
7589     PrintF("holes_size_before=%" V8_PTR_PREFIX "d ",
7590            in_free_list_or_wasted_before_gc_);
7591     PrintF("holes_size_after=%" V8_PTR_PREFIX "d ", CountTotalHolesSize(heap_));
7592
7593     PrintF("allocated=%" V8_PTR_PREFIX "d ", allocated_since_last_gc_);
7594     PrintF("promoted=%" V8_PTR_PREFIX "d ", promoted_objects_size_);
7595     PrintF("nodes_died_in_new=%d ", nodes_died_in_new_space_);
7596     PrintF("nodes_copied_in_new=%d ", nodes_copied_in_new_space_);
7597     PrintF("nodes_promoted=%d ", nodes_promoted_);
7598
7599     if (collector_ == SCAVENGER) {
7600       PrintF("stepscount=%d ", steps_count_since_last_gc_);
7601       PrintF("stepstook=%.1f ", steps_took_since_last_gc_);
7602     } else {
7603       PrintF("stepscount=%d ", steps_count_);
7604       PrintF("stepstook=%.1f ", steps_took_);
7605       PrintF("longeststep=%.1f ", longest_step_);
7606     }
7607
7608     PrintF("\n");
7609   }
7610
7611   heap_->PrintShortHeapStatistics();
7612 }
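// For reference, the non-nvp branch above emits one line per GC that looks
// roughly like the following (values are illustrative only; the leading
// "[1234]" is the process id printed by PrintPID):
//   [1234]     42 ms: Scavenge 8.1 (16.0) -> 7.2 (16.0) MB, 1.3 ms
//       [allocation failure].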
7613
7614
7615 const char* GCTracer::CollectorString() {
7616   switch (collector_) {
7617     case SCAVENGER:
7618       return "Scavenge";
7619     case MARK_COMPACTOR:
7620       return "Mark-sweep";
7621   }
7622   return "Unknown GC";
7623 }
7624
7625
7626 int KeyedLookupCache::Hash(Map* map, Name* name) {
7627   // Uses only lower 32 bits if pointers are larger.
7628   uintptr_t addr_hash =
7629       static_cast<uint32_t>(reinterpret_cast<uintptr_t>(map)) >> kMapHashShift;
7630   return static_cast<uint32_t>((addr_hash ^ name->Hash()) & kCapacityMask);
7631 }
7632
7633
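// Lookup() and Update() below mask the hash with kHashMask to obtain the base
// index of a bucket of kEntriesPerBucket consecutive entries, which they then
// probe in order.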
7634 int KeyedLookupCache::Lookup(Map* map, Name* name) {
7635   int index = (Hash(map, name) & kHashMask);
7636   for (int i = 0; i < kEntriesPerBucket; i++) {
7637     Key& key = keys_[index + i];
7638     if ((key.map == map) && key.name->Equals(name)) {
7639       return field_offsets_[index + i];
7640     }
7641   }
7642   return kNotFound;
7643 }
7644
7645
7646 void KeyedLookupCache::Update(Map* map, Name* name, int field_offset) {
7647   if (!name->IsUniqueName()) {
7648     String* internalized_string;
7649     if (!HEAP->InternalizeStringIfExists(
7650             String::cast(name), &internalized_string)) {
7651       return;
7652     }
7653     name = internalized_string;
7654   }
7655   // This cache is cleared only between mark compact passes, so we expect the
7656   // cache to only contain old space names.
7657   ASSERT(!HEAP->InNewSpace(name));
7658
7659   int index = (Hash(map, name) & kHashMask);
7660   // After a GC there will be free slots, so we use them in order (this may
7661   // help to get the most frequently used one in position 0).
7662   for (int i = 0; i < kEntriesPerBucket; i++) {
7663     Key& key = keys_[index + i];
7664     Object* free_entry_indicator = NULL;
7665     if (key.map == free_entry_indicator) {
7666       key.map = map;
7667       key.name = name;
7668       field_offsets_[index + i] = field_offset;
7669       return;
7670     }
7671   }
7672   // No free entry found in this bucket, so we move them all down one and
7673   // put the new entry at position zero.
7674   for (int i = kEntriesPerBucket - 1; i > 0; i--) {
7675     Key& key = keys_[index + i];
7676     Key& key2 = keys_[index + i - 1];
7677     key = key2;
7678     field_offsets_[index + i] = field_offsets_[index + i - 1];
7679   }
7680
7681   // Write the new first entry.
7682   Key& key = keys_[index];
7683   key.map = map;
7684   key.name = name;
7685   field_offsets_[index] = field_offset;
7686 }
7687
7688
7689 void KeyedLookupCache::Clear() {
7690   for (int index = 0; index < kLength; index++) keys_[index].map = NULL;
7691 }
7692
7693
7694 void DescriptorLookupCache::Clear() {
7695   for (int index = 0; index < kLength; index++) keys_[index].source = NULL;
7696 }
7697
7698
7699 #ifdef DEBUG
7700 void Heap::GarbageCollectionGreedyCheck() {
7701   ASSERT(FLAG_gc_greedy);
7702   if (isolate_->bootstrapper()->IsActive()) return;
7703   if (disallow_allocation_failure()) return;
7704   CollectGarbage(NEW_SPACE);
7705 }
7706 #endif
7707
7708
7709 TranscendentalCache::SubCache::SubCache(Type t)
7710   : type_(t),
7711     isolate_(Isolate::Current()) {
7712   uint32_t in0 = 0xffffffffu;  // Bit-pattern for a NaN that isn't
7713   uint32_t in1 = 0xffffffffu;  // generated by the FPU.
7714   for (int i = 0; i < kCacheSize; i++) {
7715     elements_[i].in[0] = in0;
7716     elements_[i].in[1] = in1;
7717     elements_[i].output = NULL;
7718   }
7719 }
7720
7721
7722 void TranscendentalCache::Clear() {
7723   for (int i = 0; i < kNumberOfCaches; i++) {
7724     if (caches_[i] != NULL) {
7725       delete caches_[i];
7726       caches_[i] = NULL;
7727     }
7728   }
7729 }
7730
7731
7732 void ExternalStringTable::CleanUp() {
7733   int last = 0;
7734   for (int i = 0; i < new_space_strings_.length(); ++i) {
7735     if (new_space_strings_[i] == heap_->the_hole_value()) {
7736       continue;
7737     }
7738     if (heap_->InNewSpace(new_space_strings_[i])) {
7739       new_space_strings_[last++] = new_space_strings_[i];
7740     } else {
7741       old_space_strings_.Add(new_space_strings_[i]);
7742     }
7743   }
7744   new_space_strings_.Rewind(last);
7745   new_space_strings_.Trim();
7746
7747   last = 0;
7748   for (int i = 0; i < old_space_strings_.length(); ++i) {
7749     if (old_space_strings_[i] == heap_->the_hole_value()) {
7750       continue;
7751     }
7752     ASSERT(!heap_->InNewSpace(old_space_strings_[i]));
7753     old_space_strings_[last++] = old_space_strings_[i];
7754   }
7755   old_space_strings_.Rewind(last);
7756   old_space_strings_.Trim();
7757 #ifdef VERIFY_HEAP
7758   if (FLAG_verify_heap) {
7759     Verify();
7760   }
7761 #endif
7762 }
7763
7764
7765 void ExternalStringTable::TearDown() {
7766   new_space_strings_.Free();
7767   old_space_strings_.Free();
7768 }
7769
7770
7771 // Update all references.
7772 void ErrorObjectList::UpdateReferences() {
7773   for (int i = 0; i < list_.length(); i++) {
7774     HeapObject* object = HeapObject::cast(list_[i]);
7775     MapWord first_word = object->map_word();
7776     if (first_word.IsForwardingAddress()) {
7777       list_[i] = first_word.ToForwardingAddress();
7778     }
7779   }
7780 }
7781
7782
7783 // Unforwarded objects in new space are dead and removed from the list.
7784 void ErrorObjectList::UpdateReferencesInNewSpace(Heap* heap) {
7785   if (list_.is_empty()) return;
7786   if (!nested_) {
7787     int write_index = 0;
7788     for (int i = 0; i < list_.length(); i++) {
7789       MapWord first_word = HeapObject::cast(list_[i])->map_word();
7790       if (first_word.IsForwardingAddress()) {
7791         list_[write_index++] = first_word.ToForwardingAddress();
7792       }
7793     }
7794     list_.Rewind(write_index);
7795   } else {
7796     // If a GC is triggered during DeferredFormatStackTrace, we do not move
7797     // objects in the list, just remove dead ones, so as not to confuse the
7798     // loop in DeferredFormatStackTrace.
7799     for (int i = 0; i < list_.length(); i++) {
7800       MapWord first_word = HeapObject::cast(list_[i])->map_word();
7801       list_[i] = first_word.IsForwardingAddress()
7802                      ? first_word.ToForwardingAddress()
7803                      : heap->the_hole_value();
7804     }
7805   }
7806 }
7807
7808
7809 void ErrorObjectList::DeferredFormatStackTrace(Isolate* isolate) {
7810   // If formatting the stack trace causes a GC, this method will be
7811   // recursively called.  In that case, skip the recursive call, since
7812   // the loop modifies the list while iterating over it.
7813   if (nested_ || list_.is_empty() || isolate->has_pending_exception()) return;
7814   nested_ = true;
7815   HandleScope scope(isolate);
7816   Handle<String> stack_key = isolate->factory()->stack_string();
7817   int write_index = 0;
7818   int budget = kBudgetPerGC;
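  // At most kBudgetPerGC stack traces are formatted per GC; error objects
  // that are still in new space, or that arrive after the budget is
  // exhausted, are kept in the list (via write_index) and retried after a
  // later GC.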
7819   for (int i = 0; i < list_.length(); i++) {
7820     Object* object = list_[i];
7821     JSFunction* getter_fun;
7822
7823     { DisallowHeapAllocation no_gc;
7824       // Skip possible holes in the list.
7825       if (object->IsTheHole()) continue;
7826       if (isolate->heap()->InNewSpace(object) || budget == 0) {
7827         list_[write_index++] = object;
7828         continue;
7829       }
7830
7831       // Check whether the stack property is backed by the original getter.
7832       LookupResult lookup(isolate);
7833       JSObject::cast(object)->LocalLookupRealNamedProperty(*stack_key, &lookup);
7834       if (!lookup.IsFound() || lookup.type() != CALLBACKS) continue;
7835       Object* callback = lookup.GetCallbackObject();
7836       if (!callback->IsAccessorPair()) continue;
7837       Object* getter_obj = AccessorPair::cast(callback)->getter();
7838       if (!getter_obj->IsJSFunction()) continue;
7839       getter_fun = JSFunction::cast(getter_obj);
7840       String* key = isolate->heap()->hidden_stack_trace_string();
7841       Object* value = getter_fun->GetHiddenProperty(key);
7842       if (key != value) continue;
7843     }
7844
7845     budget--;
7846     HandleScope scope(isolate);
7847     bool has_exception = false;
7848 #ifdef DEBUG
7849     Handle<Map> map(HeapObject::cast(object)->map(), isolate);
7850 #endif
7851     Handle<Object> object_handle(object, isolate);
7852     Handle<Object> getter_handle(getter_fun, isolate);
7853     Execution::Call(getter_handle, object_handle, 0, NULL, &has_exception);
7854     ASSERT(*map == HeapObject::cast(*object_handle)->map());
7855     if (has_exception) {
7856       // Hit an exception (most likely a stack overflow).
7857       // Wrap up this pass and retry after another GC.
7858       isolate->clear_pending_exception();
7859       // We use the handle since calling the getter might have caused a GC.
7860       list_[write_index++] = *object_handle;
7861       budget = 0;
7862     }
7863   }
7864   list_.Rewind(write_index);
7865   list_.Trim();
7866   nested_ = false;
7867 }
7868
7869
7870 void ErrorObjectList::RemoveUnmarked(Heap* heap) {
7871   for (int i = 0; i < list_.length(); i++) {
7872     HeapObject* object = HeapObject::cast(list_[i]);
7873     if (!Marking::MarkBitFrom(object).Get()) {
7874       list_[i] = heap->the_hole_value();
7875     }
7876   }
7877 }
7878
7879
7880 void ErrorObjectList::TearDown() {
7881   list_.Free();
7882 }
7883
7884
7885 void Heap::QueueMemoryChunkForFree(MemoryChunk* chunk) {
7886   chunk->set_next_chunk(chunks_queued_for_free_);
7887   chunks_queued_for_free_ = chunk;
7888 }
7889
7890
7891 void Heap::FreeQueuedChunks() {
7892   if (chunks_queued_for_free_ == NULL) return;
7893   MemoryChunk* next;
7894   MemoryChunk* chunk;
7895   for (chunk = chunks_queued_for_free_; chunk != NULL; chunk = next) {
7896     next = chunk->next_chunk();
7897     chunk->SetFlag(MemoryChunk::ABOUT_TO_BE_FREED);
7898
7899     if (chunk->owner()->identity() == LO_SPACE) {
7900       // StoreBuffer::Filter relies on MemoryChunk::FromAnyPointerAddress.
7901       // If FromAnyPointerAddress encounters a slot that belongs to a large
7902       // chunk queued for deletion it will fail to find the chunk because
7903       // it tries to perform a search in the list of pages owned by the large
7904       // object space, and queued chunks have been detached from that list.
7905       // To work around this we split the large chunk into normal kPageSize-aligned
7906       // pieces and initialize the size, owner and flags field of every piece.
7907       // If FromAnyPointerAddress encounters a slot that belongs to one of
7908       // these smaller pieces it will treat it as a slot on a normal Page.
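      // For example, a large chunk spanning roughly three pages starting at
      // address A gets fake headers written below at A + Page::kPageSize and
      // A + 2 * Page::kPageSize, each sized Page::kPageSize, owned by the
      // large object space and flagged ABOUT_TO_BE_FREED, so the
      // StoreBuffer::Filter call further down discards slots anywhere in the
      // chunk.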
7909       Address chunk_end = chunk->address() + chunk->size();
7910       MemoryChunk* inner = MemoryChunk::FromAddress(
7911           chunk->address() + Page::kPageSize);
7912       MemoryChunk* inner_last = MemoryChunk::FromAddress(chunk_end - 1);
7913       while (inner <= inner_last) {
7914         // Size of a large chunk is always a multiple of
7915         // OS::AllocateAlignment() so there is always
7916         // enough space for a fake MemoryChunk header.
7917         Address area_end = Min(inner->address() + Page::kPageSize, chunk_end);
7918         // Guard against overflow.
7919         if (area_end < inner->address()) area_end = chunk_end;
7920         inner->SetArea(inner->address(), area_end);
7921         inner->set_size(Page::kPageSize);
7922         inner->set_owner(lo_space());
7923         inner->SetFlag(MemoryChunk::ABOUT_TO_BE_FREED);
7924         inner = MemoryChunk::FromAddress(
7925             inner->address() + Page::kPageSize);
7926       }
7927     }
7928   }
7929   isolate_->heap()->store_buffer()->Compact();
7930   isolate_->heap()->store_buffer()->Filter(MemoryChunk::ABOUT_TO_BE_FREED);
7931   for (chunk = chunks_queued_for_free_; chunk != NULL; chunk = next) {
7932     next = chunk->next_chunk();
7933     isolate_->memory_allocator()->Free(chunk);
7934   }
7935   chunks_queued_for_free_ = NULL;
7936 }
7937
7938
7939 void Heap::RememberUnmappedPage(Address page, bool compacted) {
7940   uintptr_t p = reinterpret_cast<uintptr_t>(page);
7941   // Tag the page pointer to make it findable in the dump file.
7942   if (compacted) {
7943     p ^= 0xc1ead & (Page::kPageSize - 1);  // Cleared.
7944   } else {
7945     p ^= 0x1d1ed & (Page::kPageSize - 1);  // I died.
7946   }
7947   remembered_unmapped_pages_[remembered_unmapped_pages_index_] =
7948       reinterpret_cast<Address>(p);
7949   remembered_unmapped_pages_index_++;
7950   remembered_unmapped_pages_index_ %= kRememberedUnmappedPages;
7951 }
7952
7953
7954 void Heap::ClearObjectStats(bool clear_last_time_stats) {
7955   memset(object_counts_, 0, sizeof(object_counts_));
7956   memset(object_sizes_, 0, sizeof(object_sizes_));
7957   if (clear_last_time_stats) {
7958     memset(object_counts_last_time_, 0, sizeof(object_counts_last_time_));
7959     memset(object_sizes_last_time_, 0, sizeof(object_sizes_last_time_));
7960   }
7961 }
7962
7963
7964 static LazyMutex checkpoint_object_stats_mutex = LAZY_MUTEX_INITIALIZER;
7965
7966
7967 void Heap::CheckpointObjectStats() {
7968   ScopedLock lock(checkpoint_object_stats_mutex.Pointer());
7969   Counters* counters = isolate()->counters();
7970 #define ADJUST_LAST_TIME_OBJECT_COUNT(name)                                    \
7971   counters->count_of_##name()->Increment(                                      \
7972       static_cast<int>(object_counts_[name]));                                 \
7973   counters->count_of_##name()->Decrement(                                      \
7974       static_cast<int>(object_counts_last_time_[name]));                       \
7975   counters->size_of_##name()->Increment(                                       \
7976       static_cast<int>(object_sizes_[name]));                                  \
7977   counters->size_of_##name()->Decrement(                                       \
7978       static_cast<int>(object_sizes_last_time_[name]));
7979   INSTANCE_TYPE_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
7980 #undef ADJUST_LAST_TIME_OBJECT_COUNT
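  // For illustration, one expansion of the macro above (for the instance type
  // JS_OBJECT_TYPE) is roughly:
  //   counters->count_of_JS_OBJECT_TYPE()->Increment(
  //       static_cast<int>(object_counts_[JS_OBJECT_TYPE]));
  //   counters->count_of_JS_OBJECT_TYPE()->Decrement(
  //       static_cast<int>(object_counts_last_time_[JS_OBJECT_TYPE]));
  // and similarly for the size_of_ counters, so each counter accumulates the
  // delta observed since the previous checkpoint.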
7981   int index;
7982 #define ADJUST_LAST_TIME_OBJECT_COUNT(name)               \
7983   index = FIRST_CODE_KIND_SUB_TYPE + Code::name;          \
7984   counters->count_of_CODE_TYPE_##name()->Increment(       \
7985       static_cast<int>(object_counts_[index]));           \
7986   counters->count_of_CODE_TYPE_##name()->Decrement(       \
7987       static_cast<int>(object_counts_last_time_[index])); \
7988   counters->size_of_CODE_TYPE_##name()->Increment(        \
7989       static_cast<int>(object_sizes_[index]));            \
7990   counters->size_of_CODE_TYPE_##name()->Decrement(        \
7991       static_cast<int>(object_sizes_last_time_[index]));
7992   CODE_KIND_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
7993 #undef ADJUST_LAST_TIME_OBJECT_COUNT
7994 #define ADJUST_LAST_TIME_OBJECT_COUNT(name)               \
7995   index = FIRST_FIXED_ARRAY_SUB_TYPE + name;              \
7996   counters->count_of_FIXED_ARRAY_##name()->Increment(     \
7997       static_cast<int>(object_counts_[index]));           \
7998   counters->count_of_FIXED_ARRAY_##name()->Decrement(     \
7999       static_cast<int>(object_counts_last_time_[index])); \
8000   counters->size_of_FIXED_ARRAY_##name()->Increment(      \
8001       static_cast<int>(object_sizes_[index]));            \
8002   counters->size_of_FIXED_ARRAY_##name()->Decrement(      \
8003       static_cast<int>(object_sizes_last_time_[index]));
8004   FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
8005 #undef ADJUST_LAST_TIME_OBJECT_COUNT
8006
8007   OS::MemCopy(object_counts_last_time_, object_counts_, sizeof(object_counts_));
8008   OS::MemCopy(object_sizes_last_time_, object_sizes_, sizeof(object_sizes_));
8009   ClearObjectStats();
8010 }
8011
8012
8013 Heap::RelocationLock::RelocationLock(Heap* heap) : heap_(heap) {
8014   if (FLAG_parallel_recompilation) {
8015     heap_->relocation_mutex_->Lock();
8016 #ifdef DEBUG
8017     heap_->relocation_mutex_locked_by_optimizer_thread_ =
8018         heap_->isolate()->optimizing_compiler_thread()->IsOptimizerThread();
8019 #endif  // DEBUG
8020   }
8021 }
8022
8023 } }  // namespace v8::internal