deps/v8/src/heap.cc
1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
4 // met:
5 //
6 //     * Redistributions of source code must retain the above copyright
7 //       notice, this list of conditions and the following disclaimer.
8 //     * Redistributions in binary form must reproduce the above
9 //       copyright notice, this list of conditions and the following
10 //       disclaimer in the documentation and/or other materials provided
11 //       with the distribution.
12 //     * Neither the name of Google Inc. nor the names of its
13 //       contributors may be used to endorse or promote products derived
14 //       from this software without specific prior written permission.
15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
28 #include "v8.h"
29
30 #include "accessors.h"
31 #include "api.h"
32 #include "bootstrapper.h"
33 #include "codegen.h"
34 #include "compilation-cache.h"
35 #include "debug.h"
36 #include "deoptimizer.h"
37 #include "global-handles.h"
38 #include "heap-profiler.h"
39 #include "incremental-marking.h"
40 #include "liveobjectlist-inl.h"
41 #include "mark-compact.h"
42 #include "natives.h"
43 #include "objects-visiting.h"
44 #include "objects-visiting-inl.h"
45 #include "once.h"
46 #include "runtime-profiler.h"
47 #include "scopeinfo.h"
48 #include "snapshot.h"
49 #include "store-buffer.h"
50 #include "v8threads.h"
51 #include "v8utils.h"
52 #include "vm-state-inl.h"
53 #if V8_TARGET_ARCH_ARM && !V8_INTERPRETED_REGEXP
54 #include "regexp-macro-assembler.h"
55 #include "arm/regexp-macro-assembler-arm.h"
56 #endif
57 #if V8_TARGET_ARCH_MIPS && !V8_INTERPRETED_REGEXP
58 #include "regexp-macro-assembler.h"
59 #include "mips/regexp-macro-assembler-mips.h"
60 #endif
61
62 namespace v8 {
63 namespace internal {
64
65
66 Heap::Heap()
67     : isolate_(NULL),
68 // semispace_size_ should be a power of 2 and old_generation_size_ should be
69 // a multiple of Page::kPageSize.
70 #if defined(V8_TARGET_ARCH_X64)
71 #define LUMP_OF_MEMORY (2 * MB)
72       code_range_size_(512*MB),
73 #else
74 #define LUMP_OF_MEMORY MB
75       code_range_size_(0),
76 #endif
77 #if defined(ANDROID)
78       reserved_semispace_size_(4 * Max(LUMP_OF_MEMORY, Page::kPageSize)),
79       max_semispace_size_(4 * Max(LUMP_OF_MEMORY, Page::kPageSize)),
80       initial_semispace_size_(Page::kPageSize),
81       max_old_generation_size_(192*MB),
82       max_executable_size_(max_old_generation_size_),
83 #else
84       reserved_semispace_size_(8 * Max(LUMP_OF_MEMORY, Page::kPageSize)),
85       max_semispace_size_(8 * Max(LUMP_OF_MEMORY, Page::kPageSize)),
86       initial_semispace_size_(Page::kPageSize),
87       max_old_generation_size_(700ul * LUMP_OF_MEMORY),
88       max_executable_size_(256l * LUMP_OF_MEMORY),
89 #endif
90
91 // Variables set based on semispace_size_ and old_generation_size_ in
92 // ConfigureHeap (survived_since_last_expansion_, external_allocation_limit_).
93 // The reserved area will be 4 * reserved_semispace_size_ so that the young
94 // generation can be aligned to its size.
95       survived_since_last_expansion_(0),
96       sweep_generation_(0),
97       always_allocate_scope_depth_(0),
98       linear_allocation_scope_depth_(0),
99       contexts_disposed_(0),
100       global_ic_age_(0),
101       flush_monomorphic_ics_(false),
102       scan_on_scavenge_pages_(0),
103       new_space_(this),
104       old_pointer_space_(NULL),
105       old_data_space_(NULL),
106       code_space_(NULL),
107       map_space_(NULL),
108       cell_space_(NULL),
109       lo_space_(NULL),
110       gc_state_(NOT_IN_GC),
111       gc_post_processing_depth_(0),
112       ms_count_(0),
113       gc_count_(0),
114       remembered_unmapped_pages_index_(0),
115       unflattened_strings_length_(0),
116 #ifdef DEBUG
117       allocation_allowed_(true),
118       allocation_timeout_(0),
119       disallow_allocation_failure_(false),
120 #endif  // DEBUG
121       new_space_high_promotion_mode_active_(false),
122       old_gen_promotion_limit_(kMinimumPromotionLimit),
123       old_gen_allocation_limit_(kMinimumAllocationLimit),
124       old_gen_limit_factor_(1),
125       size_of_old_gen_at_last_old_space_gc_(0),
126       external_allocation_limit_(0),
127       amount_of_external_allocated_memory_(0),
128       amount_of_external_allocated_memory_at_last_global_gc_(0),
129       old_gen_exhausted_(false),
130       store_buffer_rebuilder_(store_buffer()),
131       hidden_symbol_(NULL),
132       global_gc_prologue_callback_(NULL),
133       global_gc_epilogue_callback_(NULL),
134       gc_safe_size_of_old_object_(NULL),
135       total_regexp_code_generated_(0),
136       tracer_(NULL),
137       young_survivors_after_last_gc_(0),
138       high_survival_rate_period_length_(0),
139       low_survival_rate_period_length_(0),
140       survival_rate_(0),
141       previous_survival_rate_trend_(Heap::STABLE),
142       survival_rate_trend_(Heap::STABLE),
143       max_gc_pause_(0),
144       total_gc_time_ms_(0),
145       max_alive_after_gc_(0),
146       min_in_mutator_(kMaxInt),
147       alive_after_last_gc_(0),
148       last_gc_end_timestamp_(0.0),
149       store_buffer_(this),
150       marking_(this),
151       incremental_marking_(this),
152       number_idle_notifications_(0),
153       last_idle_notification_gc_count_(0),
154       last_idle_notification_gc_count_init_(false),
155       mark_sweeps_since_idle_round_started_(0),
156       ms_count_at_last_idle_notification_(0),
157       gc_count_at_last_idle_gc_(0),
158       scavenges_since_last_idle_round_(kIdleScavengeThreshold),
159       promotion_queue_(this),
160       configured_(false),
161       chunks_queued_for_free_(NULL),
162       relocation_mutex_(NULL) {
163   // Allow build-time customization of the max semispace size. Building
164   // V8 with snapshots and a non-default max semispace size is much
165   // easier if you can define it as part of the build environment.
166 #if defined(V8_MAX_SEMISPACE_SIZE)
167   max_semispace_size_ = reserved_semispace_size_ = V8_MAX_SEMISPACE_SIZE;
168 #endif
169
170   intptr_t max_virtual = OS::MaxVirtualMemory();
171
172   if (max_virtual > 0) {
173     if (code_range_size_ > 0) {
174       // Reserve no more than 1/8 of the memory for the code range.
175       code_range_size_ = Min(code_range_size_, max_virtual >> 3);
176     }
177   }
178
179   memset(roots_, 0, sizeof(roots_[0]) * kRootListLength);
180   native_contexts_list_ = NULL;
181   mark_compact_collector_.heap_ = this;
182   external_string_table_.heap_ = this;
183   // Put a dummy entry in the remembered pages so we can find the list in
184   // the minidump even if there are no real unmapped pages.
185   RememberUnmappedPage(NULL, false);
186
187   ClearObjectStats(true);
188 }
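// A worked example of the defaults above, as a sketch for the x64,
// non-Android branch (and assuming Page::kPageSize is no larger than 2 MB):
// LUMP_OF_MEMORY is 2 MB, so
//   reserved_semispace_size_ = max_semispace_size_ = 8 * 2 MB = 16 MB,
//   max_old_generation_size_ = 700 * 2 MB = 1400 MB,
//   max_executable_size_     = 256 * 2 MB = 512 MB,
// and code_range_size_ starts at 512 MB but is capped at one eighth of
// OS::MaxVirtualMemory() in the constructor body.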
189
190
191 intptr_t Heap::Capacity() {
192   if (!HasBeenSetUp()) return 0;
193
194   return new_space_.Capacity() +
195       old_pointer_space_->Capacity() +
196       old_data_space_->Capacity() +
197       code_space_->Capacity() +
198       map_space_->Capacity() +
199       cell_space_->Capacity();
200 }
201
202
203 intptr_t Heap::CommittedMemory() {
204   if (!HasBeenSetUp()) return 0;
205
206   return new_space_.CommittedMemory() +
207       old_pointer_space_->CommittedMemory() +
208       old_data_space_->CommittedMemory() +
209       code_space_->CommittedMemory() +
210       map_space_->CommittedMemory() +
211       cell_space_->CommittedMemory() +
212       lo_space_->Size();
213 }
214
215
216 size_t Heap::CommittedPhysicalMemory() {
217   if (!HasBeenSetUp()) return 0;
218
219   return new_space_.CommittedPhysicalMemory() +
220       old_pointer_space_->CommittedPhysicalMemory() +
221       old_data_space_->CommittedPhysicalMemory() +
222       code_space_->CommittedPhysicalMemory() +
223       map_space_->CommittedPhysicalMemory() +
224       cell_space_->CommittedPhysicalMemory() +
225       lo_space_->CommittedPhysicalMemory();
226 }
227
228
229 intptr_t Heap::CommittedMemoryExecutable() {
230   if (!HasBeenSetUp()) return 0;
231
232   return isolate()->memory_allocator()->SizeExecutable();
233 }
234
235
236 intptr_t Heap::Available() {
237   if (!HasBeenSetUp()) return 0;
238
239   return new_space_.Available() +
240       old_pointer_space_->Available() +
241       old_data_space_->Available() +
242       code_space_->Available() +
243       map_space_->Available() +
244       cell_space_->Available();
245 }
246
247
248 bool Heap::HasBeenSetUp() {
249   return old_pointer_space_ != NULL &&
250          old_data_space_ != NULL &&
251          code_space_ != NULL &&
252          map_space_ != NULL &&
253          cell_space_ != NULL &&
254          lo_space_ != NULL;
255 }
256
257
258 int Heap::GcSafeSizeOfOldObject(HeapObject* object) {
259   if (IntrusiveMarking::IsMarked(object)) {
260     return IntrusiveMarking::SizeOfMarkedObject(object);
261   }
262   return object->SizeFromMap(object->map());
263 }
264
265
266 GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space,
267                                               const char** reason) {
268   // Is global GC requested?
269   if (space != NEW_SPACE) {
270     isolate_->counters()->gc_compactor_caused_by_request()->Increment();
271     *reason = "GC in old space requested";
272     return MARK_COMPACTOR;
273   }
274
275   if (FLAG_gc_global || (FLAG_stress_compaction && (gc_count_ & 1) != 0)) {
276     *reason = "GC in old space forced by flags";
277     return MARK_COMPACTOR;
278   }
279
280   // Is enough data promoted to justify a global GC?
281   if (OldGenerationPromotionLimitReached()) {
282     isolate_->counters()->gc_compactor_caused_by_promoted_data()->Increment();
283     *reason = "promotion limit reached";
284     return MARK_COMPACTOR;
285   }
286
287   // Have allocation in OLD and LO failed?
288   if (old_gen_exhausted_) {
289     isolate_->counters()->
290         gc_compactor_caused_by_oldspace_exhaustion()->Increment();
291     *reason = "old generations exhausted";
292     return MARK_COMPACTOR;
293   }
294
295   // Is there enough space left in OLD to guarantee that a scavenge can
296   // succeed?
297   //
298   // Note that MemoryAllocator->MaxAvailable() undercounts the memory available
299   // for object promotion. It counts only the bytes that the memory
300   // allocator has not yet allocated from the OS and assigned to any space,
301   // and does not count available bytes already in the old space or code
302   // space.  Undercounting is safe---we may get an unrequested full GC when
303   // a scavenge would have succeeded.
304   if (isolate_->memory_allocator()->MaxAvailable() <= new_space_.Size()) {
305     isolate_->counters()->
306         gc_compactor_caused_by_oldspace_exhaustion()->Increment();
307     *reason = "scavenge might not succeed";
308     return MARK_COMPACTOR;
309   }
310
311   // Default
312   *reason = NULL;
313   return SCAVENGER;
314 }
315
316
317 // TODO(1238405): Combine the infrastructure for --heap-stats and
318 // --log-gc to avoid the complicated preprocessor and flag testing.
319 void Heap::ReportStatisticsBeforeGC() {
320   // Heap::ReportHeapStatistics will also log NewSpace statistics when
321   // --log-gc is set.  The following logic is used to avoid
322   // double logging.
323 #ifdef DEBUG
324   if (FLAG_heap_stats || FLAG_log_gc) new_space_.CollectStatistics();
325   if (FLAG_heap_stats) {
326     ReportHeapStatistics("Before GC");
327   } else if (FLAG_log_gc) {
328     new_space_.ReportStatistics();
329   }
330   if (FLAG_heap_stats || FLAG_log_gc) new_space_.ClearHistograms();
331 #else
332   if (FLAG_log_gc) {
333     new_space_.CollectStatistics();
334     new_space_.ReportStatistics();
335     new_space_.ClearHistograms();
336   }
337 #endif  // DEBUG
338 }
339
340
341 void Heap::PrintShortHeapStatistics() {
342   if (!FLAG_trace_gc_verbose) return;
343   PrintPID("Memory allocator,   used: %6" V8_PTR_PREFIX "d KB"
344                ", available: %6" V8_PTR_PREFIX "d KB\n",
345            isolate_->memory_allocator()->Size() / KB,
346            isolate_->memory_allocator()->Available() / KB);
347   PrintPID("New space,          used: %6" V8_PTR_PREFIX "d KB"
348                ", available: %6" V8_PTR_PREFIX "d KB"
349                ", committed: %6" V8_PTR_PREFIX "d KB\n",
350            new_space_.Size() / KB,
351            new_space_.Available() / KB,
352            new_space_.CommittedMemory() / KB);
353   PrintPID("Old pointers,       used: %6" V8_PTR_PREFIX "d KB"
354                ", available: %6" V8_PTR_PREFIX "d KB"
355                ", committed: %6" V8_PTR_PREFIX "d KB\n",
356            old_pointer_space_->SizeOfObjects() / KB,
357            old_pointer_space_->Available() / KB,
358            old_pointer_space_->CommittedMemory() / KB);
359   PrintPID("Old data space,     used: %6" V8_PTR_PREFIX "d KB"
360                ", available: %6" V8_PTR_PREFIX "d KB"
361                ", committed: %6" V8_PTR_PREFIX "d KB\n",
362            old_data_space_->SizeOfObjects() / KB,
363            old_data_space_->Available() / KB,
364            old_data_space_->CommittedMemory() / KB);
365   PrintPID("Code space,         used: %6" V8_PTR_PREFIX "d KB"
366                ", available: %6" V8_PTR_PREFIX "d KB"
367                ", committed: %6" V8_PTR_PREFIX "d KB\n",
368            code_space_->SizeOfObjects() / KB,
369            code_space_->Available() / KB,
370            code_space_->CommittedMemory() / KB);
371   PrintPID("Map space,          used: %6" V8_PTR_PREFIX "d KB"
372                ", available: %6" V8_PTR_PREFIX "d KB"
373                ", committed: %6" V8_PTR_PREFIX "d KB\n",
374            map_space_->SizeOfObjects() / KB,
375            map_space_->Available() / KB,
376            map_space_->CommittedMemory() / KB);
377   PrintPID("Cell space,         used: %6" V8_PTR_PREFIX "d KB"
378                ", available: %6" V8_PTR_PREFIX "d KB"
379                ", committed: %6" V8_PTR_PREFIX "d KB\n",
380            cell_space_->SizeOfObjects() / KB,
381            cell_space_->Available() / KB,
382            cell_space_->CommittedMemory() / KB);
383   PrintPID("Large object space, used: %6" V8_PTR_PREFIX "d KB"
384                ", available: %6" V8_PTR_PREFIX "d KB"
385                ", committed: %6" V8_PTR_PREFIX "d KB\n",
386            lo_space_->SizeOfObjects() / KB,
387            lo_space_->Available() / KB,
388            lo_space_->CommittedMemory() / KB);
389   PrintPID("All spaces,         used: %6" V8_PTR_PREFIX "d KB"
390                ", available: %6" V8_PTR_PREFIX "d KB"
391                ", committed: %6" V8_PTR_PREFIX "d KB\n",
392            this->SizeOfObjects() / KB,
393            this->Available() / KB,
394            this->CommittedMemory() / KB);
395   PrintPID("Total time spent in GC  : %d ms\n", total_gc_time_ms_);
396 }
397
398
399 // TODO(1238405): Combine the infrastructure for --heap-stats and
400 // --log-gc to avoid the complicated preprocessor and flag testing.
401 void Heap::ReportStatisticsAfterGC() {
402   // As with the pre-GC report, we use some complicated logic to ensure that
403   // NewSpace statistics are logged exactly once when --log-gc is turned on.
404 #if defined(DEBUG)
405   if (FLAG_heap_stats) {
406     new_space_.CollectStatistics();
407     ReportHeapStatistics("After GC");
408   } else if (FLAG_log_gc) {
409     new_space_.ReportStatistics();
410   }
411 #else
412   if (FLAG_log_gc) new_space_.ReportStatistics();
413 #endif  // DEBUG
414 }
415
416
417 void Heap::GarbageCollectionPrologue() {
418   isolate_->transcendental_cache()->Clear();
419   ClearJSFunctionResultCaches();
420   gc_count_++;
421   unflattened_strings_length_ = 0;
422
423   if (FLAG_flush_code && FLAG_flush_code_incrementally) {
424     mark_compact_collector()->EnableCodeFlushing(true);
425   }
426
427 #ifdef VERIFY_HEAP
428   if (FLAG_verify_heap) {
429     Verify();
430   }
431 #endif
432
433 #ifdef DEBUG
434   ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
435   allow_allocation(false);
436
437   if (FLAG_gc_verbose) Print();
438
439   ReportStatisticsBeforeGC();
440 #endif  // DEBUG
441
442   LiveObjectList::GCPrologue();
443   store_buffer()->GCPrologue();
444 }
445
446
447 intptr_t Heap::SizeOfObjects() {
448   intptr_t total = 0;
449   AllSpaces spaces;
450   for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
451     total += space->SizeOfObjects();
452   }
453   return total;
454 }
455
456
457 void Heap::RepairFreeListsAfterBoot() {
458   PagedSpaces spaces;
459   for (PagedSpace* space = spaces.next();
460        space != NULL;
461        space = spaces.next()) {
462     space->RepairFreeListsAfterBoot();
463   }
464 }
465
466
467 void Heap::GarbageCollectionEpilogue() {
468   store_buffer()->GCEpilogue();
469   LiveObjectList::GCEpilogue();
470
471   // In release mode, we only zap the from space under heap verification.
472   if (Heap::ShouldZapGarbage()) {
473     ZapFromSpace();
474   }
475
476 #ifdef VERIFY_HEAP
477   if (FLAG_verify_heap) {
478     Verify();
479   }
480 #endif
481
482 #ifdef DEBUG
483   allow_allocation(true);
484   if (FLAG_print_global_handles) isolate_->global_handles()->Print();
485   if (FLAG_print_handles) PrintHandles();
486   if (FLAG_gc_verbose) Print();
487   if (FLAG_code_stats) ReportCodeStatistics("After GC");
488 #endif
489
490   isolate_->counters()->alive_after_last_gc()->Set(
491       static_cast<int>(SizeOfObjects()));
492
493   isolate_->counters()->symbol_table_capacity()->Set(
494       symbol_table()->Capacity());
495   isolate_->counters()->number_of_symbols()->Set(
496       symbol_table()->NumberOfElements());
497
498   if (CommittedMemory() > 0) {
499     isolate_->counters()->external_fragmentation_total()->AddSample(
500         static_cast<int>(100 - (SizeOfObjects() * 100.0) / CommittedMemory()));
501
502     isolate_->counters()->heap_fraction_map_space()->AddSample(
503         static_cast<int>(
504             (map_space()->CommittedMemory() * 100.0) / CommittedMemory()));
505     isolate_->counters()->heap_fraction_cell_space()->AddSample(
506         static_cast<int>(
507             (cell_space()->CommittedMemory() * 100.0) / CommittedMemory()));
508
509     isolate_->counters()->heap_sample_total_committed()->AddSample(
510         static_cast<int>(CommittedMemory() / KB));
511     isolate_->counters()->heap_sample_total_used()->AddSample(
512         static_cast<int>(SizeOfObjects() / KB));
513     isolate_->counters()->heap_sample_map_space_committed()->AddSample(
514         static_cast<int>(map_space()->CommittedMemory() / KB));
515     isolate_->counters()->heap_sample_cell_space_committed()->AddSample(
516         static_cast<int>(cell_space()->CommittedMemory() / KB));
517   }
518
519 #define UPDATE_COUNTERS_FOR_SPACE(space)                                       \
520   isolate_->counters()->space##_bytes_available()->Set(                        \
521       static_cast<int>(space()->Available()));                                 \
522   isolate_->counters()->space##_bytes_committed()->Set(                        \
523       static_cast<int>(space()->CommittedMemory()));                           \
524   isolate_->counters()->space##_bytes_used()->Set(                             \
525       static_cast<int>(space()->SizeOfObjects()));
526 #define UPDATE_FRAGMENTATION_FOR_SPACE(space)                                  \
527   if (space()->CommittedMemory() > 0) {                                        \
528     isolate_->counters()->external_fragmentation_##space()->AddSample(         \
529         static_cast<int>(100 -                                                 \
530             (space()->SizeOfObjects() * 100.0) / space()->CommittedMemory())); \
531   }
532 #define UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(space)                     \
533   UPDATE_COUNTERS_FOR_SPACE(space)                                             \
534   UPDATE_FRAGMENTATION_FOR_SPACE(space)
535
536   UPDATE_COUNTERS_FOR_SPACE(new_space)
537   UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(old_pointer_space)
538   UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(old_data_space)
539   UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(code_space)
540   UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(map_space)
541   UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(cell_space)
542   UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(lo_space)
543 #undef UPDATE_COUNTERS_FOR_SPACE
544 #undef UPDATE_FRAGMENTATION_FOR_SPACE
545 #undef UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE
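  // For reference, a sketch of what one of the invocations above expands to
  // (mechanical macro expansion, shown modulo whitespace):
  // UPDATE_COUNTERS_FOR_SPACE(new_space) becomes
  //   isolate_->counters()->new_space_bytes_available()->Set(
  //       static_cast<int>(new_space()->Available()));
  //   isolate_->counters()->new_space_bytes_committed()->Set(
  //       static_cast<int>(new_space()->CommittedMemory()));
  //   isolate_->counters()->new_space_bytes_used()->Set(
  //       static_cast<int>(new_space()->SizeOfObjects()));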
546
547 #if defined(DEBUG)
548   ReportStatisticsAfterGC();
549 #endif  // DEBUG
550 #ifdef ENABLE_DEBUGGER_SUPPORT
551   isolate_->debug()->AfterGarbageCollection();
552 #endif  // ENABLE_DEBUGGER_SUPPORT
553 }
554
555
556 void Heap::CollectAllGarbage(int flags, const char* gc_reason) {
557   // Since we are ignoring the return value, the exact choice of space does
558   // not matter, so long as we do not specify NEW_SPACE, which would not
559   // cause a full GC.
560   mark_compact_collector_.SetFlags(flags);
561   CollectGarbage(OLD_POINTER_SPACE, gc_reason);
562   mark_compact_collector_.SetFlags(kNoGCFlags);
563 }
564
565
566 void Heap::CollectAllAvailableGarbage(const char* gc_reason) {
567   // Since we are ignoring the return value, the exact choice of space does
568   // not matter, so long as we do not specify NEW_SPACE, which would not
569   // cause a full GC.
570   // Major GC would invoke weak handle callbacks on weakly reachable
571   // handles, but won't collect weakly reachable objects until next
572   // major GC.  Therefore if we collect aggressively and weak handle callback
573   // has been invoked, we rerun major GC to release objects which become
574   // garbage.
575   // Note: as weak callbacks can execute arbitrary code, we cannot
576   // hope that eventually there will be no weak callbacks invocations.
577   // Therefore stop recollecting after several attempts.
578   mark_compact_collector()->SetFlags(kMakeHeapIterableMask |
579                                      kReduceMemoryFootprintMask);
580   isolate_->compilation_cache()->Clear();
581   const int kMaxNumberOfAttempts = 7;
582   for (int attempt = 0; attempt < kMaxNumberOfAttempts; attempt++) {
583     if (!CollectGarbage(OLD_POINTER_SPACE, MARK_COMPACTOR, gc_reason, NULL)) {
584       break;
585     }
586   }
587   mark_compact_collector()->SetFlags(kNoGCFlags);
588   new_space_.Shrink();
589   UncommitFromSpace();
590   Shrink();
591   incremental_marking()->UncommitMarkingDeque();
592 }
593
594
595 bool Heap::CollectGarbage(AllocationSpace space,
596                           GarbageCollector collector,
597                           const char* gc_reason,
598                           const char* collector_reason) {
599   // The VM is in the GC state until exiting this function.
600   VMState state(isolate_, GC);
601
602 #ifdef DEBUG
603   // Reset the allocation timeout to the GC interval, but make sure to
604   // allow at least a few allocations after a collection. The reason
605   // for this is that we have a lot of allocation sequences and we
606   // assume that a garbage collection will allow the subsequent
607   // allocation attempts to go through.
608   allocation_timeout_ = Max(6, FLAG_gc_interval);
609 #endif
610
611   if (collector == SCAVENGER && !incremental_marking()->IsStopped()) {
612     if (FLAG_trace_incremental_marking) {
613       PrintF("[IncrementalMarking] Scavenge during marking.\n");
614     }
615   }
616
617   if (collector == MARK_COMPACTOR &&
618       !mark_compact_collector()->abort_incremental_marking() &&
619       !incremental_marking()->IsStopped() &&
620       !incremental_marking()->should_hurry() &&
621       FLAG_incremental_marking_steps) {
622     // Make progress in incremental marking.
623     const intptr_t kStepSizeWhenDelayedByScavenge = 1 * MB;
624     incremental_marking()->Step(kStepSizeWhenDelayedByScavenge,
625                                 IncrementalMarking::NO_GC_VIA_STACK_GUARD);
626     if (!incremental_marking()->IsComplete()) {
627       if (FLAG_trace_incremental_marking) {
628         PrintF("[IncrementalMarking] Delaying MarkSweep.\n");
629       }
630       collector = SCAVENGER;
631       collector_reason = "incremental marking delaying mark-sweep";
632     }
633   }
634
635   bool next_gc_likely_to_collect_more = false;
636
637   { GCTracer tracer(this, gc_reason, collector_reason);
638     GarbageCollectionPrologue();
639     // The GC count was incremented in the prologue.  Tell the tracer about
640     // it.
641     tracer.set_gc_count(gc_count_);
642
643     // Tell the tracer which collector we've selected.
644     tracer.set_collector(collector);
645
646     {
647       HistogramTimerScope histogram_timer_scope(
648           (collector == SCAVENGER) ? isolate_->counters()->gc_scavenger()
649                                    : isolate_->counters()->gc_compactor());
650       next_gc_likely_to_collect_more =
651           PerformGarbageCollection(collector, &tracer);
652     }
653
654     GarbageCollectionEpilogue();
655   }
656
657   // Start incremental marking for the next cycle. The heap snapshot
658   // generator needs incremental marking to stay off after it aborted.
659   if (!mark_compact_collector()->abort_incremental_marking() &&
660       incremental_marking()->IsStopped() &&
661       incremental_marking()->WorthActivating() &&
662       NextGCIsLikelyToBeFull()) {
663     incremental_marking()->Start();
664   }
665
666   return next_gc_likely_to_collect_more;
667 }
668
669
670 void Heap::PerformScavenge() {
671   GCTracer tracer(this, NULL, NULL);
672   if (incremental_marking()->IsStopped()) {
673     PerformGarbageCollection(SCAVENGER, &tracer);
674   } else {
675     PerformGarbageCollection(MARK_COMPACTOR, &tracer);
676   }
677 }
678
679
680 #ifdef VERIFY_HEAP
681 // Helper class for verifying the symbol table.
682 class SymbolTableVerifier : public ObjectVisitor {
683  public:
684   void VisitPointers(Object** start, Object** end) {
685     // Visit all HeapObject pointers in [start, end).
686     for (Object** p = start; p < end; p++) {
687       if ((*p)->IsHeapObject()) {
688         // Check that the symbol is actually a symbol.
689         CHECK((*p)->IsTheHole() || (*p)->IsUndefined() || (*p)->IsSymbol());
690       }
691     }
692   }
693 };
694
695
696 static void VerifySymbolTable() {
697   SymbolTableVerifier verifier;
698   HEAP->symbol_table()->IterateElements(&verifier);
699 }
700 #endif  // VERIFY_HEAP
701
702
703 static bool AbortIncrementalMarkingAndCollectGarbage(
704     Heap* heap,
705     AllocationSpace space,
706     const char* gc_reason = NULL) {
707   heap->mark_compact_collector()->SetFlags(Heap::kAbortIncrementalMarkingMask);
708   bool result = heap->CollectGarbage(space, gc_reason);
709   heap->mark_compact_collector()->SetFlags(Heap::kNoGCFlags);
710   return result;
711 }
712
713
714 void Heap::ReserveSpace(
715     int *sizes,
716     Address *locations_out) {
717   bool gc_performed = true;
718   int counter = 0;
719   static const int kThreshold = 20;
720   while (gc_performed && counter++ < kThreshold) {
721     gc_performed = false;
722     ASSERT(NEW_SPACE == FIRST_PAGED_SPACE - 1);
723     for (int space = NEW_SPACE; space <= LAST_PAGED_SPACE; space++) {
724       if (sizes[space] != 0) {
725         MaybeObject* allocation;
726         if (space == NEW_SPACE) {
727           allocation = new_space()->AllocateRaw(sizes[space]);
728         } else {
729           allocation = paged_space(space)->AllocateRaw(sizes[space]);
730         }
731         FreeListNode* node;
732         if (!allocation->To<FreeListNode>(&node)) {
733           if (space == NEW_SPACE) {
734             Heap::CollectGarbage(NEW_SPACE,
735                                  "failed to reserve space in the new space");
736           } else {
737             AbortIncrementalMarkingAndCollectGarbage(
738                 this,
739                 static_cast<AllocationSpace>(space),
740                 "failed to reserve space in paged space");
741           }
742           gc_performed = true;
743           break;
744         } else {
745           // Mark with a free list node, in case we have a GC before
746           // deserializing.
747           node->set_size(this, sizes[space]);
748           locations_out[space] = node->address();
749         }
750       }
751     }
752   }
753
754   if (gc_performed) {
755     // Failed to reserve the space after several attempts.
756     V8::FatalProcessOutOfMemory("Heap::ReserveSpace");
757   }
758 }
759
760
761 void Heap::EnsureFromSpaceIsCommitted() {
762   if (new_space_.CommitFromSpaceIfNeeded()) return;
763
764   // Committing memory to from space failed.
765   // Try shrinking and try again.
766   Shrink();
767   if (new_space_.CommitFromSpaceIfNeeded()) return;
768
769   // Committing memory to from space failed again.
770   // Memory is exhausted and we will die.
771   V8::FatalProcessOutOfMemory("Committing semi space failed.");
772 }
773
774
775 void Heap::ClearJSFunctionResultCaches() {
776   if (isolate_->bootstrapper()->IsActive()) return;
777
778   Object* context = native_contexts_list_;
779   while (!context->IsUndefined()) {
780     // Get the caches for this context. GC can happen when the context
781     // is not fully initialized, so the caches can be undefined.
782     Object* caches_or_undefined =
783         Context::cast(context)->get(Context::JSFUNCTION_RESULT_CACHES_INDEX);
784     if (!caches_or_undefined->IsUndefined()) {
785       FixedArray* caches = FixedArray::cast(caches_or_undefined);
786       // Clear the caches:
787       int length = caches->length();
788       for (int i = 0; i < length; i++) {
789         JSFunctionResultCache::cast(caches->get(i))->Clear();
790       }
791     }
792     // Get the next context:
793     context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
794   }
795 }
796
797
798
799 void Heap::ClearNormalizedMapCaches() {
800   if (isolate_->bootstrapper()->IsActive() &&
801       !incremental_marking()->IsMarking()) {
802     return;
803   }
804
805   Object* context = native_contexts_list_;
806   while (!context->IsUndefined()) {
807     // GC can happen when the context is not fully initialized,
808     // so the cache can be undefined.
809     Object* cache =
810         Context::cast(context)->get(Context::NORMALIZED_MAP_CACHE_INDEX);
811     if (!cache->IsUndefined()) {
812       NormalizedMapCache::cast(cache)->Clear();
813     }
814     context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
815   }
816 }
817
818
819 void Heap::UpdateSurvivalRateTrend(int start_new_space_size) {
820   double survival_rate =
821       (static_cast<double>(young_survivors_after_last_gc_) * 100) /
822       start_new_space_size;
823
824   if (survival_rate > kYoungSurvivalRateHighThreshold) {
825     high_survival_rate_period_length_++;
826   } else {
827     high_survival_rate_period_length_ = 0;
828   }
829
830   if (survival_rate < kYoungSurvivalRateLowThreshold) {
831     low_survival_rate_period_length_++;
832   } else {
833     low_survival_rate_period_length_ = 0;
834   }
835
836   double survival_rate_diff = survival_rate_ - survival_rate;
837
838   if (survival_rate_diff > kYoungSurvivalRateAllowedDeviation) {
839     set_survival_rate_trend(DECREASING);
840   } else if (survival_rate_diff < -kYoungSurvivalRateAllowedDeviation) {
841     set_survival_rate_trend(INCREASING);
842   } else {
843     set_survival_rate_trend(STABLE);
844   }
845
846   survival_rate_ = survival_rate;
847 }
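// An illustrative pass through UpdateSurvivalRateTrend (example figures only;
// the actual thresholds are constants defined in heap.h): with
// start_new_space_size = 8 MB and young_survivors_after_last_gc_ = 1 MB the
// survival rate is 12.5%.  If the previous survival_rate_ was 40%, the
// difference is 27.5 percentage points; assuming that exceeds
// kYoungSurvivalRateAllowedDeviation, the trend becomes DECREASING.  A
// follow-up rate of, say, 14% would fall within the allowed deviation and the
// trend would return to STABLE.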
848
849 bool Heap::PerformGarbageCollection(GarbageCollector collector,
850                                     GCTracer* tracer) {
851   bool next_gc_likely_to_collect_more = false;
852
853   if (collector != SCAVENGER) {
854     PROFILE(isolate_, CodeMovingGCEvent());
855   }
856
857 #ifdef VERIFY_HEAP
858   if (FLAG_verify_heap) {
859     VerifySymbolTable();
860   }
861 #endif
862
863   if (collector == MARK_COMPACTOR && global_gc_prologue_callback_) {
864     ASSERT(!allocation_allowed_);
865     GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
866     global_gc_prologue_callback_();
867   }
868
869   GCType gc_type =
870       collector == MARK_COMPACTOR ? kGCTypeMarkSweepCompact : kGCTypeScavenge;
871
872   for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
873     if (gc_type & gc_prologue_callbacks_[i].gc_type) {
874       gc_prologue_callbacks_[i].callback(gc_type, kNoGCCallbackFlags);
875     }
876   }
877
878   EnsureFromSpaceIsCommitted();
879
880   int start_new_space_size = Heap::new_space()->SizeAsInt();
881
882   if (IsHighSurvivalRate()) {
883     // We speed up the incremental marker if it is running so that it
884     // does not fall behind the rate of promotion, which would cause a
885     // constantly growing old space.
886     incremental_marking()->NotifyOfHighPromotionRate();
887   }
888
889   if (collector == MARK_COMPACTOR) {
890     // Perform mark-sweep with optional compaction.
891     MarkCompact(tracer);
892     sweep_generation_++;
893     bool high_survival_rate_during_scavenges = IsHighSurvivalRate() &&
894         IsStableOrIncreasingSurvivalTrend();
895
896     UpdateSurvivalRateTrend(start_new_space_size);
897
898     size_of_old_gen_at_last_old_space_gc_ = PromotedSpaceSizeOfObjects();
899
900     if (high_survival_rate_during_scavenges &&
901         IsStableOrIncreasingSurvivalTrend()) {
902       // Stable high survival rates of young objects both during partial and
903     // full collection indicate that the mutator is either building or modifying
904       // a structure with a long lifetime.
905       // In this case we aggressively raise old generation memory limits to
906       // postpone subsequent mark-sweep collection and thus trade memory
907       // space for the mutation speed.
908       old_gen_limit_factor_ = 2;
909     } else {
910       old_gen_limit_factor_ = 1;
911     }
912
913     old_gen_promotion_limit_ =
914         OldGenPromotionLimit(size_of_old_gen_at_last_old_space_gc_);
915     old_gen_allocation_limit_ =
916         OldGenAllocationLimit(size_of_old_gen_at_last_old_space_gc_);
917
918     old_gen_exhausted_ = false;
919   } else {
920     tracer_ = tracer;
921     Scavenge();
922     tracer_ = NULL;
923
924     UpdateSurvivalRateTrend(start_new_space_size);
925   }
926
927   if (!new_space_high_promotion_mode_active_ &&
928       new_space_.Capacity() == new_space_.MaximumCapacity() &&
929       IsStableOrIncreasingSurvivalTrend() &&
930       IsHighSurvivalRate()) {
931     // Stable high survival rates even though the young generation is at
932     // maximum capacity indicate that most objects will be promoted.
933     // To decrease scavenger pauses and final mark-sweep pauses, we
934     // have to limit maximal capacity of the young generation.
935     new_space_high_promotion_mode_active_ = true;
936     if (FLAG_trace_gc) {
937       PrintPID("Limited new space size due to high promotion rate: %d MB\n",
938                new_space_.InitialCapacity() / MB);
939     }
940   } else if (new_space_high_promotion_mode_active_ &&
941       IsStableOrDecreasingSurvivalTrend() &&
942       IsLowSurvivalRate()) {
943     // Decreasing low survival rates might indicate that the above high
944     // promotion mode is over and we should allow the young generation
945     // to grow again.
946     new_space_high_promotion_mode_active_ = false;
947     if (FLAG_trace_gc) {
948       PrintPID("Unlimited new space size due to low promotion rate: %d MB\n",
949                new_space_.MaximumCapacity() / MB);
950     }
951   }
952
953   if (new_space_high_promotion_mode_active_ &&
954       new_space_.Capacity() > new_space_.InitialCapacity()) {
955     new_space_.Shrink();
956   }
957
958   isolate_->counters()->objs_since_last_young()->Set(0);
959
960   // Callbacks that fire after this point might trigger nested GCs and
961   // restart incremental marking, so the assertion can't be moved down.
962   ASSERT(collector == SCAVENGER || incremental_marking()->IsStopped());
963
964   gc_post_processing_depth_++;
965   { DisableAssertNoAllocation allow_allocation;
966     GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
967     next_gc_likely_to_collect_more =
968         isolate_->global_handles()->PostGarbageCollectionProcessing(
969             collector, tracer);
970   }
971   gc_post_processing_depth_--;
972
973   // Update relocatables.
974   Relocatable::PostGarbageCollectionProcessing();
975
976   if (collector == MARK_COMPACTOR) {
977     // Register the amount of external allocated memory.
978     amount_of_external_allocated_memory_at_last_global_gc_ =
979         amount_of_external_allocated_memory_;
980   }
981
982   GCCallbackFlags callback_flags = kNoGCCallbackFlags;
983   for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
984     if (gc_type & gc_epilogue_callbacks_[i].gc_type) {
985       gc_epilogue_callbacks_[i].callback(gc_type, callback_flags);
986     }
987   }
988
989   if (collector == MARK_COMPACTOR && global_gc_epilogue_callback_) {
990     ASSERT(!allocation_allowed_);
991     GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
992     global_gc_epilogue_callback_();
993   }
994
995 #ifdef VERIFY_HEAP
996   if (FLAG_verify_heap) {
997     VerifySymbolTable();
998   }
999 #endif
1000
1001   return next_gc_likely_to_collect_more;
1002 }
1003
1004
1005 void Heap::MarkCompact(GCTracer* tracer) {
1006   gc_state_ = MARK_COMPACT;
1007   LOG(isolate_, ResourceEvent("markcompact", "begin"));
1008
1009   mark_compact_collector_.Prepare(tracer);
1010
1011   ms_count_++;
1012   tracer->set_full_gc_count(ms_count_);
1013
1014   MarkCompactPrologue();
1015
1016   mark_compact_collector_.CollectGarbage();
1017
1018   LOG(isolate_, ResourceEvent("markcompact", "end"));
1019
1020   gc_state_ = NOT_IN_GC;
1021
1022   isolate_->counters()->objs_since_last_full()->Set(0);
1023
1024   contexts_disposed_ = 0;
1025
1026   flush_monomorphic_ics_ = false;
1027 }
1028
1029
1030 void Heap::MarkCompactPrologue() {
1031   // At any old-space GC, clear the keyed lookup cache to enable collection of
1032   // unused maps.
1033   isolate_->keyed_lookup_cache()->Clear();
1034   isolate_->context_slot_cache()->Clear();
1035   isolate_->descriptor_lookup_cache()->Clear();
1036   RegExpResultsCache::Clear(string_split_cache());
1037   RegExpResultsCache::Clear(regexp_multiple_cache());
1038
1039   isolate_->compilation_cache()->MarkCompactPrologue();
1040
1041   CompletelyClearInstanceofCache();
1042
1043   FlushNumberStringCache();
1044   if (FLAG_cleanup_code_caches_at_gc) {
1045     polymorphic_code_cache()->set_cache(undefined_value());
1046   }
1047
1048   ClearNormalizedMapCaches();
1049 }
1050
1051
1052 Object* Heap::FindCodeObject(Address a) {
1053   return isolate()->inner_pointer_to_code_cache()->
1054       GcSafeFindCodeForInnerPointer(a);
1055 }
1056
1057
1058 // Helper class for copying HeapObjects
1059 class ScavengeVisitor: public ObjectVisitor {
1060  public:
1061   explicit ScavengeVisitor(Heap* heap) : heap_(heap) {}
1062
1063   void VisitPointer(Object** p) { ScavengePointer(p); }
1064
1065   void VisitPointers(Object** start, Object** end) {
1066     // Copy all HeapObject pointers in [start, end)
1067     for (Object** p = start; p < end; p++) ScavengePointer(p);
1068   }
1069
1070  private:
1071   void ScavengePointer(Object** p) {
1072     Object* object = *p;
1073     if (!heap_->InNewSpace(object)) return;
1074     Heap::ScavengeObject(reinterpret_cast<HeapObject**>(p),
1075                          reinterpret_cast<HeapObject*>(object));
1076   }
1077
1078   Heap* heap_;
1079 };
1080
1081
1082 #ifdef VERIFY_HEAP
1083 // Visitor class to verify pointers in code or data space do not point into
1084 // new space.
1085 class VerifyNonPointerSpacePointersVisitor: public ObjectVisitor {
1086  public:
1087   void VisitPointers(Object** start, Object** end) {
1088     for (Object** current = start; current < end; current++) {
1089       if ((*current)->IsHeapObject()) {
1090         CHECK(!HEAP->InNewSpace(HeapObject::cast(*current)));
1091       }
1092     }
1093   }
1094 };
1095
1096
1097 static void VerifyNonPointerSpacePointers() {
1098   // Verify that there are no pointers to new space in spaces where we
1099   // do not expect them.
1100   VerifyNonPointerSpacePointersVisitor v;
1101   HeapObjectIterator code_it(HEAP->code_space());
1102   for (HeapObject* object = code_it.Next();
1103        object != NULL; object = code_it.Next())
1104     object->Iterate(&v);
1105
1106   // The old data space is normally swept conservatively, in which case the
1107   // iterator doesn't work, so we normally skip the next bit.
1108   if (!HEAP->old_data_space()->was_swept_conservatively()) {
1109     HeapObjectIterator data_it(HEAP->old_data_space());
1110     for (HeapObject* object = data_it.Next();
1111          object != NULL; object = data_it.Next())
1112       object->Iterate(&v);
1113   }
1114 }
1115 #endif  // VERIFY_HEAP
1116
1117
1118 void Heap::CheckNewSpaceExpansionCriteria() {
1119   if (new_space_.Capacity() < new_space_.MaximumCapacity() &&
1120       survived_since_last_expansion_ > new_space_.Capacity() &&
1121       !new_space_high_promotion_mode_active_) {
1122     // Grow the size of new space if there is room to grow, enough data
1123     // has survived scavenge since the last expansion and we are not in
1124     // high promotion mode.
1125     new_space_.Grow();
1126     survived_since_last_expansion_ = 0;
1127   }
1128 }
1129
1130
1131 static bool IsUnscavengedHeapObject(Heap* heap, Object** p) {
1132   return heap->InNewSpace(*p) &&
1133       !HeapObject::cast(*p)->map_word().IsForwardingAddress();
1134 }
1135
1136
1137 void Heap::ScavengeStoreBufferCallback(
1138     Heap* heap,
1139     MemoryChunk* page,
1140     StoreBufferEvent event) {
1141   heap->store_buffer_rebuilder_.Callback(page, event);
1142 }
1143
1144
1145 void StoreBufferRebuilder::Callback(MemoryChunk* page, StoreBufferEvent event) {
1146   if (event == kStoreBufferStartScanningPagesEvent) {
1147     start_of_current_page_ = NULL;
1148     current_page_ = NULL;
1149   } else if (event == kStoreBufferScanningPageEvent) {
1150     if (current_page_ != NULL) {
1151       // If this page already overflowed the store buffer during this iteration,
1152       if (current_page_->scan_on_scavenge()) {
1153         // then we should wipe out the entries that have been added for it.
1154         store_buffer_->SetTop(start_of_current_page_);
1155       } else if (store_buffer_->Top() - start_of_current_page_ >=
1156                  (store_buffer_->Limit() - store_buffer_->Top()) >> 2) {
1157         // Did we find too many pointers in the previous page?  The heuristic is
1158         // that no page can take more than 1/5 of the remaining slots in the store
1159         // buffer.
1160         current_page_->set_scan_on_scavenge(true);
1161         store_buffer_->SetTop(start_of_current_page_);
1162       } else {
1163         // In this case the page we scanned took a reasonable number of slots in
1164         // the store buffer.  It has now been rehabilitated and is no longer
1165         // marked scan_on_scavenge.
1166         ASSERT(!current_page_->scan_on_scavenge());
1167       }
1168     }
1169     start_of_current_page_ = store_buffer_->Top();
1170     current_page_ = page;
1171   } else if (event == kStoreBufferFullEvent) {
1172     // The current page overflowed the store buffer again.  Wipe out its entries
1173     // in the store buffer and mark it scan-on-scavenge again.  This may happen
1174     // several times while scanning.
1175     if (current_page_ == NULL) {
1176       // The store buffer overflowed while scanning promoted objects.  These are not
1177       // in any particular page, though they are likely to be clustered by the
1178       // allocation routines.
1179       store_buffer_->EnsureSpace(StoreBuffer::kStoreBufferSize);
1180     } else {
1181       // The store buffer overflowed while scanning a particular old space page for
1182       // pointers to new space.
1183       ASSERT(current_page_ == page);
1184       ASSERT(page != NULL);
1185       current_page_->set_scan_on_scavenge(true);
1186       ASSERT(start_of_current_page_ != store_buffer_->Top());
1187       store_buffer_->SetTop(start_of_current_page_);
1188     }
1189   } else {
1190     UNREACHABLE();
1191   }
1192 }
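// A numeric sketch of the heuristic above (the figures are examples only):
// suppose the store buffer had 1000 free slots when scanning of the current
// page began.  If the page contributes at least 200 entries, one fifth of
// those 1000 slots, then at the next kStoreBufferScanningPageEvent
//   Top() - start_of_current_page_ >= (Limit() - Top()) >> 2
// holds (200 >= 800 / 4 in this example), so the entries are discarded and
// the page is flagged scan_on_scavenge instead.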
1193
1194
1195 void PromotionQueue::Initialize() {
1196   // Assumes that a NewSpacePage exactly fits a number of promotion queue
1197   // entries (where each is a pair of intptr_t). This allows us to simplify
1198   // the test for when to switch pages.
1199   ASSERT((Page::kPageSize - MemoryChunk::kBodyOffset) % (2 * kPointerSize)
1200          == 0);
1201   limit_ = reinterpret_cast<intptr_t*>(heap_->new_space()->ToSpaceStart());
1202   front_ = rear_ =
1203       reinterpret_cast<intptr_t*>(heap_->new_space()->ToSpaceEnd());
1204   emergency_stack_ = NULL;
1205   guard_ = false;
1206 }
1207
1208
1209 void PromotionQueue::RelocateQueueHead() {
1210   ASSERT(emergency_stack_ == NULL);
1211
1212   Page* p = Page::FromAllocationTop(reinterpret_cast<Address>(rear_));
1213   intptr_t* head_start = rear_;
1214   intptr_t* head_end =
1215       Min(front_, reinterpret_cast<intptr_t*>(p->area_end()));
1216
1217   int entries_count =
1218       static_cast<int>(head_end - head_start) / kEntrySizeInWords;
1219
1220   emergency_stack_ = new List<Entry>(2 * entries_count);
1221
1222   while (head_start != head_end) {
1223     int size = static_cast<int>(*(head_start++));
1224     HeapObject* obj = reinterpret_cast<HeapObject*>(*(head_start++));
1225     emergency_stack_->Add(Entry(obj, size));
1226   }
1227   rear_ = head_end;
1228 }
1229
1230
1231 class ScavengeWeakObjectRetainer : public WeakObjectRetainer {
1232  public:
1233   explicit ScavengeWeakObjectRetainer(Heap* heap) : heap_(heap) { }
1234
1235   virtual Object* RetainAs(Object* object) {
1236     if (!heap_->InFromSpace(object)) {
1237       return object;
1238     }
1239
1240     MapWord map_word = HeapObject::cast(object)->map_word();
1241     if (map_word.IsForwardingAddress()) {
1242       return map_word.ToForwardingAddress();
1243     }
1244     return NULL;
1245   }
1246
1247  private:
1248   Heap* heap_;
1249 };
1250
1251
1252 void Heap::Scavenge() {
1253   RelocationLock relocation_lock(this);
1254
1255 #ifdef VERIFY_HEAP
1256   if (FLAG_verify_heap) VerifyNonPointerSpacePointers();
1257 #endif
1258
1259   gc_state_ = SCAVENGE;
1260
1261   // Implements Cheney's copying algorithm
1262   LOG(isolate_, ResourceEvent("scavenge", "begin"));
1263
1264   // Clear descriptor cache.
1265   isolate_->descriptor_lookup_cache()->Clear();
1266
1267   // Used for updating survived_since_last_expansion_ at function end.
1268   intptr_t survived_watermark = PromotedSpaceSizeOfObjects();
1269
1270   CheckNewSpaceExpansionCriteria();
1271
1272   SelectScavengingVisitorsTable();
1273
1274   incremental_marking()->PrepareForScavenge();
1275
1276   AdvanceSweepers(static_cast<int>(new_space_.Size()));
1277
1278   // Flip the semispaces.  After flipping, to space is empty, from space has
1279   // live objects.
1280   new_space_.Flip();
1281   new_space_.ResetAllocationInfo();
1282
1283   // We need to sweep newly copied objects which can be either in the
1284   // to space or promoted to the old generation.  For to-space
1285   // objects, we treat the bottom of the to space as a queue.  Newly
1286   // copied and unswept objects lie between a 'front' mark and the
1287   // allocation pointer.
1288   //
1289   // Promoted objects can go into various old-generation spaces, and
1290   // can be allocated internally in the spaces (from the free list).
1291   // We treat the top of the to space as a queue of addresses of
1292   // promoted objects.  The addresses of newly promoted and unswept
1293   // objects lie between a 'front' mark and a 'rear' mark that is
1294   // updated as a side effect of promoting an object.
1295   //
1296   // There is guaranteed to be enough room at the top of the to space
1297   // for the addresses of promoted objects: every object promoted
1298   // frees up its size in bytes from the top of the new space, and
1299   // objects are at least one pointer in size.
1300   Address new_space_front = new_space_.ToSpaceStart();
1301   promotion_queue_.Initialize();
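  // A rough picture of the to-space layout implied by the comment above
  // (addresses grow to the right; illustration only):
  //
  //   ToSpaceStart()                                            ToSpaceEnd()
  //   | processed copies | unswept copies | free ... | promoted addresses |
  //                      ^                ^          ^                    ^
  //               new_space_front  allocation top   rear_             front_
  //
  // new_space_front chases the allocation top, while the promotion queue
  // grows downwards from ToSpaceEnd(): front_ and rear_ start at the end and
  // rear_ moves to lower addresses as objects are promoted.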
1302
1303 #ifdef DEBUG
1304   store_buffer()->Clean();
1305 #endif
1306
1307   ScavengeVisitor scavenge_visitor(this);
1308   // Copy roots.
1309   IterateRoots(&scavenge_visitor, VISIT_ALL_IN_SCAVENGE);
1310
1311   // Copy objects reachable from the old generation.
1312   {
1313     StoreBufferRebuildScope scope(this,
1314                                   store_buffer(),
1315                                   &ScavengeStoreBufferCallback);
1316     store_buffer()->IteratePointersToNewSpace(&ScavengeObject);
1317   }
1318
1319   // Copy objects reachable from cells by scavenging cell values directly.
1320   HeapObjectIterator cell_iterator(cell_space_);
1321   for (HeapObject* heap_object = cell_iterator.Next();
1322        heap_object != NULL;
1323        heap_object = cell_iterator.Next()) {
1324     if (heap_object->IsJSGlobalPropertyCell()) {
1325       JSGlobalPropertyCell* cell = JSGlobalPropertyCell::cast(heap_object);
1326       Address value_address = cell->ValueAddress();
1327       scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(value_address));
1328     }
1329   }
1330
1331   // Copy objects reachable from the code flushing candidates list.
1332   MarkCompactCollector* collector = mark_compact_collector();
1333   if (collector->is_code_flushing_enabled()) {
1334     collector->code_flusher()->IteratePointersToFromSpace(&scavenge_visitor);
1335   }
1336
1337   // Scavenge object reachable from the native contexts list directly.
1338   scavenge_visitor.VisitPointer(BitCast<Object**>(&native_contexts_list_));
1339
1340   new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
1341
1342   while (isolate()->global_handles()->IterateObjectGroups(
1343       &scavenge_visitor, &IsUnscavengedHeapObject)) {
1344     new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
1345   }
1346   isolate()->global_handles()->RemoveObjectGroups();
1347
1348   isolate_->global_handles()->IdentifyNewSpaceWeakIndependentHandles(
1349       &IsUnscavengedHeapObject);
1350   isolate_->global_handles()->IterateNewSpaceWeakIndependentRoots(
1351       &scavenge_visitor);
1352   new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
1353
1354   UpdateNewSpaceReferencesInExternalStringTable(
1355       &UpdateNewSpaceReferenceInExternalStringTableEntry);
1356
1357   promotion_queue_.Destroy();
1358
1359   LiveObjectList::UpdateReferencesForScavengeGC();
1360   if (!FLAG_watch_ic_patching) {
1361     isolate()->runtime_profiler()->UpdateSamplesAfterScavenge();
1362   }
1363   incremental_marking()->UpdateMarkingDequeAfterScavenge();
1364
1365   ScavengeWeakObjectRetainer weak_object_retainer(this);
1366   ProcessWeakReferences(&weak_object_retainer);
1367
1368   ASSERT(new_space_front == new_space_.top());
1369
1370   // Set age mark.
1371   new_space_.set_age_mark(new_space_.top());
1372
1373   new_space_.LowerInlineAllocationLimit(
1374       new_space_.inline_allocation_limit_step());
1375
1376   // Update how much has survived scavenge.
1377   IncrementYoungSurvivorsCounter(static_cast<int>(
1378       (PromotedSpaceSizeOfObjects() - survived_watermark) + new_space_.Size()));
1379
1380   LOG(isolate_, ResourceEvent("scavenge", "end"));
1381
1382   gc_state_ = NOT_IN_GC;
1383
1384   scavenges_since_last_idle_round_++;
1385 }
1386
1387
1388 String* Heap::UpdateNewSpaceReferenceInExternalStringTableEntry(Heap* heap,
1389                                                                 Object** p) {
1390   MapWord first_word = HeapObject::cast(*p)->map_word();
1391
1392   if (!first_word.IsForwardingAddress()) {
1393     // Unreachable external string can be finalized.
1394     heap->FinalizeExternalString(String::cast(*p));
1395     return NULL;
1396   }
1397
1398   // String is still reachable.
1399   return String::cast(first_word.ToForwardingAddress());
1400 }
1401
1402
1403 void Heap::UpdateNewSpaceReferencesInExternalStringTable(
1404     ExternalStringTableUpdaterCallback updater_func) {
1405 #ifdef VERIFY_HEAP
1406   if (FLAG_verify_heap) {
1407     external_string_table_.Verify();
1408   }
1409 #endif
1410
1411   if (external_string_table_.new_space_strings_.is_empty()) return;
1412
1413   Object** start = &external_string_table_.new_space_strings_[0];
1414   Object** end = start + external_string_table_.new_space_strings_.length();
1415   Object** last = start;
1416
1417   for (Object** p = start; p < end; ++p) {
1418     ASSERT(InFromSpace(*p));
1419     String* target = updater_func(this, p);
1420
1421     if (target == NULL) continue;
1422
1423     ASSERT(target->IsExternalString());
1424
1425     if (InNewSpace(target)) {
1426       // String is still in new space.  Update the table entry.
1427       *last = target;
1428       ++last;
1429     } else {
1430       // String got promoted.  Move it to the old string list.
1431       external_string_table_.AddOldString(target);
1432     }
1433   }
1434
1435   ASSERT(last <= end);
1436   external_string_table_.ShrinkNewStrings(static_cast<int>(last - start));
1437 }
1438
1439
1440 void Heap::UpdateReferencesInExternalStringTable(
1441     ExternalStringTableUpdaterCallback updater_func) {
1442
1443   // Update old space string references.
1444   if (external_string_table_.old_space_strings_.length() > 0) {
1445     Object** start = &external_string_table_.old_space_strings_[0];
1446     Object** end = start + external_string_table_.old_space_strings_.length();
1447     for (Object** p = start; p < end; ++p) *p = updater_func(this, p);
1448   }
1449
1450   UpdateNewSpaceReferencesInExternalStringTable(updater_func);
1451 }
1452
1453
1454 static Object* ProcessFunctionWeakReferences(Heap* heap,
1455                                              Object* function,
1456                                              WeakObjectRetainer* retainer,
1457                                              bool record_slots) {
1458   Object* undefined = heap->undefined_value();
1459   Object* head = undefined;
1460   JSFunction* tail = NULL;
1461   Object* candidate = function;
1462   while (candidate != undefined) {
1463     // Check whether to keep the candidate in the list.
1464     JSFunction* candidate_function = reinterpret_cast<JSFunction*>(candidate);
1465     Object* retain = retainer->RetainAs(candidate);
1466     if (retain != NULL) {
1467       if (head == undefined) {
1468         // First element in the list.
1469         head = retain;
1470       } else {
1471         // Subsequent elements in the list.
1472         ASSERT(tail != NULL);
1473         tail->set_next_function_link(retain);
1474         if (record_slots) {
1475           Object** next_function =
1476               HeapObject::RawField(tail, JSFunction::kNextFunctionLinkOffset);
1477           heap->mark_compact_collector()->RecordSlot(
1478               next_function, next_function, retain);
1479         }
1480       }
1481       // Retained function is new tail.
1482       candidate_function = reinterpret_cast<JSFunction*>(retain);
1483       tail = candidate_function;
1484
1485       ASSERT(retain->IsUndefined() || retain->IsJSFunction());
1486
1487       if (retain == undefined) break;
1488     }
1489
1490     // Move to next element in the list.
1491     candidate = candidate_function->next_function_link();
1492   }
1493
1494   // Terminate the list if there are one or more elements.
1495   if (tail != NULL) {
1496     tail->set_next_function_link(undefined);
1497   }
1498
1499   return head;
1500 }
1501
1502
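// Walks the weak list of native contexts (native_contexts_list_), dropping
// contexts that the retainer no longer keeps alive and, for each surviving
// context, pruning its weak list of optimized functions via
// ProcessFunctionWeakReferences.  Slots are recorded manually only during a
// compacting mark-compact collection (see record_slots below).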
1503 void Heap::ProcessWeakReferences(WeakObjectRetainer* retainer) {
1504   Object* undefined = undefined_value();
1505   Object* head = undefined;
1506   Context* tail = NULL;
1507   Object* candidate = native_contexts_list_;
1508
1509   // We don't record weak slots during marking or scavenges.
1510   // Instead we do it once when we complete the mark-compact cycle.
1511   // Note that the write barrier has no effect if we are already in the middle
1512   // of a compacting mark-sweep cycle, so we have to record slots manually.
1513   bool record_slots =
1514       gc_state() == MARK_COMPACT &&
1515       mark_compact_collector()->is_compacting();
1516
1517   while (candidate != undefined) {
1518     // Check whether to keep the candidate in the list.
1519     Context* candidate_context = reinterpret_cast<Context*>(candidate);
1520     Object* retain = retainer->RetainAs(candidate);
1521     if (retain != NULL) {
1522       if (head == undefined) {
1523         // First element in the list.
1524         head = retain;
1525       } else {
1526         // Subsequent elements in the list.
1527         ASSERT(tail != NULL);
1528         tail->set_unchecked(this,
1529                             Context::NEXT_CONTEXT_LINK,
1530                             retain,
1531                             UPDATE_WRITE_BARRIER);
1532
1533         if (record_slots) {
1534           Object** next_context =
1535               HeapObject::RawField(
1536                   tail, FixedArray::SizeFor(Context::NEXT_CONTEXT_LINK));
1537           mark_compact_collector()->RecordSlot(
1538               next_context, next_context, retain);
1539         }
1540       }
1541       // Retained context is new tail.
1542       candidate_context = reinterpret_cast<Context*>(retain);
1543       tail = candidate_context;
1544
1545       if (retain == undefined) break;
1546
1547       // Process the weak list of optimized functions for the context.
1548       Object* function_list_head =
1549           ProcessFunctionWeakReferences(
1550               this,
1551               candidate_context->get(Context::OPTIMIZED_FUNCTIONS_LIST),
1552               retainer,
1553               record_slots);
1554       candidate_context->set_unchecked(this,
1555                                        Context::OPTIMIZED_FUNCTIONS_LIST,
1556                                        function_list_head,
1557                                        UPDATE_WRITE_BARRIER);
1558       if (record_slots) {
1559         Object** optimized_functions =
1560             HeapObject::RawField(
1561                 tail, FixedArray::SizeFor(Context::OPTIMIZED_FUNCTIONS_LIST));
1562         mark_compact_collector()->RecordSlot(
1563             optimized_functions, optimized_functions, function_list_head);
1564       }
1565     }
1566
1567     // Move to next element in the list.
1568     candidate = candidate_context->get(Context::NEXT_CONTEXT_LINK);
1569   }
1570
1571   // Terminate the list if there are one or more elements.
1572   if (tail != NULL) {
1573     tail->set_unchecked(this,
1574                         Context::NEXT_CONTEXT_LINK,
1575                         Heap::undefined_value(),
1576                         UPDATE_WRITE_BARRIER);
1577   }
1578
1579   // Update the head of the list of contexts.
1580   native_contexts_list_ = head;
1581 }
1582
1583
1584 void Heap::VisitExternalResources(v8::ExternalResourceVisitor* visitor) {
1585   AssertNoAllocation no_allocation;
1586
1587   // Both the external string table and the symbol table may contain
1588   // external strings, but neither lists them exhaustively, nor is the
1589   // intersection set empty.  Therefore we iterate over the external string
1590   // table first, ignoring symbols, and then over the symbol table.
1591
1592   class ExternalStringTableVisitorAdapter : public ObjectVisitor {
1593    public:
1594     explicit ExternalStringTableVisitorAdapter(
1595         v8::ExternalResourceVisitor* visitor) : visitor_(visitor) {}
1596     virtual void VisitPointers(Object** start, Object** end) {
1597       for (Object** p = start; p < end; p++) {
1598         // Visit non-symbol external strings,
1599         // since symbols are listed in the symbol table.
1600         if (!(*p)->IsSymbol()) {
1601           ASSERT((*p)->IsExternalString());
1602           visitor_->VisitExternalString(Utils::ToLocal(
1603               Handle<String>(String::cast(*p))));
1604         }
1605       }
1606     }
1607    private:
1608     v8::ExternalResourceVisitor* visitor_;
1609   } external_string_table_visitor(visitor);
1610
1611   external_string_table_.Iterate(&external_string_table_visitor);
1612
1613   class SymbolTableVisitorAdapter : public ObjectVisitor {
1614    public:
1615     explicit SymbolTableVisitorAdapter(
1616         v8::ExternalResourceVisitor* visitor) : visitor_(visitor) {}
1617     virtual void VisitPointers(Object** start, Object** end) {
1618       for (Object** p = start; p < end; p++) {
1619         if ((*p)->IsExternalString()) {
1620           ASSERT((*p)->IsSymbol());
1621           visitor_->VisitExternalString(Utils::ToLocal(
1622               Handle<String>(String::cast(*p))));
1623         }
1624       }
1625     }
1626    private:
1627     v8::ExternalResourceVisitor* visitor_;
1628   } symbol_table_visitor(visitor);
1629
1630   symbol_table()->IterateElements(&symbol_table_visitor);
1631 }
1632
1633
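// Static visitor used while draining the scavenger's work queue: every
// pointer that still refers to new space is scavenged (copied or promoted)
// via Heap::ScavengeObject.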
1634 class NewSpaceScavenger : public StaticNewSpaceVisitor<NewSpaceScavenger> {
1635  public:
1636   static inline void VisitPointer(Heap* heap, Object** p) {
1637     Object* object = *p;
1638     if (!heap->InNewSpace(object)) return;
1639     Heap::ScavengeObject(reinterpret_cast<HeapObject**>(p),
1640                          reinterpret_cast<HeapObject*>(object));
1641   }
1642 };
1643
1644
1645 Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
1646                          Address new_space_front) {
1647   do {
1648     SemiSpace::AssertValidRange(new_space_front, new_space_.top());
1649     // The addresses new_space_front and new_space_.top() define a
1650     // queue of unprocessed copied objects.  Process them until the
1651     // queue is empty.
1652     while (new_space_front != new_space_.top()) {
1653       if (!NewSpacePage::IsAtEnd(new_space_front)) {
1654         HeapObject* object = HeapObject::FromAddress(new_space_front);
1655         new_space_front +=
1656           NewSpaceScavenger::IterateBody(object->map(), object);
1657       } else {
1658         new_space_front =
1659             NewSpacePage::FromLimit(new_space_front)->next_page()->area_start();
1660       }
1661     }
1662
1663     // Promote and process all the to-be-promoted objects.
1664     {
1665       StoreBufferRebuildScope scope(this,
1666                                     store_buffer(),
1667                                     &ScavengeStoreBufferCallback);
1668       while (!promotion_queue()->is_empty()) {
1669         HeapObject* target;
1670         int size;
1671         promotion_queue()->remove(&target, &size);
1672
1673         // The promoted object might already be partially visited
1674         // during old space pointer iteration. Thus we search specifically
1675         // for pointers to the from semispace instead of looking for
1676         // pointers to new space.
1677         ASSERT(!target->IsMap());
1678         IterateAndMarkPointersToFromSpace(target->address(),
1679                                           target->address() + size,
1680                                           &ScavengeObject);
1681       }
1682     }
1683
1684     // Take another spin if there are now unswept objects in new space
1685     // (there are currently no more unswept promoted objects).
1686   } while (new_space_front != new_space_.top());
1687
1688   return new_space_front;
1689 }
1690
1691
1692 STATIC_ASSERT((FixedDoubleArray::kHeaderSize & kDoubleAlignmentMask) == 0);
1693
1694
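// Given an allocation that was made one pointer larger than requested (see
// EvacuateObject), places a one-word filler either before the object (if it
// is misaligned) or after it (if it is already aligned), so that the returned
// object starts on a double-aligned address and the extra word is always
// accounted for.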
1695 INLINE(static HeapObject* EnsureDoubleAligned(Heap* heap,
1696                                               HeapObject* object,
1697                                               int size));
1698
1699 static HeapObject* EnsureDoubleAligned(Heap* heap,
1700                                        HeapObject* object,
1701                                        int size) {
1702   if ((OffsetFrom(object->address()) & kDoubleAlignmentMask) != 0) {
1703     heap->CreateFillerObjectAt(object->address(), kPointerSize);
1704     return HeapObject::FromAddress(object->address() + kPointerSize);
1705   } else {
1706     heap->CreateFillerObjectAt(object->address() + size - kPointerSize,
1707                                kPointerSize);
1708     return object;
1709   }
1710 }
1711
1712
1713 enum LoggingAndProfiling {
1714   LOGGING_AND_PROFILING_ENABLED,
1715   LOGGING_AND_PROFILING_DISABLED
1716 };
1717
1718
1719 enum MarksHandling { TRANSFER_MARKS, IGNORE_MARKS };
1720
1721
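// Dispatch-table based scavenging visitor, specialized on whether incremental
// marking bits have to be transferred to evacuated objects (TRANSFER_MARKS)
// and on whether logging/profiling hooks need to be invoked for moved
// objects.  All four combinations are instantiated eagerly (see
// InitializeScavengingVisitorsTables below) and the heap copies the matching
// table in SelectScavengingVisitorsTable.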
1722 template<MarksHandling marks_handling,
1723          LoggingAndProfiling logging_and_profiling_mode>
1724 class ScavengingVisitor : public StaticVisitorBase {
1725  public:
1726   static void Initialize() {
1727     table_.Register(kVisitSeqOneByteString, &EvacuateSeqOneByteString);
1728     table_.Register(kVisitSeqTwoByteString, &EvacuateSeqTwoByteString);
1729     table_.Register(kVisitShortcutCandidate, &EvacuateShortcutCandidate);
1730     table_.Register(kVisitByteArray, &EvacuateByteArray);
1731     table_.Register(kVisitFixedArray, &EvacuateFixedArray);
1732     table_.Register(kVisitFixedDoubleArray, &EvacuateFixedDoubleArray);
1733
1734     table_.Register(kVisitNativeContext,
1735                     &ObjectEvacuationStrategy<POINTER_OBJECT>::
1736                         template VisitSpecialized<Context::kSize>);
1737
1738     table_.Register(kVisitConsString,
1739                     &ObjectEvacuationStrategy<POINTER_OBJECT>::
1740                         template VisitSpecialized<ConsString::kSize>);
1741
1742     table_.Register(kVisitSlicedString,
1743                     &ObjectEvacuationStrategy<POINTER_OBJECT>::
1744                         template VisitSpecialized<SlicedString::kSize>);
1745
1746     table_.Register(kVisitSharedFunctionInfo,
1747                     &ObjectEvacuationStrategy<POINTER_OBJECT>::
1748                         template VisitSpecialized<SharedFunctionInfo::kSize>);
1749
1750     table_.Register(kVisitJSWeakMap,
1751                     &ObjectEvacuationStrategy<POINTER_OBJECT>::
1752                     Visit);
1753
1754     table_.Register(kVisitJSRegExp,
1755                     &ObjectEvacuationStrategy<POINTER_OBJECT>::
1756                     Visit);
1757
1758     if (marks_handling == IGNORE_MARKS) {
1759       table_.Register(kVisitJSFunction,
1760                       &ObjectEvacuationStrategy<POINTER_OBJECT>::
1761                           template VisitSpecialized<JSFunction::kSize>);
1762     } else {
1763       table_.Register(kVisitJSFunction, &EvacuateJSFunction);
1764     }
1765
1766     table_.RegisterSpecializations<ObjectEvacuationStrategy<DATA_OBJECT>,
1767                                    kVisitDataObject,
1768                                    kVisitDataObjectGeneric>();
1769
1770     table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>,
1771                                    kVisitJSObject,
1772                                    kVisitJSObjectGeneric>();
1773
1774     table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>,
1775                                    kVisitStruct,
1776                                    kVisitStructGeneric>();
1777   }
1778
1779   static VisitorDispatchTable<ScavengingCallback>* GetTable() {
1780     return &table_;
1781   }
1782
1783  private:
1784   enum ObjectContents  { DATA_OBJECT, POINTER_OBJECT };
1785   enum SizeRestriction { SMALL, UNKNOWN_SIZE };
1786
1787   static void RecordCopiedObject(Heap* heap, HeapObject* obj) {
1788     bool should_record = false;
1789 #ifdef DEBUG
1790     should_record = FLAG_heap_stats;
1791 #endif
1792     should_record = should_record || FLAG_log_gc;
1793     if (should_record) {
1794       if (heap->new_space()->Contains(obj)) {
1795         heap->new_space()->RecordAllocation(obj);
1796       } else {
1797         heap->new_space()->RecordPromotion(obj);
1798       }
1799     }
1800   }
1801
1802   // Helper function used during evacuation to copy a source object to an
1803   // allocated target object and update the forwarding pointer in the source
1804   // object.
1805   INLINE(static void MigrateObject(Heap* heap,
1806                                    HeapObject* source,
1807                                    HeapObject* target,
1808                                    int size)) {
1809     // Copy the content of source to target.
1810     heap->CopyBlock(target->address(), source->address(), size);
1811
1812     // Set the forwarding address.
1813     source->set_map_word(MapWord::FromForwardingAddress(target));
1814
1815     if (logging_and_profiling_mode == LOGGING_AND_PROFILING_ENABLED) {
1816       // Update NewSpace stats if necessary.
1817       RecordCopiedObject(heap, target);
1818       HEAP_PROFILE(heap, ObjectMoveEvent(source->address(), target->address()));
1819       Isolate* isolate = heap->isolate();
1820       if (isolate->logger()->is_logging_code_events() ||
1821           CpuProfiler::is_profiling(isolate)) {
1822         if (target->IsSharedFunctionInfo()) {
1823           PROFILE(isolate, SharedFunctionInfoMoveEvent(
1824               source->address(), target->address()));
1825         }
1826       }
1827     }
1828
1829     if (marks_handling == TRANSFER_MARKS) {
1830       if (Marking::TransferColor(source, target)) {
1831         MemoryChunk::IncrementLiveBytesFromGC(target->address(), size);
1832       }
1833     }
1834   }
1835
1836
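  // Core evacuation routine: tries to promote the object into old space
  // (large object space, old data space or old pointer space depending on
  // its size and contents) and falls back to copying it within new space if
  // promotion fails or is not desirable.  For double-aligned payloads one
  // extra word is allocated and EnsureDoubleAligned inserts the filler.
  // Promoted pointer objects are queued so their slots can be re-scanned for
  // from-space pointers.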
1837   template<ObjectContents object_contents,
1838            SizeRestriction size_restriction,
1839            int alignment>
1840   static inline void EvacuateObject(Map* map,
1841                                     HeapObject** slot,
1842                                     HeapObject* object,
1843                                     int object_size) {
1844     SLOW_ASSERT((size_restriction != SMALL) ||
1845                 (object_size <= Page::kMaxNonCodeHeapObjectSize));
1846     SLOW_ASSERT(object->Size() == object_size);
1847
1848     int allocation_size = object_size;
1849     if (alignment != kObjectAlignment) {
1850       ASSERT(alignment == kDoubleAlignment);
1851       allocation_size += kPointerSize;
1852     }
1853
1854     Heap* heap = map->GetHeap();
1855     if (heap->ShouldBePromoted(object->address(), object_size)) {
1856       MaybeObject* maybe_result;
1857
1858       if ((size_restriction != SMALL) &&
1859           (allocation_size > Page::kMaxNonCodeHeapObjectSize)) {
1860         maybe_result = heap->lo_space()->AllocateRaw(allocation_size,
1861                                                      NOT_EXECUTABLE);
1862       } else {
1863         if (object_contents == DATA_OBJECT) {
1864           maybe_result = heap->old_data_space()->AllocateRaw(allocation_size);
1865         } else {
1866           maybe_result =
1867               heap->old_pointer_space()->AllocateRaw(allocation_size);
1868         }
1869       }
1870
1871       Object* result = NULL;  // Initialization to please compiler.
1872       if (maybe_result->ToObject(&result)) {
1873         HeapObject* target = HeapObject::cast(result);
1874
1875         if (alignment != kObjectAlignment) {
1876           target = EnsureDoubleAligned(heap, target, allocation_size);
1877         }
1878
1879         // Order is important: slot might be inside of the target if target
1880         // was allocated over a dead object and slot comes from the store
1881         // buffer.
1882         *slot = target;
1883         MigrateObject(heap, object, target, object_size);
1884
1885         if (object_contents == POINTER_OBJECT) {
1886           if (map->instance_type() == JS_FUNCTION_TYPE) {
1887             heap->promotion_queue()->insert(
1888                 target, JSFunction::kNonWeakFieldsEndOffset);
1889           } else {
1890             heap->promotion_queue()->insert(target, object_size);
1891           }
1892         }
1893
1894         heap->tracer()->increment_promoted_objects_size(object_size);
1895         return;
1896       }
1897     }
1898     MaybeObject* allocation = heap->new_space()->AllocateRaw(allocation_size);
1899     heap->promotion_queue()->SetNewLimit(heap->new_space()->top());
1900     Object* result = allocation->ToObjectUnchecked();
1901     HeapObject* target = HeapObject::cast(result);
1902
1903     if (alignment != kObjectAlignment) {
1904       target = EnsureDoubleAligned(heap, target, allocation_size);
1905     }
1906
1907     // Order is important: slot might be inside of the target if target
1908     // was allocated over a dead object and slot comes from the store
1909     // buffer.
1910     *slot = target;
1911     MigrateObject(heap, object, target, object_size);
1912     return;
1913   }
1914
1915
1916   static inline void EvacuateJSFunction(Map* map,
1917                                         HeapObject** slot,
1918                                         HeapObject* object) {
1919     ObjectEvacuationStrategy<POINTER_OBJECT>::
1920         template VisitSpecialized<JSFunction::kSize>(map, slot, object);
1921
1922     HeapObject* target = *slot;
1923     MarkBit mark_bit = Marking::MarkBitFrom(target);
1924     if (Marking::IsBlack(mark_bit)) {
1925       // This object is black and it might not be rescanned by the marker.
1926       // We should explicitly record the code entry slot for compaction because
1927       // promotion queue processing (IterateAndMarkPointersToFromSpace) will
1928       // miss it as it is not HeapObject-tagged.
1929       Address code_entry_slot =
1930           target->address() + JSFunction::kCodeEntryOffset;
1931       Code* code = Code::cast(Code::GetObjectFromEntryAddress(code_entry_slot));
1932       map->GetHeap()->mark_compact_collector()->
1933           RecordCodeEntrySlot(code_entry_slot, code);
1934     }
1935   }
1936
1937
1938   static inline void EvacuateFixedArray(Map* map,
1939                                         HeapObject** slot,
1940                                         HeapObject* object) {
1941     int object_size = FixedArray::BodyDescriptor::SizeOf(map, object);
1942     EvacuateObject<POINTER_OBJECT, UNKNOWN_SIZE, kObjectAlignment>(map,
1943                                                  slot,
1944                                                  object,
1945                                                  object_size);
1946   }
1947
1948
1949   static inline void EvacuateFixedDoubleArray(Map* map,
1950                                               HeapObject** slot,
1951                                               HeapObject* object) {
1952     int length = reinterpret_cast<FixedDoubleArray*>(object)->length();
1953     int object_size = FixedDoubleArray::SizeFor(length);
1954     EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE, kDoubleAlignment>(
1955         map,
1956         slot,
1957         object,
1958         object_size);
1959   }
1960
1961
1962   static inline void EvacuateByteArray(Map* map,
1963                                        HeapObject** slot,
1964                                        HeapObject* object) {
1965     int object_size = reinterpret_cast<ByteArray*>(object)->ByteArraySize();
1966     EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE, kObjectAlignment>(
1967         map, slot, object, object_size);
1968   }
1969
1970
1971   static inline void EvacuateSeqOneByteString(Map* map,
1972                                             HeapObject** slot,
1973                                             HeapObject* object) {
1974     int object_size = SeqOneByteString::cast(object)->
1975         SeqOneByteStringSize(map->instance_type());
1976     EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE, kObjectAlignment>(
1977         map, slot, object, object_size);
1978   }
1979
1980
1981   static inline void EvacuateSeqTwoByteString(Map* map,
1982                                               HeapObject** slot,
1983                                               HeapObject* object) {
1984     int object_size = SeqTwoByteString::cast(object)->
1985         SeqTwoByteStringSize(map->instance_type());
1986     EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE, kObjectAlignment>(
1987         map, slot, object, object_size);
1988   }
1989
1990
1991   static inline bool IsShortcutCandidate(int type) {
1992     return ((type & kShortcutTypeMask) == kShortcutTypeTag);
1993   }
1994
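  // Cons strings whose second component is the empty string are
  // short-circuited to their first component instead of being copied, but
  // only when marks are ignored; with TRANSFER_MARKS (and during compaction,
  // see SelectScavengingVisitorsTable) the string is evacuated as a regular
  // small pointer object.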
1995   static inline void EvacuateShortcutCandidate(Map* map,
1996                                                HeapObject** slot,
1997                                                HeapObject* object) {
1998     ASSERT(IsShortcutCandidate(map->instance_type()));
1999
2000     Heap* heap = map->GetHeap();
2001
2002     if (marks_handling == IGNORE_MARKS &&
2003         ConsString::cast(object)->unchecked_second() ==
2004         heap->empty_string()) {
2005       HeapObject* first =
2006           HeapObject::cast(ConsString::cast(object)->unchecked_first());
2007
2008       *slot = first;
2009
2010       if (!heap->InNewSpace(first)) {
2011         object->set_map_word(MapWord::FromForwardingAddress(first));
2012         return;
2013       }
2014
2015       MapWord first_word = first->map_word();
2016       if (first_word.IsForwardingAddress()) {
2017         HeapObject* target = first_word.ToForwardingAddress();
2018
2019         *slot = target;
2020         object->set_map_word(MapWord::FromForwardingAddress(target));
2021         return;
2022       }
2023
2024       heap->DoScavengeObject(first->map(), slot, first);
2025       object->set_map_word(MapWord::FromForwardingAddress(*slot));
2026       return;
2027     }
2028
2029     int object_size = ConsString::kSize;
2030     EvacuateObject<POINTER_OBJECT, SMALL, kObjectAlignment>(
2031         map, slot, object, object_size);
2032   }
2033
2034   template<ObjectContents object_contents>
2035   class ObjectEvacuationStrategy {
2036    public:
2037     template<int object_size>
2038     static inline void VisitSpecialized(Map* map,
2039                                         HeapObject** slot,
2040                                         HeapObject* object) {
2041       EvacuateObject<object_contents, SMALL, kObjectAlignment>(
2042           map, slot, object, object_size);
2043     }
2044
2045     static inline void Visit(Map* map,
2046                              HeapObject** slot,
2047                              HeapObject* object) {
2048       int object_size = map->instance_size();
2049       EvacuateObject<object_contents, SMALL, kObjectAlignment>(
2050           map, slot, object, object_size);
2051     }
2052   };
2053
2054   static VisitorDispatchTable<ScavengingCallback> table_;
2055 };
2056
2057
2058 template<MarksHandling marks_handling,
2059          LoggingAndProfiling logging_and_profiling_mode>
2060 VisitorDispatchTable<ScavengingCallback>
2061     ScavengingVisitor<marks_handling, logging_and_profiling_mode>::table_;
2062
2063
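// Eagerly initializes the dispatch tables for all four ScavengingVisitor
// specializations so that SelectScavengingVisitorsTable only has to copy the
// table that matches the current marking and logging/profiling state.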
2064 static void InitializeScavengingVisitorsTables() {
2065   ScavengingVisitor<TRANSFER_MARKS,
2066                     LOGGING_AND_PROFILING_DISABLED>::Initialize();
2067   ScavengingVisitor<IGNORE_MARKS, LOGGING_AND_PROFILING_DISABLED>::Initialize();
2068   ScavengingVisitor<TRANSFER_MARKS,
2069                     LOGGING_AND_PROFILING_ENABLED>::Initialize();
2070   ScavengingVisitor<IGNORE_MARKS, LOGGING_AND_PROFILING_ENABLED>::Initialize();
2071 }
2072
2073
2074 void Heap::SelectScavengingVisitorsTable() {
2075   bool logging_and_profiling =
2076       isolate()->logger()->is_logging() ||
2077       CpuProfiler::is_profiling(isolate()) ||
2078       (isolate()->heap_profiler() != NULL &&
2079        isolate()->heap_profiler()->is_profiling());
2080
2081   if (!incremental_marking()->IsMarking()) {
2082     if (!logging_and_profiling) {
2083       scavenging_visitors_table_.CopyFrom(
2084           ScavengingVisitor<IGNORE_MARKS,
2085                             LOGGING_AND_PROFILING_DISABLED>::GetTable());
2086     } else {
2087       scavenging_visitors_table_.CopyFrom(
2088           ScavengingVisitor<IGNORE_MARKS,
2089                             LOGGING_AND_PROFILING_ENABLED>::GetTable());
2090     }
2091   } else {
2092     if (!logging_and_profiling) {
2093       scavenging_visitors_table_.CopyFrom(
2094           ScavengingVisitor<TRANSFER_MARKS,
2095                             LOGGING_AND_PROFILING_DISABLED>::GetTable());
2096     } else {
2097       scavenging_visitors_table_.CopyFrom(
2098           ScavengingVisitor<TRANSFER_MARKS,
2099                             LOGGING_AND_PROFILING_ENABLED>::GetTable());
2100     }
2101
2102     if (incremental_marking()->IsCompacting()) {
2103       // When compacting, forbid short-circuiting of cons strings.
2104       // The scavenging code relies on the fact that a new space object
2105       // can't be evacuated into an evacuation candidate, but
2106       // short-circuiting violates this assumption.
2107       scavenging_visitors_table_.Register(
2108           StaticVisitorBase::kVisitShortcutCandidate,
2109           scavenging_visitors_table_.GetVisitorById(
2110               StaticVisitorBase::kVisitConsString));
2111     }
2112   }
2113 }
2114
2115
2116 void Heap::ScavengeObjectSlow(HeapObject** p, HeapObject* object) {
2117   SLOW_ASSERT(HEAP->InFromSpace(object));
2118   MapWord first_word = object->map_word();
2119   SLOW_ASSERT(!first_word.IsForwardingAddress());
2120   Map* map = first_word.ToMap();
2121   map->GetHeap()->DoScavengeObject(map, p, object);
2122 }
2123
2124
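// Allocates a map with only the fields that are needed during bootstrapping
// initialized; pointer fields such as the prototype, constructor, code cache
// and instance descriptors are patched up later in CreateInitialMaps, once
// the objects they refer to exist.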
2125 MaybeObject* Heap::AllocatePartialMap(InstanceType instance_type,
2126                                       int instance_size) {
2127   Object* result;
2128   MaybeObject* maybe_result = AllocateRawMap();
2129   if (!maybe_result->ToObject(&result)) return maybe_result;
2130
2131   // Map::cast cannot be used due to uninitialized map field.
2132   reinterpret_cast<Map*>(result)->set_map(raw_unchecked_meta_map());
2133   reinterpret_cast<Map*>(result)->set_instance_type(instance_type);
2134   reinterpret_cast<Map*>(result)->set_instance_size(instance_size);
2135   reinterpret_cast<Map*>(result)->set_visitor_id(
2136         StaticVisitorBase::GetVisitorId(instance_type, instance_size));
2137   reinterpret_cast<Map*>(result)->set_inobject_properties(0);
2138   reinterpret_cast<Map*>(result)->set_pre_allocated_property_fields(0);
2139   reinterpret_cast<Map*>(result)->set_unused_property_fields(0);
2140   reinterpret_cast<Map*>(result)->set_bit_field(0);
2141   reinterpret_cast<Map*>(result)->set_bit_field2(0);
2142   int bit_field3 = Map::EnumLengthBits::encode(Map::kInvalidEnumCache) |
2143                    Map::OwnsDescriptors::encode(true);
2144   reinterpret_cast<Map*>(result)->set_bit_field3(bit_field3);
2145   return result;
2146 }
2147
2148
2149 MaybeObject* Heap::AllocateMap(InstanceType instance_type,
2150                                int instance_size,
2151                                ElementsKind elements_kind) {
2152   Object* result;
2153   MaybeObject* maybe_result = AllocateRawMap();
2154   if (!maybe_result->To(&result)) return maybe_result;
2155
2156   Map* map = reinterpret_cast<Map*>(result);
2157   map->set_map_no_write_barrier(meta_map());
2158   map->set_instance_type(instance_type);
2159   map->set_visitor_id(
2160       StaticVisitorBase::GetVisitorId(instance_type, instance_size));
2161   map->set_prototype(null_value(), SKIP_WRITE_BARRIER);
2162   map->set_constructor(null_value(), SKIP_WRITE_BARRIER);
2163   map->set_instance_size(instance_size);
2164   map->set_inobject_properties(0);
2165   map->set_pre_allocated_property_fields(0);
2166   map->set_code_cache(empty_fixed_array(), SKIP_WRITE_BARRIER);
2167   map->init_back_pointer(undefined_value());
2168   map->set_unused_property_fields(0);
2169   map->set_instance_descriptors(empty_descriptor_array());
2170   map->set_bit_field(0);
2171   map->set_bit_field2(1 << Map::kIsExtensible);
2172   int bit_field3 = Map::EnumLengthBits::encode(Map::kInvalidEnumCache) |
2173                    Map::OwnsDescriptors::encode(true);
2174   map->set_bit_field3(bit_field3);
2175   map->set_elements_kind(elements_kind);
2176
2177   return map;
2178 }
2179
2180
2181 MaybeObject* Heap::AllocateCodeCache() {
2182   CodeCache* code_cache;
2183   { MaybeObject* maybe_code_cache = AllocateStruct(CODE_CACHE_TYPE);
2184     if (!maybe_code_cache->To(&code_cache)) return maybe_code_cache;
2185   }
2186   code_cache->set_default_cache(empty_fixed_array(), SKIP_WRITE_BARRIER);
2187   code_cache->set_normal_type_cache(undefined_value(), SKIP_WRITE_BARRIER);
2188   return code_cache;
2189 }
2190
2191
2192 MaybeObject* Heap::AllocatePolymorphicCodeCache() {
2193   return AllocateStruct(POLYMORPHIC_CODE_CACHE_TYPE);
2194 }
2195
2196
2197 MaybeObject* Heap::AllocateAccessorPair() {
2198   AccessorPair* accessors;
2199   { MaybeObject* maybe_accessors = AllocateStruct(ACCESSOR_PAIR_TYPE);
2200     if (!maybe_accessors->To(&accessors)) return maybe_accessors;
2201   }
2202   accessors->set_getter(the_hole_value(), SKIP_WRITE_BARRIER);
2203   accessors->set_setter(the_hole_value(), SKIP_WRITE_BARRIER);
2204   return accessors;
2205 }
2206
2207
2208 MaybeObject* Heap::AllocateTypeFeedbackInfo() {
2209   TypeFeedbackInfo* info;
2210   { MaybeObject* maybe_info = AllocateStruct(TYPE_FEEDBACK_INFO_TYPE);
2211     if (!maybe_info->To(&info)) return maybe_info;
2212   }
2213   info->initialize_storage();
2214   info->set_type_feedback_cells(TypeFeedbackCells::cast(empty_fixed_array()),
2215                                 SKIP_WRITE_BARRIER);
2216   return info;
2217 }
2218
2219
2220 MaybeObject* Heap::AllocateAliasedArgumentsEntry(int aliased_context_slot) {
2221   AliasedArgumentsEntry* entry;
2222   { MaybeObject* maybe_entry = AllocateStruct(ALIASED_ARGUMENTS_ENTRY_TYPE);
2223     if (!maybe_entry->To(&entry)) return maybe_entry;
2224   }
2225   entry->set_aliased_context_slot(aliased_context_slot);
2226   return entry;
2227 }
2228
2229
2230 const Heap::StringTypeTable Heap::string_type_table[] = {
2231 #define STRING_TYPE_ELEMENT(type, size, name, camel_name)                      \
2232   {type, size, k##camel_name##MapRootIndex},
2233   STRING_TYPE_LIST(STRING_TYPE_ELEMENT)
2234 #undef STRING_TYPE_ELEMENT
2235 };
2236
2237
2238 const Heap::ConstantSymbolTable Heap::constant_symbol_table[] = {
2239 #define CONSTANT_SYMBOL_ELEMENT(name, contents)                                \
2240   {contents, k##name##RootIndex},
2241   SYMBOL_LIST(CONSTANT_SYMBOL_ELEMENT)
2242 #undef CONSTANT_SYMBOL_ELEMENT
2243 };
2244
2245
2246 const Heap::StructTable Heap::struct_table[] = {
2247 #define STRUCT_TABLE_ELEMENT(NAME, Name, name)                                 \
2248   { NAME##_TYPE, Name::kSize, k##Name##MapRootIndex },
2249   STRUCT_LIST(STRUCT_TABLE_ELEMENT)
2250 #undef STRUCT_TABLE_ELEMENT
2251 };
2252
2253
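// Bootstraps the initial map hierarchy.  The meta map, fixed array map and
// oddball map are created as partial maps first because the objects their
// remaining fields refer to (the empty fixed array, the empty descriptor
// array, null and undefined) do not exist yet; once those are allocated, the
// partial maps are fixed up and the remaining maps are created normally with
// AllocateMap.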
2254 bool Heap::CreateInitialMaps() {
2255   Object* obj;
2256   { MaybeObject* maybe_obj = AllocatePartialMap(MAP_TYPE, Map::kSize);
2257     if (!maybe_obj->ToObject(&obj)) return false;
2258   }
2259   // Map::cast cannot be used due to uninitialized map field.
2260   Map* new_meta_map = reinterpret_cast<Map*>(obj);
2261   set_meta_map(new_meta_map);
2262   new_meta_map->set_map(new_meta_map);
2263
2264   { MaybeObject* maybe_obj =
2265         AllocatePartialMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2266     if (!maybe_obj->ToObject(&obj)) return false;
2267   }
2268   set_fixed_array_map(Map::cast(obj));
2269
2270   { MaybeObject* maybe_obj = AllocatePartialMap(ODDBALL_TYPE, Oddball::kSize);
2271     if (!maybe_obj->ToObject(&obj)) return false;
2272   }
2273   set_oddball_map(Map::cast(obj));
2274
2275   // Allocate the empty array.
2276   { MaybeObject* maybe_obj = AllocateEmptyFixedArray();
2277     if (!maybe_obj->ToObject(&obj)) return false;
2278   }
2279   set_empty_fixed_array(FixedArray::cast(obj));
2280
2281   { MaybeObject* maybe_obj = Allocate(oddball_map(), OLD_POINTER_SPACE);
2282     if (!maybe_obj->ToObject(&obj)) return false;
2283   }
2284   set_null_value(Oddball::cast(obj));
2285   Oddball::cast(obj)->set_kind(Oddball::kNull);
2286
2287   { MaybeObject* maybe_obj = Allocate(oddball_map(), OLD_POINTER_SPACE);
2288     if (!maybe_obj->ToObject(&obj)) return false;
2289   }
2290   set_undefined_value(Oddball::cast(obj));
2291   Oddball::cast(obj)->set_kind(Oddball::kUndefined);
2292   ASSERT(!InNewSpace(undefined_value()));
2293
2294   // Allocate the empty descriptor array.
2295   { MaybeObject* maybe_obj = AllocateEmptyFixedArray();
2296     if (!maybe_obj->ToObject(&obj)) return false;
2297   }
2298   set_empty_descriptor_array(DescriptorArray::cast(obj));
2299
2300   // Fix the instance_descriptors for the existing maps.
2301   meta_map()->set_code_cache(empty_fixed_array());
2302   meta_map()->init_back_pointer(undefined_value());
2303   meta_map()->set_instance_descriptors(empty_descriptor_array());
2304
2305   fixed_array_map()->set_code_cache(empty_fixed_array());
2306   fixed_array_map()->init_back_pointer(undefined_value());
2307   fixed_array_map()->set_instance_descriptors(empty_descriptor_array());
2308
2309   oddball_map()->set_code_cache(empty_fixed_array());
2310   oddball_map()->init_back_pointer(undefined_value());
2311   oddball_map()->set_instance_descriptors(empty_descriptor_array());
2312
2313   // Fix prototype object for existing maps.
2314   meta_map()->set_prototype(null_value());
2315   meta_map()->set_constructor(null_value());
2316
2317   fixed_array_map()->set_prototype(null_value());
2318   fixed_array_map()->set_constructor(null_value());
2319
2320   oddball_map()->set_prototype(null_value());
2321   oddball_map()->set_constructor(null_value());
2322
2323   { MaybeObject* maybe_obj =
2324         AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2325     if (!maybe_obj->ToObject(&obj)) return false;
2326   }
2327   set_fixed_cow_array_map(Map::cast(obj));
2328   ASSERT(fixed_array_map() != fixed_cow_array_map());
2329
2330   { MaybeObject* maybe_obj =
2331         AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2332     if (!maybe_obj->ToObject(&obj)) return false;
2333   }
2334   set_scope_info_map(Map::cast(obj));
2335
2336   { MaybeObject* maybe_obj = AllocateMap(HEAP_NUMBER_TYPE, HeapNumber::kSize);
2337     if (!maybe_obj->ToObject(&obj)) return false;
2338   }
2339   set_heap_number_map(Map::cast(obj));
2340
2341   { MaybeObject* maybe_obj = AllocateMap(FOREIGN_TYPE, Foreign::kSize);
2342     if (!maybe_obj->ToObject(&obj)) return false;
2343   }
2344   set_foreign_map(Map::cast(obj));
2345
2346   for (unsigned i = 0; i < ARRAY_SIZE(string_type_table); i++) {
2347     const StringTypeTable& entry = string_type_table[i];
2348     { MaybeObject* maybe_obj = AllocateMap(entry.type, entry.size);
2349       if (!maybe_obj->ToObject(&obj)) return false;
2350     }
2351     roots_[entry.index] = Map::cast(obj);
2352   }
2353
2354   { MaybeObject* maybe_obj = AllocateMap(STRING_TYPE, kVariableSizeSentinel);
2355     if (!maybe_obj->ToObject(&obj)) return false;
2356   }
2357   set_undetectable_string_map(Map::cast(obj));
2358   Map::cast(obj)->set_is_undetectable();
2359
2360   { MaybeObject* maybe_obj =
2361         AllocateMap(ASCII_STRING_TYPE, kVariableSizeSentinel);
2362     if (!maybe_obj->ToObject(&obj)) return false;
2363   }
2364   set_undetectable_ascii_string_map(Map::cast(obj));
2365   Map::cast(obj)->set_is_undetectable();
2366
2367   { MaybeObject* maybe_obj =
2368         AllocateMap(FIXED_DOUBLE_ARRAY_TYPE, kVariableSizeSentinel);
2369     if (!maybe_obj->ToObject(&obj)) return false;
2370   }
2371   set_fixed_double_array_map(Map::cast(obj));
2372
2373   { MaybeObject* maybe_obj =
2374         AllocateMap(BYTE_ARRAY_TYPE, kVariableSizeSentinel);
2375     if (!maybe_obj->ToObject(&obj)) return false;
2376   }
2377   set_byte_array_map(Map::cast(obj));
2378
2379   { MaybeObject* maybe_obj =
2380         AllocateMap(FREE_SPACE_TYPE, kVariableSizeSentinel);
2381     if (!maybe_obj->ToObject(&obj)) return false;
2382   }
2383   set_free_space_map(Map::cast(obj));
2384
2385   { MaybeObject* maybe_obj = AllocateByteArray(0, TENURED);
2386     if (!maybe_obj->ToObject(&obj)) return false;
2387   }
2388   set_empty_byte_array(ByteArray::cast(obj));
2389
2390   { MaybeObject* maybe_obj =
2391         AllocateMap(EXTERNAL_PIXEL_ARRAY_TYPE, ExternalArray::kAlignedSize);
2392     if (!maybe_obj->ToObject(&obj)) return false;
2393   }
2394   set_external_pixel_array_map(Map::cast(obj));
2395
2396   { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_BYTE_ARRAY_TYPE,
2397                                          ExternalArray::kAlignedSize);
2398     if (!maybe_obj->ToObject(&obj)) return false;
2399   }
2400   set_external_byte_array_map(Map::cast(obj));
2401
2402   { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE,
2403                                          ExternalArray::kAlignedSize);
2404     if (!maybe_obj->ToObject(&obj)) return false;
2405   }
2406   set_external_unsigned_byte_array_map(Map::cast(obj));
2407
2408   { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_SHORT_ARRAY_TYPE,
2409                                          ExternalArray::kAlignedSize);
2410     if (!maybe_obj->ToObject(&obj)) return false;
2411   }
2412   set_external_short_array_map(Map::cast(obj));
2413
2414   { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE,
2415                                          ExternalArray::kAlignedSize);
2416     if (!maybe_obj->ToObject(&obj)) return false;
2417   }
2418   set_external_unsigned_short_array_map(Map::cast(obj));
2419
2420   { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_INT_ARRAY_TYPE,
2421                                          ExternalArray::kAlignedSize);
2422     if (!maybe_obj->ToObject(&obj)) return false;
2423   }
2424   set_external_int_array_map(Map::cast(obj));
2425
2426   { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_UNSIGNED_INT_ARRAY_TYPE,
2427                                          ExternalArray::kAlignedSize);
2428     if (!maybe_obj->ToObject(&obj)) return false;
2429   }
2430   set_external_unsigned_int_array_map(Map::cast(obj));
2431
2432   { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_FLOAT_ARRAY_TYPE,
2433                                          ExternalArray::kAlignedSize);
2434     if (!maybe_obj->ToObject(&obj)) return false;
2435   }
2436   set_external_float_array_map(Map::cast(obj));
2437
2438   { MaybeObject* maybe_obj =
2439         AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2440     if (!maybe_obj->ToObject(&obj)) return false;
2441   }
2442   set_non_strict_arguments_elements_map(Map::cast(obj));
2443
2444   { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_DOUBLE_ARRAY_TYPE,
2445                                          ExternalArray::kAlignedSize);
2446     if (!maybe_obj->ToObject(&obj)) return false;
2447   }
2448   set_external_double_array_map(Map::cast(obj));
2449
2450   { MaybeObject* maybe_obj = AllocateMap(CODE_TYPE, kVariableSizeSentinel);
2451     if (!maybe_obj->ToObject(&obj)) return false;
2452   }
2453   set_code_map(Map::cast(obj));
2454
2455   { MaybeObject* maybe_obj = AllocateMap(JS_GLOBAL_PROPERTY_CELL_TYPE,
2456                                          JSGlobalPropertyCell::kSize);
2457     if (!maybe_obj->ToObject(&obj)) return false;
2458   }
2459   set_global_property_cell_map(Map::cast(obj));
2460
2461   { MaybeObject* maybe_obj = AllocateMap(FILLER_TYPE, kPointerSize);
2462     if (!maybe_obj->ToObject(&obj)) return false;
2463   }
2464   set_one_pointer_filler_map(Map::cast(obj));
2465
2466   { MaybeObject* maybe_obj = AllocateMap(FILLER_TYPE, 2 * kPointerSize);
2467     if (!maybe_obj->ToObject(&obj)) return false;
2468   }
2469   set_two_pointer_filler_map(Map::cast(obj));
2470
2471   for (unsigned i = 0; i < ARRAY_SIZE(struct_table); i++) {
2472     const StructTable& entry = struct_table[i];
2473     { MaybeObject* maybe_obj = AllocateMap(entry.type, entry.size);
2474       if (!maybe_obj->ToObject(&obj)) return false;
2475     }
2476     roots_[entry.index] = Map::cast(obj);
2477   }
2478
2479   { MaybeObject* maybe_obj =
2480         AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2481     if (!maybe_obj->ToObject(&obj)) return false;
2482   }
2483   set_hash_table_map(Map::cast(obj));
2484
2485   { MaybeObject* maybe_obj =
2486         AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2487     if (!maybe_obj->ToObject(&obj)) return false;
2488   }
2489   set_function_context_map(Map::cast(obj));
2490
2491   { MaybeObject* maybe_obj =
2492         AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2493     if (!maybe_obj->ToObject(&obj)) return false;
2494   }
2495   set_catch_context_map(Map::cast(obj));
2496
2497   { MaybeObject* maybe_obj =
2498         AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2499     if (!maybe_obj->ToObject(&obj)) return false;
2500   }
2501   set_with_context_map(Map::cast(obj));
2502
2503   { MaybeObject* maybe_obj =
2504         AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2505     if (!maybe_obj->ToObject(&obj)) return false;
2506   }
2507   set_block_context_map(Map::cast(obj));
2508
2509   { MaybeObject* maybe_obj =
2510         AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2511     if (!maybe_obj->ToObject(&obj)) return false;
2512   }
2513   set_module_context_map(Map::cast(obj));
2514
2515   { MaybeObject* maybe_obj =
2516         AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2517     if (!maybe_obj->ToObject(&obj)) return false;
2518   }
2519   set_global_context_map(Map::cast(obj));
2520
2521   { MaybeObject* maybe_obj =
2522         AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2523     if (!maybe_obj->ToObject(&obj)) return false;
2524   }
2525   Map* native_context_map = Map::cast(obj);
2526   native_context_map->set_dictionary_map(true);
2527   native_context_map->set_visitor_id(StaticVisitorBase::kVisitNativeContext);
2528   set_native_context_map(native_context_map);
2529
2530   { MaybeObject* maybe_obj = AllocateMap(SHARED_FUNCTION_INFO_TYPE,
2531                                          SharedFunctionInfo::kAlignedSize);
2532     if (!maybe_obj->ToObject(&obj)) return false;
2533   }
2534   set_shared_function_info_map(Map::cast(obj));
2535
2536   { MaybeObject* maybe_obj = AllocateMap(JS_MESSAGE_OBJECT_TYPE,
2537                                          JSMessageObject::kSize);
2538     if (!maybe_obj->ToObject(&obj)) return false;
2539   }
2540   set_message_object_map(Map::cast(obj));
2541
2542   Map* external_map;
2543   { MaybeObject* maybe_obj =
2544         AllocateMap(JS_OBJECT_TYPE, JSObject::kHeaderSize + kPointerSize);
2545     if (!maybe_obj->To(&external_map)) return false;
2546   }
2547   external_map->set_is_extensible(false);
2548   set_external_map(external_map);
2549
2550   ASSERT(!InNewSpace(empty_fixed_array()));
2551   return true;
2552 }
2553
2554
2555 MaybeObject* Heap::AllocateHeapNumber(double value, PretenureFlag pretenure) {
2556   // Statically ensure that it is safe to allocate heap numbers in paged
2557   // spaces.
2558   STATIC_ASSERT(HeapNumber::kSize <= Page::kNonCodeObjectAreaSize);
2559   AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
2560
2561   Object* result;
2562   { MaybeObject* maybe_result =
2563         AllocateRaw(HeapNumber::kSize, space, OLD_DATA_SPACE);
2564     if (!maybe_result->ToObject(&result)) return maybe_result;
2565   }
2566
2567   HeapObject::cast(result)->set_map_no_write_barrier(heap_number_map());
2568   HeapNumber::cast(result)->set_value(value);
2569   return result;
2570 }
2571
2572
2573 MaybeObject* Heap::AllocateHeapNumber(double value) {
2574   // Use the general version if we're forced to always allocate.
2575   if (always_allocate()) return AllocateHeapNumber(value, TENURED);
2576
2577   // This version of AllocateHeapNumber is optimized for
2578   // allocation in new space.
2579   STATIC_ASSERT(HeapNumber::kSize <= Page::kMaxNonCodeHeapObjectSize);
2580   ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
2581   Object* result;
2582   { MaybeObject* maybe_result = new_space_.AllocateRaw(HeapNumber::kSize);
2583     if (!maybe_result->ToObject(&result)) return maybe_result;
2584   }
2585   HeapObject::cast(result)->set_map_no_write_barrier(heap_number_map());
2586   HeapNumber::cast(result)->set_value(value);
2587   return result;
2588 }
2589
2590
2591 MaybeObject* Heap::AllocateJSGlobalPropertyCell(Object* value) {
2592   Object* result;
2593   { MaybeObject* maybe_result = AllocateRawCell();
2594     if (!maybe_result->ToObject(&result)) return maybe_result;
2595   }
2596   HeapObject::cast(result)->set_map_no_write_barrier(
2597       global_property_cell_map());
2598   JSGlobalPropertyCell::cast(result)->set_value(value);
2599   return result;
2600 }
2601
2602
2603 MaybeObject* Heap::CreateOddball(const char* to_string,
2604                                  Object* to_number,
2605                                  byte kind) {
2606   Object* result;
2607   { MaybeObject* maybe_result = Allocate(oddball_map(), OLD_POINTER_SPACE);
2608     if (!maybe_result->ToObject(&result)) return maybe_result;
2609   }
2610   return Oddball::cast(result)->Initialize(to_string, to_number, kind);
2611 }
2612
2613
2614 bool Heap::CreateApiObjects() {
2615   Object* obj;
2616
2617   { MaybeObject* maybe_obj = AllocateMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
2618     if (!maybe_obj->ToObject(&obj)) return false;
2619   }
2620   // Don't use Smi-only elements optimizations for objects with the neander
2621   // map. There are too many cases where element values are set directly,
2622   // without a bottleneck to trap the Smi-only -> fast elements transition,
2623   // and there appears to be no benefit in optimizing this case.
2624   Map* new_neander_map = Map::cast(obj);
2625   new_neander_map->set_elements_kind(TERMINAL_FAST_ELEMENTS_KIND);
2626   set_neander_map(new_neander_map);
2627
2628   { MaybeObject* maybe_obj = AllocateJSObjectFromMap(neander_map());
2629     if (!maybe_obj->ToObject(&obj)) return false;
2630   }
2631   Object* elements;
2632   { MaybeObject* maybe_elements = AllocateFixedArray(2);
2633     if (!maybe_elements->ToObject(&elements)) return false;
2634   }
2635   FixedArray::cast(elements)->set(0, Smi::FromInt(0));
2636   JSObject::cast(obj)->set_elements(FixedArray::cast(elements));
2637   set_message_listeners(JSObject::cast(obj));
2638
2639   return true;
2640 }
2641
2642
2643 void Heap::CreateJSEntryStub() {
2644   JSEntryStub stub;
2645   set_js_entry_code(*stub.GetCode());
2646 }
2647
2648
2649 void Heap::CreateJSConstructEntryStub() {
2650   JSConstructEntryStub stub;
2651   set_js_construct_entry_code(*stub.GetCode());
2652 }
2653
2654
2655 void Heap::CreateFixedStubs() {
2656   // Here we create roots for fixed stubs. They are needed at GC
2657   // for cooking and uncooking (check out frames.cc).
2658   // This eliminates the need for doing a dictionary lookup in the
2659   // stub cache for these stubs.
2660   HandleScope scope;
2661   // gcc-4.4 has a problem generating correct code for the following snippet:
2662   // {  JSEntryStub stub;
2663   //    js_entry_code_ = *stub.GetCode();
2664   // }
2665   // {  JSConstructEntryStub stub;
2666   //    js_construct_entry_code_ = *stub.GetCode();
2667   // }
2668   // To work around the problem, make separate functions without inlining.
2669   Heap::CreateJSEntryStub();
2670   Heap::CreateJSConstructEntryStub();
2671
2672   // Create stubs that should be there, so we don't unexpectedly have to
2673   // create them if we need them during the creation of another stub.
2674   // Stub creation mixes raw pointers and handles in an unsafe manner so
2675   // we cannot create stubs while we are creating stubs.
2676   CodeStub::GenerateStubsAheadOfTime();
2677 }
2678
2679
2680 bool Heap::CreateInitialObjects() {
2681   Object* obj;
2682
2683   // The -0 value must be set before NumberFromDouble works.
2684   { MaybeObject* maybe_obj = AllocateHeapNumber(-0.0, TENURED);
2685     if (!maybe_obj->ToObject(&obj)) return false;
2686   }
2687   set_minus_zero_value(HeapNumber::cast(obj));
2688   ASSERT(signbit(minus_zero_value()->Number()) != 0);
2689
2690   { MaybeObject* maybe_obj = AllocateHeapNumber(OS::nan_value(), TENURED);
2691     if (!maybe_obj->ToObject(&obj)) return false;
2692   }
2693   set_nan_value(HeapNumber::cast(obj));
2694
2695   { MaybeObject* maybe_obj = AllocateHeapNumber(V8_INFINITY, TENURED);
2696     if (!maybe_obj->ToObject(&obj)) return false;
2697   }
2698   set_infinity_value(HeapNumber::cast(obj));
2699
2700   // The hole has not been created yet, but we want to put something
2701   // predictable in the gaps in the symbol table, so let's make that Smi zero.
2702   set_the_hole_value(reinterpret_cast<Oddball*>(Smi::FromInt(0)));
2703
2704   // Allocate initial symbol table.
2705   { MaybeObject* maybe_obj = SymbolTable::Allocate(kInitialSymbolTableSize);
2706     if (!maybe_obj->ToObject(&obj)) return false;
2707   }
2708   // Don't use set_symbol_table() due to asserts.
2709   roots_[kSymbolTableRootIndex] = obj;
2710
2711   // Finish initializing oddballs after creating the symbol table.
2712   { MaybeObject* maybe_obj =
2713         undefined_value()->Initialize("undefined",
2714                                       nan_value(),
2715                                       Oddball::kUndefined);
2716     if (!maybe_obj->ToObject(&obj)) return false;
2717   }
2718
2719   // Initialize the null_value.
2720   { MaybeObject* maybe_obj =
2721         null_value()->Initialize("null", Smi::FromInt(0), Oddball::kNull);
2722     if (!maybe_obj->ToObject(&obj)) return false;
2723   }
2724
2725   { MaybeObject* maybe_obj = CreateOddball("true",
2726                                            Smi::FromInt(1),
2727                                            Oddball::kTrue);
2728     if (!maybe_obj->ToObject(&obj)) return false;
2729   }
2730   set_true_value(Oddball::cast(obj));
2731
2732   { MaybeObject* maybe_obj = CreateOddball("false",
2733                                            Smi::FromInt(0),
2734                                            Oddball::kFalse);
2735     if (!maybe_obj->ToObject(&obj)) return false;
2736   }
2737   set_false_value(Oddball::cast(obj));
2738
2739   { MaybeObject* maybe_obj = CreateOddball("hole",
2740                                            Smi::FromInt(-1),
2741                                            Oddball::kTheHole);
2742     if (!maybe_obj->ToObject(&obj)) return false;
2743   }
2744   set_the_hole_value(Oddball::cast(obj));
2745
2746   { MaybeObject* maybe_obj = CreateOddball("arguments_marker",
2747                                            Smi::FromInt(-4),
2748                                            Oddball::kArgumentMarker);
2749     if (!maybe_obj->ToObject(&obj)) return false;
2750   }
2751   set_arguments_marker(Oddball::cast(obj));
2752
2753   { MaybeObject* maybe_obj = CreateOddball("no_interceptor_result_sentinel",
2754                                            Smi::FromInt(-2),
2755                                            Oddball::kOther);
2756     if (!maybe_obj->ToObject(&obj)) return false;
2757   }
2758   set_no_interceptor_result_sentinel(obj);
2759
2760   { MaybeObject* maybe_obj = CreateOddball("termination_exception",
2761                                            Smi::FromInt(-3),
2762                                            Oddball::kOther);
2763     if (!maybe_obj->ToObject(&obj)) return false;
2764   }
2765   set_termination_exception(obj);
2766
2767   // Allocate the empty string.
2768   { MaybeObject* maybe_obj = AllocateRawOneByteString(0, TENURED);
2769     if (!maybe_obj->ToObject(&obj)) return false;
2770   }
2771   set_empty_string(String::cast(obj));
2772
2773   for (unsigned i = 0; i < ARRAY_SIZE(constant_symbol_table); i++) {
2774     { MaybeObject* maybe_obj =
2775           LookupAsciiSymbol(constant_symbol_table[i].contents);
2776       if (!maybe_obj->ToObject(&obj)) return false;
2777     }
2778     roots_[constant_symbol_table[i].index] = String::cast(obj);
2779   }
2780
2781   // Allocate the hidden symbol which is used to identify the hidden properties
2782   // in JSObjects. The hash code has a special value so that it will not match
2783   // the empty string when searching for the property. It cannot be part of the
2784   // loop above because it needs to be allocated manually with the special
2785   // hash code in place. The hash code for the hidden_symbol is zero to ensure
2786   // that it will always be at the first entry in property descriptors.
2787   { MaybeObject* maybe_obj =
2788         AllocateSymbol(CStrVector(""), 0, String::kEmptyStringHash);
2789     if (!maybe_obj->ToObject(&obj)) return false;
2790   }
2791   hidden_symbol_ = String::cast(obj);
2792
2793   // Allocate the foreign for __proto__.
2794   { MaybeObject* maybe_obj =
2795         AllocateForeign((Address) &Accessors::ObjectPrototype);
2796     if (!maybe_obj->ToObject(&obj)) return false;
2797   }
2798   set_prototype_accessors(Foreign::cast(obj));
2799
2800   // Allocate the code_stubs dictionary. The initial size is set to avoid
2801   // expanding the dictionary during bootstrapping.
2802   { MaybeObject* maybe_obj = UnseededNumberDictionary::Allocate(128);
2803     if (!maybe_obj->ToObject(&obj)) return false;
2804   }
2805   set_code_stubs(UnseededNumberDictionary::cast(obj));
2806
2807
2808   // Allocate the non_monomorphic_cache used in stub-cache.cc. The initial size
2809   // is set to avoid expanding the dictionary during bootstrapping.
2810   { MaybeObject* maybe_obj = UnseededNumberDictionary::Allocate(64);
2811     if (!maybe_obj->ToObject(&obj)) return false;
2812   }
2813   set_non_monomorphic_cache(UnseededNumberDictionary::cast(obj));
2814
2815   { MaybeObject* maybe_obj = AllocatePolymorphicCodeCache();
2816     if (!maybe_obj->ToObject(&obj)) return false;
2817   }
2818   set_polymorphic_code_cache(PolymorphicCodeCache::cast(obj));
2819
2820   set_instanceof_cache_function(Smi::FromInt(0));
2821   set_instanceof_cache_map(Smi::FromInt(0));
2822   set_instanceof_cache_answer(Smi::FromInt(0));
2823
2824   CreateFixedStubs();
2825
2826   // Allocate the dictionary of intrinsic function names.
2827   { MaybeObject* maybe_obj = StringDictionary::Allocate(Runtime::kNumFunctions);
2828     if (!maybe_obj->ToObject(&obj)) return false;
2829   }
2830   { MaybeObject* maybe_obj = Runtime::InitializeIntrinsicFunctionNames(this,
2831                                                                        obj);
2832     if (!maybe_obj->ToObject(&obj)) return false;
2833   }
2834   set_intrinsic_function_names(StringDictionary::cast(obj));
2835
2836   { MaybeObject* maybe_obj = AllocateInitialNumberStringCache();
2837     if (!maybe_obj->ToObject(&obj)) return false;
2838   }
2839   set_number_string_cache(FixedArray::cast(obj));
2840
2841   // Allocate cache for single character ASCII strings.
2842   { MaybeObject* maybe_obj =
2843         AllocateFixedArray(String::kMaxAsciiCharCode + 1, TENURED);
2844     if (!maybe_obj->ToObject(&obj)) return false;
2845   }
2846   set_single_character_string_cache(FixedArray::cast(obj));
2847
2848   // Allocate the caches for string split results and for regexp
2848   // multiple-match results.
2849   { MaybeObject* maybe_obj = AllocateFixedArray(
2850       RegExpResultsCache::kRegExpResultsCacheSize, TENURED);
2851     if (!maybe_obj->ToObject(&obj)) return false;
2852   }
2853   set_string_split_cache(FixedArray::cast(obj));
2854
2855   { MaybeObject* maybe_obj = AllocateFixedArray(
2856       RegExpResultsCache::kRegExpResultsCacheSize, TENURED);
2857     if (!maybe_obj->ToObject(&obj)) return false;
2858   }
2859   set_regexp_multiple_cache(FixedArray::cast(obj));
2860
2861   // Allocate cache for external strings pointing to native source code.
2862   { MaybeObject* maybe_obj = AllocateFixedArray(Natives::GetBuiltinsCount());
2863     if (!maybe_obj->ToObject(&obj)) return false;
2864   }
2865   set_natives_source_cache(FixedArray::cast(obj));
2866
2867   // Allocate object to hold object observation state.
2868   { MaybeObject* maybe_obj = AllocateMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
2869     if (!maybe_obj->ToObject(&obj)) return false;
2870   }
2871   { MaybeObject* maybe_obj = AllocateJSObjectFromMap(Map::cast(obj));
2872     if (!maybe_obj->ToObject(&obj)) return false;
2873   }
2874   set_observation_state(JSObject::cast(obj));
2875
2876   // Handling of script id generation is in FACTORY->NewScript.
2877   set_last_script_id(undefined_value());
2878
2879   // Initialize keyed lookup cache.
2880   isolate_->keyed_lookup_cache()->Clear();
2881
2882   // Initialize context slot cache.
2883   isolate_->context_slot_cache()->Clear();
2884
2885   // Initialize descriptor cache.
2886   isolate_->descriptor_lookup_cache()->Clear();
2887
2888   // Initialize compilation cache.
2889   isolate_->compilation_cache()->Clear();
2890
2891   return true;
2892 }
2893
2894
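     // The roots listed below are the only ones that may be written after the
     // heap has been initialized (runtime caches, stack limits, deoptimization
     // PC offsets, the last script id, the empty script and the symbol table);
     // for every other root this predicate returns false.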
2895 bool Heap::RootCanBeWrittenAfterInitialization(Heap::RootListIndex root_index) {
2896   RootListIndex writable_roots[] = {
2897     kStoreBufferTopRootIndex,
2898     kStackLimitRootIndex,
2899     kInstanceofCacheFunctionRootIndex,
2900     kInstanceofCacheMapRootIndex,
2901     kInstanceofCacheAnswerRootIndex,
2902     kCodeStubsRootIndex,
2903     kNonMonomorphicCacheRootIndex,
2904     kPolymorphicCodeCacheRootIndex,
2905     kLastScriptIdRootIndex,
2906     kEmptyScriptRootIndex,
2907     kRealStackLimitRootIndex,
2908     kArgumentsAdaptorDeoptPCOffsetRootIndex,
2909     kConstructStubDeoptPCOffsetRootIndex,
2910     kGetterStubDeoptPCOffsetRootIndex,
2911     kSetterStubDeoptPCOffsetRootIndex,
2912     kSymbolTableRootIndex,
2913   };
2914
2915   for (unsigned int i = 0; i < ARRAY_SIZE(writable_roots); i++) {
2916     if (root_index == writable_roots[i])
2917       return true;
2918   }
2919   return false;
2920 }
2921
2922
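     // The results cache is a flat FixedArray of kRegExpResultsCacheSize slots.
     // Each logical entry spans kArrayEntriesPerCacheEntry consecutive slots
     // holding the key string, the key pattern and the cached results array.
     // The primary index is derived from the string hash, masked to the cache
     // size and rounded down to an entry boundary; on a mismatch the entry
     // immediately following is probed as well, and Smi 0 signals a miss.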
2923 Object* RegExpResultsCache::Lookup(Heap* heap,
2924                                    String* key_string,
2925                                    Object* key_pattern,
2926                                    ResultsCacheType type) {
2927   FixedArray* cache;
2928   if (!key_string->IsSymbol()) return Smi::FromInt(0);
2929   if (type == STRING_SPLIT_SUBSTRINGS) {
2930     ASSERT(key_pattern->IsString());
2931     if (!key_pattern->IsSymbol()) return Smi::FromInt(0);
2932     cache = heap->string_split_cache();
2933   } else {
2934     ASSERT(type == REGEXP_MULTIPLE_INDICES);
2935     ASSERT(key_pattern->IsFixedArray());
2936     cache = heap->regexp_multiple_cache();
2937   }
2938
2939   uint32_t hash = key_string->Hash();
2940   uint32_t index = ((hash & (kRegExpResultsCacheSize - 1)) &
2941       ~(kArrayEntriesPerCacheEntry - 1));
2942   if (cache->get(index + kStringOffset) == key_string &&
2943       cache->get(index + kPatternOffset) == key_pattern) {
2944     return cache->get(index + kArrayOffset);
2945   }
2946   index =
2947       ((index + kArrayEntriesPerCacheEntry) & (kRegExpResultsCacheSize - 1));
2948   if (cache->get(index + kStringOffset) == key_string &&
2949       cache->get(index + kPatternOffset) == key_pattern) {
2950     return cache->get(index + kArrayOffset);
2951   }
2952   return Smi::FromInt(0);
2953 }
2954
2955
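     // Insertion probes the same two slots as Lookup.  An empty primary slot is
     // filled directly; otherwise the secondary slot is tried.  If both are
     // occupied, the secondary slot is cleared and the new entry overwrites the
     // primary one, so at most two entries compete for the same hash bucket.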
2956 void RegExpResultsCache::Enter(Heap* heap,
2957                                String* key_string,
2958                                Object* key_pattern,
2959                                FixedArray* value_array,
2960                                ResultsCacheType type) {
2961   FixedArray* cache;
2962   if (!key_string->IsSymbol()) return;
2963   if (type == STRING_SPLIT_SUBSTRINGS) {
2964     ASSERT(key_pattern->IsString());
2965     if (!key_pattern->IsSymbol()) return;
2966     cache = heap->string_split_cache();
2967   } else {
2968     ASSERT(type == REGEXP_MULTIPLE_INDICES);
2969     ASSERT(key_pattern->IsFixedArray());
2970     cache = heap->regexp_multiple_cache();
2971   }
2972
2973   uint32_t hash = key_string->Hash();
2974   uint32_t index = ((hash & (kRegExpResultsCacheSize - 1)) &
2975       ~(kArrayEntriesPerCacheEntry - 1));
2976   if (cache->get(index + kStringOffset) == Smi::FromInt(0)) {
2977     cache->set(index + kStringOffset, key_string);
2978     cache->set(index + kPatternOffset, key_pattern);
2979     cache->set(index + kArrayOffset, value_array);
2980   } else {
2981     uint32_t index2 =
2982         ((index + kArrayEntriesPerCacheEntry) & (kRegExpResultsCacheSize - 1));
2983     if (cache->get(index2 + kStringOffset) == Smi::FromInt(0)) {
2984       cache->set(index2 + kStringOffset, key_string);
2985       cache->set(index2 + kPatternOffset, key_pattern);
2986       cache->set(index2 + kArrayOffset, value_array);
2987     } else {
2988       cache->set(index2 + kStringOffset, Smi::FromInt(0));
2989       cache->set(index2 + kPatternOffset, Smi::FromInt(0));
2990       cache->set(index2 + kArrayOffset, Smi::FromInt(0));
2991       cache->set(index + kStringOffset, key_string);
2992       cache->set(index + kPatternOffset, key_pattern);
2993       cache->set(index + kArrayOffset, value_array);
2994     }
2995   }
2996   // If the array is a reasonably short list of substrings, convert it into a
2997   // list of symbols.
2998   if (type == STRING_SPLIT_SUBSTRINGS && value_array->length() < 100) {
2999     for (int i = 0; i < value_array->length(); i++) {
3000       String* str = String::cast(value_array->get(i));
3001       Object* symbol;
3002       MaybeObject* maybe_symbol = heap->LookupSymbol(str);
3003       if (maybe_symbol->ToObject(&symbol)) {
3004         value_array->set(i, symbol);
3005       }
3006     }
3007   }
3008   // Convert backing store to a copy-on-write array.
3009   value_array->set_map_no_write_barrier(heap->fixed_cow_array_map());
3010 }
3011
3012
3013 void RegExpResultsCache::Clear(FixedArray* cache) {
3014   for (int i = 0; i < kRegExpResultsCacheSize; i++) {
3015     cache->set(i, Smi::FromInt(0));
3016   }
3017 }
3018
3019
3020 MaybeObject* Heap::AllocateInitialNumberStringCache() {
3021   MaybeObject* maybe_obj =
3022       AllocateFixedArray(kInitialNumberStringCacheSize * 2, TENURED);
3023   return maybe_obj;
3024 }
3025
3026
3027 int Heap::FullSizeNumberStringCacheLength() {
3028   // Compute the size of the number string cache based on the max new space
3029   // size.  The cache has a minimum size of twice the initial cache size to
3030   // ensure that it is bigger after being made 'full size'.
3031   int number_string_cache_size = max_semispace_size_ / 512;
3032   number_string_cache_size = Max(kInitialNumberStringCacheSize * 2,
3033                                  Min(0x4000, number_string_cache_size));
3034   // There is a string and a number per entry so the length is twice the number
3035   // of entries.
3036   return number_string_cache_size * 2;
3037 }
3038
3039
3040 void Heap::AllocateFullSizeNumberStringCache() {
3041   // The idea is to have a small number string cache in the snapshot to keep
3042   // boot-time memory usage down.  If the cache has already been expanded
3043   // while the snapshot is being created, that defeats the purpose.
3044   ASSERT(!Serializer::enabled() || FLAG_extra_code != NULL);
3045   MaybeObject* maybe_obj =
3046       AllocateFixedArray(FullSizeNumberStringCacheLength(), TENURED);
3047   Object* new_cache;
3048   if (maybe_obj->ToObject(&new_cache)) {
3049     // We don't bother to repopulate the cache with entries from the old cache.
3050     // It will be repopulated soon enough with new strings.
3051     set_number_string_cache(FixedArray::cast(new_cache));
3052   }
3053   // If allocation fails then we just return without doing anything.  It is only
3054   // a cache, so best effort is OK here.
3055 }
3056
3057
3058 void Heap::FlushNumberStringCache() {
3059   // Flush the number to string cache.
3060   int len = number_string_cache()->length();
3061   for (int i = 0; i < len; i++) {
3062     number_string_cache()->set_undefined(this, i);
3063   }
3064 }
3065
3066
3067 static inline int double_get_hash(double d) {
3068   DoubleRepresentation rep(d);
3069   return static_cast<int>(rep.bits) ^ static_cast<int>(rep.bits >> 32);
3070 }
3071
3072
3073 static inline int smi_get_hash(Smi* smi) {
3074   return smi->value();
3075 }
3076
3077
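     // The number string cache stores (number, string) pairs in consecutive
     // slots of a FixedArray: slot 2*i holds the key and slot 2*i+1 the cached
     // string.  Smis hash to their value, doubles to the XOR of the two 32-bit
     // halves of their bit pattern, masked by half the cache length.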
3078 Object* Heap::GetNumberStringCache(Object* number) {
3079   int hash;
3080   int mask = (number_string_cache()->length() >> 1) - 1;
3081   if (number->IsSmi()) {
3082     hash = smi_get_hash(Smi::cast(number)) & mask;
3083   } else {
3084     hash = double_get_hash(number->Number()) & mask;
3085   }
3086   Object* key = number_string_cache()->get(hash * 2);
3087   if (key == number) {
3088     return String::cast(number_string_cache()->get(hash * 2 + 1));
3089   } else if (key->IsHeapNumber() &&
3090              number->IsHeapNumber() &&
3091              key->Number() == number->Number()) {
3092     return String::cast(number_string_cache()->get(hash * 2 + 1));
3093   }
3094   return undefined_value();
3095 }
3096
3097
3098 void Heap::SetNumberStringCache(Object* number, String* string) {
3099   int hash;
3100   int mask = (number_string_cache()->length() >> 1) - 1;
3101   if (number->IsSmi()) {
3102     hash = smi_get_hash(Smi::cast(number)) & mask;
3103   } else {
3104     hash = double_get_hash(number->Number()) & mask;
3105   }
3106   if (number_string_cache()->get(hash * 2) != undefined_value() &&
3107       number_string_cache()->length() != FullSizeNumberStringCacheLength()) {
3108     // The first time we have a hash collision, we move to the full sized
3109     // number string cache.
3110     AllocateFullSizeNumberStringCache();
3111     return;
3112   }
3113   number_string_cache()->set(hash * 2, number);
3114   number_string_cache()->set(hash * 2 + 1, string);
3115 }
3116
3117
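     // Converts a number to its string representation, consulting the number
     // string cache first when requested.  Smis are converted with IntToCString
     // and heap numbers with DoubleToCString; a successfully allocated result
     // string is entered into the cache before being returned.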
3118 MaybeObject* Heap::NumberToString(Object* number,
3119                                   bool check_number_string_cache) {
3120   isolate_->counters()->number_to_string_runtime()->Increment();
3121   if (check_number_string_cache) {
3122     Object* cached = GetNumberStringCache(number);
3123     if (cached != undefined_value()) {
3124       return cached;
3125     }
3126   }
3127
3128   char arr[100];
3129   Vector<char> buffer(arr, ARRAY_SIZE(arr));
3130   const char* str;
3131   if (number->IsSmi()) {
3132     int num = Smi::cast(number)->value();
3133     str = IntToCString(num, buffer);
3134   } else {
3135     double num = HeapNumber::cast(number)->value();
3136     str = DoubleToCString(num, buffer);
3137   }
3138
3139   Object* js_string;
3140   MaybeObject* maybe_js_string = AllocateStringFromOneByte(CStrVector(str));
3141   if (maybe_js_string->ToObject(&js_string)) {
3142     SetNumberStringCache(number, String::cast(js_string));
3143   }
3144   return maybe_js_string;
3145 }
3146
3147
3148 MaybeObject* Heap::Uint32ToString(uint32_t value,
3149                                   bool check_number_string_cache) {
3150   Object* number;
3151   MaybeObject* maybe = NumberFromUint32(value);
3152   if (!maybe->To<Object>(&number)) return maybe;
3153   return NumberToString(number, check_number_string_cache);
3154 }
3155
3156
3157 Map* Heap::MapForExternalArrayType(ExternalArrayType array_type) {
3158   return Map::cast(roots_[RootIndexForExternalArrayType(array_type)]);
3159 }
3160
3161
3162 Heap::RootListIndex Heap::RootIndexForExternalArrayType(
3163     ExternalArrayType array_type) {
3164   switch (array_type) {
3165     case kExternalByteArray:
3166       return kExternalByteArrayMapRootIndex;
3167     case kExternalUnsignedByteArray:
3168       return kExternalUnsignedByteArrayMapRootIndex;
3169     case kExternalShortArray:
3170       return kExternalShortArrayMapRootIndex;
3171     case kExternalUnsignedShortArray:
3172       return kExternalUnsignedShortArrayMapRootIndex;
3173     case kExternalIntArray:
3174       return kExternalIntArrayMapRootIndex;
3175     case kExternalUnsignedIntArray:
3176       return kExternalUnsignedIntArrayMapRootIndex;
3177     case kExternalFloatArray:
3178       return kExternalFloatArrayMapRootIndex;
3179     case kExternalDoubleArray:
3180       return kExternalDoubleArrayMapRootIndex;
3181     case kExternalPixelArray:
3182       return kExternalPixelArrayMapRootIndex;
3183     default:
3184       UNREACHABLE();
3185       return kUndefinedValueRootIndex;
3186   }
3187 }
3188
3189
3190 MaybeObject* Heap::NumberFromDouble(double value, PretenureFlag pretenure) {
3191   // We need to distinguish the minus zero value and this cannot be
3192   // done after conversion to int. Doing this by comparing bit
3193   // patterns is faster than using fpclassify() et al.
3194   static const DoubleRepresentation minus_zero(-0.0);
3195
3196   DoubleRepresentation rep(value);
3197   if (rep.bits == minus_zero.bits) {
3198     return AllocateHeapNumber(-0.0, pretenure);
3199   }
3200
3201   int int_value = FastD2I(value);
3202   if (value == int_value && Smi::IsValid(int_value)) {
3203     return Smi::FromInt(int_value);
3204   }
3205
3206   // Materialize the value in the heap.
3207   return AllocateHeapNumber(value, pretenure);
3208 }
3209
3210
3211 MaybeObject* Heap::AllocateForeign(Address address, PretenureFlag pretenure) {
3212   // Statically ensure that it is safe to allocate foreigns in paged spaces.
3213   STATIC_ASSERT(Foreign::kSize <= Page::kMaxNonCodeHeapObjectSize);
3214   AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
3215   Foreign* result;
3216   MaybeObject* maybe_result = Allocate(foreign_map(), space);
3217   if (!maybe_result->To(&result)) return maybe_result;
3218   result->set_foreign_address(address);
3219   return result;
3220 }
3221
3222
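     // Allocates a SharedFunctionInfo in old pointer space and initializes its
     // pointer fields to their defaults (the kIllegal builtin as code, the
     // generic construct stub, empty or undefined values for the rest) and its
     // integer fields and compiler hints to zero, apart from the stress deopt
     // counter.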
3223 MaybeObject* Heap::AllocateSharedFunctionInfo(Object* name) {
3224   SharedFunctionInfo* share;
3225   MaybeObject* maybe = Allocate(shared_function_info_map(), OLD_POINTER_SPACE);
3226   if (!maybe->To<SharedFunctionInfo>(&share)) return maybe;
3227
3228   // Set pointer fields.
3229   share->set_name(name);
3230   Code* illegal = isolate_->builtins()->builtin(Builtins::kIllegal);
3231   share->set_code(illegal);
3232   share->ClearOptimizedCodeMap();
3233   share->set_scope_info(ScopeInfo::Empty());
3234   Code* construct_stub =
3235       isolate_->builtins()->builtin(Builtins::kJSConstructStubGeneric);
3236   share->set_construct_stub(construct_stub);
3237   share->set_instance_class_name(Object_symbol());
3238   share->set_function_data(undefined_value(), SKIP_WRITE_BARRIER);
3239   share->set_script(undefined_value(), SKIP_WRITE_BARRIER);
3240   share->set_debug_info(undefined_value(), SKIP_WRITE_BARRIER);
3241   share->set_inferred_name(empty_string(), SKIP_WRITE_BARRIER);
3242   share->set_initial_map(undefined_value(), SKIP_WRITE_BARRIER);
3243   share->set_this_property_assignments(undefined_value(), SKIP_WRITE_BARRIER);
3244   share->set_ast_node_count(0);
3245   share->set_stress_deopt_counter(FLAG_deopt_every_n_times);
3246   share->set_counters(0);
3247
3248   // Set integer fields (smi or int, depending on the architecture).
3249   share->set_length(0);
3250   share->set_formal_parameter_count(0);
3251   share->set_expected_nof_properties(0);
3252   share->set_num_literals(0);
3253   share->set_start_position_and_type(0);
3254   share->set_end_position(0);
3255   share->set_function_token_position(0);
3256   // All compiler hints default to false or 0.
3257   share->set_compiler_hints(0);
3258   share->set_this_property_assignments_count(0);
3259   share->set_opt_count(0);
3260
3261   return share;
3262 }
3263
3264
3265 MaybeObject* Heap::AllocateJSMessageObject(String* type,
3266                                            JSArray* arguments,
3267                                            int start_position,
3268                                            int end_position,
3269                                            Object* script,
3270                                            Object* stack_trace,
3271                                            Object* stack_frames) {
3272   Object* result;
3273   { MaybeObject* maybe_result = Allocate(message_object_map(), NEW_SPACE);
3274     if (!maybe_result->ToObject(&result)) return maybe_result;
3275   }
3276   JSMessageObject* message = JSMessageObject::cast(result);
3277   message->set_properties(Heap::empty_fixed_array(), SKIP_WRITE_BARRIER);
3278   message->initialize_elements();
3279   message->set_elements(Heap::empty_fixed_array(), SKIP_WRITE_BARRIER);
3280   message->set_type(type);
3281   message->set_arguments(arguments);
3282   message->set_start_position(start_position);
3283   message->set_end_position(end_position);
3284   message->set_script(script);
3285   message->set_stack_trace(stack_trace);
3286   message->set_stack_frames(stack_frames);
3287   return result;
3288 }
3289
3290
3291
3292 // Returns true for a character in a range.  Both limits are inclusive.
3293 static inline bool Between(uint32_t character, uint32_t from, uint32_t to) {
3294   // This makes use of the unsigned wraparound.
3295   return character - from <= to - from;
3296 }
3297
3298
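     // Creates a two-character string, preferring an existing symbol from the
     // symbol table when one with the same characters is present, and otherwise
     // allocating a sequential one-byte or two-byte string depending on the
     // character codes.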
3299 MUST_USE_RESULT static inline MaybeObject* MakeOrFindTwoCharacterString(
3300     Heap* heap,
3301     uint32_t c1,
3302     uint32_t c2) {
3303   String* symbol;
3304   // Numeric strings have a different hash algorithm not known by
3305   // LookupTwoCharsSymbolIfExists, so we skip this step for such strings.
3306   if ((!Between(c1, '0', '9') || !Between(c2, '0', '9')) &&
3307       heap->symbol_table()->LookupTwoCharsSymbolIfExists(c1, c2, &symbol)) {
3308     return symbol;
3309   // Now that we know the length is 2, we might as well make use of that
3310   // fact when building the new string.
3311   } else if ((c1 | c2) <= String::kMaxAsciiCharCodeU) {  // We can do this
3312     ASSERT(IsPowerOf2(String::kMaxAsciiCharCodeU + 1));  // because of this.
3313     Object* result;
3314     { MaybeObject* maybe_result = heap->AllocateRawOneByteString(2);
3315       if (!maybe_result->ToObject(&result)) return maybe_result;
3316     }
3317     char* dest = SeqOneByteString::cast(result)->GetChars();
3318     dest[0] = c1;
3319     dest[1] = c2;
3320     return result;
3321   } else {
3322     Object* result;
3323     { MaybeObject* maybe_result = heap->AllocateRawTwoByteString(2);
3324       if (!maybe_result->ToObject(&result)) return maybe_result;
3325     }
3326     uc16* dest = SeqTwoByteString::cast(result)->GetChars();
3327     dest[0] = c1;
3328     dest[1] = c2;
3329     return result;
3330   }
3331 }
3332
3333
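     // Concatenates two strings.  Empty operands are returned as-is, length-2
     // results go through the two-character symbol lookup above, and results
     // shorter than ConsString::kMinLength are flattened into a fresh
     // sequential string right away.  Only longer results become ConsStrings.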
3334 MaybeObject* Heap::AllocateConsString(String* first, String* second) {
3335   int first_length = first->length();
3336   if (first_length == 0) {
3337     return second;
3338   }
3339
3340   int second_length = second->length();
3341   if (second_length == 0) {
3342     return first;
3343   }
3344
3345   int length = first_length + second_length;
3346
3347   // Optimization for 2-byte strings often used as keys in a decompression
3348   // dictionary.  Check whether we already have the string in the symbol
3349   // table to prevent creation of many unnecessary strings.
3350   if (length == 2) {
3351     unsigned c1 = first->Get(0);
3352     unsigned c2 = second->Get(0);
3353     return MakeOrFindTwoCharacterString(this, c1, c2);
3354   }
3355
3356   bool first_is_ascii = first->IsOneByteRepresentation();
3357   bool second_is_ascii = second->IsOneByteRepresentation();
3358   bool is_ascii = first_is_ascii && second_is_ascii;
3359
3360   // Make sure that an out of memory exception is thrown if the length
3361   // of the new cons string is too large.
3362   if (length > String::kMaxLength || length < 0) {
3363     isolate()->context()->mark_out_of_memory();
3364     return Failure::OutOfMemoryException();
3365   }
3366
3367   bool is_ascii_data_in_two_byte_string = false;
3368   if (!is_ascii) {
3369     // At least one of the strings uses two-byte representation so we
3370     // can't use the fast case code for short ASCII strings below, but
3371     // we can try to save memory if all chars actually fit in ASCII.
3372     is_ascii_data_in_two_byte_string =
3373         first->HasOnlyAsciiChars() && second->HasOnlyAsciiChars();
3374     if (is_ascii_data_in_two_byte_string) {
3375       isolate_->counters()->string_add_runtime_ext_to_ascii()->Increment();
3376     }
3377   }
3378
3379   // If the resulting string is small make a flat string.
3380   if (length < ConsString::kMinLength) {
3381     // Note that neither of the two inputs can be a slice because:
3382     STATIC_ASSERT(ConsString::kMinLength <= SlicedString::kMinLength);
3383     ASSERT(first->IsFlat());
3384     ASSERT(second->IsFlat());
3385     if (is_ascii) {
3386       Object* result;
3387       { MaybeObject* maybe_result = AllocateRawOneByteString(length);
3388         if (!maybe_result->ToObject(&result)) return maybe_result;
3389       }
3390       // Copy the characters into the new object.
3391       char* dest = SeqOneByteString::cast(result)->GetChars();
3392       // Copy first part.
3393       const char* src;
3394       if (first->IsExternalString()) {
3395         src = ExternalAsciiString::cast(first)->GetChars();
3396       } else {
3397         src = SeqOneByteString::cast(first)->GetChars();
3398       }
3399       for (int i = 0; i < first_length; i++) *dest++ = src[i];
3400       // Copy second part.
3401       if (second->IsExternalString()) {
3402         src = ExternalAsciiString::cast(second)->GetChars();
3403       } else {
3404         src = SeqOneByteString::cast(second)->GetChars();
3405       }
3406       for (int i = 0; i < second_length; i++) *dest++ = src[i];
3407       return result;
3408     } else {
3409       if (is_ascii_data_in_two_byte_string) {
3410         Object* result;
3411         { MaybeObject* maybe_result = AllocateRawOneByteString(length);
3412           if (!maybe_result->ToObject(&result)) return maybe_result;
3413         }
3414         // Copy the characters into the new object.
3415         char* dest = SeqOneByteString::cast(result)->GetChars();
3416         String::WriteToFlat(first, dest, 0, first_length);
3417         String::WriteToFlat(second, dest + first_length, 0, second_length);
3418         isolate_->counters()->string_add_runtime_ext_to_ascii()->Increment();
3419         return result;
3420       }
3421
3422       Object* result;
3423       { MaybeObject* maybe_result = AllocateRawTwoByteString(length);
3424         if (!maybe_result->ToObject(&result)) return maybe_result;
3425       }
3426       // Copy the characters into the new object.
3427       uc16* dest = SeqTwoByteString::cast(result)->GetChars();
3428       String::WriteToFlat(first, dest, 0, first_length);
3429       String::WriteToFlat(second, dest + first_length, 0, second_length);
3430       return result;
3431     }
3432   }
3433
3434   Map* map = (is_ascii || is_ascii_data_in_two_byte_string) ?
3435       cons_ascii_string_map() : cons_string_map();
3436
3437   Object* result;
3438   { MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
3439     if (!maybe_result->ToObject(&result)) return maybe_result;
3440   }
3441
3442   AssertNoAllocation no_gc;
3443   ConsString* cons_string = ConsString::cast(result);
3444   WriteBarrierMode mode = cons_string->GetWriteBarrierMode(no_gc);
3445   cons_string->set_length(length);
3446   cons_string->set_hash_field(String::kEmptyHashField);
3447   cons_string->set_first(first, mode);
3448   cons_string->set_second(second, mode);
3449   return result;
3450 }
3451
3452
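     // Extracts the substring [start, end) of the given buffer.  Lengths 0, 1
     // and 2 are special-cased.  Longer substrings become SlicedStrings only if
     // string slices are enabled, the buffer could be flattened, the length
     // reaches SlicedString::kMinLength and the result is not tenured;
     // otherwise the characters are copied into a fresh sequential string.
     // Slices of cons strings and of other slices are re-parented so that
     // slices never nest.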
3453 MaybeObject* Heap::AllocateSubString(String* buffer,
3454                                      int start,
3455                                      int end,
3456                                      PretenureFlag pretenure) {
3457   int length = end - start;
3458   if (length <= 0) {
3459     return empty_string();
3460   } else if (length == 1) {
3461     return LookupSingleCharacterStringFromCode(buffer->Get(start));
3462   } else if (length == 2) {
3463     // Optimization for 2-byte strings often used as keys in a decompression
3464     // dictionary.  Check whether we already have the string in the symbol
3465     // table to prevent creation of many unnecessary strings.
3466     unsigned c1 = buffer->Get(start);
3467     unsigned c2 = buffer->Get(start + 1);
3468     return MakeOrFindTwoCharacterString(this, c1, c2);
3469   }
3470
3471   // Make an attempt to flatten the buffer to reduce access time.
3472   buffer = buffer->TryFlattenGetString();
3473
3474   if (!FLAG_string_slices ||
3475       !buffer->IsFlat() ||
3476       length < SlicedString::kMinLength ||
3477       pretenure == TENURED) {
3478     Object* result;
3479     // WriteToFlat takes care of the case when an indirect string has a
3480     // different encoding from its underlying string.  These encodings may
3481     // differ because of externalization.
3482     bool is_ascii = buffer->IsOneByteRepresentation();
3483     { MaybeObject* maybe_result = is_ascii
3484                                   ? AllocateRawOneByteString(length, pretenure)
3485                                   : AllocateRawTwoByteString(length, pretenure);
3486       if (!maybe_result->ToObject(&result)) return maybe_result;
3487     }
3488     String* string_result = String::cast(result);
3489     // Copy the characters into the new object.
3490     if (is_ascii) {
3491       ASSERT(string_result->IsOneByteRepresentation());
3492       char* dest = SeqOneByteString::cast(string_result)->GetChars();
3493       String::WriteToFlat(buffer, dest, start, end);
3494     } else {
3495       ASSERT(string_result->IsTwoByteRepresentation());
3496       uc16* dest = SeqTwoByteString::cast(string_result)->GetChars();
3497       String::WriteToFlat(buffer, dest, start, end);
3498     }
3499     return result;
3500   }
3501
3502   ASSERT(buffer->IsFlat());
3503 #ifdef VERIFY_HEAP
3504   if (FLAG_verify_heap) {
3505     buffer->StringVerify();
3506   }
3507 #endif
3508
3509   Object* result;
3510   // When slicing an indirect string we use its encoding for a newly created
3511   // slice and don't check the encoding of the underlying string.  This is safe
3512   // even if the encodings are different because of externalization.  If an
3513   // indirect ASCII string is pointing to a two-byte string, the two-byte char
3514   // codes of the underlying string must still fit into ASCII (because
3515   // externalization must not change char codes).
3516   { Map* map = buffer->IsOneByteRepresentation()
3517                  ? sliced_ascii_string_map()
3518                  : sliced_string_map();
3519     MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
3520     if (!maybe_result->ToObject(&result)) return maybe_result;
3521   }
3522
3523   AssertNoAllocation no_gc;
3524   SlicedString* sliced_string = SlicedString::cast(result);
3525   sliced_string->set_length(length);
3526   sliced_string->set_hash_field(String::kEmptyHashField);
3527   if (buffer->IsConsString()) {
3528     ConsString* cons = ConsString::cast(buffer);
3529     ASSERT(cons->second()->length() == 0);
3530     sliced_string->set_parent(cons->first());
3531     sliced_string->set_offset(start);
3532   } else if (buffer->IsSlicedString()) {
3533     // Prevent nesting sliced strings.
3534     SlicedString* parent_slice = SlicedString::cast(buffer);
3535     sliced_string->set_parent(parent_slice->parent());
3536     sliced_string->set_offset(start + parent_slice->offset());
3537   } else {
3538     sliced_string->set_parent(buffer);
3539     sliced_string->set_offset(start);
3540   }
3541   ASSERT(sliced_string->parent()->IsSeqString() ||
3542          sliced_string->parent()->IsExternalString());
3543   return result;
3544 }
3545
3546
3547 MaybeObject* Heap::AllocateExternalStringFromAscii(
3548     const ExternalAsciiString::Resource* resource) {
3549   size_t length = resource->length();
3550   if (length > static_cast<size_t>(String::kMaxLength)) {
3551     isolate()->context()->mark_out_of_memory();
3552     return Failure::OutOfMemoryException();
3553   }
3554
3555   ASSERT(String::IsAscii(resource->data(), static_cast<int>(length)));
3556
3557   Map* map = external_ascii_string_map();
3558   Object* result;
3559   { MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
3560     if (!maybe_result->ToObject(&result)) return maybe_result;
3561   }
3562
3563   ExternalAsciiString* external_string = ExternalAsciiString::cast(result);
3564   external_string->set_length(static_cast<int>(length));
3565   external_string->set_hash_field(String::kEmptyHashField);
3566   external_string->set_resource(resource);
3567
3568   return result;
3569 }
3570
3571
3572 MaybeObject* Heap::AllocateExternalStringFromTwoByte(
3573     const ExternalTwoByteString::Resource* resource) {
3574   size_t length = resource->length();
3575   if (length > static_cast<size_t>(String::kMaxLength)) {
3576     isolate()->context()->mark_out_of_memory();
3577     return Failure::OutOfMemoryException();
3578   }
3579
3580   // For small strings we check whether the resource contains only
3581   // ASCII characters.  If yes, we use a different string map.
3582   static const size_t kAsciiCheckLengthLimit = 32;
3583   bool is_ascii = length <= kAsciiCheckLengthLimit &&
3584       String::IsAscii(resource->data(), static_cast<int>(length));
3585   Map* map = is_ascii ?
3586       external_string_with_ascii_data_map() : external_string_map();
3587   Object* result;
3588   { MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
3589     if (!maybe_result->ToObject(&result)) return maybe_result;
3590   }
3591
3592   ExternalTwoByteString* external_string = ExternalTwoByteString::cast(result);
3593   external_string->set_length(static_cast<int>(length));
3594   external_string->set_hash_field(String::kEmptyHashField);
3595   external_string->set_resource(resource);
3596
3597   return result;
3598 }
3599
3600
3601 MaybeObject* Heap::LookupSingleCharacterStringFromCode(uint16_t code) {
3602   if (code <= String::kMaxAsciiCharCode) {
3603     Object* value = single_character_string_cache()->get(code);
3604     if (value != undefined_value()) return value;
3605
3606     char buffer[1];
3607     buffer[0] = static_cast<char>(code);
3608     Object* result;
3609     MaybeObject* maybe_result = LookupSymbol(Vector<const char>(buffer, 1));
3610
3611     if (!maybe_result->ToObject(&result)) return maybe_result;
3612     single_character_string_cache()->set(code, result);
3613     return result;
3614   }
3615
3616   Object* result;
3617   { MaybeObject* maybe_result = AllocateRawTwoByteString(1);
3618     if (!maybe_result->ToObject(&result)) return maybe_result;
3619   }
3620   String* answer = String::cast(result);
3621   answer->Set(0, code);
3622   return answer;
3623 }
3624
3625
3626 MaybeObject* Heap::AllocateByteArray(int length, PretenureFlag pretenure) {
3627   if (length < 0 || length > ByteArray::kMaxLength) {
3628     return Failure::OutOfMemoryException();
3629   }
3630   if (pretenure == NOT_TENURED) {
3631     return AllocateByteArray(length);
3632   }
3633   int size = ByteArray::SizeFor(length);
3634   Object* result;
3635   { MaybeObject* maybe_result = (size <= Page::kMaxNonCodeHeapObjectSize)
3636                    ? old_data_space_->AllocateRaw(size)
3637                    : lo_space_->AllocateRaw(size, NOT_EXECUTABLE);
3638     if (!maybe_result->ToObject(&result)) return maybe_result;
3639   }
3640
3641   reinterpret_cast<ByteArray*>(result)->set_map_no_write_barrier(
3642       byte_array_map());
3643   reinterpret_cast<ByteArray*>(result)->set_length(length);
3644   return result;
3645 }
3646
3647
3648 MaybeObject* Heap::AllocateByteArray(int length) {
3649   if (length < 0 || length > ByteArray::kMaxLength) {
3650     return Failure::OutOfMemoryException();
3651   }
3652   int size = ByteArray::SizeFor(length);
3653   AllocationSpace space =
3654       (size > Page::kMaxNonCodeHeapObjectSize) ? LO_SPACE : NEW_SPACE;
3655   Object* result;
3656   { MaybeObject* maybe_result = AllocateRaw(size, space, OLD_DATA_SPACE);
3657     if (!maybe_result->ToObject(&result)) return maybe_result;
3658   }
3659
3660   reinterpret_cast<ByteArray*>(result)->set_map_no_write_barrier(
3661       byte_array_map());
3662   reinterpret_cast<ByteArray*>(result)->set_length(length);
3663   return result;
3664 }
3665
3666
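     // Overwrites a dead region of the heap with a filler object so that heap
     // iteration stays consistent: a one- or two-pointer filler for the two
     // smallest sizes, and a FreeSpace object carrying its size for anything
     // larger.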
3667 void Heap::CreateFillerObjectAt(Address addr, int size) {
3668   if (size == 0) return;
3669   HeapObject* filler = HeapObject::FromAddress(addr);
3670   if (size == kPointerSize) {
3671     filler->set_map_no_write_barrier(one_pointer_filler_map());
3672   } else if (size == 2 * kPointerSize) {
3673     filler->set_map_no_write_barrier(two_pointer_filler_map());
3674   } else {
3675     filler->set_map_no_write_barrier(free_space_map());
3676     FreeSpace::cast(filler)->set_size(size);
3677   }
3678 }
3679
3680
3681 MaybeObject* Heap::AllocateExternalArray(int length,
3682                                          ExternalArrayType array_type,
3683                                          void* external_pointer,
3684                                          PretenureFlag pretenure) {
3685   AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
3686   Object* result;
3687   { MaybeObject* maybe_result = AllocateRaw(ExternalArray::kAlignedSize,
3688                                             space,
3689                                             OLD_DATA_SPACE);
3690     if (!maybe_result->ToObject(&result)) return maybe_result;
3691   }
3692
3693   reinterpret_cast<ExternalArray*>(result)->set_map_no_write_barrier(
3694       MapForExternalArrayType(array_type));
3695   reinterpret_cast<ExternalArray*>(result)->set_length(length);
3696   reinterpret_cast<ExternalArray*>(result)->set_external_pointer(
3697       external_pointer);
3698
3699   return result;
3700 }
3701
3702
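     // Allocates a Code object for the given code descriptor.  The relocation
     // info ByteArray is allocated first; code that is too large for a regular
     // code-space page goes straight to the large object space, and an
     // immovable object that lands on a movable code-space page is discarded
     // and re-allocated in the large object space, since only objects on the
     // first page of a space are guaranteed never to move.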
3703 MaybeObject* Heap::CreateCode(const CodeDesc& desc,
3704                               Code::Flags flags,
3705                               Handle<Object> self_reference,
3706                               bool immovable) {
3707   // Allocate the ByteArray before the Code object, so that we do not risk
3708   // leaving an uninitialized Code object behind (and breaking the heap).
3709   ByteArray* reloc_info;
3710   MaybeObject* maybe_reloc_info = AllocateByteArray(desc.reloc_size, TENURED);
3711   if (!maybe_reloc_info->To(&reloc_info)) return maybe_reloc_info;
3712
3713   // Compute size.
3714   int body_size = RoundUp(desc.instr_size, kObjectAlignment);
3715   int obj_size = Code::SizeFor(body_size);
3716   ASSERT(IsAligned(static_cast<intptr_t>(obj_size), kCodeAlignment));
3717   MaybeObject* maybe_result;
3718   // Large code objects and code objects which should stay at a fixed address
3719   // are allocated in large object space.
3720   HeapObject* result;
3721   bool force_lo_space = obj_size > code_space()->AreaSize();
3722   if (force_lo_space) {
3723     maybe_result = lo_space_->AllocateRaw(obj_size, EXECUTABLE);
3724   } else {
3725     maybe_result = code_space_->AllocateRaw(obj_size);
3726   }
3727   if (!maybe_result->To<HeapObject>(&result)) return maybe_result;
3728
3729   if (immovable && !force_lo_space &&
3730       // Objects on the first page of each space are never moved.
3731       !code_space_->FirstPage()->Contains(result->address())) {
3732     // Discard the first code allocation, which was on a page where it could be
3733     // moved.
3734     CreateFillerObjectAt(result->address(), obj_size);
3735     maybe_result = lo_space_->AllocateRaw(obj_size, EXECUTABLE);
3736     if (!maybe_result->To<HeapObject>(&result)) return maybe_result;
3737   }
3738
3739   // Initialize the object
3740   result->set_map_no_write_barrier(code_map());
3741   Code* code = Code::cast(result);
3742   ASSERT(!isolate_->code_range()->exists() ||
3743       isolate_->code_range()->contains(code->address()));
3744   code->set_instruction_size(desc.instr_size);
3745   code->set_relocation_info(reloc_info);
3746   code->set_flags(flags);
3747   if (code->is_call_stub() || code->is_keyed_call_stub()) {
3748     code->set_check_type(RECEIVER_MAP_CHECK);
3749   }
3750   code->set_deoptimization_data(empty_fixed_array(), SKIP_WRITE_BARRIER);
3751   code->InitializeTypeFeedbackInfoNoWriteBarrier(undefined_value());
3752   code->set_handler_table(empty_fixed_array(), SKIP_WRITE_BARRIER);
3753   code->set_gc_metadata(Smi::FromInt(0));
3754   code->set_ic_age(global_ic_age_);
3755   code->set_prologue_offset(kPrologueOffsetNotSet);
3756   // Allow self references to the created code object by patching the handle
3757   // to point to the newly allocated Code object.
3758   if (!self_reference.is_null()) {
3759     *(self_reference.location()) = code;
3760   }
3761   // Migrate generated code.
3762   // The generated code can contain Object** values (typically from handles)
3763   // that are dereferenced during the copy to point directly to the actual heap
3764   // objects. These pointers can include references to the code object itself,
3765   // through the self_reference parameter.
3766   code->CopyFrom(desc);
3767
3768 #ifdef VERIFY_HEAP
3769   if (FLAG_verify_heap) {
3770     code->Verify();
3771   }
3772 #endif
3773   return code;
3774 }
3775
3776
3777 MaybeObject* Heap::CopyCode(Code* code) {
3778   // Allocate an object the same size as the code object.
3779   int obj_size = code->Size();
3780   MaybeObject* maybe_result;
3781   if (obj_size > code_space()->AreaSize()) {
3782     maybe_result = lo_space_->AllocateRaw(obj_size, EXECUTABLE);
3783   } else {
3784     maybe_result = code_space_->AllocateRaw(obj_size);
3785   }
3786
3787   Object* result;
3788   if (!maybe_result->ToObject(&result)) return maybe_result;
3789
3790   // Copy code object.
3791   Address old_addr = code->address();
3792   Address new_addr = reinterpret_cast<HeapObject*>(result)->address();
3793   CopyBlock(new_addr, old_addr, obj_size);
3794   // Relocate the copy.
3795   Code* new_code = Code::cast(result);
3796   ASSERT(!isolate_->code_range()->exists() ||
3797       isolate_->code_range()->contains(code->address()));
3798   new_code->Relocate(new_addr - old_addr);
3799   return new_code;
3800 }
3801
3802
3803 MaybeObject* Heap::CopyCode(Code* code, Vector<byte> reloc_info) {
3804   // Allocate the ByteArray before the Code object, so that we do not risk
3805   // leaving an uninitialized Code object behind (and breaking the heap).
3806   Object* reloc_info_array;
3807   { MaybeObject* maybe_reloc_info_array =
3808         AllocateByteArray(reloc_info.length(), TENURED);
3809     if (!maybe_reloc_info_array->ToObject(&reloc_info_array)) {
3810       return maybe_reloc_info_array;
3811     }
3812   }
3813
3814   int new_body_size = RoundUp(code->instruction_size(), kObjectAlignment);
3815
3816   int new_obj_size = Code::SizeFor(new_body_size);
3817
3818   Address old_addr = code->address();
3819
3820   size_t relocation_offset =
3821       static_cast<size_t>(code->instruction_end() - old_addr);
3822
3823   MaybeObject* maybe_result;
3824   if (new_obj_size > code_space()->AreaSize()) {
3825     maybe_result = lo_space_->AllocateRaw(new_obj_size, EXECUTABLE);
3826   } else {
3827     maybe_result = code_space_->AllocateRaw(new_obj_size);
3828   }
3829
3830   Object* result;
3831   if (!maybe_result->ToObject(&result)) return maybe_result;
3832
3833   // Copy code object.
3834   Address new_addr = reinterpret_cast<HeapObject*>(result)->address();
3835
3836   // Copy header and instructions.
3837   memcpy(new_addr, old_addr, relocation_offset);
3838
3839   Code* new_code = Code::cast(result);
3840   new_code->set_relocation_info(ByteArray::cast(reloc_info_array));
3841
3842   // Copy patched rinfo.
3843   memcpy(new_code->relocation_start(), reloc_info.start(), reloc_info.length());
3844
3845   // Relocate the copy.
3846   ASSERT(!isolate_->code_range()->exists() ||
3847       isolate_->code_range()->contains(code->address()));
3848   new_code->Relocate(new_addr - old_addr);
3849
3850 #ifdef VERIFY_HEAP
3851   if (FLAG_verify_heap) {
3852     new_code->Verify();
3853   }
3854 #endif
3855   return new_code;
3856 }
3857
3858
3859 MaybeObject* Heap::Allocate(Map* map, AllocationSpace space) {
3860   ASSERT(gc_state_ == NOT_IN_GC);
3861   ASSERT(map->instance_type() != MAP_TYPE);
3862   // If allocation failures are disallowed, we may allocate in a different
3863   // space when new space is full and the object is not a large object.
3864   AllocationSpace retry_space =
3865       (space != NEW_SPACE) ? space : TargetSpaceId(map->instance_type());
3866   Object* result;
3867   { MaybeObject* maybe_result =
3868         AllocateRaw(map->instance_size(), space, retry_space);
3869     if (!maybe_result->ToObject(&result)) return maybe_result;
3870   }
3871   // No need for write barrier since object is white and map is in old space.
3872   HeapObject::cast(result)->set_map_no_write_barrier(map);
3873   return result;
3874 }
3875
3876
3877 void Heap::InitializeFunction(JSFunction* function,
3878                               SharedFunctionInfo* shared,
3879                               Object* prototype) {
3880   ASSERT(!prototype->IsMap());
3881   function->initialize_properties();
3882   function->initialize_elements();
3883   function->set_shared(shared);
3884   function->set_code(shared->code());
3885   function->set_prototype_or_initial_map(prototype);
3886   function->set_context(undefined_value());
3887   function->set_literals_or_bindings(empty_fixed_array());
3888   function->set_next_function_link(undefined_value());
3889 }
3890
3891
3892 MaybeObject* Heap::AllocateFunctionPrototype(JSFunction* function) {
3893   // Allocate the prototype.  Make sure to use the object function
3894   // from the function's context, since the function can be from a
3895   // different context.
3896   JSFunction* object_function =
3897       function->context()->native_context()->object_function();
3898
3899   // Each function prototype gets a copy of the object function map.
3900   // This avoids unwanted sharing of maps between prototypes of different
3901   // constructors.
3902   Map* new_map;
3903   ASSERT(object_function->has_initial_map());
3904   MaybeObject* maybe_map = object_function->initial_map()->Copy();
3905   if (!maybe_map->To(&new_map)) return maybe_map;
3906
3907   Object* prototype;
3908   MaybeObject* maybe_prototype = AllocateJSObjectFromMap(new_map);
3909   if (!maybe_prototype->ToObject(&prototype)) return maybe_prototype;
3910
3911   // When creating the prototype for the function we must set its
3912   // constructor to the function.
3913   MaybeObject* maybe_failure =
3914       JSObject::cast(prototype)->SetLocalPropertyIgnoreAttributes(
3915           constructor_symbol(), function, DONT_ENUM);
3916   if (maybe_failure->IsFailure()) return maybe_failure;
3917
3918   return prototype;
3919 }
3920
3921
3922 MaybeObject* Heap::AllocateFunction(Map* function_map,
3923                                     SharedFunctionInfo* shared,
3924                                     Object* prototype,
3925                                     PretenureFlag pretenure) {
3926   AllocationSpace space =
3927       (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
3928   Object* result;
3929   { MaybeObject* maybe_result = Allocate(function_map, space);
3930     if (!maybe_result->ToObject(&result)) return maybe_result;
3931   }
3932   InitializeFunction(JSFunction::cast(result), shared, prototype);
3933   return result;
3934 }
3935
3936
3937 MaybeObject* Heap::AllocateArgumentsObject(Object* callee, int length) {
3938   // To get fast allocation and map sharing for arguments objects we
3939   // allocate them based on an arguments boilerplate.
3940
3941   JSObject* boilerplate;
3942   int arguments_object_size;
3943   bool strict_mode_callee = callee->IsJSFunction() &&
3944       !JSFunction::cast(callee)->shared()->is_classic_mode();
3945   if (strict_mode_callee) {
3946     boilerplate =
3947         isolate()->context()->native_context()->
3948             strict_mode_arguments_boilerplate();
3949     arguments_object_size = kArgumentsObjectSizeStrict;
3950   } else {
3951     boilerplate =
3952         isolate()->context()->native_context()->arguments_boilerplate();
3953     arguments_object_size = kArgumentsObjectSize;
3954   }
3955
3956   // This calls Copy directly rather than using Heap::AllocateRaw so we
3957   // duplicate the check here.
3958   ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
3959
3960   // Check that the size of the boilerplate matches our
3961   // expectations. The ArgumentsAccessStub::GenerateNewObject relies
3962   // on the size being a known constant.
3963   ASSERT(arguments_object_size == boilerplate->map()->instance_size());
3964
3965   // Do the allocation.
3966   Object* result;
3967   { MaybeObject* maybe_result =
3968         AllocateRaw(arguments_object_size, NEW_SPACE, OLD_POINTER_SPACE);
3969     if (!maybe_result->ToObject(&result)) return maybe_result;
3970   }
3971
3972   // Copy the content. The arguments boilerplate doesn't have any
3973   // fields that point to new space so it's safe to skip the write
3974   // barrier here.
3975   CopyBlock(HeapObject::cast(result)->address(),
3976             boilerplate->address(),
3977             JSObject::kHeaderSize);
3978
3979   // Set the length property.
3980   JSObject::cast(result)->InObjectPropertyAtPut(kArgumentsLengthIndex,
3981                                                 Smi::FromInt(length),
3982                                                 SKIP_WRITE_BARRIER);
3983   // Set the callee property for non-strict mode arguments object only.
3984   if (!strict_mode_callee) {
3985     JSObject::cast(result)->InObjectPropertyAtPut(kArgumentsCalleeIndex,
3986                                                   callee);
3987   }
3988
3989   // Check the state of the object
3990   ASSERT(JSObject::cast(result)->HasFastProperties());
3991   ASSERT(JSObject::cast(result)->HasFastObjectElements());
3992
3993   return result;
3994 }
3995
3996
3997 static bool HasDuplicates(DescriptorArray* descriptors) {
3998   int count = descriptors->number_of_descriptors();
3999   if (count > 1) {
4000     String* prev_key = descriptors->GetKey(0);
4001     for (int i = 1; i != count; i++) {
4002       String* current_key = descriptors->GetKey(i);
4003       if (prev_key == current_key) return true;
4004       prev_key = current_key;
4005     }
4006   }
4007   return false;
4008 }
4009
4010
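     // Builds the initial map for instances of the given function.  Instance
     // size and in-object property count come from the SharedFunctionInfo, the
     // prototype is fetched or freshly allocated, and field descriptors for the
     // simple this-property assignments are installed up front when an inline
     // constructor can be generated and the assignments fit in-object with
     // unique names.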
4011 MaybeObject* Heap::AllocateInitialMap(JSFunction* fun) {
4012   ASSERT(!fun->has_initial_map());
4013
4014   // First create a new map with the size and number of in-object properties
4015   // suggested by the function.
4016   int instance_size = fun->shared()->CalculateInstanceSize();
4017   int in_object_properties = fun->shared()->CalculateInObjectProperties();
4018   Map* map;
4019   MaybeObject* maybe_map = AllocateMap(JS_OBJECT_TYPE, instance_size);
4020   if (!maybe_map->To(&map)) return maybe_map;
4021
4022   // Fetch or allocate prototype.
4023   Object* prototype;
4024   if (fun->has_instance_prototype()) {
4025     prototype = fun->instance_prototype();
4026   } else {
4027     MaybeObject* maybe_prototype = AllocateFunctionPrototype(fun);
4028     if (!maybe_prototype->To(&prototype)) return maybe_prototype;
4029   }
4030   map->set_inobject_properties(in_object_properties);
4031   map->set_unused_property_fields(in_object_properties);
4032   map->set_prototype(prototype);
4033   ASSERT(map->has_fast_object_elements());
4034
4035   // If the function has only simple this property assignments add
4036   // field descriptors for these to the initial map as the object
4037   // cannot be constructed without having these properties.  Guard by
4038   // the inline_new flag so we only change the map if we generate a
4039   // specialized construct stub.
4040   ASSERT(in_object_properties <= Map::kMaxPreAllocatedPropertyFields);
4041   if (fun->shared()->CanGenerateInlineConstructor(prototype)) {
4042     int count = fun->shared()->this_property_assignments_count();
4043     if (count > in_object_properties) {
4044       // Inline constructor can only handle inobject properties.
4045       fun->shared()->ForbidInlineConstructor();
4046     } else {
4047       DescriptorArray* descriptors;
4048       MaybeObject* maybe_descriptors = DescriptorArray::Allocate(count);
4049       if (!maybe_descriptors->To(&descriptors)) return maybe_descriptors;
4050
4051       DescriptorArray::WhitenessWitness witness(descriptors);
4052       for (int i = 0; i < count; i++) {
4053         String* name = fun->shared()->GetThisPropertyAssignmentName(i);
4054         ASSERT(name->IsSymbol());
4055         FieldDescriptor field(name, i, NONE, i + 1);
4056         descriptors->Set(i, &field, witness);
4057       }
4058       descriptors->Sort();
4059
4060       // The descriptors may contain duplicates because the compiler does not
4061       // guarantee the uniqueness of property names (it would have required
4062       // quadratic time). Once the descriptors are sorted we can check for
4063       // duplicates in linear time.
4064       if (HasDuplicates(descriptors)) {
4065         fun->shared()->ForbidInlineConstructor();
4066       } else {
4067         map->InitializeDescriptors(descriptors);
4068         map->set_pre_allocated_property_fields(count);
4069         map->set_unused_property_fields(in_object_properties - count);
4070       }
4071     }
4072   }
4073
4074   fun->shared()->StartInobjectSlackTracking(map);
4075
4076   return map;
4077 }
4078
4079
4080 void Heap::InitializeJSObjectFromMap(JSObject* obj,
4081                                      FixedArray* properties,
4082                                      Map* map) {
4083   obj->set_properties(properties);
4084   obj->initialize_elements();
4085   // TODO(1240798): Initialize the object's body using valid initial values
4086   // according to the object's initial map.  For example, if the map's
4087   // instance type is JS_ARRAY_TYPE, the length field should be initialized
4088   // to a number (e.g. Smi::FromInt(0)) and the elements initialized to a
4089   // fixed array (e.g. Heap::empty_fixed_array()).  Currently, the object
4090   // verification code has to cope with (temporarily) invalid objects.  See,
4091   // for example, JSArray::JSArrayVerify.
4092   Object* filler;
4093   // We cannot always fill with one_pointer_filler_map because objects
4094   // created from API functions expect their internal fields to be initialized
4095   // with undefined_value.
4096   // Pre-allocated fields need to be initialized with undefined_value as well
4097   // so that object accesses before the constructor completes (e.g. in the
4098   // debugger) will not cause a crash.
4099   if (map->constructor()->IsJSFunction() &&
4100       JSFunction::cast(map->constructor())->shared()->
4101           IsInobjectSlackTrackingInProgress()) {
4102     // We might want to shrink the object later.
4103     ASSERT(obj->GetInternalFieldCount() == 0);
4104     filler = Heap::one_pointer_filler_map();
4105   } else {
4106     filler = Heap::undefined_value();
4107   }
4108   obj->InitializeBody(map, Heap::undefined_value(), filler);
4109 }
4110
4111
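     // Allocates a JSObject for the given map.  The properties backing store is
     // sized as the pre-allocated fields plus the unused fields minus the
     // in-object properties, and instances larger than
     // Page::kMaxNonCodeHeapObjectSize go to the large object space.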
4112 MaybeObject* Heap::AllocateJSObjectFromMap(Map* map, PretenureFlag pretenure) {
4113   // JSFunctions should be allocated using AllocateFunction to be
4114   // properly initialized.
4115   ASSERT(map->instance_type() != JS_FUNCTION_TYPE);
4116
4117   // Both types of global objects should be allocated using
4118   // AllocateGlobalObject to be properly initialized.
4119   ASSERT(map->instance_type() != JS_GLOBAL_OBJECT_TYPE);
4120   ASSERT(map->instance_type() != JS_BUILTINS_OBJECT_TYPE);
4121
4122   // Allocate the backing storage for the properties.
4123   int prop_size =
4124       map->pre_allocated_property_fields() +
4125       map->unused_property_fields() -
4126       map->inobject_properties();
4127   ASSERT(prop_size >= 0);
4128   Object* properties;
4129   { MaybeObject* maybe_properties = AllocateFixedArray(prop_size, pretenure);
4130     if (!maybe_properties->ToObject(&properties)) return maybe_properties;
4131   }
4132
4133   // Allocate the JSObject.
4134   AllocationSpace space =
4135       (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
4136   if (map->instance_size() > Page::kMaxNonCodeHeapObjectSize) space = LO_SPACE;
4137   Object* obj;
4138   { MaybeObject* maybe_obj = Allocate(map, space);
4139     if (!maybe_obj->ToObject(&obj)) return maybe_obj;
4140   }
4141
4142   // Initialize the JSObject.
4143   InitializeJSObjectFromMap(JSObject::cast(obj),
4144                             FixedArray::cast(properties),
4145                             map);
4146   ASSERT(JSObject::cast(obj)->HasFastElements());
4147   return obj;
4148 }
4149
4150
4151 MaybeObject* Heap::AllocateJSObject(JSFunction* constructor,
4152                                     PretenureFlag pretenure) {
4153   // Allocate the initial map if absent.
4154   if (!constructor->has_initial_map()) {
4155     Object* initial_map;
4156     { MaybeObject* maybe_initial_map = AllocateInitialMap(constructor);
4157       if (!maybe_initial_map->ToObject(&initial_map)) return maybe_initial_map;
4158     }
4159     constructor->set_initial_map(Map::cast(initial_map));
4160     Map::cast(initial_map)->set_constructor(constructor);
4161   }
4162   // Allocate the object based on the constructor's initial map.
4163   MaybeObject* result = AllocateJSObjectFromMap(
4164       constructor->initial_map(), pretenure);
4165 #ifdef DEBUG
4166   // Make sure result is NOT a global object if valid.
4167   Object* non_failure;
4168   ASSERT(!result->ToObject(&non_failure) || !non_failure->IsGlobalObject());
4169 #endif
4170   return result;
4171 }
4172
4173
4174 MaybeObject* Heap::AllocateJSModule(Context* context, ScopeInfo* scope_info) {
4175   // Allocate a fresh map. Modules do not have a prototype.
4176   Map* map;
4177   MaybeObject* maybe_map = AllocateMap(JS_MODULE_TYPE, JSModule::kSize);
4178   if (!maybe_map->To(&map)) return maybe_map;
4179   // Allocate the object based on the map.
4180   JSModule* module;
4181   MaybeObject* maybe_module = AllocateJSObjectFromMap(map, TENURED);
4182   if (!maybe_module->To(&module)) return maybe_module;
4183   module->set_context(context);
4184   module->set_scope_info(scope_info);
4185   return module;
4186 }
4187
4188
4189 MaybeObject* Heap::AllocateJSArrayAndStorage(
4190     ElementsKind elements_kind,
4191     int length,
4192     int capacity,
4193     ArrayStorageAllocationMode mode,
4194     PretenureFlag pretenure) {
4195   ASSERT(capacity >= length);
4196   MaybeObject* maybe_array = AllocateJSArray(elements_kind, pretenure);
4197   JSArray* array;
4198   if (!maybe_array->To(&array)) return maybe_array;
4199
4200   if (capacity == 0) {
4201     array->set_length(Smi::FromInt(0));
4202     array->set_elements(empty_fixed_array());
4203     return array;
4204   }
4205
4206   FixedArrayBase* elms;
4207   MaybeObject* maybe_elms = NULL;
4208   if (IsFastDoubleElementsKind(elements_kind)) {
4209     if (mode == DONT_INITIALIZE_ARRAY_ELEMENTS) {
4210       maybe_elms = AllocateUninitializedFixedDoubleArray(capacity);
4211     } else {
4212       ASSERT(mode == INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
4213       maybe_elms = AllocateFixedDoubleArrayWithHoles(capacity);
4214     }
4215   } else {
4216     ASSERT(IsFastSmiOrObjectElementsKind(elements_kind));
4217     if (mode == DONT_INITIALIZE_ARRAY_ELEMENTS) {
4218       maybe_elms = AllocateUninitializedFixedArray(capacity);
4219     } else {
4220       ASSERT(mode == INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
4221       maybe_elms = AllocateFixedArrayWithHoles(capacity);
4222     }
4223   }
4224   if (!maybe_elms->To(&elms)) return maybe_elms;
4225
4226   array->set_elements(elms);
4227   array->set_length(Smi::FromInt(length));
4228   return array;
4229 }
4230
4231
4232 MaybeObject* Heap::AllocateJSArrayWithElements(
4233     FixedArrayBase* elements,
4234     ElementsKind elements_kind,
4235     int length,
4236     PretenureFlag pretenure) {
4237   MaybeObject* maybe_array = AllocateJSArray(elements_kind, pretenure);
4238   JSArray* array;
4239   if (!maybe_array->To(&array)) return maybe_array;
4240
4241   array->set_elements(elements);
4242   array->set_length(Smi::FromInt(length));
4243   array->ValidateElements();
4244   return array;
4245 }
4246
4247
4248 MaybeObject* Heap::AllocateJSProxy(Object* handler, Object* prototype) {
4249   // Allocate map.
4250   // TODO(rossberg): Once we optimize proxies, think about a scheme to share
4251   // maps. Will probably depend on the identity of the handler object, too.
4252   Map* map;
4253   MaybeObject* maybe_map_obj = AllocateMap(JS_PROXY_TYPE, JSProxy::kSize);
4254   if (!maybe_map_obj->To<Map>(&map)) return maybe_map_obj;
4255   map->set_prototype(prototype);
4256
4257   // Allocate the proxy object.
4258   JSProxy* result;
4259   MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
4260   if (!maybe_result->To<JSProxy>(&result)) return maybe_result;
4261   result->InitializeBody(map->instance_size(), Smi::FromInt(0));
4262   result->set_handler(handler);
4263   result->set_hash(undefined_value(), SKIP_WRITE_BARRIER);
4264   return result;
4265 }
4266
4267
4268 MaybeObject* Heap::AllocateJSFunctionProxy(Object* handler,
4269                                            Object* call_trap,
4270                                            Object* construct_trap,
4271                                            Object* prototype) {
4272   // Allocate map.
4273   // TODO(rossberg): Once we optimize proxies, think about a scheme to share
4274   // maps. Will probably depend on the identity of the handler object, too.
4275   Map* map;
4276   MaybeObject* maybe_map_obj =
4277       AllocateMap(JS_FUNCTION_PROXY_TYPE, JSFunctionProxy::kSize);
4278   if (!maybe_map_obj->To<Map>(&map)) return maybe_map_obj;
4279   map->set_prototype(prototype);
4280
4281   // Allocate the proxy object.
4282   JSFunctionProxy* result;
4283   MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
4284   if (!maybe_result->To<JSFunctionProxy>(&result)) return maybe_result;
4285   result->InitializeBody(map->instance_size(), Smi::FromInt(0));
4286   result->set_handler(handler);
4287   result->set_hash(undefined_value(), SKIP_WRITE_BARRIER);
4288   result->set_call_trap(call_trap);
4289   result->set_construct_trap(construct_trap);
4290   return result;
4291 }
4292
4293
4294 MaybeObject* Heap::AllocateGlobalObject(JSFunction* constructor) {
4295   ASSERT(constructor->has_initial_map());
4296   Map* map = constructor->initial_map();
4297   ASSERT(map->is_dictionary_map());
4298
4299   // Make sure no field properties are described in the initial map.
4300   // This guarantees us that normalizing the properties does not
4301   // require us to change property values to JSGlobalPropertyCells.
4302   ASSERT(map->NextFreePropertyIndex() == 0);
4303
4304   // Make sure we don't have a ton of pre-allocated slots in the
4305   // global objects. They will be unused once we normalize the object.
4306   ASSERT(map->unused_property_fields() == 0);
4307   ASSERT(map->inobject_properties() == 0);
4308
4309   // Initial size of the backing store to avoid resizing the storage during
4310   // bootstrapping. The size differs between the JS global object and the
4311   // builtins object.
4312   int initial_size = map->instance_type() == JS_GLOBAL_OBJECT_TYPE ? 64 : 512;
4313
4314   // Allocate a dictionary object for backing storage.
4315   StringDictionary* dictionary;
4316   MaybeObject* maybe_dictionary =
4317       StringDictionary::Allocate(
4318           map->NumberOfOwnDescriptors() * 2 + initial_size);
4319   if (!maybe_dictionary->To(&dictionary)) return maybe_dictionary;
4320
4321   // The global object might be created from an object template with accessors.
4322   // Fill these accessors into the dictionary.
4323   DescriptorArray* descs = map->instance_descriptors();
4324   for (int i = 0; i < descs->number_of_descriptors(); i++) {
4325     PropertyDetails details = descs->GetDetails(i);
4326     ASSERT(details.type() == CALLBACKS);  // Only accessors are expected.
4327     PropertyDetails d = PropertyDetails(details.attributes(),
4328                                         CALLBACKS,
4329                                         details.descriptor_index());
4330     Object* value = descs->GetCallbacksObject(i);
4331     MaybeObject* maybe_value = AllocateJSGlobalPropertyCell(value);
4332     if (!maybe_value->ToObject(&value)) return maybe_value;
4333
4334     MaybeObject* maybe_added = dictionary->Add(descs->GetKey(i), value, d);
4335     if (!maybe_added->To(&dictionary)) return maybe_added;
4336   }
4337
4338   // Allocate the global object and initialize it with the backing store.
4339   JSObject* global;
4340   MaybeObject* maybe_global = Allocate(map, OLD_POINTER_SPACE);
4341   if (!maybe_global->To(&global)) return maybe_global;
4342
4343   InitializeJSObjectFromMap(global, dictionary, map);
4344
4345   // Create a new map for the global object.
4346   Map* new_map;
4347   MaybeObject* maybe_map = map->CopyDropDescriptors();
4348   if (!maybe_map->To(&new_map)) return maybe_map;
4349   new_map->set_dictionary_map(true);
4350
4351   // Set up the global object as a normalized object.
4352   global->set_map(new_map);
4353   global->set_properties(dictionary);
4354
4355   // Make sure result is a global object with properties in dictionary.
4356   ASSERT(global->IsGlobalObject());
4357   ASSERT(!global->HasFastProperties());
4358   return global;
4359 }
4360
4361
4362 MaybeObject* Heap::CopyJSObject(JSObject* source) {
4363   // Never used to copy functions.  If functions need to be copied we
4364   // have to be careful to clear the literals array.
4365   SLOW_ASSERT(!source->IsJSFunction());
4366
4367   // Make the clone.
4368   Map* map = source->map();
4369   int object_size = map->instance_size();
4370   Object* clone;
4371
4372   WriteBarrierMode wb_mode = UPDATE_WRITE_BARRIER;
4373
4374   // If we're forced to always allocate, we use the general allocation
4375   // functions which may leave us with an object in old space.
4376   if (always_allocate()) {
4377     { MaybeObject* maybe_clone =
4378           AllocateRaw(object_size, NEW_SPACE, OLD_POINTER_SPACE);
4379       if (!maybe_clone->ToObject(&clone)) return maybe_clone;
4380     }
4381     Address clone_address = HeapObject::cast(clone)->address();
4382     CopyBlock(clone_address,
4383               source->address(),
4384               object_size);
4385     // Update write barrier for all fields that lie beyond the header.
4386     RecordWrites(clone_address,
4387                  JSObject::kHeaderSize,
4388                  (object_size - JSObject::kHeaderSize) / kPointerSize);
4389   } else {
4390     wb_mode = SKIP_WRITE_BARRIER;
4391     { MaybeObject* maybe_clone = new_space_.AllocateRaw(object_size);
4392       if (!maybe_clone->ToObject(&clone)) return maybe_clone;
4393     }
4394     SLOW_ASSERT(InNewSpace(clone));
4395     // Since we know the clone is allocated in new space, we can copy
4396     // the contents without worrying about updating the write barrier.
4397     CopyBlock(HeapObject::cast(clone)->address(),
4398               source->address(),
4399               object_size);
4400   }
4401
4402   SLOW_ASSERT(
4403       JSObject::cast(clone)->GetElementsKind() == source->GetElementsKind());
4404   FixedArrayBase* elements = FixedArrayBase::cast(source->elements());
4405   FixedArray* properties = FixedArray::cast(source->properties());
4406   // Update elements if necessary.
4407   if (elements->length() > 0) {
4408     Object* elem;
4409     { MaybeObject* maybe_elem;
4410       if (elements->map() == fixed_cow_array_map()) {
4411         maybe_elem = FixedArray::cast(elements);
4412       } else if (source->HasFastDoubleElements()) {
4413         maybe_elem = CopyFixedDoubleArray(FixedDoubleArray::cast(elements));
4414       } else {
4415         maybe_elem = CopyFixedArray(FixedArray::cast(elements));
4416       }
4417       if (!maybe_elem->ToObject(&elem)) return maybe_elem;
4418     }
4419     JSObject::cast(clone)->set_elements(FixedArrayBase::cast(elem), wb_mode);
4420   }
4421   // Update properties if necessary.
4422   if (properties->length() > 0) {
4423     Object* prop;
4424     { MaybeObject* maybe_prop = CopyFixedArray(properties);
4425       if (!maybe_prop->ToObject(&prop)) return maybe_prop;
4426     }
4427     JSObject::cast(clone)->set_properties(FixedArray::cast(prop), wb_mode);
4428   }
4429   // Return the new clone.
4430   return clone;
4431 }
4432
4433
4434 MaybeObject* Heap::ReinitializeJSReceiver(
4435     JSReceiver* object, InstanceType type, int size) {
4436   ASSERT(type >= FIRST_JS_OBJECT_TYPE);
4437
4438   // Allocate fresh map.
4439   // TODO(rossberg): Once we optimize proxies, cache these maps.
4440   Map* map;
4441   MaybeObject* maybe = AllocateMap(type, size);
4442   if (!maybe->To<Map>(&map)) return maybe;
4443
4444   // Check that the receiver has at least the size of the fresh object.
4445   int size_difference = object->map()->instance_size() - map->instance_size();
4446   ASSERT(size_difference >= 0);
4447
4448   map->set_prototype(object->map()->prototype());
4449
4450   // Allocate the backing storage for the properties.
4451   int prop_size = map->unused_property_fields() - map->inobject_properties();
4452   Object* properties;
4453   maybe = AllocateFixedArray(prop_size, TENURED);
4454   if (!maybe->ToObject(&properties)) return maybe;
4455
4456   // Functions require some allocation, which might fail here.
4457   SharedFunctionInfo* shared = NULL;
4458   if (type == JS_FUNCTION_TYPE) {
4459     String* name;
4460     maybe = LookupAsciiSymbol("<freezing call trap>");
4461     if (!maybe->To<String>(&name)) return maybe;
4462     maybe = AllocateSharedFunctionInfo(name);
4463     if (!maybe->To<SharedFunctionInfo>(&shared)) return maybe;
4464   }
4465
4466   // Because of possible retries of this function after failure,
4467   // we must NOT fail after this point, where we have changed the type!
4468
4469   // Reset the map for the object.
4470   object->set_map(map);
4471   JSObject* jsobj = JSObject::cast(object);
4472
4473   // Reinitialize the object from the constructor map.
4474   InitializeJSObjectFromMap(jsobj, FixedArray::cast(properties), map);
4475
4476   // Functions require some minimal initialization.
4477   if (type == JS_FUNCTION_TYPE) {
4478     map->set_function_with_prototype(true);
4479     InitializeFunction(JSFunction::cast(object), shared, the_hole_value());
4480     JSFunction::cast(object)->set_context(
4481         isolate()->context()->native_context());
4482   }
4483
4484   // Put in filler if the new object is smaller than the old.
4485   if (size_difference > 0) {
4486     CreateFillerObjectAt(
4487         object->address() + map->instance_size(), size_difference);
4488   }
4489
4490   return object;
4491 }
4492
4493
4494 MaybeObject* Heap::ReinitializeJSGlobalProxy(JSFunction* constructor,
4495                                              JSGlobalProxy* object) {
4496   ASSERT(constructor->has_initial_map());
4497   Map* map = constructor->initial_map();
4498
4499   // Check that the already allocated object has the same size and type as
4500   // objects allocated using the constructor.
4501   ASSERT(map->instance_size() == object->map()->instance_size());
4502   ASSERT(map->instance_type() == object->map()->instance_type());
4503
4504   // Allocate the backing storage for the properties.
4505   int prop_size = map->unused_property_fields() - map->inobject_properties();
4506   Object* properties;
4507   { MaybeObject* maybe_properties = AllocateFixedArray(prop_size, TENURED);
4508     if (!maybe_properties->ToObject(&properties)) return maybe_properties;
4509   }
4510
4511   // Reset the map for the object.
4512   object->set_map(constructor->initial_map());
4513
4514   // Reinitialize the object from the constructor map.
4515   InitializeJSObjectFromMap(object, FixedArray::cast(properties), map);
4516   return object;
4517 }
4518
4519
4520 MaybeObject* Heap::AllocateStringFromOneByte(Vector<const char> string,
4521                                            PretenureFlag pretenure) {
4522   int length = string.length();
4523   if (length == 1) {
4524     return Heap::LookupSingleCharacterStringFromCode(string[0]);
4525   }
4526   Object* result;
4527   { MaybeObject* maybe_result =
4528         AllocateRawOneByteString(string.length(), pretenure);
4529     if (!maybe_result->ToObject(&result)) return maybe_result;
4530   }
4531
4532   // Copy the characters into the new object.
4533   CopyChars(SeqOneByteString::cast(result)->GetChars(), string.start(), length);
4534   return result;
4535 }
4536
4537
4538 MaybeObject* Heap::AllocateStringFromUtf8Slow(Vector<const char> string,
4539                                               int non_ascii_start,
4540                                               PretenureFlag pretenure) {
4541   // Continue counting the number of characters in the UTF-8 string, starting
4542   // from the first non-ascii character or word.
4543   int chars = non_ascii_start;
4544   Access<UnicodeCache::Utf8Decoder>
4545       decoder(isolate_->unicode_cache()->utf8_decoder());
4546   decoder->Reset(string.start() + non_ascii_start, string.length() - chars);
4547   while (decoder->has_more()) {
4548     uint32_t r = decoder->GetNext();
4549     if (r <= unibrow::Utf16::kMaxNonSurrogateCharCode) {
4550       chars++;
4551     } else {
4552       chars += 2;
4553     }
4554   }
4555
4556   Object* result;
4557   { MaybeObject* maybe_result = AllocateRawTwoByteString(chars, pretenure);
4558     if (!maybe_result->ToObject(&result)) return maybe_result;
4559   }
4560
4561   // Convert and copy the characters into the new object.
4562   SeqTwoByteString* twobyte = SeqTwoByteString::cast(result);
4563   decoder->Reset(string.start(), string.length());
4564   int i = 0;
4565   while (i < chars) {
4566     uint32_t r = decoder->GetNext();
4567     if (r > unibrow::Utf16::kMaxNonSurrogateCharCode) {
4568       twobyte->SeqTwoByteStringSet(i++, unibrow::Utf16::LeadSurrogate(r));
4569       twobyte->SeqTwoByteStringSet(i++, unibrow::Utf16::TrailSurrogate(r));
4570     } else {
4571       twobyte->SeqTwoByteStringSet(i++, r);
4572     }
4573   }
4574   return result;
4575 }
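// Worked example of the standard UTF-16 encoding that LeadSurrogate() and
// TrailSurrogate() implement (illustrative, not part of the original source):
// the code point U+1F600 is stored as the surrogate pair 0xD83D, 0xDE00,
// computed as
//   lead  = 0xD800 + ((0x1F600 - 0x10000) >> 10)   == 0xD83D
//   trail = 0xDC00 + ((0x1F600 - 0x10000) & 0x3FF) == 0xDE00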
4576
4577
4578 MaybeObject* Heap::AllocateStringFromTwoByte(Vector<const uc16> string,
4579                                              PretenureFlag pretenure) {
4580   // Check if the string is an ASCII string.
4581   Object* result;
4582   int length = string.length();
4583   const uc16* start = string.start();
4584
4585   if (String::IsAscii(start, length)) {
4586     MaybeObject* maybe_result = AllocateRawOneByteString(length, pretenure);
4587     if (!maybe_result->ToObject(&result)) return maybe_result;
4588     CopyChars(SeqOneByteString::cast(result)->GetChars(), start, length);
4589   } else {  // It's not an ASCII string.
4590     MaybeObject* maybe_result = AllocateRawTwoByteString(length, pretenure);
4591     if (!maybe_result->ToObject(&result)) return maybe_result;
4592     CopyChars(SeqTwoByteString::cast(result)->GetChars(), start, length);
4593   }
4594   return result;
4595 }
4596
4597
4598 Map* Heap::SymbolMapForString(String* string) {
4599   // If the string is in new space it cannot be used as a symbol.
4600   if (InNewSpace(string)) return NULL;
4601
4602   // Find the corresponding symbol map for strings.
4603   switch (string->map()->instance_type()) {
4604     case STRING_TYPE: return symbol_map();
4605     case ASCII_STRING_TYPE: return ascii_symbol_map();
4606     case CONS_STRING_TYPE: return cons_symbol_map();
4607     case CONS_ASCII_STRING_TYPE: return cons_ascii_symbol_map();
4608     case EXTERNAL_STRING_TYPE: return external_symbol_map();
4609     case EXTERNAL_ASCII_STRING_TYPE: return external_ascii_symbol_map();
4610     case EXTERNAL_STRING_WITH_ASCII_DATA_TYPE:
4611       return external_symbol_with_ascii_data_map();
4612     case SHORT_EXTERNAL_STRING_TYPE: return short_external_symbol_map();
4613     case SHORT_EXTERNAL_ASCII_STRING_TYPE:
4614       return short_external_ascii_symbol_map();
4615     case SHORT_EXTERNAL_STRING_WITH_ASCII_DATA_TYPE:
4616       return short_external_symbol_with_ascii_data_map();
4617     default: return NULL;  // No match found.
4618   }
4619 }
4620
4621
4622 MaybeObject* Heap::AllocateInternalSymbol(unibrow::CharacterStream* buffer,
4623                                           int chars,
4624                                           uint32_t hash_field) {
4625   ASSERT(chars >= 0);
4626   // Ensure that 'chars' matches the number of characters in the buffer.
4627   ASSERT(static_cast<unsigned>(chars) == buffer->Utf16Length());
4628   // Determine whether the string is ASCII.
4629   bool is_ascii = true;
4630   while (buffer->has_more()) {
4631     if (buffer->GetNext() > unibrow::Utf8::kMaxOneByteChar) {
4632       is_ascii = false;
4633       break;
4634     }
4635   }
4636   buffer->Rewind();
4637
4638   // Compute map and object size.
4639   int size;
4640   Map* map;
4641
4642   if (is_ascii) {
4643     if (chars > SeqOneByteString::kMaxLength) {
4644       return Failure::OutOfMemoryException();
4645     }
4646     map = ascii_symbol_map();
4647     size = SeqOneByteString::SizeFor(chars);
4648   } else {
4649     if (chars > SeqTwoByteString::kMaxLength) {
4650       return Failure::OutOfMemoryException();
4651     }
4652     map = symbol_map();
4653     size = SeqTwoByteString::SizeFor(chars);
4654   }
4655
4656   // Allocate string.
4657   Object* result;
4658   { MaybeObject* maybe_result = (size > Page::kMaxNonCodeHeapObjectSize)
4659                    ? lo_space_->AllocateRaw(size, NOT_EXECUTABLE)
4660                    : old_data_space_->AllocateRaw(size);
4661     if (!maybe_result->ToObject(&result)) return maybe_result;
4662   }
4663
4664   reinterpret_cast<HeapObject*>(result)->set_map_no_write_barrier(map);
4665   // Set length and hash fields of the allocated string.
4666   String* answer = String::cast(result);
4667   answer->set_length(chars);
4668   answer->set_hash_field(hash_field);
4669
4670   ASSERT_EQ(size, answer->Size());
4671
4672   // Fill in the characters.
4673   int i = 0;
4674   while (i < chars) {
4675     uint32_t character = buffer->GetNext();
4676     if (character > unibrow::Utf16::kMaxNonSurrogateCharCode) {
4677       answer->Set(i++, unibrow::Utf16::LeadSurrogate(character));
4678       answer->Set(i++, unibrow::Utf16::TrailSurrogate(character));
4679     } else {
4680       answer->Set(i++, character);
4681     }
4682   }
4683   return answer;
4684 }
4685
4686
4687 MaybeObject* Heap::AllocateRawOneByteString(int length,
4688                                             PretenureFlag pretenure) {
4689   if (length < 0 || length > SeqOneByteString::kMaxLength) {
4690     return Failure::OutOfMemoryException();
4691   }
4692
4693   int size = SeqOneByteString::SizeFor(length);
4694   ASSERT(size <= SeqOneByteString::kMaxSize);
4695
4696   AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
4697   AllocationSpace retry_space = OLD_DATA_SPACE;
4698
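  // Space selection: the string goes to new space (or old data space when
  // tenured) unless it is too large for that space, in which case it is
  // allocated directly in large object space; a string that fits in new
  // space but exceeds a regular page's object size limit uses large object
  // space as its retry space.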
4699   if (space == NEW_SPACE) {
4700     if (size > kMaxObjectSizeInNewSpace) {
4701       // Allocate in large object space, retry space will be ignored.
4702       space = LO_SPACE;
4703     } else if (size > Page::kMaxNonCodeHeapObjectSize) {
4704       // Allocate in new space, retry in large object space.
4705       retry_space = LO_SPACE;
4706     }
4707   } else if (space == OLD_DATA_SPACE &&
4708              size > Page::kMaxNonCodeHeapObjectSize) {
4709     space = LO_SPACE;
4710   }
4711   Object* result;
4712   { MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
4713     if (!maybe_result->ToObject(&result)) return maybe_result;
4714   }
4715
4716   // Partially initialize the object.
4717   HeapObject::cast(result)->set_map_no_write_barrier(ascii_string_map());
4718   String::cast(result)->set_length(length);
4719   String::cast(result)->set_hash_field(String::kEmptyHashField);
4720   ASSERT_EQ(size, HeapObject::cast(result)->Size());
4721
4722 #ifdef VERIFY_HEAP
4723   if (FLAG_verify_heap) {
4724     // Initialize string's content to ensure ASCII-ness (character range 0-127)
4725     // as required when verifying the heap.
4726     char* dest = SeqOneByteString::cast(result)->GetChars();
4727     memset(dest, 0x0F, length * kCharSize);
4728   }
4729 #endif
4730
4731   return result;
4732 }
4733
4734
4735 MaybeObject* Heap::AllocateRawTwoByteString(int length,
4736                                             PretenureFlag pretenure) {
4737   if (length < 0 || length > SeqTwoByteString::kMaxLength) {
4738     return Failure::OutOfMemoryException();
4739   }
4740   int size = SeqTwoByteString::SizeFor(length);
4741   ASSERT(size <= SeqTwoByteString::kMaxSize);
4742   AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
4743   AllocationSpace retry_space = OLD_DATA_SPACE;
4744
4745   if (space == NEW_SPACE) {
4746     if (size > kMaxObjectSizeInNewSpace) {
4747       // Allocate in large object space, retry space will be ignored.
4748       space = LO_SPACE;
4749     } else if (size > Page::kMaxNonCodeHeapObjectSize) {
4750       // Allocate in new space, retry in large object space.
4751       retry_space = LO_SPACE;
4752     }
4753   } else if (space == OLD_DATA_SPACE &&
4754              size > Page::kMaxNonCodeHeapObjectSize) {
4755     space = LO_SPACE;
4756   }
4757   Object* result;
4758   { MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
4759     if (!maybe_result->ToObject(&result)) return maybe_result;
4760   }
4761
4762   // Partially initialize the object.
4763   HeapObject::cast(result)->set_map_no_write_barrier(string_map());
4764   String::cast(result)->set_length(length);
4765   String::cast(result)->set_hash_field(String::kEmptyHashField);
4766   ASSERT_EQ(size, HeapObject::cast(result)->Size());
4767   return result;
4768 }
4769
4770
4771 MaybeObject* Heap::AllocateJSArray(
4772     ElementsKind elements_kind,
4773     PretenureFlag pretenure) {
4774   Context* native_context = isolate()->context()->native_context();
4775   JSFunction* array_function = native_context->array_function();
4776   Map* map = array_function->initial_map();
4777   Object* maybe_map_array = native_context->js_array_maps();
4778   if (!maybe_map_array->IsUndefined()) {
4779     Object* maybe_transitioned_map =
4780         FixedArray::cast(maybe_map_array)->get(elements_kind);
4781     if (!maybe_transitioned_map->IsUndefined()) {
4782       map = Map::cast(maybe_transitioned_map);
4783     }
4784   }
4785
4786   return AllocateJSObjectFromMap(map, pretenure);
4787 }
4788
4789
4790 MaybeObject* Heap::AllocateEmptyFixedArray() {
4791   int size = FixedArray::SizeFor(0);
4792   Object* result;
4793   { MaybeObject* maybe_result =
4794         AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE);
4795     if (!maybe_result->ToObject(&result)) return maybe_result;
4796   }
4797   // Initialize the object.
4798   reinterpret_cast<FixedArray*>(result)->set_map_no_write_barrier(
4799       fixed_array_map());
4800   reinterpret_cast<FixedArray*>(result)->set_length(0);
4801   return result;
4802 }
4803
4804
4805 MaybeObject* Heap::AllocateRawFixedArray(int length) {
4806   if (length < 0 || length > FixedArray::kMaxLength) {
4807     return Failure::OutOfMemoryException();
4808   }
4809   ASSERT(length > 0);
4810   // Use the general function if we're forced to always allocate.
4811   if (always_allocate()) return AllocateFixedArray(length, TENURED);
4812   // Allocate the raw data for a fixed array.
4813   int size = FixedArray::SizeFor(length);
4814   return size <= kMaxObjectSizeInNewSpace
4815       ? new_space_.AllocateRaw(size)
4816       : lo_space_->AllocateRaw(size, NOT_EXECUTABLE);
4817 }
4818
4819
4820 MaybeObject* Heap::CopyFixedArrayWithMap(FixedArray* src, Map* map) {
4821   int len = src->length();
4822   Object* obj;
4823   { MaybeObject* maybe_obj = AllocateRawFixedArray(len);
4824     if (!maybe_obj->ToObject(&obj)) return maybe_obj;
4825   }
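  // A copy that landed in new space can have its body (length word included)
  // blitted with no write barriers; a copy in old space sets the length
  // explicitly and copies element by element below so the appropriate
  // write-barrier mode is applied.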
4826   if (InNewSpace(obj)) {
4827     HeapObject* dst = HeapObject::cast(obj);
4828     dst->set_map_no_write_barrier(map);
4829     CopyBlock(dst->address() + kPointerSize,
4830               src->address() + kPointerSize,
4831               FixedArray::SizeFor(len) - kPointerSize);
4832     return obj;
4833   }
4834   HeapObject::cast(obj)->set_map_no_write_barrier(map);
4835   FixedArray* result = FixedArray::cast(obj);
4836   result->set_length(len);
4837
4838   // Copy the content
4839   AssertNoAllocation no_gc;
4840   WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
4841   for (int i = 0; i < len; i++) result->set(i, src->get(i), mode);
4842   return result;
4843 }
4844
4845
4846 MaybeObject* Heap::CopyFixedDoubleArrayWithMap(FixedDoubleArray* src,
4847                                                Map* map) {
4848   int len = src->length();
4849   Object* obj;
4850   { MaybeObject* maybe_obj = AllocateRawFixedDoubleArray(len, NOT_TENURED);
4851     if (!maybe_obj->ToObject(&obj)) return maybe_obj;
4852   }
4853   HeapObject* dst = HeapObject::cast(obj);
4854   dst->set_map_no_write_barrier(map);
4855   CopyBlock(
4856       dst->address() + FixedDoubleArray::kLengthOffset,
4857       src->address() + FixedDoubleArray::kLengthOffset,
4858       FixedDoubleArray::SizeFor(len) - FixedDoubleArray::kLengthOffset);
4859   return obj;
4860 }
4861
4862
4863 MaybeObject* Heap::AllocateFixedArray(int length) {
4864   ASSERT(length >= 0);
4865   if (length == 0) return empty_fixed_array();
4866   Object* result;
4867   { MaybeObject* maybe_result = AllocateRawFixedArray(length);
4868     if (!maybe_result->ToObject(&result)) return maybe_result;
4869   }
4870   // Initialize header.
4871   FixedArray* array = reinterpret_cast<FixedArray*>(result);
4872   array->set_map_no_write_barrier(fixed_array_map());
4873   array->set_length(length);
4874   // Initialize body.
4875   ASSERT(!InNewSpace(undefined_value()));
4876   MemsetPointer(array->data_start(), undefined_value(), length);
4877   return result;
4878 }
4879
4880
4881 MaybeObject* Heap::AllocateRawFixedArray(int length, PretenureFlag pretenure) {
4882   if (length < 0 || length > FixedArray::kMaxLength) {
4883     return Failure::OutOfMemoryException();
4884   }
4885
4886   AllocationSpace space =
4887       (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
4888   int size = FixedArray::SizeFor(length);
4889   if (space == NEW_SPACE && size > kMaxObjectSizeInNewSpace) {
4890     // Too big for new space.
4891     space = LO_SPACE;
4892   } else if (space == OLD_POINTER_SPACE &&
4893              size > Page::kMaxNonCodeHeapObjectSize) {
4894     // Too big for old pointer space.
4895     space = LO_SPACE;
4896   }
4897
4898   AllocationSpace retry_space =
4899       (size <= Page::kMaxNonCodeHeapObjectSize) ? OLD_POINTER_SPACE : LO_SPACE;
4900
4901   return AllocateRaw(size, space, retry_space);
4902 }
4903
4904
4905 MUST_USE_RESULT static MaybeObject* AllocateFixedArrayWithFiller(
4906     Heap* heap,
4907     int length,
4908     PretenureFlag pretenure,
4909     Object* filler) {
4910   ASSERT(length >= 0);
4911   ASSERT(heap->empty_fixed_array()->IsFixedArray());
4912   if (length == 0) return heap->empty_fixed_array();
4913
4914   ASSERT(!heap->InNewSpace(filler));
4915   Object* result;
4916   { MaybeObject* maybe_result = heap->AllocateRawFixedArray(length, pretenure);
4917     if (!maybe_result->ToObject(&result)) return maybe_result;
4918   }
4919
4920   HeapObject::cast(result)->set_map_no_write_barrier(heap->fixed_array_map());
4921   FixedArray* array = FixedArray::cast(result);
4922   array->set_length(length);
4923   MemsetPointer(array->data_start(), filler, length);
4924   return array;
4925 }
4926
4927
4928 MaybeObject* Heap::AllocateFixedArray(int length, PretenureFlag pretenure) {
4929   return AllocateFixedArrayWithFiller(this,
4930                                       length,
4931                                       pretenure,
4932                                       undefined_value());
4933 }
4934
4935
4936 MaybeObject* Heap::AllocateFixedArrayWithHoles(int length,
4937                                                PretenureFlag pretenure) {
4938   return AllocateFixedArrayWithFiller(this,
4939                                       length,
4940                                       pretenure,
4941                                       the_hole_value());
4942 }
4943
4944
4945 MaybeObject* Heap::AllocateUninitializedFixedArray(int length) {
4946   if (length == 0) return empty_fixed_array();
4947
4948   Object* obj;
4949   { MaybeObject* maybe_obj = AllocateRawFixedArray(length);
4950     if (!maybe_obj->ToObject(&obj)) return maybe_obj;
4951   }
4952
4953   reinterpret_cast<FixedArray*>(obj)->set_map_no_write_barrier(
4954       fixed_array_map());
4955   FixedArray::cast(obj)->set_length(length);
4956   return obj;
4957 }
4958
4959
4960 MaybeObject* Heap::AllocateEmptyFixedDoubleArray() {
4961   int size = FixedDoubleArray::SizeFor(0);
4962   Object* result;
4963   { MaybeObject* maybe_result =
4964         AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE);
4965     if (!maybe_result->ToObject(&result)) return maybe_result;
4966   }
4967   // Initialize the object.
4968   reinterpret_cast<FixedDoubleArray*>(result)->set_map_no_write_barrier(
4969       fixed_double_array_map());
4970   reinterpret_cast<FixedDoubleArray*>(result)->set_length(0);
4971   return result;
4972 }
4973
4974
4975 MaybeObject* Heap::AllocateUninitializedFixedDoubleArray(
4976     int length,
4977     PretenureFlag pretenure) {
4978   if (length == 0) return empty_fixed_array();
4979
4980   Object* elements_object;
4981   MaybeObject* maybe_obj = AllocateRawFixedDoubleArray(length, pretenure);
4982   if (!maybe_obj->ToObject(&elements_object)) return maybe_obj;
4983   FixedDoubleArray* elements =
4984       reinterpret_cast<FixedDoubleArray*>(elements_object);
4985
4986   elements->set_map_no_write_barrier(fixed_double_array_map());
4987   elements->set_length(length);
4988   return elements;
4989 }
4990
4991
4992 MaybeObject* Heap::AllocateFixedDoubleArrayWithHoles(
4993     int length,
4994     PretenureFlag pretenure) {
4995   if (length == 0) return empty_fixed_array();
4996
4997   Object* elements_object;
4998   MaybeObject* maybe_obj = AllocateRawFixedDoubleArray(length, pretenure);
4999   if (!maybe_obj->ToObject(&elements_object)) return maybe_obj;
5000   FixedDoubleArray* elements =
5001       reinterpret_cast<FixedDoubleArray*>(elements_object);
5002
5003   for (int i = 0; i < length; ++i) {
5004     elements->set_the_hole(i);
5005   }
5006
5007   elements->set_map_no_write_barrier(fixed_double_array_map());
5008   elements->set_length(length);
5009   return elements;
5010 }
5011
5012
5013 MaybeObject* Heap::AllocateRawFixedDoubleArray(int length,
5014                                                PretenureFlag pretenure) {
5015   if (length < 0 || length > FixedDoubleArray::kMaxLength) {
5016     return Failure::OutOfMemoryException();
5017   }
5018
5019   AllocationSpace space =
5020       (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
5021   int size = FixedDoubleArray::SizeFor(length);
5022
5023 #ifndef V8_HOST_ARCH_64_BIT
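  // On 32-bit hosts, reserve one extra pointer-sized slot so that
  // EnsureDoubleAligned (called below) can shift the array to an 8-byte
  // boundary if necessary.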
5024   size += kPointerSize;
5025 #endif
5026
5027   if (space == NEW_SPACE && size > kMaxObjectSizeInNewSpace) {
5028     // Too big for new space.
5029     space = LO_SPACE;
5030   } else if (space == OLD_DATA_SPACE &&
5031              size > Page::kMaxNonCodeHeapObjectSize) {
5032     // Too big for old data space.
5033     space = LO_SPACE;
5034   }
5035
5036   AllocationSpace retry_space =
5037       (size <= Page::kMaxNonCodeHeapObjectSize) ? OLD_DATA_SPACE : LO_SPACE;
5038
5039   HeapObject* object;
5040   { MaybeObject* maybe_object = AllocateRaw(size, space, retry_space);
5041     if (!maybe_object->To<HeapObject>(&object)) return maybe_object;
5042   }
5043
5044   return EnsureDoubleAligned(this, object, size);
5045 }
5046
5047
5048 MaybeObject* Heap::AllocateHashTable(int length, PretenureFlag pretenure) {
5049   Object* result;
5050   { MaybeObject* maybe_result = AllocateFixedArray(length, pretenure);
5051     if (!maybe_result->ToObject(&result)) return maybe_result;
5052   }
5053   reinterpret_cast<HeapObject*>(result)->set_map_no_write_barrier(
5054       hash_table_map());
5055   ASSERT(result->IsHashTable());
5056   return result;
5057 }
5058
5059
5060 MaybeObject* Heap::AllocateNativeContext() {
5061   Object* result;
5062   { MaybeObject* maybe_result =
5063         AllocateFixedArray(Context::NATIVE_CONTEXT_SLOTS);
5064     if (!maybe_result->ToObject(&result)) return maybe_result;
5065   }
5066   Context* context = reinterpret_cast<Context*>(result);
5067   context->set_map_no_write_barrier(native_context_map());
5068   context->set_js_array_maps(undefined_value());
5069   ASSERT(context->IsNativeContext());
5070   ASSERT(result->IsContext());
5071   return result;
5072 }
5073
5074
5075 MaybeObject* Heap::AllocateGlobalContext(JSFunction* function,
5076                                          ScopeInfo* scope_info) {
5077   Object* result;
5078   { MaybeObject* maybe_result =
5079         AllocateFixedArray(scope_info->ContextLength(), TENURED);
5080     if (!maybe_result->ToObject(&result)) return maybe_result;
5081   }
5082   Context* context = reinterpret_cast<Context*>(result);
5083   context->set_map_no_write_barrier(global_context_map());
5084   context->set_closure(function);
5085   context->set_previous(function->context());
5086   context->set_extension(scope_info);
5087   context->set_global_object(function->context()->global_object());
5088   ASSERT(context->IsGlobalContext());
5089   ASSERT(result->IsContext());
5090   return context;
5091 }
5092
5093
5094 MaybeObject* Heap::AllocateModuleContext(ScopeInfo* scope_info) {
5095   Object* result;
5096   { MaybeObject* maybe_result =
5097         AllocateFixedArray(scope_info->ContextLength(), TENURED);
5098     if (!maybe_result->ToObject(&result)) return maybe_result;
5099   }
5100   Context* context = reinterpret_cast<Context*>(result);
5101   context->set_map_no_write_barrier(module_context_map());
5102   // Instance link will be set later.
5103   context->set_extension(Smi::FromInt(0));
5104   return context;
5105 }
5106
5107
5108 MaybeObject* Heap::AllocateFunctionContext(int length, JSFunction* function) {
5109   ASSERT(length >= Context::MIN_CONTEXT_SLOTS);
5110   Object* result;
5111   { MaybeObject* maybe_result = AllocateFixedArray(length);
5112     if (!maybe_result->ToObject(&result)) return maybe_result;
5113   }
5114   Context* context = reinterpret_cast<Context*>(result);
5115   context->set_map_no_write_barrier(function_context_map());
5116   context->set_closure(function);
5117   context->set_previous(function->context());
5118   context->set_extension(Smi::FromInt(0));
5119   context->set_global_object(function->context()->global_object());
5120   return context;
5121 }
5122
5123
5124 MaybeObject* Heap::AllocateCatchContext(JSFunction* function,
5125                                         Context* previous,
5126                                         String* name,
5127                                         Object* thrown_object) {
5128   STATIC_ASSERT(Context::MIN_CONTEXT_SLOTS == Context::THROWN_OBJECT_INDEX);
5129   Object* result;
5130   { MaybeObject* maybe_result =
5131         AllocateFixedArray(Context::MIN_CONTEXT_SLOTS + 1);
5132     if (!maybe_result->ToObject(&result)) return maybe_result;
5133   }
5134   Context* context = reinterpret_cast<Context*>(result);
5135   context->set_map_no_write_barrier(catch_context_map());
5136   context->set_closure(function);
5137   context->set_previous(previous);
5138   context->set_extension(name);
5139   context->set_global_object(previous->global_object());
5140   context->set(Context::THROWN_OBJECT_INDEX, thrown_object);
5141   return context;
5142 }
5143
5144
5145 MaybeObject* Heap::AllocateWithContext(JSFunction* function,
5146                                        Context* previous,
5147                                        JSObject* extension) {
5148   Object* result;
5149   { MaybeObject* maybe_result = AllocateFixedArray(Context::MIN_CONTEXT_SLOTS);
5150     if (!maybe_result->ToObject(&result)) return maybe_result;
5151   }
5152   Context* context = reinterpret_cast<Context*>(result);
5153   context->set_map_no_write_barrier(with_context_map());
5154   context->set_closure(function);
5155   context->set_previous(previous);
5156   context->set_extension(extension);
5157   context->set_global_object(previous->global_object());
5158   return context;
5159 }
5160
5161
5162 MaybeObject* Heap::AllocateBlockContext(JSFunction* function,
5163                                         Context* previous,
5164                                         ScopeInfo* scope_info) {
5165   Object* result;
5166   { MaybeObject* maybe_result =
5167         AllocateFixedArrayWithHoles(scope_info->ContextLength());
5168     if (!maybe_result->ToObject(&result)) return maybe_result;
5169   }
5170   Context* context = reinterpret_cast<Context*>(result);
5171   context->set_map_no_write_barrier(block_context_map());
5172   context->set_closure(function);
5173   context->set_previous(previous);
5174   context->set_extension(scope_info);
5175   context->set_global_object(previous->global_object());
5176   return context;
5177 }
5178
5179
5180 MaybeObject* Heap::AllocateScopeInfo(int length) {
5181   FixedArray* scope_info;
5182   MaybeObject* maybe_scope_info = AllocateFixedArray(length, TENURED);
5183   if (!maybe_scope_info->To(&scope_info)) return maybe_scope_info;
5184   scope_info->set_map_no_write_barrier(scope_info_map());
5185   return scope_info;
5186 }
5187
5188
5189 MaybeObject* Heap::AllocateExternal(void* value) {
5190   Foreign* foreign;
5191   { MaybeObject* maybe_result = AllocateForeign(static_cast<Address>(value));
5192     if (!maybe_result->To(&foreign)) return maybe_result;
5193   }
5194   JSObject* external;
5195   { MaybeObject* maybe_result = AllocateJSObjectFromMap(external_map());
5196     if (!maybe_result->To(&external)) return maybe_result;
5197   }
5198   external->SetInternalField(0, foreign);
5199   return external;
5200 }
5201
5202
5203 MaybeObject* Heap::AllocateStruct(InstanceType type) {
5204   Map* map;
5205   switch (type) {
5206 #define MAKE_CASE(NAME, Name, name) \
5207     case NAME##_TYPE: map = name##_map(); break;
5208 STRUCT_LIST(MAKE_CASE)
5209 #undef MAKE_CASE
5210     default:
5211       UNREACHABLE();
5212       return Failure::InternalError();
5213   }
5214   int size = map->instance_size();
5215   AllocationSpace space =
5216       (size > Page::kMaxNonCodeHeapObjectSize) ? LO_SPACE : OLD_POINTER_SPACE;
5217   Object* result;
5218   { MaybeObject* maybe_result = Allocate(map, space);
5219     if (!maybe_result->ToObject(&result)) return maybe_result;
5220   }
5221   Struct::cast(result)->InitializeBody(size);
5222   return result;
5223 }
5224
5225
5226 bool Heap::IsHeapIterable() {
5227   return (!old_pointer_space()->was_swept_conservatively() &&
5228           !old_data_space()->was_swept_conservatively());
5229 }
5230
5231
5232 void Heap::EnsureHeapIsIterable() {
5233   ASSERT(IsAllocationAllowed());
5234   if (!IsHeapIterable()) {
5235     CollectAllGarbage(kMakeHeapIterableMask, "Heap::EnsureHeapIsIterable");
5236   }
5237   ASSERT(IsHeapIterable());
5238 }
5239
5240
5241 void Heap::AdvanceIdleIncrementalMarking(intptr_t step_size) {
5242   incremental_marking()->Step(step_size,
5243                               IncrementalMarking::NO_GC_VIA_STACK_GUARD);
5244
5245   if (incremental_marking()->IsComplete()) {
5246     bool uncommit = false;
5247     if (gc_count_at_last_idle_gc_ == gc_count_) {
5248       // No GC since the last full GC; the mutator is probably not active.
5249       isolate_->compilation_cache()->Clear();
5250       uncommit = true;
5251     }
5252     CollectAllGarbage(kNoGCFlags, "idle notification: finalize incremental");
5253     gc_count_at_last_idle_gc_ = gc_count_;
5254     if (uncommit) {
5255       new_space_.Shrink();
5256       UncommitFromSpace();
5257     }
5258   }
5259 }
5260
5261
5262 bool Heap::IdleNotification(int hint) {
5263   // Hints greater than this value indicate that
5264   // the embedder is requesting a lot of GC work.
5265   const int kMaxHint = 1000;
5266   // Minimal hint that allows to do full GC.
5267   const int kMinHintForFullGC = 100;
5268   intptr_t size_factor = Min(Max(hint, 20), kMaxHint) / 4;
5269   // The size factor is in range [5..250]. The numbers here are chosen from
5270   // experiments. If you change them, make sure to test with
5271   // chrome/performance_ui_tests --gtest_filter="GeneralMixMemoryTest.*"
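  // For example, the maximum hint of 1000 yields a size factor of 250 and
  // thus the largest step size, while the minimum effective hint of 20
  // yields a size factor of 5.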
5272   intptr_t step_size =
5273       size_factor * IncrementalMarking::kAllocatedThreshold;
5274
5275   if (contexts_disposed_ > 0) {
5276     if (hint >= kMaxHint) {
5277       // The embedder is requesting a lot of GC work after context disposal,
5278       // we age inline caches so that they don't keep objects from
5279       // the old context alive.
5280       AgeInlineCaches();
5281     }
5282     int mark_sweep_time = Min(TimeMarkSweepWouldTakeInMs(), 1000);
5283     if (hint >= mark_sweep_time && !FLAG_expose_gc &&
5284         incremental_marking()->IsStopped()) {
5285       HistogramTimerScope scope(isolate_->counters()->gc_context());
5286       CollectAllGarbage(kReduceMemoryFootprintMask,
5287                         "idle notification: contexts disposed");
5288     } else {
5289       AdvanceIdleIncrementalMarking(step_size);
5290       contexts_disposed_ = 0;
5291     }
5292     // After context disposal there is likely a lot of garbage remaining; reset
5293     // the idle notification counters in order to trigger more incremental GCs
5294     // on subsequent idle notifications.
5295     StartIdleRound();
5296     return false;
5297   }
5298
5299   if (!FLAG_incremental_marking || FLAG_expose_gc || Serializer::enabled()) {
5300     return IdleGlobalGC();
5301   }
5302
5303   // By doing small chunks of GC work in each IdleNotification, we
5304   // perform a round of incremental GCs and after that wait until
5305   // the mutator creates enough garbage to justify a new round.
5306   // An incremental GC progresses as follows:
5307   // 1. many incremental marking steps,
5308   // 2. one old space mark-sweep-compact,
5309   // 3. many lazy sweep steps.
5310   // Use mark-sweep-compact events to count incremental GCs in a round.
5311
5312
5313   if (incremental_marking()->IsStopped()) {
5314     if (!IsSweepingComplete() &&
5315         !AdvanceSweepers(static_cast<int>(step_size))) {
5316       return false;
5317     }
5318   }
5319
5320   if (mark_sweeps_since_idle_round_started_ >= kMaxMarkSweepsInIdleRound) {
5321     if (EnoughGarbageSinceLastIdleRound()) {
5322       StartIdleRound();
5323     } else {
5324       return true;
5325     }
5326   }
5327
5328   int new_mark_sweeps = ms_count_ - ms_count_at_last_idle_notification_;
5329   mark_sweeps_since_idle_round_started_ += new_mark_sweeps;
5330   ms_count_at_last_idle_notification_ = ms_count_;
5331
5332   int remaining_mark_sweeps = kMaxMarkSweepsInIdleRound -
5333                               mark_sweeps_since_idle_round_started_;
5334
5335   if (remaining_mark_sweeps <= 0) {
5336     FinishIdleRound();
5337     return true;
5338   }
5339
5340   if (incremental_marking()->IsStopped()) {
5341     // If there are no more than two GCs left in this idle round and we are
5342     // allowed to do a full GC, then make those GCs full in order to compact
5343     // the code space.
5344     // TODO(ulan): Once we enable code compaction for incremental marking,
5345     // we can get rid of this special case and always start incremental marking.
5346     if (remaining_mark_sweeps <= 2 && hint >= kMinHintForFullGC) {
5347       CollectAllGarbage(kReduceMemoryFootprintMask,
5348                         "idle notification: finalize idle round");
5349     } else {
5350       incremental_marking()->Start();
5351     }
5352   }
5353   if (!incremental_marking()->IsStopped()) {
5354     AdvanceIdleIncrementalMarking(step_size);
5355   }
5356   return false;
5357 }
5358
5359
5360 bool Heap::IdleGlobalGC() {
5361   static const int kIdlesBeforeScavenge = 4;
5362   static const int kIdlesBeforeMarkSweep = 7;
5363   static const int kIdlesBeforeMarkCompact = 8;
5364   static const int kMaxIdleCount = kIdlesBeforeMarkCompact + 1;
5365   static const unsigned int kGCsBetweenCleanup = 4;
5366
5367   if (!last_idle_notification_gc_count_init_) {
5368     last_idle_notification_gc_count_ = gc_count_;
5369     last_idle_notification_gc_count_init_ = true;
5370   }
5371
5372   bool uncommit = true;
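  // Note that 'uncommit' is never cleared below, so UncommitFromSpace() is
  // always called at the end of this function.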
5373   bool finished = false;
5374
5375   // Reset the number of idle notifications received when a number of
5376   // GCs have taken place. This allows another round of cleanup based
5377   // on idle notifications if enough work has been carried out to
5378   // provoke a number of garbage collections.
5379   if (gc_count_ - last_idle_notification_gc_count_ < kGCsBetweenCleanup) {
5380     number_idle_notifications_ =
5381         Min(number_idle_notifications_ + 1, kMaxIdleCount);
5382   } else {
5383     number_idle_notifications_ = 0;
5384     last_idle_notification_gc_count_ = gc_count_;
5385   }
5386
5387   if (number_idle_notifications_ == kIdlesBeforeScavenge) {
5388     CollectGarbage(NEW_SPACE, "idle notification");
5389     new_space_.Shrink();
5390     last_idle_notification_gc_count_ = gc_count_;
5391   } else if (number_idle_notifications_ == kIdlesBeforeMarkSweep) {
5392     // Before doing the mark-sweep collections we clear the
5393     // compilation cache to avoid hanging on to source code and
5394     // generated code for cached functions.
5395     isolate_->compilation_cache()->Clear();
5396
5397     CollectAllGarbage(kReduceMemoryFootprintMask, "idle notification");
5398     new_space_.Shrink();
5399     last_idle_notification_gc_count_ = gc_count_;
5400
5401   } else if (number_idle_notifications_ == kIdlesBeforeMarkCompact) {
5402     CollectAllGarbage(kReduceMemoryFootprintMask, "idle notification");
5403     new_space_.Shrink();
5404     last_idle_notification_gc_count_ = gc_count_;
5405     number_idle_notifications_ = 0;
5406     finished = true;
5407   } else if (number_idle_notifications_ > kIdlesBeforeMarkCompact) {
5408     // If we have received more than kIdlesBeforeMarkCompact idle
5409     // notifications we do not perform any cleanup because we don't
5410     // expect to gain much by doing so.
5411     finished = true;
5412   }
5413
5414   if (uncommit) UncommitFromSpace();
5415
5416   return finished;
5417 }
5418
5419
5420 #ifdef DEBUG
5421
5422 void Heap::Print() {
5423   if (!HasBeenSetUp()) return;
5424   isolate()->PrintStack();
5425   AllSpaces spaces;
5426   for (Space* space = spaces.next(); space != NULL; space = spaces.next())
5427     space->Print();
5428 }
5429
5430
5431 void Heap::ReportCodeStatistics(const char* title) {
5432   PrintF(">>>>>> Code Stats (%s) >>>>>>\n", title);
5433   PagedSpace::ResetCodeStatistics();
5434   // We do not look for code in new space, map space, or old space.  If code
5435   // somehow ends up in those spaces, we would miss it here.
5436   code_space_->CollectCodeStatistics();
5437   lo_space_->CollectCodeStatistics();
5438   PagedSpace::ReportCodeStatistics();
5439 }
5440
5441
5442 // This function expects that NewSpace's allocated objects histogram is
5443 // populated (via a call to CollectStatistics or else as a side effect of a
5444 // just-completed scavenge collection).
5445 void Heap::ReportHeapStatistics(const char* title) {
5446   USE(title);
5447   PrintF(">>>>>> =============== %s (%d) =============== >>>>>>\n",
5448          title, gc_count_);
5449   PrintF("old_gen_promotion_limit_ %" V8_PTR_PREFIX "d\n",
5450          old_gen_promotion_limit_);
5451   PrintF("old_gen_allocation_limit_ %" V8_PTR_PREFIX "d\n",
5452          old_gen_allocation_limit_);
5453   PrintF("old_gen_limit_factor_ %d\n", old_gen_limit_factor_);
5454
5455   PrintF("\n");
5456   PrintF("Number of handles : %d\n", HandleScope::NumberOfHandles());
5457   isolate_->global_handles()->PrintStats();
5458   PrintF("\n");
5459
5460   PrintF("Heap statistics : ");
5461   isolate_->memory_allocator()->ReportStatistics();
5462   PrintF("To space : ");
5463   new_space_.ReportStatistics();
5464   PrintF("Old pointer space : ");
5465   old_pointer_space_->ReportStatistics();
5466   PrintF("Old data space : ");
5467   old_data_space_->ReportStatistics();
5468   PrintF("Code space : ");
5469   code_space_->ReportStatistics();
5470   PrintF("Map space : ");
5471   map_space_->ReportStatistics();
5472   PrintF("Cell space : ");
5473   cell_space_->ReportStatistics();
5474   PrintF("Large object space : ");
5475   lo_space_->ReportStatistics();
5476   PrintF(">>>>>> ========================================= >>>>>>\n");
5477 }
5478
5479 #endif  // DEBUG
5480
5481 bool Heap::Contains(HeapObject* value) {
5482   return Contains(value->address());
5483 }
5484
5485
5486 bool Heap::Contains(Address addr) {
5487   if (OS::IsOutsideAllocatedSpace(addr)) return false;
5488   return HasBeenSetUp() &&
5489     (new_space_.ToSpaceContains(addr) ||
5490      old_pointer_space_->Contains(addr) ||
5491      old_data_space_->Contains(addr) ||
5492      code_space_->Contains(addr) ||
5493      map_space_->Contains(addr) ||
5494      cell_space_->Contains(addr) ||
5495      lo_space_->SlowContains(addr));
5496 }
5497
5498
5499 bool Heap::InSpace(HeapObject* value, AllocationSpace space) {
5500   return InSpace(value->address(), space);
5501 }
5502
5503
5504 bool Heap::InSpace(Address addr, AllocationSpace space) {
5505   if (OS::IsOutsideAllocatedSpace(addr)) return false;
5506   if (!HasBeenSetUp()) return false;
5507
5508   switch (space) {
5509     case NEW_SPACE:
5510       return new_space_.ToSpaceContains(addr);
5511     case OLD_POINTER_SPACE:
5512       return old_pointer_space_->Contains(addr);
5513     case OLD_DATA_SPACE:
5514       return old_data_space_->Contains(addr);
5515     case CODE_SPACE:
5516       return code_space_->Contains(addr);
5517     case MAP_SPACE:
5518       return map_space_->Contains(addr);
5519     case CELL_SPACE:
5520       return cell_space_->Contains(addr);
5521     case LO_SPACE:
5522       return lo_space_->SlowContains(addr);
5523   }
5524
5525   return false;
5526 }
5527
5528
5529 #ifdef VERIFY_HEAP
5530 void Heap::Verify() {
5531   CHECK(HasBeenSetUp());
5532
5533   store_buffer()->Verify();
5534
5535   VerifyPointersVisitor visitor;
5536   IterateRoots(&visitor, VISIT_ONLY_STRONG);
5537
5538   new_space_.Verify();
5539
5540   old_pointer_space_->Verify(&visitor);
5541   map_space_->Verify(&visitor);
5542
5543   VerifyPointersVisitor no_dirty_regions_visitor;
5544   old_data_space_->Verify(&no_dirty_regions_visitor);
5545   code_space_->Verify(&no_dirty_regions_visitor);
5546   cell_space_->Verify(&no_dirty_regions_visitor);
5547
5548   lo_space_->Verify();
5549 }
5550 #endif
5551
5552
5553 MaybeObject* Heap::LookupSymbol(Vector<const char> string) {
5554   Object* symbol = NULL;
5555   Object* new_table;
5556   { MaybeObject* maybe_new_table =
5557         symbol_table()->LookupSymbol(string, &symbol);
5558     if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
5559   }
5560   // Can't use set_symbol_table because SymbolTable::cast knows that
5561   // SymbolTable is a singleton and checks for identity.
5562   roots_[kSymbolTableRootIndex] = new_table;
5563   ASSERT(symbol != NULL);
5564   return symbol;
5565 }
5566
5567
5568 MaybeObject* Heap::LookupAsciiSymbol(Vector<const char> string) {
5569   Object* symbol = NULL;
5570   Object* new_table;
5571   { MaybeObject* maybe_new_table =
5572         symbol_table()->LookupAsciiSymbol(string, &symbol);
5573     if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
5574   }
5575   // Can't use set_symbol_table because SymbolTable::cast knows that
5576   // SymbolTable is a singleton and checks for identity.
5577   roots_[kSymbolTableRootIndex] = new_table;
5578   ASSERT(symbol != NULL);
5579   return symbol;
5580 }
5581
5582
5583 MaybeObject* Heap::LookupAsciiSymbol(Handle<SeqOneByteString> string,
5584                                      int from,
5585                                      int length) {
5586   Object* symbol = NULL;
5587   Object* new_table;
5588   { MaybeObject* maybe_new_table =
5589         symbol_table()->LookupSubStringAsciiSymbol(string,
5590                                                    from,
5591                                                    length,
5592                                                    &symbol);
5593     if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
5594   }
5595   // Can't use set_symbol_table because SymbolTable::cast knows that
5596   // SymbolTable is a singleton and checks for identity.
5597   roots_[kSymbolTableRootIndex] = new_table;
5598   ASSERT(symbol != NULL);
5599   return symbol;
5600 }
5601
5602
5603 MaybeObject* Heap::LookupTwoByteSymbol(Vector<const uc16> string) {
5604   Object* symbol = NULL;
5605   Object* new_table;
5606   { MaybeObject* maybe_new_table =
5607         symbol_table()->LookupTwoByteSymbol(string, &symbol);
5608     if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
5609   }
5610   // Can't use set_symbol_table because SymbolTable::cast knows that
5611   // SymbolTable is a singleton and checks for identity.
5612   roots_[kSymbolTableRootIndex] = new_table;
5613   ASSERT(symbol != NULL);
5614   return symbol;
5615 }
5616
5617
5618 MaybeObject* Heap::LookupSymbol(String* string) {
5619   if (string->IsSymbol()) return string;
5620   Object* symbol = NULL;
5621   Object* new_table;
5622   { MaybeObject* maybe_new_table =
5623         symbol_table()->LookupString(string, &symbol);
5624     if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
5625   }
5626   // Can't use set_symbol_table because SymbolTable::cast knows that
5627   // SymbolTable is a singleton and checks for identity.
5628   roots_[kSymbolTableRootIndex] = new_table;
5629   ASSERT(symbol != NULL);
5630   return symbol;
5631 }
5632
5633
5634 bool Heap::LookupSymbolIfExists(String* string, String** symbol) {
5635   if (string->IsSymbol()) {
5636     *symbol = string;
5637     return true;
5638   }
5639   return symbol_table()->LookupSymbolIfExists(string, symbol);
5640 }
5641
5642
5643 void Heap::ZapFromSpace() {
5644   NewSpacePageIterator it(new_space_.FromSpaceStart(),
5645                           new_space_.FromSpaceEnd());
5646   while (it.has_next()) {
5647     NewSpacePage* page = it.next();
5648     for (Address cursor = page->area_start(), limit = page->area_end();
5649          cursor < limit;
5650          cursor += kPointerSize) {
5651       Memory::Address_at(cursor) = kFromSpaceZapValue;
5652     }
5653   }
5654 }
5655
5656
5657 void Heap::IterateAndMarkPointersToFromSpace(Address start,
5658                                              Address end,
5659                                              ObjectSlotCallback callback) {
5660   Address slot_address = start;
5661
5662   // We are not collecting slots on new space objects during mutation,
5663   // thus we have to scan for pointers to evacuation candidates when we
5664   // promote objects. But we should not record any slots in non-black
5665   // objects. A grey object's slots would be rescanned anyway, and a
5666   // white object might not survive until the end of the collection, so
5667   // recording its slots would violate the invariant.
5668   bool record_slots = false;
5669   if (incremental_marking()->IsCompacting()) {
5670     MarkBit mark_bit = Marking::MarkBitFrom(HeapObject::FromAddress(start));
5671     record_slots = Marking::IsBlack(mark_bit);
5672   }
5673
5674   while (slot_address < end) {
5675     Object** slot = reinterpret_cast<Object**>(slot_address);
5676     Object* object = *slot;
5677     // If the store buffer becomes overfull we mark pages as being exempt from
5678     // the store buffer.  These pages are scanned to find pointers that point
5679     // to the new space.  In that case we may hit newly promoted objects and
5680     // fix the pointers before the promotion queue gets to them.  Thus the 'if'.
5681     if (object->IsHeapObject()) {
5682       if (Heap::InFromSpace(object)) {
5683         callback(reinterpret_cast<HeapObject**>(slot),
5684                  HeapObject::cast(object));
5685         Object* new_object = *slot;
5686         if (InNewSpace(new_object)) {
5687           SLOW_ASSERT(Heap::InToSpace(new_object));
5688           SLOW_ASSERT(new_object->IsHeapObject());
5689           store_buffer_.EnterDirectlyIntoStoreBuffer(
5690               reinterpret_cast<Address>(slot));
5691         }
5692         SLOW_ASSERT(!MarkCompactCollector::IsOnEvacuationCandidate(new_object));
5693       } else if (record_slots &&
5694                  MarkCompactCollector::IsOnEvacuationCandidate(object)) {
5695         mark_compact_collector()->RecordSlot(slot, slot, object);
5696       }
5697     }
5698     slot_address += kPointerSize;
5699   }
5700 }
5701
5702
5703 #ifdef DEBUG
5704 typedef bool (*CheckStoreBufferFilter)(Object** addr);
5705
5706
5707 bool IsAMapPointerAddress(Object** addr) {
5708   uintptr_t a = reinterpret_cast<uintptr_t>(addr);
5709   int mod = a % Map::kSize;
5710   return mod >= Map::kPointerFieldsBeginOffset &&
5711          mod < Map::kPointerFieldsEndOffset;
5712 }
5713
5714
5715 bool EverythingsAPointer(Object** addr) {
5716   return true;
5717 }
5718
5719
5720 static void CheckStoreBuffer(Heap* heap,
5721                              Object** current,
5722                              Object** limit,
5723                              Object**** store_buffer_position,
5724                              Object*** store_buffer_top,
5725                              CheckStoreBufferFilter filter,
5726                              Address special_garbage_start,
5727                              Address special_garbage_end) {
5728   Map* free_space_map = heap->free_space_map();
5729   for ( ; current < limit; current++) {
5730     Object* o = *current;
5731     Address current_address = reinterpret_cast<Address>(current);
5732     // Skip free space.
5733     if (o == free_space_map) {
5734       Address current_address = reinterpret_cast<Address>(current);
5735       FreeSpace* free_space =
5736           FreeSpace::cast(HeapObject::FromAddress(current_address));
5737       int skip = free_space->Size();
5738       ASSERT(current_address + skip <= reinterpret_cast<Address>(limit));
5739       ASSERT(skip > 0);
5740       current_address += skip - kPointerSize;
5741       current = reinterpret_cast<Object**>(current_address);
5742       continue;
5743     }
5744     // Skip the current linear allocation space between top and limit, which
5745     // is not marked with the free space map but can contain junk.
5746     if (current_address == special_garbage_start &&
5747         special_garbage_end != special_garbage_start) {
5748       current_address = special_garbage_end - kPointerSize;
5749       current = reinterpret_cast<Object**>(current_address);
5750       continue;
5751     }
5752     if (!(*filter)(current)) continue;
5753     ASSERT(current_address < special_garbage_start ||
5754            current_address >= special_garbage_end);
5755     ASSERT(reinterpret_cast<uintptr_t>(o) != kFreeListZapValue);
5756     // We have to check that the pointer does not point into new space
5757     // without trying to cast it to a heap object since the hash field of
5758     // a string can contain values like 1 and 3 which are tagged null
5759     // pointers.
5760     if (!heap->InNewSpace(o)) continue;
5761     while (**store_buffer_position < current &&
5762            *store_buffer_position < store_buffer_top) {
5763       (*store_buffer_position)++;
5764     }
5765     if (**store_buffer_position != current ||
5766         *store_buffer_position == store_buffer_top) {
5767       Object** obj_start = current;
5768       while (!(*obj_start)->IsMap()) obj_start--;
5769       UNREACHABLE();
5770     }
5771   }
5772 }
5773
5774
5775 // Check that the store buffer contains all intergenerational pointers by
5776 // scanning a page and ensuring that all pointers to young space are in the
5777 // store buffer.
5778 void Heap::OldPointerSpaceCheckStoreBuffer() {
5779   OldSpace* space = old_pointer_space();
5780   PageIterator pages(space);
5781
5782   store_buffer()->SortUniq();
5783
5784   while (pages.has_next()) {
5785     Page* page = pages.next();
5786     Object** current = reinterpret_cast<Object**>(page->area_start());
5787
5788     Address end = page->area_end();
5789
5790     Object*** store_buffer_position = store_buffer()->Start();
5791     Object*** store_buffer_top = store_buffer()->Top();
5792
5793     Object** limit = reinterpret_cast<Object**>(end);
5794     CheckStoreBuffer(this,
5795                      current,
5796                      limit,
5797                      &store_buffer_position,
5798                      store_buffer_top,
5799                      &EverythingsAPointer,
5800                      space->top(),
5801                      space->limit());
5802   }
5803 }
5804
5805
5806 void Heap::MapSpaceCheckStoreBuffer() {
5807   MapSpace* space = map_space();
5808   PageIterator pages(space);
5809
5810   store_buffer()->SortUniq();
5811
5812   while (pages.has_next()) {
5813     Page* page = pages.next();
5814     Object** current = reinterpret_cast<Object**>(page->area_start());
5815
5816     Address end = page->area_end();
5817
5818     Object*** store_buffer_position = store_buffer()->Start();
5819     Object*** store_buffer_top = store_buffer()->Top();
5820
5821     Object** limit = reinterpret_cast<Object**>(end);
5822     CheckStoreBuffer(this,
5823                      current,
5824                      limit,
5825                      &store_buffer_position,
5826                      store_buffer_top,
5827                      &IsAMapPointerAddress,
5828                      space->top(),
5829                      space->limit());
5830   }
5831 }
5832
5833
5834 void Heap::LargeObjectSpaceCheckStoreBuffer() {
5835   LargeObjectIterator it(lo_space());
5836   for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
5837     // We only have code, sequential strings, or fixed arrays in large
5838     // object space, and only fixed arrays can possibly contain pointers to
5839     // the young generation.
5840     if (object->IsFixedArray()) {
5841       Object*** store_buffer_position = store_buffer()->Start();
5842       Object*** store_buffer_top = store_buffer()->Top();
5843       Object** current = reinterpret_cast<Object**>(object->address());
5844       Object** limit =
5845           reinterpret_cast<Object**>(object->address() + object->Size());
5846       CheckStoreBuffer(this,
5847                        current,
5848                        limit,
5849                        &store_buffer_position,
5850                        store_buffer_top,
5851                        &EverythingsAPointer,
5852                        NULL,
5853                        NULL);
5854     }
5855   }
5856 }
5857 #endif
5858
5859
5860 void Heap::IterateRoots(ObjectVisitor* v, VisitMode mode) {
5861   IterateStrongRoots(v, mode);
5862   IterateWeakRoots(v, mode);
5863 }
5864
5865
5866 void Heap::IterateWeakRoots(ObjectVisitor* v, VisitMode mode) {
5867   v->VisitPointer(reinterpret_cast<Object**>(&roots_[kSymbolTableRootIndex]));
5868   v->Synchronize(VisitorSynchronization::kSymbolTable);
5869   if (mode != VISIT_ALL_IN_SCAVENGE &&
5870       mode != VISIT_ALL_IN_SWEEP_NEWSPACE) {
5871     // Scavenge collections have special processing for this.
5872     external_string_table_.Iterate(v);
5873   }
5874   v->Synchronize(VisitorSynchronization::kExternalStringsTable);
5875 }
5876
5877
5878 void Heap::IterateStrongRoots(ObjectVisitor* v, VisitMode mode) {
5879   v->VisitPointers(&roots_[0], &roots_[kStrongRootListLength]);
5880   v->Synchronize(VisitorSynchronization::kStrongRootList);
5881
5882   v->VisitPointer(BitCast<Object**>(&hidden_symbol_));
5883   v->Synchronize(VisitorSynchronization::kSymbol);
5884
5885   isolate_->bootstrapper()->Iterate(v);
5886   v->Synchronize(VisitorSynchronization::kBootstrapper);
5887   isolate_->Iterate(v);
5888   v->Synchronize(VisitorSynchronization::kTop);
5889   Relocatable::Iterate(v);
5890   v->Synchronize(VisitorSynchronization::kRelocatable);
5891
5892 #ifdef ENABLE_DEBUGGER_SUPPORT
5893   isolate_->debug()->Iterate(v);
5894   if (isolate_->deoptimizer_data() != NULL) {
5895     isolate_->deoptimizer_data()->Iterate(v);
5896   }
5897 #endif
5898   v->Synchronize(VisitorSynchronization::kDebug);
5899   isolate_->compilation_cache()->Iterate(v);
5900   v->Synchronize(VisitorSynchronization::kCompilationCache);
5901
5902   // Iterate over local handles in handle scopes.
5903   isolate_->handle_scope_implementer()->Iterate(v);
5904   isolate_->IterateDeferredHandles(v);
5905   v->Synchronize(VisitorSynchronization::kHandleScope);
5906
5907   // Iterate over the builtin code objects and code stubs in the
5908   // heap. Note that it is not necessary to iterate over code objects
5909   // on scavenge collections.
5910   if (mode != VISIT_ALL_IN_SCAVENGE) {
5911     isolate_->builtins()->IterateBuiltins(v);
5912   }
5913   v->Synchronize(VisitorSynchronization::kBuiltins);
5914
5915   // Iterate over global handles.
5916   switch (mode) {
5917     case VISIT_ONLY_STRONG:
5918       isolate_->global_handles()->IterateStrongRoots(v);
5919       break;
5920     case VISIT_ALL_IN_SCAVENGE:
5921       isolate_->global_handles()->IterateNewSpaceStrongAndDependentRoots(v);
5922       break;
5923     case VISIT_ALL_IN_SWEEP_NEWSPACE:
5924     case VISIT_ALL:
5925       isolate_->global_handles()->IterateAllRoots(v);
5926       break;
5927   }
5928   v->Synchronize(VisitorSynchronization::kGlobalHandles);
5929
5930   // Iterate over pointers being held by inactive threads.
5931   isolate_->thread_manager()->Iterate(v);
5932   v->Synchronize(VisitorSynchronization::kThreadManager);
5933
5934   // Iterate over the pointers the Serialization/Deserialization code is
5935   // holding.
5936   // During garbage collection this keeps the partial snapshot cache alive.
5937   // During deserialization of the startup snapshot this creates the partial
5938   // snapshot cache and deserializes the objects it refers to.  During
5939   // serialization this does nothing, since the partial snapshot cache is
5940   // empty.  However the next thing we do is create the partial snapshot,
5941   // filling up the partial snapshot cache with objects it needs as we go.
5942   SerializerDeserializer::Iterate(v);
5943   // We don't do a v->Synchronize call here, because in debug mode that will
5944   // output a flag to the snapshot.  However at this point the serializer and
5945   // deserializer are deliberately a little unsynchronized (see above) so the
5946   // checking of the sync flag in the snapshot would fail.
5947 }
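// Hedged example (illustrative, not from the original file): a minimal
// ObjectVisitor that counts the root slots reported by IterateRoots.  The
// class name is an assumption.
//
//   class RootCountingVisitor : public ObjectVisitor {
//    public:
//     RootCountingVisitor() : count_(0) {}
//     void VisitPointers(Object** start, Object** end) {
//       count_ += static_cast<int>(end - start);
//     }
//     int count() const { return count_; }
//    private:
//     int count_;
//   };
//
//   // RootCountingVisitor v;  heap->IterateRoots(&v, VISIT_ALL);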
5948
5949
5950 // TODO(1236194): Since the heap size is configurable on the command line
5951 // and through the API, we should gracefully handle the case that the heap
5952 // size is not big enough to fit all the initial objects.
5953 bool Heap::ConfigureHeap(int max_semispace_size,
5954                          intptr_t max_old_gen_size,
5955                          intptr_t max_executable_size) {
5956   if (HasBeenSetUp()) return false;
5957
5958   if (FLAG_stress_compaction) {
5959     // This will cause more frequent GCs when stressing.
5960     max_semispace_size_ = Page::kPageSize;
5961   }
5962
5963   if (max_semispace_size > 0) {
5964     if (max_semispace_size < Page::kPageSize) {
5965       max_semispace_size = Page::kPageSize;
5966       if (FLAG_trace_gc) {
5967         PrintPID("Max semispace size cannot be less than %dkbytes\n",
5968                  Page::kPageSize >> 10);
5969       }
5970     }
5971     max_semispace_size_ = max_semispace_size;
5972   }
5973
5974   if (Snapshot::IsEnabled()) {
5975     // If we are using a snapshot we always reserve the default amount
5976     // of memory for each semispace because code in the snapshot has
5977     // write-barrier code that relies on the size and alignment of new
5978     // space.  We therefore cannot use a larger max semispace size
5979     // than the default reserved semispace size.
5980     if (max_semispace_size_ > reserved_semispace_size_) {
5981       max_semispace_size_ = reserved_semispace_size_;
5982       if (FLAG_trace_gc) {
5983         PrintPID("Max semispace size cannot be more than %dkbytes\n",
5984                  reserved_semispace_size_ >> 10);
5985       }
5986     }
5987   } else {
5988     // If we are not using snapshots we reserve space for the actual
5989     // max semispace size.
5990     reserved_semispace_size_ = max_semispace_size_;
5991   }
5992
5993   if (max_old_gen_size > 0) max_old_generation_size_ = max_old_gen_size;
5994   if (max_executable_size > 0) {
5995     max_executable_size_ = RoundUp(max_executable_size, Page::kPageSize);
5996   }
5997
5998   // The max executable size must be less than or equal to the max old
5999   // generation size.
6000   if (max_executable_size_ > max_old_generation_size_) {
6001     max_executable_size_ = max_old_generation_size_;
6002   }
6003
6004   // The new space size must be a power of two to support single-bit testing
6005   // for containment.
6006   max_semispace_size_ = RoundUpToPowerOf2(max_semispace_size_);
6007   reserved_semispace_size_ = RoundUpToPowerOf2(reserved_semispace_size_);
6008   initial_semispace_size_ = Min(initial_semispace_size_, max_semispace_size_);
6009   external_allocation_limit_ = 16 * max_semispace_size_;
6010
6011   // The old generation is paged and needs at least one page for each space.
6012   int paged_space_count = LAST_PAGED_SPACE - FIRST_PAGED_SPACE + 1;
6013   max_old_generation_size_ = Max(static_cast<intptr_t>(paged_space_count *
6014                                                        Page::kPageSize),
6015                                  RoundUp(max_old_generation_size_,
6016                                          Page::kPageSize));
6017
6018   configured_ = true;
6019   return true;
6020 }
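// Usage sketch (hedged; the literal sizes are illustrative assumptions, not
// recommended defaults).  Sizes are given in bytes and must be set before
// Heap::SetUp():
//
//   // 8 MB semispaces, 700 MB old generation, 256 MB executable limit.
//   heap->ConfigureHeap(8 * MB, 700 * MB, 256 * MB);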
6021
6022
6023 bool Heap::ConfigureHeapDefault() {
6024   return ConfigureHeap(static_cast<intptr_t>(FLAG_max_new_space_size / 2) * KB,
6025                        static_cast<intptr_t>(FLAG_max_old_space_size) * MB,
6026                        static_cast<intptr_t>(FLAG_max_executable_size) * MB);
6027 }
6028
6029
6030 void Heap::RecordStats(HeapStats* stats, bool take_snapshot) {
6031   *stats->start_marker = HeapStats::kStartMarker;
6032   *stats->end_marker = HeapStats::kEndMarker;
6033   *stats->new_space_size = new_space_.SizeAsInt();
6034   *stats->new_space_capacity = static_cast<int>(new_space_.Capacity());
6035   *stats->old_pointer_space_size = old_pointer_space_->SizeOfObjects();
6036   *stats->old_pointer_space_capacity = old_pointer_space_->Capacity();
6037   *stats->old_data_space_size = old_data_space_->SizeOfObjects();
6038   *stats->old_data_space_capacity = old_data_space_->Capacity();
6039   *stats->code_space_size = code_space_->SizeOfObjects();
6040   *stats->code_space_capacity = code_space_->Capacity();
6041   *stats->map_space_size = map_space_->SizeOfObjects();
6042   *stats->map_space_capacity = map_space_->Capacity();
6043   *stats->cell_space_size = cell_space_->SizeOfObjects();
6044   *stats->cell_space_capacity = cell_space_->Capacity();
6045   *stats->lo_space_size = lo_space_->Size();
6046   isolate_->global_handles()->RecordStats(stats);
6047   *stats->memory_allocator_size = isolate()->memory_allocator()->Size();
6048   *stats->memory_allocator_capacity =
6049       isolate()->memory_allocator()->Size() +
6050       isolate()->memory_allocator()->Available();
6051   *stats->os_error = OS::GetLastError();
6053   if (take_snapshot) {
6054     HeapIterator iterator;
6055     for (HeapObject* obj = iterator.next();
6056          obj != NULL;
6057          obj = iterator.next()) {
6058       InstanceType type = obj->map()->instance_type();
6059       ASSERT(0 <= type && type <= LAST_TYPE);
6060       stats->objects_per_type[type]++;
6061       stats->size_per_type[type] += obj->Size();
6062     }
6063   }
6064 }
6065
6066
6067 intptr_t Heap::PromotedSpaceSizeOfObjects() {
6068   return old_pointer_space_->SizeOfObjects()
6069       + old_data_space_->SizeOfObjects()
6070       + code_space_->SizeOfObjects()
6071       + map_space_->SizeOfObjects()
6072       + cell_space_->SizeOfObjects()
6073       + lo_space_->SizeOfObjects();
6074 }
6075
6076
6077 intptr_t Heap::PromotedExternalMemorySize() {
6078   if (amount_of_external_allocated_memory_
6079       <= amount_of_external_allocated_memory_at_last_global_gc_) return 0;
6080   return amount_of_external_allocated_memory_
6081       - amount_of_external_allocated_memory_at_last_global_gc_;
6082 }
6083
6084
6085 V8_DECLARE_ONCE(initialize_gc_once);
6086
6087 static void InitializeGCOnce() {
6088   InitializeScavengingVisitorsTables();
6089   NewSpaceScavenger::Initialize();
6090   MarkCompactCollector::Initialize();
6091 }
6092
6093 bool Heap::SetUp(bool create_heap_objects) {
6094 #ifdef DEBUG
6095   allocation_timeout_ = FLAG_gc_interval;
6096 #endif
6097
6098   // Initialize heap spaces and initial maps and objects. Whenever something
6099   // goes wrong, just return false. The caller should check the results and
6100   // call Heap::TearDown() to release allocated memory.
6101   //
6102   // If the heap is not yet configured (e.g. through the API), configure it.
6103   // Configuration is based on the flags new-space-size (really the semispace
6104   // size) and old-space-size if set, or the initial values of semispace_size_
6105   // and old_generation_size_ otherwise.
6106   if (!configured_) {
6107     if (!ConfigureHeapDefault()) return false;
6108   }
6109
6110   CallOnce(&initialize_gc_once, &InitializeGCOnce);
6111
6112   MarkMapPointersAsEncoded(false);
6113
6114   // Set up memory allocator.
6115   if (!isolate_->memory_allocator()->SetUp(MaxReserved(), MaxExecutableSize()))
6116       return false;
6117
6118   // Set up new space.
6119   if (!new_space_.SetUp(reserved_semispace_size_, max_semispace_size_)) {
6120     return false;
6121   }
6122
6123   // Initialize old pointer space.
6124   old_pointer_space_ =
6125       new OldSpace(this,
6126                    max_old_generation_size_,
6127                    OLD_POINTER_SPACE,
6128                    NOT_EXECUTABLE);
6129   if (old_pointer_space_ == NULL) return false;
6130   if (!old_pointer_space_->SetUp()) return false;
6131
6132   // Initialize old data space.
6133   old_data_space_ =
6134       new OldSpace(this,
6135                    max_old_generation_size_,
6136                    OLD_DATA_SPACE,
6137                    NOT_EXECUTABLE);
6138   if (old_data_space_ == NULL) return false;
6139   if (!old_data_space_->SetUp()) return false;
6140
6141   // Initialize the code space, set its maximum capacity to the old
6142   // generation size. It needs executable memory.
6143   // On 64-bit platform(s), we put all code objects in a 2 GB range of
6144   // virtual address space, so that they can call each other with near calls.
6145   if (code_range_size_ > 0) {
6146     if (!isolate_->code_range()->SetUp(code_range_size_)) {
6147       return false;
6148     }
6149   }
6150
6151   code_space_ =
6152       new OldSpace(this, max_old_generation_size_, CODE_SPACE, EXECUTABLE);
6153   if (code_space_ == NULL) return false;
6154   if (!code_space_->SetUp()) return false;
6155
6156   // Initialize map space.
6157   map_space_ = new MapSpace(this, max_old_generation_size_, MAP_SPACE);
6158   if (map_space_ == NULL) return false;
6159   if (!map_space_->SetUp()) return false;
6160
6161   // Initialize global property cell space.
6162   cell_space_ = new CellSpace(this, max_old_generation_size_, CELL_SPACE);
6163   if (cell_space_ == NULL) return false;
6164   if (!cell_space_->SetUp()) return false;
6165
6166   // The large object space may contain code or data.  We set the memory
6167   // to be non-executable here for safety, but this means we need to enable it
6168   // explicitly when allocating large code objects.
6169   lo_space_ = new LargeObjectSpace(this, max_old_generation_size_, LO_SPACE);
6170   if (lo_space_ == NULL) return false;
6171   if (!lo_space_->SetUp()) return false;
6172
6173   // Set up the seed that is used to randomize the string hash function.
6174   ASSERT(hash_seed() == 0);
6175   if (FLAG_randomize_hashes) {
6176     if (FLAG_hash_seed == 0) {
6177       set_hash_seed(
6178           Smi::FromInt(V8::RandomPrivate(isolate()) & 0x3fffffff));
6179     } else {
6180       set_hash_seed(Smi::FromInt(FLAG_hash_seed));
6181     }
6182   }
6183
6184   if (create_heap_objects) {
6185     // Create initial maps.
6186     if (!CreateInitialMaps()) return false;
6187     if (!CreateApiObjects()) return false;
6188
6189     // Create initial objects
6190     if (!CreateInitialObjects()) return false;
6191
6192     native_contexts_list_ = undefined_value();
6193   }
6194
6195   LOG(isolate_, IntPtrTEvent("heap-capacity", Capacity()));
6196   LOG(isolate_, IntPtrTEvent("heap-available", Available()));
6197
6198   store_buffer()->SetUp();
6199
6200   if (FLAG_parallel_recompilation) relocation_mutex_ = OS::CreateMutex();
6201
6202   return true;
6203 }
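// Hedged sketch of the expected life cycle (for orientation only; in V8
// proper the isolate drives this sequence):
//
//   if (!heap->ConfigureHeapDefault()) return false;  // optional, see above
//   if (!heap->SetUp(create_heap_objects)) {
//     heap->TearDown();  // release whatever was allocated before the failure
//     return false;
//   }
//   heap->SetStackLimits();
//   // ... run ...
//   heap->TearDown();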
6204
6205
6206 void Heap::SetStackLimits() {
6207   ASSERT(isolate_ != NULL);
6208   ASSERT(isolate_ == isolate());
6209   // On 64-bit machines, pointers are generally out of range of Smis.  We write
6210   // something that looks like an out-of-range Smi to the GC.
6211
6212   // Set up the special root array entries containing the stack limits.
6213   // These are actually addresses, but the tag makes the GC ignore them.
6214   roots_[kStackLimitRootIndex] =
6215       reinterpret_cast<Object*>(
6216           (isolate_->stack_guard()->jslimit() & ~kSmiTagMask) | kSmiTag);
6217   roots_[kRealStackLimitRootIndex] =
6218       reinterpret_cast<Object*>(
6219           (isolate_->stack_guard()->real_jslimit() & ~kSmiTagMask) | kSmiTag);
6220 }
6221
6222
6223 void Heap::TearDown() {
6224 #ifdef VERIFY_HEAP
6225   if (FLAG_verify_heap) {
6226     Verify();
6227   }
6228 #endif
6229
6230   if (FLAG_print_cumulative_gc_stat) {
6231     PrintF("\n\n");
6232     PrintF("gc_count=%d ", gc_count_);
6233     PrintF("mark_sweep_count=%d ", ms_count_);
6234     PrintF("max_gc_pause=%d ", get_max_gc_pause());
6235     PrintF("total_gc_time=%d ", total_gc_time_ms_);
6236     PrintF("min_in_mutator=%d ", get_min_in_mutator());
6237     PrintF("max_alive_after_gc=%" V8_PTR_PREFIX "d ",
6238            get_max_alive_after_gc());
6239     PrintF("\n\n");
6240   }
6241
6242   isolate_->global_handles()->TearDown();
6243
6244   external_string_table_.TearDown();
6245
6246   new_space_.TearDown();
6247
6248   if (old_pointer_space_ != NULL) {
6249     old_pointer_space_->TearDown();
6250     delete old_pointer_space_;
6251     old_pointer_space_ = NULL;
6252   }
6253
6254   if (old_data_space_ != NULL) {
6255     old_data_space_->TearDown();
6256     delete old_data_space_;
6257     old_data_space_ = NULL;
6258   }
6259
6260   if (code_space_ != NULL) {
6261     code_space_->TearDown();
6262     delete code_space_;
6263     code_space_ = NULL;
6264   }
6265
6266   if (map_space_ != NULL) {
6267     map_space_->TearDown();
6268     delete map_space_;
6269     map_space_ = NULL;
6270   }
6271
6272   if (cell_space_ != NULL) {
6273     cell_space_->TearDown();
6274     delete cell_space_;
6275     cell_space_ = NULL;
6276   }
6277
6278   if (lo_space_ != NULL) {
6279     lo_space_->TearDown();
6280     delete lo_space_;
6281     lo_space_ = NULL;
6282   }
6283
6284   store_buffer()->TearDown();
6285   incremental_marking()->TearDown();
6286
6287   isolate_->memory_allocator()->TearDown();
6288
6289   delete relocation_mutex_;
6290 }
6291
6292
6293 void Heap::Shrink() {
6294   // Try to shrink all paged spaces.
6295   PagedSpaces spaces;
6296   for (PagedSpace* space = spaces.next();
6297        space != NULL;
6298        space = spaces.next()) {
6299     space->ReleaseAllUnusedPages();
6300   }
6301 }
6302
6303
6304 void Heap::AddGCPrologueCallback(GCPrologueCallback callback, GCType gc_type) {
6305   ASSERT(callback != NULL);
6306   GCPrologueCallbackPair pair(callback, gc_type);
6307   ASSERT(!gc_prologue_callbacks_.Contains(pair));
6308   return gc_prologue_callbacks_.Add(pair);
6309 }
6310
6311
6312 void Heap::RemoveGCPrologueCallback(GCPrologueCallback callback) {
6313   ASSERT(callback != NULL);
6314   for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
6315     if (gc_prologue_callbacks_[i].callback == callback) {
6316       gc_prologue_callbacks_.Remove(i);
6317       return;
6318     }
6319   }
6320   UNREACHABLE();
6321 }
6322
6323
6324 void Heap::AddGCEpilogueCallback(GCEpilogueCallback callback, GCType gc_type) {
6325   ASSERT(callback != NULL);
6326   GCEpilogueCallbackPair pair(callback, gc_type);
6327   ASSERT(!gc_epilogue_callbacks_.Contains(pair));
6328   return gc_epilogue_callbacks_.Add(pair);
6329 }
6330
6331
6332 void Heap::RemoveGCEpilogueCallback(GCEpilogueCallback callback) {
6333   ASSERT(callback != NULL);
6334   for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
6335     if (gc_epilogue_callbacks_[i].callback == callback) {
6336       gc_epilogue_callbacks_.Remove(i);
6337       return;
6338     }
6339   }
6340   UNREACHABLE();
6341 }
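// Hedged example (illustrative only; `OnFullGCStart` is a made-up name and
// the signature follows the public v8 GCPrologueCallback typedef):
//
//   static void OnFullGCStart(GCType type, GCCallbackFlags flags) {
//     // e.g. drop embedder-side caches before a mark-sweep collection.
//   }
//
//   heap->AddGCPrologueCallback(OnFullGCStart, kGCTypeMarkSweepCompact);
//   // ... later ...
//   heap->RemoveGCPrologueCallback(OnFullGCStart);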
6342
6343
6344 #ifdef DEBUG
6345
6346 class PrintHandleVisitor: public ObjectVisitor {
6347  public:
6348   void VisitPointers(Object** start, Object** end) {
6349     for (Object** p = start; p < end; p++)
6350       PrintF("  handle %p to %p\n",
6351              reinterpret_cast<void*>(p),
6352              reinterpret_cast<void*>(*p));
6353   }
6354 };
6355
6356 void Heap::PrintHandles() {
6357   PrintF("Handles:\n");
6358   PrintHandleVisitor v;
6359   isolate_->handle_scope_implementer()->Iterate(&v);
6360 }
6361
6362 #endif
6363
6364
6365 Space* AllSpaces::next() {
6366   switch (counter_++) {
6367     case NEW_SPACE:
6368       return HEAP->new_space();
6369     case OLD_POINTER_SPACE:
6370       return HEAP->old_pointer_space();
6371     case OLD_DATA_SPACE:
6372       return HEAP->old_data_space();
6373     case CODE_SPACE:
6374       return HEAP->code_space();
6375     case MAP_SPACE:
6376       return HEAP->map_space();
6377     case CELL_SPACE:
6378       return HEAP->cell_space();
6379     case LO_SPACE:
6380       return HEAP->lo_space();
6381     default:
6382       return NULL;
6383   }
6384 }
6385
6386
6387 PagedSpace* PagedSpaces::next() {
6388   switch (counter_++) {
6389     case OLD_POINTER_SPACE:
6390       return HEAP->old_pointer_space();
6391     case OLD_DATA_SPACE:
6392       return HEAP->old_data_space();
6393     case CODE_SPACE:
6394       return HEAP->code_space();
6395     case MAP_SPACE:
6396       return HEAP->map_space();
6397     case CELL_SPACE:
6398       return HEAP->cell_space();
6399     default:
6400       return NULL;
6401   }
6402 }
6403
6404
6406 OldSpace* OldSpaces::next() {
6407   switch (counter_++) {
6408     case OLD_POINTER_SPACE:
6409       return HEAP->old_pointer_space();
6410     case OLD_DATA_SPACE:
6411       return HEAP->old_data_space();
6412     case CODE_SPACE:
6413       return HEAP->code_space();
6414     default:
6415       return NULL;
6416   }
6417 }
6418
6419
6420 SpaceIterator::SpaceIterator()
6421     : current_space_(FIRST_SPACE),
6422       iterator_(NULL),
6423       size_func_(NULL) {
6424 }
6425
6426
6427 SpaceIterator::SpaceIterator(HeapObjectCallback size_func)
6428     : current_space_(FIRST_SPACE),
6429       iterator_(NULL),
6430       size_func_(size_func) {
6431 }
6432
6433
6434 SpaceIterator::~SpaceIterator() {
6435   // Delete active iterator if any.
6436   delete iterator_;
6437 }
6438
6439
6440 bool SpaceIterator::has_next() {
6441   // Iterate until no more spaces.
6442   return current_space_ != LAST_SPACE;
6443 }
6444
6445
6446 ObjectIterator* SpaceIterator::next() {
6447   if (iterator_ != NULL) {
6448     delete iterator_;
6449     iterator_ = NULL;
6450     // Move to the next space
6451     current_space_++;
6452     if (current_space_ > LAST_SPACE) {
6453       return NULL;
6454     }
6455   }
6456
6457   // Return iterator for the new current space.
6458   return CreateIterator();
6459 }
6460
6461
6462 // Create an iterator for the space to iterate.
6463 ObjectIterator* SpaceIterator::CreateIterator() {
6464   ASSERT(iterator_ == NULL);
6465
6466   switch (current_space_) {
6467     case NEW_SPACE:
6468       iterator_ = new SemiSpaceIterator(HEAP->new_space(), size_func_);
6469       break;
6470     case OLD_POINTER_SPACE:
6471       iterator_ = new HeapObjectIterator(HEAP->old_pointer_space(), size_func_);
6472       break;
6473     case OLD_DATA_SPACE:
6474       iterator_ = new HeapObjectIterator(HEAP->old_data_space(), size_func_);
6475       break;
6476     case CODE_SPACE:
6477       iterator_ = new HeapObjectIterator(HEAP->code_space(), size_func_);
6478       break;
6479     case MAP_SPACE:
6480       iterator_ = new HeapObjectIterator(HEAP->map_space(), size_func_);
6481       break;
6482     case CELL_SPACE:
6483       iterator_ = new HeapObjectIterator(HEAP->cell_space(), size_func_);
6484       break;
6485     case LO_SPACE:
6486       iterator_ = new LargeObjectIterator(HEAP->lo_space(), size_func_);
6487       break;
6488   }
6489
6490   // Return the newly allocated iterator.
6491   ASSERT(iterator_ != NULL);
6492   return iterator_;
6493 }
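// Usage sketch (hedged; this mirrors what HeapIterator::NextObject below
// does): walking every object in every space.
//
//   SpaceIterator spaces;
//   while (spaces.has_next()) {
//     ObjectIterator* it = spaces.next();  // owned by the SpaceIterator
//     for (HeapObject* obj = it->next_object();
//          obj != NULL;
//          obj = it->next_object()) {
//       // ... inspect obj ...
//     }
//   }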
6494
6495
6496 class HeapObjectsFilter {
6497  public:
6498   virtual ~HeapObjectsFilter() {}
6499   virtual bool SkipObject(HeapObject* object) = 0;
6500 };
6501
6502
6503 class UnreachableObjectsFilter : public HeapObjectsFilter {
6504  public:
6505   UnreachableObjectsFilter() {
6506     MarkReachableObjects();
6507   }
6508
6509   ~UnreachableObjectsFilter() {
6510     Isolate::Current()->heap()->mark_compact_collector()->ClearMarkbits();
6511   }
6512
6513   bool SkipObject(HeapObject* object) {
6514     MarkBit mark_bit = Marking::MarkBitFrom(object);
6515     return !mark_bit.Get();
6516   }
6517
6518  private:
6519   class MarkingVisitor : public ObjectVisitor {
6520    public:
6521     MarkingVisitor() : marking_stack_(10) {}
6522
6523     void VisitPointers(Object** start, Object** end) {
6524       for (Object** p = start; p < end; p++) {
6525         if (!(*p)->IsHeapObject()) continue;
6526         HeapObject* obj = HeapObject::cast(*p);
6527         MarkBit mark_bit = Marking::MarkBitFrom(obj);
6528         if (!mark_bit.Get()) {
6529           mark_bit.Set();
6530           marking_stack_.Add(obj);
6531         }
6532       }
6533     }
6534
6535     void TransitiveClosure() {
6536       while (!marking_stack_.is_empty()) {
6537         HeapObject* obj = marking_stack_.RemoveLast();
6538         obj->Iterate(this);
6539       }
6540     }
6541
6542    private:
6543     List<HeapObject*> marking_stack_;
6544   };
6545
6546   void MarkReachableObjects() {
6547     Heap* heap = Isolate::Current()->heap();
6548     MarkingVisitor visitor;
6549     heap->IterateRoots(&visitor, VISIT_ALL);
6550     visitor.TransitiveClosure();
6551   }
6552
6553   AssertNoAllocation no_alloc;
6554 };
6555
6556
6557 HeapIterator::HeapIterator()
6558     : filtering_(HeapIterator::kNoFiltering),
6559       filter_(NULL) {
6560   Init();
6561 }
6562
6563
6564 HeapIterator::HeapIterator(HeapIterator::HeapObjectsFiltering filtering)
6565     : filtering_(filtering),
6566       filter_(NULL) {
6567   Init();
6568 }
6569
6570
6571 HeapIterator::~HeapIterator() {
6572   Shutdown();
6573 }
6574
6575
6576 void HeapIterator::Init() {
6577   // Start the iteration.
6578   space_iterator_ = new SpaceIterator;
6579   switch (filtering_) {
6580     case kFilterUnreachable:
6581       filter_ = new UnreachableObjectsFilter;
6582       break;
6583     default:
6584       break;
6585   }
6586   object_iterator_ = space_iterator_->next();
6587 }
6588
6589
6590 void HeapIterator::Shutdown() {
6591 #ifdef DEBUG
6592   // Assert that in filtering mode we have iterated through all
6593   // objects. Otherwise, the heap will be left in an inconsistent state.
6594   if (filtering_ != kNoFiltering) {
6595     ASSERT(object_iterator_ == NULL);
6596   }
6597 #endif
6598   // Make sure the last iterator is deallocated.
6599   delete space_iterator_;
6600   space_iterator_ = NULL;
6601   object_iterator_ = NULL;
6602   delete filter_;
6603   filter_ = NULL;
6604 }
6605
6606
6607 HeapObject* HeapIterator::next() {
6608   if (filter_ == NULL) return NextObject();
6609
6610   HeapObject* obj = NextObject();
6611   while (obj != NULL && filter_->SkipObject(obj)) obj = NextObject();
6612   return obj;
6613 }
6614
6615
6616 HeapObject* HeapIterator::NextObject() {
6617   // No iterator means we are done.
6618   if (object_iterator_ == NULL) return NULL;
6619
6620   if (HeapObject* obj = object_iterator_->next_object()) {
6621     // If the current iterator has more objects we are fine.
6622     return obj;
6623   } else {
6624     // Go through the spaces looking for one that has objects.
6625     while (space_iterator_->has_next()) {
6626       object_iterator_ = space_iterator_->next();
6627       if (HeapObject* obj = object_iterator_->next_object()) {
6628         return obj;
6629       }
6630     }
6631   }
6632   // Done with the last space.
6633   object_iterator_ = NULL;
6634   return NULL;
6635 }
6636
6637
6638 void HeapIterator::reset() {
6639   // Restart the iterator.
6640   Shutdown();
6641   Init();
6642 }
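// Hedged usage sketch, matching the pattern already used in Heap::RecordStats
// above:
//
//   HeapIterator iterator(HeapIterator::kFilterUnreachable);
//   for (HeapObject* obj = iterator.next();
//        obj != NULL;
//        obj = iterator.next()) {
//     // With kFilterUnreachable only objects reachable from the roots
//     // are reported.
//   }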
6643
6644
6645 #if defined(DEBUG) || defined(LIVE_OBJECT_LIST)
6646
6647 Object* const PathTracer::kAnyGlobalObject = reinterpret_cast<Object*>(NULL);
6648
6649 class PathTracer::MarkVisitor: public ObjectVisitor {
6650  public:
6651   explicit MarkVisitor(PathTracer* tracer) : tracer_(tracer) {}
6652   void VisitPointers(Object** start, Object** end) {
6653     // Scan all HeapObject pointers in [start, end)
6654     for (Object** p = start; !tracer_->found() && (p < end); p++) {
6655       if ((*p)->IsHeapObject())
6656         tracer_->MarkRecursively(p, this);
6657     }
6658   }
6659
6660  private:
6661   PathTracer* tracer_;
6662 };
6663
6664
6665 class PathTracer::UnmarkVisitor: public ObjectVisitor {
6666  public:
6667   explicit UnmarkVisitor(PathTracer* tracer) : tracer_(tracer) {}
6668   void VisitPointers(Object** start, Object** end) {
6669     // Scan all HeapObject pointers in [start, end)
6670     for (Object** p = start; p < end; p++) {
6671       if ((*p)->IsHeapObject())
6672         tracer_->UnmarkRecursively(p, this);
6673     }
6674   }
6675
6676  private:
6677   PathTracer* tracer_;
6678 };
6679
6680
6681 void PathTracer::VisitPointers(Object** start, Object** end) {
6682   bool done = ((what_to_find_ == FIND_FIRST) && found_target_);
6683   // Visit all HeapObject pointers in [start, end)
6684   for (Object** p = start; !done && (p < end); p++) {
6685     if ((*p)->IsHeapObject()) {
6686       TracePathFrom(p);
6687       done = ((what_to_find_ == FIND_FIRST) && found_target_);
6688     }
6689   }
6690 }
6691
6692
6693 void PathTracer::Reset() {
6694   found_target_ = false;
6695   object_stack_.Clear();
6696 }
6697
6698
6699 void PathTracer::TracePathFrom(Object** root) {
6700   ASSERT((search_target_ == kAnyGlobalObject) ||
6701          search_target_->IsHeapObject());
6702   found_target_in_trace_ = false;
6703   Reset();
6704
6705   MarkVisitor mark_visitor(this);
6706   MarkRecursively(root, &mark_visitor);
6707
6708   UnmarkVisitor unmark_visitor(this);
6709   UnmarkRecursively(root, &unmark_visitor);
6710
6711   ProcessResults();
6712 }
6713
6714
6715 static bool SafeIsNativeContext(HeapObject* obj) {
6716   return obj->map() == obj->GetHeap()->raw_unchecked_native_context_map();
6717 }
6718
6719
6720 void PathTracer::MarkRecursively(Object** p, MarkVisitor* mark_visitor) {
6721   if (!(*p)->IsHeapObject()) return;
6722
6723   HeapObject* obj = HeapObject::cast(*p);
6724
6725   Object* map = obj->map();
6726
6727   if (!map->IsHeapObject()) return;  // visited before
6728
6729   if (found_target_in_trace_) return;  // stop if target found
6730   object_stack_.Add(obj);
6731   if (((search_target_ == kAnyGlobalObject) && obj->IsJSGlobalObject()) ||
6732       (obj == search_target_)) {
6733     found_target_in_trace_ = true;
6734     found_target_ = true;
6735     return;
6736   }
6737
6738   bool is_native_context = SafeIsNativeContext(obj);
6739
6740   // not visited yet
6741   Map* map_p = reinterpret_cast<Map*>(HeapObject::cast(map));
6742
6743   Address map_addr = map_p->address();
6744
6745   obj->set_map_no_write_barrier(reinterpret_cast<Map*>(map_addr + kMarkTag));
6746
6747   // Scan the object body.
6748   if (is_native_context && (visit_mode_ == VISIT_ONLY_STRONG)) {
6749     // This is specialized to scan Contexts properly.
6750     Object** start = reinterpret_cast<Object**>(obj->address() +
6751                                                 Context::kHeaderSize);
6752     Object** end = reinterpret_cast<Object**>(obj->address() +
6753         Context::kHeaderSize + Context::FIRST_WEAK_SLOT * kPointerSize);
6754     mark_visitor->VisitPointers(start, end);
6755   } else {
6756     obj->IterateBody(map_p->instance_type(),
6757                      obj->SizeFromMap(map_p),
6758                      mark_visitor);
6759   }
6760
6761   // Scan the map after the body because the body is a lot more interesting
6762   // when doing leak detection.
6763   MarkRecursively(&map, mark_visitor);
6764
6765   if (!found_target_in_trace_)  // don't pop if found the target
6766     object_stack_.RemoveLast();
6767 }
6768
6769
6770 void PathTracer::UnmarkRecursively(Object** p, UnmarkVisitor* unmark_visitor) {
6771   if (!(*p)->IsHeapObject()) return;
6772
6773   HeapObject* obj = HeapObject::cast(*p);
6774
6775   Object* map = obj->map();
6776
6777   if (map->IsHeapObject()) return;  // unmarked already
6778
6779   Address map_addr = reinterpret_cast<Address>(map);
6780
6781   map_addr -= kMarkTag;
6782
6783   ASSERT_TAG_ALIGNED(map_addr);
6784
6785   HeapObject* map_p = HeapObject::FromAddress(map_addr);
6786
6787   obj->set_map_no_write_barrier(reinterpret_cast<Map*>(map_p));
6788
6789   UnmarkRecursively(reinterpret_cast<Object**>(&map_p), unmark_visitor);
6790
6791   obj->IterateBody(Map::cast(map_p)->instance_type(),
6792                    obj->SizeFromMap(Map::cast(map_p)),
6793                    unmark_visitor);
6794 }
6795
6796
6797 void PathTracer::ProcessResults() {
6798   if (found_target_) {
6799     PrintF("=====================================\n");
6800     PrintF("====        Path to object       ====\n");
6801     PrintF("=====================================\n\n");
6802
6803     ASSERT(!object_stack_.is_empty());
6804     for (int i = 0; i < object_stack_.length(); i++) {
6805       if (i > 0) PrintF("\n     |\n     |\n     V\n\n");
6806       Object* obj = object_stack_[i];
6807       obj->Print();
6808     }
6809     PrintF("=====================================\n");
6810   }
6811 }
6812 #endif  // DEBUG || LIVE_OBJECT_LIST
6813
6814
6815 #ifdef DEBUG
6816 // Triggers a depth-first traversal of reachable objects from one
6817 // given root object and finds a path to a specific heap object and
6818 // prints it.
6819 void Heap::TracePathToObjectFrom(Object* target, Object* root) {
6820   PathTracer tracer(target, PathTracer::FIND_ALL, VISIT_ALL);
6821   tracer.VisitPointer(&root);
6822 }
6823
6824
6825 // Triggers a depth-first traversal of reachable objects from roots
6826 // and finds a path to a specific heap object and prints it.
6827 void Heap::TracePathToObject(Object* target) {
6828   PathTracer tracer(target, PathTracer::FIND_ALL, VISIT_ALL);
6829   IterateRoots(&tracer, VISIT_ONLY_STRONG);
6830 }
6831
6832
6833 // Triggers a depth-first traversal of reachable objects from roots
6834 // and finds a path to any global object and prints it. Useful for
6835 // determining the source for leaks of global objects.
6836 void Heap::TracePathToGlobal() {
6837   PathTracer tracer(PathTracer::kAnyGlobalObject,
6838                     PathTracer::FIND_ALL,
6839                     VISIT_ALL);
6840   IterateRoots(&tracer, VISIT_ONLY_STRONG);
6841 }
6842 #endif
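// Hedged debugging sketch (DEBUG builds only): these tracers are meant to be
// invoked ad hoc, e.g. from a debugger session; `suspect` is a placeholder.
//
//   HEAP->TracePathToObject(suspect);  // path from the strong roots
//   HEAP->TracePathToGlobal();         // path to any global object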
6843
6844
6845 static intptr_t CountTotalHolesSize() {
6846   intptr_t holes_size = 0;
6847   OldSpaces spaces;
6848   for (OldSpace* space = spaces.next();
6849        space != NULL;
6850        space = spaces.next()) {
6851     holes_size += space->Waste() + space->Available();
6852   }
6853   return holes_size;
6854 }
6855
6856
6857 GCTracer::GCTracer(Heap* heap,
6858                    const char* gc_reason,
6859                    const char* collector_reason)
6860     : start_time_(0.0),
6861       start_object_size_(0),
6862       start_memory_size_(0),
6863       gc_count_(0),
6864       full_gc_count_(0),
6865       allocated_since_last_gc_(0),
6866       spent_in_mutator_(0),
6867       promoted_objects_size_(0),
6868       nodes_died_in_new_space_(0),
6869       nodes_copied_in_new_space_(0),
6870       nodes_promoted_(0),
6871       heap_(heap),
6872       gc_reason_(gc_reason),
6873       collector_reason_(collector_reason) {
6874   if (!FLAG_trace_gc && !FLAG_print_cumulative_gc_stat) return;
6875   start_time_ = OS::TimeCurrentMillis();
6876   start_object_size_ = heap_->SizeOfObjects();
6877   start_memory_size_ = heap_->isolate()->memory_allocator()->Size();
6878
6879   for (int i = 0; i < Scope::kNumberOfScopes; i++) {
6880     scopes_[i] = 0;
6881   }
6882
6883   in_free_list_or_wasted_before_gc_ = CountTotalHolesSize();
6884
6885   allocated_since_last_gc_ =
6886       heap_->SizeOfObjects() - heap_->alive_after_last_gc_;
6887
6888   if (heap_->last_gc_end_timestamp_ > 0) {
6889     spent_in_mutator_ = Max(start_time_ - heap_->last_gc_end_timestamp_, 0.0);
6890   }
6891
6892   steps_count_ = heap_->incremental_marking()->steps_count();
6893   steps_took_ = heap_->incremental_marking()->steps_took();
6894   longest_step_ = heap_->incremental_marking()->longest_step();
6895   steps_count_since_last_gc_ =
6896       heap_->incremental_marking()->steps_count_since_last_gc();
6897   steps_took_since_last_gc_ =
6898       heap_->incremental_marking()->steps_took_since_last_gc();
6899 }
6900
6901
6902 GCTracer::~GCTracer() {
6903   // Printf ONE line iff flag is set.
6904   if (!FLAG_trace_gc && !FLAG_print_cumulative_gc_stat) return;
6905
6906   bool first_gc = (heap_->last_gc_end_timestamp_ == 0);
6907
6908   heap_->alive_after_last_gc_ = heap_->SizeOfObjects();
6909   heap_->last_gc_end_timestamp_ = OS::TimeCurrentMillis();
6910
6911   int time = static_cast<int>(heap_->last_gc_end_timestamp_ - start_time_);
6912
6913   // Update cumulative GC statistics if required.
6914   if (FLAG_print_cumulative_gc_stat) {
6915     heap_->total_gc_time_ms_ += time;
6916     heap_->max_gc_pause_ = Max(heap_->max_gc_pause_, time);
6917     heap_->max_alive_after_gc_ = Max(heap_->max_alive_after_gc_,
6918                                      heap_->alive_after_last_gc_);
6919     if (!first_gc) {
6920       heap_->min_in_mutator_ = Min(heap_->min_in_mutator_,
6921                                    static_cast<int>(spent_in_mutator_));
6922     }
6923   } else if (FLAG_trace_gc_verbose) {
6924     heap_->total_gc_time_ms_ += time;
6925   }
6926
6927   if (collector_ == SCAVENGER && FLAG_trace_gc_ignore_scavenger) return;
6928
6929   PrintPID("%8.0f ms: ", heap_->isolate()->time_millis_since_init());
6930
6931   if (!FLAG_trace_gc_nvp) {
6932     int external_time = static_cast<int>(scopes_[Scope::EXTERNAL]);
6933
6934     double end_memory_size_mb =
6935         static_cast<double>(heap_->isolate()->memory_allocator()->Size()) / MB;
6936
6937     PrintF("%s %.1f (%.1f) -> %.1f (%.1f) MB, ",
6938            CollectorString(),
6939            static_cast<double>(start_object_size_) / MB,
6940            static_cast<double>(start_memory_size_) / MB,
6941            SizeOfHeapObjects(),
6942            end_memory_size_mb);
6943
6944     if (external_time > 0) PrintF("%d / ", external_time);
6945     PrintF("%d ms", time);
6946     if (steps_count_ > 0) {
6947       if (collector_ == SCAVENGER) {
6948         PrintF(" (+ %d ms in %d steps since last GC)",
6949                static_cast<int>(steps_took_since_last_gc_),
6950                steps_count_since_last_gc_);
6951       } else {
6952         PrintF(" (+ %d ms in %d steps since start of marking, "
6953                    "biggest step %f ms)",
6954                static_cast<int>(steps_took_),
6955                steps_count_,
6956                longest_step_);
6957       }
6958     }
6959
6960     if (gc_reason_ != NULL) {
6961       PrintF(" [%s]", gc_reason_);
6962     }
6963
6964     if (collector_reason_ != NULL) {
6965       PrintF(" [%s]", collector_reason_);
6966     }
6967
6968     PrintF(".\n");
6969   } else {
6970     PrintF("pause=%d ", time);
6971     PrintF("mutator=%d ", static_cast<int>(spent_in_mutator_));
6972     PrintF("gc=");
6973     switch (collector_) {
6974       case SCAVENGER:
6975         PrintF("s");
6976         break;
6977       case MARK_COMPACTOR:
6978         PrintF("ms");
6979         break;
6980       default:
6981         UNREACHABLE();
6982     }
6983     PrintF(" ");
6984
6985     PrintF("external=%d ", static_cast<int>(scopes_[Scope::EXTERNAL]));
6986     PrintF("mark=%d ", static_cast<int>(scopes_[Scope::MC_MARK]));
6987     PrintF("sweep=%d ", static_cast<int>(scopes_[Scope::MC_SWEEP]));
6988     PrintF("sweepns=%d ", static_cast<int>(scopes_[Scope::MC_SWEEP_NEWSPACE]));
6989     PrintF("evacuate=%d ", static_cast<int>(scopes_[Scope::MC_EVACUATE_PAGES]));
6990     PrintF("new_new=%d ",
6991            static_cast<int>(scopes_[Scope::MC_UPDATE_NEW_TO_NEW_POINTERS]));
6992     PrintF("root_new=%d ",
6993            static_cast<int>(scopes_[Scope::MC_UPDATE_ROOT_TO_NEW_POINTERS]));
6994     PrintF("old_new=%d ",
6995            static_cast<int>(scopes_[Scope::MC_UPDATE_OLD_TO_NEW_POINTERS]));
6996     PrintF("compaction_ptrs=%d ",
6997            static_cast<int>(scopes_[Scope::MC_UPDATE_POINTERS_TO_EVACUATED]));
6998     PrintF("intracompaction_ptrs=%d ", static_cast<int>(scopes_[
6999         Scope::MC_UPDATE_POINTERS_BETWEEN_EVACUATED]));
7000     PrintF("misc_compaction=%d ",
7001            static_cast<int>(scopes_[Scope::MC_UPDATE_MISC_POINTERS]));
7002
7003     PrintF("total_size_before=%" V8_PTR_PREFIX "d ", start_object_size_);
7004     PrintF("total_size_after=%" V8_PTR_PREFIX "d ", heap_->SizeOfObjects());
7005     PrintF("holes_size_before=%" V8_PTR_PREFIX "d ",
7006            in_free_list_or_wasted_before_gc_);
7007     PrintF("holes_size_after=%" V8_PTR_PREFIX "d ", CountTotalHolesSize());
7008
7009     PrintF("allocated=%" V8_PTR_PREFIX "d ", allocated_since_last_gc_);
7010     PrintF("promoted=%" V8_PTR_PREFIX "d ", promoted_objects_size_);
7011     PrintF("nodes_died_in_new=%d ", nodes_died_in_new_space_);
7012     PrintF("nodes_copied_in_new=%d ", nodes_copied_in_new_space_);
7013     PrintF("nodes_promoted=%d ", nodes_promoted_);
7014
7015     if (collector_ == SCAVENGER) {
7016       PrintF("stepscount=%d ", steps_count_since_last_gc_);
7017       PrintF("stepstook=%d ", static_cast<int>(steps_took_since_last_gc_));
7018     } else {
7019       PrintF("stepscount=%d ", steps_count_);
7020       PrintF("stepstook=%d ", static_cast<int>(steps_took_));
7021       PrintF("longeststep=%.f ", longest_step_);
7022     }
7023
7024     PrintF("\n");
7025   }
7026
7027   heap_->PrintShortHeapStatistics();
7028 }
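// Hedged sketch: GCTracer is used as a scoped object around a collection so
// that this destructor prints the one-line trace.  The reason strings below
// are illustrative assumptions.
//
//   { GCTracer tracer(heap, "allocation failure", "old space limit reached");
//     // ... perform the collection while the tracer measures it ...
//   }  // trace line is emitted here when --trace-gc is enabled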
7029
7030
7031 const char* GCTracer::CollectorString() {
7032   switch (collector_) {
7033     case SCAVENGER:
7034       return "Scavenge";
7035     case MARK_COMPACTOR:
7036       return "Mark-sweep";
7037   }
7038   return "Unknown GC";
7039 }
7040
7041
7042 int KeyedLookupCache::Hash(Map* map, String* name) {
7043   // Uses only lower 32 bits if pointers are larger.
7044   uintptr_t addr_hash =
7045       static_cast<uint32_t>(reinterpret_cast<uintptr_t>(map)) >> kMapHashShift;
7046   return static_cast<uint32_t>((addr_hash ^ name->Hash()) & kCapacityMask);
7047 }
7048
7049
7050 int KeyedLookupCache::Lookup(Map* map, String* name) {
7051   int index = (Hash(map, name) & kHashMask);
7052   for (int i = 0; i < kEntriesPerBucket; i++) {
7053     Key& key = keys_[index + i];
7054     if ((key.map == map) && key.name->Equals(name)) {
7055       return field_offsets_[index + i];
7056     }
7057   }
7058   return kNotFound;
7059 }
7060
7061
7062 void KeyedLookupCache::Update(Map* map, String* name, int field_offset) {
7063   String* symbol;
7064   if (HEAP->LookupSymbolIfExists(name, &symbol)) {
7065     int index = (Hash(map, symbol) & kHashMask);
7066     // After a GC there will be free slots, so we use them in order (this may
7067     // help to get the most frequently used one in position 0).
7068     for (int i = 0; i < kEntriesPerBucket; i++) {
7069       Key& key = keys_[index + i];
7070       Object* free_entry_indicator = NULL;
7071       if (key.map == free_entry_indicator) {
7072         key.map = map;
7073         key.name = symbol;
7074         field_offsets_[index + i] = field_offset;
7075         return;
7076       }
7077     }
7078     // No free entry found in this bucket, so we move them all down one and
7079     // put the new entry at position zero.
7080     for (int i = kEntriesPerBucket - 1; i > 0; i--) {
7081       Key& key = keys_[index + i];
7082       Key& key2 = keys_[index + i - 1];
7083       key = key2;
7084       field_offsets_[index + i] = field_offsets_[index + i - 1];
7085     }
7086
7087     // Write the new first entry.
7088     Key& key = keys_[index];
7089     key.map = map;
7090     key.name = symbol;
7091     field_offsets_[index] = field_offset;
7092   }
7093 }
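// Hedged example of the cache protocol (`map`, `name` and `field_offset` are
// placeholders): a missed Lookup is followed by an Update so that the next
// keyed load on the same map/name pair hits the cache.
//
//   int offset = isolate->keyed_lookup_cache()->Lookup(map, name);
//   if (offset == KeyedLookupCache::kNotFound) {
//     // ... do the slow property lookup, producing field_offset ...
//     isolate->keyed_lookup_cache()->Update(map, name, field_offset);
//   }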
7094
7095
7096 void KeyedLookupCache::Clear() {
7097   for (int index = 0; index < kLength; index++) keys_[index].map = NULL;
7098 }
7099
7100
7101 void DescriptorLookupCache::Clear() {
7102   for (int index = 0; index < kLength; index++) keys_[index].source = NULL;
7103 }
7104
7105
7106 #ifdef DEBUG
7107 void Heap::GarbageCollectionGreedyCheck() {
7108   ASSERT(FLAG_gc_greedy);
7109   if (isolate_->bootstrapper()->IsActive()) return;
7110   if (disallow_allocation_failure()) return;
7111   CollectGarbage(NEW_SPACE);
7112 }
7113 #endif
7114
7115
7116 TranscendentalCache::SubCache::SubCache(Type t)
7117   : type_(t),
7118     isolate_(Isolate::Current()) {
7119   uint32_t in0 = 0xffffffffu;  // Bit-pattern for a NaN that isn't
7120   uint32_t in1 = 0xffffffffu;  // generated by the FPU.
7121   for (int i = 0; i < kCacheSize; i++) {
7122     elements_[i].in[0] = in0;
7123     elements_[i].in[1] = in1;
7124     elements_[i].output = NULL;
7125   }
7126 }
7127
7128
7129 void TranscendentalCache::Clear() {
7130   for (int i = 0; i < kNumberOfCaches; i++) {
7131     if (caches_[i] != NULL) {
7132       delete caches_[i];
7133       caches_[i] = NULL;
7134     }
7135   }
7136 }
7137
7138
7139 void ExternalStringTable::CleanUp() {
7140   int last = 0;
7141   for (int i = 0; i < new_space_strings_.length(); ++i) {
7142     if (new_space_strings_[i] == heap_->the_hole_value()) {
7143       continue;
7144     }
7145     if (heap_->InNewSpace(new_space_strings_[i])) {
7146       new_space_strings_[last++] = new_space_strings_[i];
7147     } else {
7148       old_space_strings_.Add(new_space_strings_[i]);
7149     }
7150   }
7151   new_space_strings_.Rewind(last);
7152   last = 0;
7153   for (int i = 0; i < old_space_strings_.length(); ++i) {
7154     if (old_space_strings_[i] == heap_->the_hole_value()) {
7155       continue;
7156     }
7157     ASSERT(!heap_->InNewSpace(old_space_strings_[i]));
7158     old_space_strings_[last++] = old_space_strings_[i];
7159   }
7160   old_space_strings_.Rewind(last);
7161 #ifdef VERIFY_HEAP
7162   if (FLAG_verify_heap) {
7163     Verify();
7164   }
7165 #endif
7166 }
7167
7168
7169 void ExternalStringTable::TearDown() {
7170   new_space_strings_.Free();
7171   old_space_strings_.Free();
7172 }
7173
7174
7175 void Heap::QueueMemoryChunkForFree(MemoryChunk* chunk) {
7176   chunk->set_next_chunk(chunks_queued_for_free_);
7177   chunks_queued_for_free_ = chunk;
7178 }
7179
7180
7181 void Heap::FreeQueuedChunks() {
7182   if (chunks_queued_for_free_ == NULL) return;
7183   MemoryChunk* next;
7184   MemoryChunk* chunk;
7185   for (chunk = chunks_queued_for_free_; chunk != NULL; chunk = next) {
7186     next = chunk->next_chunk();
7187     chunk->SetFlag(MemoryChunk::ABOUT_TO_BE_FREED);
7188
7189     if (chunk->owner()->identity() == LO_SPACE) {
7190       // StoreBuffer::Filter relies on MemoryChunk::FromAnyPointerAddress.
7191       // If FromAnyPointerAddress encounters a slot that belongs to a large
7192       // chunk queued for deletion, it will fail to find the chunk because
7193       // it tries to perform a search in the list of pages owned by the large
7194       // object space, and queued chunks were detached from that list.
7195       // To work around this we split the large chunk into normal kPageSize
7196       // aligned pieces and initialize the size, owner and flags of every piece.
7197       // If FromAnyPointerAddress encounters a slot that belongs to one of
7198       // these smaller pieces it will treat it as a slot on a normal Page.
7199       Address chunk_end = chunk->address() + chunk->size();
7200       MemoryChunk* inner = MemoryChunk::FromAddress(
7201           chunk->address() + Page::kPageSize);
7202       MemoryChunk* inner_last = MemoryChunk::FromAddress(chunk_end - 1);
7203       while (inner <= inner_last) {
7204         // Size of a large chunk is always a multiple of
7205         // OS::AllocateAlignment() so there is always
7206         // enough space for a fake MemoryChunk header.
7207         Address area_end = Min(inner->address() + Page::kPageSize, chunk_end);
7208         // Guard against overflow.
7209         if (area_end < inner->address()) area_end = chunk_end;
7210         inner->SetArea(inner->address(), area_end);
7211         inner->set_size(Page::kPageSize);
7212         inner->set_owner(lo_space());
7213         inner->SetFlag(MemoryChunk::ABOUT_TO_BE_FREED);
7214         inner = MemoryChunk::FromAddress(
7215             inner->address() + Page::kPageSize);
7216       }
7217     }
7218   }
7219   isolate_->heap()->store_buffer()->Compact();
7220   isolate_->heap()->store_buffer()->Filter(MemoryChunk::ABOUT_TO_BE_FREED);
7221   for (chunk = chunks_queued_for_free_; chunk != NULL; chunk = next) {
7222     next = chunk->next_chunk();
7223     isolate_->memory_allocator()->Free(chunk);
7224   }
7225   chunks_queued_for_free_ = NULL;
7226 }
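// Hedged sketch of the deferred-free protocol: chunks are queued while the
// store buffer may still reference them and are released in one batch once
// it has been filtered.
//
//   heap->QueueMemoryChunkForFree(chunk);  // during sweeping/evacuation
//   // ...
//   heap->FreeQueuedChunks();              // when it is safe to unmap them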
7227
7228
7229 void Heap::RememberUnmappedPage(Address page, bool compacted) {
7230   uintptr_t p = reinterpret_cast<uintptr_t>(page);
7231   // Tag the page pointer to make it findable in the dump file.
7232   if (compacted) {
7233     p ^= 0xc1ead & (Page::kPageSize - 1);  // Cleared.
7234   } else {
7235     p ^= 0x1d1ed & (Page::kPageSize - 1);  // I died.
7236   }
7237   remembered_unmapped_pages_[remembered_unmapped_pages_index_] =
7238       reinterpret_cast<Address>(p);
7239   remembered_unmapped_pages_index_++;
7240   remembered_unmapped_pages_index_ %= kRememberedUnmappedPages;
7241 }
7242
7243
7244 void Heap::ClearObjectStats(bool clear_last_time_stats) {
7245   memset(object_counts_, 0, sizeof(object_counts_));
7246   memset(object_sizes_, 0, sizeof(object_sizes_));
7247   if (clear_last_time_stats) {
7248     memset(object_counts_last_time_, 0, sizeof(object_counts_last_time_));
7249     memset(object_sizes_last_time_, 0, sizeof(object_sizes_last_time_));
7250   }
7251 }
7252
7253
7254 static LazyMutex checkpoint_object_stats_mutex = LAZY_MUTEX_INITIALIZER;
7255
7256
7257 void Heap::CheckpointObjectStats() {
7258   ScopedLock lock(checkpoint_object_stats_mutex.Pointer());
7259   Counters* counters = isolate()->counters();
7260 #define ADJUST_LAST_TIME_OBJECT_COUNT(name)                                    \
7261   counters->count_of_##name()->Increment(                                      \
7262       static_cast<int>(object_counts_[name]));                                 \
7263   counters->count_of_##name()->Decrement(                                      \
7264       static_cast<int>(object_counts_last_time_[name]));                       \
7265   counters->size_of_##name()->Increment(                                       \
7266       static_cast<int>(object_sizes_[name]));                                  \
7267   counters->size_of_##name()->Decrement(                                       \
7268       static_cast<int>(object_sizes_last_time_[name]));
7269   INSTANCE_TYPE_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
7270 #undef ADJUST_LAST_TIME_OBJECT_COUNT
7271   int index;
7272 #define ADJUST_LAST_TIME_OBJECT_COUNT(name)               \
7273   index = FIRST_CODE_KIND_SUB_TYPE + Code::name;          \
7274   counters->count_of_CODE_TYPE_##name()->Increment(       \
7275       static_cast<int>(object_counts_[index]));           \
7276   counters->count_of_CODE_TYPE_##name()->Decrement(       \
7277       static_cast<int>(object_counts_last_time_[index])); \
7278   counters->size_of_CODE_TYPE_##name()->Increment(        \
7279       static_cast<int>(object_sizes_[index]));            \
7280   counters->size_of_CODE_TYPE_##name()->Decrement(        \
7281       static_cast<int>(object_sizes_last_time_[index]));
7282   CODE_KIND_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
7283 #undef ADJUST_LAST_TIME_OBJECT_COUNT
7284 #define ADJUST_LAST_TIME_OBJECT_COUNT(name)               \
7285   index = FIRST_FIXED_ARRAY_SUB_TYPE + name;              \
7286   counters->count_of_FIXED_ARRAY_##name()->Increment(     \
7287       static_cast<int>(object_counts_[index]));           \
7288   counters->count_of_FIXED_ARRAY_##name()->Decrement(     \
7289       static_cast<int>(object_counts_last_time_[index])); \
7290   counters->size_of_FIXED_ARRAY_##name()->Increment(      \
7291       static_cast<int>(object_sizes_[index]));            \
7292   counters->size_of_FIXED_ARRAY_##name()->Decrement(      \
7293       static_cast<int>(object_sizes_last_time_[index]));
7294   FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
7295 #undef ADJUST_LAST_TIME_OBJECT_COUNT
7296
7297   memcpy(object_counts_last_time_, object_counts_, sizeof(object_counts_));
7298   memcpy(object_sizes_last_time_, object_sizes_, sizeof(object_sizes_));
7299   ClearObjectStats();
7300 }
7301
7302 } }  // namespace v8::internal