1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission.
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 #include "accessors.h"
32 #include "bootstrapper.h"
34 #include "compilation-cache.h"
36 #include "deoptimizer.h"
37 #include "global-handles.h"
38 #include "heap-profiler.h"
39 #include "incremental-marking.h"
40 #include "liveobjectlist-inl.h"
41 #include "mark-compact.h"
43 #include "objects-visiting.h"
44 #include "objects-visiting-inl.h"
46 #include "runtime-profiler.h"
47 #include "scopeinfo.h"
49 #include "store-buffer.h"
50 #include "v8threads.h"
52 #include "vm-state-inl.h"
53 #if V8_TARGET_ARCH_ARM && !V8_INTERPRETED_REGEXP
54 #include "regexp-macro-assembler.h"
55 #include "arm/regexp-macro-assembler-arm.h"
57 #if V8_TARGET_ARCH_MIPS && !V8_INTERPRETED_REGEXP
58 #include "regexp-macro-assembler.h"
59 #include "mips/regexp-macro-assembler-mips.h"
68 // semispace_size_ should be a power of 2 and old_generation_size_ should be
69 // a multiple of Page::kPageSize.
70 #if defined(V8_TARGET_ARCH_X64)
71 #define LUMP_OF_MEMORY (2 * MB)
72 code_range_size_(512*MB),
74 #define LUMP_OF_MEMORY MB
78 reserved_semispace_size_(4 * Max(LUMP_OF_MEMORY, Page::kPageSize)),
79 max_semispace_size_(4 * Max(LUMP_OF_MEMORY, Page::kPageSize)),
80 initial_semispace_size_(Page::kPageSize),
81 max_old_generation_size_(192*MB),
82 max_executable_size_(max_old_generation_size_),
84 reserved_semispace_size_(8 * Max(LUMP_OF_MEMORY, Page::kPageSize)),
85 max_semispace_size_(8 * Max(LUMP_OF_MEMORY, Page::kPageSize)),
86 initial_semispace_size_(Page::kPageSize),
87 max_old_generation_size_(700ul * LUMP_OF_MEMORY),
88 max_executable_size_(256l * LUMP_OF_MEMORY),
91 // Variables set based on semispace_size_ and old_generation_size_ in
92 // ConfigureHeap (survived_since_last_expansion_, external_allocation_limit_)
93 // Will be 4 * reserved_semispace_size_ to ensure that young
94 // generation can be aligned to its size.
95 survived_since_last_expansion_(0),
97 always_allocate_scope_depth_(0),
98 linear_allocation_scope_depth_(0),
99 contexts_disposed_(0),
101 flush_monomorphic_ics_(false),
102 scan_on_scavenge_pages_(0),
104 old_pointer_space_(NULL),
105 old_data_space_(NULL),
110 gc_state_(NOT_IN_GC),
111 gc_post_processing_depth_(0),
114 remembered_unmapped_pages_index_(0),
115 unflattened_strings_length_(0),
117 allocation_allowed_(true),
118 allocation_timeout_(0),
119 disallow_allocation_failure_(false),
122 new_space_high_promotion_mode_active_(false),
123 old_gen_promotion_limit_(kMinimumPromotionLimit),
124 old_gen_allocation_limit_(kMinimumAllocationLimit),
125 old_gen_limit_factor_(1),
126 size_of_old_gen_at_last_old_space_gc_(0),
127 external_allocation_limit_(0),
128 amount_of_external_allocated_memory_(0),
129 amount_of_external_allocated_memory_at_last_global_gc_(0),
130 old_gen_exhausted_(false),
131 store_buffer_rebuilder_(store_buffer()),
132 hidden_symbol_(NULL),
133 global_gc_prologue_callback_(NULL),
134 global_gc_epilogue_callback_(NULL),
135 gc_safe_size_of_old_object_(NULL),
136 total_regexp_code_generated_(0),
138 young_survivors_after_last_gc_(0),
139 high_survival_rate_period_length_(0),
141 previous_survival_rate_trend_(Heap::STABLE),
142 survival_rate_trend_(Heap::STABLE),
144 total_gc_time_ms_(0),
145 max_alive_after_gc_(0),
146 min_in_mutator_(kMaxInt),
147 alive_after_last_gc_(0),
148 last_gc_end_timestamp_(0.0),
151 incremental_marking_(this),
152 number_idle_notifications_(0),
153 last_idle_notification_gc_count_(0),
154 last_idle_notification_gc_count_init_(false),
155 mark_sweeps_since_idle_round_started_(0),
156 ms_count_at_last_idle_notification_(0),
157 gc_count_at_last_idle_gc_(0),
158 scavenges_since_last_idle_round_(kIdleScavengeThreshold),
159 promotion_queue_(this),
161 chunks_queued_for_free_(NULL),
162 relocation_mutex_(NULL) {
163 // Allow build-time customization of the max semispace size. Building
164 // V8 with snapshots and a non-default max semispace size is much
165 // easier if you can define it as part of the build environment.
166 #if defined(V8_MAX_SEMISPACE_SIZE)
167 max_semispace_size_ = reserved_semispace_size_ = V8_MAX_SEMISPACE_SIZE;
170 intptr_t max_virtual = OS::MaxVirtualMemory();
172 if (max_virtual > 0) {
173 if (code_range_size_ > 0) {
174 // Reserve no more than 1/8 of the memory for the code range.
175 code_range_size_ = Min(code_range_size_, max_virtual >> 3);
179 memset(roots_, 0, sizeof(roots_[0]) * kRootListLength);
180 native_contexts_list_ = NULL;
181 mark_compact_collector_.heap_ = this;
182 external_string_table_.heap_ = this;
183 // Put a dummy entry in the remembered pages so we can find the list the
184 // minidump even if there are no real unmapped pages.
185 RememberUnmappedPage(NULL, false);
187 ClearObjectStats(true);
191 intptr_t Heap::Capacity() {
192 if (!HasBeenSetUp()) return 0;
194 return new_space_.Capacity() +
195 old_pointer_space_->Capacity() +
196 old_data_space_->Capacity() +
197 code_space_->Capacity() +
198 map_space_->Capacity() +
199 cell_space_->Capacity();
203 intptr_t Heap::CommittedMemory() {
204 if (!HasBeenSetUp()) return 0;
206 return new_space_.CommittedMemory() +
207 old_pointer_space_->CommittedMemory() +
208 old_data_space_->CommittedMemory() +
209 code_space_->CommittedMemory() +
210 map_space_->CommittedMemory() +
211 cell_space_->CommittedMemory() +
215 intptr_t Heap::CommittedMemoryExecutable() {
216 if (!HasBeenSetUp()) return 0;
218 return isolate()->memory_allocator()->SizeExecutable();
222 intptr_t Heap::Available() {
223 if (!HasBeenSetUp()) return 0;
225 return new_space_.Available() +
226 old_pointer_space_->Available() +
227 old_data_space_->Available() +
228 code_space_->Available() +
229 map_space_->Available() +
230 cell_space_->Available();
234 bool Heap::HasBeenSetUp() {
235 return old_pointer_space_ != NULL &&
236 old_data_space_ != NULL &&
237 code_space_ != NULL &&
238 map_space_ != NULL &&
239 cell_space_ != NULL &&
244 int Heap::GcSafeSizeOfOldObject(HeapObject* object) {
245 if (IntrusiveMarking::IsMarked(object)) {
246 return IntrusiveMarking::SizeOfMarkedObject(object);
248 return object->SizeFromMap(object->map());
252 GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space,
253 const char** reason) {
254 // Is global GC requested?
255 if (space != NEW_SPACE) {
256 isolate_->counters()->gc_compactor_caused_by_request()->Increment();
257 *reason = "GC in old space requested";
258 return MARK_COMPACTOR;
261 if (FLAG_gc_global || (FLAG_stress_compaction && (gc_count_ & 1) != 0)) {
262 *reason = "GC in old space forced by flags";
263 return MARK_COMPACTOR;
266 // Is enough data promoted to justify a global GC?
267 if (OldGenerationPromotionLimitReached()) {
268 isolate_->counters()->gc_compactor_caused_by_promoted_data()->Increment();
269 *reason = "promotion limit reached";
270 return MARK_COMPACTOR;
273 // Have allocation in OLD and LO failed?
274 if (old_gen_exhausted_) {
275 isolate_->counters()->
276 gc_compactor_caused_by_oldspace_exhaustion()->Increment();
277 *reason = "old generations exhausted";
278 return MARK_COMPACTOR;
281 // Is there enough space left in OLD to guarantee that a scavenge can
284 // Note that MemoryAllocator->MaxAvailable() undercounts the memory available
285 // for object promotion. It counts only the bytes that the memory
286 // allocator has not yet allocated from the OS and assigned to any space,
287 // and does not count available bytes already in the old space or code
288 // space. Undercounting is safe---we may get an unrequested full GC when
289 // a scavenge would have succeeded.
290 if (isolate_->memory_allocator()->MaxAvailable() <= new_space_.Size()) {
291 isolate_->counters()->
292 gc_compactor_caused_by_oldspace_exhaustion()->Increment();
293 *reason = "scavenge might not succeed";
294 return MARK_COMPACTOR;
303 // TODO(1238405): Combine the infrastructure for --heap-stats and
304 // --log-gc to avoid the complicated preprocessor and flag testing.
305 void Heap::ReportStatisticsBeforeGC() {
306 // Heap::ReportHeapStatistics will also log NewSpace statistics when
307 // compiled --log-gc is set. The following logic is used to avoid
310 if (FLAG_heap_stats || FLAG_log_gc) new_space_.CollectStatistics();
311 if (FLAG_heap_stats) {
312 ReportHeapStatistics("Before GC");
313 } else if (FLAG_log_gc) {
314 new_space_.ReportStatistics();
316 if (FLAG_heap_stats || FLAG_log_gc) new_space_.ClearHistograms();
319 new_space_.CollectStatistics();
320 new_space_.ReportStatistics();
321 new_space_.ClearHistograms();
327 void Heap::PrintShortHeapStatistics() {
328 if (!FLAG_trace_gc_verbose) return;
329 PrintPID("Memory allocator, used: %6" V8_PTR_PREFIX "d KB"
330 ", available: %6" V8_PTR_PREFIX "d KB\n",
331 isolate_->memory_allocator()->Size() / KB,
332 isolate_->memory_allocator()->Available() / KB);
333 PrintPID("New space, used: %6" V8_PTR_PREFIX "d KB"
334 ", available: %6" V8_PTR_PREFIX "d KB"
335 ", committed: %6" V8_PTR_PREFIX "d KB\n",
336 new_space_.Size() / KB,
337 new_space_.Available() / KB,
338 new_space_.CommittedMemory() / KB);
339 PrintPID("Old pointers, used: %6" V8_PTR_PREFIX "d KB"
340 ", available: %6" V8_PTR_PREFIX "d KB"
341 ", committed: %6" V8_PTR_PREFIX "d KB\n",
342 old_pointer_space_->SizeOfObjects() / KB,
343 old_pointer_space_->Available() / KB,
344 old_pointer_space_->CommittedMemory() / KB);
345 PrintPID("Old data space, used: %6" V8_PTR_PREFIX "d KB"
346 ", available: %6" V8_PTR_PREFIX "d KB"
347 ", committed: %6" V8_PTR_PREFIX "d KB\n",
348 old_data_space_->SizeOfObjects() / KB,
349 old_data_space_->Available() / KB,
350 old_data_space_->CommittedMemory() / KB);
351 PrintPID("Code space, used: %6" V8_PTR_PREFIX "d KB"
352 ", available: %6" V8_PTR_PREFIX "d KB"
353 ", committed: %6" V8_PTR_PREFIX "d KB\n",
354 code_space_->SizeOfObjects() / KB,
355 code_space_->Available() / KB,
356 code_space_->CommittedMemory() / KB);
357 PrintPID("Map space, used: %6" V8_PTR_PREFIX "d KB"
358 ", available: %6" V8_PTR_PREFIX "d KB"
359 ", committed: %6" V8_PTR_PREFIX "d KB\n",
360 map_space_->SizeOfObjects() / KB,
361 map_space_->Available() / KB,
362 map_space_->CommittedMemory() / KB);
363 PrintPID("Cell space, used: %6" V8_PTR_PREFIX "d KB"
364 ", available: %6" V8_PTR_PREFIX "d KB"
365 ", committed: %6" V8_PTR_PREFIX "d KB\n",
366 cell_space_->SizeOfObjects() / KB,
367 cell_space_->Available() / KB,
368 cell_space_->CommittedMemory() / KB);
369 PrintPID("Large object space, used: %6" V8_PTR_PREFIX "d KB"
370 ", available: %6" V8_PTR_PREFIX "d KB"
371 ", committed: %6" V8_PTR_PREFIX "d KB\n",
372 lo_space_->SizeOfObjects() / KB,
373 lo_space_->Available() / KB,
374 lo_space_->CommittedMemory() / KB);
375 PrintPID("All spaces, used: %6" V8_PTR_PREFIX "d KB"
376 ", available: %6" V8_PTR_PREFIX "d KB"
377 ", committed: %6" V8_PTR_PREFIX "d KB\n",
378 this->SizeOfObjects() / KB,
379 this->Available() / KB,
380 this->CommittedMemory() / KB);
381 PrintPID("Total time spent in GC : %d ms\n", total_gc_time_ms_);
385 // TODO(1238405): Combine the infrastructure for --heap-stats and
386 // --log-gc to avoid the complicated preprocessor and flag testing.
387 void Heap::ReportStatisticsAfterGC() {
388 // Similar to the before GC, we use some complicated logic to ensure that
389 // NewSpace statistics are logged exactly once when --log-gc is turned on.
391 if (FLAG_heap_stats) {
392 new_space_.CollectStatistics();
393 ReportHeapStatistics("After GC");
394 } else if (FLAG_log_gc) {
395 new_space_.ReportStatistics();
398 if (FLAG_log_gc) new_space_.ReportStatistics();
403 void Heap::GarbageCollectionPrologue() {
404 isolate_->transcendental_cache()->Clear();
405 ClearJSFunctionResultCaches();
407 unflattened_strings_length_ = 0;
410 if (FLAG_verify_heap) {
416 ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
417 allow_allocation(false);
419 if (FLAG_gc_verbose) Print();
421 ReportStatisticsBeforeGC();
424 LiveObjectList::GCPrologue();
425 store_buffer()->GCPrologue();
429 intptr_t Heap::SizeOfObjects() {
432 for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
433 total += space->SizeOfObjects();
439 void Heap::RepairFreeListsAfterBoot() {
441 for (PagedSpace* space = spaces.next();
443 space = spaces.next()) {
444 space->RepairFreeListsAfterBoot();
449 void Heap::GarbageCollectionEpilogue() {
450 store_buffer()->GCEpilogue();
451 LiveObjectList::GCEpilogue();
453 // In release mode, we only zap the from space under heap verification.
454 if (Heap::ShouldZapGarbage()) {
459 if (FLAG_verify_heap) {
465 allow_allocation(true);
466 if (FLAG_print_global_handles) isolate_->global_handles()->Print();
467 if (FLAG_print_handles) PrintHandles();
468 if (FLAG_gc_verbose) Print();
469 if (FLAG_code_stats) ReportCodeStatistics("After GC");
472 isolate_->counters()->alive_after_last_gc()->Set(
473 static_cast<int>(SizeOfObjects()));
475 isolate_->counters()->symbol_table_capacity()->Set(
476 symbol_table()->Capacity());
477 isolate_->counters()->number_of_symbols()->Set(
478 symbol_table()->NumberOfElements());
480 if (CommittedMemory() > 0) {
481 isolate_->counters()->external_fragmentation_total()->AddSample(
482 static_cast<int>(100 - (SizeOfObjects() * 100.0) / CommittedMemory()));
484 isolate_->counters()->heap_fraction_map_space()->AddSample(
486 (map_space()->CommittedMemory() * 100.0) / CommittedMemory()));
487 isolate_->counters()->heap_fraction_cell_space()->AddSample(
489 (cell_space()->CommittedMemory() * 100.0) / CommittedMemory()));
491 isolate_->counters()->heap_sample_total_committed()->AddSample(
492 static_cast<int>(CommittedMemory() / KB));
493 isolate_->counters()->heap_sample_total_used()->AddSample(
494 static_cast<int>(SizeOfObjects() / KB));
495 isolate_->counters()->heap_sample_map_space_committed()->AddSample(
496 static_cast<int>(map_space()->CommittedMemory() / KB));
497 isolate_->counters()->heap_sample_cell_space_committed()->AddSample(
498 static_cast<int>(cell_space()->CommittedMemory() / KB));
501 #define UPDATE_COUNTERS_FOR_SPACE(space) \
502 isolate_->counters()->space##_bytes_available()->Set( \
503 static_cast<int>(space()->Available())); \
504 isolate_->counters()->space##_bytes_committed()->Set( \
505 static_cast<int>(space()->CommittedMemory())); \
506 isolate_->counters()->space##_bytes_used()->Set( \
507 static_cast<int>(space()->SizeOfObjects()));
508 #define UPDATE_FRAGMENTATION_FOR_SPACE(space) \
509 if (space()->CommittedMemory() > 0) { \
510 isolate_->counters()->external_fragmentation_##space()->AddSample( \
511 static_cast<int>(100 - \
512 (space()->SizeOfObjects() * 100.0) / space()->CommittedMemory())); \
514 #define UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(space) \
515 UPDATE_COUNTERS_FOR_SPACE(space) \
516 UPDATE_FRAGMENTATION_FOR_SPACE(space)
518 UPDATE_COUNTERS_FOR_SPACE(new_space)
519 UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(old_pointer_space)
520 UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(old_data_space)
521 UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(code_space)
522 UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(map_space)
523 UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(cell_space)
524 UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(lo_space)
525 #undef UPDATE_COUNTERS_FOR_SPACE
526 #undef UPDATE_FRAGMENTATION_FOR_SPACE
527 #undef UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE
530 ReportStatisticsAfterGC();
532 #ifdef ENABLE_DEBUGGER_SUPPORT
533 isolate_->debug()->AfterGarbageCollection();
534 #endif // ENABLE_DEBUGGER_SUPPORT
538 void Heap::CollectAllGarbage(int flags, const char* gc_reason) {
539 // Since we are ignoring the return value, the exact choice of space does
540 // not matter, so long as we do not specify NEW_SPACE, which would not
542 mark_compact_collector_.SetFlags(flags);
543 CollectGarbage(OLD_POINTER_SPACE, gc_reason);
544 mark_compact_collector_.SetFlags(kNoGCFlags);
548 void Heap::CollectAllAvailableGarbage(const char* gc_reason) {
549 // Since we are ignoring the return value, the exact choice of space does
550 // not matter, so long as we do not specify NEW_SPACE, which would not
552 // Major GC would invoke weak handle callbacks on weakly reachable
553 // handles, but won't collect weakly reachable objects until next
554 // major GC. Therefore if we collect aggressively and weak handle callback
555 // has been invoked, we rerun major GC to release objects which become
557 // Note: as weak callbacks can execute arbitrary code, we cannot
558 // hope that eventually there will be no weak callbacks invocations.
559 // Therefore stop recollecting after several attempts.
560 mark_compact_collector()->SetFlags(kMakeHeapIterableMask |
561 kReduceMemoryFootprintMask);
562 isolate_->compilation_cache()->Clear();
563 const int kMaxNumberOfAttempts = 7;
564 for (int attempt = 0; attempt < kMaxNumberOfAttempts; attempt++) {
565 if (!CollectGarbage(OLD_POINTER_SPACE, MARK_COMPACTOR, gc_reason, NULL)) {
569 mark_compact_collector()->SetFlags(kNoGCFlags);
573 incremental_marking()->UncommitMarkingDeque();
577 bool Heap::CollectGarbage(AllocationSpace space,
578 GarbageCollector collector,
579 const char* gc_reason,
580 const char* collector_reason) {
581 // The VM is in the GC state until exiting this function.
582 VMState state(isolate_, GC);
585 // Reset the allocation timeout to the GC interval, but make sure to
586 // allow at least a few allocations after a collection. The reason
587 // for this is that we have a lot of allocation sequences and we
588 // assume that a garbage collection will allow the subsequent
589 // allocation attempts to go through.
590 allocation_timeout_ = Max(6, FLAG_gc_interval);
593 if (collector == SCAVENGER && !incremental_marking()->IsStopped()) {
594 if (FLAG_trace_incremental_marking) {
595 PrintF("[IncrementalMarking] Scavenge during marking.\n");
599 if (collector == MARK_COMPACTOR &&
600 !mark_compact_collector()->abort_incremental_marking_ &&
601 !incremental_marking()->IsStopped() &&
602 !incremental_marking()->should_hurry() &&
603 FLAG_incremental_marking_steps) {
604 // Make progress in incremental marking.
605 const intptr_t kStepSizeWhenDelayedByScavenge = 1 * MB;
606 incremental_marking()->Step(kStepSizeWhenDelayedByScavenge,
607 IncrementalMarking::NO_GC_VIA_STACK_GUARD);
608 if (!incremental_marking()->IsComplete()) {
609 if (FLAG_trace_incremental_marking) {
610 PrintF("[IncrementalMarking] Delaying MarkSweep.\n");
612 collector = SCAVENGER;
613 collector_reason = "incremental marking delaying mark-sweep";
617 bool next_gc_likely_to_collect_more = false;
619 { GCTracer tracer(this, gc_reason, collector_reason);
620 GarbageCollectionPrologue();
621 // The GC count was incremented in the prologue. Tell the tracer about
623 tracer.set_gc_count(gc_count_);
625 // Tell the tracer which collector we've selected.
626 tracer.set_collector(collector);
628 HistogramTimer* rate = (collector == SCAVENGER)
629 ? isolate_->counters()->gc_scavenger()
630 : isolate_->counters()->gc_compactor();
632 next_gc_likely_to_collect_more =
633 PerformGarbageCollection(collector, &tracer);
636 ASSERT(collector == SCAVENGER || incremental_marking()->IsStopped());
638 // This can do debug callbacks and restart incremental marking.
639 GarbageCollectionEpilogue();
642 if (incremental_marking()->IsStopped()) {
643 if (incremental_marking()->WorthActivating() && NextGCIsLikelyToBeFull()) {
644 incremental_marking()->Start();
648 return next_gc_likely_to_collect_more;
652 void Heap::PerformScavenge() {
653 GCTracer tracer(this, NULL, NULL);
654 if (incremental_marking()->IsStopped()) {
655 PerformGarbageCollection(SCAVENGER, &tracer);
657 PerformGarbageCollection(MARK_COMPACTOR, &tracer);
663 // Helper class for verifying the symbol table.
664 class SymbolTableVerifier : public ObjectVisitor {
666 void VisitPointers(Object** start, Object** end) {
667 // Visit all HeapObject pointers in [start, end).
668 for (Object** p = start; p < end; p++) {
669 if ((*p)->IsHeapObject()) {
670 // Check that the symbol is actually a symbol.
671 CHECK((*p)->IsTheHole() || (*p)->IsUndefined() || (*p)->IsSymbol());
678 static void VerifySymbolTable() {
679 SymbolTableVerifier verifier;
680 HEAP->symbol_table()->IterateElements(&verifier);
682 #endif // VERIFY_HEAP
685 static bool AbortIncrementalMarkingAndCollectGarbage(
687 AllocationSpace space,
688 const char* gc_reason = NULL) {
689 heap->mark_compact_collector()->SetFlags(Heap::kAbortIncrementalMarkingMask);
690 bool result = heap->CollectGarbage(space, gc_reason);
691 heap->mark_compact_collector()->SetFlags(Heap::kNoGCFlags);
696 void Heap::ReserveSpace(
698 Address *locations_out) {
699 bool gc_performed = true;
701 static const int kThreshold = 20;
702 while (gc_performed && counter++ < kThreshold) {
703 gc_performed = false;
704 ASSERT(NEW_SPACE == FIRST_PAGED_SPACE - 1);
705 for (int space = NEW_SPACE; space <= LAST_PAGED_SPACE; space++) {
706 if (sizes[space] != 0) {
707 MaybeObject* allocation;
708 if (space == NEW_SPACE) {
709 allocation = new_space()->AllocateRaw(sizes[space]);
711 allocation = paged_space(space)->AllocateRaw(sizes[space]);
714 if (!allocation->To<FreeListNode>(&node)) {
715 if (space == NEW_SPACE) {
716 Heap::CollectGarbage(NEW_SPACE,
717 "failed to reserve space in the new space");
719 AbortIncrementalMarkingAndCollectGarbage(
721 static_cast<AllocationSpace>(space),
722 "failed to reserve space in paged space");
727 // Mark with a free list node, in case we have a GC before
729 node->set_size(this, sizes[space]);
730 locations_out[space] = node->address();
737 // Failed to reserve the space after several attempts.
738 V8::FatalProcessOutOfMemory("Heap::ReserveSpace");
743 void Heap::EnsureFromSpaceIsCommitted() {
744 if (new_space_.CommitFromSpaceIfNeeded()) return;
746 // Committing memory to from space failed.
747 // Try shrinking and try again.
749 if (new_space_.CommitFromSpaceIfNeeded()) return;
751 // Committing memory to from space failed again.
752 // Memory is exhausted and we will die.
753 V8::FatalProcessOutOfMemory("Committing semi space failed.");
757 void Heap::ClearJSFunctionResultCaches() {
758 if (isolate_->bootstrapper()->IsActive()) return;
760 Object* context = native_contexts_list_;
761 while (!context->IsUndefined()) {
762 // Get the caches for this context. GC can happen when the context
763 // is not fully initialized, so the caches can be undefined.
764 Object* caches_or_undefined =
765 Context::cast(context)->get(Context::JSFUNCTION_RESULT_CACHES_INDEX);
766 if (!caches_or_undefined->IsUndefined()) {
767 FixedArray* caches = FixedArray::cast(caches_or_undefined);
769 int length = caches->length();
770 for (int i = 0; i < length; i++) {
771 JSFunctionResultCache::cast(caches->get(i))->Clear();
774 // Get the next context:
775 context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
781 void Heap::ClearNormalizedMapCaches() {
782 if (isolate_->bootstrapper()->IsActive() &&
783 !incremental_marking()->IsMarking()) {
787 Object* context = native_contexts_list_;
788 while (!context->IsUndefined()) {
789 // GC can happen when the context is not fully initialized,
790 // so the cache can be undefined.
792 Context::cast(context)->get(Context::NORMALIZED_MAP_CACHE_INDEX);
793 if (!cache->IsUndefined()) {
794 NormalizedMapCache::cast(cache)->Clear();
796 context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
801 void Heap::UpdateSurvivalRateTrend(int start_new_space_size) {
802 double survival_rate =
803 (static_cast<double>(young_survivors_after_last_gc_) * 100) /
804 start_new_space_size;
806 if (survival_rate > kYoungSurvivalRateHighThreshold) {
807 high_survival_rate_period_length_++;
809 high_survival_rate_period_length_ = 0;
812 if (survival_rate < kYoungSurvivalRateLowThreshold) {
813 low_survival_rate_period_length_++;
815 low_survival_rate_period_length_ = 0;
818 double survival_rate_diff = survival_rate_ - survival_rate;
820 if (survival_rate_diff > kYoungSurvivalRateAllowedDeviation) {
821 set_survival_rate_trend(DECREASING);
822 } else if (survival_rate_diff < -kYoungSurvivalRateAllowedDeviation) {
823 set_survival_rate_trend(INCREASING);
825 set_survival_rate_trend(STABLE);
828 survival_rate_ = survival_rate;
831 bool Heap::PerformGarbageCollection(GarbageCollector collector,
833 bool next_gc_likely_to_collect_more = false;
835 if (collector != SCAVENGER) {
836 PROFILE(isolate_, CodeMovingGCEvent());
840 if (FLAG_verify_heap) {
845 if (collector == MARK_COMPACTOR && global_gc_prologue_callback_) {
846 ASSERT(!allocation_allowed_);
847 GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
848 global_gc_prologue_callback_();
852 collector == MARK_COMPACTOR ? kGCTypeMarkSweepCompact : kGCTypeScavenge;
854 for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
855 if (gc_type & gc_prologue_callbacks_[i].gc_type) {
856 gc_prologue_callbacks_[i].callback(gc_type, kNoGCCallbackFlags);
860 EnsureFromSpaceIsCommitted();
862 int start_new_space_size = Heap::new_space()->SizeAsInt();
864 if (IsHighSurvivalRate()) {
865 // We speed up the incremental marker if it is running so that it
866 // does not fall behind the rate of promotion, which would cause a
867 // constantly growing old space.
868 incremental_marking()->NotifyOfHighPromotionRate();
871 if (collector == MARK_COMPACTOR) {
872 // Perform mark-sweep with optional compaction.
875 bool high_survival_rate_during_scavenges = IsHighSurvivalRate() &&
876 IsStableOrIncreasingSurvivalTrend();
878 UpdateSurvivalRateTrend(start_new_space_size);
880 size_of_old_gen_at_last_old_space_gc_ = PromotedSpaceSizeOfObjects();
882 if (high_survival_rate_during_scavenges &&
883 IsStableOrIncreasingSurvivalTrend()) {
884 // Stable high survival rates of young objects both during partial and
885 // full collection indicate that mutator is either building or modifying
886 // a structure with a long lifetime.
887 // In this case we aggressively raise old generation memory limits to
888 // postpone subsequent mark-sweep collection and thus trade memory
889 // space for the mutation speed.
890 old_gen_limit_factor_ = 2;
892 old_gen_limit_factor_ = 1;
895 old_gen_promotion_limit_ =
896 OldGenPromotionLimit(size_of_old_gen_at_last_old_space_gc_);
897 old_gen_allocation_limit_ =
898 OldGenAllocationLimit(size_of_old_gen_at_last_old_space_gc_);
900 old_gen_exhausted_ = false;
906 UpdateSurvivalRateTrend(start_new_space_size);
909 if (!new_space_high_promotion_mode_active_ &&
910 new_space_.Capacity() == new_space_.MaximumCapacity() &&
911 IsStableOrIncreasingSurvivalTrend() &&
912 IsHighSurvivalRate()) {
913 // Stable high survival rates even though young generation is at
914 // maximum capacity indicates that most objects will be promoted.
915 // To decrease scavenger pauses and final mark-sweep pauses, we
916 // have to limit maximal capacity of the young generation.
917 new_space_high_promotion_mode_active_ = true;
919 PrintPID("Limited new space size due to high promotion rate: %d MB\n",
920 new_space_.InitialCapacity() / MB);
922 } else if (new_space_high_promotion_mode_active_ &&
923 IsStableOrDecreasingSurvivalTrend() &&
924 IsLowSurvivalRate()) {
925 // Decreasing low survival rates might indicate that the above high
926 // promotion mode is over and we should allow the young generation
928 new_space_high_promotion_mode_active_ = false;
930 PrintPID("Unlimited new space size due to low promotion rate: %d MB\n",
931 new_space_.MaximumCapacity() / MB);
935 if (new_space_high_promotion_mode_active_ &&
936 new_space_.Capacity() > new_space_.InitialCapacity()) {
940 isolate_->counters()->objs_since_last_young()->Set(0);
942 gc_post_processing_depth_++;
943 { DisableAssertNoAllocation allow_allocation;
944 GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
945 next_gc_likely_to_collect_more =
946 isolate_->global_handles()->PostGarbageCollectionProcessing(collector);
948 gc_post_processing_depth_--;
950 // Update relocatables.
951 Relocatable::PostGarbageCollectionProcessing();
953 if (collector == MARK_COMPACTOR) {
954 // Register the amount of external allocated memory.
955 amount_of_external_allocated_memory_at_last_global_gc_ =
956 amount_of_external_allocated_memory_;
959 GCCallbackFlags callback_flags = kNoGCCallbackFlags;
960 for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
961 if (gc_type & gc_epilogue_callbacks_[i].gc_type) {
962 gc_epilogue_callbacks_[i].callback(gc_type, callback_flags);
966 if (collector == MARK_COMPACTOR && global_gc_epilogue_callback_) {
967 ASSERT(!allocation_allowed_);
968 GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
969 global_gc_epilogue_callback_();
973 if (FLAG_verify_heap) {
978 return next_gc_likely_to_collect_more;
982 void Heap::MarkCompact(GCTracer* tracer) {
983 gc_state_ = MARK_COMPACT;
984 LOG(isolate_, ResourceEvent("markcompact", "begin"));
986 mark_compact_collector_.Prepare(tracer);
989 tracer->set_full_gc_count(ms_count_);
991 MarkCompactPrologue();
993 mark_compact_collector_.CollectGarbage();
995 LOG(isolate_, ResourceEvent("markcompact", "end"));
997 gc_state_ = NOT_IN_GC;
999 isolate_->counters()->objs_since_last_full()->Set(0);
1001 contexts_disposed_ = 0;
1003 flush_monomorphic_ics_ = false;
1007 void Heap::MarkCompactPrologue() {
1008 // At any old GC clear the keyed lookup cache to enable collection of unused
1010 isolate_->keyed_lookup_cache()->Clear();
1011 isolate_->context_slot_cache()->Clear();
1012 isolate_->descriptor_lookup_cache()->Clear();
1013 RegExpResultsCache::Clear(string_split_cache());
1014 RegExpResultsCache::Clear(regexp_multiple_cache());
1016 isolate_->compilation_cache()->MarkCompactPrologue();
1018 CompletelyClearInstanceofCache();
1020 FlushNumberStringCache();
1021 if (FLAG_cleanup_code_caches_at_gc) {
1022 polymorphic_code_cache()->set_cache(undefined_value());
1025 ClearNormalizedMapCaches();
1029 Object* Heap::FindCodeObject(Address a) {
1030 return isolate()->inner_pointer_to_code_cache()->
1031 GcSafeFindCodeForInnerPointer(a);
1035 // Helper class for copying HeapObjects
1036 class ScavengeVisitor: public ObjectVisitor {
1038 explicit ScavengeVisitor(Heap* heap) : heap_(heap) {}
1040 void VisitPointer(Object** p) { ScavengePointer(p); }
1042 void VisitPointers(Object** start, Object** end) {
1043 // Copy all HeapObject pointers in [start, end)
1044 for (Object** p = start; p < end; p++) ScavengePointer(p);
1048 void ScavengePointer(Object** p) {
1049 Object* object = *p;
1050 if (!heap_->InNewSpace(object)) return;
1051 Heap::ScavengeObject(reinterpret_cast<HeapObject**>(p),
1052 reinterpret_cast<HeapObject*>(object));
1060 // Visitor class to verify pointers in code or data space do not point into
1062 class VerifyNonPointerSpacePointersVisitor: public ObjectVisitor {
1064 void VisitPointers(Object** start, Object**end) {
1065 for (Object** current = start; current < end; current++) {
1066 if ((*current)->IsHeapObject()) {
1067 CHECK(!HEAP->InNewSpace(HeapObject::cast(*current)));
1074 static void VerifyNonPointerSpacePointers() {
1075 // Verify that there are no pointers to new space in spaces where we
1076 // do not expect them.
1077 VerifyNonPointerSpacePointersVisitor v;
1078 HeapObjectIterator code_it(HEAP->code_space());
1079 for (HeapObject* object = code_it.Next();
1080 object != NULL; object = code_it.Next())
1081 object->Iterate(&v);
1083 // The old data space was normally swept conservatively so that the iterator
1084 // doesn't work, so we normally skip the next bit.
1085 if (!HEAP->old_data_space()->was_swept_conservatively()) {
1086 HeapObjectIterator data_it(HEAP->old_data_space());
1087 for (HeapObject* object = data_it.Next();
1088 object != NULL; object = data_it.Next())
1089 object->Iterate(&v);
1092 #endif // VERIFY_HEAP
1095 void Heap::CheckNewSpaceExpansionCriteria() {
1096 if (new_space_.Capacity() < new_space_.MaximumCapacity() &&
1097 survived_since_last_expansion_ > new_space_.Capacity() &&
1098 !new_space_high_promotion_mode_active_) {
1099 // Grow the size of new space if there is room to grow, enough data
1100 // has survived scavenge since the last expansion and we are not in
1101 // high promotion mode.
1103 survived_since_last_expansion_ = 0;
1108 static bool IsUnscavengedHeapObject(Heap* heap, Object** p) {
1109 return heap->InNewSpace(*p) &&
1110 !HeapObject::cast(*p)->map_word().IsForwardingAddress();
1114 void Heap::ScavengeStoreBufferCallback(
1117 StoreBufferEvent event) {
1118 heap->store_buffer_rebuilder_.Callback(page, event);
1122 void StoreBufferRebuilder::Callback(MemoryChunk* page, StoreBufferEvent event) {
1123 if (event == kStoreBufferStartScanningPagesEvent) {
1124 start_of_current_page_ = NULL;
1125 current_page_ = NULL;
1126 } else if (event == kStoreBufferScanningPageEvent) {
1127 if (current_page_ != NULL) {
1128 // If this page already overflowed the store buffer during this iteration.
1129 if (current_page_->scan_on_scavenge()) {
1130 // Then we should wipe out the entries that have been added for it.
1131 store_buffer_->SetTop(start_of_current_page_);
1132 } else if (store_buffer_->Top() - start_of_current_page_ >=
1133 (store_buffer_->Limit() - store_buffer_->Top()) >> 2) {
1134 // Did we find too many pointers in the previous page? The heuristic is
1135 // that no page can take more then 1/5 the remaining slots in the store
1137 current_page_->set_scan_on_scavenge(true);
1138 store_buffer_->SetTop(start_of_current_page_);
1140 // In this case the page we scanned took a reasonable number of slots in
1141 // the store buffer. It has now been rehabilitated and is no longer
1142 // marked scan_on_scavenge.
1143 ASSERT(!current_page_->scan_on_scavenge());
1146 start_of_current_page_ = store_buffer_->Top();
1147 current_page_ = page;
1148 } else if (event == kStoreBufferFullEvent) {
1149 // The current page overflowed the store buffer again. Wipe out its entries
1150 // in the store buffer and mark it scan-on-scavenge again. This may happen
1151 // several times while scanning.
1152 if (current_page_ == NULL) {
1153 // Store Buffer overflowed while scanning promoted objects. These are not
1154 // in any particular page, though they are likely to be clustered by the
1155 // allocation routines.
1156 store_buffer_->EnsureSpace(StoreBuffer::kStoreBufferSize);
1158 // Store Buffer overflowed while scanning a particular old space page for
1159 // pointers to new space.
1160 ASSERT(current_page_ == page);
1161 ASSERT(page != NULL);
1162 current_page_->set_scan_on_scavenge(true);
1163 ASSERT(start_of_current_page_ != store_buffer_->Top());
1164 store_buffer_->SetTop(start_of_current_page_);
1172 void PromotionQueue::Initialize() {
1173 // Assumes that a NewSpacePage exactly fits a number of promotion queue
1174 // entries (where each is a pair of intptr_t). This allows us to simplify
1175 // the test fpr when to switch pages.
1176 ASSERT((Page::kPageSize - MemoryChunk::kBodyOffset) % (2 * kPointerSize)
1178 limit_ = reinterpret_cast<intptr_t*>(heap_->new_space()->ToSpaceStart());
1180 reinterpret_cast<intptr_t*>(heap_->new_space()->ToSpaceEnd());
1181 emergency_stack_ = NULL;
1186 void PromotionQueue::RelocateQueueHead() {
1187 ASSERT(emergency_stack_ == NULL);
1189 Page* p = Page::FromAllocationTop(reinterpret_cast<Address>(rear_));
1190 intptr_t* head_start = rear_;
1191 intptr_t* head_end =
1192 Min(front_, reinterpret_cast<intptr_t*>(p->area_end()));
1195 static_cast<int>(head_end - head_start) / kEntrySizeInWords;
1197 emergency_stack_ = new List<Entry>(2 * entries_count);
1199 while (head_start != head_end) {
1200 int size = static_cast<int>(*(head_start++));
1201 HeapObject* obj = reinterpret_cast<HeapObject*>(*(head_start++));
1202 emergency_stack_->Add(Entry(obj, size));
1208 class ScavengeWeakObjectRetainer : public WeakObjectRetainer {
1210 explicit ScavengeWeakObjectRetainer(Heap* heap) : heap_(heap) { }
1212 virtual Object* RetainAs(Object* object) {
1213 if (!heap_->InFromSpace(object)) {
1217 MapWord map_word = HeapObject::cast(object)->map_word();
1218 if (map_word.IsForwardingAddress()) {
1219 return map_word.ToForwardingAddress();
1229 void Heap::Scavenge() {
1230 RelocationLock relocation_lock(this);
1233 if (FLAG_verify_heap) VerifyNonPointerSpacePointers();
1236 gc_state_ = SCAVENGE;
1238 // Implements Cheney's copying algorithm
1239 LOG(isolate_, ResourceEvent("scavenge", "begin"));
1241 // Clear descriptor cache.
1242 isolate_->descriptor_lookup_cache()->Clear();
1244 // Used for updating survived_since_last_expansion_ at function end.
1245 intptr_t survived_watermark = PromotedSpaceSizeOfObjects();
1247 CheckNewSpaceExpansionCriteria();
1249 SelectScavengingVisitorsTable();
1251 incremental_marking()->PrepareForScavenge();
1253 AdvanceSweepers(static_cast<int>(new_space_.Size()));
1255 // Flip the semispaces. After flipping, to space is empty, from space has
1258 new_space_.ResetAllocationInfo();
1260 // We need to sweep newly copied objects which can be either in the
1261 // to space or promoted to the old generation. For to-space
1262 // objects, we treat the bottom of the to space as a queue. Newly
1263 // copied and unswept objects lie between a 'front' mark and the
1264 // allocation pointer.
1266 // Promoted objects can go into various old-generation spaces, and
1267 // can be allocated internally in the spaces (from the free list).
1268 // We treat the top of the to space as a queue of addresses of
1269 // promoted objects. The addresses of newly promoted and unswept
1270 // objects lie between a 'front' mark and a 'rear' mark that is
1271 // updated as a side effect of promoting an object.
1273 // There is guaranteed to be enough room at the top of the to space
1274 // for the addresses of promoted objects: every object promoted
1275 // frees up its size in bytes from the top of the new space, and
1276 // objects are at least one pointer in size.
1277 Address new_space_front = new_space_.ToSpaceStart();
1278 promotion_queue_.Initialize();
1281 store_buffer()->Clean();
1284 ScavengeVisitor scavenge_visitor(this);
1286 IterateRoots(&scavenge_visitor, VISIT_ALL_IN_SCAVENGE);
1288 // Copy objects reachable from the old generation.
1290 StoreBufferRebuildScope scope(this,
1292 &ScavengeStoreBufferCallback);
1293 store_buffer()->IteratePointersToNewSpace(&ScavengeObject);
1296 // Copy objects reachable from cells by scavenging cell values directly.
1297 HeapObjectIterator cell_iterator(cell_space_);
1298 for (HeapObject* heap_object = cell_iterator.Next();
1299 heap_object != NULL;
1300 heap_object = cell_iterator.Next()) {
1301 if (heap_object->IsJSGlobalPropertyCell()) {
1302 JSGlobalPropertyCell* cell = JSGlobalPropertyCell::cast(heap_object);
1303 Address value_address = cell->ValueAddress();
1304 scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(value_address));
1308 // Scavenge object reachable from the native contexts list directly.
1309 scavenge_visitor.VisitPointer(BitCast<Object**>(&native_contexts_list_));
1311 new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
1312 isolate_->global_handles()->IdentifyNewSpaceWeakIndependentHandles(
1313 &IsUnscavengedHeapObject);
1314 isolate_->global_handles()->IterateNewSpaceWeakIndependentRoots(
1316 new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
1318 UpdateNewSpaceReferencesInExternalStringTable(
1319 &UpdateNewSpaceReferenceInExternalStringTableEntry);
1321 promotion_queue_.Destroy();
1323 LiveObjectList::UpdateReferencesForScavengeGC();
1324 if (!FLAG_watch_ic_patching) {
1325 isolate()->runtime_profiler()->UpdateSamplesAfterScavenge();
1327 incremental_marking()->UpdateMarkingDequeAfterScavenge();
1329 ScavengeWeakObjectRetainer weak_object_retainer(this);
1330 ProcessWeakReferences(&weak_object_retainer);
1332 ASSERT(new_space_front == new_space_.top());
1335 new_space_.set_age_mark(new_space_.top());
1337 new_space_.LowerInlineAllocationLimit(
1338 new_space_.inline_allocation_limit_step());
1340 // Update how much has survived scavenge.
1341 IncrementYoungSurvivorsCounter(static_cast<int>(
1342 (PromotedSpaceSizeOfObjects() - survived_watermark) + new_space_.Size()));
1344 LOG(isolate_, ResourceEvent("scavenge", "end"));
1346 gc_state_ = NOT_IN_GC;
1348 scavenges_since_last_idle_round_++;
1352 String* Heap::UpdateNewSpaceReferenceInExternalStringTableEntry(Heap* heap,
1354 MapWord first_word = HeapObject::cast(*p)->map_word();
1356 if (!first_word.IsForwardingAddress()) {
1357 // Unreachable external string can be finalized.
1358 heap->FinalizeExternalString(String::cast(*p));
1362 // String is still reachable.
1363 return String::cast(first_word.ToForwardingAddress());
1367 void Heap::UpdateNewSpaceReferencesInExternalStringTable(
1368 ExternalStringTableUpdaterCallback updater_func) {
1370 if (FLAG_verify_heap) {
1371 external_string_table_.Verify();
1375 if (external_string_table_.new_space_strings_.is_empty()) return;
1377 Object** start = &external_string_table_.new_space_strings_[0];
1378 Object** end = start + external_string_table_.new_space_strings_.length();
1379 Object** last = start;
1381 for (Object** p = start; p < end; ++p) {
1382 ASSERT(InFromSpace(*p));
1383 String* target = updater_func(this, p);
1385 if (target == NULL) continue;
1387 ASSERT(target->IsExternalString());
1389 if (InNewSpace(target)) {
1390 // String is still in new space. Update the table entry.
1394 // String got promoted. Move it to the old string list.
1395 external_string_table_.AddOldString(target);
1399 ASSERT(last <= end);
1400 external_string_table_.ShrinkNewStrings(static_cast<int>(last - start));
1404 void Heap::UpdateReferencesInExternalStringTable(
1405 ExternalStringTableUpdaterCallback updater_func) {
1407 // Update old space string references.
1408 if (external_string_table_.old_space_strings_.length() > 0) {
1409 Object** start = &external_string_table_.old_space_strings_[0];
1410 Object** end = start + external_string_table_.old_space_strings_.length();
1411 for (Object** p = start; p < end; ++p) *p = updater_func(this, p);
1414 UpdateNewSpaceReferencesInExternalStringTable(updater_func);
1418 static Object* ProcessFunctionWeakReferences(Heap* heap,
1420 WeakObjectRetainer* retainer,
1421 bool record_slots) {
1422 Object* undefined = heap->undefined_value();
1423 Object* head = undefined;
1424 JSFunction* tail = NULL;
1425 Object* candidate = function;
1426 while (candidate != undefined) {
1427 // Check whether to keep the candidate in the list.
1428 JSFunction* candidate_function = reinterpret_cast<JSFunction*>(candidate);
1429 Object* retain = retainer->RetainAs(candidate);
1430 if (retain != NULL) {
1431 if (head == undefined) {
1432 // First element in the list.
1435 // Subsequent elements in the list.
1436 ASSERT(tail != NULL);
1437 tail->set_next_function_link(retain);
1439 Object** next_function =
1440 HeapObject::RawField(tail, JSFunction::kNextFunctionLinkOffset);
1441 heap->mark_compact_collector()->RecordSlot(
1442 next_function, next_function, retain);
1445 // Retained function is new tail.
1446 candidate_function = reinterpret_cast<JSFunction*>(retain);
1447 tail = candidate_function;
1449 ASSERT(retain->IsUndefined() || retain->IsJSFunction());
1451 if (retain == undefined) break;
1454 // Move to next element in the list.
1455 candidate = candidate_function->next_function_link();
1458 // Terminate the list if there is one or more elements.
1460 tail->set_next_function_link(undefined);
1467 void Heap::ProcessWeakReferences(WeakObjectRetainer* retainer) {
1468 Object* undefined = undefined_value();
1469 Object* head = undefined;
1470 Context* tail = NULL;
1471 Object* candidate = native_contexts_list_;
1473 // We don't record weak slots during marking or scavenges.
1474 // Instead we do it once when we complete mark-compact cycle.
1475 // Note that write barrier has no effect if we are already in the middle of
1476 // compacting mark-sweep cycle and we have to record slots manually.
1478 gc_state() == MARK_COMPACT &&
1479 mark_compact_collector()->is_compacting();
1481 while (candidate != undefined) {
1482 // Check whether to keep the candidate in the list.
1483 Context* candidate_context = reinterpret_cast<Context*>(candidate);
1484 Object* retain = retainer->RetainAs(candidate);
1485 if (retain != NULL) {
1486 if (head == undefined) {
1487 // First element in the list.
1490 // Subsequent elements in the list.
1491 ASSERT(tail != NULL);
1492 tail->set_unchecked(this,
1493 Context::NEXT_CONTEXT_LINK,
1495 UPDATE_WRITE_BARRIER);
1498 Object** next_context =
1499 HeapObject::RawField(
1500 tail, FixedArray::SizeFor(Context::NEXT_CONTEXT_LINK));
1501 mark_compact_collector()->RecordSlot(
1502 next_context, next_context, retain);
1505 // Retained context is new tail.
1506 candidate_context = reinterpret_cast<Context*>(retain);
1507 tail = candidate_context;
1509 if (retain == undefined) break;
1511 // Process the weak list of optimized functions for the context.
1512 Object* function_list_head =
1513 ProcessFunctionWeakReferences(
1515 candidate_context->get(Context::OPTIMIZED_FUNCTIONS_LIST),
1518 candidate_context->set_unchecked(this,
1519 Context::OPTIMIZED_FUNCTIONS_LIST,
1521 UPDATE_WRITE_BARRIER);
1523 Object** optimized_functions =
1524 HeapObject::RawField(
1525 tail, FixedArray::SizeFor(Context::OPTIMIZED_FUNCTIONS_LIST));
1526 mark_compact_collector()->RecordSlot(
1527 optimized_functions, optimized_functions, function_list_head);
1531 // Move to next element in the list.
1532 candidate = candidate_context->get(Context::NEXT_CONTEXT_LINK);
1535 // Terminate the list if there is one or more elements.
1537 tail->set_unchecked(this,
1538 Context::NEXT_CONTEXT_LINK,
1539 Heap::undefined_value(),
1540 UPDATE_WRITE_BARRIER);
1543 // Update the head of the list of contexts.
1544 native_contexts_list_ = head;
1548 void Heap::VisitExternalResources(v8::ExternalResourceVisitor* visitor) {
1549 AssertNoAllocation no_allocation;
1551 class VisitorAdapter : public ObjectVisitor {
1553 explicit VisitorAdapter(v8::ExternalResourceVisitor* visitor)
1554 : visitor_(visitor) {}
1555 virtual void VisitPointers(Object** start, Object** end) {
1556 for (Object** p = start; p < end; p++) {
1557 if ((*p)->IsExternalString()) {
1558 visitor_->VisitExternalString(Utils::ToLocal(
1559 Handle<String>(String::cast(*p))));
1564 v8::ExternalResourceVisitor* visitor_;
1565 } visitor_adapter(visitor);
1566 external_string_table_.Iterate(&visitor_adapter);
1570 class NewSpaceScavenger : public StaticNewSpaceVisitor<NewSpaceScavenger> {
1572 static inline void VisitPointer(Heap* heap, Object** p) {
1573 Object* object = *p;
1574 if (!heap->InNewSpace(object)) return;
1575 Heap::ScavengeObject(reinterpret_cast<HeapObject**>(p),
1576 reinterpret_cast<HeapObject*>(object));
1581 Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
1582 Address new_space_front) {
1584 SemiSpace::AssertValidRange(new_space_front, new_space_.top());
1585 // The addresses new_space_front and new_space_.top() define a
1586 // queue of unprocessed copied objects. Process them until the
1588 while (new_space_front != new_space_.top()) {
1589 if (!NewSpacePage::IsAtEnd(new_space_front)) {
1590 HeapObject* object = HeapObject::FromAddress(new_space_front);
1592 NewSpaceScavenger::IterateBody(object->map(), object);
1595 NewSpacePage::FromLimit(new_space_front)->next_page()->area_start();
1599 // Promote and process all the to-be-promoted objects.
1601 StoreBufferRebuildScope scope(this,
1603 &ScavengeStoreBufferCallback);
1604 while (!promotion_queue()->is_empty()) {
1607 promotion_queue()->remove(&target, &size);
1609 // Promoted object might be already partially visited
1610 // during old space pointer iteration. Thus we search specificly
1611 // for pointers to from semispace instead of looking for pointers
1613 ASSERT(!target->IsMap());
1614 IterateAndMarkPointersToFromSpace(target->address(),
1615 target->address() + size,
1620 // Take another spin if there are now unswept objects in new space
1621 // (there are currently no more unswept promoted objects).
1622 } while (new_space_front != new_space_.top());
1624 return new_space_front;
1628 STATIC_ASSERT((FixedDoubleArray::kHeaderSize & kDoubleAlignmentMask) == 0);
1631 INLINE(static HeapObject* EnsureDoubleAligned(Heap* heap,
1635 static HeapObject* EnsureDoubleAligned(Heap* heap,
1638 if ((OffsetFrom(object->address()) & kDoubleAlignmentMask) != 0) {
1639 heap->CreateFillerObjectAt(object->address(), kPointerSize);
1640 return HeapObject::FromAddress(object->address() + kPointerSize);
1642 heap->CreateFillerObjectAt(object->address() + size - kPointerSize,
1649 enum LoggingAndProfiling {
1650 LOGGING_AND_PROFILING_ENABLED,
1651 LOGGING_AND_PROFILING_DISABLED
1655 enum MarksHandling { TRANSFER_MARKS, IGNORE_MARKS };
1658 template<MarksHandling marks_handling,
1659 LoggingAndProfiling logging_and_profiling_mode>
1660 class ScavengingVisitor : public StaticVisitorBase {
1662 static void Initialize() {
1663 table_.Register(kVisitSeqAsciiString, &EvacuateSeqAsciiString);
1664 table_.Register(kVisitSeqTwoByteString, &EvacuateSeqTwoByteString);
1665 table_.Register(kVisitShortcutCandidate, &EvacuateShortcutCandidate);
1666 table_.Register(kVisitByteArray, &EvacuateByteArray);
1667 table_.Register(kVisitFixedArray, &EvacuateFixedArray);
1668 table_.Register(kVisitFixedDoubleArray, &EvacuateFixedDoubleArray);
1670 table_.Register(kVisitNativeContext,
1671 &ObjectEvacuationStrategy<POINTER_OBJECT>::
1672 template VisitSpecialized<Context::kSize>);
1674 table_.Register(kVisitConsString,
1675 &ObjectEvacuationStrategy<POINTER_OBJECT>::
1676 template VisitSpecialized<ConsString::kSize>);
1678 table_.Register(kVisitSlicedString,
1679 &ObjectEvacuationStrategy<POINTER_OBJECT>::
1680 template VisitSpecialized<SlicedString::kSize>);
1682 table_.Register(kVisitSharedFunctionInfo,
1683 &ObjectEvacuationStrategy<POINTER_OBJECT>::
1684 template VisitSpecialized<SharedFunctionInfo::kSize>);
1686 table_.Register(kVisitJSWeakMap,
1687 &ObjectEvacuationStrategy<POINTER_OBJECT>::
1690 table_.Register(kVisitJSRegExp,
1691 &ObjectEvacuationStrategy<POINTER_OBJECT>::
1694 if (marks_handling == IGNORE_MARKS) {
1695 table_.Register(kVisitJSFunction,
1696 &ObjectEvacuationStrategy<POINTER_OBJECT>::
1697 template VisitSpecialized<JSFunction::kSize>);
1699 table_.Register(kVisitJSFunction, &EvacuateJSFunction);
1702 table_.RegisterSpecializations<ObjectEvacuationStrategy<DATA_OBJECT>,
1704 kVisitDataObjectGeneric>();
1706 table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>,
1708 kVisitJSObjectGeneric>();
1710 table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>,
1712 kVisitStructGeneric>();
1715 static VisitorDispatchTable<ScavengingCallback>* GetTable() {
1720 enum ObjectContents { DATA_OBJECT, POINTER_OBJECT };
1721 enum SizeRestriction { SMALL, UNKNOWN_SIZE };
1723 static void RecordCopiedObject(Heap* heap, HeapObject* obj) {
1724 bool should_record = false;
1726 should_record = FLAG_heap_stats;
1728 should_record = should_record || FLAG_log_gc;
1729 if (should_record) {
1730 if (heap->new_space()->Contains(obj)) {
1731 heap->new_space()->RecordAllocation(obj);
1733 heap->new_space()->RecordPromotion(obj);
1738 // Helper function used by CopyObject to copy a source object to an
1739 // allocated target object and update the forwarding pointer in the source
1740 // object. Returns the target object.
1741 INLINE(static void MigrateObject(Heap* heap,
1745 // Copy the content of source to target.
1746 heap->CopyBlock(target->address(), source->address(), size);
1748 // Set the forwarding address.
1749 source->set_map_word(MapWord::FromForwardingAddress(target));
1751 if (logging_and_profiling_mode == LOGGING_AND_PROFILING_ENABLED) {
1752 // Update NewSpace stats if necessary.
1753 RecordCopiedObject(heap, target);
1754 HEAP_PROFILE(heap, ObjectMoveEvent(source->address(), target->address()));
1755 Isolate* isolate = heap->isolate();
1756 if (isolate->logger()->is_logging_code_events() ||
1757 CpuProfiler::is_profiling(isolate)) {
1758 if (target->IsSharedFunctionInfo()) {
1759 PROFILE(isolate, SharedFunctionInfoMoveEvent(
1760 source->address(), target->address()));
1765 if (marks_handling == TRANSFER_MARKS) {
1766 if (Marking::TransferColor(source, target)) {
1767 MemoryChunk::IncrementLiveBytesFromGC(target->address(), size);
1773 template<ObjectContents object_contents,
1774 SizeRestriction size_restriction,
1776 static inline void EvacuateObject(Map* map,
1780 SLOW_ASSERT((size_restriction != SMALL) ||
1781 (object_size <= Page::kMaxNonCodeHeapObjectSize));
1782 SLOW_ASSERT(object->Size() == object_size);
1784 int allocation_size = object_size;
1785 if (alignment != kObjectAlignment) {
1786 ASSERT(alignment == kDoubleAlignment);
1787 allocation_size += kPointerSize;
1790 Heap* heap = map->GetHeap();
1791 if (heap->ShouldBePromoted(object->address(), object_size)) {
1792 MaybeObject* maybe_result;
1794 if ((size_restriction != SMALL) &&
1795 (allocation_size > Page::kMaxNonCodeHeapObjectSize)) {
1796 maybe_result = heap->lo_space()->AllocateRaw(allocation_size,
1799 if (object_contents == DATA_OBJECT) {
1800 maybe_result = heap->old_data_space()->AllocateRaw(allocation_size);
1803 heap->old_pointer_space()->AllocateRaw(allocation_size);
1807 Object* result = NULL; // Initialization to please compiler.
1808 if (maybe_result->ToObject(&result)) {
1809 HeapObject* target = HeapObject::cast(result);
1811 if (alignment != kObjectAlignment) {
1812 target = EnsureDoubleAligned(heap, target, allocation_size);
1815 // Order is important: slot might be inside of the target if target
1816 // was allocated over a dead object and slot comes from the store
1819 MigrateObject(heap, object, target, object_size);
1821 if (object_contents == POINTER_OBJECT) {
1822 if (map->instance_type() == JS_FUNCTION_TYPE) {
1823 heap->promotion_queue()->insert(
1824 target, JSFunction::kNonWeakFieldsEndOffset);
1826 heap->promotion_queue()->insert(target, object_size);
1830 heap->tracer()->increment_promoted_objects_size(object_size);
1834 MaybeObject* allocation = heap->new_space()->AllocateRaw(allocation_size);
1835 heap->promotion_queue()->SetNewLimit(heap->new_space()->top());
1836 Object* result = allocation->ToObjectUnchecked();
1837 HeapObject* target = HeapObject::cast(result);
1839 if (alignment != kObjectAlignment) {
1840 target = EnsureDoubleAligned(heap, target, allocation_size);
1843 // Order is important: slot might be inside of the target if target
1844 // was allocated over a dead object and slot comes from the store
1847 MigrateObject(heap, object, target, object_size);
1852 static inline void EvacuateJSFunction(Map* map,
1854 HeapObject* object) {
1855 ObjectEvacuationStrategy<POINTER_OBJECT>::
1856 template VisitSpecialized<JSFunction::kSize>(map, slot, object);
1858 HeapObject* target = *slot;
1859 MarkBit mark_bit = Marking::MarkBitFrom(target);
1860 if (Marking::IsBlack(mark_bit)) {
1861 // This object is black and it might not be rescanned by marker.
1862 // We should explicitly record code entry slot for compaction because
1863 // promotion queue processing (IterateAndMarkPointersToFromSpace) will
1864 // miss it as it is not HeapObject-tagged.
1865 Address code_entry_slot =
1866 target->address() + JSFunction::kCodeEntryOffset;
1867 Code* code = Code::cast(Code::GetObjectFromEntryAddress(code_entry_slot));
1868 map->GetHeap()->mark_compact_collector()->
1869 RecordCodeEntrySlot(code_entry_slot, code);
1874 static inline void EvacuateFixedArray(Map* map,
1876 HeapObject* object) {
1877 int object_size = FixedArray::BodyDescriptor::SizeOf(map, object);
1878 EvacuateObject<POINTER_OBJECT, UNKNOWN_SIZE, kObjectAlignment>(map,
1885 static inline void EvacuateFixedDoubleArray(Map* map,
1887 HeapObject* object) {
1888 int length = reinterpret_cast<FixedDoubleArray*>(object)->length();
1889 int object_size = FixedDoubleArray::SizeFor(length);
1890 EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE, kDoubleAlignment>(
1898 static inline void EvacuateByteArray(Map* map,
1900 HeapObject* object) {
1901 int object_size = reinterpret_cast<ByteArray*>(object)->ByteArraySize();
1902 EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE, kObjectAlignment>(
1903 map, slot, object, object_size);
1907 static inline void EvacuateSeqAsciiString(Map* map,
1909 HeapObject* object) {
1910 int object_size = SeqAsciiString::cast(object)->
1911 SeqAsciiStringSize(map->instance_type());
1912 EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE, kObjectAlignment>(
1913 map, slot, object, object_size);
1917 static inline void EvacuateSeqTwoByteString(Map* map,
1919 HeapObject* object) {
1920 int object_size = SeqTwoByteString::cast(object)->
1921 SeqTwoByteStringSize(map->instance_type());
1922 EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE, kObjectAlignment>(
1923 map, slot, object, object_size);
1927 static inline bool IsShortcutCandidate(int type) {
1928 return ((type & kShortcutTypeMask) == kShortcutTypeTag);
1931 static inline void EvacuateShortcutCandidate(Map* map,
1933 HeapObject* object) {
1934 ASSERT(IsShortcutCandidate(map->instance_type()));
1936 Heap* heap = map->GetHeap();
1938 if (marks_handling == IGNORE_MARKS &&
1939 ConsString::cast(object)->unchecked_second() ==
1940 heap->empty_string()) {
1942 HeapObject::cast(ConsString::cast(object)->unchecked_first());
1946 if (!heap->InNewSpace(first)) {
1947 object->set_map_word(MapWord::FromForwardingAddress(first));
1951 MapWord first_word = first->map_word();
1952 if (first_word.IsForwardingAddress()) {
1953 HeapObject* target = first_word.ToForwardingAddress();
1956 object->set_map_word(MapWord::FromForwardingAddress(target));
1960 heap->DoScavengeObject(first->map(), slot, first);
1961 object->set_map_word(MapWord::FromForwardingAddress(*slot));
1965 int object_size = ConsString::kSize;
1966 EvacuateObject<POINTER_OBJECT, SMALL, kObjectAlignment>(
1967 map, slot, object, object_size);
1970 template<ObjectContents object_contents>
1971 class ObjectEvacuationStrategy {
1973 template<int object_size>
1974 static inline void VisitSpecialized(Map* map,
1976 HeapObject* object) {
1977 EvacuateObject<object_contents, SMALL, kObjectAlignment>(
1978 map, slot, object, object_size);
1981 static inline void Visit(Map* map,
1983 HeapObject* object) {
1984 int object_size = map->instance_size();
1985 EvacuateObject<object_contents, SMALL, kObjectAlignment>(
1986 map, slot, object, object_size);
1990 static VisitorDispatchTable<ScavengingCallback> table_;
1994 template<MarksHandling marks_handling,
1995 LoggingAndProfiling logging_and_profiling_mode>
1996 VisitorDispatchTable<ScavengingCallback>
1997 ScavengingVisitor<marks_handling, logging_and_profiling_mode>::table_;
2000 static void InitializeScavengingVisitorsTables() {
2001 ScavengingVisitor<TRANSFER_MARKS,
2002 LOGGING_AND_PROFILING_DISABLED>::Initialize();
2003 ScavengingVisitor<IGNORE_MARKS, LOGGING_AND_PROFILING_DISABLED>::Initialize();
2004 ScavengingVisitor<TRANSFER_MARKS,
2005 LOGGING_AND_PROFILING_ENABLED>::Initialize();
2006 ScavengingVisitor<IGNORE_MARKS, LOGGING_AND_PROFILING_ENABLED>::Initialize();
2010 void Heap::SelectScavengingVisitorsTable() {
2011 bool logging_and_profiling =
2012 isolate()->logger()->is_logging() ||
2013 CpuProfiler::is_profiling(isolate()) ||
2014 (isolate()->heap_profiler() != NULL &&
2015 isolate()->heap_profiler()->is_profiling());
2017 if (!incremental_marking()->IsMarking()) {
2018 if (!logging_and_profiling) {
2019 scavenging_visitors_table_.CopyFrom(
2020 ScavengingVisitor<IGNORE_MARKS,
2021 LOGGING_AND_PROFILING_DISABLED>::GetTable());
2023 scavenging_visitors_table_.CopyFrom(
2024 ScavengingVisitor<IGNORE_MARKS,
2025 LOGGING_AND_PROFILING_ENABLED>::GetTable());
2028 if (!logging_and_profiling) {
2029 scavenging_visitors_table_.CopyFrom(
2030 ScavengingVisitor<TRANSFER_MARKS,
2031 LOGGING_AND_PROFILING_DISABLED>::GetTable());
2033 scavenging_visitors_table_.CopyFrom(
2034 ScavengingVisitor<TRANSFER_MARKS,
2035 LOGGING_AND_PROFILING_ENABLED>::GetTable());
2038 if (incremental_marking()->IsCompacting()) {
2039 // When compacting forbid short-circuiting of cons-strings.
2040 // Scavenging code relies on the fact that new space object
2041 // can't be evacuated into evacuation candidate but
2042 // short-circuiting violates this assumption.
2043 scavenging_visitors_table_.Register(
2044 StaticVisitorBase::kVisitShortcutCandidate,
2045 scavenging_visitors_table_.GetVisitorById(
2046 StaticVisitorBase::kVisitConsString));
2052 void Heap::ScavengeObjectSlow(HeapObject** p, HeapObject* object) {
2053 SLOW_ASSERT(HEAP->InFromSpace(object));
2054 MapWord first_word = object->map_word();
2055 SLOW_ASSERT(!first_word.IsForwardingAddress());
2056 Map* map = first_word.ToMap();
2057 map->GetHeap()->DoScavengeObject(map, p, object);
2061 MaybeObject* Heap::AllocatePartialMap(InstanceType instance_type,
2062 int instance_size) {
2064 MaybeObject* maybe_result = AllocateRawMap();
2065 if (!maybe_result->ToObject(&result)) return maybe_result;
2067 // Map::cast cannot be used due to uninitialized map field.
2068 reinterpret_cast<Map*>(result)->set_map(raw_unchecked_meta_map());
2069 reinterpret_cast<Map*>(result)->set_instance_type(instance_type);
2070 reinterpret_cast<Map*>(result)->set_instance_size(instance_size);
2071 reinterpret_cast<Map*>(result)->set_visitor_id(
2072 StaticVisitorBase::GetVisitorId(instance_type, instance_size));
2073 reinterpret_cast<Map*>(result)->set_inobject_properties(0);
2074 reinterpret_cast<Map*>(result)->set_pre_allocated_property_fields(0);
2075 reinterpret_cast<Map*>(result)->set_unused_property_fields(0);
2076 reinterpret_cast<Map*>(result)->set_bit_field(0);
2077 reinterpret_cast<Map*>(result)->set_bit_field2(0);
2078 int bit_field3 = Map::EnumLengthBits::encode(Map::kInvalidEnumCache) |
2079 Map::OwnsDescriptors::encode(true);
2080 reinterpret_cast<Map*>(result)->set_bit_field3(bit_field3);
2085 MaybeObject* Heap::AllocateMap(InstanceType instance_type,
2087 ElementsKind elements_kind) {
2089 MaybeObject* maybe_result = AllocateRawMap();
2090 if (!maybe_result->To(&result)) return maybe_result;
2092 Map* map = reinterpret_cast<Map*>(result);
2093 map->set_map_no_write_barrier(meta_map());
2094 map->set_instance_type(instance_type);
2095 map->set_visitor_id(
2096 StaticVisitorBase::GetVisitorId(instance_type, instance_size));
2097 map->set_prototype(null_value(), SKIP_WRITE_BARRIER);
2098 map->set_constructor(null_value(), SKIP_WRITE_BARRIER);
2099 map->set_instance_size(instance_size);
2100 map->set_inobject_properties(0);
2101 map->set_pre_allocated_property_fields(0);
2102 map->set_code_cache(empty_fixed_array(), SKIP_WRITE_BARRIER);
2103 map->init_back_pointer(undefined_value());
2104 map->set_unused_property_fields(0);
2105 map->set_instance_descriptors(empty_descriptor_array());
2106 map->set_bit_field(0);
2107 map->set_bit_field2(1 << Map::kIsExtensible);
2108 int bit_field3 = Map::EnumLengthBits::encode(Map::kInvalidEnumCache) |
2109 Map::OwnsDescriptors::encode(true);
2110 map->set_bit_field3(bit_field3);
2111 map->set_elements_kind(elements_kind);
2117 MaybeObject* Heap::AllocateCodeCache() {
2118 CodeCache* code_cache;
2119 { MaybeObject* maybe_code_cache = AllocateStruct(CODE_CACHE_TYPE);
2120 if (!maybe_code_cache->To(&code_cache)) return maybe_code_cache;
2122 code_cache->set_default_cache(empty_fixed_array(), SKIP_WRITE_BARRIER);
2123 code_cache->set_normal_type_cache(undefined_value(), SKIP_WRITE_BARRIER);
2128 MaybeObject* Heap::AllocatePolymorphicCodeCache() {
2129 return AllocateStruct(POLYMORPHIC_CODE_CACHE_TYPE);
2133 MaybeObject* Heap::AllocateAccessorPair() {
2134 AccessorPair* accessors;
2135 { MaybeObject* maybe_accessors = AllocateStruct(ACCESSOR_PAIR_TYPE);
2136 if (!maybe_accessors->To(&accessors)) return maybe_accessors;
2138 accessors->set_getter(the_hole_value(), SKIP_WRITE_BARRIER);
2139 accessors->set_setter(the_hole_value(), SKIP_WRITE_BARRIER);
2144 MaybeObject* Heap::AllocateTypeFeedbackInfo() {
2145 TypeFeedbackInfo* info;
2146 { MaybeObject* maybe_info = AllocateStruct(TYPE_FEEDBACK_INFO_TYPE);
2147 if (!maybe_info->To(&info)) return maybe_info;
2149 info->initialize_storage();
2150 info->set_type_feedback_cells(TypeFeedbackCells::cast(empty_fixed_array()),
2151 SKIP_WRITE_BARRIER);
2156 MaybeObject* Heap::AllocateAliasedArgumentsEntry(int aliased_context_slot) {
2157 AliasedArgumentsEntry* entry;
2158 { MaybeObject* maybe_entry = AllocateStruct(ALIASED_ARGUMENTS_ENTRY_TYPE);
2159 if (!maybe_entry->To(&entry)) return maybe_entry;
2161 entry->set_aliased_context_slot(aliased_context_slot);
2166 const Heap::StringTypeTable Heap::string_type_table[] = {
2167 #define STRING_TYPE_ELEMENT(type, size, name, camel_name) \
2168 {type, size, k##camel_name##MapRootIndex},
2169 STRING_TYPE_LIST(STRING_TYPE_ELEMENT)
2170 #undef STRING_TYPE_ELEMENT
2174 const Heap::ConstantSymbolTable Heap::constant_symbol_table[] = {
2175 #define CONSTANT_SYMBOL_ELEMENT(name, contents) \
2176 {contents, k##name##RootIndex},
2177 SYMBOL_LIST(CONSTANT_SYMBOL_ELEMENT)
2178 #undef CONSTANT_SYMBOL_ELEMENT
2182 const Heap::StructTable Heap::struct_table[] = {
2183 #define STRUCT_TABLE_ELEMENT(NAME, Name, name) \
2184 { NAME##_TYPE, Name::kSize, k##Name##MapRootIndex },
2185 STRUCT_LIST(STRUCT_TABLE_ELEMENT)
2186 #undef STRUCT_TABLE_ELEMENT
2190 bool Heap::CreateInitialMaps() {
2192 { MaybeObject* maybe_obj = AllocatePartialMap(MAP_TYPE, Map::kSize);
2193 if (!maybe_obj->ToObject(&obj)) return false;
2195 // Map::cast cannot be used due to uninitialized map field.
2196 Map* new_meta_map = reinterpret_cast<Map*>(obj);
2197 set_meta_map(new_meta_map);
2198 new_meta_map->set_map(new_meta_map);
2200 { MaybeObject* maybe_obj =
2201 AllocatePartialMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2202 if (!maybe_obj->ToObject(&obj)) return false;
2204 set_fixed_array_map(Map::cast(obj));
2206 { MaybeObject* maybe_obj = AllocatePartialMap(ODDBALL_TYPE, Oddball::kSize);
2207 if (!maybe_obj->ToObject(&obj)) return false;
2209 set_oddball_map(Map::cast(obj));
2211 // Allocate the empty array.
2212 { MaybeObject* maybe_obj = AllocateEmptyFixedArray();
2213 if (!maybe_obj->ToObject(&obj)) return false;
2215 set_empty_fixed_array(FixedArray::cast(obj));
2217 { MaybeObject* maybe_obj = Allocate(oddball_map(), OLD_POINTER_SPACE);
2218 if (!maybe_obj->ToObject(&obj)) return false;
2220 set_null_value(Oddball::cast(obj));
2221 Oddball::cast(obj)->set_kind(Oddball::kNull);
2223 { MaybeObject* maybe_obj = Allocate(oddball_map(), OLD_POINTER_SPACE);
2224 if (!maybe_obj->ToObject(&obj)) return false;
2226 set_undefined_value(Oddball::cast(obj));
2227 Oddball::cast(obj)->set_kind(Oddball::kUndefined);
2228 ASSERT(!InNewSpace(undefined_value()));
2230 // Allocate the empty descriptor array.
2231 { MaybeObject* maybe_obj = AllocateEmptyFixedArray();
2232 if (!maybe_obj->ToObject(&obj)) return false;
2234 set_empty_descriptor_array(DescriptorArray::cast(obj));
2236 // Fix the instance_descriptors for the existing maps.
2237 meta_map()->set_code_cache(empty_fixed_array());
2238 meta_map()->init_back_pointer(undefined_value());
2239 meta_map()->set_instance_descriptors(empty_descriptor_array());
2241 fixed_array_map()->set_code_cache(empty_fixed_array());
2242 fixed_array_map()->init_back_pointer(undefined_value());
2243 fixed_array_map()->set_instance_descriptors(empty_descriptor_array());
2245 oddball_map()->set_code_cache(empty_fixed_array());
2246 oddball_map()->init_back_pointer(undefined_value());
2247 oddball_map()->set_instance_descriptors(empty_descriptor_array());
2249 // Fix prototype object for existing maps.
2250 meta_map()->set_prototype(null_value());
2251 meta_map()->set_constructor(null_value());
2253 fixed_array_map()->set_prototype(null_value());
2254 fixed_array_map()->set_constructor(null_value());
2256 oddball_map()->set_prototype(null_value());
2257 oddball_map()->set_constructor(null_value());
2259 { MaybeObject* maybe_obj =
2260 AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2261 if (!maybe_obj->ToObject(&obj)) return false;
2263 set_fixed_cow_array_map(Map::cast(obj));
2264 ASSERT(fixed_array_map() != fixed_cow_array_map());
2266 { MaybeObject* maybe_obj =
2267 AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2268 if (!maybe_obj->ToObject(&obj)) return false;
2270 set_scope_info_map(Map::cast(obj));
2272 { MaybeObject* maybe_obj = AllocateMap(HEAP_NUMBER_TYPE, HeapNumber::kSize);
2273 if (!maybe_obj->ToObject(&obj)) return false;
2275 set_heap_number_map(Map::cast(obj));
2277 { MaybeObject* maybe_obj = AllocateMap(FOREIGN_TYPE, Foreign::kSize);
2278 if (!maybe_obj->ToObject(&obj)) return false;
2280 set_foreign_map(Map::cast(obj));
2282 for (unsigned i = 0; i < ARRAY_SIZE(string_type_table); i++) {
2283 const StringTypeTable& entry = string_type_table[i];
2284 { MaybeObject* maybe_obj = AllocateMap(entry.type, entry.size);
2285 if (!maybe_obj->ToObject(&obj)) return false;
2287 roots_[entry.index] = Map::cast(obj);
2290 { MaybeObject* maybe_obj = AllocateMap(STRING_TYPE, kVariableSizeSentinel);
2291 if (!maybe_obj->ToObject(&obj)) return false;
2293 set_undetectable_string_map(Map::cast(obj));
2294 Map::cast(obj)->set_is_undetectable();
2296 { MaybeObject* maybe_obj =
2297 AllocateMap(ASCII_STRING_TYPE, kVariableSizeSentinel);
2298 if (!maybe_obj->ToObject(&obj)) return false;
2300 set_undetectable_ascii_string_map(Map::cast(obj));
2301 Map::cast(obj)->set_is_undetectable();
2303 { MaybeObject* maybe_obj =
2304 AllocateMap(FIXED_DOUBLE_ARRAY_TYPE, kVariableSizeSentinel);
2305 if (!maybe_obj->ToObject(&obj)) return false;
2307 set_fixed_double_array_map(Map::cast(obj));
2309 { MaybeObject* maybe_obj =
2310 AllocateMap(BYTE_ARRAY_TYPE, kVariableSizeSentinel);
2311 if (!maybe_obj->ToObject(&obj)) return false;
2313 set_byte_array_map(Map::cast(obj));
2315 { MaybeObject* maybe_obj =
2316 AllocateMap(FREE_SPACE_TYPE, kVariableSizeSentinel);
2317 if (!maybe_obj->ToObject(&obj)) return false;
2319 set_free_space_map(Map::cast(obj));
2321 { MaybeObject* maybe_obj = AllocateByteArray(0, TENURED);
2322 if (!maybe_obj->ToObject(&obj)) return false;
2324 set_empty_byte_array(ByteArray::cast(obj));
2326 { MaybeObject* maybe_obj =
2327 AllocateMap(EXTERNAL_PIXEL_ARRAY_TYPE, ExternalArray::kAlignedSize);
2328 if (!maybe_obj->ToObject(&obj)) return false;
2330 set_external_pixel_array_map(Map::cast(obj));
2332 { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_BYTE_ARRAY_TYPE,
2333 ExternalArray::kAlignedSize);
2334 if (!maybe_obj->ToObject(&obj)) return false;
2336 set_external_byte_array_map(Map::cast(obj));
2338 { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE,
2339 ExternalArray::kAlignedSize);
2340 if (!maybe_obj->ToObject(&obj)) return false;
2342 set_external_unsigned_byte_array_map(Map::cast(obj));
2344 { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_SHORT_ARRAY_TYPE,
2345 ExternalArray::kAlignedSize);
2346 if (!maybe_obj->ToObject(&obj)) return false;
2348 set_external_short_array_map(Map::cast(obj));
2350 { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE,
2351 ExternalArray::kAlignedSize);
2352 if (!maybe_obj->ToObject(&obj)) return false;
2354 set_external_unsigned_short_array_map(Map::cast(obj));
2356 { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_INT_ARRAY_TYPE,
2357 ExternalArray::kAlignedSize);
2358 if (!maybe_obj->ToObject(&obj)) return false;
2360 set_external_int_array_map(Map::cast(obj));
2362 { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_UNSIGNED_INT_ARRAY_TYPE,
2363 ExternalArray::kAlignedSize);
2364 if (!maybe_obj->ToObject(&obj)) return false;
2366 set_external_unsigned_int_array_map(Map::cast(obj));
2368 { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_FLOAT_ARRAY_TYPE,
2369 ExternalArray::kAlignedSize);
2370 if (!maybe_obj->ToObject(&obj)) return false;
2372 set_external_float_array_map(Map::cast(obj));
2374 { MaybeObject* maybe_obj =
2375 AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2376 if (!maybe_obj->ToObject(&obj)) return false;
2378 set_non_strict_arguments_elements_map(Map::cast(obj));
2380 { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_DOUBLE_ARRAY_TYPE,
2381 ExternalArray::kAlignedSize);
2382 if (!maybe_obj->ToObject(&obj)) return false;
2384 set_external_double_array_map(Map::cast(obj));
2386 { MaybeObject* maybe_obj = AllocateMap(CODE_TYPE, kVariableSizeSentinel);
2387 if (!maybe_obj->ToObject(&obj)) return false;
2389 set_code_map(Map::cast(obj));
2391 { MaybeObject* maybe_obj = AllocateMap(JS_GLOBAL_PROPERTY_CELL_TYPE,
2392 JSGlobalPropertyCell::kSize);
2393 if (!maybe_obj->ToObject(&obj)) return false;
2395 set_global_property_cell_map(Map::cast(obj));
2397 { MaybeObject* maybe_obj = AllocateMap(FILLER_TYPE, kPointerSize);
2398 if (!maybe_obj->ToObject(&obj)) return false;
2400 set_one_pointer_filler_map(Map::cast(obj));
2402 { MaybeObject* maybe_obj = AllocateMap(FILLER_TYPE, 2 * kPointerSize);
2403 if (!maybe_obj->ToObject(&obj)) return false;
2405 set_two_pointer_filler_map(Map::cast(obj));
2407 for (unsigned i = 0; i < ARRAY_SIZE(struct_table); i++) {
2408 const StructTable& entry = struct_table[i];
2409 { MaybeObject* maybe_obj = AllocateMap(entry.type, entry.size);
2410 if (!maybe_obj->ToObject(&obj)) return false;
2412 roots_[entry.index] = Map::cast(obj);
2415 { MaybeObject* maybe_obj =
2416 AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2417 if (!maybe_obj->ToObject(&obj)) return false;
2419 set_hash_table_map(Map::cast(obj));
2421 { MaybeObject* maybe_obj =
2422 AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2423 if (!maybe_obj->ToObject(&obj)) return false;
2425 set_function_context_map(Map::cast(obj));
2427 { MaybeObject* maybe_obj =
2428 AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2429 if (!maybe_obj->ToObject(&obj)) return false;
2431 set_catch_context_map(Map::cast(obj));
2433 { MaybeObject* maybe_obj =
2434 AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2435 if (!maybe_obj->ToObject(&obj)) return false;
2437 set_with_context_map(Map::cast(obj));
2439 { MaybeObject* maybe_obj =
2440 AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2441 if (!maybe_obj->ToObject(&obj)) return false;
2443 set_block_context_map(Map::cast(obj));
2445 { MaybeObject* maybe_obj =
2446 AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2447 if (!maybe_obj->ToObject(&obj)) return false;
2449 set_module_context_map(Map::cast(obj));
2451 { MaybeObject* maybe_obj =
2452 AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2453 if (!maybe_obj->ToObject(&obj)) return false;
2455 set_global_context_map(Map::cast(obj));
2457 { MaybeObject* maybe_obj =
2458 AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2459 if (!maybe_obj->ToObject(&obj)) return false;
2461 Map* native_context_map = Map::cast(obj);
2462 native_context_map->set_dictionary_map(true);
2463 native_context_map->set_visitor_id(StaticVisitorBase::kVisitNativeContext);
2464 set_native_context_map(native_context_map);
2466 { MaybeObject* maybe_obj = AllocateMap(SHARED_FUNCTION_INFO_TYPE,
2467 SharedFunctionInfo::kAlignedSize);
2468 if (!maybe_obj->ToObject(&obj)) return false;
2470 set_shared_function_info_map(Map::cast(obj));
2472 { MaybeObject* maybe_obj = AllocateMap(JS_MESSAGE_OBJECT_TYPE,
2473 JSMessageObject::kSize);
2474 if (!maybe_obj->ToObject(&obj)) return false;
2476 set_message_object_map(Map::cast(obj));
2478 ASSERT(!InNewSpace(empty_fixed_array()));
2483 MaybeObject* Heap::AllocateHeapNumber(double value, PretenureFlag pretenure) {
2484 // Statically ensure that it is safe to allocate heap numbers in paged
2486 STATIC_ASSERT(HeapNumber::kSize <= Page::kNonCodeObjectAreaSize);
2487 AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
2490 { MaybeObject* maybe_result =
2491 AllocateRaw(HeapNumber::kSize, space, OLD_DATA_SPACE);
2492 if (!maybe_result->ToObject(&result)) return maybe_result;
2495 HeapObject::cast(result)->set_map_no_write_barrier(heap_number_map());
2496 HeapNumber::cast(result)->set_value(value);
2501 MaybeObject* Heap::AllocateHeapNumber(double value) {
2502 // Use general version, if we're forced to always allocate.
2503 if (always_allocate()) return AllocateHeapNumber(value, TENURED);
2505 // This version of AllocateHeapNumber is optimized for
2506 // allocation in new space.
2507 STATIC_ASSERT(HeapNumber::kSize <= Page::kMaxNonCodeHeapObjectSize);
2508 ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
2510 { MaybeObject* maybe_result = new_space_.AllocateRaw(HeapNumber::kSize);
2511 if (!maybe_result->ToObject(&result)) return maybe_result;
2513 HeapObject::cast(result)->set_map_no_write_barrier(heap_number_map());
2514 HeapNumber::cast(result)->set_value(value);
2519 MaybeObject* Heap::AllocateJSGlobalPropertyCell(Object* value) {
2521 { MaybeObject* maybe_result = AllocateRawCell();
2522 if (!maybe_result->ToObject(&result)) return maybe_result;
2524 HeapObject::cast(result)->set_map_no_write_barrier(
2525 global_property_cell_map());
2526 JSGlobalPropertyCell::cast(result)->set_value(value);
2531 MaybeObject* Heap::CreateOddball(const char* to_string,
2535 { MaybeObject* maybe_result = Allocate(oddball_map(), OLD_POINTER_SPACE);
2536 if (!maybe_result->ToObject(&result)) return maybe_result;
2538 return Oddball::cast(result)->Initialize(to_string, to_number, kind);
2542 bool Heap::CreateApiObjects() {
2545 { MaybeObject* maybe_obj = AllocateMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
2546 if (!maybe_obj->ToObject(&obj)) return false;
2548 // Don't use Smi-only elements optimizations for objects with the neander
2549 // map. There are too many cases where element values are set directly with a
2550 // bottleneck to trap the Smi-only -> fast elements transition, and there
2551 // appears to be no benefit for optimize this case.
2552 Map* new_neander_map = Map::cast(obj);
2553 new_neander_map->set_elements_kind(TERMINAL_FAST_ELEMENTS_KIND);
2554 set_neander_map(new_neander_map);
2556 { MaybeObject* maybe_obj = AllocateJSObjectFromMap(neander_map());
2557 if (!maybe_obj->ToObject(&obj)) return false;
2560 { MaybeObject* maybe_elements = AllocateFixedArray(2);
2561 if (!maybe_elements->ToObject(&elements)) return false;
2563 FixedArray::cast(elements)->set(0, Smi::FromInt(0));
2564 JSObject::cast(obj)->set_elements(FixedArray::cast(elements));
2565 set_message_listeners(JSObject::cast(obj));
2571 void Heap::CreateJSEntryStub() {
2573 set_js_entry_code(*stub.GetCode());
2577 void Heap::CreateJSConstructEntryStub() {
2578 JSConstructEntryStub stub;
2579 set_js_construct_entry_code(*stub.GetCode());
2583 void Heap::CreateFixedStubs() {
2584 // Here we create roots for fixed stubs. They are needed at GC
2585 // for cooking and uncooking (check out frames.cc).
2586 // The eliminates the need for doing dictionary lookup in the
2587 // stub cache for these stubs.
2589 // gcc-4.4 has problem generating correct code of following snippet:
2590 // { JSEntryStub stub;
2591 // js_entry_code_ = *stub.GetCode();
2593 // { JSConstructEntryStub stub;
2594 // js_construct_entry_code_ = *stub.GetCode();
2596 // To workaround the problem, make separate functions without inlining.
2597 Heap::CreateJSEntryStub();
2598 Heap::CreateJSConstructEntryStub();
2600 // Create stubs that should be there, so we don't unexpectedly have to
2601 // create them if we need them during the creation of another stub.
2602 // Stub creation mixes raw pointers and handles in an unsafe manner so
2603 // we cannot create stubs while we are creating stubs.
2604 CodeStub::GenerateStubsAheadOfTime();
2608 bool Heap::CreateInitialObjects() {
2611 // The -0 value must be set before NumberFromDouble works.
2612 { MaybeObject* maybe_obj = AllocateHeapNumber(-0.0, TENURED);
2613 if (!maybe_obj->ToObject(&obj)) return false;
2615 set_minus_zero_value(HeapNumber::cast(obj));
2616 ASSERT(signbit(minus_zero_value()->Number()) != 0);
2618 { MaybeObject* maybe_obj = AllocateHeapNumber(OS::nan_value(), TENURED);
2619 if (!maybe_obj->ToObject(&obj)) return false;
2621 set_nan_value(HeapNumber::cast(obj));
2623 { MaybeObject* maybe_obj = AllocateHeapNumber(V8_INFINITY, TENURED);
2624 if (!maybe_obj->ToObject(&obj)) return false;
2626 set_infinity_value(HeapNumber::cast(obj));
2628 // The hole has not been created yet, but we want to put something
2629 // predictable in the gaps in the symbol table, so lets make that Smi zero.
2630 set_the_hole_value(reinterpret_cast<Oddball*>(Smi::FromInt(0)));
2632 // Allocate initial symbol table.
2633 { MaybeObject* maybe_obj = SymbolTable::Allocate(kInitialSymbolTableSize);
2634 if (!maybe_obj->ToObject(&obj)) return false;
2636 // Don't use set_symbol_table() due to asserts.
2637 roots_[kSymbolTableRootIndex] = obj;
2639 // Finish initializing oddballs after creating symboltable.
2640 { MaybeObject* maybe_obj =
2641 undefined_value()->Initialize("undefined",
2643 Oddball::kUndefined);
2644 if (!maybe_obj->ToObject(&obj)) return false;
2647 // Initialize the null_value.
2648 { MaybeObject* maybe_obj =
2649 null_value()->Initialize("null", Smi::FromInt(0), Oddball::kNull);
2650 if (!maybe_obj->ToObject(&obj)) return false;
2653 { MaybeObject* maybe_obj = CreateOddball("true",
2656 if (!maybe_obj->ToObject(&obj)) return false;
2658 set_true_value(Oddball::cast(obj));
2660 { MaybeObject* maybe_obj = CreateOddball("false",
2663 if (!maybe_obj->ToObject(&obj)) return false;
2665 set_false_value(Oddball::cast(obj));
2667 { MaybeObject* maybe_obj = CreateOddball("hole",
2670 if (!maybe_obj->ToObject(&obj)) return false;
2672 set_the_hole_value(Oddball::cast(obj));
2674 { MaybeObject* maybe_obj = CreateOddball("arguments_marker",
2676 Oddball::kArgumentMarker);
2677 if (!maybe_obj->ToObject(&obj)) return false;
2679 set_arguments_marker(Oddball::cast(obj));
2681 { MaybeObject* maybe_obj = CreateOddball("no_interceptor_result_sentinel",
2684 if (!maybe_obj->ToObject(&obj)) return false;
2686 set_no_interceptor_result_sentinel(obj);
2688 { MaybeObject* maybe_obj = CreateOddball("termination_exception",
2691 if (!maybe_obj->ToObject(&obj)) return false;
2693 set_termination_exception(obj);
2695 // Allocate the empty string.
2696 { MaybeObject* maybe_obj = AllocateRawAsciiString(0, TENURED);
2697 if (!maybe_obj->ToObject(&obj)) return false;
2699 set_empty_string(String::cast(obj));
2701 for (unsigned i = 0; i < ARRAY_SIZE(constant_symbol_table); i++) {
2702 { MaybeObject* maybe_obj =
2703 LookupAsciiSymbol(constant_symbol_table[i].contents);
2704 if (!maybe_obj->ToObject(&obj)) return false;
2706 roots_[constant_symbol_table[i].index] = String::cast(obj);
2709 // Allocate the hidden symbol which is used to identify the hidden properties
2710 // in JSObjects. The hash code has a special value so that it will not match
2711 // the empty string when searching for the property. It cannot be part of the
2712 // loop above because it needs to be allocated manually with the special
2713 // hash code in place. The hash code for the hidden_symbol is zero to ensure
2714 // that it will always be at the first entry in property descriptors.
2715 { MaybeObject* maybe_obj =
2716 AllocateSymbol(CStrVector(""), 0, String::kEmptyStringHash);
2717 if (!maybe_obj->ToObject(&obj)) return false;
2719 hidden_symbol_ = String::cast(obj);
2721 // Allocate the foreign for __proto__.
2722 { MaybeObject* maybe_obj =
2723 AllocateForeign((Address) &Accessors::ObjectPrototype);
2724 if (!maybe_obj->ToObject(&obj)) return false;
2726 set_prototype_accessors(Foreign::cast(obj));
2728 // Allocate the code_stubs dictionary. The initial size is set to avoid
2729 // expanding the dictionary during bootstrapping.
2730 { MaybeObject* maybe_obj = UnseededNumberDictionary::Allocate(128);
2731 if (!maybe_obj->ToObject(&obj)) return false;
2733 set_code_stubs(UnseededNumberDictionary::cast(obj));
2736 // Allocate the non_monomorphic_cache used in stub-cache.cc. The initial size
2737 // is set to avoid expanding the dictionary during bootstrapping.
2738 { MaybeObject* maybe_obj = UnseededNumberDictionary::Allocate(64);
2739 if (!maybe_obj->ToObject(&obj)) return false;
2741 set_non_monomorphic_cache(UnseededNumberDictionary::cast(obj));
2743 { MaybeObject* maybe_obj = AllocatePolymorphicCodeCache();
2744 if (!maybe_obj->ToObject(&obj)) return false;
2746 set_polymorphic_code_cache(PolymorphicCodeCache::cast(obj));
2748 set_instanceof_cache_function(Smi::FromInt(0));
2749 set_instanceof_cache_map(Smi::FromInt(0));
2750 set_instanceof_cache_answer(Smi::FromInt(0));
2754 // Allocate the dictionary of intrinsic function names.
2755 { MaybeObject* maybe_obj = StringDictionary::Allocate(Runtime::kNumFunctions);
2756 if (!maybe_obj->ToObject(&obj)) return false;
2758 { MaybeObject* maybe_obj = Runtime::InitializeIntrinsicFunctionNames(this,
2760 if (!maybe_obj->ToObject(&obj)) return false;
2762 set_intrinsic_function_names(StringDictionary::cast(obj));
2764 { MaybeObject* maybe_obj = AllocateInitialNumberStringCache();
2765 if (!maybe_obj->ToObject(&obj)) return false;
2767 set_number_string_cache(FixedArray::cast(obj));
2769 // Allocate cache for single character ASCII strings.
2770 { MaybeObject* maybe_obj =
2771 AllocateFixedArray(String::kMaxAsciiCharCode + 1, TENURED);
2772 if (!maybe_obj->ToObject(&obj)) return false;
2774 set_single_character_string_cache(FixedArray::cast(obj));
2776 // Allocate cache for string split.
2777 { MaybeObject* maybe_obj = AllocateFixedArray(
2778 RegExpResultsCache::kRegExpResultsCacheSize, TENURED);
2779 if (!maybe_obj->ToObject(&obj)) return false;
2781 set_string_split_cache(FixedArray::cast(obj));
2783 { MaybeObject* maybe_obj = AllocateFixedArray(
2784 RegExpResultsCache::kRegExpResultsCacheSize, TENURED);
2785 if (!maybe_obj->ToObject(&obj)) return false;
2787 set_regexp_multiple_cache(FixedArray::cast(obj));
2789 // Allocate cache for external strings pointing to native source code.
2790 { MaybeObject* maybe_obj = AllocateFixedArray(Natives::GetBuiltinsCount());
2791 if (!maybe_obj->ToObject(&obj)) return false;
2793 set_natives_source_cache(FixedArray::cast(obj));
2795 // Handling of script id generation is in FACTORY->NewScript.
2796 set_last_script_id(undefined_value());
2798 // Initialize keyed lookup cache.
2799 isolate_->keyed_lookup_cache()->Clear();
2801 // Initialize context slot cache.
2802 isolate_->context_slot_cache()->Clear();
2804 // Initialize descriptor cache.
2805 isolate_->descriptor_lookup_cache()->Clear();
2807 // Initialize compilation cache.
2808 isolate_->compilation_cache()->Clear();
2814 Object* RegExpResultsCache::Lookup(Heap* heap,
2816 Object* key_pattern,
2817 ResultsCacheType type) {
2819 if (!key_string->IsSymbol()) return Smi::FromInt(0);
2820 if (type == STRING_SPLIT_SUBSTRINGS) {
2821 ASSERT(key_pattern->IsString());
2822 if (!key_pattern->IsSymbol()) return Smi::FromInt(0);
2823 cache = heap->string_split_cache();
2825 ASSERT(type == REGEXP_MULTIPLE_INDICES);
2826 ASSERT(key_pattern->IsFixedArray());
2827 cache = heap->regexp_multiple_cache();
2830 uint32_t hash = key_string->Hash();
2831 uint32_t index = ((hash & (kRegExpResultsCacheSize - 1)) &
2832 ~(kArrayEntriesPerCacheEntry - 1));
2833 if (cache->get(index + kStringOffset) == key_string &&
2834 cache->get(index + kPatternOffset) == key_pattern) {
2835 return cache->get(index + kArrayOffset);
2838 ((index + kArrayEntriesPerCacheEntry) & (kRegExpResultsCacheSize - 1));
2839 if (cache->get(index + kStringOffset) == key_string &&
2840 cache->get(index + kPatternOffset) == key_pattern) {
2841 return cache->get(index + kArrayOffset);
2843 return Smi::FromInt(0);
2847 void RegExpResultsCache::Enter(Heap* heap,
2849 Object* key_pattern,
2850 FixedArray* value_array,
2851 ResultsCacheType type) {
2853 if (!key_string->IsSymbol()) return;
2854 if (type == STRING_SPLIT_SUBSTRINGS) {
2855 ASSERT(key_pattern->IsString());
2856 if (!key_pattern->IsSymbol()) return;
2857 cache = heap->string_split_cache();
2859 ASSERT(type == REGEXP_MULTIPLE_INDICES);
2860 ASSERT(key_pattern->IsFixedArray());
2861 cache = heap->regexp_multiple_cache();
2864 uint32_t hash = key_string->Hash();
2865 uint32_t index = ((hash & (kRegExpResultsCacheSize - 1)) &
2866 ~(kArrayEntriesPerCacheEntry - 1));
2867 if (cache->get(index + kStringOffset) == Smi::FromInt(0)) {
2868 cache->set(index + kStringOffset, key_string);
2869 cache->set(index + kPatternOffset, key_pattern);
2870 cache->set(index + kArrayOffset, value_array);
2873 ((index + kArrayEntriesPerCacheEntry) & (kRegExpResultsCacheSize - 1));
2874 if (cache->get(index2 + kStringOffset) == Smi::FromInt(0)) {
2875 cache->set(index2 + kStringOffset, key_string);
2876 cache->set(index2 + kPatternOffset, key_pattern);
2877 cache->set(index2 + kArrayOffset, value_array);
2879 cache->set(index2 + kStringOffset, Smi::FromInt(0));
2880 cache->set(index2 + kPatternOffset, Smi::FromInt(0));
2881 cache->set(index2 + kArrayOffset, Smi::FromInt(0));
2882 cache->set(index + kStringOffset, key_string);
2883 cache->set(index + kPatternOffset, key_pattern);
2884 cache->set(index + kArrayOffset, value_array);
2887 // If the array is a reasonably short list of substrings, convert it into a
2889 if (type == STRING_SPLIT_SUBSTRINGS && value_array->length() < 100) {
2890 for (int i = 0; i < value_array->length(); i++) {
2891 String* str = String::cast(value_array->get(i));
2893 MaybeObject* maybe_symbol = heap->LookupSymbol(str);
2894 if (maybe_symbol->ToObject(&symbol)) {
2895 value_array->set(i, symbol);
2899 // Convert backing store to a copy-on-write array.
2900 value_array->set_map_no_write_barrier(heap->fixed_cow_array_map());
2904 void RegExpResultsCache::Clear(FixedArray* cache) {
2905 for (int i = 0; i < kRegExpResultsCacheSize; i++) {
2906 cache->set(i, Smi::FromInt(0));
2911 MaybeObject* Heap::AllocateInitialNumberStringCache() {
2912 MaybeObject* maybe_obj =
2913 AllocateFixedArray(kInitialNumberStringCacheSize * 2, TENURED);
2918 int Heap::FullSizeNumberStringCacheLength() {
2919 // Compute the size of the number string cache based on the max newspace size.
2920 // The number string cache has a minimum size based on twice the initial cache
2921 // size to ensure that it is bigger after being made 'full size'.
2922 int number_string_cache_size = max_semispace_size_ / 512;
2923 number_string_cache_size = Max(kInitialNumberStringCacheSize * 2,
2924 Min(0x4000, number_string_cache_size));
2925 // There is a string and a number per entry so the length is twice the number
2927 return number_string_cache_size * 2;
2931 void Heap::AllocateFullSizeNumberStringCache() {
2932 // The idea is to have a small number string cache in the snapshot to keep
2933 // boot-time memory usage down. If we expand the number string cache already
2934 // while creating the snapshot then that didn't work out.
2935 ASSERT(!Serializer::enabled() || FLAG_extra_code != NULL);
2936 MaybeObject* maybe_obj =
2937 AllocateFixedArray(FullSizeNumberStringCacheLength(), TENURED);
2939 if (maybe_obj->ToObject(&new_cache)) {
2940 // We don't bother to repopulate the cache with entries from the old cache.
2941 // It will be repopulated soon enough with new strings.
2942 set_number_string_cache(FixedArray::cast(new_cache));
2944 // If allocation fails then we just return without doing anything. It is only
2945 // a cache, so best effort is OK here.
2949 void Heap::FlushNumberStringCache() {
2950 // Flush the number to string cache.
2951 int len = number_string_cache()->length();
2952 for (int i = 0; i < len; i++) {
2953 number_string_cache()->set_undefined(this, i);
2958 static inline int double_get_hash(double d) {
2959 DoubleRepresentation rep(d);
2960 return static_cast<int>(rep.bits) ^ static_cast<int>(rep.bits >> 32);
2964 static inline int smi_get_hash(Smi* smi) {
2965 return smi->value();
2969 Object* Heap::GetNumberStringCache(Object* number) {
2971 int mask = (number_string_cache()->length() >> 1) - 1;
2972 if (number->IsSmi()) {
2973 hash = smi_get_hash(Smi::cast(number)) & mask;
2975 hash = double_get_hash(number->Number()) & mask;
2977 Object* key = number_string_cache()->get(hash * 2);
2978 if (key == number) {
2979 return String::cast(number_string_cache()->get(hash * 2 + 1));
2980 } else if (key->IsHeapNumber() &&
2981 number->IsHeapNumber() &&
2982 key->Number() == number->Number()) {
2983 return String::cast(number_string_cache()->get(hash * 2 + 1));
2985 return undefined_value();
2989 void Heap::SetNumberStringCache(Object* number, String* string) {
2991 int mask = (number_string_cache()->length() >> 1) - 1;
2992 if (number->IsSmi()) {
2993 hash = smi_get_hash(Smi::cast(number)) & mask;
2995 hash = double_get_hash(number->Number()) & mask;
2997 if (number_string_cache()->get(hash * 2) != undefined_value() &&
2998 number_string_cache()->length() != FullSizeNumberStringCacheLength()) {
2999 // The first time we have a hash collision, we move to the full sized
3000 // number string cache.
3001 AllocateFullSizeNumberStringCache();
3004 number_string_cache()->set(hash * 2, number);
3005 number_string_cache()->set(hash * 2 + 1, string);
3009 MaybeObject* Heap::NumberToString(Object* number,
3010 bool check_number_string_cache) {
3011 isolate_->counters()->number_to_string_runtime()->Increment();
3012 if (check_number_string_cache) {
3013 Object* cached = GetNumberStringCache(number);
3014 if (cached != undefined_value()) {
3020 Vector<char> buffer(arr, ARRAY_SIZE(arr));
3022 if (number->IsSmi()) {
3023 int num = Smi::cast(number)->value();
3024 str = IntToCString(num, buffer);
3026 double num = HeapNumber::cast(number)->value();
3027 str = DoubleToCString(num, buffer);
3031 MaybeObject* maybe_js_string = AllocateStringFromAscii(CStrVector(str));
3032 if (maybe_js_string->ToObject(&js_string)) {
3033 SetNumberStringCache(number, String::cast(js_string));
3035 return maybe_js_string;
3039 MaybeObject* Heap::Uint32ToString(uint32_t value,
3040 bool check_number_string_cache) {
3042 MaybeObject* maybe = NumberFromUint32(value);
3043 if (!maybe->To<Object>(&number)) return maybe;
3044 return NumberToString(number, check_number_string_cache);
3048 Map* Heap::MapForExternalArrayType(ExternalArrayType array_type) {
3049 return Map::cast(roots_[RootIndexForExternalArrayType(array_type)]);
3053 Heap::RootListIndex Heap::RootIndexForExternalArrayType(
3054 ExternalArrayType array_type) {
3055 switch (array_type) {
3056 case kExternalByteArray:
3057 return kExternalByteArrayMapRootIndex;
3058 case kExternalUnsignedByteArray:
3059 return kExternalUnsignedByteArrayMapRootIndex;
3060 case kExternalShortArray:
3061 return kExternalShortArrayMapRootIndex;
3062 case kExternalUnsignedShortArray:
3063 return kExternalUnsignedShortArrayMapRootIndex;
3064 case kExternalIntArray:
3065 return kExternalIntArrayMapRootIndex;
3066 case kExternalUnsignedIntArray:
3067 return kExternalUnsignedIntArrayMapRootIndex;
3068 case kExternalFloatArray:
3069 return kExternalFloatArrayMapRootIndex;
3070 case kExternalDoubleArray:
3071 return kExternalDoubleArrayMapRootIndex;
3072 case kExternalPixelArray:
3073 return kExternalPixelArrayMapRootIndex;
3076 return kUndefinedValueRootIndex;
3081 MaybeObject* Heap::NumberFromDouble(double value, PretenureFlag pretenure) {
3082 // We need to distinguish the minus zero value and this cannot be
3083 // done after conversion to int. Doing this by comparing bit
3084 // patterns is faster than using fpclassify() et al.
3085 static const DoubleRepresentation minus_zero(-0.0);
3087 DoubleRepresentation rep(value);
3088 if (rep.bits == minus_zero.bits) {
3089 return AllocateHeapNumber(-0.0, pretenure);
3092 int int_value = FastD2I(value);
3093 if (value == int_value && Smi::IsValid(int_value)) {
3094 return Smi::FromInt(int_value);
3097 // Materialize the value in the heap.
3098 return AllocateHeapNumber(value, pretenure);
3102 MaybeObject* Heap::AllocateForeign(Address address, PretenureFlag pretenure) {
3103 // Statically ensure that it is safe to allocate foreigns in paged spaces.
3104 STATIC_ASSERT(Foreign::kSize <= Page::kMaxNonCodeHeapObjectSize);
3105 AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
3107 MaybeObject* maybe_result = Allocate(foreign_map(), space);
3108 if (!maybe_result->To(&result)) return maybe_result;
3109 result->set_foreign_address(address);
3114 MaybeObject* Heap::AllocateSharedFunctionInfo(Object* name) {
3115 SharedFunctionInfo* share;
3116 MaybeObject* maybe = Allocate(shared_function_info_map(), OLD_POINTER_SPACE);
3117 if (!maybe->To<SharedFunctionInfo>(&share)) return maybe;
3119 // Set pointer fields.
3120 share->set_name(name);
3121 Code* illegal = isolate_->builtins()->builtin(Builtins::kIllegal);
3122 share->set_code(illegal);
3123 share->ClearOptimizedCodeMap();
3124 share->set_scope_info(ScopeInfo::Empty());
3125 Code* construct_stub =
3126 isolate_->builtins()->builtin(Builtins::kJSConstructStubGeneric);
3127 share->set_construct_stub(construct_stub);
3128 share->set_instance_class_name(Object_symbol());
3129 share->set_function_data(undefined_value(), SKIP_WRITE_BARRIER);
3130 share->set_script(undefined_value(), SKIP_WRITE_BARRIER);
3131 share->set_debug_info(undefined_value(), SKIP_WRITE_BARRIER);
3132 share->set_inferred_name(empty_string(), SKIP_WRITE_BARRIER);
3133 share->set_initial_map(undefined_value(), SKIP_WRITE_BARRIER);
3134 share->set_this_property_assignments(undefined_value(), SKIP_WRITE_BARRIER);
3135 share->set_ast_node_count(0);
3136 share->set_stress_deopt_counter(FLAG_deopt_every_n_times);
3137 share->set_counters(0);
3139 // Set integer fields (smi or int, depending on the architecture).
3140 share->set_length(0);
3141 share->set_formal_parameter_count(0);
3142 share->set_expected_nof_properties(0);
3143 share->set_num_literals(0);
3144 share->set_start_position_and_type(0);
3145 share->set_end_position(0);
3146 share->set_function_token_position(0);
3147 // All compiler hints default to false or 0.
3148 share->set_compiler_hints(0);
3149 share->set_this_property_assignments_count(0);
3150 share->set_opt_count(0);
3156 MaybeObject* Heap::AllocateJSMessageObject(String* type,
3161 Object* stack_trace,
3162 Object* stack_frames) {
3164 { MaybeObject* maybe_result = Allocate(message_object_map(), NEW_SPACE);
3165 if (!maybe_result->ToObject(&result)) return maybe_result;
3167 JSMessageObject* message = JSMessageObject::cast(result);
3168 message->set_properties(Heap::empty_fixed_array(), SKIP_WRITE_BARRIER);
3169 message->initialize_elements();
3170 message->set_elements(Heap::empty_fixed_array(), SKIP_WRITE_BARRIER);
3171 message->set_type(type);
3172 message->set_arguments(arguments);
3173 message->set_start_position(start_position);
3174 message->set_end_position(end_position);
3175 message->set_script(script);
3176 message->set_stack_trace(stack_trace);
3177 message->set_stack_frames(stack_frames);
3183 // Returns true for a character in a range. Both limits are inclusive.
3184 static inline bool Between(uint32_t character, uint32_t from, uint32_t to) {
3185 // This makes uses of the the unsigned wraparound.
3186 return character - from <= to - from;
3190 MUST_USE_RESULT static inline MaybeObject* MakeOrFindTwoCharacterString(
3195 // Numeric strings have a different hash algorithm not known by
3196 // LookupTwoCharsSymbolIfExists, so we skip this step for such strings.
3197 if ((!Between(c1, '0', '9') || !Between(c2, '0', '9')) &&
3198 heap->symbol_table()->LookupTwoCharsSymbolIfExists(c1, c2, &symbol)) {
3200 // Now we know the length is 2, we might as well make use of that fact
3201 // when building the new string.
3202 } else if ((c1 | c2) <= String::kMaxAsciiCharCodeU) { // We can do this
3203 ASSERT(IsPowerOf2(String::kMaxAsciiCharCodeU + 1)); // because of this.
3205 { MaybeObject* maybe_result = heap->AllocateRawAsciiString(2);
3206 if (!maybe_result->ToObject(&result)) return maybe_result;
3208 char* dest = SeqAsciiString::cast(result)->GetChars();
3214 { MaybeObject* maybe_result = heap->AllocateRawTwoByteString(2);
3215 if (!maybe_result->ToObject(&result)) return maybe_result;
3217 uc16* dest = SeqTwoByteString::cast(result)->GetChars();
3225 MaybeObject* Heap::AllocateConsString(String* first, String* second) {
3226 int first_length = first->length();
3227 if (first_length == 0) {
3231 int second_length = second->length();
3232 if (second_length == 0) {
3236 int length = first_length + second_length;
3238 // Optimization for 2-byte strings often used as keys in a decompression
3239 // dictionary. Check whether we already have the string in the symbol
3240 // table to prevent creation of many unneccesary strings.
3242 unsigned c1 = first->Get(0);
3243 unsigned c2 = second->Get(0);
3244 return MakeOrFindTwoCharacterString(this, c1, c2);
3247 bool first_is_ascii = first->IsAsciiRepresentation();
3248 bool second_is_ascii = second->IsAsciiRepresentation();
3249 bool is_ascii = first_is_ascii && second_is_ascii;
3251 // Make sure that an out of memory exception is thrown if the length
3252 // of the new cons string is too large.
3253 if (length > String::kMaxLength || length < 0) {
3254 isolate()->context()->mark_out_of_memory();
3255 return Failure::OutOfMemoryException();
3258 bool is_ascii_data_in_two_byte_string = false;
3260 // At least one of the strings uses two-byte representation so we
3261 // can't use the fast case code for short ASCII strings below, but
3262 // we can try to save memory if all chars actually fit in ASCII.
3263 is_ascii_data_in_two_byte_string =
3264 first->HasOnlyAsciiChars() && second->HasOnlyAsciiChars();
3265 if (is_ascii_data_in_two_byte_string) {
3266 isolate_->counters()->string_add_runtime_ext_to_ascii()->Increment();
3270 // If the resulting string is small make a flat string.
3271 if (length < ConsString::kMinLength) {
3272 // Note that neither of the two inputs can be a slice because:
3273 STATIC_ASSERT(ConsString::kMinLength <= SlicedString::kMinLength);
3274 ASSERT(first->IsFlat());
3275 ASSERT(second->IsFlat());
3278 { MaybeObject* maybe_result = AllocateRawAsciiString(length);
3279 if (!maybe_result->ToObject(&result)) return maybe_result;
3281 // Copy the characters into the new object.
3282 char* dest = SeqAsciiString::cast(result)->GetChars();
3285 if (first->IsExternalString()) {
3286 src = ExternalAsciiString::cast(first)->GetChars();
3288 src = SeqAsciiString::cast(first)->GetChars();
3290 for (int i = 0; i < first_length; i++) *dest++ = src[i];
3291 // Copy second part.
3292 if (second->IsExternalString()) {
3293 src = ExternalAsciiString::cast(second)->GetChars();
3295 src = SeqAsciiString::cast(second)->GetChars();
3297 for (int i = 0; i < second_length; i++) *dest++ = src[i];
3300 if (is_ascii_data_in_two_byte_string) {
3302 { MaybeObject* maybe_result = AllocateRawAsciiString(length);
3303 if (!maybe_result->ToObject(&result)) return maybe_result;
3305 // Copy the characters into the new object.
3306 char* dest = SeqAsciiString::cast(result)->GetChars();
3307 String::WriteToFlat(first, dest, 0, first_length);
3308 String::WriteToFlat(second, dest + first_length, 0, second_length);
3309 isolate_->counters()->string_add_runtime_ext_to_ascii()->Increment();
3314 { MaybeObject* maybe_result = AllocateRawTwoByteString(length);
3315 if (!maybe_result->ToObject(&result)) return maybe_result;
3317 // Copy the characters into the new object.
3318 uc16* dest = SeqTwoByteString::cast(result)->GetChars();
3319 String::WriteToFlat(first, dest, 0, first_length);
3320 String::WriteToFlat(second, dest + first_length, 0, second_length);
3325 Map* map = (is_ascii || is_ascii_data_in_two_byte_string) ?
3326 cons_ascii_string_map() : cons_string_map();
3329 { MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
3330 if (!maybe_result->ToObject(&result)) return maybe_result;
3333 AssertNoAllocation no_gc;
3334 ConsString* cons_string = ConsString::cast(result);
3335 WriteBarrierMode mode = cons_string->GetWriteBarrierMode(no_gc);
3336 cons_string->set_length(length);
3337 cons_string->set_hash_field(String::kEmptyHashField);
3338 cons_string->set_first(first, mode);
3339 cons_string->set_second(second, mode);
3344 MaybeObject* Heap::AllocateSubString(String* buffer,
3347 PretenureFlag pretenure) {
3348 int length = end - start;
3350 return empty_string();
3351 } else if (length == 1) {
3352 return LookupSingleCharacterStringFromCode(buffer->Get(start));
3353 } else if (length == 2) {
3354 // Optimization for 2-byte strings often used as keys in a decompression
3355 // dictionary. Check whether we already have the string in the symbol
3356 // table to prevent creation of many unneccesary strings.
3357 unsigned c1 = buffer->Get(start);
3358 unsigned c2 = buffer->Get(start + 1);
3359 return MakeOrFindTwoCharacterString(this, c1, c2);
3362 // Make an attempt to flatten the buffer to reduce access time.
3363 buffer = buffer->TryFlattenGetString();
3365 if (!FLAG_string_slices ||
3366 !buffer->IsFlat() ||
3367 length < SlicedString::kMinLength ||
3368 pretenure == TENURED) {
3370 // WriteToFlat takes care of the case when an indirect string has a
3371 // different encoding from its underlying string. These encodings may
3372 // differ because of externalization.
3373 bool is_ascii = buffer->IsAsciiRepresentation();
3374 { MaybeObject* maybe_result = is_ascii
3375 ? AllocateRawAsciiString(length, pretenure)
3376 : AllocateRawTwoByteString(length, pretenure);
3377 if (!maybe_result->ToObject(&result)) return maybe_result;
3379 String* string_result = String::cast(result);
3380 // Copy the characters into the new object.
3382 ASSERT(string_result->IsAsciiRepresentation());
3383 char* dest = SeqAsciiString::cast(string_result)->GetChars();
3384 String::WriteToFlat(buffer, dest, start, end);
3386 ASSERT(string_result->IsTwoByteRepresentation());
3387 uc16* dest = SeqTwoByteString::cast(string_result)->GetChars();
3388 String::WriteToFlat(buffer, dest, start, end);
3393 ASSERT(buffer->IsFlat());
3395 if (FLAG_verify_heap) {
3396 buffer->StringVerify();
3401 // When slicing an indirect string we use its encoding for a newly created
3402 // slice and don't check the encoding of the underlying string. This is safe
3403 // even if the encodings are different because of externalization. If an
3404 // indirect ASCII string is pointing to a two-byte string, the two-byte char
3405 // codes of the underlying string must still fit into ASCII (because
3406 // externalization must not change char codes).
3407 { Map* map = buffer->IsAsciiRepresentation()
3408 ? sliced_ascii_string_map()
3409 : sliced_string_map();
3410 MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
3411 if (!maybe_result->ToObject(&result)) return maybe_result;
3414 AssertNoAllocation no_gc;
3415 SlicedString* sliced_string = SlicedString::cast(result);
3416 sliced_string->set_length(length);
3417 sliced_string->set_hash_field(String::kEmptyHashField);
3418 if (buffer->IsConsString()) {
3419 ConsString* cons = ConsString::cast(buffer);
3420 ASSERT(cons->second()->length() == 0);
3421 sliced_string->set_parent(cons->first());
3422 sliced_string->set_offset(start);
3423 } else if (buffer->IsSlicedString()) {
3424 // Prevent nesting sliced strings.
3425 SlicedString* parent_slice = SlicedString::cast(buffer);
3426 sliced_string->set_parent(parent_slice->parent());
3427 sliced_string->set_offset(start + parent_slice->offset());
3429 sliced_string->set_parent(buffer);
3430 sliced_string->set_offset(start);
3432 ASSERT(sliced_string->parent()->IsSeqString() ||
3433 sliced_string->parent()->IsExternalString());
3438 MaybeObject* Heap::AllocateExternalStringFromAscii(
3439 const ExternalAsciiString::Resource* resource) {
3440 size_t length = resource->length();
3441 if (length > static_cast<size_t>(String::kMaxLength)) {
3442 isolate()->context()->mark_out_of_memory();
3443 return Failure::OutOfMemoryException();
3446 ASSERT(String::IsAscii(resource->data(), static_cast<int>(length)));
3448 Map* map = external_ascii_string_map();
3450 { MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
3451 if (!maybe_result->ToObject(&result)) return maybe_result;
3454 ExternalAsciiString* external_string = ExternalAsciiString::cast(result);
3455 external_string->set_length(static_cast<int>(length));
3456 external_string->set_hash_field(String::kEmptyHashField);
3457 external_string->set_resource(resource);
3463 MaybeObject* Heap::AllocateExternalStringFromTwoByte(
3464 const ExternalTwoByteString::Resource* resource) {
3465 size_t length = resource->length();
3466 if (length > static_cast<size_t>(String::kMaxLength)) {
3467 isolate()->context()->mark_out_of_memory();
3468 return Failure::OutOfMemoryException();
3471 // For small strings we check whether the resource contains only
3472 // ASCII characters. If yes, we use a different string map.
3473 static const size_t kAsciiCheckLengthLimit = 32;
3474 bool is_ascii = length <= kAsciiCheckLengthLimit &&
3475 String::IsAscii(resource->data(), static_cast<int>(length));
3476 Map* map = is_ascii ?
3477 external_string_with_ascii_data_map() : external_string_map();
3479 { MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
3480 if (!maybe_result->ToObject(&result)) return maybe_result;
3483 ExternalTwoByteString* external_string = ExternalTwoByteString::cast(result);
3484 external_string->set_length(static_cast<int>(length));
3485 external_string->set_hash_field(String::kEmptyHashField);
3486 external_string->set_resource(resource);
3492 MaybeObject* Heap::LookupSingleCharacterStringFromCode(uint16_t code) {
3493 if (code <= String::kMaxAsciiCharCode) {
3494 Object* value = single_character_string_cache()->get(code);
3495 if (value != undefined_value()) return value;
3498 buffer[0] = static_cast<char>(code);
3500 MaybeObject* maybe_result = LookupSymbol(Vector<const char>(buffer, 1));
3502 if (!maybe_result->ToObject(&result)) return maybe_result;
3503 single_character_string_cache()->set(code, result);
3508 { MaybeObject* maybe_result = AllocateRawTwoByteString(1);
3509 if (!maybe_result->ToObject(&result)) return maybe_result;
3511 String* answer = String::cast(result);
3512 answer->Set(0, code);
3517 MaybeObject* Heap::AllocateByteArray(int length, PretenureFlag pretenure) {
3518 if (length < 0 || length > ByteArray::kMaxLength) {
3519 return Failure::OutOfMemoryException();
3521 if (pretenure == NOT_TENURED) {
3522 return AllocateByteArray(length);
3524 int size = ByteArray::SizeFor(length);
3526 { MaybeObject* maybe_result = (size <= Page::kMaxNonCodeHeapObjectSize)
3527 ? old_data_space_->AllocateRaw(size)
3528 : lo_space_->AllocateRaw(size, NOT_EXECUTABLE);
3529 if (!maybe_result->ToObject(&result)) return maybe_result;
3532 reinterpret_cast<ByteArray*>(result)->set_map_no_write_barrier(
3534 reinterpret_cast<ByteArray*>(result)->set_length(length);
3539 MaybeObject* Heap::AllocateByteArray(int length) {
3540 if (length < 0 || length > ByteArray::kMaxLength) {
3541 return Failure::OutOfMemoryException();
3543 int size = ByteArray::SizeFor(length);
3544 AllocationSpace space =
3545 (size > Page::kMaxNonCodeHeapObjectSize) ? LO_SPACE : NEW_SPACE;
3547 { MaybeObject* maybe_result = AllocateRaw(size, space, OLD_DATA_SPACE);
3548 if (!maybe_result->ToObject(&result)) return maybe_result;
3551 reinterpret_cast<ByteArray*>(result)->set_map_no_write_barrier(
3553 reinterpret_cast<ByteArray*>(result)->set_length(length);
3558 void Heap::CreateFillerObjectAt(Address addr, int size) {
3559 if (size == 0) return;
3560 HeapObject* filler = HeapObject::FromAddress(addr);
3561 if (size == kPointerSize) {
3562 filler->set_map_no_write_barrier(one_pointer_filler_map());
3563 } else if (size == 2 * kPointerSize) {
3564 filler->set_map_no_write_barrier(two_pointer_filler_map());
3566 filler->set_map_no_write_barrier(free_space_map());
3567 FreeSpace::cast(filler)->set_size(size);
3572 MaybeObject* Heap::AllocateExternalArray(int length,
3573 ExternalArrayType array_type,
3574 void* external_pointer,
3575 PretenureFlag pretenure) {
3576 AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
3578 { MaybeObject* maybe_result = AllocateRaw(ExternalArray::kAlignedSize,
3581 if (!maybe_result->ToObject(&result)) return maybe_result;
3584 reinterpret_cast<ExternalArray*>(result)->set_map_no_write_barrier(
3585 MapForExternalArrayType(array_type));
3586 reinterpret_cast<ExternalArray*>(result)->set_length(length);
3587 reinterpret_cast<ExternalArray*>(result)->set_external_pointer(
3594 MaybeObject* Heap::CreateCode(const CodeDesc& desc,
3596 Handle<Object> self_reference,
3598 // Allocate ByteArray before the Code object, so that we do not risk
3599 // leaving uninitialized Code object (and breaking the heap).
3600 ByteArray* reloc_info;
3601 MaybeObject* maybe_reloc_info = AllocateByteArray(desc.reloc_size, TENURED);
3602 if (!maybe_reloc_info->To(&reloc_info)) return maybe_reloc_info;
3605 int body_size = RoundUp(desc.instr_size, kObjectAlignment);
3606 int obj_size = Code::SizeFor(body_size);
3607 ASSERT(IsAligned(static_cast<intptr_t>(obj_size), kCodeAlignment));
3608 MaybeObject* maybe_result;
3609 // Large code objects and code objects which should stay at a fixed address
3610 // are allocated in large object space.
3612 bool force_lo_space = obj_size > code_space()->AreaSize();
3613 if (force_lo_space) {
3614 maybe_result = lo_space_->AllocateRaw(obj_size, EXECUTABLE);
3616 maybe_result = code_space_->AllocateRaw(obj_size);
3618 if (!maybe_result->To<HeapObject>(&result)) return maybe_result;
3620 if (immovable && !force_lo_space &&
3621 // Objects on the first page of each space are never moved.
3622 !code_space_->FirstPage()->Contains(result->address())) {
3623 // Discard the first code allocation, which was on a page where it could be
3625 CreateFillerObjectAt(result->address(), obj_size);
3626 maybe_result = lo_space_->AllocateRaw(obj_size, EXECUTABLE);
3627 if (!maybe_result->To<HeapObject>(&result)) return maybe_result;
3630 // Initialize the object
3631 result->set_map_no_write_barrier(code_map());
3632 Code* code = Code::cast(result);
3633 ASSERT(!isolate_->code_range()->exists() ||
3634 isolate_->code_range()->contains(code->address()));
3635 code->set_instruction_size(desc.instr_size);
3636 code->set_relocation_info(reloc_info);
3637 code->set_flags(flags);
3638 if (code->is_call_stub() || code->is_keyed_call_stub()) {
3639 code->set_check_type(RECEIVER_MAP_CHECK);
3641 code->set_deoptimization_data(empty_fixed_array(), SKIP_WRITE_BARRIER);
3642 code->set_type_feedback_info(undefined_value(), SKIP_WRITE_BARRIER);
3643 code->set_handler_table(empty_fixed_array(), SKIP_WRITE_BARRIER);
3644 code->set_gc_metadata(Smi::FromInt(0));
3645 code->set_ic_age(global_ic_age_);
3646 // Allow self references to created code object by patching the handle to
3647 // point to the newly allocated Code object.
3648 if (!self_reference.is_null()) {
3649 *(self_reference.location()) = code;
3651 // Migrate generated code.
3652 // The generated code can contain Object** values (typically from handles)
3653 // that are dereferenced during the copy to point directly to the actual heap
3654 // objects. These pointers can include references to the code object itself,
3655 // through the self_reference parameter.
3656 code->CopyFrom(desc);
3659 if (FLAG_verify_heap) {
3667 MaybeObject* Heap::CopyCode(Code* code) {
3668 // Allocate an object the same size as the code object.
3669 int obj_size = code->Size();
3670 MaybeObject* maybe_result;
3671 if (obj_size > code_space()->AreaSize()) {
3672 maybe_result = lo_space_->AllocateRaw(obj_size, EXECUTABLE);
3674 maybe_result = code_space_->AllocateRaw(obj_size);
3678 if (!maybe_result->ToObject(&result)) return maybe_result;
3680 // Copy code object.
3681 Address old_addr = code->address();
3682 Address new_addr = reinterpret_cast<HeapObject*>(result)->address();
3683 CopyBlock(new_addr, old_addr, obj_size);
3684 // Relocate the copy.
3685 Code* new_code = Code::cast(result);
3686 ASSERT(!isolate_->code_range()->exists() ||
3687 isolate_->code_range()->contains(code->address()));
3688 new_code->Relocate(new_addr - old_addr);
3693 MaybeObject* Heap::CopyCode(Code* code, Vector<byte> reloc_info) {
3694 // Allocate ByteArray before the Code object, so that we do not risk
3695 // leaving uninitialized Code object (and breaking the heap).
3696 Object* reloc_info_array;
3697 { MaybeObject* maybe_reloc_info_array =
3698 AllocateByteArray(reloc_info.length(), TENURED);
3699 if (!maybe_reloc_info_array->ToObject(&reloc_info_array)) {
3700 return maybe_reloc_info_array;
3704 int new_body_size = RoundUp(code->instruction_size(), kObjectAlignment);
3706 int new_obj_size = Code::SizeFor(new_body_size);
3708 Address old_addr = code->address();
3710 size_t relocation_offset =
3711 static_cast<size_t>(code->instruction_end() - old_addr);
3713 MaybeObject* maybe_result;
3714 if (new_obj_size > code_space()->AreaSize()) {
3715 maybe_result = lo_space_->AllocateRaw(new_obj_size, EXECUTABLE);
3717 maybe_result = code_space_->AllocateRaw(new_obj_size);
3721 if (!maybe_result->ToObject(&result)) return maybe_result;
3723 // Copy code object.
3724 Address new_addr = reinterpret_cast<HeapObject*>(result)->address();
3726 // Copy header and instructions.
3727 memcpy(new_addr, old_addr, relocation_offset);
3729 Code* new_code = Code::cast(result);
3730 new_code->set_relocation_info(ByteArray::cast(reloc_info_array));
3732 // Copy patched rinfo.
3733 memcpy(new_code->relocation_start(), reloc_info.start(), reloc_info.length());
3735 // Relocate the copy.
3736 ASSERT(!isolate_->code_range()->exists() ||
3737 isolate_->code_range()->contains(code->address()));
3738 new_code->Relocate(new_addr - old_addr);
3741 if (FLAG_verify_heap) {
3749 MaybeObject* Heap::Allocate(Map* map, AllocationSpace space) {
3750 ASSERT(gc_state_ == NOT_IN_GC);
3751 ASSERT(map->instance_type() != MAP_TYPE);
3752 // If allocation failures are disallowed, we may allocate in a different
3753 // space when new space is full and the object is not a large object.
3754 AllocationSpace retry_space =
3755 (space != NEW_SPACE) ? space : TargetSpaceId(map->instance_type());
3757 { MaybeObject* maybe_result =
3758 AllocateRaw(map->instance_size(), space, retry_space);
3759 if (!maybe_result->ToObject(&result)) return maybe_result;
3761 // No need for write barrier since object is white and map is in old space.
3762 HeapObject::cast(result)->set_map_no_write_barrier(map);
3767 void Heap::InitializeFunction(JSFunction* function,
3768 SharedFunctionInfo* shared,
3769 Object* prototype) {
3770 ASSERT(!prototype->IsMap());
3771 function->initialize_properties();
3772 function->initialize_elements();
3773 function->set_shared(shared);
3774 function->set_code(shared->code());
3775 function->set_prototype_or_initial_map(prototype);
3776 function->set_context(undefined_value());
3777 function->set_literals_or_bindings(empty_fixed_array());
3778 function->set_next_function_link(undefined_value());
3782 MaybeObject* Heap::AllocateFunctionPrototype(JSFunction* function) {
3783 // Allocate the prototype. Make sure to use the object function
3784 // from the function's context, since the function can be from a
3785 // different context.
3786 JSFunction* object_function =
3787 function->context()->native_context()->object_function();
3789 // Each function prototype gets a copy of the object function map.
3790 // This avoid unwanted sharing of maps between prototypes of different
3793 ASSERT(object_function->has_initial_map());
3794 MaybeObject* maybe_map = object_function->initial_map()->Copy();
3795 if (!maybe_map->To(&new_map)) return maybe_map;
3798 MaybeObject* maybe_prototype = AllocateJSObjectFromMap(new_map);
3799 if (!maybe_prototype->ToObject(&prototype)) return maybe_prototype;
3801 // When creating the prototype for the function we must set its
3802 // constructor to the function.
3803 MaybeObject* maybe_failure =
3804 JSObject::cast(prototype)->SetLocalPropertyIgnoreAttributes(
3805 constructor_symbol(), function, DONT_ENUM);
3806 if (maybe_failure->IsFailure()) return maybe_failure;
3812 MaybeObject* Heap::AllocateFunction(Map* function_map,
3813 SharedFunctionInfo* shared,
3815 PretenureFlag pretenure) {
3816 AllocationSpace space =
3817 (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
3819 { MaybeObject* maybe_result = Allocate(function_map, space);
3820 if (!maybe_result->ToObject(&result)) return maybe_result;
3822 InitializeFunction(JSFunction::cast(result), shared, prototype);
3827 MaybeObject* Heap::AllocateArgumentsObject(Object* callee, int length) {
3828 // To get fast allocation and map sharing for arguments objects we
3829 // allocate them based on an arguments boilerplate.
3831 JSObject* boilerplate;
3832 int arguments_object_size;
3833 bool strict_mode_callee = callee->IsJSFunction() &&
3834 !JSFunction::cast(callee)->shared()->is_classic_mode();
3835 if (strict_mode_callee) {
3837 isolate()->context()->native_context()->
3838 strict_mode_arguments_boilerplate();
3839 arguments_object_size = kArgumentsObjectSizeStrict;
3842 isolate()->context()->native_context()->arguments_boilerplate();
3843 arguments_object_size = kArgumentsObjectSize;
3846 // This calls Copy directly rather than using Heap::AllocateRaw so we
3847 // duplicate the check here.
3848 ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
3850 // Check that the size of the boilerplate matches our
3851 // expectations. The ArgumentsAccessStub::GenerateNewObject relies
3852 // on the size being a known constant.
3853 ASSERT(arguments_object_size == boilerplate->map()->instance_size());
3855 // Do the allocation.
3857 { MaybeObject* maybe_result =
3858 AllocateRaw(arguments_object_size, NEW_SPACE, OLD_POINTER_SPACE);
3859 if (!maybe_result->ToObject(&result)) return maybe_result;
3862 // Copy the content. The arguments boilerplate doesn't have any
3863 // fields that point to new space so it's safe to skip the write
3865 CopyBlock(HeapObject::cast(result)->address(),
3866 boilerplate->address(),
3867 JSObject::kHeaderSize);
3869 // Set the length property.
3870 JSObject::cast(result)->InObjectPropertyAtPut(kArgumentsLengthIndex,
3871 Smi::FromInt(length),
3872 SKIP_WRITE_BARRIER);
3873 // Set the callee property for non-strict mode arguments object only.
3874 if (!strict_mode_callee) {
3875 JSObject::cast(result)->InObjectPropertyAtPut(kArgumentsCalleeIndex,
3879 // Check the state of the object
3880 ASSERT(JSObject::cast(result)->HasFastProperties());
3881 ASSERT(JSObject::cast(result)->HasFastObjectElements());
3887 static bool HasDuplicates(DescriptorArray* descriptors) {
3888 int count = descriptors->number_of_descriptors();
3890 String* prev_key = descriptors->GetKey(0);
3891 for (int i = 1; i != count; i++) {
3892 String* current_key = descriptors->GetKey(i);
3893 if (prev_key == current_key) return true;
3894 prev_key = current_key;
3901 MaybeObject* Heap::AllocateInitialMap(JSFunction* fun) {
3902 ASSERT(!fun->has_initial_map());
3904 // First create a new map with the size and number of in-object properties
3905 // suggested by the function.
3906 int instance_size = fun->shared()->CalculateInstanceSize();
3907 int in_object_properties = fun->shared()->CalculateInObjectProperties();
3909 MaybeObject* maybe_map = AllocateMap(JS_OBJECT_TYPE, instance_size);
3910 if (!maybe_map->To(&map)) return maybe_map;
3912 // Fetch or allocate prototype.
3914 if (fun->has_instance_prototype()) {
3915 prototype = fun->instance_prototype();
3917 MaybeObject* maybe_prototype = AllocateFunctionPrototype(fun);
3918 if (!maybe_prototype->To(&prototype)) return maybe_prototype;
3920 map->set_inobject_properties(in_object_properties);
3921 map->set_unused_property_fields(in_object_properties);
3922 map->set_prototype(prototype);
3923 ASSERT(map->has_fast_object_elements());
3925 // If the function has only simple this property assignments add
3926 // field descriptors for these to the initial map as the object
3927 // cannot be constructed without having these properties. Guard by
3928 // the inline_new flag so we only change the map if we generate a
3929 // specialized construct stub.
3930 ASSERT(in_object_properties <= Map::kMaxPreAllocatedPropertyFields);
3931 if (fun->shared()->CanGenerateInlineConstructor(prototype)) {
3932 int count = fun->shared()->this_property_assignments_count();
3933 if (count > in_object_properties) {
3934 // Inline constructor can only handle inobject properties.
3935 fun->shared()->ForbidInlineConstructor();
3937 DescriptorArray* descriptors;
3938 MaybeObject* maybe_descriptors = DescriptorArray::Allocate(count);
3939 if (!maybe_descriptors->To(&descriptors)) return maybe_descriptors;
3941 DescriptorArray::WhitenessWitness witness(descriptors);
3942 for (int i = 0; i < count; i++) {
3943 String* name = fun->shared()->GetThisPropertyAssignmentName(i);
3944 ASSERT(name->IsSymbol());
3945 FieldDescriptor field(name, i, NONE, i + 1);
3946 descriptors->Set(i, &field, witness);
3948 descriptors->Sort();
3950 // The descriptors may contain duplicates because the compiler does not
3951 // guarantee the uniqueness of property names (it would have required
3952 // quadratic time). Once the descriptors are sorted we can check for
3953 // duplicates in linear time.
3954 if (HasDuplicates(descriptors)) {
3955 fun->shared()->ForbidInlineConstructor();
3957 map->InitializeDescriptors(descriptors);
3958 map->set_pre_allocated_property_fields(count);
3959 map->set_unused_property_fields(in_object_properties - count);
3964 fun->shared()->StartInobjectSlackTracking(map);
3970 void Heap::InitializeJSObjectFromMap(JSObject* obj,
3971 FixedArray* properties,
3973 obj->set_properties(properties);
3974 obj->initialize_elements();
3975 // TODO(1240798): Initialize the object's body using valid initial values
3976 // according to the object's initial map. For example, if the map's
3977 // instance type is JS_ARRAY_TYPE, the length field should be initialized
3978 // to a number (e.g. Smi::FromInt(0)) and the elements initialized to a
3979 // fixed array (e.g. Heap::empty_fixed_array()). Currently, the object
3980 // verification code has to cope with (temporarily) invalid objects. See
3981 // for example, JSArray::JSArrayVerify).
3983 // We cannot always fill with one_pointer_filler_map because objects
3984 // created from API functions expect their internal fields to be initialized
3985 // with undefined_value.
3986 // Pre-allocated fields need to be initialized with undefined_value as well
3987 // so that object accesses before the constructor completes (e.g. in the
3988 // debugger) will not cause a crash.
3989 if (map->constructor()->IsJSFunction() &&
3990 JSFunction::cast(map->constructor())->shared()->
3991 IsInobjectSlackTrackingInProgress()) {
3992 // We might want to shrink the object later.
3993 ASSERT(obj->GetInternalFieldCount() == 0);
3994 filler = Heap::one_pointer_filler_map();
3996 filler = Heap::undefined_value();
3998 obj->InitializeBody(map, Heap::undefined_value(), filler);
4002 MaybeObject* Heap::AllocateJSObjectFromMap(Map* map, PretenureFlag pretenure) {
4003 // JSFunctions should be allocated using AllocateFunction to be
4004 // properly initialized.
4005 ASSERT(map->instance_type() != JS_FUNCTION_TYPE);
4007 // Both types of global objects should be allocated using
4008 // AllocateGlobalObject to be properly initialized.
4009 ASSERT(map->instance_type() != JS_GLOBAL_OBJECT_TYPE);
4010 ASSERT(map->instance_type() != JS_BUILTINS_OBJECT_TYPE);
4012 // Allocate the backing storage for the properties.
4014 map->pre_allocated_property_fields() +
4015 map->unused_property_fields() -
4016 map->inobject_properties();
4017 ASSERT(prop_size >= 0);
4019 { MaybeObject* maybe_properties = AllocateFixedArray(prop_size, pretenure);
4020 if (!maybe_properties->ToObject(&properties)) return maybe_properties;
4023 // Allocate the JSObject.
4024 AllocationSpace space =
4025 (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
4026 if (map->instance_size() > Page::kMaxNonCodeHeapObjectSize) space = LO_SPACE;
4028 { MaybeObject* maybe_obj = Allocate(map, space);
4029 if (!maybe_obj->ToObject(&obj)) return maybe_obj;
4032 // Initialize the JSObject.
4033 InitializeJSObjectFromMap(JSObject::cast(obj),
4034 FixedArray::cast(properties),
4036 ASSERT(JSObject::cast(obj)->HasFastSmiOrObjectElements());
4041 MaybeObject* Heap::AllocateJSObject(JSFunction* constructor,
4042 PretenureFlag pretenure) {
4043 // Allocate the initial map if absent.
4044 if (!constructor->has_initial_map()) {
4045 Object* initial_map;
4046 { MaybeObject* maybe_initial_map = AllocateInitialMap(constructor);
4047 if (!maybe_initial_map->ToObject(&initial_map)) return maybe_initial_map;
4049 constructor->set_initial_map(Map::cast(initial_map));
4050 Map::cast(initial_map)->set_constructor(constructor);
4052 // Allocate the object based on the constructors initial map.
4053 MaybeObject* result = AllocateJSObjectFromMap(
4054 constructor->initial_map(), pretenure);
4056 // Make sure result is NOT a global object if valid.
4057 Object* non_failure;
4058 ASSERT(!result->ToObject(&non_failure) || !non_failure->IsGlobalObject());
4064 MaybeObject* Heap::AllocateJSModule(Context* context, ScopeInfo* scope_info) {
4065 // Allocate a fresh map. Modules do not have a prototype.
4067 MaybeObject* maybe_map = AllocateMap(JS_MODULE_TYPE, JSModule::kSize);
4068 if (!maybe_map->To(&map)) return maybe_map;
4069 // Allocate the object based on the map.
4071 MaybeObject* maybe_module = AllocateJSObjectFromMap(map, TENURED);
4072 if (!maybe_module->To(&module)) return maybe_module;
4073 module->set_context(context);
4074 module->set_scope_info(scope_info);
4079 MaybeObject* Heap::AllocateJSArrayAndStorage(
4080 ElementsKind elements_kind,
4083 ArrayStorageAllocationMode mode,
4084 PretenureFlag pretenure) {
4085 ASSERT(capacity >= length);
4086 if (length != 0 && mode == INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE) {
4087 elements_kind = GetHoleyElementsKind(elements_kind);
4089 MaybeObject* maybe_array = AllocateJSArray(elements_kind, pretenure);
4091 if (!maybe_array->To(&array)) return maybe_array;
4093 if (capacity == 0) {
4094 array->set_length(Smi::FromInt(0));
4095 array->set_elements(empty_fixed_array());
4099 FixedArrayBase* elms;
4100 MaybeObject* maybe_elms = NULL;
4101 if (elements_kind == FAST_DOUBLE_ELEMENTS) {
4102 if (mode == DONT_INITIALIZE_ARRAY_ELEMENTS) {
4103 maybe_elms = AllocateUninitializedFixedDoubleArray(capacity);
4105 ASSERT(mode == INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
4106 maybe_elms = AllocateFixedDoubleArrayWithHoles(capacity);
4109 ASSERT(IsFastSmiOrObjectElementsKind(elements_kind));
4110 if (mode == DONT_INITIALIZE_ARRAY_ELEMENTS) {
4111 maybe_elms = AllocateUninitializedFixedArray(capacity);
4113 ASSERT(mode == INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
4114 maybe_elms = AllocateFixedArrayWithHoles(capacity);
4117 if (!maybe_elms->To(&elms)) return maybe_elms;
4119 array->set_elements(elms);
4120 array->set_length(Smi::FromInt(length));
4125 MaybeObject* Heap::AllocateJSArrayWithElements(
4126 FixedArrayBase* elements,
4127 ElementsKind elements_kind,
4128 PretenureFlag pretenure) {
4129 MaybeObject* maybe_array = AllocateJSArray(elements_kind, pretenure);
4131 if (!maybe_array->To(&array)) return maybe_array;
4133 array->set_elements(elements);
4134 array->set_length(Smi::FromInt(elements->length()));
4135 array->ValidateElements();
4140 MaybeObject* Heap::AllocateJSProxy(Object* handler, Object* prototype) {
4142 // TODO(rossberg): Once we optimize proxies, think about a scheme to share
4143 // maps. Will probably depend on the identity of the handler object, too.
4145 MaybeObject* maybe_map_obj = AllocateMap(JS_PROXY_TYPE, JSProxy::kSize);
4146 if (!maybe_map_obj->To<Map>(&map)) return maybe_map_obj;
4147 map->set_prototype(prototype);
4149 // Allocate the proxy object.
4151 MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
4152 if (!maybe_result->To<JSProxy>(&result)) return maybe_result;
4153 result->InitializeBody(map->instance_size(), Smi::FromInt(0));
4154 result->set_handler(handler);
4155 result->set_hash(undefined_value(), SKIP_WRITE_BARRIER);
4160 MaybeObject* Heap::AllocateJSFunctionProxy(Object* handler,
4162 Object* construct_trap,
4163 Object* prototype) {
4165 // TODO(rossberg): Once we optimize proxies, think about a scheme to share
4166 // maps. Will probably depend on the identity of the handler object, too.
4168 MaybeObject* maybe_map_obj =
4169 AllocateMap(JS_FUNCTION_PROXY_TYPE, JSFunctionProxy::kSize);
4170 if (!maybe_map_obj->To<Map>(&map)) return maybe_map_obj;
4171 map->set_prototype(prototype);
4173 // Allocate the proxy object.
4174 JSFunctionProxy* result;
4175 MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
4176 if (!maybe_result->To<JSFunctionProxy>(&result)) return maybe_result;
4177 result->InitializeBody(map->instance_size(), Smi::FromInt(0));
4178 result->set_handler(handler);
4179 result->set_hash(undefined_value(), SKIP_WRITE_BARRIER);
4180 result->set_call_trap(call_trap);
4181 result->set_construct_trap(construct_trap);
4186 MaybeObject* Heap::AllocateGlobalObject(JSFunction* constructor) {
4187 ASSERT(constructor->has_initial_map());
4188 Map* map = constructor->initial_map();
4189 ASSERT(map->is_dictionary_map());
4191 // Make sure no field properties are described in the initial map.
4192 // This guarantees us that normalizing the properties does not
4193 // require us to change property values to JSGlobalPropertyCells.
4194 ASSERT(map->NextFreePropertyIndex() == 0);
4196 // Make sure we don't have a ton of pre-allocated slots in the
4197 // global objects. They will be unused once we normalize the object.
4198 ASSERT(map->unused_property_fields() == 0);
4199 ASSERT(map->inobject_properties() == 0);
4201 // Initial size of the backing store to avoid resize of the storage during
4202 // bootstrapping. The size differs between the JS global object ad the
4204 int initial_size = map->instance_type() == JS_GLOBAL_OBJECT_TYPE ? 64 : 512;
4206 // Allocate a dictionary object for backing storage.
4207 StringDictionary* dictionary;
4208 MaybeObject* maybe_dictionary =
4209 StringDictionary::Allocate(
4210 map->NumberOfOwnDescriptors() * 2 + initial_size);
4211 if (!maybe_dictionary->To(&dictionary)) return maybe_dictionary;
4213 // The global object might be created from an object template with accessors.
4214 // Fill these accessors into the dictionary.
4215 DescriptorArray* descs = map->instance_descriptors();
4216 for (int i = 0; i < descs->number_of_descriptors(); i++) {
4217 PropertyDetails details = descs->GetDetails(i);
4218 ASSERT(details.type() == CALLBACKS); // Only accessors are expected.
4219 PropertyDetails d = PropertyDetails(details.attributes(),
4221 details.descriptor_index());
4222 Object* value = descs->GetCallbacksObject(i);
4223 MaybeObject* maybe_value = AllocateJSGlobalPropertyCell(value);
4224 if (!maybe_value->ToObject(&value)) return maybe_value;
4226 MaybeObject* maybe_added = dictionary->Add(descs->GetKey(i), value, d);
4227 if (!maybe_added->To(&dictionary)) return maybe_added;
4230 // Allocate the global object and initialize it with the backing store.
4232 MaybeObject* maybe_global = Allocate(map, OLD_POINTER_SPACE);
4233 if (!maybe_global->To(&global)) return maybe_global;
4235 InitializeJSObjectFromMap(global, dictionary, map);
4237 // Create a new map for the global object.
4239 MaybeObject* maybe_map = map->CopyDropDescriptors();
4240 if (!maybe_map->To(&new_map)) return maybe_map;
4241 new_map->set_dictionary_map(true);
4243 // Set up the global object as a normalized object.
4244 global->set_map(new_map);
4245 global->set_properties(dictionary);
4247 // Make sure result is a global object with properties in dictionary.
4248 ASSERT(global->IsGlobalObject());
4249 ASSERT(!global->HasFastProperties());
4254 MaybeObject* Heap::CopyJSObject(JSObject* source) {
4255 // Never used to copy functions. If functions need to be copied we
4256 // have to be careful to clear the literals array.
4257 SLOW_ASSERT(!source->IsJSFunction());
4260 Map* map = source->map();
4261 int object_size = map->instance_size();
4264 WriteBarrierMode wb_mode = UPDATE_WRITE_BARRIER;
4266 // If we're forced to always allocate, we use the general allocation
4267 // functions which may leave us with an object in old space.
4268 if (always_allocate()) {
4269 { MaybeObject* maybe_clone =
4270 AllocateRaw(object_size, NEW_SPACE, OLD_POINTER_SPACE);
4271 if (!maybe_clone->ToObject(&clone)) return maybe_clone;
4273 Address clone_address = HeapObject::cast(clone)->address();
4274 CopyBlock(clone_address,
4277 // Update write barrier for all fields that lie beyond the header.
4278 RecordWrites(clone_address,
4279 JSObject::kHeaderSize,
4280 (object_size - JSObject::kHeaderSize) / kPointerSize);
4282 wb_mode = SKIP_WRITE_BARRIER;
4283 { MaybeObject* maybe_clone = new_space_.AllocateRaw(object_size);
4284 if (!maybe_clone->ToObject(&clone)) return maybe_clone;
4286 SLOW_ASSERT(InNewSpace(clone));
4287 // Since we know the clone is allocated in new space, we can copy
4288 // the contents without worrying about updating the write barrier.
4289 CopyBlock(HeapObject::cast(clone)->address(),
4295 JSObject::cast(clone)->GetElementsKind() == source->GetElementsKind());
4296 FixedArrayBase* elements = FixedArrayBase::cast(source->elements());
4297 FixedArray* properties = FixedArray::cast(source->properties());
4298 // Update elements if necessary.
4299 if (elements->length() > 0) {
4301 { MaybeObject* maybe_elem;
4302 if (elements->map() == fixed_cow_array_map()) {
4303 maybe_elem = FixedArray::cast(elements);
4304 } else if (source->HasFastDoubleElements()) {
4305 maybe_elem = CopyFixedDoubleArray(FixedDoubleArray::cast(elements));
4307 maybe_elem = CopyFixedArray(FixedArray::cast(elements));
4309 if (!maybe_elem->ToObject(&elem)) return maybe_elem;
4311 JSObject::cast(clone)->set_elements(FixedArrayBase::cast(elem), wb_mode);
4313 // Update properties if necessary.
4314 if (properties->length() > 0) {
4316 { MaybeObject* maybe_prop = CopyFixedArray(properties);
4317 if (!maybe_prop->ToObject(&prop)) return maybe_prop;
4319 JSObject::cast(clone)->set_properties(FixedArray::cast(prop), wb_mode);
4321 // Return the new clone.
4326 MaybeObject* Heap::ReinitializeJSReceiver(
4327 JSReceiver* object, InstanceType type, int size) {
4328 ASSERT(type >= FIRST_JS_OBJECT_TYPE);
4330 // Allocate fresh map.
4331 // TODO(rossberg): Once we optimize proxies, cache these maps.
4333 MaybeObject* maybe = AllocateMap(type, size);
4334 if (!maybe->To<Map>(&map)) return maybe;
4336 // Check that the receiver has at least the size of the fresh object.
4337 int size_difference = object->map()->instance_size() - map->instance_size();
4338 ASSERT(size_difference >= 0);
4340 map->set_prototype(object->map()->prototype());
4342 // Allocate the backing storage for the properties.
4343 int prop_size = map->unused_property_fields() - map->inobject_properties();
4345 maybe = AllocateFixedArray(prop_size, TENURED);
4346 if (!maybe->ToObject(&properties)) return maybe;
4348 // Functions require some allocation, which might fail here.
4349 SharedFunctionInfo* shared = NULL;
4350 if (type == JS_FUNCTION_TYPE) {
4352 maybe = LookupAsciiSymbol("<freezing call trap>");
4353 if (!maybe->To<String>(&name)) return maybe;
4354 maybe = AllocateSharedFunctionInfo(name);
4355 if (!maybe->To<SharedFunctionInfo>(&shared)) return maybe;
4358 // Because of possible retries of this function after failure,
4359 // we must NOT fail after this point, where we have changed the type!
4361 // Reset the map for the object.
4362 object->set_map(map);
4363 JSObject* jsobj = JSObject::cast(object);
4365 // Reinitialize the object from the constructor map.
4366 InitializeJSObjectFromMap(jsobj, FixedArray::cast(properties), map);
4368 // Functions require some minimal initialization.
4369 if (type == JS_FUNCTION_TYPE) {
4370 map->set_function_with_prototype(true);
4371 InitializeFunction(JSFunction::cast(object), shared, the_hole_value());
4372 JSFunction::cast(object)->set_context(
4373 isolate()->context()->native_context());
4376 // Put in filler if the new object is smaller than the old.
4377 if (size_difference > 0) {
4378 CreateFillerObjectAt(
4379 object->address() + map->instance_size(), size_difference);
4386 MaybeObject* Heap::ReinitializeJSGlobalProxy(JSFunction* constructor,
4387 JSGlobalProxy* object) {
4388 ASSERT(constructor->has_initial_map());
4389 Map* map = constructor->initial_map();
4391 // Check that the already allocated object has the same size and type as
4392 // objects allocated using the constructor.
4393 ASSERT(map->instance_size() == object->map()->instance_size());
4394 ASSERT(map->instance_type() == object->map()->instance_type());
4396 // Allocate the backing storage for the properties.
4397 int prop_size = map->unused_property_fields() - map->inobject_properties();
4399 { MaybeObject* maybe_properties = AllocateFixedArray(prop_size, TENURED);
4400 if (!maybe_properties->ToObject(&properties)) return maybe_properties;
4403 // Reset the map for the object.
4404 object->set_map(constructor->initial_map());
4406 // Reinitialize the object from the constructor map.
4407 InitializeJSObjectFromMap(object, FixedArray::cast(properties), map);
4412 MaybeObject* Heap::AllocateStringFromAscii(Vector<const char> string,
4413 PretenureFlag pretenure) {
4414 int length = string.length();
4416 return Heap::LookupSingleCharacterStringFromCode(string[0]);
4419 { MaybeObject* maybe_result =
4420 AllocateRawAsciiString(string.length(), pretenure);
4421 if (!maybe_result->ToObject(&result)) return maybe_result;
4424 // Copy the characters into the new object.
4425 CopyChars(SeqAsciiString::cast(result)->GetChars(), string.start(), length);
4430 MaybeObject* Heap::AllocateStringFromUtf8Slow(Vector<const char> string,
4431 int non_ascii_start,
4432 PretenureFlag pretenure) {
4433 // Continue counting the number of characters in the UTF-8 string, starting
4434 // from the first non-ascii character or word.
4435 int chars = non_ascii_start;
4436 Access<UnicodeCache::Utf8Decoder>
4437 decoder(isolate_->unicode_cache()->utf8_decoder());
4438 decoder->Reset(string.start() + non_ascii_start, string.length() - chars);
4439 while (decoder->has_more()) {
4440 uint32_t r = decoder->GetNext();
4441 if (r <= unibrow::Utf16::kMaxNonSurrogateCharCode) {
4449 { MaybeObject* maybe_result = AllocateRawTwoByteString(chars, pretenure);
4450 if (!maybe_result->ToObject(&result)) return maybe_result;
4453 // Convert and copy the characters into the new object.
4454 SeqTwoByteString* twobyte = SeqTwoByteString::cast(result);
4455 decoder->Reset(string.start(), string.length());
4458 uint32_t r = decoder->GetNext();
4459 if (r > unibrow::Utf16::kMaxNonSurrogateCharCode) {
4460 twobyte->SeqTwoByteStringSet(i++, unibrow::Utf16::LeadSurrogate(r));
4461 twobyte->SeqTwoByteStringSet(i++, unibrow::Utf16::TrailSurrogate(r));
4463 twobyte->SeqTwoByteStringSet(i++, r);
4470 MaybeObject* Heap::AllocateStringFromTwoByte(Vector<const uc16> string,
4471 PretenureFlag pretenure) {
4472 // Check if the string is an ASCII string.
4474 int length = string.length();
4475 const uc16* start = string.start();
4477 if (String::IsAscii(start, length)) {
4478 MaybeObject* maybe_result = AllocateRawAsciiString(length, pretenure);
4479 if (!maybe_result->ToObject(&result)) return maybe_result;
4480 CopyChars(SeqAsciiString::cast(result)->GetChars(), start, length);
4481 } else { // It's not an ASCII string.
4482 MaybeObject* maybe_result = AllocateRawTwoByteString(length, pretenure);
4483 if (!maybe_result->ToObject(&result)) return maybe_result;
4484 CopyChars(SeqTwoByteString::cast(result)->GetChars(), start, length);
4490 Map* Heap::SymbolMapForString(String* string) {
4491 // If the string is in new space it cannot be used as a symbol.
4492 if (InNewSpace(string)) return NULL;
4494 // Find the corresponding symbol map for strings.
4495 switch (string->map()->instance_type()) {
4496 case STRING_TYPE: return symbol_map();
4497 case ASCII_STRING_TYPE: return ascii_symbol_map();
4498 case CONS_STRING_TYPE: return cons_symbol_map();
4499 case CONS_ASCII_STRING_TYPE: return cons_ascii_symbol_map();
4500 case EXTERNAL_STRING_TYPE: return external_symbol_map();
4501 case EXTERNAL_ASCII_STRING_TYPE: return external_ascii_symbol_map();
4502 case EXTERNAL_STRING_WITH_ASCII_DATA_TYPE:
4503 return external_symbol_with_ascii_data_map();
4504 case SHORT_EXTERNAL_STRING_TYPE: return short_external_symbol_map();
4505 case SHORT_EXTERNAL_ASCII_STRING_TYPE:
4506 return short_external_ascii_symbol_map();
4507 case SHORT_EXTERNAL_STRING_WITH_ASCII_DATA_TYPE:
4508 return short_external_symbol_with_ascii_data_map();
4509 default: return NULL; // No match found.
4514 MaybeObject* Heap::AllocateInternalSymbol(unibrow::CharacterStream* buffer,
4516 uint32_t hash_field) {
4518 // Ensure the chars matches the number of characters in the buffer.
4519 ASSERT(static_cast<unsigned>(chars) == buffer->Utf16Length());
4520 // Determine whether the string is ASCII.
4521 bool is_ascii = true;
4522 while (buffer->has_more()) {
4523 if (buffer->GetNext() > unibrow::Utf8::kMaxOneByteChar) {
4530 // Compute map and object size.
4535 if (chars > SeqAsciiString::kMaxLength) {
4536 return Failure::OutOfMemoryException();
4538 map = ascii_symbol_map();
4539 size = SeqAsciiString::SizeFor(chars);
4541 if (chars > SeqTwoByteString::kMaxLength) {
4542 return Failure::OutOfMemoryException();
4545 size = SeqTwoByteString::SizeFor(chars);
4550 { MaybeObject* maybe_result = (size > Page::kMaxNonCodeHeapObjectSize)
4551 ? lo_space_->AllocateRaw(size, NOT_EXECUTABLE)
4552 : old_data_space_->AllocateRaw(size);
4553 if (!maybe_result->ToObject(&result)) return maybe_result;
4556 reinterpret_cast<HeapObject*>(result)->set_map_no_write_barrier(map);
4557 // Set length and hash fields of the allocated string.
4558 String* answer = String::cast(result);
4559 answer->set_length(chars);
4560 answer->set_hash_field(hash_field);
4562 ASSERT_EQ(size, answer->Size());
4564 // Fill in the characters.
4567 uint32_t character = buffer->GetNext();
4568 if (character > unibrow::Utf16::kMaxNonSurrogateCharCode) {
4569 answer->Set(i++, unibrow::Utf16::LeadSurrogate(character));
4570 answer->Set(i++, unibrow::Utf16::TrailSurrogate(character));
4572 answer->Set(i++, character);
4579 MaybeObject* Heap::AllocateRawAsciiString(int length, PretenureFlag pretenure) {
4580 if (length < 0 || length > SeqAsciiString::kMaxLength) {
4581 return Failure::OutOfMemoryException();
4584 int size = SeqAsciiString::SizeFor(length);
4585 ASSERT(size <= SeqAsciiString::kMaxSize);
4587 AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
4588 AllocationSpace retry_space = OLD_DATA_SPACE;
4590 if (space == NEW_SPACE) {
4591 if (size > kMaxObjectSizeInNewSpace) {
4592 // Allocate in large object space, retry space will be ignored.
4594 } else if (size > Page::kMaxNonCodeHeapObjectSize) {
4595 // Allocate in new space, retry in large object space.
4596 retry_space = LO_SPACE;
4598 } else if (space == OLD_DATA_SPACE &&
4599 size > Page::kMaxNonCodeHeapObjectSize) {
4603 { MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
4604 if (!maybe_result->ToObject(&result)) return maybe_result;
4607 // Partially initialize the object.
4608 HeapObject::cast(result)->set_map_no_write_barrier(ascii_string_map());
4609 String::cast(result)->set_length(length);
4610 String::cast(result)->set_hash_field(String::kEmptyHashField);
4611 ASSERT_EQ(size, HeapObject::cast(result)->Size());
4614 if (FLAG_verify_heap) {
4615 // Initialize string's content to ensure ASCII-ness (character range 0-127)
4616 // as required when verifying the heap.
4617 char* dest = SeqAsciiString::cast(result)->GetChars();
4618 memset(dest, 0x0F, length * kCharSize);
4626 MaybeObject* Heap::AllocateRawTwoByteString(int length,
4627 PretenureFlag pretenure) {
4628 if (length < 0 || length > SeqTwoByteString::kMaxLength) {
4629 return Failure::OutOfMemoryException();
4631 int size = SeqTwoByteString::SizeFor(length);
4632 ASSERT(size <= SeqTwoByteString::kMaxSize);
4633 AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
4634 AllocationSpace retry_space = OLD_DATA_SPACE;
4636 if (space == NEW_SPACE) {
4637 if (size > kMaxObjectSizeInNewSpace) {
4638 // Allocate in large object space, retry space will be ignored.
4640 } else if (size > Page::kMaxNonCodeHeapObjectSize) {
4641 // Allocate in new space, retry in large object space.
4642 retry_space = LO_SPACE;
4644 } else if (space == OLD_DATA_SPACE &&
4645 size > Page::kMaxNonCodeHeapObjectSize) {
4649 { MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
4650 if (!maybe_result->ToObject(&result)) return maybe_result;
4653 // Partially initialize the object.
4654 HeapObject::cast(result)->set_map_no_write_barrier(string_map());
4655 String::cast(result)->set_length(length);
4656 String::cast(result)->set_hash_field(String::kEmptyHashField);
4657 ASSERT_EQ(size, HeapObject::cast(result)->Size());
4662 MaybeObject* Heap::AllocateJSArray(
4663 ElementsKind elements_kind,
4664 PretenureFlag pretenure) {
4665 Context* native_context = isolate()->context()->native_context();
4666 JSFunction* array_function = native_context->array_function();
4667 Map* map = array_function->initial_map();
4668 Object* maybe_map_array = native_context->js_array_maps();
4669 if (!maybe_map_array->IsUndefined()) {
4670 Object* maybe_transitioned_map =
4671 FixedArray::cast(maybe_map_array)->get(elements_kind);
4672 if (!maybe_transitioned_map->IsUndefined()) {
4673 map = Map::cast(maybe_transitioned_map);
4677 return AllocateJSObjectFromMap(map, pretenure);
4681 MaybeObject* Heap::AllocateEmptyFixedArray() {
4682 int size = FixedArray::SizeFor(0);
4684 { MaybeObject* maybe_result =
4685 AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE);
4686 if (!maybe_result->ToObject(&result)) return maybe_result;
4688 // Initialize the object.
4689 reinterpret_cast<FixedArray*>(result)->set_map_no_write_barrier(
4691 reinterpret_cast<FixedArray*>(result)->set_length(0);
4696 MaybeObject* Heap::AllocateRawFixedArray(int length) {
4697 if (length < 0 || length > FixedArray::kMaxLength) {
4698 return Failure::OutOfMemoryException();
4701 // Use the general function if we're forced to always allocate.
4702 if (always_allocate()) return AllocateFixedArray(length, TENURED);
4703 // Allocate the raw data for a fixed array.
4704 int size = FixedArray::SizeFor(length);
4705 return size <= kMaxObjectSizeInNewSpace
4706 ? new_space_.AllocateRaw(size)
4707 : lo_space_->AllocateRaw(size, NOT_EXECUTABLE);
4711 MaybeObject* Heap::CopyFixedArrayWithMap(FixedArray* src, Map* map) {
4712 int len = src->length();
4714 { MaybeObject* maybe_obj = AllocateRawFixedArray(len);
4715 if (!maybe_obj->ToObject(&obj)) return maybe_obj;
4717 if (InNewSpace(obj)) {
4718 HeapObject* dst = HeapObject::cast(obj);
4719 dst->set_map_no_write_barrier(map);
4720 CopyBlock(dst->address() + kPointerSize,
4721 src->address() + kPointerSize,
4722 FixedArray::SizeFor(len) - kPointerSize);
4725 HeapObject::cast(obj)->set_map_no_write_barrier(map);
4726 FixedArray* result = FixedArray::cast(obj);
4727 result->set_length(len);
4730 AssertNoAllocation no_gc;
4731 WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
4732 for (int i = 0; i < len; i++) result->set(i, src->get(i), mode);
4737 MaybeObject* Heap::CopyFixedDoubleArrayWithMap(FixedDoubleArray* src,
4739 int len = src->length();
4741 { MaybeObject* maybe_obj = AllocateRawFixedDoubleArray(len, NOT_TENURED);
4742 if (!maybe_obj->ToObject(&obj)) return maybe_obj;
4744 HeapObject* dst = HeapObject::cast(obj);
4745 dst->set_map_no_write_barrier(map);
4747 dst->address() + FixedDoubleArray::kLengthOffset,
4748 src->address() + FixedDoubleArray::kLengthOffset,
4749 FixedDoubleArray::SizeFor(len) - FixedDoubleArray::kLengthOffset);
4754 MaybeObject* Heap::AllocateFixedArray(int length) {
4755 ASSERT(length >= 0);
4756 if (length == 0) return empty_fixed_array();
4758 { MaybeObject* maybe_result = AllocateRawFixedArray(length);
4759 if (!maybe_result->ToObject(&result)) return maybe_result;
4761 // Initialize header.
4762 FixedArray* array = reinterpret_cast<FixedArray*>(result);
4763 array->set_map_no_write_barrier(fixed_array_map());
4764 array->set_length(length);
4766 ASSERT(!InNewSpace(undefined_value()));
4767 MemsetPointer(array->data_start(), undefined_value(), length);
4772 MaybeObject* Heap::AllocateRawFixedArray(int length, PretenureFlag pretenure) {
4773 if (length < 0 || length > FixedArray::kMaxLength) {
4774 return Failure::OutOfMemoryException();
4777 AllocationSpace space =
4778 (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
4779 int size = FixedArray::SizeFor(length);
4780 if (space == NEW_SPACE && size > kMaxObjectSizeInNewSpace) {
4781 // Too big for new space.
4783 } else if (space == OLD_POINTER_SPACE &&
4784 size > Page::kMaxNonCodeHeapObjectSize) {
4785 // Too big for old pointer space.
4789 AllocationSpace retry_space =
4790 (size <= Page::kMaxNonCodeHeapObjectSize) ? OLD_POINTER_SPACE : LO_SPACE;
4792 return AllocateRaw(size, space, retry_space);
4796 MUST_USE_RESULT static MaybeObject* AllocateFixedArrayWithFiller(
4799 PretenureFlag pretenure,
4801 ASSERT(length >= 0);
4802 ASSERT(heap->empty_fixed_array()->IsFixedArray());
4803 if (length == 0) return heap->empty_fixed_array();
4805 ASSERT(!heap->InNewSpace(filler));
4807 { MaybeObject* maybe_result = heap->AllocateRawFixedArray(length, pretenure);
4808 if (!maybe_result->ToObject(&result)) return maybe_result;
4811 HeapObject::cast(result)->set_map_no_write_barrier(heap->fixed_array_map());
4812 FixedArray* array = FixedArray::cast(result);
4813 array->set_length(length);
4814 MemsetPointer(array->data_start(), filler, length);
4819 MaybeObject* Heap::AllocateFixedArray(int length, PretenureFlag pretenure) {
4820 return AllocateFixedArrayWithFiller(this,
4827 MaybeObject* Heap::AllocateFixedArrayWithHoles(int length,
4828 PretenureFlag pretenure) {
4829 return AllocateFixedArrayWithFiller(this,
4836 MaybeObject* Heap::AllocateUninitializedFixedArray(int length) {
4837 if (length == 0) return empty_fixed_array();
4840 { MaybeObject* maybe_obj = AllocateRawFixedArray(length);
4841 if (!maybe_obj->ToObject(&obj)) return maybe_obj;
4844 reinterpret_cast<FixedArray*>(obj)->set_map_no_write_barrier(
4846 FixedArray::cast(obj)->set_length(length);
4851 MaybeObject* Heap::AllocateEmptyFixedDoubleArray() {
4852 int size = FixedDoubleArray::SizeFor(0);
4854 { MaybeObject* maybe_result =
4855 AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE);
4856 if (!maybe_result->ToObject(&result)) return maybe_result;
4858 // Initialize the object.
4859 reinterpret_cast<FixedDoubleArray*>(result)->set_map_no_write_barrier(
4860 fixed_double_array_map());
4861 reinterpret_cast<FixedDoubleArray*>(result)->set_length(0);
4866 MaybeObject* Heap::AllocateUninitializedFixedDoubleArray(
4868 PretenureFlag pretenure) {
4869 if (length == 0) return empty_fixed_array();
4871 Object* elements_object;
4872 MaybeObject* maybe_obj = AllocateRawFixedDoubleArray(length, pretenure);
4873 if (!maybe_obj->ToObject(&elements_object)) return maybe_obj;
4874 FixedDoubleArray* elements =
4875 reinterpret_cast<FixedDoubleArray*>(elements_object);
4877 elements->set_map_no_write_barrier(fixed_double_array_map());
4878 elements->set_length(length);
4883 MaybeObject* Heap::AllocateFixedDoubleArrayWithHoles(
4885 PretenureFlag pretenure) {
4886 if (length == 0) return empty_fixed_array();
4888 Object* elements_object;
4889 MaybeObject* maybe_obj = AllocateRawFixedDoubleArray(length, pretenure);
4890 if (!maybe_obj->ToObject(&elements_object)) return maybe_obj;
4891 FixedDoubleArray* elements =
4892 reinterpret_cast<FixedDoubleArray*>(elements_object);
4894 for (int i = 0; i < length; ++i) {
4895 elements->set_the_hole(i);
4898 elements->set_map_no_write_barrier(fixed_double_array_map());
4899 elements->set_length(length);
4904 MaybeObject* Heap::AllocateRawFixedDoubleArray(int length,
4905 PretenureFlag pretenure) {
4906 if (length < 0 || length > FixedDoubleArray::kMaxLength) {
4907 return Failure::OutOfMemoryException();
4910 AllocationSpace space =
4911 (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
4912 int size = FixedDoubleArray::SizeFor(length);
4914 #ifndef V8_HOST_ARCH_64_BIT
4915 size += kPointerSize;
4918 if (space == NEW_SPACE && size > kMaxObjectSizeInNewSpace) {
4919 // Too big for new space.
4921 } else if (space == OLD_DATA_SPACE &&
4922 size > Page::kMaxNonCodeHeapObjectSize) {
4923 // Too big for old data space.
4927 AllocationSpace retry_space =
4928 (size <= Page::kMaxNonCodeHeapObjectSize) ? OLD_DATA_SPACE : LO_SPACE;
4931 { MaybeObject* maybe_object = AllocateRaw(size, space, retry_space);
4932 if (!maybe_object->To<HeapObject>(&object)) return maybe_object;
4935 return EnsureDoubleAligned(this, object, size);
4939 MaybeObject* Heap::AllocateHashTable(int length, PretenureFlag pretenure) {
4941 { MaybeObject* maybe_result = AllocateFixedArray(length, pretenure);
4942 if (!maybe_result->ToObject(&result)) return maybe_result;
4944 reinterpret_cast<HeapObject*>(result)->set_map_no_write_barrier(
4946 ASSERT(result->IsHashTable());
4951 MaybeObject* Heap::AllocateNativeContext() {
4953 { MaybeObject* maybe_result =
4954 AllocateFixedArray(Context::NATIVE_CONTEXT_SLOTS);
4955 if (!maybe_result->ToObject(&result)) return maybe_result;
4957 Context* context = reinterpret_cast<Context*>(result);
4958 context->set_map_no_write_barrier(native_context_map());
4959 context->set_js_array_maps(undefined_value());
4960 ASSERT(context->IsNativeContext());
4961 ASSERT(result->IsContext());
4966 MaybeObject* Heap::AllocateGlobalContext(JSFunction* function,
4967 ScopeInfo* scope_info) {
4969 { MaybeObject* maybe_result =
4970 AllocateFixedArray(scope_info->ContextLength(), TENURED);
4971 if (!maybe_result->ToObject(&result)) return maybe_result;
4973 Context* context = reinterpret_cast<Context*>(result);
4974 context->set_map_no_write_barrier(global_context_map());
4975 context->set_closure(function);
4976 context->set_previous(function->context());
4977 context->set_extension(scope_info);
4978 context->set_global_object(function->context()->global_object());
4979 ASSERT(context->IsGlobalContext());
4980 ASSERT(result->IsContext());
4985 MaybeObject* Heap::AllocateModuleContext(ScopeInfo* scope_info) {
4987 { MaybeObject* maybe_result =
4988 AllocateFixedArray(scope_info->ContextLength(), TENURED);
4989 if (!maybe_result->ToObject(&result)) return maybe_result;
4991 Context* context = reinterpret_cast<Context*>(result);
4992 context->set_map_no_write_barrier(module_context_map());
4993 // Context links will be set later.
4994 context->set_extension(Smi::FromInt(0));
4999 MaybeObject* Heap::AllocateFunctionContext(int length, JSFunction* function) {
5000 ASSERT(length >= Context::MIN_CONTEXT_SLOTS);
5002 { MaybeObject* maybe_result = AllocateFixedArray(length);
5003 if (!maybe_result->ToObject(&result)) return maybe_result;
5005 Context* context = reinterpret_cast<Context*>(result);
5006 context->set_map_no_write_barrier(function_context_map());
5007 context->set_closure(function);
5008 context->set_previous(function->context());
5009 context->set_extension(Smi::FromInt(0));
5010 context->set_global_object(function->context()->global_object());
5015 MaybeObject* Heap::AllocateCatchContext(JSFunction* function,
5018 Object* thrown_object) {
5019 STATIC_ASSERT(Context::MIN_CONTEXT_SLOTS == Context::THROWN_OBJECT_INDEX);
5021 { MaybeObject* maybe_result =
5022 AllocateFixedArray(Context::MIN_CONTEXT_SLOTS + 1);
5023 if (!maybe_result->ToObject(&result)) return maybe_result;
5025 Context* context = reinterpret_cast<Context*>(result);
5026 context->set_map_no_write_barrier(catch_context_map());
5027 context->set_closure(function);
5028 context->set_previous(previous);
5029 context->set_extension(name);
5030 context->set_global_object(previous->global_object());
5031 context->set(Context::THROWN_OBJECT_INDEX, thrown_object);
5036 MaybeObject* Heap::AllocateWithContext(JSFunction* function,
5038 JSObject* extension) {
5040 { MaybeObject* maybe_result = AllocateFixedArray(Context::MIN_CONTEXT_SLOTS);
5041 if (!maybe_result->ToObject(&result)) return maybe_result;
5043 Context* context = reinterpret_cast<Context*>(result);
5044 context->set_map_no_write_barrier(with_context_map());
5045 context->set_closure(function);
5046 context->set_previous(previous);
5047 context->set_extension(extension);
5048 context->set_global_object(previous->global_object());
5053 MaybeObject* Heap::AllocateBlockContext(JSFunction* function,
5055 ScopeInfo* scope_info) {
5057 { MaybeObject* maybe_result =
5058 AllocateFixedArrayWithHoles(scope_info->ContextLength());
5059 if (!maybe_result->ToObject(&result)) return maybe_result;
5061 Context* context = reinterpret_cast<Context*>(result);
5062 context->set_map_no_write_barrier(block_context_map());
5063 context->set_closure(function);
5064 context->set_previous(previous);
5065 context->set_extension(scope_info);
5066 context->set_global_object(previous->global_object());
5071 MaybeObject* Heap::AllocateScopeInfo(int length) {
5072 FixedArray* scope_info;
5073 MaybeObject* maybe_scope_info = AllocateFixedArray(length, TENURED);
5074 if (!maybe_scope_info->To(&scope_info)) return maybe_scope_info;
5075 scope_info->set_map_no_write_barrier(scope_info_map());
5080 MaybeObject* Heap::AllocateStruct(InstanceType type) {
5083 #define MAKE_CASE(NAME, Name, name) \
5084 case NAME##_TYPE: map = name##_map(); break;
5085 STRUCT_LIST(MAKE_CASE)
5089 return Failure::InternalError();
5091 int size = map->instance_size();
5092 AllocationSpace space =
5093 (size > Page::kMaxNonCodeHeapObjectSize) ? LO_SPACE : OLD_POINTER_SPACE;
5095 { MaybeObject* maybe_result = Allocate(map, space);
5096 if (!maybe_result->ToObject(&result)) return maybe_result;
5098 Struct::cast(result)->InitializeBody(size);
5103 bool Heap::IsHeapIterable() {
5104 return (!old_pointer_space()->was_swept_conservatively() &&
5105 !old_data_space()->was_swept_conservatively());
5109 void Heap::EnsureHeapIsIterable() {
5110 ASSERT(IsAllocationAllowed());
5111 if (!IsHeapIterable()) {
5112 CollectAllGarbage(kMakeHeapIterableMask, "Heap::EnsureHeapIsIterable");
5114 ASSERT(IsHeapIterable());
5118 void Heap::AdvanceIdleIncrementalMarking(intptr_t step_size) {
5119 incremental_marking()->Step(step_size,
5120 IncrementalMarking::NO_GC_VIA_STACK_GUARD);
5122 if (incremental_marking()->IsComplete()) {
5123 bool uncommit = false;
5124 if (gc_count_at_last_idle_gc_ == gc_count_) {
5125 // No GC since the last full GC, the mutator is probably not active.
5126 isolate_->compilation_cache()->Clear();
5129 CollectAllGarbage(kNoGCFlags, "idle notification: finalize incremental");
5130 gc_count_at_last_idle_gc_ = gc_count_;
5132 new_space_.Shrink();
5133 UncommitFromSpace();
5139 bool Heap::IdleNotification(int hint) {
5140 // Hints greater than this value indicate that
5141 // the embedder is requesting a lot of GC work.
5142 const int kMaxHint = 1000;
5143 // Minimal hint that allows to do full GC.
5144 const int kMinHintForFullGC = 100;
5145 intptr_t size_factor = Min(Max(hint, 20), kMaxHint) / 4;
5146 // The size factor is in range [5..250]. The numbers here are chosen from
5147 // experiments. If you changes them, make sure to test with
5148 // chrome/performance_ui_tests --gtest_filter="GeneralMixMemoryTest.*
5149 intptr_t step_size =
5150 size_factor * IncrementalMarking::kAllocatedThreshold;
5152 if (contexts_disposed_ > 0) {
5153 if (hint >= kMaxHint) {
5154 // The embedder is requesting a lot of GC work after context disposal,
5155 // we age inline caches so that they don't keep objects from
5156 // the old context alive.
5159 int mark_sweep_time = Min(TimeMarkSweepWouldTakeInMs(), 1000);
5160 if (hint >= mark_sweep_time && !FLAG_expose_gc &&
5161 incremental_marking()->IsStopped()) {
5162 HistogramTimerScope scope(isolate_->counters()->gc_context());
5163 CollectAllGarbage(kReduceMemoryFootprintMask,
5164 "idle notification: contexts disposed");
5166 AdvanceIdleIncrementalMarking(step_size);
5167 contexts_disposed_ = 0;
5169 // Make sure that we have no pending context disposals.
5170 // Take into account that we might have decided to delay full collection
5171 // because incremental marking is in progress.
5172 ASSERT((contexts_disposed_ == 0) || !incremental_marking()->IsStopped());
5173 // After context disposal there is likely a lot of garbage remaining, reset
5174 // the idle notification counters in order to trigger more incremental GCs
5175 // on subsequent idle notifications.
5180 if (!FLAG_incremental_marking || FLAG_expose_gc || Serializer::enabled()) {
5181 return IdleGlobalGC();
5184 // By doing small chunks of GC work in each IdleNotification,
5185 // perform a round of incremental GCs and after that wait until
5186 // the mutator creates enough garbage to justify a new round.
5187 // An incremental GC progresses as follows:
5188 // 1. many incremental marking steps,
5189 // 2. one old space mark-sweep-compact,
5190 // 3. many lazy sweep steps.
5191 // Use mark-sweep-compact events to count incremental GCs in a round.
5194 if (incremental_marking()->IsStopped()) {
5195 if (!IsSweepingComplete() &&
5196 !AdvanceSweepers(static_cast<int>(step_size))) {
5201 if (mark_sweeps_since_idle_round_started_ >= kMaxMarkSweepsInIdleRound) {
5202 if (EnoughGarbageSinceLastIdleRound()) {
5209 int new_mark_sweeps = ms_count_ - ms_count_at_last_idle_notification_;
5210 mark_sweeps_since_idle_round_started_ += new_mark_sweeps;
5211 ms_count_at_last_idle_notification_ = ms_count_;
5213 int remaining_mark_sweeps = kMaxMarkSweepsInIdleRound -
5214 mark_sweeps_since_idle_round_started_;
5216 if (remaining_mark_sweeps <= 0) {
5221 if (incremental_marking()->IsStopped()) {
5222 // If there are no more than two GCs left in this idle round and we are
5223 // allowed to do a full GC, then make those GCs full in order to compact
5225 // TODO(ulan): Once we enable code compaction for incremental marking,
5226 // we can get rid of this special case and always start incremental marking.
5227 if (remaining_mark_sweeps <= 2 && hint >= kMinHintForFullGC) {
5228 CollectAllGarbage(kReduceMemoryFootprintMask,
5229 "idle notification: finalize idle round");
5231 incremental_marking()->Start();
5234 if (!incremental_marking()->IsStopped()) {
5235 AdvanceIdleIncrementalMarking(step_size);
5241 bool Heap::IdleGlobalGC() {
5242 static const int kIdlesBeforeScavenge = 4;
5243 static const int kIdlesBeforeMarkSweep = 7;
5244 static const int kIdlesBeforeMarkCompact = 8;
5245 static const int kMaxIdleCount = kIdlesBeforeMarkCompact + 1;
5246 static const unsigned int kGCsBetweenCleanup = 4;
5248 if (!last_idle_notification_gc_count_init_) {
5249 last_idle_notification_gc_count_ = gc_count_;
5250 last_idle_notification_gc_count_init_ = true;
5253 bool uncommit = true;
5254 bool finished = false;
5256 // Reset the number of idle notifications received when a number of
5257 // GCs have taken place. This allows another round of cleanup based
5258 // on idle notifications if enough work has been carried out to
5259 // provoke a number of garbage collections.
5260 if (gc_count_ - last_idle_notification_gc_count_ < kGCsBetweenCleanup) {
5261 number_idle_notifications_ =
5262 Min(number_idle_notifications_ + 1, kMaxIdleCount);
5264 number_idle_notifications_ = 0;
5265 last_idle_notification_gc_count_ = gc_count_;
5268 if (number_idle_notifications_ == kIdlesBeforeScavenge) {
5269 CollectGarbage(NEW_SPACE, "idle notification");
5270 new_space_.Shrink();
5271 last_idle_notification_gc_count_ = gc_count_;
5272 } else if (number_idle_notifications_ == kIdlesBeforeMarkSweep) {
5273 // Before doing the mark-sweep collections we clear the
5274 // compilation cache to avoid hanging on to source code and
5275 // generated code for cached functions.
5276 isolate_->compilation_cache()->Clear();
5278 CollectAllGarbage(kReduceMemoryFootprintMask, "idle notification");
5279 new_space_.Shrink();
5280 last_idle_notification_gc_count_ = gc_count_;
5282 } else if (number_idle_notifications_ == kIdlesBeforeMarkCompact) {
5283 CollectAllGarbage(kReduceMemoryFootprintMask, "idle notification");
5284 new_space_.Shrink();
5285 last_idle_notification_gc_count_ = gc_count_;
5286 number_idle_notifications_ = 0;
5288 } else if (number_idle_notifications_ > kIdlesBeforeMarkCompact) {
5289 // If we have received more than kIdlesBeforeMarkCompact idle
5290 // notifications we do not perform any cleanup because we don't
5291 // expect to gain much by doing so.
5295 if (uncommit) UncommitFromSpace();
5303 void Heap::Print() {
5304 if (!HasBeenSetUp()) return;
5305 isolate()->PrintStack();
5307 for (Space* space = spaces.next(); space != NULL; space = spaces.next())
5312 void Heap::ReportCodeStatistics(const char* title) {
5313 PrintF(">>>>>> Code Stats (%s) >>>>>>\n", title);
5314 PagedSpace::ResetCodeStatistics();
5315 // We do not look for code in new space, map space, or old space. If code
5316 // somehow ends up in those spaces, we would miss it here.
5317 code_space_->CollectCodeStatistics();
5318 lo_space_->CollectCodeStatistics();
5319 PagedSpace::ReportCodeStatistics();
5323 // This function expects that NewSpace's allocated objects histogram is
5324 // populated (via a call to CollectStatistics or else as a side effect of a
5325 // just-completed scavenge collection).
5326 void Heap::ReportHeapStatistics(const char* title) {
5328 PrintF(">>>>>> =============== %s (%d) =============== >>>>>>\n",
5330 PrintF("old_gen_promotion_limit_ %" V8_PTR_PREFIX "d\n",
5331 old_gen_promotion_limit_);
5332 PrintF("old_gen_allocation_limit_ %" V8_PTR_PREFIX "d\n",
5333 old_gen_allocation_limit_);
5334 PrintF("old_gen_limit_factor_ %d\n", old_gen_limit_factor_);
5337 PrintF("Number of handles : %d\n", HandleScope::NumberOfHandles());
5338 isolate_->global_handles()->PrintStats();
5341 PrintF("Heap statistics : ");
5342 isolate_->memory_allocator()->ReportStatistics();
5343 PrintF("To space : ");
5344 new_space_.ReportStatistics();
5345 PrintF("Old pointer space : ");
5346 old_pointer_space_->ReportStatistics();
5347 PrintF("Old data space : ");
5348 old_data_space_->ReportStatistics();
5349 PrintF("Code space : ");
5350 code_space_->ReportStatistics();
5351 PrintF("Map space : ");
5352 map_space_->ReportStatistics();
5353 PrintF("Cell space : ");
5354 cell_space_->ReportStatistics();
5355 PrintF("Large object space : ");
5356 lo_space_->ReportStatistics();
5357 PrintF(">>>>>> ========================================= >>>>>>\n");
5362 bool Heap::Contains(HeapObject* value) {
5363 return Contains(value->address());
5367 bool Heap::Contains(Address addr) {
5368 if (OS::IsOutsideAllocatedSpace(addr)) return false;
5369 return HasBeenSetUp() &&
5370 (new_space_.ToSpaceContains(addr) ||
5371 old_pointer_space_->Contains(addr) ||
5372 old_data_space_->Contains(addr) ||
5373 code_space_->Contains(addr) ||
5374 map_space_->Contains(addr) ||
5375 cell_space_->Contains(addr) ||
5376 lo_space_->SlowContains(addr));
5380 bool Heap::InSpace(HeapObject* value, AllocationSpace space) {
5381 return InSpace(value->address(), space);
5385 bool Heap::InSpace(Address addr, AllocationSpace space) {
5386 if (OS::IsOutsideAllocatedSpace(addr)) return false;
5387 if (!HasBeenSetUp()) return false;
5391 return new_space_.ToSpaceContains(addr);
5392 case OLD_POINTER_SPACE:
5393 return old_pointer_space_->Contains(addr);
5394 case OLD_DATA_SPACE:
5395 return old_data_space_->Contains(addr);
5397 return code_space_->Contains(addr);
5399 return map_space_->Contains(addr);
5401 return cell_space_->Contains(addr);
5403 return lo_space_->SlowContains(addr);
5411 void Heap::Verify() {
5412 CHECK(HasBeenSetUp());
5414 store_buffer()->Verify();
5416 VerifyPointersVisitor visitor;
5417 IterateRoots(&visitor, VISIT_ONLY_STRONG);
5419 new_space_.Verify();
5421 old_pointer_space_->Verify(&visitor);
5422 map_space_->Verify(&visitor);
5424 VerifyPointersVisitor no_dirty_regions_visitor;
5425 old_data_space_->Verify(&no_dirty_regions_visitor);
5426 code_space_->Verify(&no_dirty_regions_visitor);
5427 cell_space_->Verify(&no_dirty_regions_visitor);
5429 lo_space_->Verify();
5434 MaybeObject* Heap::LookupSymbol(Vector<const char> string) {
5435 Object* symbol = NULL;
5437 { MaybeObject* maybe_new_table =
5438 symbol_table()->LookupSymbol(string, &symbol);
5439 if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
5441 // Can't use set_symbol_table because SymbolTable::cast knows that
5442 // SymbolTable is a singleton and checks for identity.
5443 roots_[kSymbolTableRootIndex] = new_table;
5444 ASSERT(symbol != NULL);
5449 MaybeObject* Heap::LookupAsciiSymbol(Vector<const char> string) {
5450 Object* symbol = NULL;
5452 { MaybeObject* maybe_new_table =
5453 symbol_table()->LookupAsciiSymbol(string, &symbol);
5454 if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
5456 // Can't use set_symbol_table because SymbolTable::cast knows that
5457 // SymbolTable is a singleton and checks for identity.
5458 roots_[kSymbolTableRootIndex] = new_table;
5459 ASSERT(symbol != NULL);
5464 MaybeObject* Heap::LookupAsciiSymbol(Handle<SeqAsciiString> string,
5467 Object* symbol = NULL;
5469 { MaybeObject* maybe_new_table =
5470 symbol_table()->LookupSubStringAsciiSymbol(string,
5474 if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
5476 // Can't use set_symbol_table because SymbolTable::cast knows that
5477 // SymbolTable is a singleton and checks for identity.
5478 roots_[kSymbolTableRootIndex] = new_table;
5479 ASSERT(symbol != NULL);
5484 MaybeObject* Heap::LookupTwoByteSymbol(Vector<const uc16> string) {
5485 Object* symbol = NULL;
5487 { MaybeObject* maybe_new_table =
5488 symbol_table()->LookupTwoByteSymbol(string, &symbol);
5489 if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
5491 // Can't use set_symbol_table because SymbolTable::cast knows that
5492 // SymbolTable is a singleton and checks for identity.
5493 roots_[kSymbolTableRootIndex] = new_table;
5494 ASSERT(symbol != NULL);
5499 MaybeObject* Heap::LookupSymbol(String* string) {
5500 if (string->IsSymbol()) return string;
5501 Object* symbol = NULL;
5503 { MaybeObject* maybe_new_table =
5504 symbol_table()->LookupString(string, &symbol);
5505 if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
5507 // Can't use set_symbol_table because SymbolTable::cast knows that
5508 // SymbolTable is a singleton and checks for identity.
5509 roots_[kSymbolTableRootIndex] = new_table;
5510 ASSERT(symbol != NULL);
5515 bool Heap::LookupSymbolIfExists(String* string, String** symbol) {
5516 if (string->IsSymbol()) {
5520 return symbol_table()->LookupSymbolIfExists(string, symbol);
5523 void Heap::ZapFromSpace() {
5524 NewSpacePageIterator it(new_space_.FromSpaceStart(),
5525 new_space_.FromSpaceEnd());
5526 while (it.has_next()) {
5527 NewSpacePage* page = it.next();
5528 for (Address cursor = page->area_start(), limit = page->area_end();
5530 cursor += kPointerSize) {
5531 Memory::Address_at(cursor) = kFromSpaceZapValue;
5537 void Heap::IterateAndMarkPointersToFromSpace(Address start,
5539 ObjectSlotCallback callback) {
5540 Address slot_address = start;
5542 // We are not collecting slots on new space objects during mutation
5543 // thus we have to scan for pointers to evacuation candidates when we
5544 // promote objects. But we should not record any slots in non-black
5545 // objects. Grey object's slots would be rescanned.
5546 // White object might not survive until the end of collection
5547 // it would be a violation of the invariant to record it's slots.
5548 bool record_slots = false;
5549 if (incremental_marking()->IsCompacting()) {
5550 MarkBit mark_bit = Marking::MarkBitFrom(HeapObject::FromAddress(start));
5551 record_slots = Marking::IsBlack(mark_bit);
5554 while (slot_address < end) {
5555 Object** slot = reinterpret_cast<Object**>(slot_address);
5556 Object* object = *slot;
5557 // If the store buffer becomes overfull we mark pages as being exempt from
5558 // the store buffer. These pages are scanned to find pointers that point
5559 // to the new space. In that case we may hit newly promoted objects and
5560 // fix the pointers before the promotion queue gets to them. Thus the 'if'.
5561 if (object->IsHeapObject()) {
5562 if (Heap::InFromSpace(object)) {
5563 callback(reinterpret_cast<HeapObject**>(slot),
5564 HeapObject::cast(object));
5565 Object* new_object = *slot;
5566 if (InNewSpace(new_object)) {
5567 SLOW_ASSERT(Heap::InToSpace(new_object));
5568 SLOW_ASSERT(new_object->IsHeapObject());
5569 store_buffer_.EnterDirectlyIntoStoreBuffer(
5570 reinterpret_cast<Address>(slot));
5572 SLOW_ASSERT(!MarkCompactCollector::IsOnEvacuationCandidate(new_object));
5573 } else if (record_slots &&
5574 MarkCompactCollector::IsOnEvacuationCandidate(object)) {
5575 mark_compact_collector()->RecordSlot(slot, slot, object);
5578 slot_address += kPointerSize;
5584 typedef bool (*CheckStoreBufferFilter)(Object** addr);
5587 bool IsAMapPointerAddress(Object** addr) {
5588 uintptr_t a = reinterpret_cast<uintptr_t>(addr);
5589 int mod = a % Map::kSize;
5590 return mod >= Map::kPointerFieldsBeginOffset &&
5591 mod < Map::kPointerFieldsEndOffset;
5595 bool EverythingsAPointer(Object** addr) {
5600 static void CheckStoreBuffer(Heap* heap,
5603 Object**** store_buffer_position,
5604 Object*** store_buffer_top,
5605 CheckStoreBufferFilter filter,
5606 Address special_garbage_start,
5607 Address special_garbage_end) {
5608 Map* free_space_map = heap->free_space_map();
5609 for ( ; current < limit; current++) {
5610 Object* o = *current;
5611 Address current_address = reinterpret_cast<Address>(current);
5613 if (o == free_space_map) {
5614 Address current_address = reinterpret_cast<Address>(current);
5615 FreeSpace* free_space =
5616 FreeSpace::cast(HeapObject::FromAddress(current_address));
5617 int skip = free_space->Size();
5618 ASSERT(current_address + skip <= reinterpret_cast<Address>(limit));
5620 current_address += skip - kPointerSize;
5621 current = reinterpret_cast<Object**>(current_address);
5624 // Skip the current linear allocation space between top and limit which is
5625 // unmarked with the free space map, but can contain junk.
5626 if (current_address == special_garbage_start &&
5627 special_garbage_end != special_garbage_start) {
5628 current_address = special_garbage_end - kPointerSize;
5629 current = reinterpret_cast<Object**>(current_address);
5632 if (!(*filter)(current)) continue;
5633 ASSERT(current_address < special_garbage_start ||
5634 current_address >= special_garbage_end);
5635 ASSERT(reinterpret_cast<uintptr_t>(o) != kFreeListZapValue);
5636 // We have to check that the pointer does not point into new space
5637 // without trying to cast it to a heap object since the hash field of
5638 // a string can contain values like 1 and 3 which are tagged null
5640 if (!heap->InNewSpace(o)) continue;
5641 while (**store_buffer_position < current &&
5642 *store_buffer_position < store_buffer_top) {
5643 (*store_buffer_position)++;
5645 if (**store_buffer_position != current ||
5646 *store_buffer_position == store_buffer_top) {
5647 Object** obj_start = current;
5648 while (!(*obj_start)->IsMap()) obj_start--;
5655 // Check that the store buffer contains all intergenerational pointers by
5656 // scanning a page and ensuring that all pointers to young space are in the
5658 void Heap::OldPointerSpaceCheckStoreBuffer() {
5659 OldSpace* space = old_pointer_space();
5660 PageIterator pages(space);
5662 store_buffer()->SortUniq();
5664 while (pages.has_next()) {
5665 Page* page = pages.next();
5666 Object** current = reinterpret_cast<Object**>(page->area_start());
5668 Address end = page->area_end();
5670 Object*** store_buffer_position = store_buffer()->Start();
5671 Object*** store_buffer_top = store_buffer()->Top();
5673 Object** limit = reinterpret_cast<Object**>(end);
5674 CheckStoreBuffer(this,
5677 &store_buffer_position,
5679 &EverythingsAPointer,
5686 void Heap::MapSpaceCheckStoreBuffer() {
5687 MapSpace* space = map_space();
5688 PageIterator pages(space);
5690 store_buffer()->SortUniq();
5692 while (pages.has_next()) {
5693 Page* page = pages.next();
5694 Object** current = reinterpret_cast<Object**>(page->area_start());
5696 Address end = page->area_end();
5698 Object*** store_buffer_position = store_buffer()->Start();
5699 Object*** store_buffer_top = store_buffer()->Top();
5701 Object** limit = reinterpret_cast<Object**>(end);
5702 CheckStoreBuffer(this,
5705 &store_buffer_position,
5707 &IsAMapPointerAddress,
5714 void Heap::LargeObjectSpaceCheckStoreBuffer() {
5715 LargeObjectIterator it(lo_space());
5716 for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
5717 // We only have code, sequential strings, or fixed arrays in large
5718 // object space, and only fixed arrays can possibly contain pointers to
5719 // the young generation.
5720 if (object->IsFixedArray()) {
5721 Object*** store_buffer_position = store_buffer()->Start();
5722 Object*** store_buffer_top = store_buffer()->Top();
5723 Object** current = reinterpret_cast<Object**>(object->address());
5725 reinterpret_cast<Object**>(object->address() + object->Size());
5726 CheckStoreBuffer(this,
5729 &store_buffer_position,
5731 &EverythingsAPointer,
5740 void Heap::IterateRoots(ObjectVisitor* v, VisitMode mode) {
5741 IterateStrongRoots(v, mode);
5742 IterateWeakRoots(v, mode);
5746 void Heap::IterateWeakRoots(ObjectVisitor* v, VisitMode mode) {
5747 v->VisitPointer(reinterpret_cast<Object**>(&roots_[kSymbolTableRootIndex]));
5748 v->Synchronize(VisitorSynchronization::kSymbolTable);
5749 if (mode != VISIT_ALL_IN_SCAVENGE &&
5750 mode != VISIT_ALL_IN_SWEEP_NEWSPACE) {
5751 // Scavenge collections have special processing for this.
5752 external_string_table_.Iterate(v);
5754 v->Synchronize(VisitorSynchronization::kExternalStringsTable);
5758 void Heap::IterateStrongRoots(ObjectVisitor* v, VisitMode mode) {
5759 v->VisitPointers(&roots_[0], &roots_[kStrongRootListLength]);
5760 v->Synchronize(VisitorSynchronization::kStrongRootList);
5762 v->VisitPointer(BitCast<Object**>(&hidden_symbol_));
5763 v->Synchronize(VisitorSynchronization::kSymbol);
5765 isolate_->bootstrapper()->Iterate(v);
5766 v->Synchronize(VisitorSynchronization::kBootstrapper);
5767 isolate_->Iterate(v);
5768 v->Synchronize(VisitorSynchronization::kTop);
5769 Relocatable::Iterate(v);
5770 v->Synchronize(VisitorSynchronization::kRelocatable);
5772 #ifdef ENABLE_DEBUGGER_SUPPORT
5773 isolate_->debug()->Iterate(v);
5774 if (isolate_->deoptimizer_data() != NULL) {
5775 isolate_->deoptimizer_data()->Iterate(v);
5778 v->Synchronize(VisitorSynchronization::kDebug);
5779 isolate_->compilation_cache()->Iterate(v);
5780 v->Synchronize(VisitorSynchronization::kCompilationCache);
5782 // Iterate over local handles in handle scopes.
5783 isolate_->handle_scope_implementer()->Iterate(v);
5784 isolate_->IterateDeferredHandles(v);
5785 v->Synchronize(VisitorSynchronization::kHandleScope);
5787 // Iterate over the builtin code objects and code stubs in the
5788 // heap. Note that it is not necessary to iterate over code objects
5789 // on scavenge collections.
5790 if (mode != VISIT_ALL_IN_SCAVENGE) {
5791 isolate_->builtins()->IterateBuiltins(v);
5793 v->Synchronize(VisitorSynchronization::kBuiltins);
5795 // Iterate over global handles.
5797 case VISIT_ONLY_STRONG:
5798 isolate_->global_handles()->IterateStrongRoots(v);
5800 case VISIT_ALL_IN_SCAVENGE:
5801 isolate_->global_handles()->IterateNewSpaceStrongAndDependentRoots(v);
5803 case VISIT_ALL_IN_SWEEP_NEWSPACE:
5805 isolate_->global_handles()->IterateAllRoots(v);
5808 v->Synchronize(VisitorSynchronization::kGlobalHandles);
5810 // Iterate over pointers being held by inactive threads.
5811 isolate_->thread_manager()->Iterate(v);
5812 v->Synchronize(VisitorSynchronization::kThreadManager);
5814 // Iterate over the pointers the Serialization/Deserialization code is
5816 // During garbage collection this keeps the partial snapshot cache alive.
5817 // During deserialization of the startup snapshot this creates the partial
5818 // snapshot cache and deserializes the objects it refers to. During
5819 // serialization this does nothing, since the partial snapshot cache is
5820 // empty. However the next thing we do is create the partial snapshot,
5821 // filling up the partial snapshot cache with objects it needs as we go.
5822 SerializerDeserializer::Iterate(v);
5823 // We don't do a v->Synchronize call here, because in debug mode that will
5824 // output a flag to the snapshot. However at this point the serializer and
5825 // deserializer are deliberately a little unsynchronized (see above) so the
5826 // checking of the sync flag in the snapshot would fail.
5830 // TODO(1236194): Since the heap size is configurable on the command line
5831 // and through the API, we should gracefully handle the case that the heap
5832 // size is not big enough to fit all the initial objects.
5833 bool Heap::ConfigureHeap(int max_semispace_size,
5834 intptr_t max_old_gen_size,
5835 intptr_t max_executable_size) {
5836 if (HasBeenSetUp()) return false;
5838 if (FLAG_stress_compaction) {
5839 // This will cause more frequent GCs when stressing.
5840 max_semispace_size_ = Page::kPageSize;
5843 if (max_semispace_size > 0) {
5844 if (max_semispace_size < Page::kPageSize) {
5845 max_semispace_size = Page::kPageSize;
5846 if (FLAG_trace_gc) {
5847 PrintPID("Max semispace size cannot be less than %dkbytes\n",
5848 Page::kPageSize >> 10);
5851 max_semispace_size_ = max_semispace_size;
5854 if (Snapshot::IsEnabled()) {
5855 // If we are using a snapshot we always reserve the default amount
5856 // of memory for each semispace because code in the snapshot has
5857 // write-barrier code that relies on the size and alignment of new
5858 // space. We therefore cannot use a larger max semispace size
5859 // than the default reserved semispace size.
5860 if (max_semispace_size_ > reserved_semispace_size_) {
5861 max_semispace_size_ = reserved_semispace_size_;
5862 if (FLAG_trace_gc) {
5863 PrintPID("Max semispace size cannot be more than %dkbytes\n",
5864 reserved_semispace_size_ >> 10);
5868 // If we are not using snapshots we reserve space for the actual
5869 // max semispace size.
5870 reserved_semispace_size_ = max_semispace_size_;
5873 if (max_old_gen_size > 0) max_old_generation_size_ = max_old_gen_size;
5874 if (max_executable_size > 0) {
5875 max_executable_size_ = RoundUp(max_executable_size, Page::kPageSize);
5878 // The max executable size must be less than or equal to the max old
5880 if (max_executable_size_ > max_old_generation_size_) {
5881 max_executable_size_ = max_old_generation_size_;
5884 // The new space size must be a power of two to support single-bit testing
5886 max_semispace_size_ = RoundUpToPowerOf2(max_semispace_size_);
5887 reserved_semispace_size_ = RoundUpToPowerOf2(reserved_semispace_size_);
5888 initial_semispace_size_ = Min(initial_semispace_size_, max_semispace_size_);
5889 external_allocation_limit_ = 16 * max_semispace_size_;
5891 // The old generation is paged and needs at least one page for each space.
5892 int paged_space_count = LAST_PAGED_SPACE - FIRST_PAGED_SPACE + 1;
5893 max_old_generation_size_ = Max(static_cast<intptr_t>(paged_space_count *
5895 RoundUp(max_old_generation_size_,
5903 bool Heap::ConfigureHeapDefault() {
5904 return ConfigureHeap(static_cast<intptr_t>(FLAG_max_new_space_size / 2) * KB,
5905 static_cast<intptr_t>(FLAG_max_old_space_size) * MB,
5906 static_cast<intptr_t>(FLAG_max_executable_size) * MB);
5910 void Heap::RecordStats(HeapStats* stats, bool take_snapshot) {
5911 *stats->start_marker = HeapStats::kStartMarker;
5912 *stats->end_marker = HeapStats::kEndMarker;
5913 *stats->new_space_size = new_space_.SizeAsInt();
5914 *stats->new_space_capacity = static_cast<int>(new_space_.Capacity());
5915 *stats->old_pointer_space_size = old_pointer_space_->SizeOfObjects();
5916 *stats->old_pointer_space_capacity = old_pointer_space_->Capacity();
5917 *stats->old_data_space_size = old_data_space_->SizeOfObjects();
5918 *stats->old_data_space_capacity = old_data_space_->Capacity();
5919 *stats->code_space_size = code_space_->SizeOfObjects();
5920 *stats->code_space_capacity = code_space_->Capacity();
5921 *stats->map_space_size = map_space_->SizeOfObjects();
5922 *stats->map_space_capacity = map_space_->Capacity();
5923 *stats->cell_space_size = cell_space_->SizeOfObjects();
5924 *stats->cell_space_capacity = cell_space_->Capacity();
5925 *stats->lo_space_size = lo_space_->Size();
5926 isolate_->global_handles()->RecordStats(stats);
5927 *stats->memory_allocator_size = isolate()->memory_allocator()->Size();
5928 *stats->memory_allocator_capacity =
5929 isolate()->memory_allocator()->Size() +
5930 isolate()->memory_allocator()->Available();
5931 *stats->os_error = OS::GetLastError();
5932 isolate()->memory_allocator()->Available();
5933 if (take_snapshot) {
5934 HeapIterator iterator;
5935 for (HeapObject* obj = iterator.next();
5937 obj = iterator.next()) {
5938 InstanceType type = obj->map()->instance_type();
5939 ASSERT(0 <= type && type <= LAST_TYPE);
5940 stats->objects_per_type[type]++;
5941 stats->size_per_type[type] += obj->Size();
5947 intptr_t Heap::PromotedSpaceSizeOfObjects() {
5948 return old_pointer_space_->SizeOfObjects()
5949 + old_data_space_->SizeOfObjects()
5950 + code_space_->SizeOfObjects()
5951 + map_space_->SizeOfObjects()
5952 + cell_space_->SizeOfObjects()
5953 + lo_space_->SizeOfObjects();
5957 intptr_t Heap::PromotedExternalMemorySize() {
5958 if (amount_of_external_allocated_memory_
5959 <= amount_of_external_allocated_memory_at_last_global_gc_) return 0;
5960 return amount_of_external_allocated_memory_
5961 - amount_of_external_allocated_memory_at_last_global_gc_;
5966 // Tags 0, 1, and 3 are used. Use 2 for marking visited HeapObject.
5967 static const int kMarkTag = 2;
5970 class HeapDebugUtils {
5972 explicit HeapDebugUtils(Heap* heap)
5973 : search_for_any_global_(false),
5974 search_target_(NULL),
5975 found_target_(false),
5980 class MarkObjectVisitor : public ObjectVisitor {
5982 explicit MarkObjectVisitor(HeapDebugUtils* utils) : utils_(utils) { }
5984 void VisitPointers(Object** start, Object** end) {
5985 // Copy all HeapObject pointers in [start, end)
5986 for (Object** p = start; p < end; p++) {
5987 if ((*p)->IsHeapObject())
5988 utils_->MarkObjectRecursively(p);
5992 HeapDebugUtils* utils_;
5995 void MarkObjectRecursively(Object** p) {
5996 if (!(*p)->IsHeapObject()) return;
5998 HeapObject* obj = HeapObject::cast(*p);
6000 Object* map = obj->map();
6002 if (!map->IsHeapObject()) return; // visited before
6004 if (found_target_) return; // stop if target found
6005 object_stack_.Add(obj);
6006 if ((search_for_any_global_ && obj->IsJSGlobalObject()) ||
6007 (!search_for_any_global_ && (obj == search_target_))) {
6008 found_target_ = true;
6013 Map* map_p = reinterpret_cast<Map*>(HeapObject::cast(map));
6015 Address map_addr = map_p->address();
6017 obj->set_map_no_write_barrier(reinterpret_cast<Map*>(map_addr + kMarkTag));
6019 MarkObjectRecursively(&map);
6021 MarkObjectVisitor mark_visitor(this);
6023 obj->IterateBody(map_p->instance_type(), obj->SizeFromMap(map_p),
6026 if (!found_target_) // don't pop if found the target
6027 object_stack_.RemoveLast();
6031 class UnmarkObjectVisitor : public ObjectVisitor {
6033 explicit UnmarkObjectVisitor(HeapDebugUtils* utils) : utils_(utils) { }
6035 void VisitPointers(Object** start, Object** end) {
6036 // Copy all HeapObject pointers in [start, end)
6037 for (Object** p = start; p < end; p++) {
6038 if ((*p)->IsHeapObject())
6039 utils_->UnmarkObjectRecursively(p);
6043 HeapDebugUtils* utils_;
6047 void UnmarkObjectRecursively(Object** p) {
6048 if (!(*p)->IsHeapObject()) return;
6050 HeapObject* obj = HeapObject::cast(*p);
6052 Object* map = obj->map();
6054 if (map->IsHeapObject()) return; // unmarked already
6056 Address map_addr = reinterpret_cast<Address>(map);
6058 map_addr -= kMarkTag;
6060 ASSERT_TAG_ALIGNED(map_addr);
6062 HeapObject* map_p = HeapObject::FromAddress(map_addr);
6064 obj->set_map_no_write_barrier(reinterpret_cast<Map*>(map_p));
6066 UnmarkObjectRecursively(reinterpret_cast<Object**>(&map_p));
6068 UnmarkObjectVisitor unmark_visitor(this);
6070 obj->IterateBody(Map::cast(map_p)->instance_type(),
6071 obj->SizeFromMap(Map::cast(map_p)),
6076 void MarkRootObjectRecursively(Object** root) {
6077 if (search_for_any_global_) {
6078 ASSERT(search_target_ == NULL);
6080 ASSERT(search_target_->IsHeapObject());
6082 found_target_ = false;
6083 object_stack_.Clear();
6085 MarkObjectRecursively(root);
6086 UnmarkObjectRecursively(root);
6088 if (found_target_) {
6089 PrintF("=====================================\n");
6090 PrintF("==== Path to object ====\n");
6091 PrintF("=====================================\n\n");
6093 ASSERT(!object_stack_.is_empty());
6094 for (int i = 0; i < object_stack_.length(); i++) {
6095 if (i > 0) PrintF("\n |\n |\n V\n\n");
6096 Object* obj = object_stack_[i];
6099 PrintF("=====================================\n");
6103 // Helper class for visiting HeapObjects recursively.
6104 class MarkRootVisitor: public ObjectVisitor {
6106 explicit MarkRootVisitor(HeapDebugUtils* utils) : utils_(utils) { }
6108 void VisitPointers(Object** start, Object** end) {
6109 // Visit all HeapObject pointers in [start, end)
6110 for (Object** p = start; p < end; p++) {
6111 if ((*p)->IsHeapObject())
6112 utils_->MarkRootObjectRecursively(p);
6116 HeapDebugUtils* utils_;
6119 bool search_for_any_global_;
6120 Object* search_target_;
6122 List<Object*> object_stack_;
6131 V8_DECLARE_ONCE(initialize_gc_once);
6133 static void InitializeGCOnce() {
6134 InitializeScavengingVisitorsTables();
6135 NewSpaceScavenger::Initialize();
6136 MarkCompactCollector::Initialize();
6139 bool Heap::SetUp(bool create_heap_objects) {
6141 allocation_timeout_ = FLAG_gc_interval;
6142 debug_utils_ = new HeapDebugUtils(this);
6145 // Initialize heap spaces and initial maps and objects. Whenever something
6146 // goes wrong, just return false. The caller should check the results and
6147 // call Heap::TearDown() to release allocated memory.
6149 // If the heap is not yet configured (e.g. through the API), configure it.
6150 // Configuration is based on the flags new-space-size (really the semispace
6151 // size) and old-space-size if set or the initial values of semispace_size_
6152 // and old_generation_size_ otherwise.
6154 if (!ConfigureHeapDefault()) return false;
6157 CallOnce(&initialize_gc_once, &InitializeGCOnce);
6159 MarkMapPointersAsEncoded(false);
6161 // Set up memory allocator.
6162 if (!isolate_->memory_allocator()->SetUp(MaxReserved(), MaxExecutableSize()))
6165 // Set up new space.
6166 if (!new_space_.SetUp(reserved_semispace_size_, max_semispace_size_)) {
6170 // Initialize old pointer space.
6171 old_pointer_space_ =
6173 max_old_generation_size_,
6176 if (old_pointer_space_ == NULL) return false;
6177 if (!old_pointer_space_->SetUp()) return false;
6179 // Initialize old data space.
6182 max_old_generation_size_,
6185 if (old_data_space_ == NULL) return false;
6186 if (!old_data_space_->SetUp()) return false;
6188 // Initialize the code space, set its maximum capacity to the old
6189 // generation size. It needs executable memory.
6190 // On 64-bit platform(s), we put all code objects in a 2 GB range of
6191 // virtual address space, so that they can call each other with near calls.
6192 if (code_range_size_ > 0) {
6193 if (!isolate_->code_range()->SetUp(code_range_size_)) {
6199 new OldSpace(this, max_old_generation_size_, CODE_SPACE, EXECUTABLE);
6200 if (code_space_ == NULL) return false;
6201 if (!code_space_->SetUp()) return false;
6203 // Initialize map space.
6204 map_space_ = new MapSpace(this, max_old_generation_size_, MAP_SPACE);
6205 if (map_space_ == NULL) return false;
6206 if (!map_space_->SetUp()) return false;
6208 // Initialize global property cell space.
6209 cell_space_ = new CellSpace(this, max_old_generation_size_, CELL_SPACE);
6210 if (cell_space_ == NULL) return false;
6211 if (!cell_space_->SetUp()) return false;
6213 // The large object code space may contain code or data. We set the memory
6214 // to be non-executable here for safety, but this means we need to enable it
6215 // explicitly when allocating large code objects.
6216 lo_space_ = new LargeObjectSpace(this, max_old_generation_size_, LO_SPACE);
6217 if (lo_space_ == NULL) return false;
6218 if (!lo_space_->SetUp()) return false;
6220 // Set up the seed that is used to randomize the string hash function.
6221 ASSERT(hash_seed() == 0);
6222 if (FLAG_randomize_hashes) {
6223 if (FLAG_hash_seed == 0) {
6225 Smi::FromInt(V8::RandomPrivate(isolate()) & 0x3fffffff));
6227 set_hash_seed(Smi::FromInt(FLAG_hash_seed));
6231 if (create_heap_objects) {
6232 // Create initial maps.
6233 if (!CreateInitialMaps()) return false;
6234 if (!CreateApiObjects()) return false;
6236 // Create initial objects
6237 if (!CreateInitialObjects()) return false;
6239 native_contexts_list_ = undefined_value();
6242 LOG(isolate_, IntPtrTEvent("heap-capacity", Capacity()));
6243 LOG(isolate_, IntPtrTEvent("heap-available", Available()));
6245 store_buffer()->SetUp();
6247 if (FLAG_parallel_recompilation) relocation_mutex_ = OS::CreateMutex();
6253 void Heap::SetStackLimits() {
6254 ASSERT(isolate_ != NULL);
6255 ASSERT(isolate_ == isolate());
6256 // On 64 bit machines, pointers are generally out of range of Smis. We write
6257 // something that looks like an out of range Smi to the GC.
6259 // Set up the special root array entries containing the stack limits.
6260 // These are actually addresses, but the tag makes the GC ignore it.
6261 roots_[kStackLimitRootIndex] =
6262 reinterpret_cast<Object*>(
6263 (isolate_->stack_guard()->jslimit() & ~kSmiTagMask) | kSmiTag);
6264 roots_[kRealStackLimitRootIndex] =
6265 reinterpret_cast<Object*>(
6266 (isolate_->stack_guard()->real_jslimit() & ~kSmiTagMask) | kSmiTag);
6270 void Heap::TearDown() {
6272 if (FLAG_verify_heap) {
6277 if (FLAG_print_cumulative_gc_stat) {
6279 PrintF("gc_count=%d ", gc_count_);
6280 PrintF("mark_sweep_count=%d ", ms_count_);
6281 PrintF("max_gc_pause=%d ", get_max_gc_pause());
6282 PrintF("total_gc_time=%d ", total_gc_time_ms_);
6283 PrintF("min_in_mutator=%d ", get_min_in_mutator());
6284 PrintF("max_alive_after_gc=%" V8_PTR_PREFIX "d ",
6285 get_max_alive_after_gc());
6289 isolate_->global_handles()->TearDown();
6291 external_string_table_.TearDown();
6293 new_space_.TearDown();
6295 if (old_pointer_space_ != NULL) {
6296 old_pointer_space_->TearDown();
6297 delete old_pointer_space_;
6298 old_pointer_space_ = NULL;
6301 if (old_data_space_ != NULL) {
6302 old_data_space_->TearDown();
6303 delete old_data_space_;
6304 old_data_space_ = NULL;
6307 if (code_space_ != NULL) {
6308 code_space_->TearDown();
6313 if (map_space_ != NULL) {
6314 map_space_->TearDown();
6319 if (cell_space_ != NULL) {
6320 cell_space_->TearDown();
6325 if (lo_space_ != NULL) {
6326 lo_space_->TearDown();
6331 store_buffer()->TearDown();
6332 incremental_marking()->TearDown();
6334 isolate_->memory_allocator()->TearDown();
6336 delete relocation_mutex_;
6339 delete debug_utils_;
6340 debug_utils_ = NULL;
6345 void Heap::Shrink() {
6346 // Try to shrink all paged spaces.
6348 for (PagedSpace* space = spaces.next();
6350 space = spaces.next()) {
6351 space->ReleaseAllUnusedPages();
6356 void Heap::AddGCPrologueCallback(GCPrologueCallback callback, GCType gc_type) {
6357 ASSERT(callback != NULL);
6358 GCPrologueCallbackPair pair(callback, gc_type);
6359 ASSERT(!gc_prologue_callbacks_.Contains(pair));
6360 return gc_prologue_callbacks_.Add(pair);
6364 void Heap::RemoveGCPrologueCallback(GCPrologueCallback callback) {
6365 ASSERT(callback != NULL);
6366 for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
6367 if (gc_prologue_callbacks_[i].callback == callback) {
6368 gc_prologue_callbacks_.Remove(i);
6376 void Heap::AddGCEpilogueCallback(GCEpilogueCallback callback, GCType gc_type) {
6377 ASSERT(callback != NULL);
6378 GCEpilogueCallbackPair pair(callback, gc_type);
6379 ASSERT(!gc_epilogue_callbacks_.Contains(pair));
6380 return gc_epilogue_callbacks_.Add(pair);
6384 void Heap::RemoveGCEpilogueCallback(GCEpilogueCallback callback) {
6385 ASSERT(callback != NULL);
6386 for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
6387 if (gc_epilogue_callbacks_[i].callback == callback) {
6388 gc_epilogue_callbacks_.Remove(i);
6398 class PrintHandleVisitor: public ObjectVisitor {
6400 void VisitPointers(Object** start, Object** end) {
6401 for (Object** p = start; p < end; p++)
6402 PrintF(" handle %p to %p\n",
6403 reinterpret_cast<void*>(p),
6404 reinterpret_cast<void*>(*p));
6408 void Heap::PrintHandles() {
6409 PrintF("Handles:\n");
6410 PrintHandleVisitor v;
6411 isolate_->handle_scope_implementer()->Iterate(&v);
6417 Space* AllSpaces::next() {
6418 switch (counter_++) {
6420 return HEAP->new_space();
6421 case OLD_POINTER_SPACE:
6422 return HEAP->old_pointer_space();
6423 case OLD_DATA_SPACE:
6424 return HEAP->old_data_space();
6426 return HEAP->code_space();
6428 return HEAP->map_space();
6430 return HEAP->cell_space();
6432 return HEAP->lo_space();
6439 PagedSpace* PagedSpaces::next() {
6440 switch (counter_++) {
6441 case OLD_POINTER_SPACE:
6442 return HEAP->old_pointer_space();
6443 case OLD_DATA_SPACE:
6444 return HEAP->old_data_space();
6446 return HEAP->code_space();
6448 return HEAP->map_space();
6450 return HEAP->cell_space();
6458 OldSpace* OldSpaces::next() {
6459 switch (counter_++) {
6460 case OLD_POINTER_SPACE:
6461 return HEAP->old_pointer_space();
6462 case OLD_DATA_SPACE:
6463 return HEAP->old_data_space();
6465 return HEAP->code_space();
6472 SpaceIterator::SpaceIterator()
6473 : current_space_(FIRST_SPACE),
6479 SpaceIterator::SpaceIterator(HeapObjectCallback size_func)
6480 : current_space_(FIRST_SPACE),
6482 size_func_(size_func) {
6486 SpaceIterator::~SpaceIterator() {
6487 // Delete active iterator if any.
6492 bool SpaceIterator::has_next() {
6493 // Iterate until no more spaces.
6494 return current_space_ != LAST_SPACE;
6498 ObjectIterator* SpaceIterator::next() {
6499 if (iterator_ != NULL) {
6502 // Move to the next space
6504 if (current_space_ > LAST_SPACE) {
6509 // Return iterator for the new current space.
6510 return CreateIterator();
6514 // Create an iterator for the space to iterate.
6515 ObjectIterator* SpaceIterator::CreateIterator() {
6516 ASSERT(iterator_ == NULL);
6518 switch (current_space_) {
6520 iterator_ = new SemiSpaceIterator(HEAP->new_space(), size_func_);
6522 case OLD_POINTER_SPACE:
6523 iterator_ = new HeapObjectIterator(HEAP->old_pointer_space(), size_func_);
6525 case OLD_DATA_SPACE:
6526 iterator_ = new HeapObjectIterator(HEAP->old_data_space(), size_func_);
6529 iterator_ = new HeapObjectIterator(HEAP->code_space(), size_func_);
6532 iterator_ = new HeapObjectIterator(HEAP->map_space(), size_func_);
6535 iterator_ = new HeapObjectIterator(HEAP->cell_space(), size_func_);
6538 iterator_ = new LargeObjectIterator(HEAP->lo_space(), size_func_);
6542 // Return the newly allocated iterator;
6543 ASSERT(iterator_ != NULL);
6548 class HeapObjectsFilter {
6550 virtual ~HeapObjectsFilter() {}
6551 virtual bool SkipObject(HeapObject* object) = 0;
6555 class UnreachableObjectsFilter : public HeapObjectsFilter {
6557 UnreachableObjectsFilter() {
6558 MarkReachableObjects();
6561 ~UnreachableObjectsFilter() {
6562 Isolate::Current()->heap()->mark_compact_collector()->ClearMarkbits();
6565 bool SkipObject(HeapObject* object) {
6566 MarkBit mark_bit = Marking::MarkBitFrom(object);
6567 return !mark_bit.Get();
6571 class MarkingVisitor : public ObjectVisitor {
6573 MarkingVisitor() : marking_stack_(10) {}
6575 void VisitPointers(Object** start, Object** end) {
6576 for (Object** p = start; p < end; p++) {
6577 if (!(*p)->IsHeapObject()) continue;
6578 HeapObject* obj = HeapObject::cast(*p);
6579 MarkBit mark_bit = Marking::MarkBitFrom(obj);
6580 if (!mark_bit.Get()) {
6582 marking_stack_.Add(obj);
6587 void TransitiveClosure() {
6588 while (!marking_stack_.is_empty()) {
6589 HeapObject* obj = marking_stack_.RemoveLast();
6595 List<HeapObject*> marking_stack_;
6598 void MarkReachableObjects() {
6599 Heap* heap = Isolate::Current()->heap();
6600 MarkingVisitor visitor;
6601 heap->IterateRoots(&visitor, VISIT_ALL);
6602 visitor.TransitiveClosure();
6605 AssertNoAllocation no_alloc;
6609 HeapIterator::HeapIterator()
6610 : filtering_(HeapIterator::kNoFiltering),
6616 HeapIterator::HeapIterator(HeapIterator::HeapObjectsFiltering filtering)
6617 : filtering_(filtering),
6623 HeapIterator::~HeapIterator() {
6628 void HeapIterator::Init() {
6629 // Start the iteration.
6630 space_iterator_ = new SpaceIterator;
6631 switch (filtering_) {
6632 case kFilterUnreachable:
6633 filter_ = new UnreachableObjectsFilter;
6638 object_iterator_ = space_iterator_->next();
6642 void HeapIterator::Shutdown() {
6644 // Assert that in filtering mode we have iterated through all
6645 // objects. Otherwise, heap will be left in an inconsistent state.
6646 if (filtering_ != kNoFiltering) {
6647 ASSERT(object_iterator_ == NULL);
6650 // Make sure the last iterator is deallocated.
6651 delete space_iterator_;
6652 space_iterator_ = NULL;
6653 object_iterator_ = NULL;
6659 HeapObject* HeapIterator::next() {
6660 if (filter_ == NULL) return NextObject();
6662 HeapObject* obj = NextObject();
6663 while (obj != NULL && filter_->SkipObject(obj)) obj = NextObject();
6668 HeapObject* HeapIterator::NextObject() {
6669 // No iterator means we are done.
6670 if (object_iterator_ == NULL) return NULL;
6672 if (HeapObject* obj = object_iterator_->next_object()) {
6673 // If the current iterator has more objects we are fine.
6676 // Go though the spaces looking for one that has objects.
6677 while (space_iterator_->has_next()) {
6678 object_iterator_ = space_iterator_->next();
6679 if (HeapObject* obj = object_iterator_->next_object()) {
6684 // Done with the last space.
6685 object_iterator_ = NULL;
6690 void HeapIterator::reset() {
6691 // Restart the iterator.
6697 #if defined(DEBUG) || defined(LIVE_OBJECT_LIST)
6699 Object* const PathTracer::kAnyGlobalObject = reinterpret_cast<Object*>(NULL);
6701 class PathTracer::MarkVisitor: public ObjectVisitor {
6703 explicit MarkVisitor(PathTracer* tracer) : tracer_(tracer) {}
6704 void VisitPointers(Object** start, Object** end) {
6705 // Scan all HeapObject pointers in [start, end)
6706 for (Object** p = start; !tracer_->found() && (p < end); p++) {
6707 if ((*p)->IsHeapObject())
6708 tracer_->MarkRecursively(p, this);
6713 PathTracer* tracer_;
6717 class PathTracer::UnmarkVisitor: public ObjectVisitor {
6719 explicit UnmarkVisitor(PathTracer* tracer) : tracer_(tracer) {}
6720 void VisitPointers(Object** start, Object** end) {
6721 // Scan all HeapObject pointers in [start, end)
6722 for (Object** p = start; p < end; p++) {
6723 if ((*p)->IsHeapObject())
6724 tracer_->UnmarkRecursively(p, this);
6729 PathTracer* tracer_;
6733 void PathTracer::VisitPointers(Object** start, Object** end) {
6734 bool done = ((what_to_find_ == FIND_FIRST) && found_target_);
6735 // Visit all HeapObject pointers in [start, end)
6736 for (Object** p = start; !done && (p < end); p++) {
6737 if ((*p)->IsHeapObject()) {
6739 done = ((what_to_find_ == FIND_FIRST) && found_target_);
6745 void PathTracer::Reset() {
6746 found_target_ = false;
6747 object_stack_.Clear();
6751 void PathTracer::TracePathFrom(Object** root) {
6752 ASSERT((search_target_ == kAnyGlobalObject) ||
6753 search_target_->IsHeapObject());
6754 found_target_in_trace_ = false;
6757 MarkVisitor mark_visitor(this);
6758 MarkRecursively(root, &mark_visitor);
6760 UnmarkVisitor unmark_visitor(this);
6761 UnmarkRecursively(root, &unmark_visitor);
6767 static bool SafeIsNativeContext(HeapObject* obj) {
6768 return obj->map() == obj->GetHeap()->raw_unchecked_native_context_map();
6772 void PathTracer::MarkRecursively(Object** p, MarkVisitor* mark_visitor) {
6773 if (!(*p)->IsHeapObject()) return;
6775 HeapObject* obj = HeapObject::cast(*p);
6777 Object* map = obj->map();
6779 if (!map->IsHeapObject()) return; // visited before
6781 if (found_target_in_trace_) return; // stop if target found
6782 object_stack_.Add(obj);
6783 if (((search_target_ == kAnyGlobalObject) && obj->IsJSGlobalObject()) ||
6784 (obj == search_target_)) {
6785 found_target_in_trace_ = true;
6786 found_target_ = true;
6790 bool is_native_context = SafeIsNativeContext(obj);
6793 Map* map_p = reinterpret_cast<Map*>(HeapObject::cast(map));
6795 Address map_addr = map_p->address();
6797 obj->set_map_no_write_barrier(reinterpret_cast<Map*>(map_addr + kMarkTag));
6799 // Scan the object body.
6800 if (is_native_context && (visit_mode_ == VISIT_ONLY_STRONG)) {
6801 // This is specialized to scan Context's properly.
6802 Object** start = reinterpret_cast<Object**>(obj->address() +
6803 Context::kHeaderSize);
6804 Object** end = reinterpret_cast<Object**>(obj->address() +
6805 Context::kHeaderSize + Context::FIRST_WEAK_SLOT * kPointerSize);
6806 mark_visitor->VisitPointers(start, end);
6808 obj->IterateBody(map_p->instance_type(),
6809 obj->SizeFromMap(map_p),
6813 // Scan the map after the body because the body is a lot more interesting
6814 // when doing leak detection.
6815 MarkRecursively(&map, mark_visitor);
6817 if (!found_target_in_trace_) // don't pop if found the target
6818 object_stack_.RemoveLast();
6822 void PathTracer::UnmarkRecursively(Object** p, UnmarkVisitor* unmark_visitor) {
6823 if (!(*p)->IsHeapObject()) return;
6825 HeapObject* obj = HeapObject::cast(*p);
6827 Object* map = obj->map();
6829 if (map->IsHeapObject()) return; // unmarked already
6831 Address map_addr = reinterpret_cast<Address>(map);
6833 map_addr -= kMarkTag;
6835 ASSERT_TAG_ALIGNED(map_addr);
6837 HeapObject* map_p = HeapObject::FromAddress(map_addr);
6839 obj->set_map_no_write_barrier(reinterpret_cast<Map*>(map_p));
6841 UnmarkRecursively(reinterpret_cast<Object**>(&map_p), unmark_visitor);
6843 obj->IterateBody(Map::cast(map_p)->instance_type(),
6844 obj->SizeFromMap(Map::cast(map_p)),
6849 void PathTracer::ProcessResults() {
6850 if (found_target_) {
6851 PrintF("=====================================\n");
6852 PrintF("==== Path to object ====\n");
6853 PrintF("=====================================\n\n");
6855 ASSERT(!object_stack_.is_empty());
6856 for (int i = 0; i < object_stack_.length(); i++) {
6857 if (i > 0) PrintF("\n |\n |\n V\n\n");
6858 Object* obj = object_stack_[i];
6861 PrintF("=====================================\n");
6864 #endif // DEBUG || LIVE_OBJECT_LIST
6868 // Triggers a depth-first traversal of reachable objects from one
6869 // given root object and finds a path to a specific heap object and
6871 void Heap::TracePathToObjectFrom(Object* target, Object* root) {
6872 PathTracer tracer(target, PathTracer::FIND_ALL, VISIT_ALL);
6873 tracer.VisitPointer(&root);
6877 // Triggers a depth-first traversal of reachable objects from roots
6878 // and finds a path to a specific heap object and prints it.
6879 void Heap::TracePathToObject(Object* target) {
6880 PathTracer tracer(target, PathTracer::FIND_ALL, VISIT_ALL);
6881 IterateRoots(&tracer, VISIT_ONLY_STRONG);
6885 // Triggers a depth-first traversal of reachable objects from roots
6886 // and finds a path to any global object and prints it. Useful for
6887 // determining the source for leaks of global objects.
6888 void Heap::TracePathToGlobal() {
6889 PathTracer tracer(PathTracer::kAnyGlobalObject,
6890 PathTracer::FIND_ALL,
6892 IterateRoots(&tracer, VISIT_ONLY_STRONG);
6897 static intptr_t CountTotalHolesSize() {
6898 intptr_t holes_size = 0;
6900 for (OldSpace* space = spaces.next();
6902 space = spaces.next()) {
6903 holes_size += space->Waste() + space->Available();
6909 GCTracer::GCTracer(Heap* heap,
6910 const char* gc_reason,
6911 const char* collector_reason)
6913 start_object_size_(0),
6914 start_memory_size_(0),
6917 allocated_since_last_gc_(0),
6918 spent_in_mutator_(0),
6919 promoted_objects_size_(0),
6921 gc_reason_(gc_reason),
6922 collector_reason_(collector_reason) {
6923 if (!FLAG_trace_gc && !FLAG_print_cumulative_gc_stat) return;
6924 start_time_ = OS::TimeCurrentMillis();
6925 start_object_size_ = heap_->SizeOfObjects();
6926 start_memory_size_ = heap_->isolate()->memory_allocator()->Size();
6928 for (int i = 0; i < Scope::kNumberOfScopes; i++) {
6932 in_free_list_or_wasted_before_gc_ = CountTotalHolesSize();
6934 allocated_since_last_gc_ =
6935 heap_->SizeOfObjects() - heap_->alive_after_last_gc_;
6937 if (heap_->last_gc_end_timestamp_ > 0) {
6938 spent_in_mutator_ = Max(start_time_ - heap_->last_gc_end_timestamp_, 0.0);
6941 steps_count_ = heap_->incremental_marking()->steps_count();
6942 steps_took_ = heap_->incremental_marking()->steps_took();
6943 longest_step_ = heap_->incremental_marking()->longest_step();
6944 steps_count_since_last_gc_ =
6945 heap_->incremental_marking()->steps_count_since_last_gc();
6946 steps_took_since_last_gc_ =
6947 heap_->incremental_marking()->steps_took_since_last_gc();
6951 GCTracer::~GCTracer() {
6952 // Printf ONE line iff flag is set.
6953 if (!FLAG_trace_gc && !FLAG_print_cumulative_gc_stat) return;
6955 bool first_gc = (heap_->last_gc_end_timestamp_ == 0);
6957 heap_->alive_after_last_gc_ = heap_->SizeOfObjects();
6958 heap_->last_gc_end_timestamp_ = OS::TimeCurrentMillis();
6960 int time = static_cast<int>(heap_->last_gc_end_timestamp_ - start_time_);
6962 // Update cumulative GC statistics if required.
6963 if (FLAG_print_cumulative_gc_stat) {
6964 heap_->total_gc_time_ms_ += time;
6965 heap_->max_gc_pause_ = Max(heap_->max_gc_pause_, time);
6966 heap_->max_alive_after_gc_ = Max(heap_->max_alive_after_gc_,
6967 heap_->alive_after_last_gc_);
6969 heap_->min_in_mutator_ = Min(heap_->min_in_mutator_,
6970 static_cast<int>(spent_in_mutator_));
6972 } else if (FLAG_trace_gc_verbose) {
6973 heap_->total_gc_time_ms_ += time;
6976 if (collector_ == SCAVENGER && FLAG_trace_gc_ignore_scavenger) return;
6978 PrintPID("%8.0f ms: ", heap_->isolate()->time_millis_since_init());
6980 if (!FLAG_trace_gc_nvp) {
6981 int external_time = static_cast<int>(scopes_[Scope::EXTERNAL]);
6983 double end_memory_size_mb =
6984 static_cast<double>(heap_->isolate()->memory_allocator()->Size()) / MB;
6986 PrintF("%s %.1f (%.1f) -> %.1f (%.1f) MB, ",
6988 static_cast<double>(start_object_size_) / MB,
6989 static_cast<double>(start_memory_size_) / MB,
6990 SizeOfHeapObjects(),
6991 end_memory_size_mb);
6993 if (external_time > 0) PrintF("%d / ", external_time);
6994 PrintF("%d ms", time);
6995 if (steps_count_ > 0) {
6996 if (collector_ == SCAVENGER) {
6997 PrintF(" (+ %d ms in %d steps since last GC)",
6998 static_cast<int>(steps_took_since_last_gc_),
6999 steps_count_since_last_gc_);
7001 PrintF(" (+ %d ms in %d steps since start of marking, "
7002 "biggest step %f ms)",
7003 static_cast<int>(steps_took_),
7009 if (gc_reason_ != NULL) {
7010 PrintF(" [%s]", gc_reason_);
7013 if (collector_reason_ != NULL) {
7014 PrintF(" [%s]", collector_reason_);
7019 PrintF("pause=%d ", time);
7020 PrintF("mutator=%d ", static_cast<int>(spent_in_mutator_));
7022 switch (collector_) {
7026 case MARK_COMPACTOR:
7034 PrintF("external=%d ", static_cast<int>(scopes_[Scope::EXTERNAL]));
7035 PrintF("mark=%d ", static_cast<int>(scopes_[Scope::MC_MARK]));
7036 PrintF("sweep=%d ", static_cast<int>(scopes_[Scope::MC_SWEEP]));
7037 PrintF("sweepns=%d ", static_cast<int>(scopes_[Scope::MC_SWEEP_NEWSPACE]));
7038 PrintF("evacuate=%d ", static_cast<int>(scopes_[Scope::MC_EVACUATE_PAGES]));
7039 PrintF("new_new=%d ",
7040 static_cast<int>(scopes_[Scope::MC_UPDATE_NEW_TO_NEW_POINTERS]));
7041 PrintF("root_new=%d ",
7042 static_cast<int>(scopes_[Scope::MC_UPDATE_ROOT_TO_NEW_POINTERS]));
7043 PrintF("old_new=%d ",
7044 static_cast<int>(scopes_[Scope::MC_UPDATE_OLD_TO_NEW_POINTERS]));
7045 PrintF("compaction_ptrs=%d ",
7046 static_cast<int>(scopes_[Scope::MC_UPDATE_POINTERS_TO_EVACUATED]));
7047 PrintF("intracompaction_ptrs=%d ", static_cast<int>(scopes_[
7048 Scope::MC_UPDATE_POINTERS_BETWEEN_EVACUATED]));
7049 PrintF("misc_compaction=%d ",
7050 static_cast<int>(scopes_[Scope::MC_UPDATE_MISC_POINTERS]));
7052 PrintF("total_size_before=%" V8_PTR_PREFIX "d ", start_object_size_);
7053 PrintF("total_size_after=%" V8_PTR_PREFIX "d ", heap_->SizeOfObjects());
7054 PrintF("holes_size_before=%" V8_PTR_PREFIX "d ",
7055 in_free_list_or_wasted_before_gc_);
7056 PrintF("holes_size_after=%" V8_PTR_PREFIX "d ", CountTotalHolesSize());
7058 PrintF("allocated=%" V8_PTR_PREFIX "d ", allocated_since_last_gc_);
7059 PrintF("promoted=%" V8_PTR_PREFIX "d ", promoted_objects_size_);
7061 if (collector_ == SCAVENGER) {
7062 PrintF("stepscount=%d ", steps_count_since_last_gc_);
7063 PrintF("stepstook=%d ", static_cast<int>(steps_took_since_last_gc_));
7065 PrintF("stepscount=%d ", steps_count_);
7066 PrintF("stepstook=%d ", static_cast<int>(steps_took_));
7072 heap_->PrintShortHeapStatistics();
7076 const char* GCTracer::CollectorString() {
7077 switch (collector_) {
7080 case MARK_COMPACTOR:
7081 return "Mark-sweep";
7083 return "Unknown GC";
7087 int KeyedLookupCache::Hash(Map* map, String* name) {
7088 // Uses only lower 32 bits if pointers are larger.
7089 uintptr_t addr_hash =
7090 static_cast<uint32_t>(reinterpret_cast<uintptr_t>(map)) >> kMapHashShift;
7091 return static_cast<uint32_t>((addr_hash ^ name->Hash()) & kCapacityMask);
7095 int KeyedLookupCache::Lookup(Map* map, String* name) {
7096 int index = (Hash(map, name) & kHashMask);
7097 for (int i = 0; i < kEntriesPerBucket; i++) {
7098 Key& key = keys_[index + i];
7099 if ((key.map == map) && key.name->Equals(name)) {
7100 return field_offsets_[index + i];
7107 void KeyedLookupCache::Update(Map* map, String* name, int field_offset) {
7109 if (HEAP->LookupSymbolIfExists(name, &symbol)) {
7110 int index = (Hash(map, symbol) & kHashMask);
7111 // After a GC there will be free slots, so we use them in order (this may
7112 // help to get the most frequently used one in position 0).
7113 for (int i = 0; i< kEntriesPerBucket; i++) {
7114 Key& key = keys_[index];
7115 Object* free_entry_indicator = NULL;
7116 if (key.map == free_entry_indicator) {
7119 field_offsets_[index + i] = field_offset;
7123 // No free entry found in this bucket, so we move them all down one and
7124 // put the new entry at position zero.
7125 for (int i = kEntriesPerBucket - 1; i > 0; i--) {
7126 Key& key = keys_[index + i];
7127 Key& key2 = keys_[index + i - 1];
7129 field_offsets_[index + i] = field_offsets_[index + i - 1];
7132 // Write the new first entry.
7133 Key& key = keys_[index];
7136 field_offsets_[index] = field_offset;
7141 void KeyedLookupCache::Clear() {
7142 for (int index = 0; index < kLength; index++) keys_[index].map = NULL;
7146 void DescriptorLookupCache::Clear() {
7147 for (int index = 0; index < kLength; index++) keys_[index].source = NULL;
7152 void Heap::GarbageCollectionGreedyCheck() {
7153 ASSERT(FLAG_gc_greedy);
7154 if (isolate_->bootstrapper()->IsActive()) return;
7155 if (disallow_allocation_failure()) return;
7156 CollectGarbage(NEW_SPACE);
7161 TranscendentalCache::SubCache::SubCache(Type t)
7163 isolate_(Isolate::Current()) {
7164 uint32_t in0 = 0xffffffffu; // Bit-pattern for a NaN that isn't
7165 uint32_t in1 = 0xffffffffu; // generated by the FPU.
7166 for (int i = 0; i < kCacheSize; i++) {
7167 elements_[i].in[0] = in0;
7168 elements_[i].in[1] = in1;
7169 elements_[i].output = NULL;
7174 void TranscendentalCache::Clear() {
7175 for (int i = 0; i < kNumberOfCaches; i++) {
7176 if (caches_[i] != NULL) {
7184 void ExternalStringTable::CleanUp() {
7186 for (int i = 0; i < new_space_strings_.length(); ++i) {
7187 if (new_space_strings_[i] == heap_->raw_unchecked_the_hole_value()) {
7190 if (heap_->InNewSpace(new_space_strings_[i])) {
7191 new_space_strings_[last++] = new_space_strings_[i];
7193 old_space_strings_.Add(new_space_strings_[i]);
7196 new_space_strings_.Rewind(last);
7198 for (int i = 0; i < old_space_strings_.length(); ++i) {
7199 if (old_space_strings_[i] == heap_->raw_unchecked_the_hole_value()) {
7202 ASSERT(!heap_->InNewSpace(old_space_strings_[i]));
7203 old_space_strings_[last++] = old_space_strings_[i];
7205 old_space_strings_.Rewind(last);
7207 if (FLAG_verify_heap) {
7214 void ExternalStringTable::TearDown() {
7215 new_space_strings_.Free();
7216 old_space_strings_.Free();
7220 void Heap::QueueMemoryChunkForFree(MemoryChunk* chunk) {
7221 chunk->set_next_chunk(chunks_queued_for_free_);
7222 chunks_queued_for_free_ = chunk;
7226 void Heap::FreeQueuedChunks() {
7227 if (chunks_queued_for_free_ == NULL) return;
7230 for (chunk = chunks_queued_for_free_; chunk != NULL; chunk = next) {
7231 next = chunk->next_chunk();
7232 chunk->SetFlag(MemoryChunk::ABOUT_TO_BE_FREED);
7234 if (chunk->owner()->identity() == LO_SPACE) {
7235 // StoreBuffer::Filter relies on MemoryChunk::FromAnyPointerAddress.
7236 // If FromAnyPointerAddress encounters a slot that belongs to a large
7237 // chunk queued for deletion it will fail to find the chunk because
7238 // it try to perform a search in the list of pages owned by of the large
7239 // object space and queued chunks were detached from that list.
7240 // To work around this we split large chunk into normal kPageSize aligned
7241 // pieces and initialize size, owner and flags field of every piece.
7242 // If FromAnyPointerAddress encounters a slot that belongs to one of
7243 // these smaller pieces it will treat it as a slot on a normal Page.
7244 Address chunk_end = chunk->address() + chunk->size();
7245 MemoryChunk* inner = MemoryChunk::FromAddress(
7246 chunk->address() + Page::kPageSize);
7247 MemoryChunk* inner_last = MemoryChunk::FromAddress(chunk_end - 1);
7248 while (inner <= inner_last) {
7249 // Size of a large chunk is always a multiple of
7250 // OS::AllocateAlignment() so there is always
7251 // enough space for a fake MemoryChunk header.
7252 Address area_end = Min(inner->address() + Page::kPageSize, chunk_end);
7253 // Guard against overflow.
7254 if (area_end < inner->address()) area_end = chunk_end;
7255 inner->SetArea(inner->address(), area_end);
7256 inner->set_size(Page::kPageSize);
7257 inner->set_owner(lo_space());
7258 inner->SetFlag(MemoryChunk::ABOUT_TO_BE_FREED);
7259 inner = MemoryChunk::FromAddress(
7260 inner->address() + Page::kPageSize);
7264 isolate_->heap()->store_buffer()->Compact();
7265 isolate_->heap()->store_buffer()->Filter(MemoryChunk::ABOUT_TO_BE_FREED);
7266 for (chunk = chunks_queued_for_free_; chunk != NULL; chunk = next) {
7267 next = chunk->next_chunk();
7268 isolate_->memory_allocator()->Free(chunk);
7270 chunks_queued_for_free_ = NULL;
7274 void Heap::RememberUnmappedPage(Address page, bool compacted) {
7275 uintptr_t p = reinterpret_cast<uintptr_t>(page);
7276 // Tag the page pointer to make it findable in the dump file.
7278 p ^= 0xc1ead & (Page::kPageSize - 1); // Cleared.
7280 p ^= 0x1d1ed & (Page::kPageSize - 1); // I died.
7282 remembered_unmapped_pages_[remembered_unmapped_pages_index_] =
7283 reinterpret_cast<Address>(p);
7284 remembered_unmapped_pages_index_++;
7285 remembered_unmapped_pages_index_ %= kRememberedUnmappedPages;
7289 void Heap::ClearObjectStats(bool clear_last_time_stats) {
7290 memset(object_counts_, 0, sizeof(object_counts_));
7291 memset(object_sizes_, 0, sizeof(object_sizes_));
7292 if (clear_last_time_stats) {
7293 memset(object_counts_last_time_, 0, sizeof(object_counts_last_time_));
7294 memset(object_sizes_last_time_, 0, sizeof(object_sizes_last_time_));
7299 static LazyMutex checkpoint_object_stats_mutex = LAZY_MUTEX_INITIALIZER;
7302 void Heap::CheckpointObjectStats() {
7303 ScopedLock lock(checkpoint_object_stats_mutex.Pointer());
7304 Counters* counters = isolate()->counters();
7305 #define ADJUST_LAST_TIME_OBJECT_COUNT(name) \
7306 counters->count_of_##name()->Increment( \
7307 static_cast<int>(object_counts_[name])); \
7308 counters->count_of_##name()->Decrement( \
7309 static_cast<int>(object_counts_last_time_[name])); \
7310 counters->size_of_##name()->Increment( \
7311 static_cast<int>(object_sizes_[name])); \
7312 counters->size_of_##name()->Decrement( \
7313 static_cast<int>(object_sizes_last_time_[name]));
7314 INSTANCE_TYPE_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
7315 #undef ADJUST_LAST_TIME_OBJECT_COUNT
7317 #define ADJUST_LAST_TIME_OBJECT_COUNT(name) \
7318 index = FIRST_CODE_KIND_SUB_TYPE + Code::name; \
7319 counters->count_of_CODE_TYPE_##name()->Increment( \
7320 static_cast<int>(object_counts_[index])); \
7321 counters->count_of_CODE_TYPE_##name()->Decrement( \
7322 static_cast<int>(object_counts_last_time_[index])); \
7323 counters->size_of_CODE_TYPE_##name()->Increment( \
7324 static_cast<int>(object_sizes_[index])); \
7325 counters->size_of_CODE_TYPE_##name()->Decrement( \
7326 static_cast<int>(object_sizes_last_time_[index]));
7327 CODE_KIND_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
7328 #undef ADJUST_LAST_TIME_OBJECT_COUNT
7329 #define ADJUST_LAST_TIME_OBJECT_COUNT(name) \
7330 index = FIRST_FIXED_ARRAY_SUB_TYPE + name; \
7331 counters->count_of_FIXED_ARRAY_##name()->Increment( \
7332 static_cast<int>(object_counts_[index])); \
7333 counters->count_of_FIXED_ARRAY_##name()->Decrement( \
7334 static_cast<int>(object_counts_last_time_[index])); \
7335 counters->size_of_FIXED_ARRAY_##name()->Increment( \
7336 static_cast<int>(object_sizes_[index])); \
7337 counters->size_of_FIXED_ARRAY_##name()->Decrement( \
7338 static_cast<int>(object_sizes_last_time_[index]));
7339 FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
7340 #undef ADJUST_LAST_TIME_OBJECT_COUNT
7342 memcpy(object_counts_last_time_, object_counts_, sizeof(object_counts_));
7343 memcpy(object_sizes_last_time_, object_sizes_, sizeof(object_sizes_));
7347 } } // namespace v8::internal