1 // Copyright 2011 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
4 // met:
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission.
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 #include "accessors.h"
32 #include "bootstrapper.h"
34 #include "compilation-cache.h"
36 #include "deoptimizer.h"
37 #include "global-handles.h"
38 #include "heap-profiler.h"
39 #include "incremental-marking.h"
40 #include "liveobjectlist-inl.h"
41 #include "mark-compact.h"
43 #include "objects-visiting.h"
44 #include "objects-visiting-inl.h"
45 #include "runtime-profiler.h"
46 #include "scopeinfo.h"
48 #include "store-buffer.h"
49 #include "v8threads.h"
50 #include "vm-state-inl.h"
51 #if V8_TARGET_ARCH_ARM && !V8_INTERPRETED_REGEXP
52 #include "regexp-macro-assembler.h"
53 #include "arm/regexp-macro-assembler-arm.h"
54 #endif
55 #if V8_TARGET_ARCH_MIPS && !V8_INTERPRETED_REGEXP
56 #include "regexp-macro-assembler.h"
57 #include "mips/regexp-macro-assembler-mips.h"
58 #endif
64 static Mutex* gc_initializer_mutex = OS::CreateMutex();
69 // semispace_size_ should be a power of 2 and old_generation_size_ should be
70 // a multiple of Page::kPageSize.
72 #define LUMP_OF_MEMORY (128 * KB)
74 #elif defined(V8_TARGET_ARCH_X64)
75 #define LUMP_OF_MEMORY (2 * MB)
76 code_range_size_(512*MB),
77 #else
78 #define LUMP_OF_MEMORY MB
79 code_range_size_(0),
80 #endif
81 reserved_semispace_size_(8 * Max(LUMP_OF_MEMORY, Page::kPageSize)),
82 max_semispace_size_(8 * Max(LUMP_OF_MEMORY, Page::kPageSize)),
83 initial_semispace_size_(Page::kPageSize),
84 max_old_generation_size_(700ul * LUMP_OF_MEMORY),
85 max_executable_size_(128l * LUMP_OF_MEMORY),
87 // Variables set based on semispace_size_ and old_generation_size_ in
88 // ConfigureHeap (survived_since_last_expansion_, external_allocation_limit_)
89 // Will be 4 * reserved_semispace_size_ to ensure that young
90 // generation can be aligned to its size.
91 survived_since_last_expansion_(0),
93 always_allocate_scope_depth_(0),
94 linear_allocation_scope_depth_(0),
95 contexts_disposed_(0),
96 scan_on_scavenge_pages_(0),
98 old_pointer_space_(NULL),
99 old_data_space_(NULL),
104 gc_state_(NOT_IN_GC),
105 gc_post_processing_depth_(0),
108 unflattened_strings_length_(0),
110 allocation_allowed_(true),
111 allocation_timeout_(0),
112 disallow_allocation_failure_(false),
115 new_space_high_promotion_mode_active_(false),
116 old_gen_promotion_limit_(kMinimumPromotionLimit),
117 old_gen_allocation_limit_(kMinimumAllocationLimit),
118 old_gen_limit_factor_(1),
119 size_of_old_gen_at_last_old_space_gc_(0),
120 external_allocation_limit_(0),
121 amount_of_external_allocated_memory_(0),
122 amount_of_external_allocated_memory_at_last_global_gc_(0),
123 old_gen_exhausted_(false),
124 store_buffer_rebuilder_(store_buffer()),
125 hidden_symbol_(NULL),
126 global_gc_prologue_callback_(NULL),
127 global_gc_epilogue_callback_(NULL),
128 gc_safe_size_of_old_object_(NULL),
129 total_regexp_code_generated_(0),
131 young_survivors_after_last_gc_(0),
132 high_survival_rate_period_length_(0),
134 previous_survival_rate_trend_(Heap::STABLE),
135 survival_rate_trend_(Heap::STABLE),
137 max_alive_after_gc_(0),
138 min_in_mutator_(kMaxInt),
139 alive_after_last_gc_(0),
140 last_gc_end_timestamp_(0.0),
143 incremental_marking_(this),
144 number_idle_notifications_(0),
145 last_idle_notification_gc_count_(0),
146 last_idle_notification_gc_count_init_(false),
147 idle_notification_will_schedule_next_gc_(false),
148 mark_sweeps_since_idle_round_started_(0),
149 ms_count_at_last_idle_notification_(0),
150 gc_count_at_last_idle_gc_(0),
151 scavenges_since_last_idle_round_(kIdleScavengeThreshold),
152 promotion_queue_(this),
154 chunks_queued_for_free_(NULL) {
155 // Allow build-time customization of the max semispace size. Building
156 // V8 with snapshots and a non-default max semispace size is much
157 // easier if you can define it as part of the build environment.
158 #if defined(V8_MAX_SEMISPACE_SIZE)
159 max_semispace_size_ = reserved_semispace_size_ = V8_MAX_SEMISPACE_SIZE;
160 #endif
162 intptr_t max_virtual = OS::MaxVirtualMemory();
164 if (max_virtual > 0) {
165 if (code_range_size_ > 0) {
166 // Reserve no more than 1/8 of the memory for the code range.
167 code_range_size_ = Min(code_range_size_, max_virtual >> 3);
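// For example, with the default 512 MB code range and roughly 4 GB of
// addressable virtual memory, max_virtual >> 3 is 512 MB and the code range
// is left unchanged; with a 2 GB limit it would be clamped to 256 MB.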
171 memset(roots_, 0, sizeof(roots_[0]) * kRootListLength);
172 global_contexts_list_ = NULL;
173 mark_compact_collector_.heap_ = this;
174 external_string_table_.heap_ = this;
178 intptr_t Heap::Capacity() {
179 if (!HasBeenSetUp()) return 0;
181 return new_space_.Capacity() +
182 old_pointer_space_->Capacity() +
183 old_data_space_->Capacity() +
184 code_space_->Capacity() +
185 map_space_->Capacity() +
186 cell_space_->Capacity();
190 intptr_t Heap::CommittedMemory() {
191 if (!HasBeenSetUp()) return 0;
193 return new_space_.CommittedMemory() +
194 old_pointer_space_->CommittedMemory() +
195 old_data_space_->CommittedMemory() +
196 code_space_->CommittedMemory() +
197 map_space_->CommittedMemory() +
198 cell_space_->CommittedMemory() +
199 lo_space_->Size();
202 intptr_t Heap::CommittedMemoryExecutable() {
203 if (!HasBeenSetUp()) return 0;
205 return isolate()->memory_allocator()->SizeExecutable();
209 intptr_t Heap::Available() {
210 if (!HasBeenSetUp()) return 0;
212 return new_space_.Available() +
213 old_pointer_space_->Available() +
214 old_data_space_->Available() +
215 code_space_->Available() +
216 map_space_->Available() +
217 cell_space_->Available();
221 bool Heap::HasBeenSetUp() {
222 return old_pointer_space_ != NULL &&
223 old_data_space_ != NULL &&
224 code_space_ != NULL &&
225 map_space_ != NULL &&
226 cell_space_ != NULL &&
227 lo_space_ != NULL;
231 int Heap::GcSafeSizeOfOldObject(HeapObject* object) {
232 if (IntrusiveMarking::IsMarked(object)) {
233 return IntrusiveMarking::SizeOfMarkedObject(object);
235 return object->SizeFromMap(object->map());
239 GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space) {
240 // Is global GC requested?
241 if (space != NEW_SPACE || FLAG_gc_global) {
242 isolate_->counters()->gc_compactor_caused_by_request()->Increment();
243 return MARK_COMPACTOR;
246 // Is enough data promoted to justify a global GC?
247 if (OldGenerationPromotionLimitReached()) {
248 isolate_->counters()->gc_compactor_caused_by_promoted_data()->Increment();
249 return MARK_COMPACTOR;
252 // Have allocation in OLD and LO failed?
253 if (old_gen_exhausted_) {
254 isolate_->counters()->
255 gc_compactor_caused_by_oldspace_exhaustion()->Increment();
256 return MARK_COMPACTOR;
259 // Is there enough space left in OLD to guarantee that a scavenge can
262 // Note that MemoryAllocator->MaxAvailable() undercounts the memory available
263 // for object promotion. It counts only the bytes that the memory
264 // allocator has not yet allocated from the OS and assigned to any space,
265 // and does not count available bytes already in the old space or code
266 // space. Undercounting is safe---we may get an unrequested full GC when
267 // a scavenge would have succeeded.
268 if (isolate_->memory_allocator()->MaxAvailable() <= new_space_.Size()) {
269 isolate_->counters()->
270 gc_compactor_caused_by_oldspace_exhaustion()->Increment();
271 return MARK_COMPACTOR;
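// For example, if the allocator can still hand out only 4 MB of fresh memory
// but the new space currently holds 8 MB of objects, a scavenge might not be
// able to promote everything, so a full mark-compact is chosen even though
// the old spaces may still have free bytes on their own free lists.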
279 // TODO(1238405): Combine the infrastructure for --heap-stats and
280 // --log-gc to avoid the complicated preprocessor and flag testing.
281 void Heap::ReportStatisticsBeforeGC() {
282 // Heap::ReportHeapStatistics will also log NewSpace statistics when
283 // --log-gc is set. The following logic is used to avoid double logging.
286 if (FLAG_heap_stats || FLAG_log_gc) new_space_.CollectStatistics();
287 if (FLAG_heap_stats) {
288 ReportHeapStatistics("Before GC");
289 } else if (FLAG_log_gc) {
290 new_space_.ReportStatistics();
292 if (FLAG_heap_stats || FLAG_log_gc) new_space_.ClearHistograms();
295 new_space_.CollectStatistics();
296 new_space_.ReportStatistics();
297 new_space_.ClearHistograms();
303 void Heap::PrintShortHeapStatistics() {
304 if (!FLAG_trace_gc_verbose) return;
305 PrintF("Memory allocator, used: %8" V8_PTR_PREFIX "d"
306 ", available: %8" V8_PTR_PREFIX "d\n",
307 isolate_->memory_allocator()->Size(),
308 isolate_->memory_allocator()->Available());
309 PrintF("New space, used: %8" V8_PTR_PREFIX "d"
310 ", available: %8" V8_PTR_PREFIX "d\n",
311 Heap::new_space_.Size(),
312 new_space_.Available());
313 PrintF("Old pointers, used: %8" V8_PTR_PREFIX "d"
314 ", available: %8" V8_PTR_PREFIX "d"
315 ", waste: %8" V8_PTR_PREFIX "d\n",
316 old_pointer_space_->Size(),
317 old_pointer_space_->Available(),
318 old_pointer_space_->Waste());
319 PrintF("Old data space, used: %8" V8_PTR_PREFIX "d"
320 ", available: %8" V8_PTR_PREFIX "d"
321 ", waste: %8" V8_PTR_PREFIX "d\n",
322 old_data_space_->Size(),
323 old_data_space_->Available(),
324 old_data_space_->Waste());
325 PrintF("Code space, used: %8" V8_PTR_PREFIX "d"
326 ", available: %8" V8_PTR_PREFIX "d"
327 ", waste: %8" V8_PTR_PREFIX "d\n",
328 code_space_->Size(),
329 code_space_->Available(),
330 code_space_->Waste());
331 PrintF("Map space, used: %8" V8_PTR_PREFIX "d"
332 ", available: %8" V8_PTR_PREFIX "d"
333 ", waste: %8" V8_PTR_PREFIX "d\n",
334 map_space_->Size(),
335 map_space_->Available(),
336 map_space_->Waste());
337 PrintF("Cell space, used: %8" V8_PTR_PREFIX "d"
338 ", available: %8" V8_PTR_PREFIX "d"
339 ", waste: %8" V8_PTR_PREFIX "d\n",
340 cell_space_->Size(),
341 cell_space_->Available(),
342 cell_space_->Waste());
343 PrintF("Large object space, used: %8" V8_PTR_PREFIX "d"
344 ", available: %8" V8_PTR_PREFIX "d\n",
345 lo_space_->Size(),
346 lo_space_->Available());
350 // TODO(1238405): Combine the infrastructure for --heap-stats and
351 // --log-gc to avoid the complicated preprocessor and flag testing.
352 void Heap::ReportStatisticsAfterGC() {
353 // As with the statistics before GC, we use some complicated logic to ensure that
354 // NewSpace statistics are logged exactly once when --log-gc is turned on.
356 if (FLAG_heap_stats) {
357 new_space_.CollectStatistics();
358 ReportHeapStatistics("After GC");
359 } else if (FLAG_log_gc) {
360 new_space_.ReportStatistics();
363 if (FLAG_log_gc) new_space_.ReportStatistics();
368 void Heap::GarbageCollectionPrologue() {
369 isolate_->transcendental_cache()->Clear();
370 ClearJSFunctionResultCaches();
372 unflattened_strings_length_ = 0;
374 ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
375 allow_allocation(false);
377 if (FLAG_verify_heap) {
381 if (FLAG_gc_verbose) Print();
385 ReportStatisticsBeforeGC();
388 LiveObjectList::GCPrologue();
389 store_buffer()->GCPrologue();
392 intptr_t Heap::SizeOfObjects() {
395 for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
396 total += space->SizeOfObjects();
401 void Heap::GarbageCollectionEpilogue() {
402 store_buffer()->GCEpilogue();
403 LiveObjectList::GCEpilogue();
405 allow_allocation(true);
408 if (FLAG_verify_heap) {
412 if (FLAG_print_global_handles) isolate_->global_handles()->Print();
413 if (FLAG_print_handles) PrintHandles();
414 if (FLAG_gc_verbose) Print();
415 if (FLAG_code_stats) ReportCodeStatistics("After GC");
418 isolate_->counters()->alive_after_last_gc()->Set(
419 static_cast<int>(SizeOfObjects()));
421 isolate_->counters()->symbol_table_capacity()->Set(
422 symbol_table()->Capacity());
423 isolate_->counters()->number_of_symbols()->Set(
424 symbol_table()->NumberOfElements());
426 ReportStatisticsAfterGC();
428 #ifdef ENABLE_DEBUGGER_SUPPORT
429 isolate_->debug()->AfterGarbageCollection();
430 #endif // ENABLE_DEBUGGER_SUPPORT
434 void Heap::CollectAllGarbage(int flags) {
435 // Since we are ignoring the return value, the exact choice of space does
436 // not matter, so long as we do not specify NEW_SPACE, which would not
437 // cause a full GC.
438 mark_compact_collector_.SetFlags(flags);
439 CollectGarbage(OLD_POINTER_SPACE);
440 mark_compact_collector_.SetFlags(kNoGCFlags);
444 void Heap::CollectAllAvailableGarbage() {
445 // Since we are ignoring the return value, the exact choice of space does
446 // not matter, so long as we do not specify NEW_SPACE, which would not
447 // cause a full GC.
448 // Major GC would invoke weak handle callbacks on weakly reachable
449 // handles, but won't collect weakly reachable objects until next
450 // major GC. Therefore if we collect aggressively and weak handle callback
451 // has been invoked, we rerun major GC to release objects which become
452 // garbage.
453 // Note: as weak callbacks can execute arbitrary code, we cannot
454 // hope that eventually there will be no weak callback invocations.
455 // Therefore stop recollecting after several attempts.
456 mark_compact_collector()->SetFlags(kMakeHeapIterableMask);
457 isolate_->compilation_cache()->Clear();
458 const int kMaxNumberOfAttempts = 7;
459 for (int attempt = 0; attempt < kMaxNumberOfAttempts; attempt++) {
460 if (!CollectGarbage(OLD_POINTER_SPACE, MARK_COMPACTOR)) {
464 mark_compact_collector()->SetFlags(kNoGCFlags);
468 incremental_marking()->UncommitMarkingDeque();
472 bool Heap::CollectGarbage(AllocationSpace space, GarbageCollector collector) {
473 // The VM is in the GC state until exiting this function.
474 VMState state(isolate_, GC);
477 // Reset the allocation timeout to the GC interval, but make sure to
478 // allow at least a few allocations after a collection. The reason
479 // for this is that we have a lot of allocation sequences and we
480 // assume that a garbage collection will allow the subsequent
481 // allocation attempts to go through.
482 allocation_timeout_ = Max(6, FLAG_gc_interval);
485 if (collector == SCAVENGER && !incremental_marking()->IsStopped()) {
486 if (FLAG_trace_incremental_marking) {
487 PrintF("[IncrementalMarking] Scavenge during marking.\n");
491 if (collector == MARK_COMPACTOR &&
492 !mark_compact_collector()->PreciseSweepingRequired() &&
493 !incremental_marking()->IsStopped() &&
494 !incremental_marking()->should_hurry() &&
495 FLAG_incremental_marking_steps) {
496 if (FLAG_trace_incremental_marking) {
497 PrintF("[IncrementalMarking] Delaying MarkSweep.\n");
499 collector = SCAVENGER;
502 bool next_gc_likely_to_collect_more = false;
504 { GCTracer tracer(this);
505 GarbageCollectionPrologue();
506 // The GC count was incremented in the prologue. Tell the tracer about
507 // it.
508 tracer.set_gc_count(gc_count_);
510 // Tell the tracer which collector we've selected.
511 tracer.set_collector(collector);
513 HistogramTimer* rate = (collector == SCAVENGER)
514 ? isolate_->counters()->gc_scavenger()
515 : isolate_->counters()->gc_compactor();
517 next_gc_likely_to_collect_more =
518 PerformGarbageCollection(collector, &tracer);
521 GarbageCollectionEpilogue();
524 ASSERT(collector == SCAVENGER || incremental_marking()->IsStopped());
525 if (incremental_marking()->IsStopped()) {
526 if (incremental_marking()->WorthActivating() && NextGCIsLikelyToBeFull()) {
527 incremental_marking()->Start();
531 return next_gc_likely_to_collect_more;
535 void Heap::PerformScavenge() {
536 GCTracer tracer(this);
537 if (incremental_marking()->IsStopped()) {
538 PerformGarbageCollection(SCAVENGER, &tracer);
540 PerformGarbageCollection(MARK_COMPACTOR, &tracer);
546 // Helper class for verifying the symbol table.
547 class SymbolTableVerifier : public ObjectVisitor {
549 void VisitPointers(Object** start, Object** end) {
550 // Visit all HeapObject pointers in [start, end).
551 for (Object** p = start; p < end; p++) {
552 if ((*p)->IsHeapObject()) {
553 // Check that the symbol is actually a symbol.
554 ASSERT((*p)->IsTheHole() || (*p)->IsUndefined() || (*p)->IsSymbol());
562 static void VerifySymbolTable() {
564 SymbolTableVerifier verifier;
565 HEAP->symbol_table()->IterateElements(&verifier);
570 void Heap::ReserveSpace(
571 int new_space_size,
572 int pointer_space_size,
573 int data_space_size,
574 int code_space_size,
575 int map_space_size,
576 int cell_space_size,
577 int large_object_size) {
578 NewSpace* new_space = Heap::new_space();
579 PagedSpace* old_pointer_space = Heap::old_pointer_space();
580 PagedSpace* old_data_space = Heap::old_data_space();
581 PagedSpace* code_space = Heap::code_space();
582 PagedSpace* map_space = Heap::map_space();
583 PagedSpace* cell_space = Heap::cell_space();
584 LargeObjectSpace* lo_space = Heap::lo_space();
585 bool gc_performed = true;
586 while (gc_performed) {
587 gc_performed = false;
588 if (!new_space->ReserveSpace(new_space_size)) {
589 Heap::CollectGarbage(NEW_SPACE);
592 if (!old_pointer_space->ReserveSpace(pointer_space_size)) {
593 Heap::CollectGarbage(OLD_POINTER_SPACE);
596 if (!(old_data_space->ReserveSpace(data_space_size))) {
597 Heap::CollectGarbage(OLD_DATA_SPACE);
600 if (!(code_space->ReserveSpace(code_space_size))) {
601 Heap::CollectGarbage(CODE_SPACE);
604 if (!(map_space->ReserveSpace(map_space_size))) {
605 Heap::CollectGarbage(MAP_SPACE);
608 if (!(cell_space->ReserveSpace(cell_space_size))) {
609 Heap::CollectGarbage(CELL_SPACE);
612 // We add a slack-factor of 2 in order to have space for a series of
613 // large-object allocations that are only just larger than the page size.
614 large_object_size *= 2;
615 // The ReserveSpace method on the large object space checks how much
616 // we can expand the old generation. This includes expansion caused by
617 // allocation in the other spaces.
618 large_object_size += cell_space_size + map_space_size + code_space_size +
619 data_space_size + pointer_space_size;
620 if (!(lo_space->ReserveSpace(large_object_size))) {
621 Heap::CollectGarbage(LO_SPACE);
628 void Heap::EnsureFromSpaceIsCommitted() {
629 if (new_space_.CommitFromSpaceIfNeeded()) return;
631 // Committing memory to from space failed.
632 // Try shrinking and try again.
634 if (new_space_.CommitFromSpaceIfNeeded()) return;
636 // Committing memory to from space failed again.
637 // Memory is exhausted and we will die.
638 V8::FatalProcessOutOfMemory("Committing semi space failed.");
642 void Heap::ClearJSFunctionResultCaches() {
643 if (isolate_->bootstrapper()->IsActive()) return;
645 Object* context = global_contexts_list_;
646 while (!context->IsUndefined()) {
647 // Get the caches for this context. GC can happen when the context
648 // is not fully initialized, so the caches can be undefined.
649 Object* caches_or_undefined =
650 Context::cast(context)->get(Context::JSFUNCTION_RESULT_CACHES_INDEX);
651 if (!caches_or_undefined->IsUndefined()) {
652 FixedArray* caches = FixedArray::cast(caches_or_undefined);
654 int length = caches->length();
655 for (int i = 0; i < length; i++) {
656 JSFunctionResultCache::cast(caches->get(i))->Clear();
659 // Get the next context:
660 context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
666 void Heap::ClearNormalizedMapCaches() {
667 if (isolate_->bootstrapper()->IsActive() &&
668 !incremental_marking()->IsMarking()) {
672 Object* context = global_contexts_list_;
673 while (!context->IsUndefined()) {
674 // GC can happen when the context is not fully initialized,
675 // so the cache can be undefined.
677 Context::cast(context)->get(Context::NORMALIZED_MAP_CACHE_INDEX);
678 if (!cache->IsUndefined()) {
679 NormalizedMapCache::cast(cache)->Clear();
681 context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
686 void Heap::UpdateSurvivalRateTrend(int start_new_space_size) {
687 double survival_rate =
688 (static_cast<double>(young_survivors_after_last_gc_) * 100) /
689 start_new_space_size;
691 if (survival_rate > kYoungSurvivalRateThreshold) {
692 high_survival_rate_period_length_++;
694 high_survival_rate_period_length_ = 0;
697 double survival_rate_diff = survival_rate_ - survival_rate;
699 if (survival_rate_diff > kYoungSurvivalRateAllowedDeviation) {
700 set_survival_rate_trend(DECREASING);
701 } else if (survival_rate_diff < -kYoungSurvivalRateAllowedDeviation) {
702 set_survival_rate_trend(INCREASING);
704 set_survival_rate_trend(STABLE);
707 survival_rate_ = survival_rate;
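// For example, if 1 MB of objects survives a scavenge of a 4 MB new space,
// survival_rate is 25. A later rate of 80 makes survival_rate_diff negative
// beyond the allowed deviation, so the trend becomes INCREASING; a comparable
// drop marks it DECREASING, and anything in between keeps it STABLE.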
710 bool Heap::PerformGarbageCollection(GarbageCollector collector,
712 bool next_gc_likely_to_collect_more = false;
714 if (collector != SCAVENGER) {
715 PROFILE(isolate_, CodeMovingGCEvent());
718 if (FLAG_verify_heap) {
721 if (collector == MARK_COMPACTOR && global_gc_prologue_callback_) {
722 ASSERT(!allocation_allowed_);
723 GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
724 global_gc_prologue_callback_();
727 GCType gc_type =
728 collector == MARK_COMPACTOR ? kGCTypeMarkSweepCompact : kGCTypeScavenge;
730 for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
731 if (gc_type & gc_prologue_callbacks_[i].gc_type) {
732 gc_prologue_callbacks_[i].callback(gc_type, kNoGCCallbackFlags);
736 EnsureFromSpaceIsCommitted();
738 int start_new_space_size = Heap::new_space()->SizeAsInt();
740 if (IsHighSurvivalRate()) {
741 // We speed up the incremental marker if it is running so that it
742 // does not fall behind the rate of promotion, which would cause a
743 // constantly growing old space.
744 incremental_marking()->NotifyOfHighPromotionRate();
747 if (collector == MARK_COMPACTOR) {
748 // Perform mark-sweep with optional compaction.
751 bool high_survival_rate_during_scavenges = IsHighSurvivalRate() &&
752 IsStableOrIncreasingSurvivalTrend();
754 UpdateSurvivalRateTrend(start_new_space_size);
756 if (!new_space_high_promotion_mode_active_ &&
757 new_space_.Capacity() == new_space_.MaximumCapacity() &&
758 IsStableOrIncreasingSurvivalTrend() &&
759 IsHighSurvivalRate()) {
760 // Stable high survival rates even though the young generation is at
761 // maximum capacity indicate that most objects will be promoted.
762 // To decrease scavenger pauses and final mark-sweep pauses, we
763 // have to limit maximal capacity of the young generation.
764 new_space_high_promotion_mode_active_ = true;
766 PrintF("Limited new space size due to high promotion rate: %d MB\n",
767 new_space_.InitialCapacity() / MB);
769 } else if (new_space_high_promotion_mode_active_ &&
770 IsDecreasingSurvivalTrend() &&
771 !IsHighSurvivalRate()) {
772 // Decreasing low survival rates might indicate that the above high
773 // promotion mode is over and we should allow the young generation
774 // to grow.
775 new_space_high_promotion_mode_active_ = false;
777 PrintF("Unlimited new space size due to low promotion rate: %d MB\n",
778 new_space_.MaximumCapacity() / MB);
782 size_of_old_gen_at_last_old_space_gc_ = PromotedSpaceSize();
784 if (high_survival_rate_during_scavenges &&
785 IsStableOrIncreasingSurvivalTrend()) {
786 // Stable high survival rates of young objects both during partial and
787 // full collection indicate that the mutator is either building or modifying
788 // a structure with a long lifetime.
789 // In this case we aggressively raise old generation memory limits to
790 // postpone subsequent mark-sweep collection and thus trade memory
791 // space for the mutation speed.
792 old_gen_limit_factor_ = 2;
794 old_gen_limit_factor_ = 1;
797 old_gen_promotion_limit_ =
798 OldGenPromotionLimit(size_of_old_gen_at_last_old_space_gc_);
799 old_gen_allocation_limit_ =
800 OldGenAllocationLimit(size_of_old_gen_at_last_old_space_gc_);
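// The factor chosen above is what separates the two cases: a mutator that is
// steadily building a long-lived structure keeps survival rates high through
// both scavenges and full collections, so old_gen_limit_factor_ stays at 2,
// trading memory for mutator speed as described above; once survival rates
// drop, the factor falls back to 1.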
802 old_gen_exhausted_ = false;
808 UpdateSurvivalRateTrend(start_new_space_size);
811 if (new_space_high_promotion_mode_active_ &&
812 new_space_.Capacity() > new_space_.InitialCapacity()) {
816 isolate_->counters()->objs_since_last_young()->Set(0);
818 gc_post_processing_depth_++;
819 { DisableAssertNoAllocation allow_allocation;
820 GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
821 next_gc_likely_to_collect_more =
822 isolate_->global_handles()->PostGarbageCollectionProcessing(collector);
824 gc_post_processing_depth_--;
826 // Update relocatables.
827 Relocatable::PostGarbageCollectionProcessing();
829 if (collector == MARK_COMPACTOR) {
830 // Register the amount of external allocated memory.
831 amount_of_external_allocated_memory_at_last_global_gc_ =
832 amount_of_external_allocated_memory_;
835 GCCallbackFlags callback_flags = kNoGCCallbackFlags;
836 for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
837 if (gc_type & gc_epilogue_callbacks_[i].gc_type) {
838 gc_epilogue_callbacks_[i].callback(gc_type, callback_flags);
842 if (collector == MARK_COMPACTOR && global_gc_epilogue_callback_) {
843 ASSERT(!allocation_allowed_);
844 GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
845 global_gc_epilogue_callback_();
847 if (FLAG_verify_heap) {
851 return next_gc_likely_to_collect_more;
855 void Heap::MarkCompact(GCTracer* tracer) {
856 gc_state_ = MARK_COMPACT;
857 LOG(isolate_, ResourceEvent("markcompact", "begin"));
859 mark_compact_collector_.Prepare(tracer);
862 tracer->set_full_gc_count(ms_count_);
864 MarkCompactPrologue();
866 mark_compact_collector_.CollectGarbage();
868 LOG(isolate_, ResourceEvent("markcompact", "end"));
870 gc_state_ = NOT_IN_GC;
872 isolate_->counters()->objs_since_last_full()->Set(0);
874 contexts_disposed_ = 0;
878 void Heap::MarkCompactPrologue() {
879 // At any old GC clear the keyed lookup cache to enable collection of unused
880 // maps.
881 isolate_->keyed_lookup_cache()->Clear();
882 isolate_->context_slot_cache()->Clear();
883 isolate_->descriptor_lookup_cache()->Clear();
884 StringSplitCache::Clear(string_split_cache());
886 isolate_->compilation_cache()->MarkCompactPrologue();
888 CompletelyClearInstanceofCache();
890 // TODO(1605) select heuristic for flushing NumberString cache with
891 // FlushNumberStringCache
892 if (FLAG_cleanup_code_caches_at_gc) {
893 polymorphic_code_cache()->set_cache(undefined_value());
896 ClearNormalizedMapCaches();
900 Object* Heap::FindCodeObject(Address a) {
901 return isolate()->inner_pointer_to_code_cache()->
902 GcSafeFindCodeForInnerPointer(a);
906 // Helper class for copying HeapObjects
907 class ScavengeVisitor: public ObjectVisitor {
909 explicit ScavengeVisitor(Heap* heap) : heap_(heap) {}
911 void VisitPointer(Object** p) { ScavengePointer(p); }
913 void VisitPointers(Object** start, Object** end) {
914 // Copy all HeapObject pointers in [start, end)
915 for (Object** p = start; p < end; p++) ScavengePointer(p);
919 void ScavengePointer(Object** p) {
921 if (!heap_->InNewSpace(object)) return;
922 Heap::ScavengeObject(reinterpret_cast<HeapObject**>(p),
923 reinterpret_cast<HeapObject*>(object));
931 // Visitor class to verify pointers in code or data space do not point into
932 // new space.
933 class VerifyNonPointerSpacePointersVisitor: public ObjectVisitor {
935 void VisitPointers(Object** start, Object**end) {
936 for (Object** current = start; current < end; current++) {
937 if ((*current)->IsHeapObject()) {
938 ASSERT(!HEAP->InNewSpace(HeapObject::cast(*current)));
945 static void VerifyNonPointerSpacePointers() {
946 // Verify that there are no pointers to new space in spaces where we
947 // do not expect them.
948 VerifyNonPointerSpacePointersVisitor v;
949 HeapObjectIterator code_it(HEAP->code_space());
950 for (HeapObject* object = code_it.Next();
951 object != NULL; object = code_it.Next())
954 // The old data space is normally swept conservatively, so the iterator
955 // does not work there and we normally skip the next bit.
956 if (!HEAP->old_data_space()->was_swept_conservatively()) {
957 HeapObjectIterator data_it(HEAP->old_data_space());
958 for (HeapObject* object = data_it.Next();
959 object != NULL; object = data_it.Next())
966 void Heap::CheckNewSpaceExpansionCriteria() {
967 if (new_space_.Capacity() < new_space_.MaximumCapacity() &&
968 survived_since_last_expansion_ > new_space_.Capacity() &&
969 !new_space_high_promotion_mode_active_) {
970 // Grow the size of new space if there is room to grow, enough data
971 // has survived scavenge since the last expansion and we are not in
972 // high promotion mode.
974 survived_since_last_expansion_ = 0;
979 static bool IsUnscavengedHeapObject(Heap* heap, Object** p) {
980 return heap->InNewSpace(*p) &&
981 !HeapObject::cast(*p)->map_word().IsForwardingAddress();
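// During a scavenge every object that has been copied or promoted gets its
// map word overwritten with a forwarding address, so a new-space object whose
// map word still holds a plain map is one the scavenger never reached, i.e.
// it is only weakly reachable.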
985 void Heap::ScavengeStoreBufferCallback(
986 Heap* heap,
987 MemoryChunk* page,
988 StoreBufferEvent event) {
989 heap->store_buffer_rebuilder_.Callback(page, event);
993 void StoreBufferRebuilder::Callback(MemoryChunk* page, StoreBufferEvent event) {
994 if (event == kStoreBufferStartScanningPagesEvent) {
995 start_of_current_page_ = NULL;
996 current_page_ = NULL;
997 } else if (event == kStoreBufferScanningPageEvent) {
998 if (current_page_ != NULL) {
999 // If this page already overflowed the store buffer during this iteration.
1000 if (current_page_->scan_on_scavenge()) {
1001 // Then we should wipe out the entries that have been added for it.
1002 store_buffer_->SetTop(start_of_current_page_);
1003 } else if (store_buffer_->Top() - start_of_current_page_ >=
1004 (store_buffer_->Limit() - store_buffer_->Top()) >> 2) {
1005 // Did we find too many pointers in the previous page? The heuristic is
1006 // that no page can take more than 1/5 of the remaining slots in the store
1007 // buffer.
1008 current_page_->set_scan_on_scavenge(true);
1009 store_buffer_->SetTop(start_of_current_page_);
1011 // In this case the page we scanned took a reasonable number of slots in
1012 // the store buffer. It has now been rehabilitated and is no longer
1013 // marked scan_on_scavenge.
1014 ASSERT(!current_page_->scan_on_scavenge());
1017 start_of_current_page_ = store_buffer_->Top();
1018 current_page_ = page;
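// A rough example of the ">> 2" test above: if the store buffer had 1000 free
// slots when scanning of the page started and the page added 250 entries,
// 750 slots remain and 250 >= (750 >> 2), so the page is flipped to
// scan-on-scavenge. Solving x >= r / 4 for the remaining space r gives
// x >= (x + r) / 5, which is the "1/5 of the remaining slots" rule above.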
1019 } else if (event == kStoreBufferFullEvent) {
1020 // The current page overflowed the store buffer again. Wipe out its entries
1021 // in the store buffer and mark it scan-on-scavenge again. This may happen
1022 // several times while scanning.
1023 if (current_page_ == NULL) {
1024 // Store Buffer overflowed while scanning promoted objects. These are not
1025 // in any particular page, though they are likely to be clustered by the
1026 // allocation routines.
1027 store_buffer_->EnsureSpace(StoreBuffer::kStoreBufferSize);
1029 // Store Buffer overflowed while scanning a particular old space page for
1030 // pointers to new space.
1031 ASSERT(current_page_ == page);
1032 ASSERT(page != NULL);
1033 current_page_->set_scan_on_scavenge(true);
1034 ASSERT(start_of_current_page_ != store_buffer_->Top());
1035 store_buffer_->SetTop(start_of_current_page_);
1043 void PromotionQueue::Initialize() {
1044 // Assumes that a NewSpacePage exactly fits a number of promotion queue
1045 // entries (where each is a pair of intptr_t). This allows us to simplify
1046 // the test for when to switch pages.
1047 ASSERT((Page::kPageSize - MemoryChunk::kBodyOffset) % (2 * kPointerSize)
1048 == 0);
1049 limit_ = reinterpret_cast<intptr_t*>(heap_->new_space()->ToSpaceStart());
1050 front_ = rear_ =
1051 reinterpret_cast<intptr_t*>(heap_->new_space()->ToSpaceEnd());
1052 emergency_stack_ = NULL;
1057 void PromotionQueue::RelocateQueueHead() {
1058 ASSERT(emergency_stack_ == NULL);
1060 Page* p = Page::FromAllocationTop(reinterpret_cast<Address>(rear_));
1061 intptr_t* head_start = rear_;
1062 intptr_t* head_end =
1063 Min(front_, reinterpret_cast<intptr_t*>(p->body_limit()));
1065 int entries_count =
1066 static_cast<int>(head_end - head_start) / kEntrySizeInWords;
1068 emergency_stack_ = new List<Entry>(2 * entries_count);
1070 while (head_start != head_end) {
1071 int size = static_cast<int>(*(head_start++));
1072 HeapObject* obj = reinterpret_cast<HeapObject*>(*(head_start++));
1073 emergency_stack_->Add(Entry(obj, size));
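// Each queue entry occupies two adjacent words at the top of to-space, a size
// and an object pointer (read in that order by the loop above), which is why
// head_start is advanced twice per entry and why Initialize() asserts that a
// page body holds a whole number of two-word entries.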
1079 void Heap::Scavenge() {
1081 if (FLAG_verify_heap) VerifyNonPointerSpacePointers();
1084 gc_state_ = SCAVENGE;
1086 // Implements Cheney's copying algorithm
1087 LOG(isolate_, ResourceEvent("scavenge", "begin"));
1089 // Clear descriptor cache.
1090 isolate_->descriptor_lookup_cache()->Clear();
1092 // Used for updating survived_since_last_expansion_ at function end.
1093 intptr_t survived_watermark = PromotedSpaceSize();
1095 CheckNewSpaceExpansionCriteria();
1097 SelectScavengingVisitorsTable();
1099 incremental_marking()->PrepareForScavenge();
1101 AdvanceSweepers(static_cast<int>(new_space_.Size()));
1103 // Flip the semispaces. After flipping, to space is empty, from space has
1104 // live objects.
1106 new_space_.ResetAllocationInfo();
1108 // We need to sweep newly copied objects which can be either in the
1109 // to space or promoted to the old generation. For to-space
1110 // objects, we treat the bottom of the to space as a queue. Newly
1111 // copied and unswept objects lie between a 'front' mark and the
1112 // allocation pointer.
1114 // Promoted objects can go into various old-generation spaces, and
1115 // can be allocated internally in the spaces (from the free list).
1116 // We treat the top of the to space as a queue of addresses of
1117 // promoted objects. The addresses of newly promoted and unswept
1118 // objects lie between a 'front' mark and a 'rear' mark that is
1119 // updated as a side effect of promoting an object.
1121 // There is guaranteed to be enough room at the top of the to space
1122 // for the addresses of promoted objects: every object promoted
1123 // frees up its size in bytes from the top of the new space, and
1124 // objects are at least one pointer in size.
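// Roughly, during the scavenge to-space looks like this:
//
//   [ processed copied objects | unprocessed copied objects | free | promotion queue ]
//   ^ bottom                   ^ scan front                 ^ allocation top   ^ ToSpaceEnd
//
// The scan front chases the allocation pointer at the bottom, while the
// promotion queue grows downwards from the end of the space, reusing room
// freed up by every object that gets promoted.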
1125 Address new_space_front = new_space_.ToSpaceStart();
1126 promotion_queue_.Initialize();
1129 store_buffer()->Clean();
1132 ScavengeVisitor scavenge_visitor(this);
1134 IterateRoots(&scavenge_visitor, VISIT_ALL_IN_SCAVENGE);
1136 // Copy objects reachable from the old generation.
1138 StoreBufferRebuildScope scope(this,
1140 &ScavengeStoreBufferCallback);
1141 store_buffer()->IteratePointersToNewSpace(&ScavengeObject);
1144 // Copy objects reachable from cells by scavenging cell values directly.
1145 HeapObjectIterator cell_iterator(cell_space_);
1146 for (HeapObject* cell = cell_iterator.Next();
1147 cell != NULL; cell = cell_iterator.Next()) {
1148 if (cell->IsJSGlobalPropertyCell()) {
1149 Address value_address =
1150 reinterpret_cast<Address>(cell) +
1151 (JSGlobalPropertyCell::kValueOffset - kHeapObjectTag);
1152 scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(value_address));
1156 // Scavenge object reachable from the global contexts list directly.
1157 scavenge_visitor.VisitPointer(BitCast<Object**>(&global_contexts_list_));
1159 new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
1160 isolate_->global_handles()->IdentifyNewSpaceWeakIndependentHandles(
1161 &IsUnscavengedHeapObject);
1162 isolate_->global_handles()->IterateNewSpaceWeakIndependentRoots(
1163 &scavenge_visitor);
1164 new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
1166 UpdateNewSpaceReferencesInExternalStringTable(
1167 &UpdateNewSpaceReferenceInExternalStringTableEntry);
1169 promotion_queue_.Destroy();
1171 LiveObjectList::UpdateReferencesForScavengeGC();
1172 isolate()->runtime_profiler()->UpdateSamplesAfterScavenge();
1173 incremental_marking()->UpdateMarkingDequeAfterScavenge();
1175 ASSERT(new_space_front == new_space_.top());
1178 new_space_.set_age_mark(new_space_.top());
1180 new_space_.LowerInlineAllocationLimit(
1181 new_space_.inline_allocation_limit_step());
1183 // Update how much has survived scavenge.
1184 IncrementYoungSurvivorsCounter(static_cast<int>(
1185 (PromotedSpaceSize() - survived_watermark) + new_space_.Size()));
1187 LOG(isolate_, ResourceEvent("scavenge", "end"));
1189 gc_state_ = NOT_IN_GC;
1191 scavenges_since_last_idle_round_++;
1195 String* Heap::UpdateNewSpaceReferenceInExternalStringTableEntry(Heap* heap,
1196 Object** p) {
1197 MapWord first_word = HeapObject::cast(*p)->map_word();
1199 if (!first_word.IsForwardingAddress()) {
1200 // An unreachable external string can be finalized.
1201 heap->FinalizeExternalString(String::cast(*p));
1202 return NULL;
1203 }
1205 // String is still reachable.
1206 return String::cast(first_word.ToForwardingAddress());
1210 void Heap::UpdateNewSpaceReferencesInExternalStringTable(
1211 ExternalStringTableUpdaterCallback updater_func) {
1212 if (FLAG_verify_heap) {
1213 external_string_table_.Verify();
1216 if (external_string_table_.new_space_strings_.is_empty()) return;
1218 Object** start = &external_string_table_.new_space_strings_[0];
1219 Object** end = start + external_string_table_.new_space_strings_.length();
1220 Object** last = start;
1222 for (Object** p = start; p < end; ++p) {
1223 ASSERT(InFromSpace(*p));
1224 String* target = updater_func(this, p);
1226 if (target == NULL) continue;
1228 ASSERT(target->IsExternalString());
1230 if (InNewSpace(target)) {
1231 // String is still in new space. Update the table entry.
1232 *last = target;
1233 ++last;
1234 } else {
1235 // String got promoted. Move it to the old string list.
1236 external_string_table_.AddOldString(target);
1240 ASSERT(last <= end);
1241 external_string_table_.ShrinkNewStrings(static_cast<int>(last - start));
1245 void Heap::UpdateReferencesInExternalStringTable(
1246 ExternalStringTableUpdaterCallback updater_func) {
1248 // Update old space string references.
1249 if (external_string_table_.old_space_strings_.length() > 0) {
1250 Object** start = &external_string_table_.old_space_strings_[0];
1251 Object** end = start + external_string_table_.old_space_strings_.length();
1252 for (Object** p = start; p < end; ++p) *p = updater_func(this, p);
1255 UpdateNewSpaceReferencesInExternalStringTable(updater_func);
1259 static Object* ProcessFunctionWeakReferences(Heap* heap,
1260 Object* function,
1261 WeakObjectRetainer* retainer) {
1262 Object* undefined = heap->undefined_value();
1263 Object* head = undefined;
1264 JSFunction* tail = NULL;
1265 Object* candidate = function;
1266 while (candidate != undefined) {
1267 // Check whether to keep the candidate in the list.
1268 JSFunction* candidate_function = reinterpret_cast<JSFunction*>(candidate);
1269 Object* retain = retainer->RetainAs(candidate);
1270 if (retain != NULL) {
1271 if (head == undefined) {
1272 // First element in the list.
1273 head = retain;
1274 } else {
1275 // Subsequent elements in the list.
1276 ASSERT(tail != NULL);
1277 tail->set_next_function_link(retain);
1279 // Retained function is new tail.
1280 candidate_function = reinterpret_cast<JSFunction*>(retain);
1281 tail = candidate_function;
1283 ASSERT(retain->IsUndefined() || retain->IsJSFunction());
1285 if (retain == undefined) break;
1288 // Move to next element in the list.
1289 candidate = candidate_function->next_function_link();
1292 // Terminate the list if there are one or more elements.
1294 tail->set_next_function_link(undefined);
1301 void Heap::ProcessWeakReferences(WeakObjectRetainer* retainer) {
1302 Object* undefined = undefined_value();
1303 Object* head = undefined;
1304 Context* tail = NULL;
1305 Object* candidate = global_contexts_list_;
1306 while (candidate != undefined) {
1307 // Check whether to keep the candidate in the list.
1308 Context* candidate_context = reinterpret_cast<Context*>(candidate);
1309 Object* retain = retainer->RetainAs(candidate);
1310 if (retain != NULL) {
1311 if (head == undefined) {
1312 // First element in the list.
1313 head = retain;
1314 } else {
1315 // Subsequent elements in the list.
1316 ASSERT(tail != NULL);
1317 tail->set_unchecked(this,
1318 Context::NEXT_CONTEXT_LINK,
1320 UPDATE_WRITE_BARRIER);
1322 // Retained context is new tail.
1323 candidate_context = reinterpret_cast<Context*>(retain);
1324 tail = candidate_context;
1326 if (retain == undefined) break;
1328 // Process the weak list of optimized functions for the context.
1329 Object* function_list_head =
1330 ProcessFunctionWeakReferences(
1331 this,
1332 candidate_context->get(Context::OPTIMIZED_FUNCTIONS_LIST),
1333 retainer);
1334 candidate_context->set_unchecked(this,
1335 Context::OPTIMIZED_FUNCTIONS_LIST,
1336 function_list_head,
1337 UPDATE_WRITE_BARRIER);
1340 // Move to next element in the list.
1341 candidate = candidate_context->get(Context::NEXT_CONTEXT_LINK);
1344 // Terminate the list if there are one or more elements.
1346 tail->set_unchecked(this,
1347 Context::NEXT_CONTEXT_LINK,
1348 Heap::undefined_value(),
1349 UPDATE_WRITE_BARRIER);
1352 // Update the head of the list of contexts.
1353 global_contexts_list_ = head;
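// The global contexts form a singly linked list threaded through each
// context's NEXT_CONTEXT_LINK slot. The loop above rebuilds that list in
// place, dropping contexts the retainer no longer keeps alive, and gives each
// surviving context's weak list of optimized functions the same treatment via
// ProcessFunctionWeakReferences.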
1357 void Heap::VisitExternalResources(v8::ExternalResourceVisitor* visitor) {
1358 AssertNoAllocation no_allocation;
1360 class VisitorAdapter : public ObjectVisitor {
1362 explicit VisitorAdapter(v8::ExternalResourceVisitor* visitor)
1363 : visitor_(visitor) {}
1364 virtual void VisitPointers(Object** start, Object** end) {
1365 for (Object** p = start; p < end; p++) {
1366 if ((*p)->IsExternalString()) {
1367 visitor_->VisitExternalString(Utils::ToLocal(
1368 Handle<String>(String::cast(*p))));
1373 v8::ExternalResourceVisitor* visitor_;
1374 } visitor_adapter(visitor);
1375 external_string_table_.Iterate(&visitor_adapter);
1379 class NewSpaceScavenger : public StaticNewSpaceVisitor<NewSpaceScavenger> {
1381 static inline void VisitPointer(Heap* heap, Object** p) {
1382 Object* object = *p;
1383 if (!heap->InNewSpace(object)) return;
1384 Heap::ScavengeObject(reinterpret_cast<HeapObject**>(p),
1385 reinterpret_cast<HeapObject*>(object));
1390 Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
1391 Address new_space_front) {
1393 SemiSpace::AssertValidRange(new_space_front, new_space_.top());
1394 // The addresses new_space_front and new_space_.top() define a
1395 // queue of unprocessed copied objects. Process them until the
1396 // queue is empty.
1397 while (new_space_front != new_space_.top()) {
1398 if (!NewSpacePage::IsAtEnd(new_space_front)) {
1399 HeapObject* object = HeapObject::FromAddress(new_space_front);
1401 NewSpaceScavenger::IterateBody(object->map(), object);
1404 NewSpacePage::FromLimit(new_space_front)->next_page()->body();
1408 // Promote and process all the to-be-promoted objects.
1410 StoreBufferRebuildScope scope(this,
1412 &ScavengeStoreBufferCallback);
1413 while (!promotion_queue()->is_empty()) {
1416 promotion_queue()->remove(&target, &size);
1418 // A promoted object might already have been partially visited
1419 // during old space pointer iteration. Thus we search specifically
1420 // for pointers to the from semispace instead of looking for pointers
1421 // to new space.
1422 ASSERT(!target->IsMap());
1423 IterateAndMarkPointersToFromSpace(target->address(),
1424 target->address() + size,
1429 // Take another spin if there are now unswept objects in new space
1430 // (there are currently no more unswept promoted objects).
1431 } while (new_space_front != new_space_.top());
1433 return new_space_front;
1437 enum LoggingAndProfiling {
1438 LOGGING_AND_PROFILING_ENABLED,
1439 LOGGING_AND_PROFILING_DISABLED
1443 enum MarksHandling { TRANSFER_MARKS, IGNORE_MARKS };
1446 template<MarksHandling marks_handling,
1447 LoggingAndProfiling logging_and_profiling_mode>
1448 class ScavengingVisitor : public StaticVisitorBase {
1450 static void Initialize() {
1451 table_.Register(kVisitSeqAsciiString, &EvacuateSeqAsciiString);
1452 table_.Register(kVisitSeqTwoByteString, &EvacuateSeqTwoByteString);
1453 table_.Register(kVisitShortcutCandidate, &EvacuateShortcutCandidate);
1454 table_.Register(kVisitByteArray, &EvacuateByteArray);
1455 table_.Register(kVisitFixedArray, &EvacuateFixedArray);
1456 table_.Register(kVisitFixedDoubleArray, &EvacuateFixedDoubleArray);
1458 table_.Register(kVisitGlobalContext,
1459 &ObjectEvacuationStrategy<POINTER_OBJECT>::
1460 template VisitSpecialized<Context::kSize>);
1462 table_.Register(kVisitConsString,
1463 &ObjectEvacuationStrategy<POINTER_OBJECT>::
1464 template VisitSpecialized<ConsString::kSize>);
1466 table_.Register(kVisitSlicedString,
1467 &ObjectEvacuationStrategy<POINTER_OBJECT>::
1468 template VisitSpecialized<SlicedString::kSize>);
1470 table_.Register(kVisitSharedFunctionInfo,
1471 &ObjectEvacuationStrategy<POINTER_OBJECT>::
1472 template VisitSpecialized<SharedFunctionInfo::kSize>);
1474 table_.Register(kVisitJSWeakMap,
1475 &ObjectEvacuationStrategy<POINTER_OBJECT>::
1478 table_.Register(kVisitJSRegExp,
1479 &ObjectEvacuationStrategy<POINTER_OBJECT>::
1482 if (marks_handling == IGNORE_MARKS) {
1483 table_.Register(kVisitJSFunction,
1484 &ObjectEvacuationStrategy<POINTER_OBJECT>::
1485 template VisitSpecialized<JSFunction::kSize>);
1487 table_.Register(kVisitJSFunction, &EvacuateJSFunction);
1490 table_.RegisterSpecializations<ObjectEvacuationStrategy<DATA_OBJECT>,
1492 kVisitDataObjectGeneric>();
1494 table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>,
1496 kVisitJSObjectGeneric>();
1498 table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>,
1500 kVisitStructGeneric>();
1503 static VisitorDispatchTable<ScavengingCallback>* GetTable() {
1508 enum ObjectContents { DATA_OBJECT, POINTER_OBJECT };
1509 enum SizeRestriction { SMALL, UNKNOWN_SIZE };
1511 static void RecordCopiedObject(Heap* heap, HeapObject* obj) {
1512 bool should_record = false;
1514 should_record = FLAG_heap_stats;
1516 should_record = should_record || FLAG_log_gc;
1517 if (should_record) {
1518 if (heap->new_space()->Contains(obj)) {
1519 heap->new_space()->RecordAllocation(obj);
1521 heap->new_space()->RecordPromotion(obj);
1526 // Helper function used by CopyObject to copy a source object to an
1527 // allocated target object and update the forwarding pointer in the
1528 // source object.
1529 INLINE(static void MigrateObject(Heap* heap,
1533 // Copy the content of source to target.
1534 heap->CopyBlock(target->address(), source->address(), size);
1536 // Set the forwarding address.
1537 source->set_map_word(MapWord::FromForwardingAddress(target));
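// From this point on the source object's map word no longer holds a map but
// the forwarding address, so any visitor that later reaches another slot
// pointing at the same object sees IsForwardingAddress() and simply updates
// that slot to the new location instead of copying the object again.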
1539 if (logging_and_profiling_mode == LOGGING_AND_PROFILING_ENABLED) {
1540 // Update NewSpace stats if necessary.
1541 RecordCopiedObject(heap, target);
1542 HEAP_PROFILE(heap, ObjectMoveEvent(source->address(), target->address()));
1543 Isolate* isolate = heap->isolate();
1544 if (isolate->logger()->is_logging() ||
1545 CpuProfiler::is_profiling(isolate)) {
1546 if (target->IsSharedFunctionInfo()) {
1547 PROFILE(isolate, SharedFunctionInfoMoveEvent(
1548 source->address(), target->address()));
1553 if (marks_handling == TRANSFER_MARKS) {
1554 if (Marking::TransferColor(source, target)) {
1555 MemoryChunk::IncrementLiveBytes(target->address(), size);
1560 template<ObjectContents object_contents, SizeRestriction size_restriction>
1561 static inline void EvacuateObject(Map* map,
1565 SLOW_ASSERT((size_restriction != SMALL) ||
1566 (object_size <= Page::kMaxHeapObjectSize));
1567 SLOW_ASSERT(object->Size() == object_size);
1569 Heap* heap = map->GetHeap();
1570 if (heap->ShouldBePromoted(object->address(), object_size)) {
1571 MaybeObject* maybe_result;
1573 if ((size_restriction != SMALL) &&
1574 (object_size > Page::kMaxHeapObjectSize)) {
1575 maybe_result = heap->lo_space()->AllocateRaw(object_size,
1578 if (object_contents == DATA_OBJECT) {
1579 maybe_result = heap->old_data_space()->AllocateRaw(object_size);
1581 maybe_result = heap->old_pointer_space()->AllocateRaw(object_size);
1585 Object* result = NULL; // Initialization to please compiler.
1586 if (maybe_result->ToObject(&result)) {
1587 HeapObject* target = HeapObject::cast(result);
1589 // Order is important: slot might be inside of the target if target
1590 // was allocated over a dead object and slot comes from the store
1591 // buffer.
1593 MigrateObject(heap, object, target, object_size);
1595 if (object_contents == POINTER_OBJECT) {
1596 heap->promotion_queue()->insert(target, object_size);
1599 heap->tracer()->increment_promoted_objects_size(object_size);
1603 MaybeObject* allocation = heap->new_space()->AllocateRaw(object_size);
1604 heap->promotion_queue()->SetNewLimit(heap->new_space()->top());
1605 Object* result = allocation->ToObjectUnchecked();
1606 HeapObject* target = HeapObject::cast(result);
1608 // Order is important: slot might be inside of the target if target
1609 // was allocated over a dead object and slot comes from the store
1610 // buffer.
1612 MigrateObject(heap, object, target, object_size);
1617 static inline void EvacuateJSFunction(Map* map,
1619 HeapObject* object) {
1620 ObjectEvacuationStrategy<POINTER_OBJECT>::
1621 template VisitSpecialized<JSFunction::kSize>(map, slot, object);
1623 HeapObject* target = *slot;
1624 MarkBit mark_bit = Marking::MarkBitFrom(target);
1625 if (Marking::IsBlack(mark_bit)) {
1626 // This object is black and it might not be rescanned by the marker.
1627 // We should explicitly record the code entry slot for compaction because
1628 // promotion queue processing (IterateAndMarkPointersToFromSpace) will
1629 // miss it as it is not HeapObject-tagged.
1630 Address code_entry_slot =
1631 target->address() + JSFunction::kCodeEntryOffset;
1632 Code* code = Code::cast(Code::GetObjectFromEntryAddress(code_entry_slot));
1633 map->GetHeap()->mark_compact_collector()->
1634 RecordCodeEntrySlot(code_entry_slot, code);
1639 static inline void EvacuateFixedArray(Map* map,
1641 HeapObject* object) {
1642 int object_size = FixedArray::BodyDescriptor::SizeOf(map, object);
1643 EvacuateObject<POINTER_OBJECT, UNKNOWN_SIZE>(map,
1650 static inline void EvacuateFixedDoubleArray(Map* map,
1652 HeapObject* object) {
1653 int length = reinterpret_cast<FixedDoubleArray*>(object)->length();
1654 int object_size = FixedDoubleArray::SizeFor(length);
1655 EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE>(map,
1662 static inline void EvacuateByteArray(Map* map,
1664 HeapObject* object) {
1665 int object_size = reinterpret_cast<ByteArray*>(object)->ByteArraySize();
1666 EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE>(map, slot, object, object_size);
1670 static inline void EvacuateSeqAsciiString(Map* map,
1672 HeapObject* object) {
1673 int object_size = SeqAsciiString::cast(object)->
1674 SeqAsciiStringSize(map->instance_type());
1675 EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE>(map, slot, object, object_size);
1679 static inline void EvacuateSeqTwoByteString(Map* map,
1681 HeapObject* object) {
1682 int object_size = SeqTwoByteString::cast(object)->
1683 SeqTwoByteStringSize(map->instance_type());
1684 EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE>(map, slot, object, object_size);
1688 static inline bool IsShortcutCandidate(int type) {
1689 return ((type & kShortcutTypeMask) == kShortcutTypeTag);
1692 static inline void EvacuateShortcutCandidate(Map* map,
1694 HeapObject* object) {
1695 ASSERT(IsShortcutCandidate(map->instance_type()));
1697 Heap* heap = map->GetHeap();
1699 if (marks_handling == IGNORE_MARKS &&
1700 ConsString::cast(object)->unchecked_second() ==
1701 heap->empty_string()) {
1703 HeapObject::cast(ConsString::cast(object)->unchecked_first());
1707 if (!heap->InNewSpace(first)) {
1708 object->set_map_word(MapWord::FromForwardingAddress(first));
1712 MapWord first_word = first->map_word();
1713 if (first_word.IsForwardingAddress()) {
1714 HeapObject* target = first_word.ToForwardingAddress();
1717 object->set_map_word(MapWord::FromForwardingAddress(target));
1721 heap->DoScavengeObject(first->map(), slot, first);
1722 object->set_map_word(MapWord::FromForwardingAddress(*slot));
1726 int object_size = ConsString::kSize;
1727 EvacuateObject<POINTER_OBJECT, SMALL>(map, slot, object, object_size);
1730 template<ObjectContents object_contents>
1731 class ObjectEvacuationStrategy {
1733 template<int object_size>
1734 static inline void VisitSpecialized(Map* map,
1736 HeapObject* object) {
1737 EvacuateObject<object_contents, SMALL>(map, slot, object, object_size);
1740 static inline void Visit(Map* map,
1742 HeapObject* object) {
1743 int object_size = map->instance_size();
1744 EvacuateObject<object_contents, SMALL>(map, slot, object, object_size);
1748 static VisitorDispatchTable<ScavengingCallback> table_;
1752 template<MarksHandling marks_handling,
1753 LoggingAndProfiling logging_and_profiling_mode>
1754 VisitorDispatchTable<ScavengingCallback>
1755 ScavengingVisitor<marks_handling, logging_and_profiling_mode>::table_;
1758 static void InitializeScavengingVisitorsTables() {
1759 ScavengingVisitor<TRANSFER_MARKS,
1760 LOGGING_AND_PROFILING_DISABLED>::Initialize();
1761 ScavengingVisitor<IGNORE_MARKS, LOGGING_AND_PROFILING_DISABLED>::Initialize();
1762 ScavengingVisitor<TRANSFER_MARKS,
1763 LOGGING_AND_PROFILING_ENABLED>::Initialize();
1764 ScavengingVisitor<IGNORE_MARKS, LOGGING_AND_PROFILING_ENABLED>::Initialize();
1768 void Heap::SelectScavengingVisitorsTable() {
1769 bool logging_and_profiling =
1770 isolate()->logger()->is_logging() ||
1771 CpuProfiler::is_profiling(isolate()) ||
1772 (isolate()->heap_profiler() != NULL &&
1773 isolate()->heap_profiler()->is_profiling());
1775 if (!incremental_marking()->IsMarking()) {
1776 if (!logging_and_profiling) {
1777 scavenging_visitors_table_.CopyFrom(
1778 ScavengingVisitor<IGNORE_MARKS,
1779 LOGGING_AND_PROFILING_DISABLED>::GetTable());
1781 scavenging_visitors_table_.CopyFrom(
1782 ScavengingVisitor<IGNORE_MARKS,
1783 LOGGING_AND_PROFILING_ENABLED>::GetTable());
1786 if (!logging_and_profiling) {
1787 scavenging_visitors_table_.CopyFrom(
1788 ScavengingVisitor<TRANSFER_MARKS,
1789 LOGGING_AND_PROFILING_DISABLED>::GetTable());
1791 scavenging_visitors_table_.CopyFrom(
1792 ScavengingVisitor<TRANSFER_MARKS,
1793 LOGGING_AND_PROFILING_ENABLED>::GetTable());
1796 if (incremental_marking()->IsCompacting()) {
1797 // When compacting, forbid short-circuiting of cons-strings.
1798 // Scavenging code relies on the fact that a new space object
1799 // can't be evacuated into an evacuation candidate, but
1800 // short-circuiting violates this assumption.
1801 scavenging_visitors_table_.Register(
1802 StaticVisitorBase::kVisitShortcutCandidate,
1803 scavenging_visitors_table_.GetVisitorById(
1804 StaticVisitorBase::kVisitConsString));
1810 void Heap::ScavengeObjectSlow(HeapObject** p, HeapObject* object) {
1811 SLOW_ASSERT(HEAP->InFromSpace(object));
1812 MapWord first_word = object->map_word();
1813 SLOW_ASSERT(!first_word.IsForwardingAddress());
1814 Map* map = first_word.ToMap();
1815 map->GetHeap()->DoScavengeObject(map, p, object);
1819 MaybeObject* Heap::AllocatePartialMap(InstanceType instance_type,
1820 int instance_size) {
1822 { MaybeObject* maybe_result = AllocateRawMap();
1823 if (!maybe_result->ToObject(&result)) return maybe_result;
1826 // Map::cast cannot be used due to uninitialized map field.
1827 reinterpret_cast<Map*>(result)->set_map(raw_unchecked_meta_map());
1828 reinterpret_cast<Map*>(result)->set_instance_type(instance_type);
1829 reinterpret_cast<Map*>(result)->set_instance_size(instance_size);
1830 reinterpret_cast<Map*>(result)->set_visitor_id(
1831 StaticVisitorBase::GetVisitorId(instance_type, instance_size));
1832 reinterpret_cast<Map*>(result)->set_inobject_properties(0);
1833 reinterpret_cast<Map*>(result)->set_pre_allocated_property_fields(0);
1834 reinterpret_cast<Map*>(result)->set_unused_property_fields(0);
1835 reinterpret_cast<Map*>(result)->set_bit_field(0);
1836 reinterpret_cast<Map*>(result)->set_bit_field2(0);
1841 MaybeObject* Heap::AllocateMap(InstanceType instance_type,
1843 ElementsKind elements_kind) {
1845 { MaybeObject* maybe_result = AllocateRawMap();
1846 if (!maybe_result->ToObject(&result)) return maybe_result;
1849 Map* map = reinterpret_cast<Map*>(result);
1850 map->set_map_no_write_barrier(meta_map());
1851 map->set_instance_type(instance_type);
1852 map->set_visitor_id(
1853 StaticVisitorBase::GetVisitorId(instance_type, instance_size));
1854 map->set_prototype(null_value(), SKIP_WRITE_BARRIER);
1855 map->set_constructor(null_value(), SKIP_WRITE_BARRIER);
1856 map->set_instance_size(instance_size);
1857 map->set_inobject_properties(0);
1858 map->set_pre_allocated_property_fields(0);
1859 map->init_instance_descriptors();
1860 map->set_code_cache(empty_fixed_array(), SKIP_WRITE_BARRIER);
1861 map->set_prototype_transitions(empty_fixed_array(), SKIP_WRITE_BARRIER);
1862 map->set_unused_property_fields(0);
1863 map->set_bit_field(0);
1864 map->set_bit_field2(1 << Map::kIsExtensible);
1865 map->set_elements_kind(elements_kind);
1867 // If the map object is aligned, fill the padding area with Smi 0 objects.
1868 if (Map::kPadStart < Map::kSize) {
1869 memset(reinterpret_cast<byte*>(map) + Map::kPadStart - kHeapObjectTag,
1871 Map::kSize - Map::kPadStart);
1877 MaybeObject* Heap::AllocateCodeCache() {
1879 { MaybeObject* maybe_result = AllocateStruct(CODE_CACHE_TYPE);
1880 if (!maybe_result->ToObject(&result)) return maybe_result;
1882 CodeCache* code_cache = CodeCache::cast(result);
1883 code_cache->set_default_cache(empty_fixed_array(), SKIP_WRITE_BARRIER);
1884 code_cache->set_normal_type_cache(undefined_value(), SKIP_WRITE_BARRIER);
1889 MaybeObject* Heap::AllocatePolymorphicCodeCache() {
1890 return AllocateStruct(POLYMORPHIC_CODE_CACHE_TYPE);
1894 MaybeObject* Heap::AllocateAccessorPair() {
1896 { MaybeObject* maybe_result = AllocateStruct(ACCESSOR_PAIR_TYPE);
1897 if (!maybe_result->ToObject(&result)) return maybe_result;
1899 AccessorPair* accessors = AccessorPair::cast(result);
1900 // Later we will have to distinguish between undefined and the hole...
1901 // accessors->set_getter(the_hole_value(), SKIP_WRITE_BARRIER);
1902 // accessors->set_setter(the_hole_value(), SKIP_WRITE_BARRIER);
1907 const Heap::StringTypeTable Heap::string_type_table[] = {
1908 #define STRING_TYPE_ELEMENT(type, size, name, camel_name) \
1909 {type, size, k##camel_name##MapRootIndex},
1910 STRING_TYPE_LIST(STRING_TYPE_ELEMENT)
1911 #undef STRING_TYPE_ELEMENT
1915 const Heap::ConstantSymbolTable Heap::constant_symbol_table[] = {
1916 #define CONSTANT_SYMBOL_ELEMENT(name, contents) \
1917 {contents, k##name##RootIndex},
1918 SYMBOL_LIST(CONSTANT_SYMBOL_ELEMENT)
1919 #undef CONSTANT_SYMBOL_ELEMENT
1923 const Heap::StructTable Heap::struct_table[] = {
1924 #define STRUCT_TABLE_ELEMENT(NAME, Name, name) \
1925 { NAME##_TYPE, Name::kSize, k##Name##MapRootIndex },
1926 STRUCT_LIST(STRUCT_TABLE_ELEMENT)
1927 #undef STRUCT_TABLE_ELEMENT
1931 bool Heap::CreateInitialMaps() {
1933 { MaybeObject* maybe_obj = AllocatePartialMap(MAP_TYPE, Map::kSize);
1934 if (!maybe_obj->ToObject(&obj)) return false;
1936 // Map::cast cannot be used due to uninitialized map field.
1937 Map* new_meta_map = reinterpret_cast<Map*>(obj);
1938 set_meta_map(new_meta_map);
1939 new_meta_map->set_map(new_meta_map);
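// The meta map is its own map, which closes the bootstrap cycle. Its other
// pointer fields (instance descriptors, code cache, prototype, constructor)
// are still uninitialized at this point and are fixed up further down, once
// the empty fixed array and the null value have been allocated.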
1941 { MaybeObject* maybe_obj =
1942 AllocatePartialMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
1943 if (!maybe_obj->ToObject(&obj)) return false;
1945 set_fixed_array_map(Map::cast(obj));
1947 { MaybeObject* maybe_obj = AllocatePartialMap(ODDBALL_TYPE, Oddball::kSize);
1948 if (!maybe_obj->ToObject(&obj)) return false;
1950 set_oddball_map(Map::cast(obj));
1952 // Allocate the empty array.
1953 { MaybeObject* maybe_obj = AllocateEmptyFixedArray();
1954 if (!maybe_obj->ToObject(&obj)) return false;
1956 set_empty_fixed_array(FixedArray::cast(obj));
1958 { MaybeObject* maybe_obj = Allocate(oddball_map(), OLD_POINTER_SPACE);
1959 if (!maybe_obj->ToObject(&obj)) return false;
1961 set_null_value(Oddball::cast(obj));
1962 Oddball::cast(obj)->set_kind(Oddball::kNull);
1964 { MaybeObject* maybe_obj = Allocate(oddball_map(), OLD_POINTER_SPACE);
1965 if (!maybe_obj->ToObject(&obj)) return false;
1967 set_undefined_value(Oddball::cast(obj));
1968 Oddball::cast(obj)->set_kind(Oddball::kUndefined);
1969 ASSERT(!InNewSpace(undefined_value()));
1971 // Allocate the empty descriptor array.
1972 { MaybeObject* maybe_obj = AllocateEmptyFixedArray();
1973 if (!maybe_obj->ToObject(&obj)) return false;
1975 set_empty_descriptor_array(DescriptorArray::cast(obj));
1977 // Fix the instance_descriptors for the existing maps.
1978 meta_map()->init_instance_descriptors();
1979 meta_map()->set_code_cache(empty_fixed_array());
1980 meta_map()->set_prototype_transitions(empty_fixed_array());
1982 fixed_array_map()->init_instance_descriptors();
1983 fixed_array_map()->set_code_cache(empty_fixed_array());
1984 fixed_array_map()->set_prototype_transitions(empty_fixed_array());
1986 oddball_map()->init_instance_descriptors();
1987 oddball_map()->set_code_cache(empty_fixed_array());
1988 oddball_map()->set_prototype_transitions(empty_fixed_array());
1990 // Fix prototype object for existing maps.
1991 meta_map()->set_prototype(null_value());
1992 meta_map()->set_constructor(null_value());
1994 fixed_array_map()->set_prototype(null_value());
1995 fixed_array_map()->set_constructor(null_value());
1997 oddball_map()->set_prototype(null_value());
1998 oddball_map()->set_constructor(null_value());
2000 { MaybeObject* maybe_obj =
2001 AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2002 if (!maybe_obj->ToObject(&obj)) return false;
2004 set_fixed_cow_array_map(Map::cast(obj));
2005 ASSERT(fixed_array_map() != fixed_cow_array_map());
2007 { MaybeObject* maybe_obj =
2008 AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2009 if (!maybe_obj->ToObject(&obj)) return false;
2011 set_scope_info_map(Map::cast(obj));
2013 { MaybeObject* maybe_obj = AllocateMap(HEAP_NUMBER_TYPE, HeapNumber::kSize);
2014 if (!maybe_obj->ToObject(&obj)) return false;
2016 set_heap_number_map(Map::cast(obj));
2018 { MaybeObject* maybe_obj = AllocateMap(FOREIGN_TYPE, Foreign::kSize);
2019 if (!maybe_obj->ToObject(&obj)) return false;
2021 set_foreign_map(Map::cast(obj));
2023 for (unsigned i = 0; i < ARRAY_SIZE(string_type_table); i++) {
2024 const StringTypeTable& entry = string_type_table[i];
2025 { MaybeObject* maybe_obj = AllocateMap(entry.type, entry.size);
2026 if (!maybe_obj->ToObject(&obj)) return false;
2028 roots_[entry.index] = Map::cast(obj);
2031 { MaybeObject* maybe_obj = AllocateMap(STRING_TYPE, kVariableSizeSentinel);
2032 if (!maybe_obj->ToObject(&obj)) return false;
2034 set_undetectable_string_map(Map::cast(obj));
2035 Map::cast(obj)->set_is_undetectable();
2037 { MaybeObject* maybe_obj =
2038 AllocateMap(ASCII_STRING_TYPE, kVariableSizeSentinel);
2039 if (!maybe_obj->ToObject(&obj)) return false;
2041 set_undetectable_ascii_string_map(Map::cast(obj));
2042 Map::cast(obj)->set_is_undetectable();
2044 { MaybeObject* maybe_obj =
2045 AllocateMap(FIXED_DOUBLE_ARRAY_TYPE, kVariableSizeSentinel);
2046 if (!maybe_obj->ToObject(&obj)) return false;
2048 set_fixed_double_array_map(Map::cast(obj));
2050 { MaybeObject* maybe_obj =
2051 AllocateMap(BYTE_ARRAY_TYPE, kVariableSizeSentinel);
2052 if (!maybe_obj->ToObject(&obj)) return false;
2054 set_byte_array_map(Map::cast(obj));
2056 { MaybeObject* maybe_obj =
2057 AllocateMap(FREE_SPACE_TYPE, kVariableSizeSentinel);
2058 if (!maybe_obj->ToObject(&obj)) return false;
2060 set_free_space_map(Map::cast(obj));
2062 { MaybeObject* maybe_obj = AllocateByteArray(0, TENURED);
2063 if (!maybe_obj->ToObject(&obj)) return false;
2065 set_empty_byte_array(ByteArray::cast(obj));
2067 { MaybeObject* maybe_obj =
2068 AllocateMap(EXTERNAL_PIXEL_ARRAY_TYPE, ExternalArray::kAlignedSize);
2069 if (!maybe_obj->ToObject(&obj)) return false;
2071 set_external_pixel_array_map(Map::cast(obj));
2073 { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_BYTE_ARRAY_TYPE,
2074 ExternalArray::kAlignedSize);
2075 if (!maybe_obj->ToObject(&obj)) return false;
2077 set_external_byte_array_map(Map::cast(obj));
2079 { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE,
2080 ExternalArray::kAlignedSize);
2081 if (!maybe_obj->ToObject(&obj)) return false;
2083 set_external_unsigned_byte_array_map(Map::cast(obj));
2085 { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_SHORT_ARRAY_TYPE,
2086 ExternalArray::kAlignedSize);
2087 if (!maybe_obj->ToObject(&obj)) return false;
2089 set_external_short_array_map(Map::cast(obj));
2091 { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE,
2092 ExternalArray::kAlignedSize);
2093 if (!maybe_obj->ToObject(&obj)) return false;
2095 set_external_unsigned_short_array_map(Map::cast(obj));
2097 { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_INT_ARRAY_TYPE,
2098 ExternalArray::kAlignedSize);
2099 if (!maybe_obj->ToObject(&obj)) return false;
2101 set_external_int_array_map(Map::cast(obj));
2103 { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_UNSIGNED_INT_ARRAY_TYPE,
2104 ExternalArray::kAlignedSize);
2105 if (!maybe_obj->ToObject(&obj)) return false;
2107 set_external_unsigned_int_array_map(Map::cast(obj));
2109 { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_FLOAT_ARRAY_TYPE,
2110 ExternalArray::kAlignedSize);
2111 if (!maybe_obj->ToObject(&obj)) return false;
2113 set_external_float_array_map(Map::cast(obj));
2115 { MaybeObject* maybe_obj =
2116 AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2117 if (!maybe_obj->ToObject(&obj)) return false;
2119 set_non_strict_arguments_elements_map(Map::cast(obj));
2121 { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_DOUBLE_ARRAY_TYPE,
2122 ExternalArray::kAlignedSize);
2123 if (!maybe_obj->ToObject(&obj)) return false;
2125 set_external_double_array_map(Map::cast(obj));
2127 { MaybeObject* maybe_obj = AllocateMap(CODE_TYPE, kVariableSizeSentinel);
2128 if (!maybe_obj->ToObject(&obj)) return false;
2130 set_code_map(Map::cast(obj));
2132 { MaybeObject* maybe_obj = AllocateMap(JS_GLOBAL_PROPERTY_CELL_TYPE,
2133 JSGlobalPropertyCell::kSize);
2134 if (!maybe_obj->ToObject(&obj)) return false;
2136 set_global_property_cell_map(Map::cast(obj));
2138 { MaybeObject* maybe_obj = AllocateMap(FILLER_TYPE, kPointerSize);
2139 if (!maybe_obj->ToObject(&obj)) return false;
2141 set_one_pointer_filler_map(Map::cast(obj));
2143 { MaybeObject* maybe_obj = AllocateMap(FILLER_TYPE, 2 * kPointerSize);
2144 if (!maybe_obj->ToObject(&obj)) return false;
2146 set_two_pointer_filler_map(Map::cast(obj));
2148 for (unsigned i = 0; i < ARRAY_SIZE(struct_table); i++) {
2149 const StructTable& entry = struct_table[i];
2150 { MaybeObject* maybe_obj = AllocateMap(entry.type, entry.size);
2151 if (!maybe_obj->ToObject(&obj)) return false;
2153 roots_[entry.index] = Map::cast(obj);
2156 { MaybeObject* maybe_obj =
2157 AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2158 if (!maybe_obj->ToObject(&obj)) return false;
2160 set_hash_table_map(Map::cast(obj));
2162 { MaybeObject* maybe_obj =
2163 AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2164 if (!maybe_obj->ToObject(&obj)) return false;
2166 set_function_context_map(Map::cast(obj));
2168 { MaybeObject* maybe_obj =
2169 AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2170 if (!maybe_obj->ToObject(&obj)) return false;
2172 set_catch_context_map(Map::cast(obj));
2174 { MaybeObject* maybe_obj =
2175 AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2176 if (!maybe_obj->ToObject(&obj)) return false;
2178 set_with_context_map(Map::cast(obj));
2180 { MaybeObject* maybe_obj =
2181 AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2182 if (!maybe_obj->ToObject(&obj)) return false;
2184 set_block_context_map(Map::cast(obj));
2186 { MaybeObject* maybe_obj =
2187 AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2188 if (!maybe_obj->ToObject(&obj)) return false;
2190 Map* global_context_map = Map::cast(obj);
2191 global_context_map->set_visitor_id(StaticVisitorBase::kVisitGlobalContext);
2192 set_global_context_map(global_context_map);
2194 { MaybeObject* maybe_obj = AllocateMap(SHARED_FUNCTION_INFO_TYPE,
2195 SharedFunctionInfo::kAlignedSize);
2196 if (!maybe_obj->ToObject(&obj)) return false;
2198 set_shared_function_info_map(Map::cast(obj));
2200 { MaybeObject* maybe_obj = AllocateMap(JS_MESSAGE_OBJECT_TYPE,
2201 JSMessageObject::kSize);
2202 if (!maybe_obj->ToObject(&obj)) return false;
2204 set_message_object_map(Map::cast(obj));
2206 ASSERT(!InNewSpace(empty_fixed_array()));
2211 MaybeObject* Heap::AllocateHeapNumber(double value, PretenureFlag pretenure) {
2212 // Statically ensure that it is safe to allocate heap numbers in paged spaces.
2214 STATIC_ASSERT(HeapNumber::kSize <= Page::kMaxHeapObjectSize);
2215 AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
2218 { MaybeObject* maybe_result =
2219 AllocateRaw(HeapNumber::kSize, space, OLD_DATA_SPACE);
2220 if (!maybe_result->ToObject(&result)) return maybe_result;
2223 HeapObject::cast(result)->set_map_no_write_barrier(heap_number_map());
2224 HeapNumber::cast(result)->set_value(value);
2229 MaybeObject* Heap::AllocateHeapNumber(double value) {
2230 // Use the general version if we're forced to always allocate.
2231 if (always_allocate()) return AllocateHeapNumber(value, TENURED);
2233 // This version of AllocateHeapNumber is optimized for
2234 // allocation in new space.
2235 STATIC_ASSERT(HeapNumber::kSize <= Page::kMaxHeapObjectSize);
2236 ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
2238 { MaybeObject* maybe_result = new_space_.AllocateRaw(HeapNumber::kSize);
2239 if (!maybe_result->ToObject(&result)) return maybe_result;
2241 HeapObject::cast(result)->set_map_no_write_barrier(heap_number_map());
2242 HeapNumber::cast(result)->set_value(value);
2247 MaybeObject* Heap::AllocateJSGlobalPropertyCell(Object* value) {
2249 { MaybeObject* maybe_result = AllocateRawCell();
2250 if (!maybe_result->ToObject(&result)) return maybe_result;
2252 HeapObject::cast(result)->set_map_no_write_barrier(
2253 global_property_cell_map());
2254 JSGlobalPropertyCell::cast(result)->set_value(value);
2259 MaybeObject* Heap::CreateOddball(const char* to_string,
2260 Object* to_number,
2261 byte kind) {
2263 { MaybeObject* maybe_result = Allocate(oddball_map(), OLD_POINTER_SPACE);
2264 if (!maybe_result->ToObject(&result)) return maybe_result;
2266 return Oddball::cast(result)->Initialize(to_string, to_number, kind);
2270 bool Heap::CreateApiObjects() {
2273 { MaybeObject* maybe_obj = AllocateMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
2274 if (!maybe_obj->ToObject(&obj)) return false;
2276 // Don't use Smi-only elements optimizations for objects with the neander
2277 // map. There are too many cases where element values are set directly,
2278 // bypassing the bottleneck that traps the Smi-only -> fast elements
2279 // transition, and there appears to be no benefit in optimizing this case.
2280 Map* new_neander_map = Map::cast(obj);
2281 new_neander_map->set_elements_kind(FAST_ELEMENTS);
2282 set_neander_map(new_neander_map);
2284 { MaybeObject* maybe_obj = AllocateJSObjectFromMap(neander_map());
2285 if (!maybe_obj->ToObject(&obj)) return false;
2288 { MaybeObject* maybe_elements = AllocateFixedArray(2);
2289 if (!maybe_elements->ToObject(&elements)) return false;
2291 FixedArray::cast(elements)->set(0, Smi::FromInt(0));
2292 JSObject::cast(obj)->set_elements(FixedArray::cast(elements));
2293 set_message_listeners(JSObject::cast(obj));
2299 void Heap::CreateJSEntryStub() {
2301 set_js_entry_code(*stub.GetCode());
2305 void Heap::CreateJSConstructEntryStub() {
2306 JSConstructEntryStub stub;
2307 set_js_construct_entry_code(*stub.GetCode());
2311 void Heap::CreateFixedStubs() {
2312 // Here we create roots for fixed stubs. They are needed at GC
2313 // for cooking and uncooking (check out frames.cc).
2314 // This eliminates the need for doing a dictionary lookup in the
2315 // stub cache for these stubs.
2317 // gcc-4.4 has a problem generating correct code for the following snippet:
2318 // { JSEntryStub stub;
2319 // js_entry_code_ = *stub.GetCode();
2321 // { JSConstructEntryStub stub;
2322 // js_construct_entry_code_ = *stub.GetCode();
2324 // To work around the problem, make separate functions without inlining.
2325 Heap::CreateJSEntryStub();
2326 Heap::CreateJSConstructEntryStub();
2328 // Create stubs that should be there, so we don't unexpectedly have to
2329 // create them if we need them during the creation of another stub.
2330 // Stub creation mixes raw pointers and handles in an unsafe manner so
2331 // we cannot create stubs while we are creating stubs.
2332 CodeStub::GenerateStubsAheadOfTime();
2336 bool Heap::CreateInitialObjects() {
2339 // The -0 value must be set before NumberFromDouble works.
2340 { MaybeObject* maybe_obj = AllocateHeapNumber(-0.0, TENURED);
2341 if (!maybe_obj->ToObject(&obj)) return false;
2343 set_minus_zero_value(HeapNumber::cast(obj));
2344 ASSERT(signbit(minus_zero_value()->Number()) != 0);
2346 { MaybeObject* maybe_obj = AllocateHeapNumber(OS::nan_value(), TENURED);
2347 if (!maybe_obj->ToObject(&obj)) return false;
2349 set_nan_value(HeapNumber::cast(obj));
2351 { MaybeObject* maybe_obj = AllocateHeapNumber(V8_INFINITY, TENURED);
2352 if (!maybe_obj->ToObject(&obj)) return false;
2354 set_infinity_value(HeapNumber::cast(obj));
2356 // The hole has not been created yet, but we want to put something
2357 // predictable in the gaps in the symbol table, so let's make that Smi zero.
2358 set_the_hole_value(reinterpret_cast<Oddball*>(Smi::FromInt(0)));
2360 // Allocate initial symbol table.
2361 { MaybeObject* maybe_obj = SymbolTable::Allocate(kInitialSymbolTableSize);
2362 if (!maybe_obj->ToObject(&obj)) return false;
2364 // Don't use set_symbol_table() due to asserts.
2365 roots_[kSymbolTableRootIndex] = obj;
2367 // Finish initializing oddballs after creating the symbol table.
2368 { MaybeObject* maybe_obj =
2369 undefined_value()->Initialize("undefined",
2370 nan_value(),
2371 Oddball::kUndefined);
2372 if (!maybe_obj->ToObject(&obj)) return false;
2375 // Initialize the null_value.
2376 { MaybeObject* maybe_obj =
2377 null_value()->Initialize("null", Smi::FromInt(0), Oddball::kNull);
2378 if (!maybe_obj->ToObject(&obj)) return false;
2381 { MaybeObject* maybe_obj = CreateOddball("true",
2384 if (!maybe_obj->ToObject(&obj)) return false;
2386 set_true_value(Oddball::cast(obj));
2388 { MaybeObject* maybe_obj = CreateOddball("false",
2391 if (!maybe_obj->ToObject(&obj)) return false;
2393 set_false_value(Oddball::cast(obj));
2395 { MaybeObject* maybe_obj = CreateOddball("hole",
2398 if (!maybe_obj->ToObject(&obj)) return false;
2400 set_the_hole_value(Oddball::cast(obj));
2402 { MaybeObject* maybe_obj = CreateOddball("arguments_marker",
2404 Oddball::kArgumentMarker);
2405 if (!maybe_obj->ToObject(&obj)) return false;
2407 set_arguments_marker(Oddball::cast(obj));
2409 { MaybeObject* maybe_obj = CreateOddball("no_interceptor_result_sentinel",
2412 if (!maybe_obj->ToObject(&obj)) return false;
2414 set_no_interceptor_result_sentinel(obj);
2416 { MaybeObject* maybe_obj = CreateOddball("termination_exception",
2419 if (!maybe_obj->ToObject(&obj)) return false;
2421 set_termination_exception(obj);
2423 { MaybeObject* maybe_obj = CreateOddball("frame_alignment_marker",
2426 if (!maybe_obj->ToObject(&obj)) return false;
2428 set_frame_alignment_marker(Oddball::cast(obj));
2429 STATIC_ASSERT(Oddball::kLeastHiddenOddballNumber == -5);
2431 // Allocate the empty string.
2432 { MaybeObject* maybe_obj = AllocateRawAsciiString(0, TENURED);
2433 if (!maybe_obj->ToObject(&obj)) return false;
2435 set_empty_string(String::cast(obj));
2437 for (unsigned i = 0; i < ARRAY_SIZE(constant_symbol_table); i++) {
2438 { MaybeObject* maybe_obj =
2439 LookupAsciiSymbol(constant_symbol_table[i].contents);
2440 if (!maybe_obj->ToObject(&obj)) return false;
2442 roots_[constant_symbol_table[i].index] = String::cast(obj);
2445 // Allocate the hidden symbol which is used to identify the hidden properties
2446 // in JSObjects. The hash code has a special value so that it will not match
2447 // the empty string when searching for the property. It cannot be part of the
2448 // loop above because it needs to be allocated manually with the special
2449 // hash code in place. The hash code for the hidden_symbol is zero to ensure
2450 // that it will always be at the first entry in property descriptors.
2451 { MaybeObject* maybe_obj =
2452 AllocateSymbol(CStrVector(""), 0, String::kZeroHash);
2453 if (!maybe_obj->ToObject(&obj)) return false;
2455 hidden_symbol_ = String::cast(obj);
2457 // Allocate the foreign for __proto__.
2458 { MaybeObject* maybe_obj =
2459 AllocateForeign((Address) &Accessors::ObjectPrototype);
2460 if (!maybe_obj->ToObject(&obj)) return false;
2462 set_prototype_accessors(Foreign::cast(obj));
2464 // Allocate the code_stubs dictionary. The initial size is set to avoid
2465 // expanding the dictionary during bootstrapping.
2466 { MaybeObject* maybe_obj = UnseededNumberDictionary::Allocate(128);
2467 if (!maybe_obj->ToObject(&obj)) return false;
2469 set_code_stubs(UnseededNumberDictionary::cast(obj));
2472 // Allocate the non_monomorphic_cache used in stub-cache.cc. The initial size
2473 // is set to avoid expanding the dictionary during bootstrapping.
2474 { MaybeObject* maybe_obj = UnseededNumberDictionary::Allocate(64);
2475 if (!maybe_obj->ToObject(&obj)) return false;
2477 set_non_monomorphic_cache(UnseededNumberDictionary::cast(obj));
2479 { MaybeObject* maybe_obj = AllocatePolymorphicCodeCache();
2480 if (!maybe_obj->ToObject(&obj)) return false;
2482 set_polymorphic_code_cache(PolymorphicCodeCache::cast(obj));
2484 set_instanceof_cache_function(Smi::FromInt(0));
2485 set_instanceof_cache_map(Smi::FromInt(0));
2486 set_instanceof_cache_answer(Smi::FromInt(0));
2490 // Allocate the dictionary of intrinsic function names.
2491 { MaybeObject* maybe_obj = StringDictionary::Allocate(Runtime::kNumFunctions);
2492 if (!maybe_obj->ToObject(&obj)) return false;
2494 { MaybeObject* maybe_obj = Runtime::InitializeIntrinsicFunctionNames(this,
2496 if (!maybe_obj->ToObject(&obj)) return false;
2498 set_intrinsic_function_names(StringDictionary::cast(obj));
2500 if (InitializeNumberStringCache()->IsFailure()) return false;
2502 // Allocate cache for single character ASCII strings.
2503 { MaybeObject* maybe_obj =
2504 AllocateFixedArray(String::kMaxAsciiCharCode + 1, TENURED);
2505 if (!maybe_obj->ToObject(&obj)) return false;
2507 set_single_character_string_cache(FixedArray::cast(obj));
2509 // Allocate cache for string split.
2510 { MaybeObject* maybe_obj =
2511 AllocateFixedArray(StringSplitCache::kStringSplitCacheSize, TENURED);
2512 if (!maybe_obj->ToObject(&obj)) return false;
2514 set_string_split_cache(FixedArray::cast(obj));
2516 // Allocate cache for external strings pointing to native source code.
2517 { MaybeObject* maybe_obj = AllocateFixedArray(Natives::GetBuiltinsCount());
2518 if (!maybe_obj->ToObject(&obj)) return false;
2520 set_natives_source_cache(FixedArray::cast(obj));
2522 // Handling of script id generation is in FACTORY->NewScript.
2523 set_last_script_id(undefined_value());
2525 // Initialize keyed lookup cache.
2526 isolate_->keyed_lookup_cache()->Clear();
2528 // Initialize context slot cache.
2529 isolate_->context_slot_cache()->Clear();
2531 // Initialize descriptor cache.
2532 isolate_->descriptor_lookup_cache()->Clear();
2534 // Initialize compilation cache.
2535 isolate_->compilation_cache()->Clear();
2541 Object* StringSplitCache::Lookup(
2542 FixedArray* cache, String* string, String* pattern) {
2543 if (!string->IsSymbol() || !pattern->IsSymbol()) return Smi::FromInt(0);
2544 uint32_t hash = string->Hash();
2545 uint32_t index = ((hash & (kStringSplitCacheSize - 1)) &
2546 ~(kArrayEntriesPerCacheEntry - 1));
2547 if (cache->get(index + kStringOffset) == string &&
2548 cache->get(index + kPatternOffset) == pattern) {
2549 return cache->get(index + kArrayOffset);
2551 index = ((index + kArrayEntriesPerCacheEntry) & (kStringSplitCacheSize - 1));
2552 if (cache->get(index + kStringOffset) == string &&
2553 cache->get(index + kPatternOffset) == pattern) {
2554 return cache->get(index + kArrayOffset);
2556 return Smi::FromInt(0);
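// The cache is effectively 2-way set-associative: every (string, pattern)
// key hashes to a primary entry of kArrayEntriesPerCacheEntry slots and to
// the entry immediately after it. Lookup above probes both. Enter below
// fills whichever of the two entries is empty; if both are occupied, the
// secondary entry is cleared and the primary one is overwritten.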
2560 void StringSplitCache::Enter(Heap* heap,
2561 FixedArray* cache,
2562 String* string,
2563 String* pattern,
2564 FixedArray* array) {
2565 if (!string->IsSymbol() || !pattern->IsSymbol()) return;
2566 uint32_t hash = string->Hash();
2567 uint32_t index = ((hash & (kStringSplitCacheSize - 1)) &
2568 ~(kArrayEntriesPerCacheEntry - 1));
2569 if (cache->get(index + kStringOffset) == Smi::FromInt(0)) {
2570 cache->set(index + kStringOffset, string);
2571 cache->set(index + kPatternOffset, pattern);
2572 cache->set(index + kArrayOffset, array);
2573 } else {
2574 uint32_t index2 =
2575 ((index + kArrayEntriesPerCacheEntry) & (kStringSplitCacheSize - 1));
2576 if (cache->get(index2 + kStringOffset) == Smi::FromInt(0)) {
2577 cache->set(index2 + kStringOffset, string);
2578 cache->set(index2 + kPatternOffset, pattern);
2579 cache->set(index2 + kArrayOffset, array);
2581 cache->set(index2 + kStringOffset, Smi::FromInt(0));
2582 cache->set(index2 + kPatternOffset, Smi::FromInt(0));
2583 cache->set(index2 + kArrayOffset, Smi::FromInt(0));
2584 cache->set(index + kStringOffset, string);
2585 cache->set(index + kPatternOffset, pattern);
2586 cache->set(index + kArrayOffset, array);
2589 if (array->length() < 100) { // Limit how many new symbols we want to make.
2590 for (int i = 0; i < array->length(); i++) {
2591 String* str = String::cast(array->get(i));
2593 MaybeObject* maybe_symbol = heap->LookupSymbol(str);
2594 if (maybe_symbol->ToObject(&symbol)) {
2595 array->set(i, symbol);
2599 array->set_map_no_write_barrier(heap->fixed_cow_array_map());
2603 void StringSplitCache::Clear(FixedArray* cache) {
2604 for (int i = 0; i < kStringSplitCacheSize; i++) {
2605 cache->set(i, Smi::FromInt(0));
2610 MaybeObject* Heap::InitializeNumberStringCache() {
2611 // Compute the size of the number string cache based on the max heap size.
2612 // max_semispace_size_ == 512 KB => number_string_cache_size = 32.
2613 // max_semispace_size_ == 8 MB => number_string_cache_size = 16KB.
2614 int number_string_cache_size = max_semispace_size_ / 512;
2615 number_string_cache_size = Max(32, Min(16*KB, number_string_cache_size));
2617 MaybeObject* maybe_obj =
2618 AllocateFixedArray(number_string_cache_size * 2, TENURED);
2619 if (maybe_obj->ToObject(&obj)) set_number_string_cache(FixedArray::cast(obj));
2624 void Heap::FlushNumberStringCache() {
2625 // Flush the number to string cache.
2626 int len = number_string_cache()->length();
2627 for (int i = 0; i < len; i++) {
2628 number_string_cache()->set_undefined(this, i);
2633 static inline int double_get_hash(double d) {
2634 DoubleRepresentation rep(d);
2635 return static_cast<int>(rep.bits) ^ static_cast<int>(rep.bits >> 32);
2639 static inline int smi_get_hash(Smi* smi) {
2640 return smi->value();
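// double_get_hash folds the 64-bit IEEE-754 bit pattern of the double into
// 32 bits by XOR-ing its low and high words, while smi_get_hash simply
// reuses the untagged integer value. Both are masked to the cache size by
// the callers below.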
2644 Object* Heap::GetNumberStringCache(Object* number) {
2646 int mask = (number_string_cache()->length() >> 1) - 1;
2647 if (number->IsSmi()) {
2648 hash = smi_get_hash(Smi::cast(number)) & mask;
2650 hash = double_get_hash(number->Number()) & mask;
2652 Object* key = number_string_cache()->get(hash * 2);
2653 if (key == number) {
2654 return String::cast(number_string_cache()->get(hash * 2 + 1));
2655 } else if (key->IsHeapNumber() &&
2656 number->IsHeapNumber() &&
2657 key->Number() == number->Number()) {
2658 return String::cast(number_string_cache()->get(hash * 2 + 1));
2660 return undefined_value();
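// The number string cache is a flat FixedArray of (key, value) pairs: the
// key (a Smi or HeapNumber) is stored at index 2 * hash and its cached
// string representation at 2 * hash + 1, with (length >> 1) - 1 serving as
// the hash mask both here and in SetNumberStringCache below.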
2664 void Heap::SetNumberStringCache(Object* number, String* string) {
2666 int mask = (number_string_cache()->length() >> 1) - 1;
2667 if (number->IsSmi()) {
2668 hash = smi_get_hash(Smi::cast(number)) & mask;
2669 number_string_cache()->set(hash * 2, Smi::cast(number));
2671 hash = double_get_hash(number->Number()) & mask;
2672 number_string_cache()->set(hash * 2, number);
2674 number_string_cache()->set(hash * 2 + 1, string);
2678 MaybeObject* Heap::NumberToString(Object* number,
2679 bool check_number_string_cache) {
2680 isolate_->counters()->number_to_string_runtime()->Increment();
2681 if (check_number_string_cache) {
2682 Object* cached = GetNumberStringCache(number);
2683 if (cached != undefined_value()) {
2689 Vector<char> buffer(arr, ARRAY_SIZE(arr));
2691 if (number->IsSmi()) {
2692 int num = Smi::cast(number)->value();
2693 str = IntToCString(num, buffer);
2695 double num = HeapNumber::cast(number)->value();
2696 str = DoubleToCString(num, buffer);
2700 MaybeObject* maybe_js_string = AllocateStringFromAscii(CStrVector(str));
2701 if (maybe_js_string->ToObject(&js_string)) {
2702 SetNumberStringCache(number, String::cast(js_string));
2704 return maybe_js_string;
2708 MaybeObject* Heap::Uint32ToString(uint32_t value,
2709 bool check_number_string_cache) {
2711 MaybeObject* maybe = NumberFromUint32(value);
2712 if (!maybe->To<Object>(&number)) return maybe;
2713 return NumberToString(number, check_number_string_cache);
2717 Map* Heap::MapForExternalArrayType(ExternalArrayType array_type) {
2718 return Map::cast(roots_[RootIndexForExternalArrayType(array_type)]);
2722 Heap::RootListIndex Heap::RootIndexForExternalArrayType(
2723 ExternalArrayType array_type) {
2724 switch (array_type) {
2725 case kExternalByteArray:
2726 return kExternalByteArrayMapRootIndex;
2727 case kExternalUnsignedByteArray:
2728 return kExternalUnsignedByteArrayMapRootIndex;
2729 case kExternalShortArray:
2730 return kExternalShortArrayMapRootIndex;
2731 case kExternalUnsignedShortArray:
2732 return kExternalUnsignedShortArrayMapRootIndex;
2733 case kExternalIntArray:
2734 return kExternalIntArrayMapRootIndex;
2735 case kExternalUnsignedIntArray:
2736 return kExternalUnsignedIntArrayMapRootIndex;
2737 case kExternalFloatArray:
2738 return kExternalFloatArrayMapRootIndex;
2739 case kExternalDoubleArray:
2740 return kExternalDoubleArrayMapRootIndex;
2741 case kExternalPixelArray:
2742 return kExternalPixelArrayMapRootIndex;
2745 return kUndefinedValueRootIndex;
2750 MaybeObject* Heap::NumberFromDouble(double value, PretenureFlag pretenure) {
2751 // We need to distinguish the minus zero value and this cannot be
2752 // done after conversion to int. Doing this by comparing bit
2753 // patterns is faster than using fpclassify() et al.
2754 static const DoubleRepresentation minus_zero(-0.0);
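// -0.0 and +0.0 compare equal as doubles but differ in the sign bit of
// their bit patterns (0x8000000000000000 vs 0x0000000000000000), so the
// raw bits are compared instead.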
2756 DoubleRepresentation rep(value);
2757 if (rep.bits == minus_zero.bits) {
2758 return AllocateHeapNumber(-0.0, pretenure);
2761 int int_value = FastD2I(value);
2762 if (value == int_value && Smi::IsValid(int_value)) {
2763 return Smi::FromInt(int_value);
2766 // Materialize the value in the heap.
2767 return AllocateHeapNumber(value, pretenure);
2771 MaybeObject* Heap::AllocateForeign(Address address, PretenureFlag pretenure) {
2772 // Statically ensure that it is safe to allocate foreigns in paged spaces.
2773 STATIC_ASSERT(Foreign::kSize <= Page::kMaxHeapObjectSize);
2774 AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
2776 MaybeObject* maybe_result = Allocate(foreign_map(), space);
2777 if (!maybe_result->To(&result)) return maybe_result;
2778 result->set_foreign_address(address);
2783 MaybeObject* Heap::AllocateSharedFunctionInfo(Object* name) {
2784 SharedFunctionInfo* share;
2785 MaybeObject* maybe = Allocate(shared_function_info_map(), OLD_POINTER_SPACE);
2786 if (!maybe->To<SharedFunctionInfo>(&share)) return maybe;
2788 // Set pointer fields.
2789 share->set_name(name);
2790 Code* illegal = isolate_->builtins()->builtin(Builtins::kIllegal);
2791 share->set_code(illegal);
2792 share->set_scope_info(ScopeInfo::Empty());
2793 Code* construct_stub =
2794 isolate_->builtins()->builtin(Builtins::kJSConstructStubGeneric);
2795 share->set_construct_stub(construct_stub);
2796 share->set_instance_class_name(Object_symbol());
2797 share->set_function_data(undefined_value(), SKIP_WRITE_BARRIER);
2798 share->set_script(undefined_value(), SKIP_WRITE_BARRIER);
2799 share->set_debug_info(undefined_value(), SKIP_WRITE_BARRIER);
2800 share->set_inferred_name(empty_string(), SKIP_WRITE_BARRIER);
2801 share->set_initial_map(undefined_value(), SKIP_WRITE_BARRIER);
2802 share->set_this_property_assignments(undefined_value(), SKIP_WRITE_BARRIER);
2803 share->set_deopt_counter(Smi::FromInt(FLAG_deopt_every_n_times));
2805 // Set integer fields (smi or int, depending on the architecture).
2806 share->set_length(0);
2807 share->set_formal_parameter_count(0);
2808 share->set_expected_nof_properties(0);
2809 share->set_num_literals(0);
2810 share->set_start_position_and_type(0);
2811 share->set_end_position(0);
2812 share->set_function_token_position(0);
2813 // All compiler hints default to false or 0.
2814 share->set_compiler_hints(0);
2815 share->set_this_property_assignments_count(0);
2816 share->set_opt_count(0);
2822 MaybeObject* Heap::AllocateJSMessageObject(String* type,
2823 JSArray* arguments,
2824 int start_position,
2825 int end_position,
2826 Object* script,
2827 Object* stack_trace,
2828 Object* stack_frames) {
2830 { MaybeObject* maybe_result = Allocate(message_object_map(), NEW_SPACE);
2831 if (!maybe_result->ToObject(&result)) return maybe_result;
2833 JSMessageObject* message = JSMessageObject::cast(result);
2834 message->set_properties(Heap::empty_fixed_array(), SKIP_WRITE_BARRIER);
2835 message->set_elements(Heap::empty_fixed_array(), SKIP_WRITE_BARRIER);
2836 message->set_type(type);
2837 message->set_arguments(arguments);
2838 message->set_start_position(start_position);
2839 message->set_end_position(end_position);
2840 message->set_script(script);
2841 message->set_stack_trace(stack_trace);
2842 message->set_stack_frames(stack_frames);
2848 // Returns true for a character in a range. Both limits are inclusive.
2849 static inline bool Between(uint32_t character, uint32_t from, uint32_t to) {
2850 // This makes use of the unsigned wraparound.
2851 return character - from <= to - from;
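// Since the subtraction is unsigned, any character below 'from' wraps
// around to a large value and fails the single comparison. For example,
// with from = '0' and to = '9', the character '/' (0x2F) gives
// 0x2F - 0x30 = 0xFFFFFFFF, which is greater than 9.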
2855 MUST_USE_RESULT static inline MaybeObject* MakeOrFindTwoCharacterString(
2860 // Numeric strings have a different hash algorithm not known by
2861 // LookupTwoCharsSymbolIfExists, so we skip this step for such strings.
2862 if ((!Between(c1, '0', '9') || !Between(c2, '0', '9')) &&
2863 heap->symbol_table()->LookupTwoCharsSymbolIfExists(c1, c2, &symbol)) {
2865 // Now that we know the length is 2, we might as well make use of that
2866 // fact when building the new string.
2867 } else if ((c1 | c2) <= String::kMaxAsciiCharCodeU) { // We can do this
2868 ASSERT(IsPowerOf2(String::kMaxAsciiCharCodeU + 1)); // because of this.
2870 { MaybeObject* maybe_result = heap->AllocateRawAsciiString(2);
2871 if (!maybe_result->ToObject(&result)) return maybe_result;
2873 char* dest = SeqAsciiString::cast(result)->GetChars();
2879 { MaybeObject* maybe_result = heap->AllocateRawTwoByteString(2);
2880 if (!maybe_result->ToObject(&result)) return maybe_result;
2882 uc16* dest = SeqTwoByteString::cast(result)->GetChars();
2890 MaybeObject* Heap::AllocateConsString(String* first, String* second) {
2891 int first_length = first->length();
2892 if (first_length == 0) {
2896 int second_length = second->length();
2897 if (second_length == 0) {
2901 int length = first_length + second_length;
2903 // Optimization for 2-byte strings often used as keys in a decompression
2904 // dictionary. Check whether we already have the string in the symbol
2905 // table to prevent creation of many unnecessary strings.
2907 unsigned c1 = first->Get(0);
2908 unsigned c2 = second->Get(0);
2909 return MakeOrFindTwoCharacterString(this, c1, c2);
2912 bool first_is_ascii = first->IsAsciiRepresentation();
2913 bool second_is_ascii = second->IsAsciiRepresentation();
2914 bool is_ascii = first_is_ascii && second_is_ascii;
2916 // Make sure that an out of memory exception is thrown if the length
2917 // of the new cons string is too large.
2918 if (length > String::kMaxLength || length < 0) {
2919 isolate()->context()->mark_out_of_memory();
2920 return Failure::OutOfMemoryException();
2923 bool is_ascii_data_in_two_byte_string = false;
2925 // At least one of the strings uses two-byte representation so we
2926 // can't use the fast case code for short ascii strings below, but
2927 // we can try to save memory if all chars actually fit in ascii.
2928 is_ascii_data_in_two_byte_string =
2929 first->HasOnlyAsciiChars() && second->HasOnlyAsciiChars();
2930 if (is_ascii_data_in_two_byte_string) {
2931 isolate_->counters()->string_add_runtime_ext_to_ascii()->Increment();
2935 // If the resulting string is small make a flat string.
2936 if (length < String::kMinNonFlatLength) {
2937 // Note that neither of the two inputs can be a slice because:
2938 STATIC_ASSERT(String::kMinNonFlatLength <= SlicedString::kMinLength);
2939 ASSERT(first->IsFlat());
2940 ASSERT(second->IsFlat());
2943 { MaybeObject* maybe_result = AllocateRawAsciiString(length);
2944 if (!maybe_result->ToObject(&result)) return maybe_result;
2946 // Copy the characters into the new object.
2947 char* dest = SeqAsciiString::cast(result)->GetChars();
2950 if (first->IsExternalString()) {
2951 src = ExternalAsciiString::cast(first)->GetChars();
2953 src = SeqAsciiString::cast(first)->GetChars();
2955 for (int i = 0; i < first_length; i++) *dest++ = src[i];
2956 // Copy second part.
2957 if (second->IsExternalString()) {
2958 src = ExternalAsciiString::cast(second)->GetChars();
2960 src = SeqAsciiString::cast(second)->GetChars();
2962 for (int i = 0; i < second_length; i++) *dest++ = src[i];
2965 if (is_ascii_data_in_two_byte_string) {
2967 { MaybeObject* maybe_result = AllocateRawAsciiString(length);
2968 if (!maybe_result->ToObject(&result)) return maybe_result;
2970 // Copy the characters into the new object.
2971 char* dest = SeqAsciiString::cast(result)->GetChars();
2972 String::WriteToFlat(first, dest, 0, first_length);
2973 String::WriteToFlat(second, dest + first_length, 0, second_length);
2974 isolate_->counters()->string_add_runtime_ext_to_ascii()->Increment();
2979 { MaybeObject* maybe_result = AllocateRawTwoByteString(length);
2980 if (!maybe_result->ToObject(&result)) return maybe_result;
2982 // Copy the characters into the new object.
2983 uc16* dest = SeqTwoByteString::cast(result)->GetChars();
2984 String::WriteToFlat(first, dest, 0, first_length);
2985 String::WriteToFlat(second, dest + first_length, 0, second_length);
2990 Map* map = (is_ascii || is_ascii_data_in_two_byte_string) ?
2991 cons_ascii_string_map() : cons_string_map();
2994 { MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
2995 if (!maybe_result->ToObject(&result)) return maybe_result;
2998 AssertNoAllocation no_gc;
2999 ConsString* cons_string = ConsString::cast(result);
3000 WriteBarrierMode mode = cons_string->GetWriteBarrierMode(no_gc);
3001 cons_string->set_length(length);
3002 cons_string->set_hash_field(String::kEmptyHashField);
3003 cons_string->set_first(first, mode);
3004 cons_string->set_second(second, mode);
3009 MaybeObject* Heap::AllocateSubString(String* buffer,
3010 int start,
3011 int end,
3012 PretenureFlag pretenure) {
3013 int length = end - start;
3015 return empty_string();
3016 } else if (length == 1) {
3017 return LookupSingleCharacterStringFromCode(buffer->Get(start));
3018 } else if (length == 2) {
3019 // Optimization for 2-byte strings often used as keys in a decompression
3020 // dictionary. Check whether we already have the string in the symbol
3021 // table to prevent creation of many unnecessary strings.
3022 unsigned c1 = buffer->Get(start);
3023 unsigned c2 = buffer->Get(start + 1);
3024 return MakeOrFindTwoCharacterString(this, c1, c2);
3027 // Make an attempt to flatten the buffer to reduce access time.
3028 buffer = buffer->TryFlattenGetString();
3030 if (!FLAG_string_slices ||
3031 !buffer->IsFlat() ||
3032 length < SlicedString::kMinLength ||
3033 pretenure == TENURED) {
3035 // WriteToFlat takes care of the case when an indirect string has a
3036 // different encoding from its underlying string. These encodings may
3037 // differ because of externalization.
3038 bool is_ascii = buffer->IsAsciiRepresentation();
3039 { MaybeObject* maybe_result = is_ascii
3040 ? AllocateRawAsciiString(length, pretenure)
3041 : AllocateRawTwoByteString(length, pretenure);
3042 if (!maybe_result->ToObject(&result)) return maybe_result;
3044 String* string_result = String::cast(result);
3045 // Copy the characters into the new object.
3047 ASSERT(string_result->IsAsciiRepresentation());
3048 char* dest = SeqAsciiString::cast(string_result)->GetChars();
3049 String::WriteToFlat(buffer, dest, start, end);
3051 ASSERT(string_result->IsTwoByteRepresentation());
3052 uc16* dest = SeqTwoByteString::cast(string_result)->GetChars();
3053 String::WriteToFlat(buffer, dest, start, end);
3058 ASSERT(buffer->IsFlat());
3060 if (FLAG_verify_heap) {
3061 buffer->StringVerify();
3066 // When slicing an indirect string we use its encoding for a newly created
3067 // slice and don't check the encoding of the underlying string. This is safe
3068 // even if the encodings are different because of externalization. If an
3069 // indirect ASCII string is pointing to a two-byte string, the two-byte char
3070 // codes of the underlying string must still fit into ASCII (because
3071 // externalization must not change char codes).
3072 { Map* map = buffer->IsAsciiRepresentation()
3073 ? sliced_ascii_string_map()
3074 : sliced_string_map();
3075 MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
3076 if (!maybe_result->ToObject(&result)) return maybe_result;
3079 AssertNoAllocation no_gc;
3080 SlicedString* sliced_string = SlicedString::cast(result);
3081 sliced_string->set_length(length);
3082 sliced_string->set_hash_field(String::kEmptyHashField);
3083 if (buffer->IsConsString()) {
3084 ConsString* cons = ConsString::cast(buffer);
3085 ASSERT(cons->second()->length() == 0);
3086 sliced_string->set_parent(cons->first());
3087 sliced_string->set_offset(start);
3088 } else if (buffer->IsSlicedString()) {
3089 // Prevent nesting sliced strings.
3090 SlicedString* parent_slice = SlicedString::cast(buffer);
3091 sliced_string->set_parent(parent_slice->parent());
3092 sliced_string->set_offset(start + parent_slice->offset());
3094 sliced_string->set_parent(buffer);
3095 sliced_string->set_offset(start);
3097 ASSERT(sliced_string->parent()->IsSeqString() ||
3098 sliced_string->parent()->IsExternalString());
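// Pointing a new slice directly at the underlying sequential or external
// string (and accumulating the offsets) keeps slice chains at depth one,
// which is exactly what the assertion above checks.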
3103 MaybeObject* Heap::AllocateExternalStringFromAscii(
3104 const ExternalAsciiString::Resource* resource) {
3105 size_t length = resource->length();
3106 if (length > static_cast<size_t>(String::kMaxLength)) {
3107 isolate()->context()->mark_out_of_memory();
3108 return Failure::OutOfMemoryException();
3111 Map* map = external_ascii_string_map();
3113 { MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
3114 if (!maybe_result->ToObject(&result)) return maybe_result;
3117 ExternalAsciiString* external_string = ExternalAsciiString::cast(result);
3118 external_string->set_length(static_cast<int>(length));
3119 external_string->set_hash_field(String::kEmptyHashField);
3120 external_string->set_resource(resource);
3126 MaybeObject* Heap::AllocateExternalStringFromTwoByte(
3127 const ExternalTwoByteString::Resource* resource) {
3128 size_t length = resource->length();
3129 if (length > static_cast<size_t>(String::kMaxLength)) {
3130 isolate()->context()->mark_out_of_memory();
3131 return Failure::OutOfMemoryException();
3134 // For small strings we check whether the resource contains only
3135 // ASCII characters. If yes, we use a different string map.
3136 static const size_t kAsciiCheckLengthLimit = 32;
3137 bool is_ascii = length <= kAsciiCheckLengthLimit &&
3138 String::IsAscii(resource->data(), static_cast<int>(length));
3139 Map* map = is_ascii ?
3140 external_string_with_ascii_data_map() : external_string_map();
3142 { MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
3143 if (!maybe_result->ToObject(&result)) return maybe_result;
3146 ExternalTwoByteString* external_string = ExternalTwoByteString::cast(result);
3147 external_string->set_length(static_cast<int>(length));
3148 external_string->set_hash_field(String::kEmptyHashField);
3149 external_string->set_resource(resource);
3155 MaybeObject* Heap::LookupSingleCharacterStringFromCode(uint16_t code) {
3156 if (code <= String::kMaxAsciiCharCode) {
3157 Object* value = single_character_string_cache()->get(code);
3158 if (value != undefined_value()) return value;
3161 buffer[0] = static_cast<char>(code);
3163 MaybeObject* maybe_result = LookupSymbol(Vector<const char>(buffer, 1));
3165 if (!maybe_result->ToObject(&result)) return maybe_result;
3166 single_character_string_cache()->set(code, result);
3171 { MaybeObject* maybe_result = AllocateRawTwoByteString(1);
3172 if (!maybe_result->ToObject(&result)) return maybe_result;
3174 String* answer = String::cast(result);
3175 answer->Set(0, code);
3180 MaybeObject* Heap::AllocateByteArray(int length, PretenureFlag pretenure) {
3181 if (length < 0 || length > ByteArray::kMaxLength) {
3182 return Failure::OutOfMemoryException();
3184 if (pretenure == NOT_TENURED) {
3185 return AllocateByteArray(length);
3187 int size = ByteArray::SizeFor(length);
3189 { MaybeObject* maybe_result = (size <= MaxObjectSizeInPagedSpace())
3190 ? old_data_space_->AllocateRaw(size)
3191 : lo_space_->AllocateRaw(size, NOT_EXECUTABLE);
3192 if (!maybe_result->ToObject(&result)) return maybe_result;
3195 reinterpret_cast<ByteArray*>(result)->set_map_no_write_barrier(
3197 reinterpret_cast<ByteArray*>(result)->set_length(length);
3202 MaybeObject* Heap::AllocateByteArray(int length) {
3203 if (length < 0 || length > ByteArray::kMaxLength) {
3204 return Failure::OutOfMemoryException();
3206 int size = ByteArray::SizeFor(length);
3207 AllocationSpace space =
3208 (size > MaxObjectSizeInPagedSpace()) ? LO_SPACE : NEW_SPACE;
3210 { MaybeObject* maybe_result = AllocateRaw(size, space, OLD_DATA_SPACE);
3211 if (!maybe_result->ToObject(&result)) return maybe_result;
3214 reinterpret_cast<ByteArray*>(result)->set_map_no_write_barrier(
3216 reinterpret_cast<ByteArray*>(result)->set_length(length);
3221 void Heap::CreateFillerObjectAt(Address addr, int size) {
3222 if (size == 0) return;
3223 HeapObject* filler = HeapObject::FromAddress(addr);
3224 if (size == kPointerSize) {
3225 filler->set_map_no_write_barrier(one_pointer_filler_map());
3226 } else if (size == 2 * kPointerSize) {
3227 filler->set_map_no_write_barrier(two_pointer_filler_map());
3229 filler->set_map_no_write_barrier(free_space_map());
3230 FreeSpace::cast(filler)->set_size(size);
3235 MaybeObject* Heap::AllocateExternalArray(int length,
3236 ExternalArrayType array_type,
3237 void* external_pointer,
3238 PretenureFlag pretenure) {
3239 AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
3241 { MaybeObject* maybe_result = AllocateRaw(ExternalArray::kAlignedSize,
3244 if (!maybe_result->ToObject(&result)) return maybe_result;
3247 reinterpret_cast<ExternalArray*>(result)->set_map_no_write_barrier(
3248 MapForExternalArrayType(array_type));
3249 reinterpret_cast<ExternalArray*>(result)->set_length(length);
3250 reinterpret_cast<ExternalArray*>(result)->set_external_pointer(
3257 MaybeObject* Heap::CreateCode(const CodeDesc& desc,
3258 Code::Flags flags,
3259 Handle<Object> self_reference,
3260 bool immovable) {
3261 // Allocate the ByteArray before the Code object, so that we do not risk
3262 // leaving an uninitialized Code object (and breaking the heap).
3263 ByteArray* reloc_info;
3264 MaybeObject* maybe_reloc_info = AllocateByteArray(desc.reloc_size, TENURED);
3265 if (!maybe_reloc_info->To(&reloc_info)) return maybe_reloc_info;
3268 int body_size = RoundUp(desc.instr_size, kObjectAlignment);
3269 int obj_size = Code::SizeFor(body_size);
3270 ASSERT(IsAligned(static_cast<intptr_t>(obj_size), kCodeAlignment));
3271 MaybeObject* maybe_result;
3272 // Large code objects and code objects which should stay at a fixed address
3273 // are allocated in large object space.
3274 if (obj_size > MaxObjectSizeInPagedSpace() || immovable) {
3275 maybe_result = lo_space_->AllocateRaw(obj_size, EXECUTABLE);
3277 maybe_result = code_space_->AllocateRaw(obj_size);
3281 if (!maybe_result->ToObject(&result)) return maybe_result;
3283 // Initialize the object
3284 HeapObject::cast(result)->set_map_no_write_barrier(code_map());
3285 Code* code = Code::cast(result);
3286 ASSERT(!isolate_->code_range()->exists() ||
3287 isolate_->code_range()->contains(code->address()));
3288 code->set_instruction_size(desc.instr_size);
3289 code->set_relocation_info(reloc_info);
3290 code->set_flags(flags);
3291 if (code->is_call_stub() || code->is_keyed_call_stub()) {
3292 code->set_check_type(RECEIVER_MAP_CHECK);
3294 code->set_deoptimization_data(empty_fixed_array(), SKIP_WRITE_BARRIER);
3295 code->set_handler_table(empty_fixed_array(), SKIP_WRITE_BARRIER);
3296 code->set_next_code_flushing_candidate(undefined_value());
3297 // Allow self references to the created code object by patching the handle
3298 // to point to the newly allocated Code object.
3299 if (!self_reference.is_null()) {
3300 *(self_reference.location()) = code;
3302 // Migrate generated code.
3303 // The generated code can contain Object** values (typically from handles)
3304 // that are dereferenced during the copy to point directly to the actual heap
3305 // objects. These pointers can include references to the code object itself,
3306 // through the self_reference parameter.
3307 code->CopyFrom(desc);
3310 if (FLAG_verify_heap) {
3318 MaybeObject* Heap::CopyCode(Code* code) {
3319 // Allocate an object the same size as the code object.
3320 int obj_size = code->Size();
3321 MaybeObject* maybe_result;
3322 if (obj_size > MaxObjectSizeInPagedSpace()) {
3323 maybe_result = lo_space_->AllocateRaw(obj_size, EXECUTABLE);
3325 maybe_result = code_space_->AllocateRaw(obj_size);
3329 if (!maybe_result->ToObject(&result)) return maybe_result;
3331 // Copy code object.
3332 Address old_addr = code->address();
3333 Address new_addr = reinterpret_cast<HeapObject*>(result)->address();
3334 CopyBlock(new_addr, old_addr, obj_size);
3335 // Relocate the copy.
3336 Code* new_code = Code::cast(result);
3337 ASSERT(!isolate_->code_range()->exists() ||
3338 isolate_->code_range()->contains(code->address()));
3339 new_code->Relocate(new_addr - old_addr);
3344 MaybeObject* Heap::CopyCode(Code* code, Vector<byte> reloc_info) {
3345 // Allocate the ByteArray before the Code object, so that we do not risk
3346 // leaving an uninitialized Code object (and breaking the heap).
3347 Object* reloc_info_array;
3348 { MaybeObject* maybe_reloc_info_array =
3349 AllocateByteArray(reloc_info.length(), TENURED);
3350 if (!maybe_reloc_info_array->ToObject(&reloc_info_array)) {
3351 return maybe_reloc_info_array;
3355 int new_body_size = RoundUp(code->instruction_size(), kObjectAlignment);
3357 int new_obj_size = Code::SizeFor(new_body_size);
3359 Address old_addr = code->address();
3361 size_t relocation_offset =
3362 static_cast<size_t>(code->instruction_end() - old_addr);
3364 MaybeObject* maybe_result;
3365 if (new_obj_size > MaxObjectSizeInPagedSpace()) {
3366 maybe_result = lo_space_->AllocateRaw(new_obj_size, EXECUTABLE);
3368 maybe_result = code_space_->AllocateRaw(new_obj_size);
3372 if (!maybe_result->ToObject(&result)) return maybe_result;
3374 // Copy code object.
3375 Address new_addr = reinterpret_cast<HeapObject*>(result)->address();
3377 // Copy header and instructions.
3378 memcpy(new_addr, old_addr, relocation_offset);
3380 Code* new_code = Code::cast(result);
3381 new_code->set_relocation_info(ByteArray::cast(reloc_info_array));
3383 // Copy patched rinfo.
3384 memcpy(new_code->relocation_start(), reloc_info.start(), reloc_info.length());
3386 // Relocate the copy.
3387 ASSERT(!isolate_->code_range()->exists() ||
3388 isolate_->code_range()->contains(code->address()));
3389 new_code->Relocate(new_addr - old_addr);
3392 if (FLAG_verify_heap) {
3400 MaybeObject* Heap::Allocate(Map* map, AllocationSpace space) {
3401 ASSERT(gc_state_ == NOT_IN_GC);
3402 ASSERT(map->instance_type() != MAP_TYPE);
3403 // If allocation failures are disallowed, we may allocate in a different
3404 // space when new space is full and the object is not a large object.
3405 AllocationSpace retry_space =
3406 (space != NEW_SPACE) ? space : TargetSpaceId(map->instance_type());
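// TargetSpaceId picks the old space an object of this instance type would
// normally be promoted to, so if the new-space allocation fails while
// allocation failures are disallowed, AllocateRaw retries there rather
// than failing outright.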
3408 { MaybeObject* maybe_result =
3409 AllocateRaw(map->instance_size(), space, retry_space);
3410 if (!maybe_result->ToObject(&result)) return maybe_result;
3412 // No need for write barrier since object is white and map is in old space.
3413 HeapObject::cast(result)->set_map_no_write_barrier(map);
3418 void Heap::InitializeFunction(JSFunction* function,
3419 SharedFunctionInfo* shared,
3420 Object* prototype) {
3421 ASSERT(!prototype->IsMap());
3422 function->initialize_properties();
3423 function->initialize_elements();
3424 function->set_shared(shared);
3425 function->set_code(shared->code());
3426 function->set_prototype_or_initial_map(prototype);
3427 function->set_context(undefined_value());
3428 function->set_literals_or_bindings(empty_fixed_array());
3429 function->set_next_function_link(undefined_value());
3433 MaybeObject* Heap::AllocateFunctionPrototype(JSFunction* function) {
3434 // Allocate the prototype. Make sure to use the object function
3435 // from the function's context, since the function can be from a
3436 // different context.
3437 JSFunction* object_function =
3438 function->context()->global_context()->object_function();
3440 // Each function prototype gets a copy of the object function map.
3441 // This avoids unwanted sharing of maps between prototypes of different
3442 // functions.
3444 ASSERT(object_function->has_initial_map());
3445 { MaybeObject* maybe_map =
3446 object_function->initial_map()->CopyDropTransitions();
3447 if (!maybe_map->To<Map>(&new_map)) return maybe_map;
3450 { MaybeObject* maybe_prototype = AllocateJSObjectFromMap(new_map);
3451 if (!maybe_prototype->ToObject(&prototype)) return maybe_prototype;
3453 // When creating the prototype for the function we must set its
3454 // constructor to the function.
3456 { MaybeObject* maybe_result =
3457 JSObject::cast(prototype)->SetLocalPropertyIgnoreAttributes(
3458 constructor_symbol(), function, DONT_ENUM);
3459 if (!maybe_result->ToObject(&result)) return maybe_result;
3465 MaybeObject* Heap::AllocateFunction(Map* function_map,
3466 SharedFunctionInfo* shared,
3467 Object* prototype,
3468 PretenureFlag pretenure) {
3469 AllocationSpace space =
3470 (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
3472 { MaybeObject* maybe_result = Allocate(function_map, space);
3473 if (!maybe_result->ToObject(&result)) return maybe_result;
3475 InitializeFunction(JSFunction::cast(result), shared, prototype);
3480 MaybeObject* Heap::AllocateArgumentsObject(Object* callee, int length) {
3481 // To get fast allocation and map sharing for arguments objects we
3482 // allocate them based on an arguments boilerplate.
3484 JSObject* boilerplate;
3485 int arguments_object_size;
3486 bool strict_mode_callee = callee->IsJSFunction() &&
3487 !JSFunction::cast(callee)->shared()->is_classic_mode();
3488 if (strict_mode_callee) {
3489 boilerplate =
3490 isolate()->context()->global_context()->
3491 strict_mode_arguments_boilerplate();
3492 arguments_object_size = kArgumentsObjectSizeStrict;
3493 } else {
3494 boilerplate =
3495 isolate()->context()->global_context()->arguments_boilerplate();
3496 arguments_object_size = kArgumentsObjectSize;
3499 // This calls Copy directly rather than using Heap::AllocateRaw so we
3500 // duplicate the check here.
3501 ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
3503 // Check that the size of the boilerplate matches our
3504 // expectations. The ArgumentsAccessStub::GenerateNewObject relies
3505 // on the size being a known constant.
3506 ASSERT(arguments_object_size == boilerplate->map()->instance_size());
3508 // Do the allocation.
3510 { MaybeObject* maybe_result =
3511 AllocateRaw(arguments_object_size, NEW_SPACE, OLD_POINTER_SPACE);
3512 if (!maybe_result->ToObject(&result)) return maybe_result;
3515 // Copy the content. The arguments boilerplate doesn't have any
3516 // fields that point to new space so it's safe to skip the write barrier.
3518 CopyBlock(HeapObject::cast(result)->address(),
3519 boilerplate->address(),
3520 JSObject::kHeaderSize);
3522 // Set the length property.
3523 JSObject::cast(result)->InObjectPropertyAtPut(kArgumentsLengthIndex,
3524 Smi::FromInt(length),
3525 SKIP_WRITE_BARRIER);
3526 // Set the callee property for non-strict mode arguments object only.
3527 if (!strict_mode_callee) {
3528 JSObject::cast(result)->InObjectPropertyAtPut(kArgumentsCalleeIndex,
3532 // Check the state of the object
3533 ASSERT(JSObject::cast(result)->HasFastProperties());
3534 ASSERT(JSObject::cast(result)->HasFastElements());
3540 static bool HasDuplicates(DescriptorArray* descriptors) {
3541 int count = descriptors->number_of_descriptors();
3543 String* prev_key = descriptors->GetKey(0);
3544 for (int i = 1; i != count; i++) {
3545 String* current_key = descriptors->GetKey(i);
3546 if (prev_key == current_key) return true;
3547 prev_key = current_key;
3554 MaybeObject* Heap::AllocateInitialMap(JSFunction* fun) {
3555 ASSERT(!fun->has_initial_map());
3557 // First create a new map with the size and number of in-object properties
3558 // suggested by the function.
3559 int instance_size = fun->shared()->CalculateInstanceSize();
3560 int in_object_properties = fun->shared()->CalculateInObjectProperties();
3562 { MaybeObject* maybe_map_obj = AllocateMap(JS_OBJECT_TYPE, instance_size);
3563 if (!maybe_map_obj->ToObject(&map_obj)) return maybe_map_obj;
3566 // Fetch or allocate prototype.
3568 if (fun->has_instance_prototype()) {
3569 prototype = fun->instance_prototype();
3571 { MaybeObject* maybe_prototype = AllocateFunctionPrototype(fun);
3572 if (!maybe_prototype->ToObject(&prototype)) return maybe_prototype;
3575 Map* map = Map::cast(map_obj);
3576 map->set_inobject_properties(in_object_properties);
3577 map->set_unused_property_fields(in_object_properties);
3578 map->set_prototype(prototype);
3579 ASSERT(map->has_fast_elements());
3581 // If the function has only simple this property assignments add
3582 // field descriptors for these to the initial map as the object
3583 // cannot be constructed without having these properties. Guard by
3584 // the inline_new flag so we only change the map if we generate a
3585 // specialized construct stub.
3586 ASSERT(in_object_properties <= Map::kMaxPreAllocatedPropertyFields);
3587 if (fun->shared()->CanGenerateInlineConstructor(prototype)) {
3588 int count = fun->shared()->this_property_assignments_count();
3589 if (count > in_object_properties) {
3590 // Inline constructor can only handle inobject properties.
3591 fun->shared()->ForbidInlineConstructor();
3593 DescriptorArray* descriptors;
3594 { MaybeObject* maybe_descriptors_obj = DescriptorArray::Allocate(count);
3595 if (!maybe_descriptors_obj->To<DescriptorArray>(&descriptors)) {
3596 return maybe_descriptors_obj;
3599 DescriptorArray::WhitenessWitness witness(descriptors);
3600 for (int i = 0; i < count; i++) {
3601 String* name = fun->shared()->GetThisPropertyAssignmentName(i);
3602 ASSERT(name->IsSymbol());
3603 FieldDescriptor field(name, i, NONE);
3604 field.SetEnumerationIndex(i);
3605 descriptors->Set(i, &field, witness);
3607 descriptors->SetNextEnumerationIndex(count);
3608 descriptors->SortUnchecked(witness);
3610 // The descriptors may contain duplicates because the compiler does not
3611 // guarantee the uniqueness of property names (it would have required
3612 // quadratic time). Once the descriptors are sorted we can check for
3613 // duplicates in linear time.
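// As a sketch of how duplicates can arise: a constructor such as
// function C() { this.x = 1; this.x = 2; } presumably records the name
// "x" twice in the simple this-property-assignment list.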
3614 if (HasDuplicates(descriptors)) {
3615 fun->shared()->ForbidInlineConstructor();
3617 map->set_instance_descriptors(descriptors);
3618 map->set_pre_allocated_property_fields(count);
3619 map->set_unused_property_fields(in_object_properties - count);
3624 fun->shared()->StartInobjectSlackTracking(map);
3630 void Heap::InitializeJSObjectFromMap(JSObject* obj,
3631 FixedArray* properties,
3633 obj->set_properties(properties);
3634 obj->initialize_elements();
3635 // TODO(1240798): Initialize the object's body using valid initial values
3636 // according to the object's initial map. For example, if the map's
3637 // instance type is JS_ARRAY_TYPE, the length field should be initialized
3638 // to a number (e.g., Smi::FromInt(0)) and the elements initialized to a
3639 // fixed array (e.g., Heap::empty_fixed_array()). Currently, the object
3640 // verification code has to cope with (temporarily) invalid objects. See,
3641 // for example, JSArray::JSArrayVerify.
3643 // We cannot always fill with one_pointer_filler_map because objects
3644 // created from API functions expect their internal fields to be initialized
3645 // with undefined_value.
3646 // Pre-allocated fields need to be initialized with undefined_value as well
3647 // so that object accesses before the constructor completes (e.g. in the
3648 // debugger) will not cause a crash.
3649 if (map->constructor()->IsJSFunction() &&
3650 JSFunction::cast(map->constructor())->shared()->
3651 IsInobjectSlackTrackingInProgress()) {
3652 // We might want to shrink the object later.
3653 ASSERT(obj->GetInternalFieldCount() == 0);
3654 filler = Heap::one_pointer_filler_map();
3656 filler = Heap::undefined_value();
3658 obj->InitializeBody(map, Heap::undefined_value(), filler);
3662 MaybeObject* Heap::AllocateJSObjectFromMap(Map* map, PretenureFlag pretenure) {
3663 // JSFunctions should be allocated using AllocateFunction to be
3664 // properly initialized.
3665 ASSERT(map->instance_type() != JS_FUNCTION_TYPE);
3667 // Both types of global objects should be allocated using
3668 // AllocateGlobalObject to be properly initialized.
3669 ASSERT(map->instance_type() != JS_GLOBAL_OBJECT_TYPE);
3670 ASSERT(map->instance_type() != JS_BUILTINS_OBJECT_TYPE);
3672 // Allocate the backing storage for the properties.
3674 map->pre_allocated_property_fields() +
3675 map->unused_property_fields() -
3676 map->inobject_properties();
3677 ASSERT(prop_size >= 0);
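// prop_size is, roughly speaking, the number of expected property slots that
// will not fit in the object itself and therefore need room in the
// out-of-object properties backing store allocated below.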
3679 { MaybeObject* maybe_properties = AllocateFixedArray(prop_size, pretenure);
3680 if (!maybe_properties->ToObject(&properties)) return maybe_properties;
3683 // Allocate the JSObject.
3684 AllocationSpace space =
3685 (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
3686 if (map->instance_size() > MaxObjectSizeInPagedSpace()) space = LO_SPACE;
3688 { MaybeObject* maybe_obj = Allocate(map, space);
3689 if (!maybe_obj->ToObject(&obj)) return maybe_obj;
3692 // Initialize the JSObject.
3693 InitializeJSObjectFromMap(JSObject::cast(obj),
3694 FixedArray::cast(properties),
3696 ASSERT(JSObject::cast(obj)->HasFastSmiOnlyElements() ||
3697 JSObject::cast(obj)->HasFastElements());
3702 MaybeObject* Heap::AllocateJSObject(JSFunction* constructor,
3703 PretenureFlag pretenure) {
3704 // Allocate the initial map if absent.
3705 if (!constructor->has_initial_map()) {
3706 Object* initial_map;
3707 { MaybeObject* maybe_initial_map = AllocateInitialMap(constructor);
3708 if (!maybe_initial_map->ToObject(&initial_map)) return maybe_initial_map;
3710 constructor->set_initial_map(Map::cast(initial_map));
3711 Map::cast(initial_map)->set_constructor(constructor);
3713 // Allocate the object based on the constructor's initial map.
3714 MaybeObject* result =
3715 AllocateJSObjectFromMap(constructor->initial_map(), pretenure);
3717 // Make sure result is NOT a global object if valid.
3718 Object* non_failure;
3719 ASSERT(!result->ToObject(&non_failure) || !non_failure->IsGlobalObject());
3725 MaybeObject* Heap::AllocateJSProxy(Object* handler, Object* prototype) {
3727 // TODO(rossberg): Once we optimize proxies, think about a scheme to share
3728 // maps. Will probably depend on the identity of the handler object, too.
3730 MaybeObject* maybe_map_obj = AllocateMap(JS_PROXY_TYPE, JSProxy::kSize);
3731 if (!maybe_map_obj->To<Map>(&map)) return maybe_map_obj;
3732 map->set_prototype(prototype);
3734 // Allocate the proxy object.
3736 MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
3737 if (!maybe_result->To<JSProxy>(&result)) return maybe_result;
3738 result->InitializeBody(map->instance_size(), Smi::FromInt(0));
3739 result->set_handler(handler);
3740 result->set_hash(undefined_value(), SKIP_WRITE_BARRIER);
3745 MaybeObject* Heap::AllocateJSFunctionProxy(Object* handler,
3747 Object* construct_trap,
3748 Object* prototype) {
3750 // TODO(rossberg): Once we optimize proxies, think about a scheme to share
3751 // maps. Will probably depend on the identity of the handler object, too.
3753 MaybeObject* maybe_map_obj =
3754 AllocateMap(JS_FUNCTION_PROXY_TYPE, JSFunctionProxy::kSize);
3755 if (!maybe_map_obj->To<Map>(&map)) return maybe_map_obj;
3756 map->set_prototype(prototype);
3758 // Allocate the proxy object.
3759 JSFunctionProxy* result;
3760 MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
3761 if (!maybe_result->To<JSFunctionProxy>(&result)) return maybe_result;
3762 result->InitializeBody(map->instance_size(), Smi::FromInt(0));
3763 result->set_handler(handler);
3764 result->set_hash(undefined_value(), SKIP_WRITE_BARRIER);
3765 result->set_call_trap(call_trap);
3766 result->set_construct_trap(construct_trap);
3771 MaybeObject* Heap::AllocateGlobalObject(JSFunction* constructor) {
3772 ASSERT(constructor->has_initial_map());
3773 Map* map = constructor->initial_map();
3775 // Make sure no field properties are described in the initial map.
3776 // This guarantees us that normalizing the properties does not
3777 // require us to change property values to JSGlobalPropertyCells.
3778 ASSERT(map->NextFreePropertyIndex() == 0);
3780 // Make sure we don't have a ton of pre-allocated slots in the
3781 // global objects. They will be unused once we normalize the object.
3782 ASSERT(map->unused_property_fields() == 0);
3783 ASSERT(map->inobject_properties() == 0);
3785 // Initial size of the backing store to avoid resize of the storage during
3786 // bootstrapping. The size differs between the JS global object and the builtins object.
3788 int initial_size = map->instance_type() == JS_GLOBAL_OBJECT_TYPE ? 64 : 512;
3790 // Allocate a dictionary object for backing storage.
3792 { MaybeObject* maybe_obj =
3793 StringDictionary::Allocate(
3794 map->NumberOfDescribedProperties() * 2 + initial_size);
3795 if (!maybe_obj->ToObject(&obj)) return maybe_obj;
3797 StringDictionary* dictionary = StringDictionary::cast(obj);
3799 // The global object might be created from an object template with accessors.
3800 // Fill these accessors into the dictionary.
3801 DescriptorArray* descs = map->instance_descriptors();
3802 for (int i = 0; i < descs->number_of_descriptors(); i++) {
3803 PropertyDetails details(descs->GetDetails(i));
3804 ASSERT(details.type() == CALLBACKS); // Only accessors are expected.
3806 PropertyDetails(details.attributes(), CALLBACKS, details.index());
3807 Object* value = descs->GetCallbacksObject(i);
3808 { MaybeObject* maybe_value = AllocateJSGlobalPropertyCell(value);
3809 if (!maybe_value->ToObject(&value)) return maybe_value;
3813 { MaybeObject* maybe_result = dictionary->Add(descs->GetKey(i), value, d);
3814 if (!maybe_result->ToObject(&result)) return maybe_result;
3816 dictionary = StringDictionary::cast(result);
3819 // Allocate the global object and initialize it with the backing store.
3820 { MaybeObject* maybe_obj = Allocate(map, OLD_POINTER_SPACE);
3821 if (!maybe_obj->ToObject(&obj)) return maybe_obj;
3823 JSObject* global = JSObject::cast(obj);
3824 InitializeJSObjectFromMap(global, dictionary, map);
3826 // Create a new map for the global object.
3827 { MaybeObject* maybe_obj = map->CopyDropDescriptors();
3828 if (!maybe_obj->ToObject(&obj)) return maybe_obj;
3830 Map* new_map = Map::cast(obj);
3832 // Set up the global object as a normalized object.
3833 global->set_map(new_map);
3834 global->map()->clear_instance_descriptors();
3835 global->set_properties(dictionary);
3837 // Make sure result is a global object with properties in dictionary.
3838 ASSERT(global->IsGlobalObject());
3839 ASSERT(!global->HasFastProperties());
3844 MaybeObject* Heap::CopyJSObject(JSObject* source) {
3845 // Never used to copy functions. If functions need to be copied we
3846 // have to be careful to clear the literals array.
3847 SLOW_ASSERT(!source->IsJSFunction());
3850 Map* map = source->map();
3851 int object_size = map->instance_size();
3854 WriteBarrierMode wb_mode = UPDATE_WRITE_BARRIER;
3856 // If we're forced to always allocate, we use the general allocation
3857 // functions which may leave us with an object in old space.
3858 if (always_allocate()) {
3859 { MaybeObject* maybe_clone =
3860 AllocateRaw(object_size, NEW_SPACE, OLD_POINTER_SPACE);
3861 if (!maybe_clone->ToObject(&clone)) return maybe_clone;
3863 Address clone_address = HeapObject::cast(clone)->address();
3864 CopyBlock(clone_address,
3867 // Update write barrier for all fields that lie beyond the header.
3868 RecordWrites(clone_address,
3869 JSObject::kHeaderSize,
3870 (object_size - JSObject::kHeaderSize) / kPointerSize);
3872 wb_mode = SKIP_WRITE_BARRIER;
3873 { MaybeObject* maybe_clone = new_space_.AllocateRaw(object_size);
3874 if (!maybe_clone->ToObject(&clone)) return maybe_clone;
3876 SLOW_ASSERT(InNewSpace(clone));
3877 // Since we know the clone is allocated in new space, we can copy
3878 // the contents without worrying about updating the write barrier.
3879 CopyBlock(HeapObject::cast(clone)->address(),
3885 JSObject::cast(clone)->GetElementsKind() == source->GetElementsKind());
3886 FixedArrayBase* elements = FixedArrayBase::cast(source->elements());
3887 FixedArray* properties = FixedArray::cast(source->properties());
3888 // Update elements if necessary.
3889 if (elements->length() > 0) {
3891 { MaybeObject* maybe_elem;
3892 if (elements->map() == fixed_cow_array_map()) {
3893 maybe_elem = FixedArray::cast(elements);
3894 } else if (source->HasFastDoubleElements()) {
3895 maybe_elem = CopyFixedDoubleArray(FixedDoubleArray::cast(elements));
3897 maybe_elem = CopyFixedArray(FixedArray::cast(elements));
3899 if (!maybe_elem->ToObject(&elem)) return maybe_elem;
3901 JSObject::cast(clone)->set_elements(FixedArrayBase::cast(elem), wb_mode);
3903 // Update properties if necessary.
3904 if (properties->length() > 0) {
3906 { MaybeObject* maybe_prop = CopyFixedArray(properties);
3907 if (!maybe_prop->ToObject(&prop)) return maybe_prop;
3909 JSObject::cast(clone)->set_properties(FixedArray::cast(prop), wb_mode);
3911 // Return the new clone.
3916 MaybeObject* Heap::ReinitializeJSReceiver(
3917 JSReceiver* object, InstanceType type, int size) {
3918 ASSERT(type >= FIRST_JS_OBJECT_TYPE);
3920 // Allocate fresh map.
3921 // TODO(rossberg): Once we optimize proxies, cache these maps.
3923 MaybeObject* maybe = AllocateMap(type, size);
3924 if (!maybe->To<Map>(&map)) return maybe;
3926 // Check that the receiver has at least the size of the fresh object.
3927 int size_difference = object->map()->instance_size() - map->instance_size();
3928 ASSERT(size_difference >= 0);
3930 map->set_prototype(object->map()->prototype());
3932 // Allocate the backing storage for the properties.
3933 int prop_size = map->unused_property_fields() - map->inobject_properties();
3935 maybe = AllocateFixedArray(prop_size, TENURED);
3936 if (!maybe->ToObject(&properties)) return maybe;
3938 // Functions require some allocation, which might fail here.
3939 SharedFunctionInfo* shared = NULL;
3940 if (type == JS_FUNCTION_TYPE) {
3942 maybe = LookupAsciiSymbol("<freezing call trap>");
3943 if (!maybe->To<String>(&name)) return maybe;
3944 maybe = AllocateSharedFunctionInfo(name);
3945 if (!maybe->To<SharedFunctionInfo>(&shared)) return maybe;
3948 // Because of possible retries of this function after failure,
3949 // we must NOT fail after this point, where we have changed the type!
3951 // Reset the map for the object.
3952 object->set_map(map);
3953 JSObject* jsobj = JSObject::cast(object);
3955 // Reinitialize the object from the constructor map.
3956 InitializeJSObjectFromMap(jsobj, FixedArray::cast(properties), map);
3958 // Functions require some minimal initialization.
3959 if (type == JS_FUNCTION_TYPE) {
3960 map->set_function_with_prototype(true);
3961 InitializeFunction(JSFunction::cast(object), shared, the_hole_value());
3962 JSFunction::cast(object)->set_context(
3963 isolate()->context()->global_context());
3966 // Put in filler if the new object is smaller than the old.
3967 if (size_difference > 0) {
3968 CreateFillerObjectAt(
3969 object->address() + map->instance_size(), size_difference);
3976 MaybeObject* Heap::ReinitializeJSGlobalProxy(JSFunction* constructor,
3977 JSGlobalProxy* object) {
3978 ASSERT(constructor->has_initial_map());
3979 Map* map = constructor->initial_map();
3981 // Check that the already allocated object has the same size and type as
3982 // objects allocated using the constructor.
3983 ASSERT(map->instance_size() == object->map()->instance_size());
3984 ASSERT(map->instance_type() == object->map()->instance_type());
3986 // Allocate the backing storage for the properties.
3987 int prop_size = map->unused_property_fields() - map->inobject_properties();
3989 { MaybeObject* maybe_properties = AllocateFixedArray(prop_size, TENURED);
3990 if (!maybe_properties->ToObject(&properties)) return maybe_properties;
3993 // Reset the map for the object.
3994 object->set_map(constructor->initial_map());
3996 // Reinitialize the object from the constructor map.
3997 InitializeJSObjectFromMap(object, FixedArray::cast(properties), map);
4002 MaybeObject* Heap::AllocateStringFromAscii(Vector<const char> string,
4003 PretenureFlag pretenure) {
4004 if (string.length() == 1) {
4005 return Heap::LookupSingleCharacterStringFromCode(string[0]);
4008 { MaybeObject* maybe_result =
4009 AllocateRawAsciiString(string.length(), pretenure);
4010 if (!maybe_result->ToObject(&result)) return maybe_result;
4013 // Copy the characters into the new object.
4014 SeqAsciiString* string_result = SeqAsciiString::cast(result);
4015 for (int i = 0; i < string.length(); i++) {
4016 string_result->SeqAsciiStringSet(i, string[i]);
4022 MaybeObject* Heap::AllocateStringFromUtf8Slow(Vector<const char> string,
4023 PretenureFlag pretenure) {
4024 // V8 only supports characters in the Basic Multilingual Plane.
4025 const uc32 kMaxSupportedChar = 0xFFFF;
4026 // Count the number of characters in the UTF-8 string and check if
4027 // it is an ASCII string.
4028 Access<UnicodeCache::Utf8Decoder>
4029 decoder(isolate_->unicode_cache()->utf8_decoder());
4030 decoder->Reset(string.start(), string.length());
4032 while (decoder->has_more()) {
4038 { MaybeObject* maybe_result = AllocateRawTwoByteString(chars, pretenure);
4039 if (!maybe_result->ToObject(&result)) return maybe_result;
4042 // Convert and copy the characters into the new object.
4043 String* string_result = String::cast(result);
4044 decoder->Reset(string.start(), string.length());
4045 for (int i = 0; i < chars; i++) {
4046 uc32 r = decoder->GetNext();
4047 if (r > kMaxSupportedChar) { r = unibrow::Utf8::kBadChar; }
4048 string_result->Set(i, r);
4054 MaybeObject* Heap::AllocateStringFromTwoByte(Vector<const uc16> string,
4055 PretenureFlag pretenure) {
4056 // Check if the string is an ASCII string.
4057 MaybeObject* maybe_result;
4058 if (String::IsAscii(string.start(), string.length())) {
4059 maybe_result = AllocateRawAsciiString(string.length(), pretenure);
4060 } else { // It's not an ASCII string.
4061 maybe_result = AllocateRawTwoByteString(string.length(), pretenure);
4064 if (!maybe_result->ToObject(&result)) return maybe_result;
4066 // Copy the characters into the new object, which may be either ASCII or two byte.
4068 String* string_result = String::cast(result);
4069 for (int i = 0; i < string.length(); i++) {
4070 string_result->Set(i, string[i]);
4076 Map* Heap::SymbolMapForString(String* string) {
4077 // If the string is in new space it cannot be used as a symbol.
4078 if (InNewSpace(string)) return NULL;
4080 // Find the corresponding symbol map for strings.
4081 switch (string->map()->instance_type()) {
4082 case STRING_TYPE: return symbol_map();
4083 case ASCII_STRING_TYPE: return ascii_symbol_map();
4084 case CONS_STRING_TYPE: return cons_symbol_map();
4085 case CONS_ASCII_STRING_TYPE: return cons_ascii_symbol_map();
4086 case EXTERNAL_STRING_TYPE: return external_symbol_map();
4087 case EXTERNAL_ASCII_STRING_TYPE: return external_ascii_symbol_map();
4088 case EXTERNAL_STRING_WITH_ASCII_DATA_TYPE:
4089 return external_symbol_with_ascii_data_map();
4090 case SHORT_EXTERNAL_STRING_TYPE: return short_external_symbol_map();
4091 case SHORT_EXTERNAL_ASCII_STRING_TYPE:
4092 return short_external_ascii_symbol_map();
4093 case SHORT_EXTERNAL_STRING_WITH_ASCII_DATA_TYPE:
4094 return short_external_symbol_with_ascii_data_map();
4095 default: return NULL; // No match found.
4100 MaybeObject* Heap::AllocateInternalSymbol(unibrow::CharacterStream* buffer,
4102 uint32_t hash_field) {
4104 // Ensure that chars matches the number of characters in the buffer.
4105 ASSERT(static_cast<unsigned>(chars) == buffer->Length());
4106 // Determine whether the string is ascii.
4107 bool is_ascii = true;
4108 while (buffer->has_more()) {
4109 if (buffer->GetNext() > unibrow::Utf8::kMaxOneByteChar) {
4116 // Compute map and object size.
4121 if (chars > SeqAsciiString::kMaxLength) {
4122 return Failure::OutOfMemoryException();
4124 map = ascii_symbol_map();
4125 size = SeqAsciiString::SizeFor(chars);
4127 if (chars > SeqTwoByteString::kMaxLength) {
4128 return Failure::OutOfMemoryException();
4131 size = SeqTwoByteString::SizeFor(chars);
4136 { MaybeObject* maybe_result = (size > MaxObjectSizeInPagedSpace())
4137 ? lo_space_->AllocateRaw(size, NOT_EXECUTABLE)
4138 : old_data_space_->AllocateRaw(size);
4139 if (!maybe_result->ToObject(&result)) return maybe_result;
4142 reinterpret_cast<HeapObject*>(result)->set_map_no_write_barrier(map);
4143 // Set length and hash fields of the allocated string.
4144 String* answer = String::cast(result);
4145 answer->set_length(chars);
4146 answer->set_hash_field(hash_field);
4148 ASSERT_EQ(size, answer->Size());
4150 // Fill in the characters.
4151 for (int i = 0; i < chars; i++) {
4152 answer->Set(i, buffer->GetNext());
4158 MaybeObject* Heap::AllocateRawAsciiString(int length, PretenureFlag pretenure) {
4159 if (length < 0 || length > SeqAsciiString::kMaxLength) {
4160 return Failure::OutOfMemoryException();
4163 int size = SeqAsciiString::SizeFor(length);
4164 ASSERT(size <= SeqAsciiString::kMaxSize);
4166 AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
4167 AllocationSpace retry_space = OLD_DATA_SPACE;
4169 if (space == NEW_SPACE) {
4170 if (size > kMaxObjectSizeInNewSpace) {
4171 // Allocate in large object space, retry space will be ignored.
4173 } else if (size > MaxObjectSizeInPagedSpace()) {
4174 // Allocate in new space, retry in large object space.
4175 retry_space = LO_SPACE;
4177 } else if (space == OLD_DATA_SPACE && size > MaxObjectSizeInPagedSpace()) {
4181 { MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
4182 if (!maybe_result->ToObject(&result)) return maybe_result;
4185 // Partially initialize the object.
4186 HeapObject::cast(result)->set_map_no_write_barrier(ascii_string_map());
4187 String::cast(result)->set_length(length);
4188 String::cast(result)->set_hash_field(String::kEmptyHashField);
4189 ASSERT_EQ(size, HeapObject::cast(result)->Size());
4194 MaybeObject* Heap::AllocateRawTwoByteString(int length,
4195 PretenureFlag pretenure) {
4196 if (length < 0 || length > SeqTwoByteString::kMaxLength) {
4197 return Failure::OutOfMemoryException();
4199 int size = SeqTwoByteString::SizeFor(length);
4200 ASSERT(size <= SeqTwoByteString::kMaxSize);
4201 AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
4202 AllocationSpace retry_space = OLD_DATA_SPACE;
4204 if (space == NEW_SPACE) {
4205 if (size > kMaxObjectSizeInNewSpace) {
4206 // Allocate in large object space, retry space will be ignored.
4208 } else if (size > MaxObjectSizeInPagedSpace()) {
4209 // Allocate in new space, retry in large object space.
4210 retry_space = LO_SPACE;
4212 } else if (space == OLD_DATA_SPACE && size > MaxObjectSizeInPagedSpace()) {
4216 { MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
4217 if (!maybe_result->ToObject(&result)) return maybe_result;
4220 // Partially initialize the object.
4221 HeapObject::cast(result)->set_map_no_write_barrier(string_map());
4222 String::cast(result)->set_length(length);
4223 String::cast(result)->set_hash_field(String::kEmptyHashField);
4224 ASSERT_EQ(size, HeapObject::cast(result)->Size());
4229 MaybeObject* Heap::AllocateEmptyFixedArray() {
4230 int size = FixedArray::SizeFor(0);
4232 { MaybeObject* maybe_result =
4233 AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE);
4234 if (!maybe_result->ToObject(&result)) return maybe_result;
4236 // Initialize the object.
4237 reinterpret_cast<FixedArray*>(result)->set_map_no_write_barrier(
4239 reinterpret_cast<FixedArray*>(result)->set_length(0);
4244 MaybeObject* Heap::AllocateRawFixedArray(int length) {
4245 if (length < 0 || length > FixedArray::kMaxLength) {
4246 return Failure::OutOfMemoryException();
4249 // Use the general function if we're forced to always allocate.
4250 if (always_allocate()) return AllocateFixedArray(length, TENURED);
4251 // Allocate the raw data for a fixed array.
4252 int size = FixedArray::SizeFor(length);
4253 return size <= kMaxObjectSizeInNewSpace
4254 ? new_space_.AllocateRaw(size)
4255 : lo_space_->AllocateRaw(size, NOT_EXECUTABLE);
4259 MaybeObject* Heap::CopyFixedArrayWithMap(FixedArray* src, Map* map) {
4260 int len = src->length();
4262 { MaybeObject* maybe_obj = AllocateRawFixedArray(len);
4263 if (!maybe_obj->ToObject(&obj)) return maybe_obj;
4265 if (InNewSpace(obj)) {
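// The clone lives in new space, so no write barriers are needed; copy
// everything after the map word in one block.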
4266 HeapObject* dst = HeapObject::cast(obj);
4267 dst->set_map_no_write_barrier(map);
4268 CopyBlock(dst->address() + kPointerSize,
4269 src->address() + kPointerSize,
4270 FixedArray::SizeFor(len) - kPointerSize);
4273 HeapObject::cast(obj)->set_map_no_write_barrier(map);
4274 FixedArray* result = FixedArray::cast(obj);
4275 result->set_length(len);
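// The clone is not in new space here, so copy element by element and let
// GetWriteBarrierMode decide whether write barriers are required.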
4278 AssertNoAllocation no_gc;
4279 WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
4280 for (int i = 0; i < len; i++) result->set(i, src->get(i), mode);
4285 MaybeObject* Heap::CopyFixedDoubleArrayWithMap(FixedDoubleArray* src,
4287 int len = src->length();
4289 { MaybeObject* maybe_obj = AllocateRawFixedDoubleArray(len, NOT_TENURED);
4290 if (!maybe_obj->ToObject(&obj)) return maybe_obj;
4292 HeapObject* dst = HeapObject::cast(obj);
4293 dst->set_map_no_write_barrier(map);
4295 dst->address() + FixedDoubleArray::kLengthOffset,
4296 src->address() + FixedDoubleArray::kLengthOffset,
4297 FixedDoubleArray::SizeFor(len) - FixedDoubleArray::kLengthOffset);
4302 MaybeObject* Heap::AllocateFixedArray(int length) {
4303 ASSERT(length >= 0);
4304 if (length == 0) return empty_fixed_array();
4306 { MaybeObject* maybe_result = AllocateRawFixedArray(length);
4307 if (!maybe_result->ToObject(&result)) return maybe_result;
4309 // Initialize header.
4310 FixedArray* array = reinterpret_cast<FixedArray*>(result);
4311 array->set_map_no_write_barrier(fixed_array_map());
4312 array->set_length(length);
4314 ASSERT(!InNewSpace(undefined_value()));
4315 MemsetPointer(array->data_start(), undefined_value(), length);
4320 MaybeObject* Heap::AllocateRawFixedArray(int length, PretenureFlag pretenure) {
4321 if (length < 0 || length > FixedArray::kMaxLength) {
4322 return Failure::OutOfMemoryException();
4325 AllocationSpace space =
4326 (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
4327 int size = FixedArray::SizeFor(length);
4328 if (space == NEW_SPACE && size > kMaxObjectSizeInNewSpace) {
4329 // Too big for new space.
4331 } else if (space == OLD_POINTER_SPACE &&
4332 size > MaxObjectSizeInPagedSpace()) {
4333 // Too big for old pointer space.
4337 AllocationSpace retry_space =
4338 (size <= MaxObjectSizeInPagedSpace()) ? OLD_POINTER_SPACE : LO_SPACE;
4340 return AllocateRaw(size, space, retry_space);
4344 MUST_USE_RESULT static MaybeObject* AllocateFixedArrayWithFiller(
4347 PretenureFlag pretenure,
4349 ASSERT(length >= 0);
4350 ASSERT(heap->empty_fixed_array()->IsFixedArray());
4351 if (length == 0) return heap->empty_fixed_array();
4353 ASSERT(!heap->InNewSpace(filler));
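// The filler is written with MemsetPointer below, without write barriers,
// which is only safe because the filler is not a new-space object.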
4355 { MaybeObject* maybe_result = heap->AllocateRawFixedArray(length, pretenure);
4356 if (!maybe_result->ToObject(&result)) return maybe_result;
4359 HeapObject::cast(result)->set_map_no_write_barrier(heap->fixed_array_map());
4360 FixedArray* array = FixedArray::cast(result);
4361 array->set_length(length);
4362 MemsetPointer(array->data_start(), filler, length);
4367 MaybeObject* Heap::AllocateFixedArray(int length, PretenureFlag pretenure) {
4368 return AllocateFixedArrayWithFiller(this,
4375 MaybeObject* Heap::AllocateFixedArrayWithHoles(int length,
4376 PretenureFlag pretenure) {
4377 return AllocateFixedArrayWithFiller(this,
4384 MaybeObject* Heap::AllocateUninitializedFixedArray(int length) {
4385 if (length == 0) return empty_fixed_array();
4388 { MaybeObject* maybe_obj = AllocateRawFixedArray(length);
4389 if (!maybe_obj->ToObject(&obj)) return maybe_obj;
4392 reinterpret_cast<FixedArray*>(obj)->set_map_no_write_barrier(
4394 FixedArray::cast(obj)->set_length(length);
4399 MaybeObject* Heap::AllocateEmptyFixedDoubleArray() {
4400 int size = FixedDoubleArray::SizeFor(0);
4402 { MaybeObject* maybe_result =
4403 AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE);
4404 if (!maybe_result->ToObject(&result)) return maybe_result;
4406 // Initialize the object.
4407 reinterpret_cast<FixedDoubleArray*>(result)->set_map_no_write_barrier(
4408 fixed_double_array_map());
4409 reinterpret_cast<FixedDoubleArray*>(result)->set_length(0);
4414 MaybeObject* Heap::AllocateUninitializedFixedDoubleArray(
4416 PretenureFlag pretenure) {
4417 if (length == 0) return empty_fixed_double_array();
4420 { MaybeObject* maybe_obj = AllocateRawFixedDoubleArray(length, pretenure);
4421 if (!maybe_obj->ToObject(&obj)) return maybe_obj;
4424 reinterpret_cast<FixedDoubleArray*>(obj)->set_map_no_write_barrier(
4425 fixed_double_array_map());
4426 FixedDoubleArray::cast(obj)->set_length(length);
4431 MaybeObject* Heap::AllocateRawFixedDoubleArray(int length,
4432 PretenureFlag pretenure) {
4433 if (length < 0 || length > FixedDoubleArray::kMaxLength) {
4434 return Failure::OutOfMemoryException();
4437 AllocationSpace space =
4438 (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
4439 int size = FixedDoubleArray::SizeFor(length);
4440 if (space == NEW_SPACE && size > kMaxObjectSizeInNewSpace) {
4441 // Too big for new space.
4443 } else if (space == OLD_DATA_SPACE &&
4444 size > MaxObjectSizeInPagedSpace()) {
4445 // Too big for old data space.
4449 AllocationSpace retry_space =
4450 (size <= MaxObjectSizeInPagedSpace()) ? OLD_DATA_SPACE : LO_SPACE;
4452 return AllocateRaw(size, space, retry_space);
4456 MaybeObject* Heap::AllocateHashTable(int length, PretenureFlag pretenure) {
4458 { MaybeObject* maybe_result = AllocateFixedArray(length, pretenure);
4459 if (!maybe_result->ToObject(&result)) return maybe_result;
4461 reinterpret_cast<HeapObject*>(result)->set_map_no_write_barrier(
4463 ASSERT(result->IsHashTable());
4468 MaybeObject* Heap::AllocateGlobalContext() {
4470 { MaybeObject* maybe_result =
4471 AllocateFixedArray(Context::GLOBAL_CONTEXT_SLOTS);
4472 if (!maybe_result->ToObject(&result)) return maybe_result;
4474 Context* context = reinterpret_cast<Context*>(result);
4475 context->set_map_no_write_barrier(global_context_map());
4476 ASSERT(context->IsGlobalContext());
4477 ASSERT(result->IsContext());
4482 MaybeObject* Heap::AllocateFunctionContext(int length, JSFunction* function) {
4483 ASSERT(length >= Context::MIN_CONTEXT_SLOTS);
4485 { MaybeObject* maybe_result = AllocateFixedArray(length);
4486 if (!maybe_result->ToObject(&result)) return maybe_result;
4488 Context* context = reinterpret_cast<Context*>(result);
4489 context->set_map_no_write_barrier(function_context_map());
4490 context->set_closure(function);
4491 context->set_previous(function->context());
4492 context->set_extension(NULL);
4493 context->set_global(function->context()->global());
4498 MaybeObject* Heap::AllocateCatchContext(JSFunction* function,
4501 Object* thrown_object) {
4502 STATIC_ASSERT(Context::MIN_CONTEXT_SLOTS == Context::THROWN_OBJECT_INDEX);
4504 { MaybeObject* maybe_result =
4505 AllocateFixedArray(Context::MIN_CONTEXT_SLOTS + 1);
4506 if (!maybe_result->ToObject(&result)) return maybe_result;
4508 Context* context = reinterpret_cast<Context*>(result);
4509 context->set_map_no_write_barrier(catch_context_map());
4510 context->set_closure(function);
4511 context->set_previous(previous);
4512 context->set_extension(name);
4513 context->set_global(previous->global());
4514 context->set(Context::THROWN_OBJECT_INDEX, thrown_object);
4519 MaybeObject* Heap::AllocateWithContext(JSFunction* function,
4521 JSObject* extension) {
4523 { MaybeObject* maybe_result = AllocateFixedArray(Context::MIN_CONTEXT_SLOTS);
4524 if (!maybe_result->ToObject(&result)) return maybe_result;
4526 Context* context = reinterpret_cast<Context*>(result);
4527 context->set_map_no_write_barrier(with_context_map());
4528 context->set_closure(function);
4529 context->set_previous(previous);
4530 context->set_extension(extension);
4531 context->set_global(previous->global());
4536 MaybeObject* Heap::AllocateBlockContext(JSFunction* function,
4538 ScopeInfo* scope_info) {
4540 { MaybeObject* maybe_result =
4541 AllocateFixedArrayWithHoles(scope_info->ContextLength());
4542 if (!maybe_result->ToObject(&result)) return maybe_result;
4544 Context* context = reinterpret_cast<Context*>(result);
4545 context->set_map_no_write_barrier(block_context_map());
4546 context->set_closure(function);
4547 context->set_previous(previous);
4548 context->set_extension(scope_info);
4549 context->set_global(previous->global());
4554 MaybeObject* Heap::AllocateScopeInfo(int length) {
4555 FixedArray* scope_info;
4556 MaybeObject* maybe_scope_info = AllocateFixedArray(length, TENURED);
4557 if (!maybe_scope_info->To(&scope_info)) return maybe_scope_info;
4558 scope_info->set_map_no_write_barrier(scope_info_map());
4563 MaybeObject* Heap::AllocateStruct(InstanceType type) {
4566 #define MAKE_CASE(NAME, Name, name) \
4567 case NAME##_TYPE: map = name##_map(); break;
4568 STRUCT_LIST(MAKE_CASE)
4572 return Failure::InternalError();
4574 int size = map->instance_size();
4575 AllocationSpace space =
4576 (size > MaxObjectSizeInPagedSpace()) ? LO_SPACE : OLD_POINTER_SPACE;
4578 { MaybeObject* maybe_result = Allocate(map, space);
4579 if (!maybe_result->ToObject(&result)) return maybe_result;
4581 Struct::cast(result)->InitializeBody(size);
4586 bool Heap::IsHeapIterable() {
4587 return (!old_pointer_space()->was_swept_conservatively() &&
4588 !old_data_space()->was_swept_conservatively());
4592 void Heap::EnsureHeapIsIterable() {
4593 ASSERT(IsAllocationAllowed());
4594 if (!IsHeapIterable()) {
4595 CollectAllGarbage(kMakeHeapIterableMask);
4597 ASSERT(IsHeapIterable());
4601 bool Heap::IdleNotification(int hint) {
4602 if (hint >= 1000) return IdleGlobalGC();
4603 if (contexts_disposed_ > 0 || !FLAG_incremental_marking ||
4604 FLAG_expose_gc || Serializer::enabled()) {
4608 // By doing small chunks of GC work in each IdleNotification,
4609 // we perform a round of incremental GCs and after that wait until
4610 // the mutator creates enough garbage to justify a new round.
4611 // An incremental GC progresses as follows:
4612 // 1. many incremental marking steps,
4613 // 2. one old space mark-sweep-compact,
4614 // 3. many lazy sweep steps.
4615 // Use mark-sweep-compact events to count incremental GCs in a round.
4617 intptr_t size_factor = Min(Max(hint, 30), 1000) / 10;
4618 // The size factor is in range [3..100].
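// For example, hint == 100 gives Min(Max(100, 30), 1000) / 10 == 10, and
// hint == 10 gives Min(Max(10, 30), 1000) / 10 == 3.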
4619 intptr_t step_size = size_factor * IncrementalMarking::kAllocatedThreshold;
4621 if (incremental_marking()->IsStopped()) {
4622 if (!IsSweepingComplete() &&
4623 !AdvanceSweepers(static_cast<int>(step_size))) {
4628 if (mark_sweeps_since_idle_round_started_ >= kMaxMarkSweepsInIdleRound) {
4629 if (EnoughGarbageSinceLastIdleRound()) {
4636 int new_mark_sweeps = ms_count_ - ms_count_at_last_idle_notification_;
4637 mark_sweeps_since_idle_round_started_ += new_mark_sweeps;
4638 ms_count_at_last_idle_notification_ = ms_count_;
4640 if (mark_sweeps_since_idle_round_started_ >= kMaxMarkSweepsInIdleRound) {
4645 if (incremental_marking()->IsStopped()) {
4646 if (hint < 1000 && !WorthStartingGCWhenIdle()) {
4650 incremental_marking()->Start();
4653 // This flag prevents incremental marking from requesting GC via stack guard
4654 idle_notification_will_schedule_next_gc_ = true;
4655 incremental_marking()->Step(step_size);
4656 idle_notification_will_schedule_next_gc_ = false;
4658 if (incremental_marking()->IsComplete()) {
4659 bool uncommit = false;
4660 if (gc_count_at_last_idle_gc_ == gc_count_) {
4661 // No GC since the last full GC, the mutator is probably not active.
4662 isolate_->compilation_cache()->Clear();
4665 CollectAllGarbage(kNoGCFlags);
4666 gc_count_at_last_idle_gc_ = gc_count_;
4668 new_space_.Shrink();
4669 UncommitFromSpace();
4676 bool Heap::IdleGlobalGC() {
4677 static const int kIdlesBeforeScavenge = 4;
4678 static const int kIdlesBeforeMarkSweep = 7;
4679 static const int kIdlesBeforeMarkCompact = 8;
4680 static const int kMaxIdleCount = kIdlesBeforeMarkCompact + 1;
4681 static const unsigned int kGCsBetweenCleanup = 4;
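// These thresholds escalate the idle-time cleanup: after kIdlesBeforeScavenge
// consecutive idle notifications a scavenge (or a full GC if contexts were
// disposed) is performed, after kIdlesBeforeMarkSweep a full collection with a
// cleared compilation cache, and after kIdlesBeforeMarkCompact one final full
// collection before the notification counter is reset.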
4683 if (!last_idle_notification_gc_count_init_) {
4684 last_idle_notification_gc_count_ = gc_count_;
4685 last_idle_notification_gc_count_init_ = true;
4688 bool uncommit = true;
4689 bool finished = false;
4691 // Reset the number of idle notifications received when a number of
4692 // GCs have taken place. This allows another round of cleanup based
4693 // on idle notifications if enough work has been carried out to
4694 // provoke a number of garbage collections.
4695 if (gc_count_ - last_idle_notification_gc_count_ < kGCsBetweenCleanup) {
4696 number_idle_notifications_ =
4697 Min(number_idle_notifications_ + 1, kMaxIdleCount);
4699 number_idle_notifications_ = 0;
4700 last_idle_notification_gc_count_ = gc_count_;
4703 if (number_idle_notifications_ == kIdlesBeforeScavenge) {
4704 if (contexts_disposed_ > 0) {
4705 HistogramTimerScope scope(isolate_->counters()->gc_context());
4706 CollectAllGarbage(kNoGCFlags);
4708 CollectGarbage(NEW_SPACE);
4710 new_space_.Shrink();
4711 last_idle_notification_gc_count_ = gc_count_;
4712 } else if (number_idle_notifications_ == kIdlesBeforeMarkSweep) {
4713 // Before doing the mark-sweep collections we clear the
4714 // compilation cache to avoid hanging on to source code and
4715 // generated code for cached functions.
4716 isolate_->compilation_cache()->Clear();
4718 CollectAllGarbage(kNoGCFlags);
4719 new_space_.Shrink();
4720 last_idle_notification_gc_count_ = gc_count_;
4722 } else if (number_idle_notifications_ == kIdlesBeforeMarkCompact) {
4723 CollectAllGarbage(kNoGCFlags);
4724 new_space_.Shrink();
4725 last_idle_notification_gc_count_ = gc_count_;
4726 number_idle_notifications_ = 0;
4728 } else if (contexts_disposed_ > 0) {
4729 if (FLAG_expose_gc) {
4730 contexts_disposed_ = 0;
4732 HistogramTimerScope scope(isolate_->counters()->gc_context());
4733 CollectAllGarbage(kNoGCFlags);
4734 last_idle_notification_gc_count_ = gc_count_;
4736 // If this is the first idle notification, we reset the
4737 // notification count to avoid letting idle notifications for
4738 // context disposal garbage collections start a potentially too
4739 // aggressive idle GC cycle.
4740 if (number_idle_notifications_ <= 1) {
4741 number_idle_notifications_ = 0;
4744 } else if (number_idle_notifications_ > kIdlesBeforeMarkCompact) {
4745 // If we have received more than kIdlesBeforeMarkCompact idle
4746 // notifications we do not perform any cleanup because we don't
4747 // expect to gain much by doing so.
4751 // Make sure that we have no pending context disposals and
4752 // conditionally uncommit from space.
4753 // Take into account that we might have decided to delay full collection
4754 // because incremental marking is in progress.
4755 ASSERT((contexts_disposed_ == 0) || !incremental_marking()->IsStopped());
4756 if (uncommit) UncommitFromSpace();
4764 void Heap::Print() {
4765 if (!HasBeenSetUp()) return;
4766 isolate()->PrintStack();
4768 for (Space* space = spaces.next(); space != NULL; space = spaces.next())
4773 void Heap::ReportCodeStatistics(const char* title) {
4774 PrintF(">>>>>> Code Stats (%s) >>>>>>\n", title);
4775 PagedSpace::ResetCodeStatistics();
4776 // We do not look for code in new space, map space, or old space. If code
4777 // somehow ends up in those spaces, we would miss it here.
4778 code_space_->CollectCodeStatistics();
4779 lo_space_->CollectCodeStatistics();
4780 PagedSpace::ReportCodeStatistics();
4784 // This function expects that NewSpace's allocated objects histogram is
4785 // populated (via a call to CollectStatistics or else as a side effect of a
4786 // just-completed scavenge collection).
4787 void Heap::ReportHeapStatistics(const char* title) {
4789 PrintF(">>>>>> =============== %s (%d) =============== >>>>>>\n",
4791 PrintF("old_gen_promotion_limit_ %" V8_PTR_PREFIX "d\n",
4792 old_gen_promotion_limit_);
4793 PrintF("old_gen_allocation_limit_ %" V8_PTR_PREFIX "d\n",
4794 old_gen_allocation_limit_);
4795 PrintF("old_gen_limit_factor_ %d\n", old_gen_limit_factor_);
4798 PrintF("Number of handles : %d\n", HandleScope::NumberOfHandles());
4799 isolate_->global_handles()->PrintStats();
4802 PrintF("Heap statistics : ");
4803 isolate_->memory_allocator()->ReportStatistics();
4804 PrintF("To space : ");
4805 new_space_.ReportStatistics();
4806 PrintF("Old pointer space : ");
4807 old_pointer_space_->ReportStatistics();
4808 PrintF("Old data space : ");
4809 old_data_space_->ReportStatistics();
4810 PrintF("Code space : ");
4811 code_space_->ReportStatistics();
4812 PrintF("Map space : ");
4813 map_space_->ReportStatistics();
4814 PrintF("Cell space : ");
4815 cell_space_->ReportStatistics();
4816 PrintF("Large object space : ");
4817 lo_space_->ReportStatistics();
4818 PrintF(">>>>>> ========================================= >>>>>>\n");
4823 bool Heap::Contains(HeapObject* value) {
4824 return Contains(value->address());
4828 bool Heap::Contains(Address addr) {
4829 if (OS::IsOutsideAllocatedSpace(addr)) return false;
4830 return HasBeenSetUp() &&
4831 (new_space_.ToSpaceContains(addr) ||
4832 old_pointer_space_->Contains(addr) ||
4833 old_data_space_->Contains(addr) ||
4834 code_space_->Contains(addr) ||
4835 map_space_->Contains(addr) ||
4836 cell_space_->Contains(addr) ||
4837 lo_space_->SlowContains(addr));
4841 bool Heap::InSpace(HeapObject* value, AllocationSpace space) {
4842 return InSpace(value->address(), space);
4846 bool Heap::InSpace(Address addr, AllocationSpace space) {
4847 if (OS::IsOutsideAllocatedSpace(addr)) return false;
4848 if (!HasBeenSetUp()) return false;
4852 return new_space_.ToSpaceContains(addr);
4853 case OLD_POINTER_SPACE:
4854 return old_pointer_space_->Contains(addr);
4855 case OLD_DATA_SPACE:
4856 return old_data_space_->Contains(addr);
4858 return code_space_->Contains(addr);
4860 return map_space_->Contains(addr);
4862 return cell_space_->Contains(addr);
4864 return lo_space_->SlowContains(addr);
4872 void Heap::Verify() {
4873 ASSERT(HasBeenSetUp());
4875 store_buffer()->Verify();
4877 VerifyPointersVisitor visitor;
4878 IterateRoots(&visitor, VISIT_ONLY_STRONG);
4880 new_space_.Verify();
4882 old_pointer_space_->Verify(&visitor);
4883 map_space_->Verify(&visitor);
4885 VerifyPointersVisitor no_dirty_regions_visitor;
4886 old_data_space_->Verify(&no_dirty_regions_visitor);
4887 code_space_->Verify(&no_dirty_regions_visitor);
4888 cell_space_->Verify(&no_dirty_regions_visitor);
4890 lo_space_->Verify();
4896 MaybeObject* Heap::LookupSymbol(Vector<const char> string) {
4897 Object* symbol = NULL;
4899 { MaybeObject* maybe_new_table =
4900 symbol_table()->LookupSymbol(string, &symbol);
4901 if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
4903 // Can't use set_symbol_table because SymbolTable::cast knows that
4904 // SymbolTable is a singleton and checks for identity.
4905 roots_[kSymbolTableRootIndex] = new_table;
4906 ASSERT(symbol != NULL);
4911 MaybeObject* Heap::LookupAsciiSymbol(Vector<const char> string) {
4912 Object* symbol = NULL;
4914 { MaybeObject* maybe_new_table =
4915 symbol_table()->LookupAsciiSymbol(string, &symbol);
4916 if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
4918 // Can't use set_symbol_table because SymbolTable::cast knows that
4919 // SymbolTable is a singleton and checks for identity.
4920 roots_[kSymbolTableRootIndex] = new_table;
4921 ASSERT(symbol != NULL);
4926 MaybeObject* Heap::LookupAsciiSymbol(Handle<SeqAsciiString> string,
4929 Object* symbol = NULL;
4931 { MaybeObject* maybe_new_table =
4932 symbol_table()->LookupSubStringAsciiSymbol(string,
4936 if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
4938 // Can't use set_symbol_table because SymbolTable::cast knows that
4939 // SymbolTable is a singleton and checks for identity.
4940 roots_[kSymbolTableRootIndex] = new_table;
4941 ASSERT(symbol != NULL);
4946 MaybeObject* Heap::LookupTwoByteSymbol(Vector<const uc16> string) {
4947 Object* symbol = NULL;
4949 { MaybeObject* maybe_new_table =
4950 symbol_table()->LookupTwoByteSymbol(string, &symbol);
4951 if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
4953 // Can't use set_symbol_table because SymbolTable::cast knows that
4954 // SymbolTable is a singleton and checks for identity.
4955 roots_[kSymbolTableRootIndex] = new_table;
4956 ASSERT(symbol != NULL);
4961 MaybeObject* Heap::LookupSymbol(String* string) {
4962 if (string->IsSymbol()) return string;
4963 Object* symbol = NULL;
4965 { MaybeObject* maybe_new_table =
4966 symbol_table()->LookupString(string, &symbol);
4967 if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
4969 // Can't use set_symbol_table because SymbolTable::cast knows that
4970 // SymbolTable is a singleton and checks for identity.
4971 roots_[kSymbolTableRootIndex] = new_table;
4972 ASSERT(symbol != NULL);
4977 bool Heap::LookupSymbolIfExists(String* string, String** symbol) {
4978 if (string->IsSymbol()) {
4982 return symbol_table()->LookupSymbolIfExists(string, symbol);
4987 void Heap::ZapFromSpace() {
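// Overwrite the evacuated from-space with kFromSpaceZapValue so that any
// stale pointer into it is easy to recognize during debugging/verification.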
4988 NewSpacePageIterator it(new_space_.FromSpaceStart(),
4989 new_space_.FromSpaceEnd());
4990 while (it.has_next()) {
4991 NewSpacePage* page = it.next();
4992 for (Address cursor = page->body(), limit = page->body_limit();
4994 cursor += kPointerSize) {
4995 Memory::Address_at(cursor) = kFromSpaceZapValue;
5002 void Heap::IterateAndMarkPointersToFromSpace(Address start,
5004 ObjectSlotCallback callback) {
5005 Address slot_address = start;
5007 // We are not collecting slots on new space objects during mutation,
5008 // thus we have to scan for pointers to evacuation candidates when we
5009 // promote objects. But we should not record any slots in non-black
5010 // objects. A grey object's slots would be rescanned anyway.
5011 // A white object might not survive until the end of the collection, so
5012 // it would be a violation of the invariant to record its slots.
5013 bool record_slots = false;
5014 if (incremental_marking()->IsCompacting()) {
5015 MarkBit mark_bit = Marking::MarkBitFrom(HeapObject::FromAddress(start));
5016 record_slots = Marking::IsBlack(mark_bit);
5019 while (slot_address < end) {
5020 Object** slot = reinterpret_cast<Object**>(slot_address);
5021 Object* object = *slot;
5022 // If the store buffer becomes overfull we mark pages as being exempt from
5023 // the store buffer. These pages are scanned to find pointers that point
5024 // to the new space. In that case we may hit newly promoted objects and
5025 // fix the pointers before the promotion queue gets to them. Thus the 'if'.
5026 if (object->IsHeapObject()) {
5027 if (Heap::InFromSpace(object)) {
5028 callback(reinterpret_cast<HeapObject**>(slot),
5029 HeapObject::cast(object));
5030 Object* new_object = *slot;
5031 if (InNewSpace(new_object)) {
5032 SLOW_ASSERT(Heap::InToSpace(new_object));
5033 SLOW_ASSERT(new_object->IsHeapObject());
5034 store_buffer_.EnterDirectlyIntoStoreBuffer(
5035 reinterpret_cast<Address>(slot));
5037 SLOW_ASSERT(!MarkCompactCollector::IsOnEvacuationCandidate(new_object));
5038 } else if (record_slots &&
5039 MarkCompactCollector::IsOnEvacuationCandidate(object)) {
5040 mark_compact_collector()->RecordSlot(slot, slot, object);
5043 slot_address += kPointerSize;
5049 typedef bool (*CheckStoreBufferFilter)(Object** addr);
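// Filter used when checking the map space below: assuming maps are laid out
// at Map::kSize intervals, the address modulo Map::kSize gives the offset of
// the slot within its Map, and only offsets inside the pointer-field range
// can legitimately hold tagged pointers.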
5052 bool IsAMapPointerAddress(Object** addr) {
5053 uintptr_t a = reinterpret_cast<uintptr_t>(addr);
5054 int mod = a % Map::kSize;
5055 return mod >= Map::kPointerFieldsBeginOffset &&
5056 mod < Map::kPointerFieldsEndOffset;
5060 bool EverythingsAPointer(Object** addr) {
5065 static void CheckStoreBuffer(Heap* heap,
5068 Object**** store_buffer_position,
5069 Object*** store_buffer_top,
5070 CheckStoreBufferFilter filter,
5071 Address special_garbage_start,
5072 Address special_garbage_end) {
5073 Map* free_space_map = heap->free_space_map();
5074 for ( ; current < limit; current++) {
5075 Object* o = *current;
5076 Address current_address = reinterpret_cast<Address>(current);
5078 if (o == free_space_map) {
5079 Address current_address = reinterpret_cast<Address>(current);
5080 FreeSpace* free_space =
5081 FreeSpace::cast(HeapObject::FromAddress(current_address));
5082 int skip = free_space->Size();
5083 ASSERT(current_address + skip <= reinterpret_cast<Address>(limit));
5085 current_address += skip - kPointerSize;
5086 current = reinterpret_cast<Object**>(current_address);
5089 // Skip the current linear allocation space between top and limit which is
5090 // unmarked with the free space map, but can contain junk.
5091 if (current_address == special_garbage_start &&
5092 special_garbage_end != special_garbage_start) {
5093 current_address = special_garbage_end - kPointerSize;
5094 current = reinterpret_cast<Object**>(current_address);
5097 if (!(*filter)(current)) continue;
5098 ASSERT(current_address < special_garbage_start ||
5099 current_address >= special_garbage_end);
5100 ASSERT(reinterpret_cast<uintptr_t>(o) != kFreeListZapValue);
5101 // We have to check that the pointer does not point into new space
5102 // without trying to cast it to a heap object since the hash field of
5103 // a string can contain values like 1 and 3 which are tagged null pointers.
5105 if (!heap->InNewSpace(o)) continue;
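// The store buffer is walked in the same ascending address order as the page
// (the callers sort and deduplicate it via SortUniq first), so a slot holding
// a new-space pointer should be found at the current buffer position.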
5106 while (**store_buffer_position < current &&
5107 *store_buffer_position < store_buffer_top) {
5108 (*store_buffer_position)++;
5110 if (**store_buffer_position != current ||
5111 *store_buffer_position == store_buffer_top) {
5112 Object** obj_start = current;
5113 while (!(*obj_start)->IsMap()) obj_start--;
5120 // Check that the store buffer contains all intergenerational pointers by
5121 // scanning a page and ensuring that all pointers to young space are in the store buffer.
5123 void Heap::OldPointerSpaceCheckStoreBuffer() {
5124 OldSpace* space = old_pointer_space();
5125 PageIterator pages(space);
5127 store_buffer()->SortUniq();
5129 while (pages.has_next()) {
5130 Page* page = pages.next();
5131 Object** current = reinterpret_cast<Object**>(page->ObjectAreaStart());
5133 Address end = page->ObjectAreaEnd();
5135 Object*** store_buffer_position = store_buffer()->Start();
5136 Object*** store_buffer_top = store_buffer()->Top();
5138 Object** limit = reinterpret_cast<Object**>(end);
5139 CheckStoreBuffer(this,
5142 &store_buffer_position,
5144 &EverythingsAPointer,
5151 void Heap::MapSpaceCheckStoreBuffer() {
5152 MapSpace* space = map_space();
5153 PageIterator pages(space);
5155 store_buffer()->SortUniq();
5157 while (pages.has_next()) {
5158 Page* page = pages.next();
5159 Object** current = reinterpret_cast<Object**>(page->ObjectAreaStart());
5161 Address end = page->ObjectAreaEnd();
5163 Object*** store_buffer_position = store_buffer()->Start();
5164 Object*** store_buffer_top = store_buffer()->Top();
5166 Object** limit = reinterpret_cast<Object**>(end);
5167 CheckStoreBuffer(this,
5170 &store_buffer_position,
5172 &IsAMapPointerAddress,
5179 void Heap::LargeObjectSpaceCheckStoreBuffer() {
5180 LargeObjectIterator it(lo_space());
5181 for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
5182 // We only have code, sequential strings, or fixed arrays in large
5183 // object space, and only fixed arrays can possibly contain pointers to
5184 // the young generation.
5185 if (object->IsFixedArray()) {
5186 Object*** store_buffer_position = store_buffer()->Start();
5187 Object*** store_buffer_top = store_buffer()->Top();
5188 Object** current = reinterpret_cast<Object**>(object->address());
5190 reinterpret_cast<Object**>(object->address() + object->Size());
5191 CheckStoreBuffer(this,
5194 &store_buffer_position,
5196 &EverythingsAPointer,
5205 void Heap::IterateRoots(ObjectVisitor* v, VisitMode mode) {
5206 IterateStrongRoots(v, mode);
5207 IterateWeakRoots(v, mode);
5211 void Heap::IterateWeakRoots(ObjectVisitor* v, VisitMode mode) {
5212 v->VisitPointer(reinterpret_cast<Object**>(&roots_[kSymbolTableRootIndex]));
5213 v->Synchronize(VisitorSynchronization::kSymbolTable);
5214 if (mode != VISIT_ALL_IN_SCAVENGE &&
5215 mode != VISIT_ALL_IN_SWEEP_NEWSPACE) {
5216 // Scavenge collections have special processing for this.
5217 external_string_table_.Iterate(v);
5219 v->Synchronize(VisitorSynchronization::kExternalStringsTable);
5223 void Heap::IterateStrongRoots(ObjectVisitor* v, VisitMode mode) {
5224 v->VisitPointers(&roots_[0], &roots_[kStrongRootListLength]);
5225 v->Synchronize(VisitorSynchronization::kStrongRootList);
5227 v->VisitPointer(BitCast<Object**>(&hidden_symbol_));
5228 v->Synchronize(VisitorSynchronization::kSymbol);
5230 isolate_->bootstrapper()->Iterate(v);
5231 v->Synchronize(VisitorSynchronization::kBootstrapper);
5232 isolate_->Iterate(v);
5233 v->Synchronize(VisitorSynchronization::kTop);
5234 Relocatable::Iterate(v);
5235 v->Synchronize(VisitorSynchronization::kRelocatable);
5237 #ifdef ENABLE_DEBUGGER_SUPPORT
5238 isolate_->debug()->Iterate(v);
5239 if (isolate_->deoptimizer_data() != NULL) {
5240 isolate_->deoptimizer_data()->Iterate(v);
5243 v->Synchronize(VisitorSynchronization::kDebug);
5244 isolate_->compilation_cache()->Iterate(v);
5245 v->Synchronize(VisitorSynchronization::kCompilationCache);
5247 // Iterate over local handles in handle scopes.
5248 isolate_->handle_scope_implementer()->Iterate(v);
5249 v->Synchronize(VisitorSynchronization::kHandleScope);
5251 // Iterate over the builtin code objects and code stubs in the
5252 // heap. Note that it is not necessary to iterate over code objects
5253 // on scavenge collections.
5254 if (mode != VISIT_ALL_IN_SCAVENGE) {
5255 isolate_->builtins()->IterateBuiltins(v);
5257 v->Synchronize(VisitorSynchronization::kBuiltins);
5259 // Iterate over global handles.
5261 case VISIT_ONLY_STRONG:
5262 isolate_->global_handles()->IterateStrongRoots(v);
5264 case VISIT_ALL_IN_SCAVENGE:
5265 isolate_->global_handles()->IterateNewSpaceStrongAndDependentRoots(v);
5267 case VISIT_ALL_IN_SWEEP_NEWSPACE:
5269 isolate_->global_handles()->IterateAllRoots(v);
5272 v->Synchronize(VisitorSynchronization::kGlobalHandles);
5274 // Iterate over pointers being held by inactive threads.
5275 isolate_->thread_manager()->Iterate(v);
5276 v->Synchronize(VisitorSynchronization::kThreadManager);
5278 // Iterate over the pointers the Serialization/Deserialization code is holding.
5280 // During garbage collection this keeps the partial snapshot cache alive.
5281 // During deserialization of the startup snapshot this creates the partial
5282 // snapshot cache and deserializes the objects it refers to. During
5283 // serialization this does nothing, since the partial snapshot cache is
5284 // empty. However the next thing we do is create the partial snapshot,
5285 // filling up the partial snapshot cache with objects it needs as we go.
5286 SerializerDeserializer::Iterate(v);
5287 // We don't do a v->Synchronize call here, because in debug mode that will
5288 // output a flag to the snapshot. However at this point the serializer and
5289 // deserializer are deliberately a little unsynchronized (see above) so the
5290 // checking of the sync flag in the snapshot would fail.
5294 // TODO(1236194): Since the heap size is configurable on the command line
5295 // and through the API, we should gracefully handle the case that the heap
5296 // size is not big enough to fit all the initial objects.
5297 bool Heap::ConfigureHeap(int max_semispace_size,
5298 intptr_t max_old_gen_size,
5299 intptr_t max_executable_size) {
5300 if (HasBeenSetUp()) return false;
5302 if (max_semispace_size > 0) {
5303 if (max_semispace_size < Page::kPageSize) {
5304 max_semispace_size = Page::kPageSize;
5305 if (FLAG_trace_gc) {
5306 PrintF("Max semispace size cannot be less than %dkbytes\n",
5307 Page::kPageSize >> 10);
5310 max_semispace_size_ = max_semispace_size;
5313 if (Snapshot::IsEnabled()) {
5314 // If we are using a snapshot we always reserve the default amount
5315 // of memory for each semispace because code in the snapshot has
5316 // write-barrier code that relies on the size and alignment of new
5317 // space. We therefore cannot use a larger max semispace size
5318 // than the default reserved semispace size.
5319 if (max_semispace_size_ > reserved_semispace_size_) {
5320 max_semispace_size_ = reserved_semispace_size_;
5321 if (FLAG_trace_gc) {
5322 PrintF("Max semispace size cannot be more than %dkbytes\n",
5323 reserved_semispace_size_ >> 10);
5327 // If we are not using snapshots we reserve space for the actual
5328 // max semispace size.
5329 reserved_semispace_size_ = max_semispace_size_;
5332 if (max_old_gen_size > 0) max_old_generation_size_ = max_old_gen_size;
5333 if (max_executable_size > 0) {
5334 max_executable_size_ = RoundUp(max_executable_size, Page::kPageSize);
5337 // The max executable size must be less than or equal to the max old generation size.
5339 if (max_executable_size_ > max_old_generation_size_) {
5340 max_executable_size_ = max_old_generation_size_;
5343 // The new space size must be a power of two to support single-bit testing
5345 max_semispace_size_ = RoundUpToPowerOf2(max_semispace_size_);
5346 reserved_semispace_size_ = RoundUpToPowerOf2(reserved_semispace_size_);
5347 initial_semispace_size_ = Min(initial_semispace_size_, max_semispace_size_);
5348 external_allocation_limit_ = 10 * max_semispace_size_;
5350 // The old generation is paged and needs at least one page for each space.
5351 int paged_space_count = LAST_PAGED_SPACE - FIRST_PAGED_SPACE + 1;
5352 max_old_generation_size_ = Max(static_cast<intptr_t>(paged_space_count *
5354 RoundUp(max_old_generation_size_,
5362 bool Heap::ConfigureHeapDefault() {
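// New space consists of two semispaces, so the max_new_space_size flag
// (presumably the total new-space size) is halved to obtain the per-semispace
// maximum passed on to ConfigureHeap.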
5363 return ConfigureHeap(static_cast<intptr_t>(FLAG_max_new_space_size / 2) * KB,
5364 static_cast<intptr_t>(FLAG_max_old_space_size) * MB,
5365 static_cast<intptr_t>(FLAG_max_executable_size) * MB);
5369 void Heap::RecordStats(HeapStats* stats, bool take_snapshot) {
5370 *stats->start_marker = HeapStats::kStartMarker;
5371 *stats->end_marker = HeapStats::kEndMarker;
5372 *stats->new_space_size = new_space_.SizeAsInt();
5373 *stats->new_space_capacity = static_cast<int>(new_space_.Capacity());
5374 *stats->old_pointer_space_size = old_pointer_space_->Size();
5375 *stats->old_pointer_space_capacity = old_pointer_space_->Capacity();
5376 *stats->old_data_space_size = old_data_space_->Size();
5377 *stats->old_data_space_capacity = old_data_space_->Capacity();
5378 *stats->code_space_size = code_space_->Size();
5379 *stats->code_space_capacity = code_space_->Capacity();
5380 *stats->map_space_size = map_space_->Size();
5381 *stats->map_space_capacity = map_space_->Capacity();
5382 *stats->cell_space_size = cell_space_->Size();
5383 *stats->cell_space_capacity = cell_space_->Capacity();
5384 *stats->lo_space_size = lo_space_->Size();
5385 isolate_->global_handles()->RecordStats(stats);
5386 *stats->memory_allocator_size = isolate()->memory_allocator()->Size();
5387 *stats->memory_allocator_capacity =
5388 isolate()->memory_allocator()->Size() +
5389 isolate()->memory_allocator()->Available();
5390 *stats->os_error = OS::GetLastError();
5392 if (take_snapshot) {
5393 HeapIterator iterator;
5394 for (HeapObject* obj = iterator.next();
5395 obj != NULL;
5396 obj = iterator.next()) {
5397 InstanceType type = obj->map()->instance_type();
5398 ASSERT(0 <= type && type <= LAST_TYPE);
5399 stats->objects_per_type[type]++;
5400 stats->size_per_type[type] += obj->Size();
5406 intptr_t Heap::PromotedSpaceSize() {
5407 return old_pointer_space_->Size()
5408 + old_data_space_->Size()
5409 + code_space_->Size()
5410 + map_space_->Size()
5411 + cell_space_->Size()
5412 + lo_space_->Size();
5416 int Heap::PromotedExternalMemorySize() {
5417 if (amount_of_external_allocated_memory_
5418 <= amount_of_external_allocated_memory_at_last_global_gc_) return 0;
5419 return amount_of_external_allocated_memory_
5420 - amount_of_external_allocated_memory_at_last_global_gc_;
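// Editorial note (illustrative): amount_of_external_allocated_memory_ is the
// running total that embedders report, typically through
// v8::V8::AdjustAmountOfExternalAllocatedMemory(delta).  The difference
// computed above is the external memory retained since the last global GC,
// which the heap folds into its old-generation allocation limits.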
5425 // Tags 0, 1, and 3 are used. Use 2 for marking visited HeapObject.
5426 static const int kMarkTag = 2;
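// Editorial note (illustrative description of the trick used below, not
// original commentary): the debug marker flags a visited object by storing
// the map's untagged address plus kMarkTag in the object's map word.  The
// resulting word no longer carries the heap-object tag, so
// "!map->IsHeapObject()" doubles as the "already visited" test, and
// subtracting kMarkTag and re-tagging restores the original map on the
// unmark pass.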
5429 class HeapDebugUtils {
5431 explicit HeapDebugUtils(Heap* heap)
5432 : search_for_any_global_(false),
5433 search_target_(NULL),
5434 found_target_(false),
5439 class MarkObjectVisitor : public ObjectVisitor {
5441 explicit MarkObjectVisitor(HeapDebugUtils* utils) : utils_(utils) { }
5443 void VisitPointers(Object** start, Object** end) {
5444 // Mark all HeapObject pointers in [start, end).
5445 for (Object** p = start; p < end; p++) {
5446 if ((*p)->IsHeapObject())
5447 utils_->MarkObjectRecursively(p);
5451 HeapDebugUtils* utils_;
5454 void MarkObjectRecursively(Object** p) {
5455 if (!(*p)->IsHeapObject()) return;
5457 HeapObject* obj = HeapObject::cast(*p);
5459 Object* map = obj->map();
5461 if (!map->IsHeapObject()) return; // visited before
5463 if (found_target_) return; // stop if target found
5464 object_stack_.Add(obj);
5465 if ((search_for_any_global_ && obj->IsJSGlobalObject()) ||
5466 (!search_for_any_global_ && (obj == search_target_))) {
5467 found_target_ = true;
5472 Map* map_p = reinterpret_cast<Map*>(HeapObject::cast(map));
5474 Address map_addr = map_p->address();
5476 obj->set_map_no_write_barrier(reinterpret_cast<Map*>(map_addr + kMarkTag));
5478 MarkObjectRecursively(&map);
5480 MarkObjectVisitor mark_visitor(this);
5482 obj->IterateBody(map_p->instance_type(), obj->SizeFromMap(map_p),
5485 if (!found_target_) // don't pop if found the target
5486 object_stack_.RemoveLast();
5490 class UnmarkObjectVisitor : public ObjectVisitor {
5492 explicit UnmarkObjectVisitor(HeapDebugUtils* utils) : utils_(utils) { }
5494 void VisitPointers(Object** start, Object** end) {
5495 // Unmark all HeapObject pointers in [start, end).
5496 for (Object** p = start; p < end; p++) {
5497 if ((*p)->IsHeapObject())
5498 utils_->UnmarkObjectRecursively(p);
5502 HeapDebugUtils* utils_;
5506 void UnmarkObjectRecursively(Object** p) {
5507 if (!(*p)->IsHeapObject()) return;
5509 HeapObject* obj = HeapObject::cast(*p);
5511 Object* map = obj->map();
5513 if (map->IsHeapObject()) return; // unmarked already
5515 Address map_addr = reinterpret_cast<Address>(map);
5517 map_addr -= kMarkTag;
5519 ASSERT_TAG_ALIGNED(map_addr);
5521 HeapObject* map_p = HeapObject::FromAddress(map_addr);
5523 obj->set_map_no_write_barrier(reinterpret_cast<Map*>(map_p));
5525 UnmarkObjectRecursively(reinterpret_cast<Object**>(&map_p));
5527 UnmarkObjectVisitor unmark_visitor(this);
5529 obj->IterateBody(Map::cast(map_p)->instance_type(),
5530 obj->SizeFromMap(Map::cast(map_p)),
5535 void MarkRootObjectRecursively(Object** root) {
5536 if (search_for_any_global_) {
5537 ASSERT(search_target_ == NULL);
5538 } else {
5539 ASSERT(search_target_->IsHeapObject());
5540 }
5541 found_target_ = false;
5542 object_stack_.Clear();
5544 MarkObjectRecursively(root);
5545 UnmarkObjectRecursively(root);
5547 if (found_target_) {
5548 PrintF("=====================================\n");
5549 PrintF("==== Path to object ====\n");
5550 PrintF("=====================================\n\n");
5552 ASSERT(!object_stack_.is_empty());
5553 for (int i = 0; i < object_stack_.length(); i++) {
5554 if (i > 0) PrintF("\n |\n |\n V\n\n");
5555 Object* obj = object_stack_[i];
5558 PrintF("=====================================\n");
5562 // Helper class for visiting HeapObjects recursively.
5563 class MarkRootVisitor: public ObjectVisitor {
5565 explicit MarkRootVisitor(HeapDebugUtils* utils) : utils_(utils) { }
5567 void VisitPointers(Object** start, Object** end) {
5568 // Visit all HeapObject pointers in [start, end)
5569 for (Object** p = start; p < end; p++) {
5570 if ((*p)->IsHeapObject())
5571 utils_->MarkRootObjectRecursively(p);
5575 HeapDebugUtils* utils_;
5578 bool search_for_any_global_;
5579 Object* search_target_;
5581 List<Object*> object_stack_;
5589 bool Heap::SetUp(bool create_heap_objects) {
5590 #ifdef DEBUG
5591 allocation_timeout_ = FLAG_gc_interval;
5592 debug_utils_ = new HeapDebugUtils(this);
5593 #endif
5595 // Initialize heap spaces and initial maps and objects. Whenever something
5596 // goes wrong, just return false. The caller should check the results and
5597 // call Heap::TearDown() to release allocated memory.
5599 // If the heap is not yet configured (e.g. through the API), configure it.
5600 // Configuration is based on the flags new-space-size (really the semispace
5601 // size) and old-space-size if set or the initial values of semispace_size_
5602 // and old_generation_size_ otherwise.
5603 if (!configured_) {
5604 if (!ConfigureHeapDefault()) return false;
5605 }
5607 gc_initializer_mutex->Lock();
5608 static bool initialized_gc = false;
5609 if (!initialized_gc) {
5610 initialized_gc = true;
5611 InitializeScavengingVisitorsTables();
5612 NewSpaceScavenger::Initialize();
5613 MarkCompactCollector::Initialize();
5615 gc_initializer_mutex->Unlock();
5617 MarkMapPointersAsEncoded(false);
5619 // Set up memory allocator.
5620 if (!isolate_->memory_allocator()->SetUp(MaxReserved(), MaxExecutableSize()))
5621 return false;
5623 // Set up new space.
5624 if (!new_space_.SetUp(reserved_semispace_size_, max_semispace_size_)) {
5625 return false;
5626 }
5628 // Initialize old pointer space.
5629 old_pointer_space_ =
5630 new OldSpace(this,
5631 max_old_generation_size_,
5632 OLD_POINTER_SPACE,
5633 NOT_EXECUTABLE);
5634 if (old_pointer_space_ == NULL) return false;
5635 if (!old_pointer_space_->SetUp()) return false;
5637 // Initialize old data space.
5638 old_data_space_ =
5639 new OldSpace(this,
5640 max_old_generation_size_,
5641 OLD_DATA_SPACE,
5642 NOT_EXECUTABLE);
5643 if (old_data_space_ == NULL) return false;
5644 if (!old_data_space_->SetUp()) return false;
5646 // Initialize the code space, set its maximum capacity to the old
5647 // generation size. It needs executable memory.
5648 // On 64-bit platform(s), we put all code objects in a 2 GB range of
5649 // virtual address space, so that they can call each other with near calls.
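// (Editorial note, illustrative: on x64 a near call encodes a signed 32-bit
// rel32 displacement, so caller and callee must lie within roughly 2 GB of
// each other; reserving one contiguous code range guarantees that.)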
5650 if (code_range_size_ > 0) {
5651 if (!isolate_->code_range()->SetUp(code_range_size_)) {
5652 return false;
5653 }
5654 }
5656 code_space_ =
5657 new OldSpace(this, max_old_generation_size_, CODE_SPACE, EXECUTABLE);
5658 if (code_space_ == NULL) return false;
5659 if (!code_space_->SetUp()) return false;
5661 // Initialize map space.
5662 map_space_ = new MapSpace(this,
5663 max_old_generation_size_,
5664 FLAG_max_map_space_pages,
5665 MAP_SPACE);
5666 if (map_space_ == NULL) return false;
5667 if (!map_space_->SetUp()) return false;
5669 // Initialize global property cell space.
5670 cell_space_ = new CellSpace(this, max_old_generation_size_, CELL_SPACE);
5671 if (cell_space_ == NULL) return false;
5672 if (!cell_space_->SetUp()) return false;
5674 // The large object space may contain code or data. We set the memory
5675 // to be non-executable here for safety, but this means we need to enable it
5676 // explicitly when allocating large code objects.
5677 lo_space_ = new LargeObjectSpace(this, max_old_generation_size_, LO_SPACE);
5678 if (lo_space_ == NULL) return false;
5679 if (!lo_space_->SetUp()) return false;
5681 // Set up the seed that is used to randomize the string hash function.
5682 ASSERT(hash_seed() == 0);
5683 if (FLAG_randomize_hashes) {
5684 if (FLAG_hash_seed == 0) {
5685 set_hash_seed(
5686 Smi::FromInt(V8::RandomPrivate(isolate()) & 0x3fffffff));
5687 } else {
5688 set_hash_seed(Smi::FromInt(FLAG_hash_seed));
5689 }
5690 }
5692 if (create_heap_objects) {
5693 // Create initial maps.
5694 if (!CreateInitialMaps()) return false;
5695 if (!CreateApiObjects()) return false;
5697 // Create initial objects
5698 if (!CreateInitialObjects()) return false;
5700 global_contexts_list_ = undefined_value();
5701 }
5703 LOG(isolate_, IntPtrTEvent("heap-capacity", Capacity()));
5704 LOG(isolate_, IntPtrTEvent("heap-available", Available()));
5706 store_buffer()->SetUp();
5708 return true;
5709 }
5712 void Heap::SetStackLimits() {
5713 ASSERT(isolate_ != NULL);
5714 ASSERT(isolate_ == isolate());
5715 // On 64 bit machines, pointers are generally out of range of Smis. We write
5716 // something that looks like an out of range Smi to the GC.
5718 // Set up the special root array entries containing the stack limits.
5719 // These are actually addresses, but the tag makes the GC ignore it.
5720 roots_[kStackLimitRootIndex] =
5721 reinterpret_cast<Object*>(
5722 (isolate_->stack_guard()->jslimit() & ~kSmiTagMask) | kSmiTag);
5723 roots_[kRealStackLimitRootIndex] =
5724 reinterpret_cast<Object*>(
5725 (isolate_->stack_guard()->real_jslimit() & ~kSmiTagMask) | kSmiTag);
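// Editorial note (illustrative): masking off kSmiTagMask and or-ing in
// kSmiTag clears the low tag bit, so the stored limit looks like a Smi; for
// example a jslimit of 0x7fff5fbff61f would be stored as 0x7fff5fbff61e.
// Root visitors therefore treat these two entries as integers and never try
// to dereference or relocate them.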
5729 void Heap::TearDown() {
5730 if (FLAG_print_cumulative_gc_stat) {
5732 PrintF("gc_count=%d ", gc_count_);
5733 PrintF("mark_sweep_count=%d ", ms_count_);
5734 PrintF("max_gc_pause=%d ", get_max_gc_pause());
5735 PrintF("min_in_mutator=%d ", get_min_in_mutator());
5736 PrintF("max_alive_after_gc=%" V8_PTR_PREFIX "d ",
5737 get_max_alive_after_gc());
5741 isolate_->global_handles()->TearDown();
5743 external_string_table_.TearDown();
5745 new_space_.TearDown();
5747 if (old_pointer_space_ != NULL) {
5748 old_pointer_space_->TearDown();
5749 delete old_pointer_space_;
5750 old_pointer_space_ = NULL;
5753 if (old_data_space_ != NULL) {
5754 old_data_space_->TearDown();
5755 delete old_data_space_;
5756 old_data_space_ = NULL;
5759 if (code_space_ != NULL) {
5760 code_space_->TearDown();
5761 delete code_space_;
5762 code_space_ = NULL;
5763 }
5765 if (map_space_ != NULL) {
5766 map_space_->TearDown();
5767 delete map_space_;
5768 map_space_ = NULL;
5769 }
5771 if (cell_space_ != NULL) {
5772 cell_space_->TearDown();
5773 delete cell_space_;
5774 cell_space_ = NULL;
5775 }
5777 if (lo_space_ != NULL) {
5778 lo_space_->TearDown();
5779 delete lo_space_;
5780 lo_space_ = NULL;
5781 }
5783 store_buffer()->TearDown();
5784 incremental_marking()->TearDown();
5786 isolate_->memory_allocator()->TearDown();
5788 #ifdef DEBUG
5789 delete debug_utils_;
5790 debug_utils_ = NULL;
5791 #endif
5792 }
5795 void Heap::Shrink() {
5796 // Try to shrink all paged spaces.
5797 PagedSpaces spaces;
5798 for (PagedSpace* space = spaces.next();
5799 space != NULL;
5800 space = spaces.next()) {
5801 space->ReleaseAllUnusedPages();
5802 }
5803 }
5806 void Heap::AddGCPrologueCallback(GCPrologueCallback callback, GCType gc_type) {
5807 ASSERT(callback != NULL);
5808 GCPrologueCallbackPair pair(callback, gc_type);
5809 ASSERT(!gc_prologue_callbacks_.Contains(pair));
5810 return gc_prologue_callbacks_.Add(pair);
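// Editorial note (illustrative): this list backs the public embedder API
// v8::V8::AddGCPrologueCallback()/RemoveGCPrologueCallback().  A hedged
// embedder-side sketch (OnGCStart is a hypothetical callback name):
//   static void OnGCStart(v8::GCType type, v8::GCCallbackFlags flags) { ... }
//   v8::V8::AddGCPrologueCallback(OnGCStart, v8::kGCTypeAll);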
5814 void Heap::RemoveGCPrologueCallback(GCPrologueCallback callback) {
5815 ASSERT(callback != NULL);
5816 for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
5817 if (gc_prologue_callbacks_[i].callback == callback) {
5818 gc_prologue_callbacks_.Remove(i);
5819 return;
5820 }
5821 }
5822 UNREACHABLE();
5823 }
5826 void Heap::AddGCEpilogueCallback(GCEpilogueCallback callback, GCType gc_type) {
5827 ASSERT(callback != NULL);
5828 GCEpilogueCallbackPair pair(callback, gc_type);
5829 ASSERT(!gc_epilogue_callbacks_.Contains(pair));
5830 return gc_epilogue_callbacks_.Add(pair);
5834 void Heap::RemoveGCEpilogueCallback(GCEpilogueCallback callback) {
5835 ASSERT(callback != NULL);
5836 for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
5837 if (gc_epilogue_callbacks_[i].callback == callback) {
5838 gc_epilogue_callbacks_.Remove(i);
5839 return;
5840 }
5841 }
5842 UNREACHABLE();
5843 }
5848 class PrintHandleVisitor: public ObjectVisitor {
5850 void VisitPointers(Object** start, Object** end) {
5851 for (Object** p = start; p < end; p++)
5852 PrintF(" handle %p to %p\n",
5853 reinterpret_cast<void*>(p),
5854 reinterpret_cast<void*>(*p));
5858 void Heap::PrintHandles() {
5859 PrintF("Handles:\n");
5860 PrintHandleVisitor v;
5861 isolate_->handle_scope_implementer()->Iterate(&v);
5867 Space* AllSpaces::next() {
5868 switch (counter_++) {
5870 return HEAP->new_space();
5871 case OLD_POINTER_SPACE:
5872 return HEAP->old_pointer_space();
5873 case OLD_DATA_SPACE:
5874 return HEAP->old_data_space();
5876 return HEAP->code_space();
5878 return HEAP->map_space();
5880 return HEAP->cell_space();
5882 return HEAP->lo_space();
5889 PagedSpace* PagedSpaces::next() {
5890 switch (counter_++) {
5891 case OLD_POINTER_SPACE:
5892 return HEAP->old_pointer_space();
5893 case OLD_DATA_SPACE:
5894 return HEAP->old_data_space();
5896 return HEAP->code_space();
5898 return HEAP->map_space();
5900 return HEAP->cell_space();
5908 OldSpace* OldSpaces::next() {
5909 switch (counter_++) {
5910 case OLD_POINTER_SPACE:
5911 return HEAP->old_pointer_space();
5912 case OLD_DATA_SPACE:
5913 return HEAP->old_data_space();
5915 return HEAP->code_space();
5922 SpaceIterator::SpaceIterator()
5923 : current_space_(FIRST_SPACE),
5929 SpaceIterator::SpaceIterator(HeapObjectCallback size_func)
5930 : current_space_(FIRST_SPACE),
5932 size_func_(size_func) {
5936 SpaceIterator::~SpaceIterator() {
5937 // Delete active iterator if any.
5942 bool SpaceIterator::has_next() {
5943 // Iterate until no more spaces.
5944 return current_space_ != LAST_SPACE;
5948 ObjectIterator* SpaceIterator::next() {
5949 if (iterator_ != NULL) {
5952 // Move to the next space
5954 if (current_space_ > LAST_SPACE) {
5959 // Return iterator for the new current space.
5960 return CreateIterator();
5964 // Create an iterator for the space to iterate.
5965 ObjectIterator* SpaceIterator::CreateIterator() {
5966 ASSERT(iterator_ == NULL);
5968 switch (current_space_) {
5970 iterator_ = new SemiSpaceIterator(HEAP->new_space(), size_func_);
5972 case OLD_POINTER_SPACE:
5973 iterator_ = new HeapObjectIterator(HEAP->old_pointer_space(), size_func_);
5975 case OLD_DATA_SPACE:
5976 iterator_ = new HeapObjectIterator(HEAP->old_data_space(), size_func_);
5979 iterator_ = new HeapObjectIterator(HEAP->code_space(), size_func_);
5982 iterator_ = new HeapObjectIterator(HEAP->map_space(), size_func_);
5985 iterator_ = new HeapObjectIterator(HEAP->cell_space(), size_func_);
5988 iterator_ = new LargeObjectIterator(HEAP->lo_space(), size_func_);
5992 // Return the newly allocated iterator.
5993 ASSERT(iterator_ != NULL);
5994 return iterator_;
5995 }
5998 class HeapObjectsFilter {
6000 virtual ~HeapObjectsFilter() {}
6001 virtual bool SkipObject(HeapObject* object) = 0;
6005 class UnreachableObjectsFilter : public HeapObjectsFilter {
6007 UnreachableObjectsFilter() {
6008 MarkReachableObjects();
6011 ~UnreachableObjectsFilter() {
6012 Isolate::Current()->heap()->mark_compact_collector()->ClearMarkbits();
6015 bool SkipObject(HeapObject* object) {
6016 MarkBit mark_bit = Marking::MarkBitFrom(object);
6017 return !mark_bit.Get();
6021 class MarkingVisitor : public ObjectVisitor {
6023 MarkingVisitor() : marking_stack_(10) {}
6025 void VisitPointers(Object** start, Object** end) {
6026 for (Object** p = start; p < end; p++) {
6027 if (!(*p)->IsHeapObject()) continue;
6028 HeapObject* obj = HeapObject::cast(*p);
6029 MarkBit mark_bit = Marking::MarkBitFrom(obj);
6030 if (!mark_bit.Get()) {
6032 marking_stack_.Add(obj);
6037 void TransitiveClosure() {
6038 while (!marking_stack_.is_empty()) {
6039 HeapObject* obj = marking_stack_.RemoveLast();
6045 List<HeapObject*> marking_stack_;
6048 void MarkReachableObjects() {
6049 Heap* heap = Isolate::Current()->heap();
6050 MarkingVisitor visitor;
6051 heap->IterateRoots(&visitor, VISIT_ALL);
6052 visitor.TransitiveClosure();
6055 AssertNoAllocation no_alloc;
6059 HeapIterator::HeapIterator()
6060 : filtering_(HeapIterator::kNoFiltering),
6066 HeapIterator::HeapIterator(HeapIterator::HeapObjectsFiltering filtering)
6067 : filtering_(filtering),
6073 HeapIterator::~HeapIterator() {
6078 void HeapIterator::Init() {
6079 // Start the iteration.
6080 space_iterator_ = new SpaceIterator;
6081 switch (filtering_) {
6082 case kFilterUnreachable:
6083 filter_ = new UnreachableObjectsFilter;
6088 object_iterator_ = space_iterator_->next();
6092 void HeapIterator::Shutdown() {
6094 // Assert that in filtering mode we have iterated through all
6095 // objects. Otherwise, heap will be left in an inconsistent state.
6096 if (filtering_ != kNoFiltering) {
6097 ASSERT(object_iterator_ == NULL);
6100 // Make sure the last iterator is deallocated.
6101 delete space_iterator_;
6102 space_iterator_ = NULL;
6103 object_iterator_ = NULL;
6109 HeapObject* HeapIterator::next() {
6110 if (filter_ == NULL) return NextObject();
6112 HeapObject* obj = NextObject();
6113 while (obj != NULL && filter_->SkipObject(obj)) obj = NextObject();
6114 return obj;
6115 }
6118 HeapObject* HeapIterator::NextObject() {
6119 // No iterator means we are done.
6120 if (object_iterator_ == NULL) return NULL;
6122 if (HeapObject* obj = object_iterator_->next_object()) {
6123 // If the current iterator has more objects we are fine.
6126 // Go through the spaces looking for one that has objects.
6127 while (space_iterator_->has_next()) {
6128 object_iterator_ = space_iterator_->next();
6129 if (HeapObject* obj = object_iterator_->next_object()) {
6134 // Done with the last space.
6135 object_iterator_ = NULL;
6137 return NULL;
6138 }
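// Editorial usage sketch (illustrative, not original commentary): walking the
// whole heap, optionally skipping objects unreachable from the roots, looks
// roughly like
//   HeapIterator it(HeapIterator::kFilterUnreachable);
//   for (HeapObject* obj = it.next(); obj != NULL; obj = it.next()) {
//     // ... inspect obj ...
//   }
// The filtering variant marks everything reachable from the roots up front,
// SkipObject() then rejects unmarked objects, and the mark bits are cleared
// again when the filter is destroyed.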
6140 void HeapIterator::reset() {
6141 // Restart the iterator.
6147 #if defined(DEBUG) || defined(LIVE_OBJECT_LIST)
6149 Object* const PathTracer::kAnyGlobalObject = reinterpret_cast<Object*>(NULL);
6151 class PathTracer::MarkVisitor: public ObjectVisitor {
6153 explicit MarkVisitor(PathTracer* tracer) : tracer_(tracer) {}
6154 void VisitPointers(Object** start, Object** end) {
6155 // Scan all HeapObject pointers in [start, end)
6156 for (Object** p = start; !tracer_->found() && (p < end); p++) {
6157 if ((*p)->IsHeapObject())
6158 tracer_->MarkRecursively(p, this);
6163 PathTracer* tracer_;
6167 class PathTracer::UnmarkVisitor: public ObjectVisitor {
6169 explicit UnmarkVisitor(PathTracer* tracer) : tracer_(tracer) {}
6170 void VisitPointers(Object** start, Object** end) {
6171 // Scan all HeapObject pointers in [start, end)
6172 for (Object** p = start; p < end; p++) {
6173 if ((*p)->IsHeapObject())
6174 tracer_->UnmarkRecursively(p, this);
6179 PathTracer* tracer_;
6183 void PathTracer::VisitPointers(Object** start, Object** end) {
6184 bool done = ((what_to_find_ == FIND_FIRST) && found_target_);
6185 // Visit all HeapObject pointers in [start, end)
6186 for (Object** p = start; !done && (p < end); p++) {
6187 if ((*p)->IsHeapObject()) {
6189 done = ((what_to_find_ == FIND_FIRST) && found_target_);
6195 void PathTracer::Reset() {
6196 found_target_ = false;
6197 object_stack_.Clear();
6201 void PathTracer::TracePathFrom(Object** root) {
6202 ASSERT((search_target_ == kAnyGlobalObject) ||
6203 search_target_->IsHeapObject());
6204 found_target_in_trace_ = false;
6205 object_stack_.Clear();
6207 MarkVisitor mark_visitor(this);
6208 MarkRecursively(root, &mark_visitor);
6210 UnmarkVisitor unmark_visitor(this);
6211 UnmarkRecursively(root, &unmark_visitor);
6217 static bool SafeIsGlobalContext(HeapObject* obj) {
6218 return obj->map() == obj->GetHeap()->raw_unchecked_global_context_map();
6222 void PathTracer::MarkRecursively(Object** p, MarkVisitor* mark_visitor) {
6223 if (!(*p)->IsHeapObject()) return;
6225 HeapObject* obj = HeapObject::cast(*p);
6227 Object* map = obj->map();
6229 if (!map->IsHeapObject()) return; // visited before
6231 if (found_target_in_trace_) return; // stop if target found
6232 object_stack_.Add(obj);
6233 if (((search_target_ == kAnyGlobalObject) && obj->IsJSGlobalObject()) ||
6234 (obj == search_target_)) {
6235 found_target_in_trace_ = true;
6236 found_target_ = true;
6240 bool is_global_context = SafeIsGlobalContext(obj);
6243 Map* map_p = reinterpret_cast<Map*>(HeapObject::cast(map));
6245 Address map_addr = map_p->address();
6247 obj->set_map_no_write_barrier(reinterpret_cast<Map*>(map_addr + kMarkTag));
6249 // Scan the object body.
6250 if (is_global_context && (visit_mode_ == VISIT_ONLY_STRONG)) {
6251 // This is specialized to scan Context's properly.
6252 Object** start = reinterpret_cast<Object**>(obj->address() +
6253 Context::kHeaderSize);
6254 Object** end = reinterpret_cast<Object**>(obj->address() +
6255 Context::kHeaderSize + Context::FIRST_WEAK_SLOT * kPointerSize);
6256 mark_visitor->VisitPointers(start, end);
6258 obj->IterateBody(map_p->instance_type(),
6259 obj->SizeFromMap(map_p),
6263 // Scan the map after the body because the body is a lot more interesting
6264 // when doing leak detection.
6265 MarkRecursively(&map, mark_visitor);
6267 if (!found_target_in_trace_) // don't pop if found the target
6268 object_stack_.RemoveLast();
6272 void PathTracer::UnmarkRecursively(Object** p, UnmarkVisitor* unmark_visitor) {
6273 if (!(*p)->IsHeapObject()) return;
6275 HeapObject* obj = HeapObject::cast(*p);
6277 Object* map = obj->map();
6279 if (map->IsHeapObject()) return; // unmarked already
6281 Address map_addr = reinterpret_cast<Address>(map);
6283 map_addr -= kMarkTag;
6285 ASSERT_TAG_ALIGNED(map_addr);
6287 HeapObject* map_p = HeapObject::FromAddress(map_addr);
6289 obj->set_map_no_write_barrier(reinterpret_cast<Map*>(map_p));
6291 UnmarkRecursively(reinterpret_cast<Object**>(&map_p), unmark_visitor);
6293 obj->IterateBody(Map::cast(map_p)->instance_type(),
6294 obj->SizeFromMap(Map::cast(map_p)),
6299 void PathTracer::ProcessResults() {
6300 if (found_target_) {
6301 PrintF("=====================================\n");
6302 PrintF("==== Path to object ====\n");
6303 PrintF("=====================================\n\n");
6305 ASSERT(!object_stack_.is_empty());
6306 for (int i = 0; i < object_stack_.length(); i++) {
6307 if (i > 0) PrintF("\n |\n |\n V\n\n");
6308 Object* obj = object_stack_[i];
6315 PrintF("=====================================\n");
6318 #endif // DEBUG || LIVE_OBJECT_LIST
6322 // Triggers a depth-first traversal of reachable objects from roots
6323 // and finds a path to a specific heap object and prints it.
6324 void Heap::TracePathToObject(Object* target) {
6325 PathTracer tracer(target, PathTracer::FIND_ALL, VISIT_ALL);
6326 IterateRoots(&tracer, VISIT_ONLY_STRONG);
6330 // Triggers a depth-first traversal of reachable objects from roots
6331 // and finds a path to any global object and prints it. Useful for
6332 // determining the source for leaks of global objects.
6333 void Heap::TracePathToGlobal() {
6334 PathTracer tracer(PathTracer::kAnyGlobalObject,
6335 PathTracer::FIND_ALL,
6337 IterateRoots(&tracer, VISIT_ONLY_STRONG);
6342 static intptr_t CountTotalHolesSize() {
6343 intptr_t holes_size = 0;
6344 OldSpaces spaces;
6345 for (OldSpace* space = spaces.next();
6346 space != NULL;
6347 space = spaces.next()) {
6348 holes_size += space->Waste() + space->Available();
6349 }
6350 return holes_size;
6351 }
6354 GCTracer::GCTracer(Heap* heap)
6359 allocated_since_last_gc_(0),
6360 spent_in_mutator_(0),
6361 promoted_objects_size_(0),
6363 if (!FLAG_trace_gc && !FLAG_print_cumulative_gc_stat) return;
6364 start_time_ = OS::TimeCurrentMillis();
6365 start_size_ = heap_->SizeOfObjects();
6367 for (int i = 0; i < Scope::kNumberOfScopes; i++) {
6371 in_free_list_or_wasted_before_gc_ = CountTotalHolesSize();
6373 allocated_since_last_gc_ =
6374 heap_->SizeOfObjects() - heap_->alive_after_last_gc_;
6376 if (heap_->last_gc_end_timestamp_ > 0) {
6377 spent_in_mutator_ = Max(start_time_ - heap_->last_gc_end_timestamp_, 0.0);
6380 steps_count_ = heap_->incremental_marking()->steps_count();
6381 steps_took_ = heap_->incremental_marking()->steps_took();
6382 longest_step_ = heap_->incremental_marking()->longest_step();
6383 steps_count_since_last_gc_ =
6384 heap_->incremental_marking()->steps_count_since_last_gc();
6385 steps_took_since_last_gc_ =
6386 heap_->incremental_marking()->steps_took_since_last_gc();
6390 GCTracer::~GCTracer() {
6391 // Print ONE trace line iff one of the tracing flags is set.
6392 if (!FLAG_trace_gc && !FLAG_print_cumulative_gc_stat) return;
6394 bool first_gc = (heap_->last_gc_end_timestamp_ == 0);
6396 heap_->alive_after_last_gc_ = heap_->SizeOfObjects();
6397 heap_->last_gc_end_timestamp_ = OS::TimeCurrentMillis();
6399 int time = static_cast<int>(heap_->last_gc_end_timestamp_ - start_time_);
6401 // Update cumulative GC statistics if required.
6402 if (FLAG_print_cumulative_gc_stat) {
6403 heap_->max_gc_pause_ = Max(heap_->max_gc_pause_, time);
6404 heap_->max_alive_after_gc_ = Max(heap_->max_alive_after_gc_,
6405 heap_->alive_after_last_gc_);
6407 heap_->min_in_mutator_ = Min(heap_->min_in_mutator_,
6408 static_cast<int>(spent_in_mutator_));
6412 if (!FLAG_trace_gc_nvp) {
6413 int external_time = static_cast<int>(scopes_[Scope::EXTERNAL]);
6415 PrintF("%s %.1f -> %.1f MB, ",
6417 static_cast<double>(start_size_) / MB,
6418 SizeOfHeapObjects());
6420 if (external_time > 0) PrintF("%d / ", external_time);
6421 PrintF("%d ms", time);
6422 if (steps_count_ > 0) {
6423 if (collector_ == SCAVENGER) {
6424 PrintF(" (+ %d ms in %d steps since last GC)",
6425 static_cast<int>(steps_took_since_last_gc_),
6426 steps_count_since_last_gc_);
6428 PrintF(" (+ %d ms in %d steps since start of marking, "
6429 "biggest step %f ms)",
6430 static_cast<int>(steps_took_),
6437 PrintF("pause=%d ", time);
6438 PrintF("mutator=%d ",
6439 static_cast<int>(spent_in_mutator_));
6442 switch (collector_) {
6446 case MARK_COMPACTOR:
6454 PrintF("external=%d ", static_cast<int>(scopes_[Scope::EXTERNAL]));
6455 PrintF("mark=%d ", static_cast<int>(scopes_[Scope::MC_MARK]));
6456 PrintF("sweep=%d ", static_cast<int>(scopes_[Scope::MC_SWEEP]));
6457 PrintF("sweepns=%d ", static_cast<int>(scopes_[Scope::MC_SWEEP_NEWSPACE]));
6458 PrintF("evacuate=%d ", static_cast<int>(scopes_[Scope::MC_EVACUATE_PAGES]));
6459 PrintF("new_new=%d ",
6460 static_cast<int>(scopes_[Scope::MC_UPDATE_NEW_TO_NEW_POINTERS]));
6461 PrintF("root_new=%d ",
6462 static_cast<int>(scopes_[Scope::MC_UPDATE_ROOT_TO_NEW_POINTERS]));
6463 PrintF("old_new=%d ",
6464 static_cast<int>(scopes_[Scope::MC_UPDATE_OLD_TO_NEW_POINTERS]));
6465 PrintF("compaction_ptrs=%d ",
6466 static_cast<int>(scopes_[Scope::MC_UPDATE_POINTERS_TO_EVACUATED]));
6467 PrintF("intracompaction_ptrs=%d ", static_cast<int>(scopes_[
6468 Scope::MC_UPDATE_POINTERS_BETWEEN_EVACUATED]));
6469 PrintF("misc_compaction=%d ",
6470 static_cast<int>(scopes_[Scope::MC_UPDATE_MISC_POINTERS]));
6472 PrintF("total_size_before=%" V8_PTR_PREFIX "d ", start_size_);
6473 PrintF("total_size_after=%" V8_PTR_PREFIX "d ", heap_->SizeOfObjects());
6474 PrintF("holes_size_before=%" V8_PTR_PREFIX "d ",
6475 in_free_list_or_wasted_before_gc_);
6476 PrintF("holes_size_after=%" V8_PTR_PREFIX "d ", CountTotalHolesSize());
6478 PrintF("allocated=%" V8_PTR_PREFIX "d ", allocated_since_last_gc_);
6479 PrintF("promoted=%" V8_PTR_PREFIX "d ", promoted_objects_size_);
6481 if (collector_ == SCAVENGER) {
6482 PrintF("stepscount=%d ", steps_count_since_last_gc_);
6483 PrintF("stepstook=%d ", static_cast<int>(steps_took_since_last_gc_));
6485 PrintF("stepscount=%d ", steps_count_);
6486 PrintF("stepstook=%d ", static_cast<int>(steps_took_));
6492 heap_->PrintShortHeapStatistics();
6496 const char* GCTracer::CollectorString() {
6497 switch (collector_) {
6498 case SCAVENGER:
6499 return "Scavenge";
6500 case MARK_COMPACTOR:
6501 return "Mark-sweep";
6503 return "Unknown GC";
6507 int KeyedLookupCache::Hash(Map* map, String* name) {
6508 // Uses only lower 32 bits if pointers are larger.
6509 uintptr_t addr_hash =
6510 static_cast<uint32_t>(reinterpret_cast<uintptr_t>(map)) >> kMapHashShift;
6511 return static_cast<uint32_t>((addr_hash ^ name->Hash()) & kCapacityMask);
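// Editorial note (illustrative): the map pointer doubles as a hash source.
// Its low kMapHashShift bits (dominated by alignment and tag bits) are
// dropped, the result is xor-ed with the symbol's hash, and kCapacityMask
// (a power-of-two capacity minus one) keeps the index in range.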
6515 int KeyedLookupCache::Lookup(Map* map, String* name) {
6516 int index = Hash(map, name);
6517 Key& key = keys_[index];
6518 if ((key.map == map) && key.name->Equals(name)) {
6519 return field_offsets_[index];
6520 }
6521 return kNotFound;
6522 }
6525 void KeyedLookupCache::Update(Map* map, String* name, int field_offset) {
6526 String* symbol;
6527 if (HEAP->LookupSymbolIfExists(name, &symbol)) {
6528 int index = Hash(map, symbol);
6529 Key& key = keys_[index];
6530 key.map = map;
6531 key.name = symbol;
6532 field_offsets_[index] = field_offset;
6533 }
6534 }
6537 void KeyedLookupCache::Clear() {
6538 for (int index = 0; index < kLength; index++) keys_[index].map = NULL;
6542 void DescriptorLookupCache::Clear() {
6543 for (int index = 0; index < kLength; index++) keys_[index].array = NULL;
6548 void Heap::GarbageCollectionGreedyCheck() {
6549 ASSERT(FLAG_gc_greedy);
6550 if (isolate_->bootstrapper()->IsActive()) return;
6551 if (disallow_allocation_failure()) return;
6552 CollectGarbage(NEW_SPACE);
6557 TranscendentalCache::SubCache::SubCache(Type t)
6558 : type_(t),
6559 isolate_(Isolate::Current()) {
6560 uint32_t in0 = 0xffffffffu; // Bit-pattern for a NaN that isn't
6561 uint32_t in1 = 0xffffffffu; // generated by the FPU.
6562 for (int i = 0; i < kCacheSize; i++) {
6563 elements_[i].in[0] = in0;
6564 elements_[i].in[1] = in1;
6565 elements_[i].output = NULL;
6570 void TranscendentalCache::Clear() {
6571 for (int i = 0; i < kNumberOfCaches; i++) {
6572 if (caches_[i] != NULL) {
6573 delete caches_[i];
6574 caches_[i] = NULL;
6575 }
6576 }
6577 }
6580 void ExternalStringTable::CleanUp() {
6582 for (int i = 0; i < new_space_strings_.length(); ++i) {
6583 if (new_space_strings_[i] == heap_->raw_unchecked_the_hole_value()) {
6586 if (heap_->InNewSpace(new_space_strings_[i])) {
6587 new_space_strings_[last++] = new_space_strings_[i];
6589 old_space_strings_.Add(new_space_strings_[i]);
6592 new_space_strings_.Rewind(last);
6594 for (int i = 0; i < old_space_strings_.length(); ++i) {
6595 if (old_space_strings_[i] == heap_->raw_unchecked_the_hole_value()) {
6598 ASSERT(!heap_->InNewSpace(old_space_strings_[i]));
6599 old_space_strings_[last++] = old_space_strings_[i];
6601 old_space_strings_.Rewind(last);
6602 if (FLAG_verify_heap) {
6608 void ExternalStringTable::TearDown() {
6609 new_space_strings_.Free();
6610 old_space_strings_.Free();
6614 void Heap::QueueMemoryChunkForFree(MemoryChunk* chunk) {
6615 chunk->set_next_chunk(chunks_queued_for_free_);
6616 chunks_queued_for_free_ = chunk;
6620 void Heap::FreeQueuedChunks() {
6621 if (chunks_queued_for_free_ == NULL) return;
6622 MemoryChunk* chunk;
6623 MemoryChunk* next;
6624 for (chunk = chunks_queued_for_free_; chunk != NULL; chunk = next) {
6625 next = chunk->next_chunk();
6626 chunk->SetFlag(MemoryChunk::ABOUT_TO_BE_FREED);
6628 if (chunk->owner()->identity() == LO_SPACE) {
6629 // StoreBuffer::Filter relies on MemoryChunk::FromAnyPointerAddress.
6630 // If FromAnyPointerAddress encounters a slot that belongs to a large
6631 // chunk queued for deletion it will fail to find the chunk because
6632 // it tries to perform a search in the list of pages owned by the large
6633 // object space, but queued chunks were detached from that list.
6634 // To work around this we split the large chunk into kPageSize-aligned
6635 // pieces and initialize the size, owner and flags fields of every piece.
6636 // If FromAnyPointerAddress encounters a slot that belongs to one of
6637 // these smaller pieces it will treat it as a slot on a normal Page.
6638 MemoryChunk* inner = MemoryChunk::FromAddress(
6639 chunk->address() + Page::kPageSize);
6640 MemoryChunk* inner_last = MemoryChunk::FromAddress(
6641 chunk->address() + chunk->size() - 1);
6642 while (inner <= inner_last) {
6643 // Size of a large chunk is always a multiple of
6644 // OS::AllocateAlignment() so there is always
6645 // enough space for a fake MemoryChunk header.
6646 inner->set_size(Page::kPageSize);
6647 inner->set_owner(lo_space());
6648 inner->SetFlag(MemoryChunk::ABOUT_TO_BE_FREED);
6649 inner = MemoryChunk::FromAddress(
6650 inner->address() + Page::kPageSize);
6654 isolate_->heap()->store_buffer()->Compact();
6655 isolate_->heap()->store_buffer()->Filter(MemoryChunk::ABOUT_TO_BE_FREED);
6656 for (chunk = chunks_queued_for_free_; chunk != NULL; chunk = next) {
6657 next = chunk->next_chunk();
6658 isolate_->memory_allocator()->Free(chunk);
6660 chunks_queued_for_free_ = NULL;
6663 } } // namespace v8::internal