1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission.
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 #include "accessors.h"
32 #include "bootstrapper.h"
34 #include "compilation-cache.h"
36 #include "deoptimizer.h"
37 #include "global-handles.h"
38 #include "heap-profiler.h"
39 #include "incremental-marking.h"
40 #include "liveobjectlist-inl.h"
41 #include "mark-compact.h"
43 #include "objects-visiting.h"
44 #include "objects-visiting-inl.h"
46 #include "runtime-profiler.h"
47 #include "scopeinfo.h"
49 #include "store-buffer.h"
50 #include "v8threads.h"
52 #include "vm-state-inl.h"
53 #if V8_TARGET_ARCH_ARM && !V8_INTERPRETED_REGEXP
54 #include "regexp-macro-assembler.h"
55 #include "arm/regexp-macro-assembler-arm.h"
57 #if V8_TARGET_ARCH_MIPS && !V8_INTERPRETED_REGEXP
58 #include "regexp-macro-assembler.h"
59 #include "mips/regexp-macro-assembler-mips.h"
68 // semispace_size_ should be a power of 2 and old_generation_size_ should be
69 // a multiple of Page::kPageSize.
70 #if defined(V8_TARGET_ARCH_X64)
71 #define LUMP_OF_MEMORY (2 * MB)
72 code_range_size_(512*MB),
74 #define LUMP_OF_MEMORY MB
78 reserved_semispace_size_(4 * Max(LUMP_OF_MEMORY, Page::kPageSize)),
79 max_semispace_size_(4 * Max(LUMP_OF_MEMORY, Page::kPageSize)),
80 initial_semispace_size_(Page::kPageSize),
81 max_old_generation_size_(192*MB),
82 max_executable_size_(max_old_generation_size_),
84 reserved_semispace_size_(8 * Max(LUMP_OF_MEMORY, Page::kPageSize)),
85 max_semispace_size_(8 * Max(LUMP_OF_MEMORY, Page::kPageSize)),
86 initial_semispace_size_(Page::kPageSize),
87 max_old_generation_size_(700ul * LUMP_OF_MEMORY),
88 max_executable_size_(256l * LUMP_OF_MEMORY),
91 // Variables set based on semispace_size_ and old_generation_size_ in
92 // ConfigureHeap (survived_since_last_expansion_, external_allocation_limit_).
93 // Will be 4 * reserved_semispace_size_ to ensure that the young
94 // generation can be aligned to its size.
95 survived_since_last_expansion_(0),
97 always_allocate_scope_depth_(0),
98 linear_allocation_scope_depth_(0),
99 contexts_disposed_(0),
101 flush_monomorphic_ics_(false),
102 scan_on_scavenge_pages_(0),
104 old_pointer_space_(NULL),
105 old_data_space_(NULL),
110 gc_state_(NOT_IN_GC),
111 gc_post_processing_depth_(0),
114 remembered_unmapped_pages_index_(0),
115 unflattened_strings_length_(0),
117 allocation_allowed_(true),
118 allocation_timeout_(0),
119 disallow_allocation_failure_(false),
121 new_space_high_promotion_mode_active_(false),
122 old_gen_promotion_limit_(kMinimumPromotionLimit),
123 old_gen_allocation_limit_(kMinimumAllocationLimit),
124 old_gen_limit_factor_(1),
125 size_of_old_gen_at_last_old_space_gc_(0),
126 external_allocation_limit_(0),
127 amount_of_external_allocated_memory_(0),
128 amount_of_external_allocated_memory_at_last_global_gc_(0),
129 old_gen_exhausted_(false),
130 store_buffer_rebuilder_(store_buffer()),
131 hidden_symbol_(NULL),
132 global_gc_prologue_callback_(NULL),
133 global_gc_epilogue_callback_(NULL),
134 gc_safe_size_of_old_object_(NULL),
135 total_regexp_code_generated_(0),
137 young_survivors_after_last_gc_(0),
138 high_survival_rate_period_length_(0),
139 low_survival_rate_period_length_(0),
141 previous_survival_rate_trend_(Heap::STABLE),
142 survival_rate_trend_(Heap::STABLE),
144 total_gc_time_ms_(0),
145 max_alive_after_gc_(0),
146 min_in_mutator_(kMaxInt),
147 alive_after_last_gc_(0),
148 last_gc_end_timestamp_(0.0),
151 incremental_marking_(this),
152 number_idle_notifications_(0),
153 last_idle_notification_gc_count_(0),
154 last_idle_notification_gc_count_init_(false),
155 mark_sweeps_since_idle_round_started_(0),
156 ms_count_at_last_idle_notification_(0),
157 gc_count_at_last_idle_gc_(0),
158 scavenges_since_last_idle_round_(kIdleScavengeThreshold),
159 promotion_queue_(this),
161 chunks_queued_for_free_(NULL),
162 relocation_mutex_(NULL) {
163 // Allow build-time customization of the max semispace size. Building
164 // V8 with snapshots and a non-default max semispace size is much
165 // easier if you can define it as part of the build environment.
166 #if defined(V8_MAX_SEMISPACE_SIZE)
167 max_semispace_size_ = reserved_semispace_size_ = V8_MAX_SEMISPACE_SIZE;
170 intptr_t max_virtual = OS::MaxVirtualMemory();
172 if (max_virtual > 0) {
173 if (code_range_size_ > 0) {
174 // Reserve no more than 1/8 of the memory for the code range.
175 code_range_size_ = Min(code_range_size_, max_virtual >> 3);
179 memset(roots_, 0, sizeof(roots_[0]) * kRootListLength);
180 native_contexts_list_ = NULL;
181 mark_compact_collector_.heap_ = this;
182 external_string_table_.heap_ = this;
183 // Put a dummy entry in the remembered pages so we can find the list in the
184 // minidump even if there are no real unmapped pages.
185 RememberUnmappedPage(NULL, false);
187 ClearObjectStats(true);
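// Total capacity of the young generation plus all paged spaces. Note that the
// large object space is intentionally not included in this sum.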
191 intptr_t Heap::Capacity() {
192 if (!HasBeenSetUp()) return 0;
194 return new_space_.Capacity() +
195 old_pointer_space_->Capacity() +
196 old_data_space_->Capacity() +
197 code_space_->Capacity() +
198 map_space_->Capacity() +
199 cell_space_->Capacity();
203 intptr_t Heap::CommittedMemory() {
204 if (!HasBeenSetUp()) return 0;
206 return new_space_.CommittedMemory() +
207 old_pointer_space_->CommittedMemory() +
208 old_data_space_->CommittedMemory() +
209 code_space_->CommittedMemory() +
210 map_space_->CommittedMemory() +
211 cell_space_->CommittedMemory() +
216 size_t Heap::CommittedPhysicalMemory() {
217 if (!HasBeenSetUp()) return 0;
219 return new_space_.CommittedPhysicalMemory() +
220 old_pointer_space_->CommittedPhysicalMemory() +
221 old_data_space_->CommittedPhysicalMemory() +
222 code_space_->CommittedPhysicalMemory() +
223 map_space_->CommittedPhysicalMemory() +
224 cell_space_->CommittedPhysicalMemory() +
225 lo_space_->CommittedPhysicalMemory();
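// Committed executable memory is tracked centrally by the memory allocator
// rather than being summed over the individual spaces.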
229 intptr_t Heap::CommittedMemoryExecutable() {
230 if (!HasBeenSetUp()) return 0;
232 return isolate()->memory_allocator()->SizeExecutable();
236 intptr_t Heap::Available() {
237 if (!HasBeenSetUp()) return 0;
239 return new_space_.Available() +
240 old_pointer_space_->Available() +
241 old_data_space_->Available() +
242 code_space_->Available() +
243 map_space_->Available() +
244 cell_space_->Available();
248 bool Heap::HasBeenSetUp() {
249 return old_pointer_space_ != NULL &&
250 old_data_space_ != NULL &&
251 code_space_ != NULL &&
252 map_space_ != NULL &&
253 cell_space_ != NULL &&
258 int Heap::GcSafeSizeOfOldObject(HeapObject* object) {
259 if (IntrusiveMarking::IsMarked(object)) {
260 return IntrusiveMarking::SizeOfMarkedObject(object);
262 return object->SizeFromMap(object->map());
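// Chooses between the scavenger and the mark-compact collector for a
// collection in the given space, and reports the reason for the choice
// through *reason.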
266 GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space,
267 const char** reason) {
268 // Is global GC requested?
269 if (space != NEW_SPACE) {
270 isolate_->counters()->gc_compactor_caused_by_request()->Increment();
271 *reason = "GC in old space requested";
272 return MARK_COMPACTOR;
275 if (FLAG_gc_global || (FLAG_stress_compaction && (gc_count_ & 1) != 0)) {
276 *reason = "GC in old space forced by flags";
277 return MARK_COMPACTOR;
280 // Is enough data promoted to justify a global GC?
281 if (OldGenerationPromotionLimitReached()) {
282 isolate_->counters()->gc_compactor_caused_by_promoted_data()->Increment();
283 *reason = "promotion limit reached";
284 return MARK_COMPACTOR;
287 // Has allocation in OLD and LO failed?
288 if (old_gen_exhausted_) {
289 isolate_->counters()->
290 gc_compactor_caused_by_oldspace_exhaustion()->Increment();
291 *reason = "old generations exhausted";
292 return MARK_COMPACTOR;
295 // Is there enough space left in OLD to guarantee that a scavenge can succeed?
298 // Note that MemoryAllocator->MaxAvailable() undercounts the memory available
299 // for object promotion. It counts only the bytes that the memory
300 // allocator has not yet allocated from the OS and assigned to any space,
301 // and does not count available bytes already in the old space or code
302 // space. Undercounting is safe---we may get an unrequested full GC when
303 // a scavenge would have succeeded.
304 if (isolate_->memory_allocator()->MaxAvailable() <= new_space_.Size()) {
305 isolate_->counters()->
306 gc_compactor_caused_by_oldspace_exhaustion()->Increment();
307 *reason = "scavenge might not succeed";
308 return MARK_COMPACTOR;
317 // TODO(1238405): Combine the infrastructure for --heap-stats and
318 // --log-gc to avoid the complicated preprocessor and flag testing.
319 void Heap::ReportStatisticsBeforeGC() {
320 // Heap::ReportHeapStatistics will also log NewSpace statistics when
321 // --log-gc is set. The following logic is used to avoid double logging.
324 if (FLAG_heap_stats || FLAG_log_gc) new_space_.CollectStatistics();
325 if (FLAG_heap_stats) {
326 ReportHeapStatistics("Before GC");
327 } else if (FLAG_log_gc) {
328 new_space_.ReportStatistics();
330 if (FLAG_heap_stats || FLAG_log_gc) new_space_.ClearHistograms();
333 new_space_.CollectStatistics();
334 new_space_.ReportStatistics();
335 new_space_.ClearHistograms();
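// Prints a one-line summary per space; only active when --trace-gc-verbose is
// given.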
341 void Heap::PrintShortHeapStatistics() {
342 if (!FLAG_trace_gc_verbose) return;
343 PrintPID("Memory allocator, used: %6" V8_PTR_PREFIX "d KB"
344 ", available: %6" V8_PTR_PREFIX "d KB\n",
345 isolate_->memory_allocator()->Size() / KB,
346 isolate_->memory_allocator()->Available() / KB);
347 PrintPID("New space, used: %6" V8_PTR_PREFIX "d KB"
348 ", available: %6" V8_PTR_PREFIX "d KB"
349 ", committed: %6" V8_PTR_PREFIX "d KB\n",
350 new_space_.Size() / KB,
351 new_space_.Available() / KB,
352 new_space_.CommittedMemory() / KB);
353 PrintPID("Old pointers, used: %6" V8_PTR_PREFIX "d KB"
354 ", available: %6" V8_PTR_PREFIX "d KB"
355 ", committed: %6" V8_PTR_PREFIX "d KB\n",
356 old_pointer_space_->SizeOfObjects() / KB,
357 old_pointer_space_->Available() / KB,
358 old_pointer_space_->CommittedMemory() / KB);
359 PrintPID("Old data space, used: %6" V8_PTR_PREFIX "d KB"
360 ", available: %6" V8_PTR_PREFIX "d KB"
361 ", committed: %6" V8_PTR_PREFIX "d KB\n",
362 old_data_space_->SizeOfObjects() / KB,
363 old_data_space_->Available() / KB,
364 old_data_space_->CommittedMemory() / KB);
365 PrintPID("Code space, used: %6" V8_PTR_PREFIX "d KB"
366 ", available: %6" V8_PTR_PREFIX "d KB"
367 ", committed: %6" V8_PTR_PREFIX "d KB\n",
368 code_space_->SizeOfObjects() / KB,
369 code_space_->Available() / KB,
370 code_space_->CommittedMemory() / KB);
371 PrintPID("Map space, used: %6" V8_PTR_PREFIX "d KB"
372 ", available: %6" V8_PTR_PREFIX "d KB"
373 ", committed: %6" V8_PTR_PREFIX "d KB\n",
374 map_space_->SizeOfObjects() / KB,
375 map_space_->Available() / KB,
376 map_space_->CommittedMemory() / KB);
377 PrintPID("Cell space, used: %6" V8_PTR_PREFIX "d KB"
378 ", available: %6" V8_PTR_PREFIX "d KB"
379 ", committed: %6" V8_PTR_PREFIX "d KB\n",
380 cell_space_->SizeOfObjects() / KB,
381 cell_space_->Available() / KB,
382 cell_space_->CommittedMemory() / KB);
383 PrintPID("Large object space, used: %6" V8_PTR_PREFIX "d KB"
384 ", available: %6" V8_PTR_PREFIX "d KB"
385 ", committed: %6" V8_PTR_PREFIX "d KB\n",
386 lo_space_->SizeOfObjects() / KB,
387 lo_space_->Available() / KB,
388 lo_space_->CommittedMemory() / KB);
389 PrintPID("All spaces, used: %6" V8_PTR_PREFIX "d KB"
390 ", available: %6" V8_PTR_PREFIX "d KB"
391 ", committed: %6" V8_PTR_PREFIX "d KB\n",
392 this->SizeOfObjects() / KB,
393 this->Available() / KB,
394 this->CommittedMemory() / KB);
395 PrintPID("Total time spent in GC : %d ms\n", total_gc_time_ms_);
399 // TODO(1238405): Combine the infrastructure for --heap-stats and
400 // --log-gc to avoid the complicated preprocessor and flag testing.
401 void Heap::ReportStatisticsAfterGC() {
402 // As with the pre-GC report, we use some complicated logic to ensure that
403 // NewSpace statistics are logged exactly once when --log-gc is turned on.
405 if (FLAG_heap_stats) {
406 new_space_.CollectStatistics();
407 ReportHeapStatistics("After GC");
408 } else if (FLAG_log_gc) {
409 new_space_.ReportStatistics();
412 if (FLAG_log_gc) new_space_.ReportStatistics();
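// Work performed at the start of every collection: caches that are cheap to
// rebuild are cleared, allocation is disallowed for the duration of the GC,
// and pre-GC statistics are gathered.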
417 void Heap::GarbageCollectionPrologue() {
418 isolate_->transcendental_cache()->Clear();
419 ClearJSFunctionResultCaches();
421 unflattened_strings_length_ = 0;
423 if (FLAG_flush_code && FLAG_flush_code_incrementally) {
424 mark_compact_collector()->EnableCodeFlushing(true);
428 if (FLAG_verify_heap) {
434 ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
435 allow_allocation(false);
437 if (FLAG_gc_verbose) Print();
439 ReportStatisticsBeforeGC();
442 LiveObjectList::GCPrologue();
443 store_buffer()->GCPrologue();
447 intptr_t Heap::SizeOfObjects() {
450 for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
451 total += space->SizeOfObjects();
457 void Heap::RepairFreeListsAfterBoot() {
459 for (PagedSpace* space = spaces.next();
461 space = spaces.next()) {
462 space->RepairFreeListsAfterBoot();
467 void Heap::GarbageCollectionEpilogue() {
468 store_buffer()->GCEpilogue();
469 LiveObjectList::GCEpilogue();
471 // In release mode, we only zap the from space under heap verification.
472 if (Heap::ShouldZapGarbage()) {
477 if (FLAG_verify_heap) {
483 allow_allocation(true);
484 if (FLAG_print_global_handles) isolate_->global_handles()->Print();
485 if (FLAG_print_handles) PrintHandles();
486 if (FLAG_gc_verbose) Print();
487 if (FLAG_code_stats) ReportCodeStatistics("After GC");
490 isolate_->counters()->alive_after_last_gc()->Set(
491 static_cast<int>(SizeOfObjects()));
493 isolate_->counters()->symbol_table_capacity()->Set(
494 symbol_table()->Capacity());
495 isolate_->counters()->number_of_symbols()->Set(
496 symbol_table()->NumberOfElements());
498 if (CommittedMemory() > 0) {
499 isolate_->counters()->external_fragmentation_total()->AddSample(
500 static_cast<int>(100 - (SizeOfObjects() * 100.0) / CommittedMemory()));
502 isolate_->counters()->heap_fraction_map_space()->AddSample(
504 (map_space()->CommittedMemory() * 100.0) / CommittedMemory()));
505 isolate_->counters()->heap_fraction_cell_space()->AddSample(
507 (cell_space()->CommittedMemory() * 100.0) / CommittedMemory()));
509 isolate_->counters()->heap_sample_total_committed()->AddSample(
510 static_cast<int>(CommittedMemory() / KB));
511 isolate_->counters()->heap_sample_total_used()->AddSample(
512 static_cast<int>(SizeOfObjects() / KB));
513 isolate_->counters()->heap_sample_map_space_committed()->AddSample(
514 static_cast<int>(map_space()->CommittedMemory() / KB));
515 isolate_->counters()->heap_sample_cell_space_committed()->AddSample(
516 static_cast<int>(cell_space()->CommittedMemory() / KB));
519 #define UPDATE_COUNTERS_FOR_SPACE(space) \
520 isolate_->counters()->space##_bytes_available()->Set( \
521 static_cast<int>(space()->Available())); \
522 isolate_->counters()->space##_bytes_committed()->Set( \
523 static_cast<int>(space()->CommittedMemory())); \
524 isolate_->counters()->space##_bytes_used()->Set( \
525 static_cast<int>(space()->SizeOfObjects()));
526 #define UPDATE_FRAGMENTATION_FOR_SPACE(space) \
527 if (space()->CommittedMemory() > 0) { \
528 isolate_->counters()->external_fragmentation_##space()->AddSample( \
529 static_cast<int>(100 - \
530 (space()->SizeOfObjects() * 100.0) / space()->CommittedMemory())); \
532 #define UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(space) \
533 UPDATE_COUNTERS_FOR_SPACE(space) \
534 UPDATE_FRAGMENTATION_FOR_SPACE(space)
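// As an illustration, UPDATE_COUNTERS_FOR_SPACE(new_space) expands to:
//   isolate_->counters()->new_space_bytes_available()->Set(
//       static_cast<int>(new_space()->Available()));
//   isolate_->counters()->new_space_bytes_committed()->Set(
//       static_cast<int>(new_space()->CommittedMemory()));
//   isolate_->counters()->new_space_bytes_used()->Set(
//       static_cast<int>(new_space()->SizeOfObjects()));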
536 UPDATE_COUNTERS_FOR_SPACE(new_space)
537 UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(old_pointer_space)
538 UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(old_data_space)
539 UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(code_space)
540 UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(map_space)
541 UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(cell_space)
542 UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(lo_space)
543 #undef UPDATE_COUNTERS_FOR_SPACE
544 #undef UPDATE_FRAGMENTATION_FOR_SPACE
545 #undef UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE
548 ReportStatisticsAfterGC();
550 #ifdef ENABLE_DEBUGGER_SUPPORT
551 isolate_->debug()->AfterGarbageCollection();
552 #endif // ENABLE_DEBUGGER_SUPPORT
556 void Heap::CollectAllGarbage(int flags, const char* gc_reason) {
557 // Since we are ignoring the return value, the exact choice of space does
558 // not matter, so long as we do not specify NEW_SPACE, which would not cause a full GC.
560 mark_compact_collector_.SetFlags(flags);
561 CollectGarbage(OLD_POINTER_SPACE, gc_reason);
562 mark_compact_collector_.SetFlags(kNoGCFlags);
566 void Heap::CollectAllAvailableGarbage(const char* gc_reason) {
567 // Since we are ignoring the return value, the exact choice of space does
568 // not matter, so long as we do not specify NEW_SPACE, which would not cause a full GC.
570 // Major GC would invoke weak handle callbacks on weakly reachable
571 // handles, but won't collect weakly reachable objects until next
572 // major GC. Therefore, if we collect aggressively and a weak handle callback
573 // has been invoked, we rerun the major GC to release objects which become garbage.
575 // Note: as weak callbacks can execute arbitrary code, we cannot
576 // hope that eventually there will be no weak callback invocations.
577 // Therefore stop recollecting after several attempts.
578 mark_compact_collector()->SetFlags(kMakeHeapIterableMask |
579 kReduceMemoryFootprintMask);
580 isolate_->compilation_cache()->Clear();
581 const int kMaxNumberOfAttempts = 7;
582 for (int attempt = 0; attempt < kMaxNumberOfAttempts; attempt++) {
583 if (!CollectGarbage(OLD_POINTER_SPACE, MARK_COMPACTOR, gc_reason, NULL)) {
587 mark_compact_collector()->SetFlags(kNoGCFlags);
591 incremental_marking()->UncommitMarkingDeque();
595 bool Heap::CollectGarbage(AllocationSpace space,
596 GarbageCollector collector,
597 const char* gc_reason,
598 const char* collector_reason) {
599 // The VM is in the GC state until exiting this function.
600 VMState state(isolate_, GC);
603 // Reset the allocation timeout to the GC interval, but make sure to
604 // allow at least a few allocations after a collection. The reason
605 // for this is that we have a lot of allocation sequences and we
606 // assume that a garbage collection will allow the subsequent
607 // allocation attempts to go through.
608 allocation_timeout_ = Max(6, FLAG_gc_interval);
611 if (collector == SCAVENGER && !incremental_marking()->IsStopped()) {
612 if (FLAG_trace_incremental_marking) {
613 PrintF("[IncrementalMarking] Scavenge during marking.\n");
617 if (collector == MARK_COMPACTOR &&
618 !mark_compact_collector()->abort_incremental_marking() &&
619 !incremental_marking()->IsStopped() &&
620 !incremental_marking()->should_hurry() &&
621 FLAG_incremental_marking_steps) {
622 // Make progress in incremental marking.
623 const intptr_t kStepSizeWhenDelayedByScavenge = 1 * MB;
624 incremental_marking()->Step(kStepSizeWhenDelayedByScavenge,
625 IncrementalMarking::NO_GC_VIA_STACK_GUARD);
626 if (!incremental_marking()->IsComplete()) {
627 if (FLAG_trace_incremental_marking) {
628 PrintF("[IncrementalMarking] Delaying MarkSweep.\n");
630 collector = SCAVENGER;
631 collector_reason = "incremental marking delaying mark-sweep";
635 bool next_gc_likely_to_collect_more = false;
637 { GCTracer tracer(this, gc_reason, collector_reason);
638 GarbageCollectionPrologue();
639 // The GC count was incremented in the prologue. Tell the tracer about it.
641 tracer.set_gc_count(gc_count_);
643 // Tell the tracer which collector we've selected.
644 tracer.set_collector(collector);
647 HistogramTimerScope histogram_timer_scope(
648 (collector == SCAVENGER) ? isolate_->counters()->gc_scavenger()
649 : isolate_->counters()->gc_compactor());
650 next_gc_likely_to_collect_more =
651 PerformGarbageCollection(collector, &tracer);
654 GarbageCollectionEpilogue();
657 // Start incremental marking for the next cycle. The heap snapshot
658 // generator needs incremental marking to stay off after it aborted.
659 if (!mark_compact_collector()->abort_incremental_marking() &&
660 incremental_marking()->IsStopped() &&
661 incremental_marking()->WorthActivating() &&
662 NextGCIsLikelyToBeFull()) {
663 incremental_marking()->Start();
666 return next_gc_likely_to_collect_more;
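// Performs a single collection without extra flags or a reason string. While
// incremental marking is in progress a full mark-compact is performed instead
// of a scavenge.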
670 void Heap::PerformScavenge() {
671 GCTracer tracer(this, NULL, NULL);
672 if (incremental_marking()->IsStopped()) {
673 PerformGarbageCollection(SCAVENGER, &tracer);
675 PerformGarbageCollection(MARK_COMPACTOR, &tracer);
681 // Helper class for verifying the symbol table.
682 class SymbolTableVerifier : public ObjectVisitor {
684 void VisitPointers(Object** start, Object** end) {
685 // Visit all HeapObject pointers in [start, end).
686 for (Object** p = start; p < end; p++) {
687 if ((*p)->IsHeapObject()) {
688 // Check that the symbol is actually a symbol.
689 CHECK((*p)->IsTheHole() || (*p)->IsUndefined() || (*p)->IsSymbol());
696 static void VerifySymbolTable() {
697 SymbolTableVerifier verifier;
698 HEAP->symbol_table()->IterateElements(&verifier);
700 #endif // VERIFY_HEAP
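// Forces any in-progress incremental marking to be aborted before collecting
// the given space, then restores the default GC flags.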
703 static bool AbortIncrementalMarkingAndCollectGarbage(
705 AllocationSpace space,
706 const char* gc_reason = NULL) {
707 heap->mark_compact_collector()->SetFlags(Heap::kAbortIncrementalMarkingMask);
708 bool result = heap->CollectGarbage(space, gc_reason);
709 heap->mark_compact_collector()->SetFlags(Heap::kNoGCFlags);
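// Tries to carve out the requested number of bytes in each space, triggering
// collections as needed; if the reservation still fails after kThreshold
// attempts the process is aborted with a fatal out-of-memory error.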
714 void Heap::ReserveSpace(
716 Address *locations_out) {
717 bool gc_performed = true;
719 static const int kThreshold = 20;
720 while (gc_performed && counter++ < kThreshold) {
721 gc_performed = false;
722 ASSERT(NEW_SPACE == FIRST_PAGED_SPACE - 1);
723 for (int space = NEW_SPACE; space <= LAST_PAGED_SPACE; space++) {
724 if (sizes[space] != 0) {
725 MaybeObject* allocation;
726 if (space == NEW_SPACE) {
727 allocation = new_space()->AllocateRaw(sizes[space]);
729 allocation = paged_space(space)->AllocateRaw(sizes[space]);
732 if (!allocation->To<FreeListNode>(&node)) {
733 if (space == NEW_SPACE) {
734 Heap::CollectGarbage(NEW_SPACE,
735 "failed to reserve space in the new space");
737 AbortIncrementalMarkingAndCollectGarbage(
739 static_cast<AllocationSpace>(space),
740 "failed to reserve space in paged space");
745 // Mark with a free list node, in case we have a GC before
747 node->set_size(this, sizes[space]);
748 locations_out[space] = node->address();
755 // Failed to reserve the space after several attempts.
756 V8::FatalProcessOutOfMemory("Heap::ReserveSpace");
761 void Heap::EnsureFromSpaceIsCommitted() {
762 if (new_space_.CommitFromSpaceIfNeeded()) return;
764 // Committing memory to from space failed.
765 // Try shrinking and try again.
767 if (new_space_.CommitFromSpaceIfNeeded()) return;
769 // Committing memory to from space failed again.
770 // Memory is exhausted and we will die.
771 V8::FatalProcessOutOfMemory("Committing semi space failed.");
775 void Heap::ClearJSFunctionResultCaches() {
776 if (isolate_->bootstrapper()->IsActive()) return;
778 Object* context = native_contexts_list_;
779 while (!context->IsUndefined()) {
780 // Get the caches for this context. GC can happen when the context
781 // is not fully initialized, so the caches can be undefined.
782 Object* caches_or_undefined =
783 Context::cast(context)->get(Context::JSFUNCTION_RESULT_CACHES_INDEX);
784 if (!caches_or_undefined->IsUndefined()) {
785 FixedArray* caches = FixedArray::cast(caches_or_undefined);
787 int length = caches->length();
788 for (int i = 0; i < length; i++) {
789 JSFunctionResultCache::cast(caches->get(i))->Clear();
792 // Get the next context:
793 context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
799 void Heap::ClearNormalizedMapCaches() {
800 if (isolate_->bootstrapper()->IsActive() &&
801 !incremental_marking()->IsMarking()) {
805 Object* context = native_contexts_list_;
806 while (!context->IsUndefined()) {
807 // GC can happen when the context is not fully initialized,
808 // so the cache can be undefined.
810 Context::cast(context)->get(Context::NORMALIZED_MAP_CACHE_INDEX);
811 if (!cache->IsUndefined()) {
812 NormalizedMapCache::cast(cache)->Clear();
814 context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
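// Recomputes the young generation survival rate after a collection and
// classifies the trend (INCREASING, DECREASING or STABLE) relative to the
// previous rate.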
819 void Heap::UpdateSurvivalRateTrend(int start_new_space_size) {
820 double survival_rate =
821 (static_cast<double>(young_survivors_after_last_gc_) * 100) /
822 start_new_space_size;
824 if (survival_rate > kYoungSurvivalRateHighThreshold) {
825 high_survival_rate_period_length_++;
827 high_survival_rate_period_length_ = 0;
830 if (survival_rate < kYoungSurvivalRateLowThreshold) {
831 low_survival_rate_period_length_++;
833 low_survival_rate_period_length_ = 0;
836 double survival_rate_diff = survival_rate_ - survival_rate;
838 if (survival_rate_diff > kYoungSurvivalRateAllowedDeviation) {
839 set_survival_rate_trend(DECREASING);
840 } else if (survival_rate_diff < -kYoungSurvivalRateAllowedDeviation) {
841 set_survival_rate_trend(INCREASING);
843 set_survival_rate_trend(STABLE);
846 survival_rate_ = survival_rate;
849 bool Heap::PerformGarbageCollection(GarbageCollector collector,
851 bool next_gc_likely_to_collect_more = false;
853 if (collector != SCAVENGER) {
854 PROFILE(isolate_, CodeMovingGCEvent());
858 if (FLAG_verify_heap) {
863 if (collector == MARK_COMPACTOR && global_gc_prologue_callback_) {
864 ASSERT(!allocation_allowed_);
865 GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
866 global_gc_prologue_callback_();
870 collector == MARK_COMPACTOR ? kGCTypeMarkSweepCompact : kGCTypeScavenge;
872 for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
873 if (gc_type & gc_prologue_callbacks_[i].gc_type) {
874 gc_prologue_callbacks_[i].callback(gc_type, kNoGCCallbackFlags);
878 EnsureFromSpaceIsCommitted();
880 int start_new_space_size = Heap::new_space()->SizeAsInt();
882 if (IsHighSurvivalRate()) {
883 // We speed up the incremental marker if it is running so that it
884 // does not fall behind the rate of promotion, which would cause a
885 // constantly growing old space.
886 incremental_marking()->NotifyOfHighPromotionRate();
889 if (collector == MARK_COMPACTOR) {
890 // Perform mark-sweep with optional compaction.
893 bool high_survival_rate_during_scavenges = IsHighSurvivalRate() &&
894 IsStableOrIncreasingSurvivalTrend();
896 UpdateSurvivalRateTrend(start_new_space_size);
898 size_of_old_gen_at_last_old_space_gc_ = PromotedSpaceSizeOfObjects();
900 if (high_survival_rate_during_scavenges &&
901 IsStableOrIncreasingSurvivalTrend()) {
902 // Stable high survival rates of young objects both during partial and
903 // full collections indicate that the mutator is either building or modifying
904 // a structure with a long lifetime.
905 // In this case we aggressively raise the old generation memory limits to
906 // postpone the subsequent mark-sweep collection and thus trade memory
907 // space for mutation speed.
908 old_gen_limit_factor_ = 2;
910 old_gen_limit_factor_ = 1;
913 old_gen_promotion_limit_ =
914 OldGenPromotionLimit(size_of_old_gen_at_last_old_space_gc_);
915 old_gen_allocation_limit_ =
916 OldGenAllocationLimit(size_of_old_gen_at_last_old_space_gc_);
918 old_gen_exhausted_ = false;
924 UpdateSurvivalRateTrend(start_new_space_size);
927 if (!new_space_high_promotion_mode_active_ &&
928 new_space_.Capacity() == new_space_.MaximumCapacity() &&
929 IsStableOrIncreasingSurvivalTrend() &&
930 IsHighSurvivalRate()) {
931 // Stable high survival rates even though the young generation is at
932 // maximum capacity indicate that most objects will be promoted.
933 // To decrease scavenger pauses and final mark-sweep pauses, we
934 // have to limit the maximal capacity of the young generation.
935 new_space_high_promotion_mode_active_ = true;
937 PrintPID("Limited new space size due to high promotion rate: %d MB\n",
938 new_space_.InitialCapacity() / MB);
940 } else if (new_space_high_promotion_mode_active_ &&
941 IsStableOrDecreasingSurvivalTrend() &&
942 IsLowSurvivalRate()) {
943 // Decreasing low survival rates might indicate that the above high
944 // promotion mode is over and we should allow the young generation to grow freely.
946 new_space_high_promotion_mode_active_ = false;
948 PrintPID("Unlimited new space size due to low promotion rate: %d MB\n",
949 new_space_.MaximumCapacity() / MB);
953 if (new_space_high_promotion_mode_active_ &&
954 new_space_.Capacity() > new_space_.InitialCapacity()) {
958 isolate_->counters()->objs_since_last_young()->Set(0);
960 // Callbacks that fire after this point might trigger nested GCs and
961 // restart incremental marking, so the assertion can't be moved down.
962 ASSERT(collector == SCAVENGER || incremental_marking()->IsStopped());
964 gc_post_processing_depth_++;
965 { DisableAssertNoAllocation allow_allocation;
966 GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
967 next_gc_likely_to_collect_more =
968 isolate_->global_handles()->PostGarbageCollectionProcessing(
971 gc_post_processing_depth_--;
973 // Update relocatables.
974 Relocatable::PostGarbageCollectionProcessing();
976 if (collector == MARK_COMPACTOR) {
977 // Register the amount of external allocated memory.
978 amount_of_external_allocated_memory_at_last_global_gc_ =
979 amount_of_external_allocated_memory_;
982 GCCallbackFlags callback_flags = kNoGCCallbackFlags;
983 for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
984 if (gc_type & gc_epilogue_callbacks_[i].gc_type) {
985 gc_epilogue_callbacks_[i].callback(gc_type, callback_flags);
989 if (collector == MARK_COMPACTOR && global_gc_epilogue_callback_) {
990 ASSERT(!allocation_allowed_);
991 GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
992 global_gc_epilogue_callback_();
996 if (FLAG_verify_heap) {
1001 return next_gc_likely_to_collect_more;
1005 void Heap::MarkCompact(GCTracer* tracer) {
1006 gc_state_ = MARK_COMPACT;
1007 LOG(isolate_, ResourceEvent("markcompact", "begin"));
1009 mark_compact_collector_.Prepare(tracer);
1012 tracer->set_full_gc_count(ms_count_);
1014 MarkCompactPrologue();
1016 mark_compact_collector_.CollectGarbage();
1018 LOG(isolate_, ResourceEvent("markcompact", "end"));
1020 gc_state_ = NOT_IN_GC;
1022 isolate_->counters()->objs_since_last_full()->Set(0);
1024 contexts_disposed_ = 0;
1026 flush_monomorphic_ics_ = false;
1030 void Heap::MarkCompactPrologue() {
1031 // At any old GC, clear the keyed lookup cache to enable collection of unused maps.
1033 isolate_->keyed_lookup_cache()->Clear();
1034 isolate_->context_slot_cache()->Clear();
1035 isolate_->descriptor_lookup_cache()->Clear();
1036 RegExpResultsCache::Clear(string_split_cache());
1037 RegExpResultsCache::Clear(regexp_multiple_cache());
1039 isolate_->compilation_cache()->MarkCompactPrologue();
1041 CompletelyClearInstanceofCache();
1043 FlushNumberStringCache();
1044 if (FLAG_cleanup_code_caches_at_gc) {
1045 polymorphic_code_cache()->set_cache(undefined_value());
1048 ClearNormalizedMapCaches();
1052 Object* Heap::FindCodeObject(Address a) {
1053 return isolate()->inner_pointer_to_code_cache()->
1054 GcSafeFindCodeForInnerPointer(a);
1058 // Helper class for copying HeapObjects
1059 class ScavengeVisitor: public ObjectVisitor {
1061 explicit ScavengeVisitor(Heap* heap) : heap_(heap) {}
1063 void VisitPointer(Object** p) { ScavengePointer(p); }
1065 void VisitPointers(Object** start, Object** end) {
1066 // Copy all HeapObject pointers in [start, end)
1067 for (Object** p = start; p < end; p++) ScavengePointer(p);
1071 void ScavengePointer(Object** p) {
1072 Object* object = *p;
1073 if (!heap_->InNewSpace(object)) return;
1074 Heap::ScavengeObject(reinterpret_cast<HeapObject**>(p),
1075 reinterpret_cast<HeapObject*>(object));
1083 // Visitor class to verify pointers in code or data space do not point into new space.
1085 class VerifyNonPointerSpacePointersVisitor: public ObjectVisitor {
1087 void VisitPointers(Object** start, Object**end) {
1088 for (Object** current = start; current < end; current++) {
1089 if ((*current)->IsHeapObject()) {
1090 CHECK(!HEAP->InNewSpace(HeapObject::cast(*current)));
1097 static void VerifyNonPointerSpacePointers() {
1098 // Verify that there are no pointers to new space in spaces where we
1099 // do not expect them.
1100 VerifyNonPointerSpacePointersVisitor v;
1101 HeapObjectIterator code_it(HEAP->code_space());
1102 for (HeapObject* object = code_it.Next();
1103 object != NULL; object = code_it.Next())
1104 object->Iterate(&v);
1106 // The old data space is normally swept conservatively, so the iterator does
1107 // not work on it and we normally skip the next check.
1108 if (!HEAP->old_data_space()->was_swept_conservatively()) {
1109 HeapObjectIterator data_it(HEAP->old_data_space());
1110 for (HeapObject* object = data_it.Next();
1111 object != NULL; object = data_it.Next())
1112 object->Iterate(&v);
1115 #endif // VERIFY_HEAP
1118 void Heap::CheckNewSpaceExpansionCriteria() {
1119 if (new_space_.Capacity() < new_space_.MaximumCapacity() &&
1120 survived_since_last_expansion_ > new_space_.Capacity() &&
1121 !new_space_high_promotion_mode_active_) {
1122 // Grow the size of new space if there is room to grow, enough data
1123 // has survived scavenge since the last expansion and we are not in
1124 // high promotion mode.
1126 survived_since_last_expansion_ = 0;
1131 static bool IsUnscavengedHeapObject(Heap* heap, Object** p) {
1132 return heap->InNewSpace(*p) &&
1133 !HeapObject::cast(*p)->map_word().IsForwardingAddress();
1137 void Heap::ScavengeStoreBufferCallback(
1140 StoreBufferEvent event) {
1141 heap->store_buffer_rebuilder_.Callback(page, event);
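// Invoked while the store buffer is being rebuilt during a scavenge: decides,
// page by page, whether the entries collected for a page are kept or whether
// the page is switched to scan-on-scavenge mode instead.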
1145 void StoreBufferRebuilder::Callback(MemoryChunk* page, StoreBufferEvent event) {
1146 if (event == kStoreBufferStartScanningPagesEvent) {
1147 start_of_current_page_ = NULL;
1148 current_page_ = NULL;
1149 } else if (event == kStoreBufferScanningPageEvent) {
1150 if (current_page_ != NULL) {
1151 // If this page already overflowed the store buffer during this iteration.
1152 if (current_page_->scan_on_scavenge()) {
1153 // Then we should wipe out the entries that have been added for it.
1154 store_buffer_->SetTop(start_of_current_page_);
1155 } else if (store_buffer_->Top() - start_of_current_page_ >=
1156 (store_buffer_->Limit() - store_buffer_->Top()) >> 2) {
1157 // Did we find too many pointers in the previous page? The heuristic is
1158 // that no page can take more than 1/5 of the remaining slots in the store buffer.
1160 current_page_->set_scan_on_scavenge(true);
1161 store_buffer_->SetTop(start_of_current_page_);
1163 // In this case the page we scanned took a reasonable number of slots in
1164 // the store buffer. It has now been rehabilitated and is no longer
1165 // marked scan_on_scavenge.
1166 ASSERT(!current_page_->scan_on_scavenge());
1169 start_of_current_page_ = store_buffer_->Top();
1170 current_page_ = page;
1171 } else if (event == kStoreBufferFullEvent) {
1172 // The current page overflowed the store buffer again. Wipe out its entries
1173 // in the store buffer and mark it scan-on-scavenge again. This may happen
1174 // several times while scanning.
1175 if (current_page_ == NULL) {
1176 // Store Buffer overflowed while scanning promoted objects. These are not
1177 // in any particular page, though they are likely to be clustered by the
1178 // allocation routines.
1179 store_buffer_->EnsureSpace(StoreBuffer::kStoreBufferSize);
1181 // Store Buffer overflowed while scanning a particular old space page for
1182 // pointers to new space.
1183 ASSERT(current_page_ == page);
1184 ASSERT(page != NULL);
1185 current_page_->set_scan_on_scavenge(true);
1186 ASSERT(start_of_current_page_ != store_buffer_->Top());
1187 store_buffer_->SetTop(start_of_current_page_);
1195 void PromotionQueue::Initialize() {
1196 // Assumes that a NewSpacePage exactly fits a whole number of promotion queue
1197 // entries (where each is a pair of intptr_t). This allows us to simplify
1198 // the test for when to switch pages.
1199 ASSERT((Page::kPageSize - MemoryChunk::kBodyOffset) % (2 * kPointerSize)
1201 limit_ = reinterpret_cast<intptr_t*>(heap_->new_space()->ToSpaceStart());
1203 reinterpret_cast<intptr_t*>(heap_->new_space()->ToSpaceEnd());
1204 emergency_stack_ = NULL;
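// Moves the entries at the head of the promotion queue into a heap-allocated
// emergency stack. The queue lives at the unused top of to-space, so this is
// needed when allocation catches up with the queue head.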
1209 void PromotionQueue::RelocateQueueHead() {
1210 ASSERT(emergency_stack_ == NULL);
1212 Page* p = Page::FromAllocationTop(reinterpret_cast<Address>(rear_));
1213 intptr_t* head_start = rear_;
1214 intptr_t* head_end =
1215 Min(front_, reinterpret_cast<intptr_t*>(p->area_end()));
1218 static_cast<int>(head_end - head_start) / kEntrySizeInWords;
1220 emergency_stack_ = new List<Entry>(2 * entries_count);
1222 while (head_start != head_end) {
1223 int size = static_cast<int>(*(head_start++));
1224 HeapObject* obj = reinterpret_cast<HeapObject*>(*(head_start++));
1225 emergency_stack_->Add(Entry(obj, size));
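// Weak list retainer used during scavenges: objects outside from-space are
// kept as they are, objects that have been copied are replaced by their
// forwarding address, and everything else is dropped.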
1231 class ScavengeWeakObjectRetainer : public WeakObjectRetainer {
1233 explicit ScavengeWeakObjectRetainer(Heap* heap) : heap_(heap) { }
1235 virtual Object* RetainAs(Object* object) {
1236 if (!heap_->InFromSpace(object)) {
1240 MapWord map_word = HeapObject::cast(object)->map_word();
1241 if (map_word.IsForwardingAddress()) {
1242 return map_word.ToForwardingAddress();
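// Young generation collection. Live objects are copied within the semispaces
// or promoted into the old generation, starting from the roots, the store
// buffer and the surviving weak handles.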
1252 void Heap::Scavenge() {
1253 RelocationLock relocation_lock(this);
1256 if (FLAG_verify_heap) VerifyNonPointerSpacePointers();
1259 gc_state_ = SCAVENGE;
1261 // Implements Cheney's copying algorithm
1262 LOG(isolate_, ResourceEvent("scavenge", "begin"));
1264 // Clear descriptor cache.
1265 isolate_->descriptor_lookup_cache()->Clear();
1267 // Used for updating survived_since_last_expansion_ at function end.
1268 intptr_t survived_watermark = PromotedSpaceSizeOfObjects();
1270 CheckNewSpaceExpansionCriteria();
1272 SelectScavengingVisitorsTable();
1274 incremental_marking()->PrepareForScavenge();
1276 AdvanceSweepers(static_cast<int>(new_space_.Size()));
1278 // Flip the semispaces. After flipping, to space is empty and from space has the live objects.
1281 new_space_.ResetAllocationInfo();
1283 // We need to sweep newly copied objects which can be either in the
1284 // to space or promoted to the old generation. For to-space
1285 // objects, we treat the bottom of the to space as a queue. Newly
1286 // copied and unswept objects lie between a 'front' mark and the
1287 // allocation pointer.
1289 // Promoted objects can go into various old-generation spaces, and
1290 // can be allocated internally in the spaces (from the free list).
1291 // We treat the top of the to space as a queue of addresses of
1292 // promoted objects. The addresses of newly promoted and unswept
1293 // objects lie between a 'front' mark and a 'rear' mark that is
1294 // updated as a side effect of promoting an object.
1296 // There is guaranteed to be enough room at the top of the to space
1297 // for the addresses of promoted objects: every object promoted
1298 // frees up its size in bytes from the top of the new space, and
1299 // objects are at least one pointer in size.
1300 Address new_space_front = new_space_.ToSpaceStart();
1301 promotion_queue_.Initialize();
1304 store_buffer()->Clean();
1307 ScavengeVisitor scavenge_visitor(this);
1309 IterateRoots(&scavenge_visitor, VISIT_ALL_IN_SCAVENGE);
1311 // Copy objects reachable from the old generation.
1313 StoreBufferRebuildScope scope(this,
1315 &ScavengeStoreBufferCallback);
1316 store_buffer()->IteratePointersToNewSpace(&ScavengeObject);
1319 // Copy objects reachable from cells by scavenging cell values directly.
1320 HeapObjectIterator cell_iterator(cell_space_);
1321 for (HeapObject* heap_object = cell_iterator.Next();
1322 heap_object != NULL;
1323 heap_object = cell_iterator.Next()) {
1324 if (heap_object->IsJSGlobalPropertyCell()) {
1325 JSGlobalPropertyCell* cell = JSGlobalPropertyCell::cast(heap_object);
1326 Address value_address = cell->ValueAddress();
1327 scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(value_address));
1331 // Copy objects reachable from the code flushing candidates list.
1332 MarkCompactCollector* collector = mark_compact_collector();
1333 if (collector->is_code_flushing_enabled()) {
1334 collector->code_flusher()->IteratePointersToFromSpace(&scavenge_visitor);
1338 // Scavenge objects reachable from the native contexts list directly.
1338 scavenge_visitor.VisitPointer(BitCast<Object**>(&native_contexts_list_));
1340 new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
1342 while (isolate()->global_handles()->IterateObjectGroups(
1343 &scavenge_visitor, &IsUnscavengedHeapObject)) {
1344 new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
1346 isolate()->global_handles()->RemoveObjectGroups();
1348 isolate_->global_handles()->IdentifyNewSpaceWeakIndependentHandles(
1349 &IsUnscavengedHeapObject);
1350 isolate_->global_handles()->IterateNewSpaceWeakIndependentRoots(
1352 new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
1354 UpdateNewSpaceReferencesInExternalStringTable(
1355 &UpdateNewSpaceReferenceInExternalStringTableEntry);
1357 promotion_queue_.Destroy();
1359 LiveObjectList::UpdateReferencesForScavengeGC();
1360 if (!FLAG_watch_ic_patching) {
1361 isolate()->runtime_profiler()->UpdateSamplesAfterScavenge();
1363 incremental_marking()->UpdateMarkingDequeAfterScavenge();
1365 ScavengeWeakObjectRetainer weak_object_retainer(this);
1366 ProcessWeakReferences(&weak_object_retainer);
1368 ASSERT(new_space_front == new_space_.top());
1371 new_space_.set_age_mark(new_space_.top());
1373 new_space_.LowerInlineAllocationLimit(
1374 new_space_.inline_allocation_limit_step());
1376 // Update how much has survived scavenge.
1377 IncrementYoungSurvivorsCounter(static_cast<int>(
1378 (PromotedSpaceSizeOfObjects() - survived_watermark) + new_space_.Size()));
1380 LOG(isolate_, ResourceEvent("scavenge", "end"));
1382 gc_state_ = NOT_IN_GC;
1384 scavenges_since_last_idle_round_++;
1388 String* Heap::UpdateNewSpaceReferenceInExternalStringTableEntry(Heap* heap,
1390 MapWord first_word = HeapObject::cast(*p)->map_word();
1392 if (!first_word.IsForwardingAddress()) {
1393 // Unreachable external string can be finalized.
1394 heap->FinalizeExternalString(String::cast(*p));
1398 // String is still reachable.
1399 return String::cast(first_word.ToForwardingAddress());
1403 void Heap::UpdateNewSpaceReferencesInExternalStringTable(
1404 ExternalStringTableUpdaterCallback updater_func) {
1406 if (FLAG_verify_heap) {
1407 external_string_table_.Verify();
1411 if (external_string_table_.new_space_strings_.is_empty()) return;
1413 Object** start = &external_string_table_.new_space_strings_[0];
1414 Object** end = start + external_string_table_.new_space_strings_.length();
1415 Object** last = start;
1417 for (Object** p = start; p < end; ++p) {
1418 ASSERT(InFromSpace(*p));
1419 String* target = updater_func(this, p);
1421 if (target == NULL) continue;
1423 ASSERT(target->IsExternalString());
1425 if (InNewSpace(target)) {
1426 // String is still in new space. Update the table entry.
1430 // String got promoted. Move it to the old string list.
1431 external_string_table_.AddOldString(target);
1435 ASSERT(last <= end);
1436 external_string_table_.ShrinkNewStrings(static_cast<int>(last - start));
1440 void Heap::UpdateReferencesInExternalStringTable(
1441 ExternalStringTableUpdaterCallback updater_func) {
1443 // Update old space string references.
1444 if (external_string_table_.old_space_strings_.length() > 0) {
1445 Object** start = &external_string_table_.old_space_strings_[0];
1446 Object** end = start + external_string_table_.old_space_strings_.length();
1447 for (Object** p = start; p < end; ++p) *p = updater_func(this, p);
1450 UpdateNewSpaceReferencesInExternalStringTable(updater_func);
1454 static Object* ProcessFunctionWeakReferences(Heap* heap,
1456 WeakObjectRetainer* retainer,
1457 bool record_slots) {
1458 Object* undefined = heap->undefined_value();
1459 Object* head = undefined;
1460 JSFunction* tail = NULL;
1461 Object* candidate = function;
1462 while (candidate != undefined) {
1463 // Check whether to keep the candidate in the list.
1464 JSFunction* candidate_function = reinterpret_cast<JSFunction*>(candidate);
1465 Object* retain = retainer->RetainAs(candidate);
1466 if (retain != NULL) {
1467 if (head == undefined) {
1468 // First element in the list.
1471 // Subsequent elements in the list.
1472 ASSERT(tail != NULL);
1473 tail->set_next_function_link(retain);
1475 Object** next_function =
1476 HeapObject::RawField(tail, JSFunction::kNextFunctionLinkOffset);
1477 heap->mark_compact_collector()->RecordSlot(
1478 next_function, next_function, retain);
1481 // Retained function is new tail.
1482 candidate_function = reinterpret_cast<JSFunction*>(retain);
1483 tail = candidate_function;
1485 ASSERT(retain->IsUndefined() || retain->IsJSFunction());
1487 if (retain == undefined) break;
1490 // Move to next element in the list.
1491 candidate = candidate_function->next_function_link();
1494 // Terminate the list if there are one or more elements.
1496 tail->set_next_function_link(undefined);
1503 void Heap::ProcessWeakReferences(WeakObjectRetainer* retainer) {
1504 Object* undefined = undefined_value();
1505 Object* head = undefined;
1506 Context* tail = NULL;
1507 Object* candidate = native_contexts_list_;
1509 // We don't record weak slots during marking or scavenges.
1510 // Instead we do it once when we complete the mark-compact cycle.
1511 // Note that the write barrier has no effect if we are already in the middle of
1512 // a compacting mark-sweep cycle, so we have to record slots manually.
1514 gc_state() == MARK_COMPACT &&
1515 mark_compact_collector()->is_compacting();
1517 while (candidate != undefined) {
1518 // Check whether to keep the candidate in the list.
1519 Context* candidate_context = reinterpret_cast<Context*>(candidate);
1520 Object* retain = retainer->RetainAs(candidate);
1521 if (retain != NULL) {
1522 if (head == undefined) {
1523 // First element in the list.
1526 // Subsequent elements in the list.
1527 ASSERT(tail != NULL);
1528 tail->set_unchecked(this,
1529 Context::NEXT_CONTEXT_LINK,
1531 UPDATE_WRITE_BARRIER);
1534 Object** next_context =
1535 HeapObject::RawField(
1536 tail, FixedArray::SizeFor(Context::NEXT_CONTEXT_LINK));
1537 mark_compact_collector()->RecordSlot(
1538 next_context, next_context, retain);
1541 // Retained context is new tail.
1542 candidate_context = reinterpret_cast<Context*>(retain);
1543 tail = candidate_context;
1545 if (retain == undefined) break;
1547 // Process the weak list of optimized functions for the context.
1548 Object* function_list_head =
1549 ProcessFunctionWeakReferences(
1551 candidate_context->get(Context::OPTIMIZED_FUNCTIONS_LIST),
1554 candidate_context->set_unchecked(this,
1555 Context::OPTIMIZED_FUNCTIONS_LIST,
1557 UPDATE_WRITE_BARRIER);
1559 Object** optimized_functions =
1560 HeapObject::RawField(
1561 tail, FixedArray::SizeFor(Context::OPTIMIZED_FUNCTIONS_LIST));
1562 mark_compact_collector()->RecordSlot(
1563 optimized_functions, optimized_functions, function_list_head);
1567 // Move to next element in the list.
1568 candidate = candidate_context->get(Context::NEXT_CONTEXT_LINK);
1571 // Terminate the list if there are one or more elements.
1573 tail->set_unchecked(this,
1574 Context::NEXT_CONTEXT_LINK,
1575 Heap::undefined_value(),
1576 UPDATE_WRITE_BARRIER);
1579 // Update the head of the list of contexts.
1580 native_contexts_list_ = head;
1584 void Heap::VisitExternalResources(v8::ExternalResourceVisitor* visitor) {
1585 AssertNoAllocation no_allocation;
1587 // Both the external string table and the symbol table may contain
1588 // external strings, but neither lists them exhaustively, nor is the
1589 // intersection set empty. Therefore we iterate over the external string
1590 // table first, ignoring symbols, and then over the symbol table.
1592 class ExternalStringTableVisitorAdapter : public ObjectVisitor {
1594 explicit ExternalStringTableVisitorAdapter(
1595 v8::ExternalResourceVisitor* visitor) : visitor_(visitor) {}
1596 virtual void VisitPointers(Object** start, Object** end) {
1597 for (Object** p = start; p < end; p++) {
1598 // Visit non-symbol external strings,
1599 // since symbols are listed in the symbol table.
1600 if (!(*p)->IsSymbol()) {
1601 ASSERT((*p)->IsExternalString());
1602 visitor_->VisitExternalString(Utils::ToLocal(
1603 Handle<String>(String::cast(*p))));
1608 v8::ExternalResourceVisitor* visitor_;
1609 } external_string_table_visitor(visitor);
1611 external_string_table_.Iterate(&external_string_table_visitor);
1613 class SymbolTableVisitorAdapter : public ObjectVisitor {
1615 explicit SymbolTableVisitorAdapter(
1616 v8::ExternalResourceVisitor* visitor) : visitor_(visitor) {}
1617 virtual void VisitPointers(Object** start, Object** end) {
1618 for (Object** p = start; p < end; p++) {
1619 if ((*p)->IsExternalString()) {
1620 ASSERT((*p)->IsSymbol());
1621 visitor_->VisitExternalString(Utils::ToLocal(
1622 Handle<String>(String::cast(*p))));
1627 v8::ExternalResourceVisitor* visitor_;
1628 } symbol_table_visitor(visitor);
1630 symbol_table()->IterateElements(&symbol_table_visitor);
1634 class NewSpaceScavenger : public StaticNewSpaceVisitor<NewSpaceScavenger> {
1636 static inline void VisitPointer(Heap* heap, Object** p) {
1637 Object* object = *p;
1638 if (!heap->InNewSpace(object)) return;
1639 Heap::ScavengeObject(reinterpret_cast<HeapObject**>(p),
1640 reinterpret_cast<HeapObject*>(object));
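// Drains the queue of newly copied objects (the region between
// new_space_front and the allocation top) together with the promotion queue,
// and returns the new front of the unprocessed region.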
1645 Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
1646 Address new_space_front) {
1648 SemiSpace::AssertValidRange(new_space_front, new_space_.top());
1649 // The addresses new_space_front and new_space_.top() define a
1650 // queue of unprocessed copied objects. Process them until the queue is empty.
1652 while (new_space_front != new_space_.top()) {
1653 if (!NewSpacePage::IsAtEnd(new_space_front)) {
1654 HeapObject* object = HeapObject::FromAddress(new_space_front);
1656 NewSpaceScavenger::IterateBody(object->map(), object);
1659 NewSpacePage::FromLimit(new_space_front)->next_page()->area_start();
1663 // Promote and process all the to-be-promoted objects.
1665 StoreBufferRebuildScope scope(this,
1667 &ScavengeStoreBufferCallback);
1668 while (!promotion_queue()->is_empty()) {
1671 promotion_queue()->remove(&target, &size);
1673 // A promoted object might already have been partially visited
1674 // during old space pointer iteration. Thus we search specifically
1675 // for pointers to the from semispace instead of looking for pointers to new space.
1677 ASSERT(!target->IsMap());
1678 IterateAndMarkPointersToFromSpace(target->address(),
1679 target->address() + size,
1684 // Take another spin if there are now unswept objects in new space
1685 // (there are currently no more unswept promoted objects).
1686 } while (new_space_front != new_space_.top());
1688 return new_space_front;
1692 STATIC_ASSERT((FixedDoubleArray::kHeaderSize & kDoubleAlignmentMask) == 0);
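// When an object must be double aligned the scavenger allocates one extra
// word; this helper places a filler in the unused word, either before or after
// the object, so that the heap stays iterable.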
1695 INLINE(static HeapObject* EnsureDoubleAligned(Heap* heap,
1699 static HeapObject* EnsureDoubleAligned(Heap* heap,
1702 if ((OffsetFrom(object->address()) & kDoubleAlignmentMask) != 0) {
1703 heap->CreateFillerObjectAt(object->address(), kPointerSize);
1704 return HeapObject::FromAddress(object->address() + kPointerSize);
1706 heap->CreateFillerObjectAt(object->address() + size - kPointerSize,
1713 enum LoggingAndProfiling {
1714 LOGGING_AND_PROFILING_ENABLED,
1715 LOGGING_AND_PROFILING_DISABLED
1719 enum MarksHandling { TRANSFER_MARKS, IGNORE_MARKS };
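// Dispatch table based visitor that implements the copy and promotion logic
// of the scavenger. It is specialized on whether mark bits have to be
// transferred and on whether logging or profiling is enabled.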
1722 template<MarksHandling marks_handling,
1723 LoggingAndProfiling logging_and_profiling_mode>
1724 class ScavengingVisitor : public StaticVisitorBase {
1726 static void Initialize() {
1727 table_.Register(kVisitSeqOneByteString, &EvacuateSeqOneByteString);
1728 table_.Register(kVisitSeqTwoByteString, &EvacuateSeqTwoByteString);
1729 table_.Register(kVisitShortcutCandidate, &EvacuateShortcutCandidate);
1730 table_.Register(kVisitByteArray, &EvacuateByteArray);
1731 table_.Register(kVisitFixedArray, &EvacuateFixedArray);
1732 table_.Register(kVisitFixedDoubleArray, &EvacuateFixedDoubleArray);
1734 table_.Register(kVisitNativeContext,
1735 &ObjectEvacuationStrategy<POINTER_OBJECT>::
1736 template VisitSpecialized<Context::kSize>);
1738 table_.Register(kVisitConsString,
1739 &ObjectEvacuationStrategy<POINTER_OBJECT>::
1740 template VisitSpecialized<ConsString::kSize>);
1742 table_.Register(kVisitSlicedString,
1743 &ObjectEvacuationStrategy<POINTER_OBJECT>::
1744 template VisitSpecialized<SlicedString::kSize>);
1746 table_.Register(kVisitSharedFunctionInfo,
1747 &ObjectEvacuationStrategy<POINTER_OBJECT>::
1748 template VisitSpecialized<SharedFunctionInfo::kSize>);
1750 table_.Register(kVisitJSWeakMap,
1751 &ObjectEvacuationStrategy<POINTER_OBJECT>::
1754 table_.Register(kVisitJSRegExp,
1755 &ObjectEvacuationStrategy<POINTER_OBJECT>::
1758 if (marks_handling == IGNORE_MARKS) {
1759 table_.Register(kVisitJSFunction,
1760 &ObjectEvacuationStrategy<POINTER_OBJECT>::
1761 template VisitSpecialized<JSFunction::kSize>);
1763 table_.Register(kVisitJSFunction, &EvacuateJSFunction);
1766 table_.RegisterSpecializations<ObjectEvacuationStrategy<DATA_OBJECT>,
1768 kVisitDataObjectGeneric>();
1770 table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>,
1772 kVisitJSObjectGeneric>();
1774 table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>,
1776 kVisitStructGeneric>();
1779 static VisitorDispatchTable<ScavengingCallback>* GetTable() {
1784 enum ObjectContents { DATA_OBJECT, POINTER_OBJECT };
1785 enum SizeRestriction { SMALL, UNKNOWN_SIZE };
1787 static void RecordCopiedObject(Heap* heap, HeapObject* obj) {
1788 bool should_record = false;
1790 should_record = FLAG_heap_stats;
1792 should_record = should_record || FLAG_log_gc;
1793 if (should_record) {
1794 if (heap->new_space()->Contains(obj)) {
1795 heap->new_space()->RecordAllocation(obj);
1797 heap->new_space()->RecordPromotion(obj);
1802 // Helper function used by CopyObject to copy a source object to an
1803 // allocated target object and update the forwarding pointer in the source
1804 // object. Returns the target object.
1805 INLINE(static void MigrateObject(Heap* heap,
1809 // Copy the content of source to target.
1810 heap->CopyBlock(target->address(), source->address(), size);
1812 // Set the forwarding address.
1813 source->set_map_word(MapWord::FromForwardingAddress(target));
1815 if (logging_and_profiling_mode == LOGGING_AND_PROFILING_ENABLED) {
1816 // Update NewSpace stats if necessary.
1817 RecordCopiedObject(heap, target);
1818 HEAP_PROFILE(heap, ObjectMoveEvent(source->address(), target->address()));
1819 Isolate* isolate = heap->isolate();
1820 if (isolate->logger()->is_logging_code_events() ||
1821 CpuProfiler::is_profiling(isolate)) {
1822 if (target->IsSharedFunctionInfo()) {
1823 PROFILE(isolate, SharedFunctionInfoMoveEvent(
1824 source->address(), target->address()));
1829 if (marks_handling == TRANSFER_MARKS) {
1830 if (Marking::TransferColor(source, target)) {
1831 MemoryChunk::IncrementLiveBytesFromGC(target->address(), size);
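// Central evacuation routine: decides whether an object is promoted into the
// old generation (or the large object space for oversized objects) or copied
// within new space, honouring the requested alignment.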
1837 template<ObjectContents object_contents,
1838 SizeRestriction size_restriction,
1840 static inline void EvacuateObject(Map* map,
1844 SLOW_ASSERT((size_restriction != SMALL) ||
1845 (object_size <= Page::kMaxNonCodeHeapObjectSize));
1846 SLOW_ASSERT(object->Size() == object_size);
1848 int allocation_size = object_size;
1849 if (alignment != kObjectAlignment) {
1850 ASSERT(alignment == kDoubleAlignment);
1851 allocation_size += kPointerSize;
1854 Heap* heap = map->GetHeap();
1855 if (heap->ShouldBePromoted(object->address(), object_size)) {
1856 MaybeObject* maybe_result;
1858 if ((size_restriction != SMALL) &&
1859 (allocation_size > Page::kMaxNonCodeHeapObjectSize)) {
1860 maybe_result = heap->lo_space()->AllocateRaw(allocation_size,
1863 if (object_contents == DATA_OBJECT) {
1864 maybe_result = heap->old_data_space()->AllocateRaw(allocation_size);
1867 heap->old_pointer_space()->AllocateRaw(allocation_size);
1871 Object* result = NULL; // Initialization to please compiler.
1872 if (maybe_result->ToObject(&result)) {
1873 HeapObject* target = HeapObject::cast(result);
1875 if (alignment != kObjectAlignment) {
1876 target = EnsureDoubleAligned(heap, target, allocation_size);
1879 // Order is important: slot might be inside of the target if target
1880 // was allocated over a dead object and slot comes from the store
1883 MigrateObject(heap, object, target, object_size);
1885 if (object_contents == POINTER_OBJECT) {
1886 if (map->instance_type() == JS_FUNCTION_TYPE) {
1887 heap->promotion_queue()->insert(
1888 target, JSFunction::kNonWeakFieldsEndOffset);
1890 heap->promotion_queue()->insert(target, object_size);
1894 heap->tracer()->increment_promoted_objects_size(object_size);
1898 MaybeObject* allocation = heap->new_space()->AllocateRaw(allocation_size);
1899 heap->promotion_queue()->SetNewLimit(heap->new_space()->top());
1900 Object* result = allocation->ToObjectUnchecked();
1901 HeapObject* target = HeapObject::cast(result);
1903 if (alignment != kObjectAlignment) {
1904 target = EnsureDoubleAligned(heap, target, allocation_size);
1907 // Order is important: slot might be inside of the target if target
1908 // was allocated over a dead object and slot comes from the store
1911 MigrateObject(heap, object, target, object_size);
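// Evacuates a JSFunction like any other pointer object and, when the function
// was already marked black, re-records its code entry slot so that the
// compactor does not miss it.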
1916 static inline void EvacuateJSFunction(Map* map,
1918 HeapObject* object) {
1919 ObjectEvacuationStrategy<POINTER_OBJECT>::
1920 template VisitSpecialized<JSFunction::kSize>(map, slot, object);
1922 HeapObject* target = *slot;
1923 MarkBit mark_bit = Marking::MarkBitFrom(target);
1924 if (Marking::IsBlack(mark_bit)) {
1925  // This object is black and it might not be rescanned by the marker.
1926  // We should explicitly record the code entry slot for compaction because
1927 // promotion queue processing (IterateAndMarkPointersToFromSpace) will
1928 // miss it as it is not HeapObject-tagged.
1929 Address code_entry_slot =
1930 target->address() + JSFunction::kCodeEntryOffset;
1931 Code* code = Code::cast(Code::GetObjectFromEntryAddress(code_entry_slot));
1932 map->GetHeap()->mark_compact_collector()->
1933 RecordCodeEntrySlot(code_entry_slot, code);
1938 static inline void EvacuateFixedArray(Map* map,
1940 HeapObject* object) {
1941 int object_size = FixedArray::BodyDescriptor::SizeOf(map, object);
1942 EvacuateObject<POINTER_OBJECT, UNKNOWN_SIZE, kObjectAlignment>(map,
1949 static inline void EvacuateFixedDoubleArray(Map* map,
1951 HeapObject* object) {
1952 int length = reinterpret_cast<FixedDoubleArray*>(object)->length();
1953 int object_size = FixedDoubleArray::SizeFor(length);
1954 EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE, kDoubleAlignment>(
1962 static inline void EvacuateByteArray(Map* map,
1964 HeapObject* object) {
1965 int object_size = reinterpret_cast<ByteArray*>(object)->ByteArraySize();
1966 EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE, kObjectAlignment>(
1967 map, slot, object, object_size);
1971 static inline void EvacuateSeqOneByteString(Map* map,
1973 HeapObject* object) {
1974 int object_size = SeqOneByteString::cast(object)->
1975 SeqOneByteStringSize(map->instance_type());
1976 EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE, kObjectAlignment>(
1977 map, slot, object, object_size);
1981 static inline void EvacuateSeqTwoByteString(Map* map,
1983 HeapObject* object) {
1984 int object_size = SeqTwoByteString::cast(object)->
1985 SeqTwoByteStringSize(map->instance_type());
1986 EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE, kObjectAlignment>(
1987 map, slot, object, object_size);
1991 static inline bool IsShortcutCandidate(int type) {
1992 return ((type & kShortcutTypeMask) == kShortcutTypeTag);
1995 static inline void EvacuateShortcutCandidate(Map* map,
1997 HeapObject* object) {
1998 ASSERT(IsShortcutCandidate(map->instance_type()));
2000 Heap* heap = map->GetHeap();
2002 if (marks_handling == IGNORE_MARKS &&
2003 ConsString::cast(object)->unchecked_second() ==
2004 heap->empty_string()) {
2006 HeapObject::cast(ConsString::cast(object)->unchecked_first());
2010 if (!heap->InNewSpace(first)) {
2011 object->set_map_word(MapWord::FromForwardingAddress(first));
2015 MapWord first_word = first->map_word();
2016 if (first_word.IsForwardingAddress()) {
2017 HeapObject* target = first_word.ToForwardingAddress();
2020 object->set_map_word(MapWord::FromForwardingAddress(target));
2024 heap->DoScavengeObject(first->map(), slot, first);
2025 object->set_map_word(MapWord::FromForwardingAddress(*slot));
2029 int object_size = ConsString::kSize;
2030 EvacuateObject<POINTER_OBJECT, SMALL, kObjectAlignment>(
2031 map, slot, object, object_size);
2034 template<ObjectContents object_contents>
2035 class ObjectEvacuationStrategy {
2037 template<int object_size>
2038 static inline void VisitSpecialized(Map* map,
2040 HeapObject* object) {
2041 EvacuateObject<object_contents, SMALL, kObjectAlignment>(
2042 map, slot, object, object_size);
2045 static inline void Visit(Map* map,
2047 HeapObject* object) {
2048 int object_size = map->instance_size();
2049 EvacuateObject<object_contents, SMALL, kObjectAlignment>(
2050 map, slot, object, object_size);
2054 static VisitorDispatchTable<ScavengingCallback> table_;
2058 template<MarksHandling marks_handling,
2059 LoggingAndProfiling logging_and_profiling_mode>
2060 VisitorDispatchTable<ScavengingCallback>
2061 ScavengingVisitor<marks_handling, logging_and_profiling_mode>::table_;
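// Instantiates the static dispatch tables for all four combinations of marks
// handling and logging/profiling mode.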
2064 static void InitializeScavengingVisitorsTables() {
2065 ScavengingVisitor<TRANSFER_MARKS,
2066 LOGGING_AND_PROFILING_DISABLED>::Initialize();
2067 ScavengingVisitor<IGNORE_MARKS, LOGGING_AND_PROFILING_DISABLED>::Initialize();
2068 ScavengingVisitor<TRANSFER_MARKS,
2069 LOGGING_AND_PROFILING_ENABLED>::Initialize();
2070 ScavengingVisitor<IGNORE_MARKS, LOGGING_AND_PROFILING_ENABLED>::Initialize();
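// Picks the scavenging visitor table that matches the current incremental
// marking and logging/profiling state.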
2074 void Heap::SelectScavengingVisitorsTable() {
2075 bool logging_and_profiling =
2076 isolate()->logger()->is_logging() ||
2077 CpuProfiler::is_profiling(isolate()) ||
2078 (isolate()->heap_profiler() != NULL &&
2079 isolate()->heap_profiler()->is_profiling());
2081 if (!incremental_marking()->IsMarking()) {
2082 if (!logging_and_profiling) {
2083 scavenging_visitors_table_.CopyFrom(
2084 ScavengingVisitor<IGNORE_MARKS,
2085 LOGGING_AND_PROFILING_DISABLED>::GetTable());
2087 scavenging_visitors_table_.CopyFrom(
2088 ScavengingVisitor<IGNORE_MARKS,
2089 LOGGING_AND_PROFILING_ENABLED>::GetTable());
2092 if (!logging_and_profiling) {
2093 scavenging_visitors_table_.CopyFrom(
2094 ScavengingVisitor<TRANSFER_MARKS,
2095 LOGGING_AND_PROFILING_DISABLED>::GetTable());
2097 scavenging_visitors_table_.CopyFrom(
2098 ScavengingVisitor<TRANSFER_MARKS,
2099 LOGGING_AND_PROFILING_ENABLED>::GetTable());
2102 if (incremental_marking()->IsCompacting()) {
2103     // When compacting, forbid short-circuiting of cons strings.
2104     // Scavenging code relies on the fact that a new space object
2105     // can't be evacuated into an evacuation candidate, but
2106     // short-circuiting violates this assumption.
2107 scavenging_visitors_table_.Register(
2108 StaticVisitorBase::kVisitShortcutCandidate,
2109 scavenging_visitors_table_.GetVisitorById(
2110 StaticVisitorBase::kVisitConsString));
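// Slow path of ScavengeObject: looks up the map of the unforwarded object and
// dispatches to the scavenging visitor registered for it.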
2116 void Heap::ScavengeObjectSlow(HeapObject** p, HeapObject* object) {
2117 SLOW_ASSERT(HEAP->InFromSpace(object));
2118 MapWord first_word = object->map_word();
2119 SLOW_ASSERT(!first_word.IsForwardingAddress());
2120 Map* map = first_word.ToMap();
2121 map->GetHeap()->DoScavengeObject(map, p, object);
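// Allocates a map with only the fields needed during bootstrapping; the
// remaining fields (prototype, constructor, code cache, descriptors, back
// pointer) are fixed up later in CreateInitialMaps.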
2125 MaybeObject* Heap::AllocatePartialMap(InstanceType instance_type,
2126 int instance_size) {
2128 MaybeObject* maybe_result = AllocateRawMap();
2129 if (!maybe_result->ToObject(&result)) return maybe_result;
2131 // Map::cast cannot be used due to uninitialized map field.
2132 reinterpret_cast<Map*>(result)->set_map(raw_unchecked_meta_map());
2133 reinterpret_cast<Map*>(result)->set_instance_type(instance_type);
2134 reinterpret_cast<Map*>(result)->set_instance_size(instance_size);
2135 reinterpret_cast<Map*>(result)->set_visitor_id(
2136 StaticVisitorBase::GetVisitorId(instance_type, instance_size));
2137 reinterpret_cast<Map*>(result)->set_inobject_properties(0);
2138 reinterpret_cast<Map*>(result)->set_pre_allocated_property_fields(0);
2139 reinterpret_cast<Map*>(result)->set_unused_property_fields(0);
2140 reinterpret_cast<Map*>(result)->set_bit_field(0);
2141 reinterpret_cast<Map*>(result)->set_bit_field2(0);
2142 int bit_field3 = Map::EnumLengthBits::encode(Map::kInvalidEnumCache) |
2143 Map::OwnsDescriptors::encode(true);
2144 reinterpret_cast<Map*>(result)->set_bit_field3(bit_field3);
2149 MaybeObject* Heap::AllocateMap(InstanceType instance_type,
2151 ElementsKind elements_kind) {
2153 MaybeObject* maybe_result = AllocateRawMap();
2154 if (!maybe_result->To(&result)) return maybe_result;
2156 Map* map = reinterpret_cast<Map*>(result);
2157 map->set_map_no_write_barrier(meta_map());
2158 map->set_instance_type(instance_type);
2159 map->set_visitor_id(
2160 StaticVisitorBase::GetVisitorId(instance_type, instance_size));
2161 map->set_prototype(null_value(), SKIP_WRITE_BARRIER);
2162 map->set_constructor(null_value(), SKIP_WRITE_BARRIER);
2163 map->set_instance_size(instance_size);
2164 map->set_inobject_properties(0);
2165 map->set_pre_allocated_property_fields(0);
2166 map->set_code_cache(empty_fixed_array(), SKIP_WRITE_BARRIER);
2167 map->init_back_pointer(undefined_value());
2168 map->set_unused_property_fields(0);
2169 map->set_instance_descriptors(empty_descriptor_array());
2170 map->set_bit_field(0);
2171 map->set_bit_field2(1 << Map::kIsExtensible);
2172 int bit_field3 = Map::EnumLengthBits::encode(Map::kInvalidEnumCache) |
2173 Map::OwnsDescriptors::encode(true);
2174 map->set_bit_field3(bit_field3);
2175 map->set_elements_kind(elements_kind);
2181 MaybeObject* Heap::AllocateCodeCache() {
2182 CodeCache* code_cache;
2183 { MaybeObject* maybe_code_cache = AllocateStruct(CODE_CACHE_TYPE);
2184 if (!maybe_code_cache->To(&code_cache)) return maybe_code_cache;
2186 code_cache->set_default_cache(empty_fixed_array(), SKIP_WRITE_BARRIER);
2187 code_cache->set_normal_type_cache(undefined_value(), SKIP_WRITE_BARRIER);
2192 MaybeObject* Heap::AllocatePolymorphicCodeCache() {
2193 return AllocateStruct(POLYMORPHIC_CODE_CACHE_TYPE);
2197 MaybeObject* Heap::AllocateAccessorPair() {
2198 AccessorPair* accessors;
2199 { MaybeObject* maybe_accessors = AllocateStruct(ACCESSOR_PAIR_TYPE);
2200 if (!maybe_accessors->To(&accessors)) return maybe_accessors;
2202 accessors->set_getter(the_hole_value(), SKIP_WRITE_BARRIER);
2203 accessors->set_setter(the_hole_value(), SKIP_WRITE_BARRIER);
2208 MaybeObject* Heap::AllocateTypeFeedbackInfo() {
2209 TypeFeedbackInfo* info;
2210 { MaybeObject* maybe_info = AllocateStruct(TYPE_FEEDBACK_INFO_TYPE);
2211 if (!maybe_info->To(&info)) return maybe_info;
2213 info->initialize_storage();
2214 info->set_type_feedback_cells(TypeFeedbackCells::cast(empty_fixed_array()),
2215 SKIP_WRITE_BARRIER);
2220 MaybeObject* Heap::AllocateAliasedArgumentsEntry(int aliased_context_slot) {
2221 AliasedArgumentsEntry* entry;
2222 { MaybeObject* maybe_entry = AllocateStruct(ALIASED_ARGUMENTS_ENTRY_TYPE);
2223 if (!maybe_entry->To(&entry)) return maybe_entry;
2225 entry->set_aliased_context_slot(aliased_context_slot);
2230 const Heap::StringTypeTable Heap::string_type_table[] = {
2231 #define STRING_TYPE_ELEMENT(type, size, name, camel_name) \
2232 {type, size, k##camel_name##MapRootIndex},
2233 STRING_TYPE_LIST(STRING_TYPE_ELEMENT)
2234 #undef STRING_TYPE_ELEMENT
2238 const Heap::ConstantSymbolTable Heap::constant_symbol_table[] = {
2239 #define CONSTANT_SYMBOL_ELEMENT(name, contents) \
2240 {contents, k##name##RootIndex},
2241 SYMBOL_LIST(CONSTANT_SYMBOL_ELEMENT)
2242 #undef CONSTANT_SYMBOL_ELEMENT
2246 const Heap::StructTable Heap::struct_table[] = {
2247 #define STRUCT_TABLE_ELEMENT(NAME, Name, name) \
2248 { NAME##_TYPE, Name::kSize, k##Name##MapRootIndex },
2249 STRUCT_LIST(STRUCT_TABLE_ELEMENT)
2250 #undef STRUCT_TABLE_ELEMENT
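// Creates the maps and other roots that every later allocation depends on,
// starting with the self-referential meta map.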
2254 bool Heap::CreateInitialMaps() {
2256 { MaybeObject* maybe_obj = AllocatePartialMap(MAP_TYPE, Map::kSize);
2257 if (!maybe_obj->ToObject(&obj)) return false;
2259 // Map::cast cannot be used due to uninitialized map field.
2260 Map* new_meta_map = reinterpret_cast<Map*>(obj);
2261 set_meta_map(new_meta_map);
2262 new_meta_map->set_map(new_meta_map);
2264 { MaybeObject* maybe_obj =
2265 AllocatePartialMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2266 if (!maybe_obj->ToObject(&obj)) return false;
2268 set_fixed_array_map(Map::cast(obj));
2270 { MaybeObject* maybe_obj = AllocatePartialMap(ODDBALL_TYPE, Oddball::kSize);
2271 if (!maybe_obj->ToObject(&obj)) return false;
2273 set_oddball_map(Map::cast(obj));
2275 // Allocate the empty array.
2276 { MaybeObject* maybe_obj = AllocateEmptyFixedArray();
2277 if (!maybe_obj->ToObject(&obj)) return false;
2279 set_empty_fixed_array(FixedArray::cast(obj));
2281 { MaybeObject* maybe_obj = Allocate(oddball_map(), OLD_POINTER_SPACE);
2282 if (!maybe_obj->ToObject(&obj)) return false;
2284 set_null_value(Oddball::cast(obj));
2285 Oddball::cast(obj)->set_kind(Oddball::kNull);
2287 { MaybeObject* maybe_obj = Allocate(oddball_map(), OLD_POINTER_SPACE);
2288 if (!maybe_obj->ToObject(&obj)) return false;
2290 set_undefined_value(Oddball::cast(obj));
2291 Oddball::cast(obj)->set_kind(Oddball::kUndefined);
2292 ASSERT(!InNewSpace(undefined_value()));
2294 // Allocate the empty descriptor array.
2295 { MaybeObject* maybe_obj = AllocateEmptyFixedArray();
2296 if (!maybe_obj->ToObject(&obj)) return false;
2298 set_empty_descriptor_array(DescriptorArray::cast(obj));
2300 // Fix the instance_descriptors for the existing maps.
2301 meta_map()->set_code_cache(empty_fixed_array());
2302 meta_map()->init_back_pointer(undefined_value());
2303 meta_map()->set_instance_descriptors(empty_descriptor_array());
2305 fixed_array_map()->set_code_cache(empty_fixed_array());
2306 fixed_array_map()->init_back_pointer(undefined_value());
2307 fixed_array_map()->set_instance_descriptors(empty_descriptor_array());
2309 oddball_map()->set_code_cache(empty_fixed_array());
2310 oddball_map()->init_back_pointer(undefined_value());
2311 oddball_map()->set_instance_descriptors(empty_descriptor_array());
2313 // Fix prototype object for existing maps.
2314 meta_map()->set_prototype(null_value());
2315 meta_map()->set_constructor(null_value());
2317 fixed_array_map()->set_prototype(null_value());
2318 fixed_array_map()->set_constructor(null_value());
2320 oddball_map()->set_prototype(null_value());
2321 oddball_map()->set_constructor(null_value());
2323 { MaybeObject* maybe_obj =
2324 AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2325 if (!maybe_obj->ToObject(&obj)) return false;
2327 set_fixed_cow_array_map(Map::cast(obj));
2328 ASSERT(fixed_array_map() != fixed_cow_array_map());
2330 { MaybeObject* maybe_obj =
2331 AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2332 if (!maybe_obj->ToObject(&obj)) return false;
2334 set_scope_info_map(Map::cast(obj));
2336 { MaybeObject* maybe_obj = AllocateMap(HEAP_NUMBER_TYPE, HeapNumber::kSize);
2337 if (!maybe_obj->ToObject(&obj)) return false;
2339 set_heap_number_map(Map::cast(obj));
2341 { MaybeObject* maybe_obj = AllocateMap(FOREIGN_TYPE, Foreign::kSize);
2342 if (!maybe_obj->ToObject(&obj)) return false;
2344 set_foreign_map(Map::cast(obj));
2346 for (unsigned i = 0; i < ARRAY_SIZE(string_type_table); i++) {
2347 const StringTypeTable& entry = string_type_table[i];
2348 { MaybeObject* maybe_obj = AllocateMap(entry.type, entry.size);
2349 if (!maybe_obj->ToObject(&obj)) return false;
2351 roots_[entry.index] = Map::cast(obj);
2354 { MaybeObject* maybe_obj = AllocateMap(STRING_TYPE, kVariableSizeSentinel);
2355 if (!maybe_obj->ToObject(&obj)) return false;
2357 set_undetectable_string_map(Map::cast(obj));
2358 Map::cast(obj)->set_is_undetectable();
2360 { MaybeObject* maybe_obj =
2361 AllocateMap(ASCII_STRING_TYPE, kVariableSizeSentinel);
2362 if (!maybe_obj->ToObject(&obj)) return false;
2364 set_undetectable_ascii_string_map(Map::cast(obj));
2365 Map::cast(obj)->set_is_undetectable();
2367 { MaybeObject* maybe_obj =
2368 AllocateMap(FIXED_DOUBLE_ARRAY_TYPE, kVariableSizeSentinel);
2369 if (!maybe_obj->ToObject(&obj)) return false;
2371 set_fixed_double_array_map(Map::cast(obj));
2373 { MaybeObject* maybe_obj =
2374 AllocateMap(BYTE_ARRAY_TYPE, kVariableSizeSentinel);
2375 if (!maybe_obj->ToObject(&obj)) return false;
2377 set_byte_array_map(Map::cast(obj));
2379 { MaybeObject* maybe_obj =
2380 AllocateMap(FREE_SPACE_TYPE, kVariableSizeSentinel);
2381 if (!maybe_obj->ToObject(&obj)) return false;
2383 set_free_space_map(Map::cast(obj));
2385 { MaybeObject* maybe_obj = AllocateByteArray(0, TENURED);
2386 if (!maybe_obj->ToObject(&obj)) return false;
2388 set_empty_byte_array(ByteArray::cast(obj));
2390 { MaybeObject* maybe_obj =
2391 AllocateMap(EXTERNAL_PIXEL_ARRAY_TYPE, ExternalArray::kAlignedSize);
2392 if (!maybe_obj->ToObject(&obj)) return false;
2394 set_external_pixel_array_map(Map::cast(obj));
2396 { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_BYTE_ARRAY_TYPE,
2397 ExternalArray::kAlignedSize);
2398 if (!maybe_obj->ToObject(&obj)) return false;
2400 set_external_byte_array_map(Map::cast(obj));
2402 { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE,
2403 ExternalArray::kAlignedSize);
2404 if (!maybe_obj->ToObject(&obj)) return false;
2406 set_external_unsigned_byte_array_map(Map::cast(obj));
2408 { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_SHORT_ARRAY_TYPE,
2409 ExternalArray::kAlignedSize);
2410 if (!maybe_obj->ToObject(&obj)) return false;
2412 set_external_short_array_map(Map::cast(obj));
2414 { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE,
2415 ExternalArray::kAlignedSize);
2416 if (!maybe_obj->ToObject(&obj)) return false;
2418 set_external_unsigned_short_array_map(Map::cast(obj));
2420 { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_INT_ARRAY_TYPE,
2421 ExternalArray::kAlignedSize);
2422 if (!maybe_obj->ToObject(&obj)) return false;
2424 set_external_int_array_map(Map::cast(obj));
2426 { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_UNSIGNED_INT_ARRAY_TYPE,
2427 ExternalArray::kAlignedSize);
2428 if (!maybe_obj->ToObject(&obj)) return false;
2430 set_external_unsigned_int_array_map(Map::cast(obj));
2432 { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_FLOAT_ARRAY_TYPE,
2433 ExternalArray::kAlignedSize);
2434 if (!maybe_obj->ToObject(&obj)) return false;
2436 set_external_float_array_map(Map::cast(obj));
2438 { MaybeObject* maybe_obj =
2439 AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2440 if (!maybe_obj->ToObject(&obj)) return false;
2442 set_non_strict_arguments_elements_map(Map::cast(obj));
2444 { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_DOUBLE_ARRAY_TYPE,
2445 ExternalArray::kAlignedSize);
2446 if (!maybe_obj->ToObject(&obj)) return false;
2448 set_external_double_array_map(Map::cast(obj));
2450 { MaybeObject* maybe_obj = AllocateMap(CODE_TYPE, kVariableSizeSentinel);
2451 if (!maybe_obj->ToObject(&obj)) return false;
2453 set_code_map(Map::cast(obj));
2455 { MaybeObject* maybe_obj = AllocateMap(JS_GLOBAL_PROPERTY_CELL_TYPE,
2456 JSGlobalPropertyCell::kSize);
2457 if (!maybe_obj->ToObject(&obj)) return false;
2459 set_global_property_cell_map(Map::cast(obj));
2461 { MaybeObject* maybe_obj = AllocateMap(FILLER_TYPE, kPointerSize);
2462 if (!maybe_obj->ToObject(&obj)) return false;
2464 set_one_pointer_filler_map(Map::cast(obj));
2466 { MaybeObject* maybe_obj = AllocateMap(FILLER_TYPE, 2 * kPointerSize);
2467 if (!maybe_obj->ToObject(&obj)) return false;
2469 set_two_pointer_filler_map(Map::cast(obj));
2471 for (unsigned i = 0; i < ARRAY_SIZE(struct_table); i++) {
2472 const StructTable& entry = struct_table[i];
2473 { MaybeObject* maybe_obj = AllocateMap(entry.type, entry.size);
2474 if (!maybe_obj->ToObject(&obj)) return false;
2476 roots_[entry.index] = Map::cast(obj);
2479 { MaybeObject* maybe_obj =
2480 AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2481 if (!maybe_obj->ToObject(&obj)) return false;
2483 set_hash_table_map(Map::cast(obj));
2485 { MaybeObject* maybe_obj =
2486 AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2487 if (!maybe_obj->ToObject(&obj)) return false;
2489 set_function_context_map(Map::cast(obj));
2491 { MaybeObject* maybe_obj =
2492 AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2493 if (!maybe_obj->ToObject(&obj)) return false;
2495 set_catch_context_map(Map::cast(obj));
2497 { MaybeObject* maybe_obj =
2498 AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2499 if (!maybe_obj->ToObject(&obj)) return false;
2501 set_with_context_map(Map::cast(obj));
2503 { MaybeObject* maybe_obj =
2504 AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2505 if (!maybe_obj->ToObject(&obj)) return false;
2507 set_block_context_map(Map::cast(obj));
2509 { MaybeObject* maybe_obj =
2510 AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2511 if (!maybe_obj->ToObject(&obj)) return false;
2513 set_module_context_map(Map::cast(obj));
2515 { MaybeObject* maybe_obj =
2516 AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2517 if (!maybe_obj->ToObject(&obj)) return false;
2519 set_global_context_map(Map::cast(obj));
2521 { MaybeObject* maybe_obj =
2522 AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2523 if (!maybe_obj->ToObject(&obj)) return false;
2525 Map* native_context_map = Map::cast(obj);
2526 native_context_map->set_dictionary_map(true);
2527 native_context_map->set_visitor_id(StaticVisitorBase::kVisitNativeContext);
2528 set_native_context_map(native_context_map);
2530 { MaybeObject* maybe_obj = AllocateMap(SHARED_FUNCTION_INFO_TYPE,
2531 SharedFunctionInfo::kAlignedSize);
2532 if (!maybe_obj->ToObject(&obj)) return false;
2534 set_shared_function_info_map(Map::cast(obj));
2536 { MaybeObject* maybe_obj = AllocateMap(JS_MESSAGE_OBJECT_TYPE,
2537 JSMessageObject::kSize);
2538 if (!maybe_obj->ToObject(&obj)) return false;
2540 set_message_object_map(Map::cast(obj));
2543 { MaybeObject* maybe_obj =
2544 AllocateMap(JS_OBJECT_TYPE, JSObject::kHeaderSize + kPointerSize);
2545 if (!maybe_obj->To(&external_map)) return false;
2547 external_map->set_is_extensible(false);
2548 set_external_map(external_map);
2550 ASSERT(!InNewSpace(empty_fixed_array()));
2555 MaybeObject* Heap::AllocateHeapNumber(double value, PretenureFlag pretenure) {
2556 // Statically ensure that it is safe to allocate heap numbers in paged
2558 STATIC_ASSERT(HeapNumber::kSize <= Page::kNonCodeObjectAreaSize);
2559 AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
2562 { MaybeObject* maybe_result =
2563 AllocateRaw(HeapNumber::kSize, space, OLD_DATA_SPACE);
2564 if (!maybe_result->ToObject(&result)) return maybe_result;
2567 HeapObject::cast(result)->set_map_no_write_barrier(heap_number_map());
2568 HeapNumber::cast(result)->set_value(value);
2573 MaybeObject* Heap::AllocateHeapNumber(double value) {
2574 // Use general version, if we're forced to always allocate.
2575 if (always_allocate()) return AllocateHeapNumber(value, TENURED);
2577 // This version of AllocateHeapNumber is optimized for
2578 // allocation in new space.
2579 STATIC_ASSERT(HeapNumber::kSize <= Page::kMaxNonCodeHeapObjectSize);
2580 ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
2582 { MaybeObject* maybe_result = new_space_.AllocateRaw(HeapNumber::kSize);
2583 if (!maybe_result->ToObject(&result)) return maybe_result;
2585 HeapObject::cast(result)->set_map_no_write_barrier(heap_number_map());
2586 HeapNumber::cast(result)->set_value(value);
2591 MaybeObject* Heap::AllocateJSGlobalPropertyCell(Object* value) {
2593 { MaybeObject* maybe_result = AllocateRawCell();
2594 if (!maybe_result->ToObject(&result)) return maybe_result;
2596 HeapObject::cast(result)->set_map_no_write_barrier(
2597 global_property_cell_map());
2598 JSGlobalPropertyCell::cast(result)->set_value(value);
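// Allocates an oddball in old pointer space and initializes it with the given
// string representation, number value and kind.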
2603 MaybeObject* Heap::CreateOddball(const char* to_string,
2607 { MaybeObject* maybe_result = Allocate(oddball_map(), OLD_POINTER_SPACE);
2608 if (!maybe_result->ToObject(&result)) return maybe_result;
2610 return Oddball::cast(result)->Initialize(to_string, to_number, kind);
2614 bool Heap::CreateApiObjects() {
2617 { MaybeObject* maybe_obj = AllocateMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
2618 if (!maybe_obj->ToObject(&obj)) return false;
2620 // Don't use Smi-only elements optimizations for objects with the neander
2621   // map. There are too many cases where element values are set directly,
2622   // without a bottleneck to trap the Smi-only -> fast elements transition, and
2623   // there appears to be no benefit in optimizing this case.
2624 Map* new_neander_map = Map::cast(obj);
2625 new_neander_map->set_elements_kind(TERMINAL_FAST_ELEMENTS_KIND);
2626 set_neander_map(new_neander_map);
2628 { MaybeObject* maybe_obj = AllocateJSObjectFromMap(neander_map());
2629 if (!maybe_obj->ToObject(&obj)) return false;
2632 { MaybeObject* maybe_elements = AllocateFixedArray(2);
2633 if (!maybe_elements->ToObject(&elements)) return false;
2635 FixedArray::cast(elements)->set(0, Smi::FromInt(0));
2636 JSObject::cast(obj)->set_elements(FixedArray::cast(elements));
2637 set_message_listeners(JSObject::cast(obj));
2643 void Heap::CreateJSEntryStub() {
2645 set_js_entry_code(*stub.GetCode());
2649 void Heap::CreateJSConstructEntryStub() {
2650 JSConstructEntryStub stub;
2651 set_js_construct_entry_code(*stub.GetCode());
2655 void Heap::CreateFixedStubs() {
2656 // Here we create roots for fixed stubs. They are needed at GC
2657 // for cooking and uncooking (check out frames.cc).
2658   // This eliminates the need for doing a dictionary lookup in the
2659 // stub cache for these stubs.
2661   // gcc-4.4 has a problem generating correct code for the following snippet:
2662 // { JSEntryStub stub;
2663 // js_entry_code_ = *stub.GetCode();
2665 // { JSConstructEntryStub stub;
2666 // js_construct_entry_code_ = *stub.GetCode();
2668   // To work around the problem, make separate functions without inlining.
2669 Heap::CreateJSEntryStub();
2670 Heap::CreateJSConstructEntryStub();
2672 // Create stubs that should be there, so we don't unexpectedly have to
2673 // create them if we need them during the creation of another stub.
2674 // Stub creation mixes raw pointers and handles in an unsafe manner so
2675 // we cannot create stubs while we are creating stubs.
2676 CodeStub::GenerateStubsAheadOfTime();
2680 bool Heap::CreateInitialObjects() {
2683 // The -0 value must be set before NumberFromDouble works.
2684 { MaybeObject* maybe_obj = AllocateHeapNumber(-0.0, TENURED);
2685 if (!maybe_obj->ToObject(&obj)) return false;
2687 set_minus_zero_value(HeapNumber::cast(obj));
2688 ASSERT(signbit(minus_zero_value()->Number()) != 0);
2690 { MaybeObject* maybe_obj = AllocateHeapNumber(OS::nan_value(), TENURED);
2691 if (!maybe_obj->ToObject(&obj)) return false;
2693 set_nan_value(HeapNumber::cast(obj));
2695 { MaybeObject* maybe_obj = AllocateHeapNumber(V8_INFINITY, TENURED);
2696 if (!maybe_obj->ToObject(&obj)) return false;
2698 set_infinity_value(HeapNumber::cast(obj));
2700 // The hole has not been created yet, but we want to put something
2701   // predictable in the gaps in the symbol table, so let's make that Smi zero.
2702 set_the_hole_value(reinterpret_cast<Oddball*>(Smi::FromInt(0)));
2704 // Allocate initial symbol table.
2705 { MaybeObject* maybe_obj = SymbolTable::Allocate(kInitialSymbolTableSize);
2706 if (!maybe_obj->ToObject(&obj)) return false;
2708 // Don't use set_symbol_table() due to asserts.
2709 roots_[kSymbolTableRootIndex] = obj;
2711 // Finish initializing oddballs after creating symboltable.
2712 { MaybeObject* maybe_obj =
2713 undefined_value()->Initialize("undefined",
2715 Oddball::kUndefined);
2716 if (!maybe_obj->ToObject(&obj)) return false;
2719 // Initialize the null_value.
2720 { MaybeObject* maybe_obj =
2721 null_value()->Initialize("null", Smi::FromInt(0), Oddball::kNull);
2722 if (!maybe_obj->ToObject(&obj)) return false;
2725 { MaybeObject* maybe_obj = CreateOddball("true",
2728 if (!maybe_obj->ToObject(&obj)) return false;
2730 set_true_value(Oddball::cast(obj));
2732 { MaybeObject* maybe_obj = CreateOddball("false",
2735 if (!maybe_obj->ToObject(&obj)) return false;
2737 set_false_value(Oddball::cast(obj));
2739 { MaybeObject* maybe_obj = CreateOddball("hole",
2742 if (!maybe_obj->ToObject(&obj)) return false;
2744 set_the_hole_value(Oddball::cast(obj));
2746 { MaybeObject* maybe_obj = CreateOddball("arguments_marker",
2748 Oddball::kArgumentMarker);
2749 if (!maybe_obj->ToObject(&obj)) return false;
2751 set_arguments_marker(Oddball::cast(obj));
2753 { MaybeObject* maybe_obj = CreateOddball("no_interceptor_result_sentinel",
2756 if (!maybe_obj->ToObject(&obj)) return false;
2758 set_no_interceptor_result_sentinel(obj);
2760 { MaybeObject* maybe_obj = CreateOddball("termination_exception",
2763 if (!maybe_obj->ToObject(&obj)) return false;
2765 set_termination_exception(obj);
2767 // Allocate the empty string.
2768 { MaybeObject* maybe_obj = AllocateRawOneByteString(0, TENURED);
2769 if (!maybe_obj->ToObject(&obj)) return false;
2771 set_empty_string(String::cast(obj));
2773 for (unsigned i = 0; i < ARRAY_SIZE(constant_symbol_table); i++) {
2774 { MaybeObject* maybe_obj =
2775 LookupAsciiSymbol(constant_symbol_table[i].contents);
2776 if (!maybe_obj->ToObject(&obj)) return false;
2778 roots_[constant_symbol_table[i].index] = String::cast(obj);
2781 // Allocate the hidden symbol which is used to identify the hidden properties
2782 // in JSObjects. The hash code has a special value so that it will not match
2783 // the empty string when searching for the property. It cannot be part of the
2784 // loop above because it needs to be allocated manually with the special
2785 // hash code in place. The hash code for the hidden_symbol is zero to ensure
2786 // that it will always be at the first entry in property descriptors.
2787 { MaybeObject* maybe_obj =
2788 AllocateSymbol(CStrVector(""), 0, String::kEmptyStringHash);
2789 if (!maybe_obj->ToObject(&obj)) return false;
2791 hidden_symbol_ = String::cast(obj);
2793 // Allocate the foreign for __proto__.
2794 { MaybeObject* maybe_obj =
2795 AllocateForeign((Address) &Accessors::ObjectPrototype);
2796 if (!maybe_obj->ToObject(&obj)) return false;
2798 set_prototype_accessors(Foreign::cast(obj));
2800 // Allocate the code_stubs dictionary. The initial size is set to avoid
2801 // expanding the dictionary during bootstrapping.
2802 { MaybeObject* maybe_obj = UnseededNumberDictionary::Allocate(128);
2803 if (!maybe_obj->ToObject(&obj)) return false;
2805 set_code_stubs(UnseededNumberDictionary::cast(obj));
2808 // Allocate the non_monomorphic_cache used in stub-cache.cc. The initial size
2809 // is set to avoid expanding the dictionary during bootstrapping.
2810 { MaybeObject* maybe_obj = UnseededNumberDictionary::Allocate(64);
2811 if (!maybe_obj->ToObject(&obj)) return false;
2813 set_non_monomorphic_cache(UnseededNumberDictionary::cast(obj));
2815 { MaybeObject* maybe_obj = AllocatePolymorphicCodeCache();
2816 if (!maybe_obj->ToObject(&obj)) return false;
2818 set_polymorphic_code_cache(PolymorphicCodeCache::cast(obj));
2820 set_instanceof_cache_function(Smi::FromInt(0));
2821 set_instanceof_cache_map(Smi::FromInt(0));
2822 set_instanceof_cache_answer(Smi::FromInt(0));
2826 // Allocate the dictionary of intrinsic function names.
2827 { MaybeObject* maybe_obj = StringDictionary::Allocate(Runtime::kNumFunctions);
2828 if (!maybe_obj->ToObject(&obj)) return false;
2830 { MaybeObject* maybe_obj = Runtime::InitializeIntrinsicFunctionNames(this,
2832 if (!maybe_obj->ToObject(&obj)) return false;
2834 set_intrinsic_function_names(StringDictionary::cast(obj));
2836 { MaybeObject* maybe_obj = AllocateInitialNumberStringCache();
2837 if (!maybe_obj->ToObject(&obj)) return false;
2839 set_number_string_cache(FixedArray::cast(obj));
2841 // Allocate cache for single character ASCII strings.
2842 { MaybeObject* maybe_obj =
2843 AllocateFixedArray(String::kMaxAsciiCharCode + 1, TENURED);
2844 if (!maybe_obj->ToObject(&obj)) return false;
2846 set_single_character_string_cache(FixedArray::cast(obj));
2848 // Allocate cache for string split.
2849 { MaybeObject* maybe_obj = AllocateFixedArray(
2850 RegExpResultsCache::kRegExpResultsCacheSize, TENURED);
2851 if (!maybe_obj->ToObject(&obj)) return false;
2853 set_string_split_cache(FixedArray::cast(obj));
2855 { MaybeObject* maybe_obj = AllocateFixedArray(
2856 RegExpResultsCache::kRegExpResultsCacheSize, TENURED);
2857 if (!maybe_obj->ToObject(&obj)) return false;
2859 set_regexp_multiple_cache(FixedArray::cast(obj));
2861 // Allocate cache for external strings pointing to native source code.
2862 { MaybeObject* maybe_obj = AllocateFixedArray(Natives::GetBuiltinsCount());
2863 if (!maybe_obj->ToObject(&obj)) return false;
2865 set_natives_source_cache(FixedArray::cast(obj));
2867 // Allocate object to hold object observation state.
2868 { MaybeObject* maybe_obj = AllocateMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
2869 if (!maybe_obj->ToObject(&obj)) return false;
2871 { MaybeObject* maybe_obj = AllocateJSObjectFromMap(Map::cast(obj));
2872 if (!maybe_obj->ToObject(&obj)) return false;
2874 set_observation_state(JSObject::cast(obj));
2876 // Handling of script id generation is in FACTORY->NewScript.
2877 set_last_script_id(undefined_value());
2879 // Initialize keyed lookup cache.
2880 isolate_->keyed_lookup_cache()->Clear();
2882 // Initialize context slot cache.
2883 isolate_->context_slot_cache()->Clear();
2885 // Initialize descriptor cache.
2886 isolate_->descriptor_lookup_cache()->Clear();
2888 // Initialize compilation cache.
2889 isolate_->compilation_cache()->Clear();
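// Returns whether the given root is one of the few roots that may legitimately
// be overwritten after the heap has been initialized.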
2895 bool Heap::RootCanBeWrittenAfterInitialization(Heap::RootListIndex root_index) {
2896 RootListIndex writable_roots[] = {
2897 kStoreBufferTopRootIndex,
2898 kStackLimitRootIndex,
2899 kInstanceofCacheFunctionRootIndex,
2900 kInstanceofCacheMapRootIndex,
2901 kInstanceofCacheAnswerRootIndex,
2902 kCodeStubsRootIndex,
2903 kNonMonomorphicCacheRootIndex,
2904 kPolymorphicCodeCacheRootIndex,
2905 kLastScriptIdRootIndex,
2906 kEmptyScriptRootIndex,
2907 kRealStackLimitRootIndex,
2908 kArgumentsAdaptorDeoptPCOffsetRootIndex,
2909 kConstructStubDeoptPCOffsetRootIndex,
2910 kGetterStubDeoptPCOffsetRootIndex,
2911 kSetterStubDeoptPCOffsetRootIndex,
2912 kSymbolTableRootIndex,
2915 for (unsigned int i = 0; i < ARRAY_SIZE(writable_roots); i++) {
2916 if (root_index == writable_roots[i])
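// Looks up the cached result for the given string/pattern key in either the
// string split cache or the regexp multiple cache; returns Smi zero when no
// entry is present.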
2923 Object* RegExpResultsCache::Lookup(Heap* heap,
2925 Object* key_pattern,
2926 ResultsCacheType type) {
2928 if (!key_string->IsSymbol()) return Smi::FromInt(0);
2929 if (type == STRING_SPLIT_SUBSTRINGS) {
2930 ASSERT(key_pattern->IsString());
2931 if (!key_pattern->IsSymbol()) return Smi::FromInt(0);
2932 cache = heap->string_split_cache();
2934 ASSERT(type == REGEXP_MULTIPLE_INDICES);
2935 ASSERT(key_pattern->IsFixedArray());
2936 cache = heap->regexp_multiple_cache();
2939 uint32_t hash = key_string->Hash();
2940 uint32_t index = ((hash & (kRegExpResultsCacheSize - 1)) &
2941 ~(kArrayEntriesPerCacheEntry - 1));
2942 if (cache->get(index + kStringOffset) == key_string &&
2943 cache->get(index + kPatternOffset) == key_pattern) {
2944 return cache->get(index + kArrayOffset);
2947 ((index + kArrayEntriesPerCacheEntry) & (kRegExpResultsCacheSize - 1));
2948 if (cache->get(index + kStringOffset) == key_string &&
2949 cache->get(index + kPatternOffset) == key_pattern) {
2950 return cache->get(index + kArrayOffset);
2952 return Smi::FromInt(0);
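// Inserts value_array under the string/pattern key, trying the primary slot
// first and a secondary slot on collision; when both are occupied the
// secondary entry is cleared and the primary entry is overwritten. Short
// substring results are converted to symbols, and the value array's map is
// switched to the copy-on-write fixed array map.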
2956 void RegExpResultsCache::Enter(Heap* heap,
2958 Object* key_pattern,
2959 FixedArray* value_array,
2960 ResultsCacheType type) {
2962 if (!key_string->IsSymbol()) return;
2963 if (type == STRING_SPLIT_SUBSTRINGS) {
2964 ASSERT(key_pattern->IsString());
2965 if (!key_pattern->IsSymbol()) return;
2966 cache = heap->string_split_cache();
2968 ASSERT(type == REGEXP_MULTIPLE_INDICES);
2969 ASSERT(key_pattern->IsFixedArray());
2970 cache = heap->regexp_multiple_cache();
2973 uint32_t hash = key_string->Hash();
2974 uint32_t index = ((hash & (kRegExpResultsCacheSize - 1)) &
2975 ~(kArrayEntriesPerCacheEntry - 1));
2976 if (cache->get(index + kStringOffset) == Smi::FromInt(0)) {
2977 cache->set(index + kStringOffset, key_string);
2978 cache->set(index + kPatternOffset, key_pattern);
2979 cache->set(index + kArrayOffset, value_array);
2982 ((index + kArrayEntriesPerCacheEntry) & (kRegExpResultsCacheSize - 1));
2983 if (cache->get(index2 + kStringOffset) == Smi::FromInt(0)) {
2984 cache->set(index2 + kStringOffset, key_string);
2985 cache->set(index2 + kPatternOffset, key_pattern);
2986 cache->set(index2 + kArrayOffset, value_array);
2988 cache->set(index2 + kStringOffset, Smi::FromInt(0));
2989 cache->set(index2 + kPatternOffset, Smi::FromInt(0));
2990 cache->set(index2 + kArrayOffset, Smi::FromInt(0));
2991 cache->set(index + kStringOffset, key_string);
2992 cache->set(index + kPatternOffset, key_pattern);
2993 cache->set(index + kArrayOffset, value_array);
2996 // If the array is a reasonably short list of substrings, convert it into a
2998 if (type == STRING_SPLIT_SUBSTRINGS && value_array->length() < 100) {
2999 for (int i = 0; i < value_array->length(); i++) {
3000 String* str = String::cast(value_array->get(i));
3002 MaybeObject* maybe_symbol = heap->LookupSymbol(str);
3003 if (maybe_symbol->ToObject(&symbol)) {
3004 value_array->set(i, symbol);
3008 // Convert backing store to a copy-on-write array.
3009 value_array->set_map_no_write_barrier(heap->fixed_cow_array_map());
3013 void RegExpResultsCache::Clear(FixedArray* cache) {
3014 for (int i = 0; i < kRegExpResultsCacheSize; i++) {
3015 cache->set(i, Smi::FromInt(0));
3020 MaybeObject* Heap::AllocateInitialNumberStringCache() {
3021 MaybeObject* maybe_obj =
3022 AllocateFixedArray(kInitialNumberStringCacheSize * 2, TENURED);
3027 int Heap::FullSizeNumberStringCacheLength() {
3028   // Compute the size of the number string cache based on the maximum new space size.
3029 // The number string cache has a minimum size based on twice the initial cache
3030 // size to ensure that it is bigger after being made 'full size'.
3031 int number_string_cache_size = max_semispace_size_ / 512;
3032 number_string_cache_size = Max(kInitialNumberStringCacheSize * 2,
3033 Min(0x4000, number_string_cache_size));
3034 // There is a string and a number per entry so the length is twice the number
3036 return number_string_cache_size * 2;
3040 void Heap::AllocateFullSizeNumberStringCache() {
3041 // The idea is to have a small number string cache in the snapshot to keep
3042   // boot-time memory usage down. If the number string cache already needs to
3043   // be expanded while the snapshot is being created, that plan has failed.
3044 ASSERT(!Serializer::enabled() || FLAG_extra_code != NULL);
3045 MaybeObject* maybe_obj =
3046 AllocateFixedArray(FullSizeNumberStringCacheLength(), TENURED);
3048 if (maybe_obj->ToObject(&new_cache)) {
3049 // We don't bother to repopulate the cache with entries from the old cache.
3050 // It will be repopulated soon enough with new strings.
3051 set_number_string_cache(FixedArray::cast(new_cache));
3053 // If allocation fails then we just return without doing anything. It is only
3054 // a cache, so best effort is OK here.
3058 void Heap::FlushNumberStringCache() {
3059 // Flush the number to string cache.
3060 int len = number_string_cache()->length();
3061 for (int i = 0; i < len; i++) {
3062 number_string_cache()->set_undefined(this, i);
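// Hash functions used to index the number string cache.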
3067 static inline int double_get_hash(double d) {
3068 DoubleRepresentation rep(d);
3069 return static_cast<int>(rep.bits) ^ static_cast<int>(rep.bits >> 32);
3073 static inline int smi_get_hash(Smi* smi) {
3074 return smi->value();
3078 Object* Heap::GetNumberStringCache(Object* number) {
3080 int mask = (number_string_cache()->length() >> 1) - 1;
3081 if (number->IsSmi()) {
3082 hash = smi_get_hash(Smi::cast(number)) & mask;
3084 hash = double_get_hash(number->Number()) & mask;
3086 Object* key = number_string_cache()->get(hash * 2);
3087 if (key == number) {
3088 return String::cast(number_string_cache()->get(hash * 2 + 1));
3089 } else if (key->IsHeapNumber() &&
3090 number->IsHeapNumber() &&
3091 key->Number() == number->Number()) {
3092 return String::cast(number_string_cache()->get(hash * 2 + 1));
3094 return undefined_value();
3098 void Heap::SetNumberStringCache(Object* number, String* string) {
3100 int mask = (number_string_cache()->length() >> 1) - 1;
3101 if (number->IsSmi()) {
3102 hash = smi_get_hash(Smi::cast(number)) & mask;
3104 hash = double_get_hash(number->Number()) & mask;
3106 if (number_string_cache()->get(hash * 2) != undefined_value() &&
3107 number_string_cache()->length() != FullSizeNumberStringCacheLength()) {
3108 // The first time we have a hash collision, we move to the full sized
3109 // number string cache.
3110 AllocateFullSizeNumberStringCache();
3113 number_string_cache()->set(hash * 2, number);
3114 number_string_cache()->set(hash * 2 + 1, string);
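// Converts a number (Smi or HeapNumber) to a string, consulting and updating
// the number string cache unless the caller disables the cache lookup.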
3118 MaybeObject* Heap::NumberToString(Object* number,
3119 bool check_number_string_cache) {
3120 isolate_->counters()->number_to_string_runtime()->Increment();
3121 if (check_number_string_cache) {
3122 Object* cached = GetNumberStringCache(number);
3123 if (cached != undefined_value()) {
3129 Vector<char> buffer(arr, ARRAY_SIZE(arr));
3131 if (number->IsSmi()) {
3132 int num = Smi::cast(number)->value();
3133 str = IntToCString(num, buffer);
3135 double num = HeapNumber::cast(number)->value();
3136 str = DoubleToCString(num, buffer);
3140 MaybeObject* maybe_js_string = AllocateStringFromOneByte(CStrVector(str));
3141 if (maybe_js_string->ToObject(&js_string)) {
3142 SetNumberStringCache(number, String::cast(js_string));
3144 return maybe_js_string;
3148 MaybeObject* Heap::Uint32ToString(uint32_t value,
3149 bool check_number_string_cache) {
3151 MaybeObject* maybe = NumberFromUint32(value);
3152 if (!maybe->To<Object>(&number)) return maybe;
3153 return NumberToString(number, check_number_string_cache);
3157 Map* Heap::MapForExternalArrayType(ExternalArrayType array_type) {
3158 return Map::cast(roots_[RootIndexForExternalArrayType(array_type)]);
3162 Heap::RootListIndex Heap::RootIndexForExternalArrayType(
3163 ExternalArrayType array_type) {
3164 switch (array_type) {
3165 case kExternalByteArray:
3166 return kExternalByteArrayMapRootIndex;
3167 case kExternalUnsignedByteArray:
3168 return kExternalUnsignedByteArrayMapRootIndex;
3169 case kExternalShortArray:
3170 return kExternalShortArrayMapRootIndex;
3171 case kExternalUnsignedShortArray:
3172 return kExternalUnsignedShortArrayMapRootIndex;
3173 case kExternalIntArray:
3174 return kExternalIntArrayMapRootIndex;
3175 case kExternalUnsignedIntArray:
3176 return kExternalUnsignedIntArrayMapRootIndex;
3177 case kExternalFloatArray:
3178 return kExternalFloatArrayMapRootIndex;
3179 case kExternalDoubleArray:
3180 return kExternalDoubleArrayMapRootIndex;
3181 case kExternalPixelArray:
3182 return kExternalPixelArrayMapRootIndex;
3185 return kUndefinedValueRootIndex;
3190 MaybeObject* Heap::NumberFromDouble(double value, PretenureFlag pretenure) {
3191 // We need to distinguish the minus zero value and this cannot be
3192 // done after conversion to int. Doing this by comparing bit
3193 // patterns is faster than using fpclassify() et al.
3194 static const DoubleRepresentation minus_zero(-0.0);
3196 DoubleRepresentation rep(value);
3197 if (rep.bits == minus_zero.bits) {
3198 return AllocateHeapNumber(-0.0, pretenure);
3201 int int_value = FastD2I(value);
3202 if (value == int_value && Smi::IsValid(int_value)) {
3203 return Smi::FromInt(int_value);
3206 // Materialize the value in the heap.
3207 return AllocateHeapNumber(value, pretenure);
3211 MaybeObject* Heap::AllocateForeign(Address address, PretenureFlag pretenure) {
3212 // Statically ensure that it is safe to allocate foreigns in paged spaces.
3213 STATIC_ASSERT(Foreign::kSize <= Page::kMaxNonCodeHeapObjectSize);
3214 AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
3216 MaybeObject* maybe_result = Allocate(foreign_map(), space);
3217 if (!maybe_result->To(&result)) return maybe_result;
3218 result->set_foreign_address(address);
3223 MaybeObject* Heap::AllocateSharedFunctionInfo(Object* name) {
3224 SharedFunctionInfo* share;
3225 MaybeObject* maybe = Allocate(shared_function_info_map(), OLD_POINTER_SPACE);
3226 if (!maybe->To<SharedFunctionInfo>(&share)) return maybe;
3228 // Set pointer fields.
3229 share->set_name(name);
3230 Code* illegal = isolate_->builtins()->builtin(Builtins::kIllegal);
3231 share->set_code(illegal);
3232 share->ClearOptimizedCodeMap();
3233 share->set_scope_info(ScopeInfo::Empty());
3234 Code* construct_stub =
3235 isolate_->builtins()->builtin(Builtins::kJSConstructStubGeneric);
3236 share->set_construct_stub(construct_stub);
3237 share->set_instance_class_name(Object_symbol());
3238 share->set_function_data(undefined_value(), SKIP_WRITE_BARRIER);
3239 share->set_script(undefined_value(), SKIP_WRITE_BARRIER);
3240 share->set_debug_info(undefined_value(), SKIP_WRITE_BARRIER);
3241 share->set_inferred_name(empty_string(), SKIP_WRITE_BARRIER);
3242 share->set_initial_map(undefined_value(), SKIP_WRITE_BARRIER);
3243 share->set_this_property_assignments(undefined_value(), SKIP_WRITE_BARRIER);
3244 share->set_ast_node_count(0);
3245 share->set_stress_deopt_counter(FLAG_deopt_every_n_times);
3246 share->set_counters(0);
3248 // Set integer fields (smi or int, depending on the architecture).
3249 share->set_length(0);
3250 share->set_formal_parameter_count(0);
3251 share->set_expected_nof_properties(0);
3252 share->set_num_literals(0);
3253 share->set_start_position_and_type(0);
3254 share->set_end_position(0);
3255 share->set_function_token_position(0);
3256 // All compiler hints default to false or 0.
3257 share->set_compiler_hints(0);
3258 share->set_this_property_assignments_count(0);
3259 share->set_opt_count(0);
3265 MaybeObject* Heap::AllocateJSMessageObject(String* type,
3270 Object* stack_trace,
3271 Object* stack_frames) {
3273 { MaybeObject* maybe_result = Allocate(message_object_map(), NEW_SPACE);
3274 if (!maybe_result->ToObject(&result)) return maybe_result;
3276 JSMessageObject* message = JSMessageObject::cast(result);
3277 message->set_properties(Heap::empty_fixed_array(), SKIP_WRITE_BARRIER);
3278 message->initialize_elements();
3279 message->set_elements(Heap::empty_fixed_array(), SKIP_WRITE_BARRIER);
3280 message->set_type(type);
3281 message->set_arguments(arguments);
3282 message->set_start_position(start_position);
3283 message->set_end_position(end_position);
3284 message->set_script(script);
3285 message->set_stack_trace(stack_trace);
3286 message->set_stack_frames(stack_frames);
3292 // Returns true for a character in a range. Both limits are inclusive.
3293 static inline bool Between(uint32_t character, uint32_t from, uint32_t to) {
3294   // This makes use of the unsigned wraparound.
3295 return character - from <= to - from;
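// Builds a two character string from the given character codes, reusing an
// existing symbol from the symbol table when one is present.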
3299 MUST_USE_RESULT static inline MaybeObject* MakeOrFindTwoCharacterString(
3304 // Numeric strings have a different hash algorithm not known by
3305 // LookupTwoCharsSymbolIfExists, so we skip this step for such strings.
3306 if ((!Between(c1, '0', '9') || !Between(c2, '0', '9')) &&
3307 heap->symbol_table()->LookupTwoCharsSymbolIfExists(c1, c2, &symbol)) {
3309 // Now we know the length is 2, we might as well make use of that fact
3310 // when building the new string.
3311 } else if ((c1 | c2) <= String::kMaxAsciiCharCodeU) { // We can do this
3312 ASSERT(IsPowerOf2(String::kMaxAsciiCharCodeU + 1)); // because of this.
3314 { MaybeObject* maybe_result = heap->AllocateRawOneByteString(2);
3315 if (!maybe_result->ToObject(&result)) return maybe_result;
3317 char* dest = SeqOneByteString::cast(result)->GetChars();
3323 { MaybeObject* maybe_result = heap->AllocateRawTwoByteString(2);
3324 if (!maybe_result->ToObject(&result)) return maybe_result;
3326 uc16* dest = SeqTwoByteString::cast(result)->GetChars();
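// Concatenates two strings. Very short results are looked up in the symbol
// table or copied into a flat sequential string; longer results become cons
// strings, using the ASCII map whenever the data allows it.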
3334 MaybeObject* Heap::AllocateConsString(String* first, String* second) {
3335 int first_length = first->length();
3336 if (first_length == 0) {
3340 int second_length = second->length();
3341 if (second_length == 0) {
3345 int length = first_length + second_length;
3347 // Optimization for 2-byte strings often used as keys in a decompression
3348 // dictionary. Check whether we already have the string in the symbol
3349   // table to prevent creation of many unnecessary strings.
3351 unsigned c1 = first->Get(0);
3352 unsigned c2 = second->Get(0);
3353 return MakeOrFindTwoCharacterString(this, c1, c2);
3356 bool first_is_ascii = first->IsOneByteRepresentation();
3357 bool second_is_ascii = second->IsOneByteRepresentation();
3358 bool is_ascii = first_is_ascii && second_is_ascii;
3360 // Make sure that an out of memory exception is thrown if the length
3361 // of the new cons string is too large.
3362 if (length > String::kMaxLength || length < 0) {
3363 isolate()->context()->mark_out_of_memory();
3364 return Failure::OutOfMemoryException();
3367 bool is_ascii_data_in_two_byte_string = false;
3369 // At least one of the strings uses two-byte representation so we
3370 // can't use the fast case code for short ASCII strings below, but
3371 // we can try to save memory if all chars actually fit in ASCII.
3372 is_ascii_data_in_two_byte_string =
3373 first->HasOnlyAsciiChars() && second->HasOnlyAsciiChars();
3374 if (is_ascii_data_in_two_byte_string) {
3375 isolate_->counters()->string_add_runtime_ext_to_ascii()->Increment();
3379 // If the resulting string is small make a flat string.
3380 if (length < ConsString::kMinLength) {
3381 // Note that neither of the two inputs can be a slice because:
3382 STATIC_ASSERT(ConsString::kMinLength <= SlicedString::kMinLength);
3383 ASSERT(first->IsFlat());
3384 ASSERT(second->IsFlat());
3387 { MaybeObject* maybe_result = AllocateRawOneByteString(length);
3388 if (!maybe_result->ToObject(&result)) return maybe_result;
3390 // Copy the characters into the new object.
3391 char* dest = SeqOneByteString::cast(result)->GetChars();
3394 if (first->IsExternalString()) {
3395 src = ExternalAsciiString::cast(first)->GetChars();
3397 src = SeqOneByteString::cast(first)->GetChars();
3399 for (int i = 0; i < first_length; i++) *dest++ = src[i];
3400 // Copy second part.
3401 if (second->IsExternalString()) {
3402 src = ExternalAsciiString::cast(second)->GetChars();
3404 src = SeqOneByteString::cast(second)->GetChars();
3406 for (int i = 0; i < second_length; i++) *dest++ = src[i];
3409 if (is_ascii_data_in_two_byte_string) {
3411 { MaybeObject* maybe_result = AllocateRawOneByteString(length);
3412 if (!maybe_result->ToObject(&result)) return maybe_result;
3414 // Copy the characters into the new object.
3415 char* dest = SeqOneByteString::cast(result)->GetChars();
3416 String::WriteToFlat(first, dest, 0, first_length);
3417 String::WriteToFlat(second, dest + first_length, 0, second_length);
3418 isolate_->counters()->string_add_runtime_ext_to_ascii()->Increment();
3423 { MaybeObject* maybe_result = AllocateRawTwoByteString(length);
3424 if (!maybe_result->ToObject(&result)) return maybe_result;
3426 // Copy the characters into the new object.
3427 uc16* dest = SeqTwoByteString::cast(result)->GetChars();
3428 String::WriteToFlat(first, dest, 0, first_length);
3429 String::WriteToFlat(second, dest + first_length, 0, second_length);
3434 Map* map = (is_ascii || is_ascii_data_in_two_byte_string) ?
3435 cons_ascii_string_map() : cons_string_map();
3438 { MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
3439 if (!maybe_result->ToObject(&result)) return maybe_result;
3442 AssertNoAllocation no_gc;
3443 ConsString* cons_string = ConsString::cast(result);
3444 WriteBarrierMode mode = cons_string->GetWriteBarrierMode(no_gc);
3445 cons_string->set_length(length);
3446 cons_string->set_hash_field(String::kEmptyHashField);
3447 cons_string->set_first(first, mode);
3448 cons_string->set_second(second, mode);
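// Creates a substring of buffer. Short or tenured results are copied into a
// fresh sequential string; sufficiently long results become sliced strings
// when string slices are enabled.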
3453 MaybeObject* Heap::AllocateSubString(String* buffer,
3456 PretenureFlag pretenure) {
3457 int length = end - start;
3459 return empty_string();
3460 } else if (length == 1) {
3461 return LookupSingleCharacterStringFromCode(buffer->Get(start));
3462 } else if (length == 2) {
3463 // Optimization for 2-byte strings often used as keys in a decompression
3464 // dictionary. Check whether we already have the string in the symbol
3465     // table to prevent creation of many unnecessary strings.
3466 unsigned c1 = buffer->Get(start);
3467 unsigned c2 = buffer->Get(start + 1);
3468 return MakeOrFindTwoCharacterString(this, c1, c2);
3471 // Make an attempt to flatten the buffer to reduce access time.
3472 buffer = buffer->TryFlattenGetString();
3474 if (!FLAG_string_slices ||
3475 !buffer->IsFlat() ||
3476 length < SlicedString::kMinLength ||
3477 pretenure == TENURED) {
3479 // WriteToFlat takes care of the case when an indirect string has a
3480 // different encoding from its underlying string. These encodings may
3481 // differ because of externalization.
3482 bool is_ascii = buffer->IsOneByteRepresentation();
3483 { MaybeObject* maybe_result = is_ascii
3484 ? AllocateRawOneByteString(length, pretenure)
3485 : AllocateRawTwoByteString(length, pretenure);
3486 if (!maybe_result->ToObject(&result)) return maybe_result;
3488 String* string_result = String::cast(result);
3489 // Copy the characters into the new object.
3491 ASSERT(string_result->IsOneByteRepresentation());
3492 char* dest = SeqOneByteString::cast(string_result)->GetChars();
3493 String::WriteToFlat(buffer, dest, start, end);
3495 ASSERT(string_result->IsTwoByteRepresentation());
3496 uc16* dest = SeqTwoByteString::cast(string_result)->GetChars();
3497 String::WriteToFlat(buffer, dest, start, end);
3502 ASSERT(buffer->IsFlat());
3504 if (FLAG_verify_heap) {
3505 buffer->StringVerify();
3510 // When slicing an indirect string we use its encoding for a newly created
3511 // slice and don't check the encoding of the underlying string. This is safe
3512 // even if the encodings are different because of externalization. If an
3513 // indirect ASCII string is pointing to a two-byte string, the two-byte char
3514 // codes of the underlying string must still fit into ASCII (because
3515 // externalization must not change char codes).
3516 { Map* map = buffer->IsOneByteRepresentation()
3517 ? sliced_ascii_string_map()
3518 : sliced_string_map();
3519 MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
3520 if (!maybe_result->ToObject(&result)) return maybe_result;
3523 AssertNoAllocation no_gc;
3524 SlicedString* sliced_string = SlicedString::cast(result);
3525 sliced_string->set_length(length);
3526 sliced_string->set_hash_field(String::kEmptyHashField);
3527 if (buffer->IsConsString()) {
3528 ConsString* cons = ConsString::cast(buffer);
3529 ASSERT(cons->second()->length() == 0);
3530 sliced_string->set_parent(cons->first());
3531 sliced_string->set_offset(start);
3532 } else if (buffer->IsSlicedString()) {
3533 // Prevent nesting sliced strings.
3534 SlicedString* parent_slice = SlicedString::cast(buffer);
3535 sliced_string->set_parent(parent_slice->parent());
3536 sliced_string->set_offset(start + parent_slice->offset());
3538 sliced_string->set_parent(buffer);
3539 sliced_string->set_offset(start);
3541 ASSERT(sliced_string->parent()->IsSeqString() ||
3542 sliced_string->parent()->IsExternalString());
3547 MaybeObject* Heap::AllocateExternalStringFromAscii(
3548 const ExternalAsciiString::Resource* resource) {
3549 size_t length = resource->length();
3550 if (length > static_cast<size_t>(String::kMaxLength)) {
3551 isolate()->context()->mark_out_of_memory();
3552 return Failure::OutOfMemoryException();
3555 ASSERT(String::IsAscii(resource->data(), static_cast<int>(length)));
3557 Map* map = external_ascii_string_map();
3559 { MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
3560 if (!maybe_result->ToObject(&result)) return maybe_result;
3563 ExternalAsciiString* external_string = ExternalAsciiString::cast(result);
3564 external_string->set_length(static_cast<int>(length));
3565 external_string->set_hash_field(String::kEmptyHashField);
3566 external_string->set_resource(resource);
3572 MaybeObject* Heap::AllocateExternalStringFromTwoByte(
3573 const ExternalTwoByteString::Resource* resource) {
3574 size_t length = resource->length();
3575 if (length > static_cast<size_t>(String::kMaxLength)) {
3576 isolate()->context()->mark_out_of_memory();
3577 return Failure::OutOfMemoryException();
3580 // For small strings we check whether the resource contains only
3581 // ASCII characters. If yes, we use a different string map.
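// Only short resources are scanned here; anything longer than
// kAsciiCheckLengthLimit simply gets the generic two-byte external string
// map, even if its contents happen to be ASCII.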
3582 static const size_t kAsciiCheckLengthLimit = 32;
3583 bool is_ascii = length <= kAsciiCheckLengthLimit &&
3584 String::IsAscii(resource->data(), static_cast<int>(length));
3585 Map* map = is_ascii ?
3586 external_string_with_ascii_data_map() : external_string_map();
3588 { MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
3589 if (!maybe_result->ToObject(&result)) return maybe_result;
3592 ExternalTwoByteString* external_string = ExternalTwoByteString::cast(result);
3593 external_string->set_length(static_cast<int>(length));
3594 external_string->set_hash_field(String::kEmptyHashField);
3595 external_string->set_resource(resource);
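// Returns a one-character string for the given character code. ASCII codes
// are served from (and added to) the single character string cache; other
// codes get a freshly allocated two-byte string.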
3601 MaybeObject* Heap::LookupSingleCharacterStringFromCode(uint16_t code) {
3602 if (code <= String::kMaxAsciiCharCode) {
3603 Object* value = single_character_string_cache()->get(code);
3604 if (value != undefined_value()) return value;
3607 buffer[0] = static_cast<char>(code);
3609 MaybeObject* maybe_result = LookupSymbol(Vector<const char>(buffer, 1));
3611 if (!maybe_result->ToObject(&result)) return maybe_result;
3612 single_character_string_cache()->set(code, result);
3617 { MaybeObject* maybe_result = AllocateRawTwoByteString(1);
3618 if (!maybe_result->ToObject(&result)) return maybe_result;
3620 String* answer = String::cast(result);
3621 answer->Set(0, code);
3626 MaybeObject* Heap::AllocateByteArray(int length, PretenureFlag pretenure) {
3627 if (length < 0 || length > ByteArray::kMaxLength) {
3628 return Failure::OutOfMemoryException();
3630 if (pretenure == NOT_TENURED) {
3631 return AllocateByteArray(length);
3633 int size = ByteArray::SizeFor(length);
3635 { MaybeObject* maybe_result = (size <= Page::kMaxNonCodeHeapObjectSize)
3636 ? old_data_space_->AllocateRaw(size)
3637 : lo_space_->AllocateRaw(size, NOT_EXECUTABLE);
3638 if (!maybe_result->ToObject(&result)) return maybe_result;
3641 reinterpret_cast<ByteArray*>(result)->set_map_no_write_barrier(
byte_array_map());
3643 reinterpret_cast<ByteArray*>(result)->set_length(length);
3648 MaybeObject* Heap::AllocateByteArray(int length) {
3649 if (length < 0 || length > ByteArray::kMaxLength) {
3650 return Failure::OutOfMemoryException();
3652 int size = ByteArray::SizeFor(length);
3653 AllocationSpace space =
3654 (size > Page::kMaxNonCodeHeapObjectSize) ? LO_SPACE : NEW_SPACE;
3656 { MaybeObject* maybe_result = AllocateRaw(size, space, OLD_DATA_SPACE);
3657 if (!maybe_result->ToObject(&result)) return maybe_result;
3660 reinterpret_cast<ByteArray*>(result)->set_map_no_write_barrier(
byte_array_map());
3662 reinterpret_cast<ByteArray*>(result)->set_length(length);
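// Overwrites the memory region [addr, addr + size) with a filler object
// (a one- or two-pointer filler for small holes, a FreeSpace object
// otherwise) so that heap iteration still sees a sequence of valid objects.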
3667 void Heap::CreateFillerObjectAt(Address addr, int size) {
3668 if (size == 0) return;
3669 HeapObject* filler = HeapObject::FromAddress(addr);
3670 if (size == kPointerSize) {
3671 filler->set_map_no_write_barrier(one_pointer_filler_map());
3672 } else if (size == 2 * kPointerSize) {
3673 filler->set_map_no_write_barrier(two_pointer_filler_map());
} else {
3675 filler->set_map_no_write_barrier(free_space_map());
3676 FreeSpace::cast(filler)->set_size(size);
3681 MaybeObject* Heap::AllocateExternalArray(int length,
3682 ExternalArrayType array_type,
3683 void* external_pointer,
3684 PretenureFlag pretenure) {
3685 AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
3687 { MaybeObject* maybe_result = AllocateRaw(ExternalArray::kAlignedSize,
space,
OLD_DATA_SPACE);
3690 if (!maybe_result->ToObject(&result)) return maybe_result;
3693 reinterpret_cast<ExternalArray*>(result)->set_map_no_write_barrier(
3694 MapForExternalArrayType(array_type));
3695 reinterpret_cast<ExternalArray*>(result)->set_length(length);
3696 reinterpret_cast<ExternalArray*>(result)->set_external_pointer(
external_pointer);
3703 MaybeObject* Heap::CreateCode(const CodeDesc& desc,
Code::Flags flags,
3705 Handle<Object> self_reference,
bool immovable) {
3707 // Allocate ByteArray before the Code object, so that we do not risk
3708 // leaving an uninitialized Code object (and breaking the heap).
3709 ByteArray* reloc_info;
3710 MaybeObject* maybe_reloc_info = AllocateByteArray(desc.reloc_size, TENURED);
3711 if (!maybe_reloc_info->To(&reloc_info)) return maybe_reloc_info;
3714 int body_size = RoundUp(desc.instr_size, kObjectAlignment);
3715 int obj_size = Code::SizeFor(body_size);
3716 ASSERT(IsAligned(static_cast<intptr_t>(obj_size), kCodeAlignment));
3717 MaybeObject* maybe_result;
3718 // Large code objects and code objects which should stay at a fixed address
3719 // are allocated in large object space.
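// Note: code_space()->AreaSize() is the largest object that fits on a
// regular code page; anything bigger must go to the large object space.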
3721 bool force_lo_space = obj_size > code_space()->AreaSize();
3722 if (force_lo_space) {
3723 maybe_result = lo_space_->AllocateRaw(obj_size, EXECUTABLE);
} else {
3725 maybe_result = code_space_->AllocateRaw(obj_size);
3727 if (!maybe_result->To<HeapObject>(&result)) return maybe_result;
3729 if (immovable && !force_lo_space &&
3730 // Objects on the first page of each space are never moved.
3731 !code_space_->FirstPage()->Contains(result->address())) {
3732 // Discard the first code allocation, which was on a page where it could be
// moved.
3734 CreateFillerObjectAt(result->address(), obj_size);
3735 maybe_result = lo_space_->AllocateRaw(obj_size, EXECUTABLE);
3736 if (!maybe_result->To<HeapObject>(&result)) return maybe_result;
3739 // Initialize the object
3740 result->set_map_no_write_barrier(code_map());
3741 Code* code = Code::cast(result);
3742 ASSERT(!isolate_->code_range()->exists() ||
3743 isolate_->code_range()->contains(code->address()));
3744 code->set_instruction_size(desc.instr_size);
3745 code->set_relocation_info(reloc_info);
3746 code->set_flags(flags);
3747 if (code->is_call_stub() || code->is_keyed_call_stub()) {
3748 code->set_check_type(RECEIVER_MAP_CHECK);
3750 code->set_deoptimization_data(empty_fixed_array(), SKIP_WRITE_BARRIER);
3751 code->InitializeTypeFeedbackInfoNoWriteBarrier(undefined_value());
3752 code->set_handler_table(empty_fixed_array(), SKIP_WRITE_BARRIER);
3753 code->set_gc_metadata(Smi::FromInt(0));
3754 code->set_ic_age(global_ic_age_);
3755 code->set_prologue_offset(kPrologueOffsetNotSet);
3756 // Allow self references to the created code object by patching the handle to
3757 // point to the newly allocated Code object.
3758 if (!self_reference.is_null()) {
3759 *(self_reference.location()) = code;
3761 // Migrate generated code.
3762 // The generated code can contain Object** values (typically from handles)
3763 // that are dereferenced during the copy to point directly to the actual heap
3764 // objects. These pointers can include references to the code object itself,
3765 // through the self_reference parameter.
3766 code->CopyFrom(desc);
3769 if (FLAG_verify_heap) {
3777 MaybeObject* Heap::CopyCode(Code* code) {
3778 // Allocate an object the same size as the code object.
3779 int obj_size = code->Size();
3780 MaybeObject* maybe_result;
3781 if (obj_size > code_space()->AreaSize()) {
3782 maybe_result = lo_space_->AllocateRaw(obj_size, EXECUTABLE);
} else {
3784 maybe_result = code_space_->AllocateRaw(obj_size);
3788 if (!maybe_result->ToObject(&result)) return maybe_result;
3790 // Copy code object.
3791 Address old_addr = code->address();
3792 Address new_addr = reinterpret_cast<HeapObject*>(result)->address();
3793 CopyBlock(new_addr, old_addr, obj_size);
3794 // Relocate the copy.
3795 Code* new_code = Code::cast(result);
3796 ASSERT(!isolate_->code_range()->exists() ||
3797 isolate_->code_range()->contains(code->address()));
3798 new_code->Relocate(new_addr - old_addr);
3803 MaybeObject* Heap::CopyCode(Code* code, Vector<byte> reloc_info) {
3804 // Allocate ByteArray before the Code object, so that we do not risk
3805 // leaving an uninitialized Code object (and breaking the heap).
3806 Object* reloc_info_array;
3807 { MaybeObject* maybe_reloc_info_array =
3808 AllocateByteArray(reloc_info.length(), TENURED);
3809 if (!maybe_reloc_info_array->ToObject(&reloc_info_array)) {
3810 return maybe_reloc_info_array;
3814 int new_body_size = RoundUp(code->instruction_size(), kObjectAlignment);
3816 int new_obj_size = Code::SizeFor(new_body_size);
3818 Address old_addr = code->address();
3820 size_t relocation_offset =
3821 static_cast<size_t>(code->instruction_end() - old_addr);
3823 MaybeObject* maybe_result;
3824 if (new_obj_size > code_space()->AreaSize()) {
3825 maybe_result = lo_space_->AllocateRaw(new_obj_size, EXECUTABLE);
} else {
3827 maybe_result = code_space_->AllocateRaw(new_obj_size);
3831 if (!maybe_result->ToObject(&result)) return maybe_result;
3833 // Copy code object.
3834 Address new_addr = reinterpret_cast<HeapObject*>(result)->address();
3836 // Copy header and instructions.
3837 memcpy(new_addr, old_addr, relocation_offset);
3839 Code* new_code = Code::cast(result);
3840 new_code->set_relocation_info(ByteArray::cast(reloc_info_array));
3842 // Copy patched rinfo.
3843 memcpy(new_code->relocation_start(), reloc_info.start(), reloc_info.length());
3845 // Relocate the copy.
3846 ASSERT(!isolate_->code_range()->exists() ||
3847 isolate_->code_range()->contains(code->address()));
3848 new_code->Relocate(new_addr - old_addr);
3851 if (FLAG_verify_heap) {
3859 MaybeObject* Heap::Allocate(Map* map, AllocationSpace space) {
3860 ASSERT(gc_state_ == NOT_IN_GC);
3861 ASSERT(map->instance_type() != MAP_TYPE);
3862 // If allocation failures are disallowed, we may allocate in a different
3863 // space when new space is full and the object is not a large object.
3864 AllocationSpace retry_space =
3865 (space != NEW_SPACE) ? space : TargetSpaceId(map->instance_type());
3867 { MaybeObject* maybe_result =
3868 AllocateRaw(map->instance_size(), space, retry_space);
3869 if (!maybe_result->ToObject(&result)) return maybe_result;
3871 // No need for write barrier since object is white and map is in old space.
3872 HeapObject::cast(result)->set_map_no_write_barrier(map);
3877 void Heap::InitializeFunction(JSFunction* function,
3878 SharedFunctionInfo* shared,
3879 Object* prototype) {
3880 ASSERT(!prototype->IsMap());
3881 function->initialize_properties();
3882 function->initialize_elements();
3883 function->set_shared(shared);
3884 function->set_code(shared->code());
3885 function->set_prototype_or_initial_map(prototype);
3886 function->set_context(undefined_value());
3887 function->set_literals_or_bindings(empty_fixed_array());
3888 function->set_next_function_link(undefined_value());
3892 MaybeObject* Heap::AllocateFunctionPrototype(JSFunction* function) {
3893 // Allocate the prototype. Make sure to use the object function
3894 // from the function's context, since the function can be from a
3895 // different context.
3896 JSFunction* object_function =
3897 function->context()->native_context()->object_function();
3899 // Each function prototype gets a copy of the object function map.
3900 // This avoids unwanted sharing of maps between prototypes of different
// functions.
3903 ASSERT(object_function->has_initial_map());
3904 MaybeObject* maybe_map = object_function->initial_map()->Copy();
3905 if (!maybe_map->To(&new_map)) return maybe_map;
3908 MaybeObject* maybe_prototype = AllocateJSObjectFromMap(new_map);
3909 if (!maybe_prototype->ToObject(&prototype)) return maybe_prototype;
3911 // When creating the prototype for the function we must set its
3912 // constructor to the function.
3913 MaybeObject* maybe_failure =
3914 JSObject::cast(prototype)->SetLocalPropertyIgnoreAttributes(
3915 constructor_symbol(), function, DONT_ENUM);
3916 if (maybe_failure->IsFailure()) return maybe_failure;
3922 MaybeObject* Heap::AllocateFunction(Map* function_map,
3923 SharedFunctionInfo* shared,
Object* prototype,
3925 PretenureFlag pretenure) {
3926 AllocationSpace space =
3927 (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
3929 { MaybeObject* maybe_result = Allocate(function_map, space);
3930 if (!maybe_result->ToObject(&result)) return maybe_result;
3932 InitializeFunction(JSFunction::cast(result), shared, prototype);
3937 MaybeObject* Heap::AllocateArgumentsObject(Object* callee, int length) {
3938 // To get fast allocation and map sharing for arguments objects we
3939 // allocate them based on an arguments boilerplate.
3941 JSObject* boilerplate;
3942 int arguments_object_size;
3943 bool strict_mode_callee = callee->IsJSFunction() &&
3944 !JSFunction::cast(callee)->shared()->is_classic_mode();
3945 if (strict_mode_callee) {
boilerplate =
3947 isolate()->context()->native_context()->
3948 strict_mode_arguments_boilerplate();
3949 arguments_object_size = kArgumentsObjectSizeStrict;
} else {
boilerplate =
3952 isolate()->context()->native_context()->arguments_boilerplate();
3953 arguments_object_size = kArgumentsObjectSize;
}
3956 // This calls Copy directly rather than using Heap::AllocateRaw so we
3957 // duplicate the check here.
3958 ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
3960 // Check that the size of the boilerplate matches our
3961 // expectations. The ArgumentsAccessStub::GenerateNewObject relies
3962 // on the size being a known constant.
3963 ASSERT(arguments_object_size == boilerplate->map()->instance_size());
3965 // Do the allocation.
3967 { MaybeObject* maybe_result =
3968 AllocateRaw(arguments_object_size, NEW_SPACE, OLD_POINTER_SPACE);
3969 if (!maybe_result->ToObject(&result)) return maybe_result;
3972 // Copy the content. The arguments boilerplate doesn't have any
3973 // fields that point to new space so it's safe to skip the write
// barrier.
3975 CopyBlock(HeapObject::cast(result)->address(),
3976 boilerplate->address(),
3977 JSObject::kHeaderSize);
3979 // Set the length property.
3980 JSObject::cast(result)->InObjectPropertyAtPut(kArgumentsLengthIndex,
3981 Smi::FromInt(length),
3982 SKIP_WRITE_BARRIER);
3983 // Set the callee property for non-strict mode arguments object only.
3984 if (!strict_mode_callee) {
3985 JSObject::cast(result)->InObjectPropertyAtPut(kArgumentsCalleeIndex,
callee);
}
3989 // Check the state of the object
3990 ASSERT(JSObject::cast(result)->HasFastProperties());
3991 ASSERT(JSObject::cast(result)->HasFastObjectElements());
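// Returns true if two descriptors in the given (already sorted) descriptor
// array share the same key; used below to reject inline constructors whose
// this-property assignments repeat a property name.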
3997 static bool HasDuplicates(DescriptorArray* descriptors) {
3998 int count = descriptors->number_of_descriptors();
4000 String* prev_key = descriptors->GetKey(0);
4001 for (int i = 1; i != count; i++) {
4002 String* current_key = descriptors->GetKey(i);
4003 if (prev_key == current_key) return true;
4004 prev_key = current_key;
4011 MaybeObject* Heap::AllocateInitialMap(JSFunction* fun) {
4012 ASSERT(!fun->has_initial_map());
4014 // First create a new map with the size and number of in-object properties
4015 // suggested by the function.
4016 int instance_size = fun->shared()->CalculateInstanceSize();
4017 int in_object_properties = fun->shared()->CalculateInObjectProperties();
4019 MaybeObject* maybe_map = AllocateMap(JS_OBJECT_TYPE, instance_size);
4020 if (!maybe_map->To(&map)) return maybe_map;
4022 // Fetch or allocate prototype.
4024 if (fun->has_instance_prototype()) {
4025 prototype = fun->instance_prototype();
} else {
4027 MaybeObject* maybe_prototype = AllocateFunctionPrototype(fun);
4028 if (!maybe_prototype->To(&prototype)) return maybe_prototype;
4030 map->set_inobject_properties(in_object_properties);
4031 map->set_unused_property_fields(in_object_properties);
4032 map->set_prototype(prototype);
4033 ASSERT(map->has_fast_object_elements());
4035 // If the function has only simple this property assignments add
4036 // field descriptors for these to the initial map as the object
4037 // cannot be constructed without having these properties. Guard by
4038 // the inline_new flag so we only change the map if we generate a
4039 // specialized construct stub.
4040 ASSERT(in_object_properties <= Map::kMaxPreAllocatedPropertyFields);
4041 if (fun->shared()->CanGenerateInlineConstructor(prototype)) {
4042 int count = fun->shared()->this_property_assignments_count();
4043 if (count > in_object_properties) {
4044 // Inline constructor can only handle inobject properties.
4045 fun->shared()->ForbidInlineConstructor();
} else {
4047 DescriptorArray* descriptors;
4048 MaybeObject* maybe_descriptors = DescriptorArray::Allocate(count);
4049 if (!maybe_descriptors->To(&descriptors)) return maybe_descriptors;
4051 DescriptorArray::WhitenessWitness witness(descriptors);
4052 for (int i = 0; i < count; i++) {
4053 String* name = fun->shared()->GetThisPropertyAssignmentName(i);
4054 ASSERT(name->IsSymbol());
4055 FieldDescriptor field(name, i, NONE, i + 1);
4056 descriptors->Set(i, &field, witness);
4058 descriptors->Sort();
4060 // The descriptors may contain duplicates because the compiler does not
4061 // guarantee the uniqueness of property names (it would have required
4062 // quadratic time). Once the descriptors are sorted we can check for
4063 // duplicates in linear time.
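// For example, a constructor whose body assigns the same property twice
// (this.x = a; this.x = b;) produces two descriptors with the key "x".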
4064 if (HasDuplicates(descriptors)) {
4065 fun->shared()->ForbidInlineConstructor();
} else {
4067 map->InitializeDescriptors(descriptors);
4068 map->set_pre_allocated_property_fields(count);
4069 map->set_unused_property_fields(in_object_properties - count);
4074 fun->shared()->StartInobjectSlackTracking(map);
4080 void Heap::InitializeJSObjectFromMap(JSObject* obj,
4081 FixedArray* properties,
Map* map) {
4083 obj->set_properties(properties);
4084 obj->initialize_elements();
4085 // TODO(1240798): Initialize the object's body using valid initial values
4086 // according to the object's initial map. For example, if the map's
4087 // instance type is JS_ARRAY_TYPE, the length field should be initialized
4088 // to a number (e.g. Smi::FromInt(0)) and the elements initialized to a
4089 // fixed array (e.g. Heap::empty_fixed_array()). Currently, the object
4090 // verification code has to cope with (temporarily) invalid objects. See,
4091 // for example, JSArray::JSArrayVerify.
4093 // We cannot always fill with one_pointer_filler_map because objects
4094 // created from API functions expect their internal fields to be initialized
4095 // with undefined_value.
4096 // Pre-allocated fields need to be initialized with undefined_value as well
4097 // so that object accesses before the constructor completes (e.g. in the
4098 // debugger) will not cause a crash.
4099 if (map->constructor()->IsJSFunction() &&
4100 JSFunction::cast(map->constructor())->shared()->
4101 IsInobjectSlackTrackingInProgress()) {
4102 // We might want to shrink the object later.
4103 ASSERT(obj->GetInternalFieldCount() == 0);
4104 filler = Heap::one_pointer_filler_map();
} else {
4106 filler = Heap::undefined_value();
4108 obj->InitializeBody(map, Heap::undefined_value(), filler);
4112 MaybeObject* Heap::AllocateJSObjectFromMap(Map* map, PretenureFlag pretenure) {
4113 // JSFunctions should be allocated using AllocateFunction to be
4114 // properly initialized.
4115 ASSERT(map->instance_type() != JS_FUNCTION_TYPE);
4117 // Both types of global objects should be allocated using
4118 // AllocateGlobalObject to be properly initialized.
4119 ASSERT(map->instance_type() != JS_GLOBAL_OBJECT_TYPE);
4120 ASSERT(map->instance_type() != JS_BUILTINS_OBJECT_TYPE);
4122 // Allocate the backing storage for the properties.
int prop_size =
4124 map->pre_allocated_property_fields() +
4125 map->unused_property_fields() -
4126 map->inobject_properties();
4127 ASSERT(prop_size >= 0);
4129 { MaybeObject* maybe_properties = AllocateFixedArray(prop_size, pretenure);
4130 if (!maybe_properties->ToObject(&properties)) return maybe_properties;
4133 // Allocate the JSObject.
4134 AllocationSpace space =
4135 (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
4136 if (map->instance_size() > Page::kMaxNonCodeHeapObjectSize) space = LO_SPACE;
4138 { MaybeObject* maybe_obj = Allocate(map, space);
4139 if (!maybe_obj->ToObject(&obj)) return maybe_obj;
4142 // Initialize the JSObject.
4143 InitializeJSObjectFromMap(JSObject::cast(obj),
4144 FixedArray::cast(properties),
4146 ASSERT(JSObject::cast(obj)->HasFastElements());
4151 MaybeObject* Heap::AllocateJSObject(JSFunction* constructor,
4152 PretenureFlag pretenure) {
4153 // Allocate the initial map if absent.
4154 if (!constructor->has_initial_map()) {
4155 Object* initial_map;
4156 { MaybeObject* maybe_initial_map = AllocateInitialMap(constructor);
4157 if (!maybe_initial_map->ToObject(&initial_map)) return maybe_initial_map;
4159 constructor->set_initial_map(Map::cast(initial_map));
4160 Map::cast(initial_map)->set_constructor(constructor);
4162 // Allocate the object based on the constructors initial map.
4163 MaybeObject* result = AllocateJSObjectFromMap(
4164 constructor->initial_map(), pretenure);
4166 // Make sure result is NOT a global object if valid.
4167 Object* non_failure;
4168 ASSERT(!result->ToObject(&non_failure) || !non_failure->IsGlobalObject());
4174 MaybeObject* Heap::AllocateJSModule(Context* context, ScopeInfo* scope_info) {
4175 // Allocate a fresh map. Modules do not have a prototype.
4177 MaybeObject* maybe_map = AllocateMap(JS_MODULE_TYPE, JSModule::kSize);
4178 if (!maybe_map->To(&map)) return maybe_map;
4179 // Allocate the object based on the map.
4181 MaybeObject* maybe_module = AllocateJSObjectFromMap(map, TENURED);
4182 if (!maybe_module->To(&module)) return maybe_module;
4183 module->set_context(context);
4184 module->set_scope_info(scope_info);
4189 MaybeObject* Heap::AllocateJSArrayAndStorage(
4190 ElementsKind elements_kind,
int length,
int capacity,
4193 ArrayStorageAllocationMode mode,
4194 PretenureFlag pretenure) {
4195 ASSERT(capacity >= length);
4196 MaybeObject* maybe_array = AllocateJSArray(elements_kind, pretenure);
4198 if (!maybe_array->To(&array)) return maybe_array;
4200 if (capacity == 0) {
4201 array->set_length(Smi::FromInt(0));
4202 array->set_elements(empty_fixed_array());
4206 FixedArrayBase* elms;
4207 MaybeObject* maybe_elms = NULL;
4208 if (IsFastDoubleElementsKind(elements_kind)) {
4209 if (mode == DONT_INITIALIZE_ARRAY_ELEMENTS) {
4210 maybe_elms = AllocateUninitializedFixedDoubleArray(capacity);
} else {
4212 ASSERT(mode == INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
4213 maybe_elms = AllocateFixedDoubleArrayWithHoles(capacity);
}
} else {
4216 ASSERT(IsFastSmiOrObjectElementsKind(elements_kind));
4217 if (mode == DONT_INITIALIZE_ARRAY_ELEMENTS) {
4218 maybe_elms = AllocateUninitializedFixedArray(capacity);
} else {
4220 ASSERT(mode == INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
4221 maybe_elms = AllocateFixedArrayWithHoles(capacity);
}
}
4224 if (!maybe_elms->To(&elms)) return maybe_elms;
4226 array->set_elements(elms);
4227 array->set_length(Smi::FromInt(length));
4232 MaybeObject* Heap::AllocateJSArrayWithElements(
4233 FixedArrayBase* elements,
4234 ElementsKind elements_kind,
int length,
4236 PretenureFlag pretenure) {
4237 MaybeObject* maybe_array = AllocateJSArray(elements_kind, pretenure);
4239 if (!maybe_array->To(&array)) return maybe_array;
4241 array->set_elements(elements);
4242 array->set_length(Smi::FromInt(length));
4243 array->ValidateElements();
4248 MaybeObject* Heap::AllocateJSProxy(Object* handler, Object* prototype) {
4250 // TODO(rossberg): Once we optimize proxies, think about a scheme to share
4251 // maps. Will probably depend on the identity of the handler object, too.
4253 MaybeObject* maybe_map_obj = AllocateMap(JS_PROXY_TYPE, JSProxy::kSize);
4254 if (!maybe_map_obj->To<Map>(&map)) return maybe_map_obj;
4255 map->set_prototype(prototype);
4257 // Allocate the proxy object.
4259 MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
4260 if (!maybe_result->To<JSProxy>(&result)) return maybe_result;
4261 result->InitializeBody(map->instance_size(), Smi::FromInt(0));
4262 result->set_handler(handler);
4263 result->set_hash(undefined_value(), SKIP_WRITE_BARRIER);
4268 MaybeObject* Heap::AllocateJSFunctionProxy(Object* handler,
Object* call_trap,
4270 Object* construct_trap,
4271 Object* prototype) {
4273 // TODO(rossberg): Once we optimize proxies, think about a scheme to share
4274 // maps. Will probably depend on the identity of the handler object, too.
4276 MaybeObject* maybe_map_obj =
4277 AllocateMap(JS_FUNCTION_PROXY_TYPE, JSFunctionProxy::kSize);
4278 if (!maybe_map_obj->To<Map>(&map)) return maybe_map_obj;
4279 map->set_prototype(prototype);
4281 // Allocate the proxy object.
4282 JSFunctionProxy* result;
4283 MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
4284 if (!maybe_result->To<JSFunctionProxy>(&result)) return maybe_result;
4285 result->InitializeBody(map->instance_size(), Smi::FromInt(0));
4286 result->set_handler(handler);
4287 result->set_hash(undefined_value(), SKIP_WRITE_BARRIER);
4288 result->set_call_trap(call_trap);
4289 result->set_construct_trap(construct_trap);
4294 MaybeObject* Heap::AllocateGlobalObject(JSFunction* constructor) {
4295 ASSERT(constructor->has_initial_map());
4296 Map* map = constructor->initial_map();
4297 ASSERT(map->is_dictionary_map());
4299 // Make sure no field properties are described in the initial map.
4300 // This guarantees us that normalizing the properties does not
4301 // require us to change property values to JSGlobalPropertyCells.
4302 ASSERT(map->NextFreePropertyIndex() == 0);
4304 // Make sure we don't have a ton of pre-allocated slots in the
4305 // global objects. They will be unused once we normalize the object.
4306 ASSERT(map->unused_property_fields() == 0);
4307 ASSERT(map->inobject_properties() == 0);
4309 // Initial size of the backing store to avoid resize of the storage during
4310 // bootstrapping. The size differs between the JS global object and the
// builtins object.
4312 int initial_size = map->instance_type() == JS_GLOBAL_OBJECT_TYPE ? 64 : 512;
4314 // Allocate a dictionary object for backing storage.
4315 StringDictionary* dictionary;
4316 MaybeObject* maybe_dictionary =
4317 StringDictionary::Allocate(
4318 map->NumberOfOwnDescriptors() * 2 + initial_size);
4319 if (!maybe_dictionary->To(&dictionary)) return maybe_dictionary;
4321 // The global object might be created from an object template with accessors.
4322 // Fill these accessors into the dictionary.
4323 DescriptorArray* descs = map->instance_descriptors();
4324 for (int i = 0; i < descs->number_of_descriptors(); i++) {
4325 PropertyDetails details = descs->GetDetails(i);
4326 ASSERT(details.type() == CALLBACKS); // Only accessors are expected.
4327 PropertyDetails d = PropertyDetails(details.attributes(),
CALLBACKS,
4329 details.descriptor_index());
4330 Object* value = descs->GetCallbacksObject(i);
4331 MaybeObject* maybe_value = AllocateJSGlobalPropertyCell(value);
4332 if (!maybe_value->ToObject(&value)) return maybe_value;
4334 MaybeObject* maybe_added = dictionary->Add(descs->GetKey(i), value, d);
4335 if (!maybe_added->To(&dictionary)) return maybe_added;
4338 // Allocate the global object and initialize it with the backing store.
4340 MaybeObject* maybe_global = Allocate(map, OLD_POINTER_SPACE);
4341 if (!maybe_global->To(&global)) return maybe_global;
4343 InitializeJSObjectFromMap(global, dictionary, map);
4345 // Create a new map for the global object.
4347 MaybeObject* maybe_map = map->CopyDropDescriptors();
4348 if (!maybe_map->To(&new_map)) return maybe_map;
4349 new_map->set_dictionary_map(true);
4351 // Set up the global object as a normalized object.
4352 global->set_map(new_map);
4353 global->set_properties(dictionary);
4355 // Make sure result is a global object with properties in dictionary.
4356 ASSERT(global->IsGlobalObject());
4357 ASSERT(!global->HasFastProperties());
4362 MaybeObject* Heap::CopyJSObject(JSObject* source) {
4363 // Never used to copy functions. If functions need to be copied we
4364 // have to be careful to clear the literals array.
4365 SLOW_ASSERT(!source->IsJSFunction());
4368 Map* map = source->map();
4369 int object_size = map->instance_size();
4372 WriteBarrierMode wb_mode = UPDATE_WRITE_BARRIER;
4374 // If we're forced to always allocate, we use the general allocation
4375 // functions which may leave us with an object in old space.
4376 if (always_allocate()) {
4377 { MaybeObject* maybe_clone =
4378 AllocateRaw(object_size, NEW_SPACE, OLD_POINTER_SPACE);
4379 if (!maybe_clone->ToObject(&clone)) return maybe_clone;
4381 Address clone_address = HeapObject::cast(clone)->address();
4382 CopyBlock(clone_address,
4385 // Update write barrier for all fields that lie beyond the header.
4386 RecordWrites(clone_address,
4387 JSObject::kHeaderSize,
4388 (object_size - JSObject::kHeaderSize) / kPointerSize);
} else {
4390 wb_mode = SKIP_WRITE_BARRIER;
4391 { MaybeObject* maybe_clone = new_space_.AllocateRaw(object_size);
4392 if (!maybe_clone->ToObject(&clone)) return maybe_clone;
4394 SLOW_ASSERT(InNewSpace(clone));
4395 // Since we know the clone is allocated in new space, we can copy
4396 // the contents without worrying about updating the write barrier.
4397 CopyBlock(HeapObject::cast(clone)->address(),
4403 JSObject::cast(clone)->GetElementsKind() == source->GetElementsKind());
4404 FixedArrayBase* elements = FixedArrayBase::cast(source->elements());
4405 FixedArray* properties = FixedArray::cast(source->properties());
4406 // Update elements if necessary.
4407 if (elements->length() > 0) {
4409 { MaybeObject* maybe_elem;
4410 if (elements->map() == fixed_cow_array_map()) {
4411 maybe_elem = FixedArray::cast(elements);
4412 } else if (source->HasFastDoubleElements()) {
4413 maybe_elem = CopyFixedDoubleArray(FixedDoubleArray::cast(elements));
} else {
4415 maybe_elem = CopyFixedArray(FixedArray::cast(elements));
4417 if (!maybe_elem->ToObject(&elem)) return maybe_elem;
4419 JSObject::cast(clone)->set_elements(FixedArrayBase::cast(elem), wb_mode);
4421 // Update properties if necessary.
4422 if (properties->length() > 0) {
4424 { MaybeObject* maybe_prop = CopyFixedArray(properties);
4425 if (!maybe_prop->ToObject(&prop)) return maybe_prop;
4427 JSObject::cast(clone)->set_properties(FixedArray::cast(prop), wb_mode);
4429 // Return the new clone.
4434 MaybeObject* Heap::ReinitializeJSReceiver(
4435 JSReceiver* object, InstanceType type, int size) {
4436 ASSERT(type >= FIRST_JS_OBJECT_TYPE);
4438 // Allocate fresh map.
4439 // TODO(rossberg): Once we optimize proxies, cache these maps.
4441 MaybeObject* maybe = AllocateMap(type, size);
4442 if (!maybe->To<Map>(&map)) return maybe;
4444 // Check that the receiver has at least the size of the fresh object.
4445 int size_difference = object->map()->instance_size() - map->instance_size();
4446 ASSERT(size_difference >= 0);
4448 map->set_prototype(object->map()->prototype());
4450 // Allocate the backing storage for the properties.
4451 int prop_size = map->unused_property_fields() - map->inobject_properties();
4453 maybe = AllocateFixedArray(prop_size, TENURED);
4454 if (!maybe->ToObject(&properties)) return maybe;
4456 // Functions require some allocation, which might fail here.
4457 SharedFunctionInfo* shared = NULL;
4458 if (type == JS_FUNCTION_TYPE) {
4460 maybe = LookupAsciiSymbol("<freezing call trap>");
4461 if (!maybe->To<String>(&name)) return maybe;
4462 maybe = AllocateSharedFunctionInfo(name);
4463 if (!maybe->To<SharedFunctionInfo>(&shared)) return maybe;
4466 // Because of possible retries of this function after failure,
4467 // we must NOT fail after this point, where we have changed the type!
4469 // Reset the map for the object.
4470 object->set_map(map);
4471 JSObject* jsobj = JSObject::cast(object);
4473 // Reinitialize the object from the constructor map.
4474 InitializeJSObjectFromMap(jsobj, FixedArray::cast(properties), map);
4476 // Functions require some minimal initialization.
4477 if (type == JS_FUNCTION_TYPE) {
4478 map->set_function_with_prototype(true);
4479 InitializeFunction(JSFunction::cast(object), shared, the_hole_value());
4480 JSFunction::cast(object)->set_context(
4481 isolate()->context()->native_context());
4484 // Put in filler if the new object is smaller than the old.
4485 if (size_difference > 0) {
4486 CreateFillerObjectAt(
4487 object->address() + map->instance_size(), size_difference);
4494 MaybeObject* Heap::ReinitializeJSGlobalProxy(JSFunction* constructor,
4495 JSGlobalProxy* object) {
4496 ASSERT(constructor->has_initial_map());
4497 Map* map = constructor->initial_map();
4499 // Check that the already allocated object has the same size and type as
4500 // objects allocated using the constructor.
4501 ASSERT(map->instance_size() == object->map()->instance_size());
4502 ASSERT(map->instance_type() == object->map()->instance_type());
4504 // Allocate the backing storage for the properties.
4505 int prop_size = map->unused_property_fields() - map->inobject_properties();
4507 { MaybeObject* maybe_properties = AllocateFixedArray(prop_size, TENURED);
4508 if (!maybe_properties->ToObject(&properties)) return maybe_properties;
4511 // Reset the map for the object.
4512 object->set_map(constructor->initial_map());
4514 // Reinitialize the object from the constructor map.
4515 InitializeJSObjectFromMap(object, FixedArray::cast(properties), map);
4520 MaybeObject* Heap::AllocateStringFromOneByte(Vector<const char> string,
4521 PretenureFlag pretenure) {
4522 int length = string.length();
if (length == 1) {
4524 return Heap::LookupSingleCharacterStringFromCode(string[0]);
}
4527 { MaybeObject* maybe_result =
4528 AllocateRawOneByteString(string.length(), pretenure);
4529 if (!maybe_result->ToObject(&result)) return maybe_result;
4532 // Copy the characters into the new object.
4533 CopyChars(SeqOneByteString::cast(result)->GetChars(), string.start(), length);
4538 MaybeObject* Heap::AllocateStringFromUtf8Slow(Vector<const char> string,
4539 int non_ascii_start,
4540 PretenureFlag pretenure) {
4541 // Continue counting the number of characters in the UTF-8 string, starting
4542 // from the first non-ascii character or word.
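// Note: chars counts UTF-16 code units rather than Unicode characters, so
// code points outside the BMP (which require a surrogate pair) contribute
// two to the count.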
4543 int chars = non_ascii_start;
4544 Access<UnicodeCache::Utf8Decoder>
4545 decoder(isolate_->unicode_cache()->utf8_decoder());
4546 decoder->Reset(string.start() + non_ascii_start, string.length() - chars);
4547 while (decoder->has_more()) {
4548 uint32_t r = decoder->GetNext();
4549 if (r <= unibrow::Utf16::kMaxNonSurrogateCharCode) {
chars++;
} else {
chars += 2;
}
}
4557 { MaybeObject* maybe_result = AllocateRawTwoByteString(chars, pretenure);
4558 if (!maybe_result->ToObject(&result)) return maybe_result;
4561 // Convert and copy the characters into the new object.
4562 SeqTwoByteString* twobyte = SeqTwoByteString::cast(result);
4563 decoder->Reset(string.start(), string.length());
4566 uint32_t r = decoder->GetNext();
4567 if (r > unibrow::Utf16::kMaxNonSurrogateCharCode) {
4568 twobyte->SeqTwoByteStringSet(i++, unibrow::Utf16::LeadSurrogate(r));
4569 twobyte->SeqTwoByteStringSet(i++, unibrow::Utf16::TrailSurrogate(r));
} else {
4571 twobyte->SeqTwoByteStringSet(i++, r);
4578 MaybeObject* Heap::AllocateStringFromTwoByte(Vector<const uc16> string,
4579 PretenureFlag pretenure) {
4580 // Check if the string is an ASCII string.
4582 int length = string.length();
4583 const uc16* start = string.start();
4585 if (String::IsAscii(start, length)) {
4586 MaybeObject* maybe_result = AllocateRawOneByteString(length, pretenure);
4587 if (!maybe_result->ToObject(&result)) return maybe_result;
4588 CopyChars(SeqOneByteString::cast(result)->GetChars(), start, length);
4589 } else { // It's not an ASCII string.
4590 MaybeObject* maybe_result = AllocateRawTwoByteString(length, pretenure);
4591 if (!maybe_result->ToObject(&result)) return maybe_result;
4592 CopyChars(SeqTwoByteString::cast(result)->GetChars(), start, length);
4598 Map* Heap::SymbolMapForString(String* string) {
4599 // If the string is in new space it cannot be used as a symbol.
4600 if (InNewSpace(string)) return NULL;
4602 // Find the corresponding symbol map for strings.
4603 switch (string->map()->instance_type()) {
4604 case STRING_TYPE: return symbol_map();
4605 case ASCII_STRING_TYPE: return ascii_symbol_map();
4606 case CONS_STRING_TYPE: return cons_symbol_map();
4607 case CONS_ASCII_STRING_TYPE: return cons_ascii_symbol_map();
4608 case EXTERNAL_STRING_TYPE: return external_symbol_map();
4609 case EXTERNAL_ASCII_STRING_TYPE: return external_ascii_symbol_map();
4610 case EXTERNAL_STRING_WITH_ASCII_DATA_TYPE:
4611 return external_symbol_with_ascii_data_map();
4612 case SHORT_EXTERNAL_STRING_TYPE: return short_external_symbol_map();
4613 case SHORT_EXTERNAL_ASCII_STRING_TYPE:
4614 return short_external_ascii_symbol_map();
4615 case SHORT_EXTERNAL_STRING_WITH_ASCII_DATA_TYPE:
4616 return short_external_symbol_with_ascii_data_map();
4617 default: return NULL; // No match found.
4622 MaybeObject* Heap::AllocateInternalSymbol(unibrow::CharacterStream* buffer,
int chars,
4624 uint32_t hash_field) {
4626 // Ensure that chars matches the number of characters in the buffer.
4627 ASSERT(static_cast<unsigned>(chars) == buffer->Utf16Length());
4628 // Determine whether the string is ASCII.
4629 bool is_ascii = true;
4630 while (buffer->has_more()) {
4631 if (buffer->GetNext() > unibrow::Utf8::kMaxOneByteChar) {
is_ascii = false;
break;
}
}
buffer->Rewind();
4638 // Compute map and object size.
Map* map;
int size;
if (is_ascii) {
4643 if (chars > SeqOneByteString::kMaxLength) {
4644 return Failure::OutOfMemoryException();
}
4646 map = ascii_symbol_map();
4647 size = SeqOneByteString::SizeFor(chars);
} else {
4649 if (chars > SeqTwoByteString::kMaxLength) {
4650 return Failure::OutOfMemoryException();
}
map = symbol_map();
4653 size = SeqTwoByteString::SizeFor(chars);
}
4658 { MaybeObject* maybe_result = (size > Page::kMaxNonCodeHeapObjectSize)
4659 ? lo_space_->AllocateRaw(size, NOT_EXECUTABLE)
4660 : old_data_space_->AllocateRaw(size);
4661 if (!maybe_result->ToObject(&result)) return maybe_result;
4664 reinterpret_cast<HeapObject*>(result)->set_map_no_write_barrier(map);
4665 // Set length and hash fields of the allocated string.
4666 String* answer = String::cast(result);
4667 answer->set_length(chars);
4668 answer->set_hash_field(hash_field);
4670 ASSERT_EQ(size, answer->Size());
4672 // Fill in the characters.
int i = 0;
while (i < chars) {
4675 uint32_t character = buffer->GetNext();
4676 if (character > unibrow::Utf16::kMaxNonSurrogateCharCode) {
4677 answer->Set(i++, unibrow::Utf16::LeadSurrogate(character));
4678 answer->Set(i++, unibrow::Utf16::TrailSurrogate(character));
} else {
4680 answer->Set(i++, character);
4687 MaybeObject* Heap::AllocateRawOneByteString(int length,
4688 PretenureFlag pretenure) {
4689 if (length < 0 || length > SeqOneByteString::kMaxLength) {
4690 return Failure::OutOfMemoryException();
4693 int size = SeqOneByteString::SizeFor(length);
4694 ASSERT(size <= SeqOneByteString::kMaxSize);
4696 AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
4697 AllocationSpace retry_space = OLD_DATA_SPACE;
4699 if (space == NEW_SPACE) {
4700 if (size > kMaxObjectSizeInNewSpace) {
4701 // Allocate in large object space, retry space will be ignored.
space = LO_SPACE;
4703 } else if (size > Page::kMaxNonCodeHeapObjectSize) {
4704 // Allocate in new space, retry in large object space.
4705 retry_space = LO_SPACE;
4707 } else if (space == OLD_DATA_SPACE &&
4708 size > Page::kMaxNonCodeHeapObjectSize) {
space = LO_SPACE;
}
4712 { MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
4713 if (!maybe_result->ToObject(&result)) return maybe_result;
4716 // Partially initialize the object.
4717 HeapObject::cast(result)->set_map_no_write_barrier(ascii_string_map());
4718 String::cast(result)->set_length(length);
4719 String::cast(result)->set_hash_field(String::kEmptyHashField);
4720 ASSERT_EQ(size, HeapObject::cast(result)->Size());
4723 if (FLAG_verify_heap) {
4724 // Initialize string's content to ensure ASCII-ness (character range 0-127)
4725 // as required when verifying the heap.
4726 char* dest = SeqOneByteString::cast(result)->GetChars();
4727 memset(dest, 0x0F, length * kCharSize);
4735 MaybeObject* Heap::AllocateRawTwoByteString(int length,
4736 PretenureFlag pretenure) {
4737 if (length < 0 || length > SeqTwoByteString::kMaxLength) {
4738 return Failure::OutOfMemoryException();
4740 int size = SeqTwoByteString::SizeFor(length);
4741 ASSERT(size <= SeqTwoByteString::kMaxSize);
4742 AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
4743 AllocationSpace retry_space = OLD_DATA_SPACE;
4745 if (space == NEW_SPACE) {
4746 if (size > kMaxObjectSizeInNewSpace) {
4747 // Allocate in large object space, retry space will be ignored.
space = LO_SPACE;
4749 } else if (size > Page::kMaxNonCodeHeapObjectSize) {
4750 // Allocate in new space, retry in large object space.
4751 retry_space = LO_SPACE;
4753 } else if (space == OLD_DATA_SPACE &&
4754 size > Page::kMaxNonCodeHeapObjectSize) {
space = LO_SPACE;
}
4758 { MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
4759 if (!maybe_result->ToObject(&result)) return maybe_result;
4762 // Partially initialize the object.
4763 HeapObject::cast(result)->set_map_no_write_barrier(string_map());
4764 String::cast(result)->set_length(length);
4765 String::cast(result)->set_hash_field(String::kEmptyHashField);
4766 ASSERT_EQ(size, HeapObject::cast(result)->Size());
4771 MaybeObject* Heap::AllocateJSArray(
4772 ElementsKind elements_kind,
4773 PretenureFlag pretenure) {
4774 Context* native_context = isolate()->context()->native_context();
4775 JSFunction* array_function = native_context->array_function();
4776 Map* map = array_function->initial_map();
4777 Object* maybe_map_array = native_context->js_array_maps();
4778 if (!maybe_map_array->IsUndefined()) {
4779 Object* maybe_transitioned_map =
4780 FixedArray::cast(maybe_map_array)->get(elements_kind);
4781 if (!maybe_transitioned_map->IsUndefined()) {
4782 map = Map::cast(maybe_transitioned_map);
4786 return AllocateJSObjectFromMap(map, pretenure);
4790 MaybeObject* Heap::AllocateEmptyFixedArray() {
4791 int size = FixedArray::SizeFor(0);
4793 { MaybeObject* maybe_result =
4794 AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE);
4795 if (!maybe_result->ToObject(&result)) return maybe_result;
4797 // Initialize the object.
4798 reinterpret_cast<FixedArray*>(result)->set_map_no_write_barrier(
4800 reinterpret_cast<FixedArray*>(result)->set_length(0);
4805 MaybeObject* Heap::AllocateRawFixedArray(int length) {
4806 if (length < 0 || length > FixedArray::kMaxLength) {
4807 return Failure::OutOfMemoryException();
4810 // Use the general function if we're forced to always allocate.
4811 if (always_allocate()) return AllocateFixedArray(length, TENURED);
4812 // Allocate the raw data for a fixed array.
4813 int size = FixedArray::SizeFor(length);
4814 return size <= kMaxObjectSizeInNewSpace
4815 ? new_space_.AllocateRaw(size)
4816 : lo_space_->AllocateRaw(size, NOT_EXECUTABLE);
4820 MaybeObject* Heap::CopyFixedArrayWithMap(FixedArray* src, Map* map) {
4821 int len = src->length();
4823 { MaybeObject* maybe_obj = AllocateRawFixedArray(len);
4824 if (!maybe_obj->ToObject(&obj)) return maybe_obj;
4826 if (InNewSpace(obj)) {
4827 HeapObject* dst = HeapObject::cast(obj);
4828 dst->set_map_no_write_barrier(map);
4829 CopyBlock(dst->address() + kPointerSize,
4830 src->address() + kPointerSize,
4831 FixedArray::SizeFor(len) - kPointerSize);
return obj;
}
4834 HeapObject::cast(obj)->set_map_no_write_barrier(map);
4835 FixedArray* result = FixedArray::cast(obj);
4836 result->set_length(len);
4839 AssertNoAllocation no_gc;
4840 WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
4841 for (int i = 0; i < len; i++) result->set(i, src->get(i), mode);
4846 MaybeObject* Heap::CopyFixedDoubleArrayWithMap(FixedDoubleArray* src,
Map* map) {
4848 int len = src->length();
4850 { MaybeObject* maybe_obj = AllocateRawFixedDoubleArray(len, NOT_TENURED);
4851 if (!maybe_obj->ToObject(&obj)) return maybe_obj;
4853 HeapObject* dst = HeapObject::cast(obj);
4854 dst->set_map_no_write_barrier(map);
4856 dst->address() + FixedDoubleArray::kLengthOffset,
4857 src->address() + FixedDoubleArray::kLengthOffset,
4858 FixedDoubleArray::SizeFor(len) - FixedDoubleArray::kLengthOffset);
4863 MaybeObject* Heap::AllocateFixedArray(int length) {
4864 ASSERT(length >= 0);
4865 if (length == 0) return empty_fixed_array();
4867 { MaybeObject* maybe_result = AllocateRawFixedArray(length);
4868 if (!maybe_result->ToObject(&result)) return maybe_result;
4870 // Initialize header.
4871 FixedArray* array = reinterpret_cast<FixedArray*>(result);
4872 array->set_map_no_write_barrier(fixed_array_map());
4873 array->set_length(length);
4875 ASSERT(!InNewSpace(undefined_value()));
4876 MemsetPointer(array->data_start(), undefined_value(), length);
4881 MaybeObject* Heap::AllocateRawFixedArray(int length, PretenureFlag pretenure) {
4882 if (length < 0 || length > FixedArray::kMaxLength) {
4883 return Failure::OutOfMemoryException();
4886 AllocationSpace space =
4887 (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
4888 int size = FixedArray::SizeFor(length);
4889 if (space == NEW_SPACE && size > kMaxObjectSizeInNewSpace) {
4890 // Too big for new space.
space = LO_SPACE;
4892 } else if (space == OLD_POINTER_SPACE &&
4893 size > Page::kMaxNonCodeHeapObjectSize) {
4894 // Too big for old pointer space.
space = LO_SPACE;
}
4898 AllocationSpace retry_space =
4899 (size <= Page::kMaxNonCodeHeapObjectSize) ? OLD_POINTER_SPACE : LO_SPACE;
4901 return AllocateRaw(size, space, retry_space);
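// Shared helper for the pretenurable FixedArray allocators below: allocates
// the backing store and initializes every element with the given filler
// value.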
4905 MUST_USE_RESULT static MaybeObject* AllocateFixedArrayWithFiller(
Heap* heap,
int length,
4908 PretenureFlag pretenure,
Object* filler) {
4910 ASSERT(length >= 0);
4911 ASSERT(heap->empty_fixed_array()->IsFixedArray());
4912 if (length == 0) return heap->empty_fixed_array();
4914 ASSERT(!heap->InNewSpace(filler));
4916 { MaybeObject* maybe_result = heap->AllocateRawFixedArray(length, pretenure);
4917 if (!maybe_result->ToObject(&result)) return maybe_result;
4920 HeapObject::cast(result)->set_map_no_write_barrier(heap->fixed_array_map());
4921 FixedArray* array = FixedArray::cast(result);
4922 array->set_length(length);
4923 MemsetPointer(array->data_start(), filler, length);
4928 MaybeObject* Heap::AllocateFixedArray(int length, PretenureFlag pretenure) {
4929 return AllocateFixedArrayWithFiller(this,
length,
pretenure,
undefined_value());
4936 MaybeObject* Heap::AllocateFixedArrayWithHoles(int length,
4937 PretenureFlag pretenure) {
4938 return AllocateFixedArrayWithFiller(this,
length,
pretenure,
the_hole_value());
4945 MaybeObject* Heap::AllocateUninitializedFixedArray(int length) {
4946 if (length == 0) return empty_fixed_array();
4949 { MaybeObject* maybe_obj = AllocateRawFixedArray(length);
4950 if (!maybe_obj->ToObject(&obj)) return maybe_obj;
4953 reinterpret_cast<FixedArray*>(obj)->set_map_no_write_barrier(
4955 FixedArray::cast(obj)->set_length(length);
4960 MaybeObject* Heap::AllocateEmptyFixedDoubleArray() {
4961 int size = FixedDoubleArray::SizeFor(0);
4963 { MaybeObject* maybe_result =
4964 AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE);
4965 if (!maybe_result->ToObject(&result)) return maybe_result;
4967 // Initialize the object.
4968 reinterpret_cast<FixedDoubleArray*>(result)->set_map_no_write_barrier(
4969 fixed_double_array_map());
4970 reinterpret_cast<FixedDoubleArray*>(result)->set_length(0);
4975 MaybeObject* Heap::AllocateUninitializedFixedDoubleArray(
int length,
4977 PretenureFlag pretenure) {
4978 if (length == 0) return empty_fixed_array();
4980 Object* elements_object;
4981 MaybeObject* maybe_obj = AllocateRawFixedDoubleArray(length, pretenure);
4982 if (!maybe_obj->ToObject(&elements_object)) return maybe_obj;
4983 FixedDoubleArray* elements =
4984 reinterpret_cast<FixedDoubleArray*>(elements_object);
4986 elements->set_map_no_write_barrier(fixed_double_array_map());
4987 elements->set_length(length);
4992 MaybeObject* Heap::AllocateFixedDoubleArrayWithHoles(
int length,
4994 PretenureFlag pretenure) {
4995 if (length == 0) return empty_fixed_array();
4997 Object* elements_object;
4998 MaybeObject* maybe_obj = AllocateRawFixedDoubleArray(length, pretenure);
4999 if (!maybe_obj->ToObject(&elements_object)) return maybe_obj;
5000 FixedDoubleArray* elements =
5001 reinterpret_cast<FixedDoubleArray*>(elements_object);
5003 for (int i = 0; i < length; ++i) {
5004 elements->set_the_hole(i);
5007 elements->set_map_no_write_barrier(fixed_double_array_map());
5008 elements->set_length(length);
5013 MaybeObject* Heap::AllocateRawFixedDoubleArray(int length,
5014 PretenureFlag pretenure) {
5015 if (length < 0 || length > FixedDoubleArray::kMaxLength) {
5016 return Failure::OutOfMemoryException();
5019 AllocationSpace space =
5020 (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
5021 int size = FixedDoubleArray::SizeFor(length);
5023 #ifndef V8_HOST_ARCH_64_BIT
5024 size += kPointerSize;
5027 if (space == NEW_SPACE && size > kMaxObjectSizeInNewSpace) {
5028 // Too big for new space.
space = LO_SPACE;
5030 } else if (space == OLD_DATA_SPACE &&
5031 size > Page::kMaxNonCodeHeapObjectSize) {
5032 // Too big for old data space.
space = LO_SPACE;
}
5036 AllocationSpace retry_space =
5037 (size <= Page::kMaxNonCodeHeapObjectSize) ? OLD_DATA_SPACE : LO_SPACE;
5040 { MaybeObject* maybe_object = AllocateRaw(size, space, retry_space);
5041 if (!maybe_object->To<HeapObject>(&object)) return maybe_object;
5044 return EnsureDoubleAligned(this, object, size);
5048 MaybeObject* Heap::AllocateHashTable(int length, PretenureFlag pretenure) {
5050 { MaybeObject* maybe_result = AllocateFixedArray(length, pretenure);
5051 if (!maybe_result->ToObject(&result)) return maybe_result;
5053 reinterpret_cast<HeapObject*>(result)->set_map_no_write_barrier(
hash_table_map());
5055 ASSERT(result->IsHashTable());
5060 MaybeObject* Heap::AllocateNativeContext() {
5062 { MaybeObject* maybe_result =
5063 AllocateFixedArray(Context::NATIVE_CONTEXT_SLOTS);
5064 if (!maybe_result->ToObject(&result)) return maybe_result;
5066 Context* context = reinterpret_cast<Context*>(result);
5067 context->set_map_no_write_barrier(native_context_map());
5068 context->set_js_array_maps(undefined_value());
5069 ASSERT(context->IsNativeContext());
5070 ASSERT(result->IsContext());
5075 MaybeObject* Heap::AllocateGlobalContext(JSFunction* function,
5076 ScopeInfo* scope_info) {
5078 { MaybeObject* maybe_result =
5079 AllocateFixedArray(scope_info->ContextLength(), TENURED);
5080 if (!maybe_result->ToObject(&result)) return maybe_result;
5082 Context* context = reinterpret_cast<Context*>(result);
5083 context->set_map_no_write_barrier(global_context_map());
5084 context->set_closure(function);
5085 context->set_previous(function->context());
5086 context->set_extension(scope_info);
5087 context->set_global_object(function->context()->global_object());
5088 ASSERT(context->IsGlobalContext());
5089 ASSERT(result->IsContext());
5094 MaybeObject* Heap::AllocateModuleContext(ScopeInfo* scope_info) {
5096 { MaybeObject* maybe_result =
5097 AllocateFixedArray(scope_info->ContextLength(), TENURED);
5098 if (!maybe_result->ToObject(&result)) return maybe_result;
5100 Context* context = reinterpret_cast<Context*>(result);
5101 context->set_map_no_write_barrier(module_context_map());
5102 // Instance link will be set later.
5103 context->set_extension(Smi::FromInt(0));
5108 MaybeObject* Heap::AllocateFunctionContext(int length, JSFunction* function) {
5109 ASSERT(length >= Context::MIN_CONTEXT_SLOTS);
5111 { MaybeObject* maybe_result = AllocateFixedArray(length);
5112 if (!maybe_result->ToObject(&result)) return maybe_result;
5114 Context* context = reinterpret_cast<Context*>(result);
5115 context->set_map_no_write_barrier(function_context_map());
5116 context->set_closure(function);
5117 context->set_previous(function->context());
5118 context->set_extension(Smi::FromInt(0));
5119 context->set_global_object(function->context()->global_object());
5124 MaybeObject* Heap::AllocateCatchContext(JSFunction* function,
Context* previous,
String* name,
5127 Object* thrown_object) {
5128 STATIC_ASSERT(Context::MIN_CONTEXT_SLOTS == Context::THROWN_OBJECT_INDEX);
5130 { MaybeObject* maybe_result =
5131 AllocateFixedArray(Context::MIN_CONTEXT_SLOTS + 1);
5132 if (!maybe_result->ToObject(&result)) return maybe_result;
5134 Context* context = reinterpret_cast<Context*>(result);
5135 context->set_map_no_write_barrier(catch_context_map());
5136 context->set_closure(function);
5137 context->set_previous(previous);
5138 context->set_extension(name);
5139 context->set_global_object(previous->global_object());
5140 context->set(Context::THROWN_OBJECT_INDEX, thrown_object);
5145 MaybeObject* Heap::AllocateWithContext(JSFunction* function,
Context* previous,
5147 JSObject* extension) {
5149 { MaybeObject* maybe_result = AllocateFixedArray(Context::MIN_CONTEXT_SLOTS);
5150 if (!maybe_result->ToObject(&result)) return maybe_result;
5152 Context* context = reinterpret_cast<Context*>(result);
5153 context->set_map_no_write_barrier(with_context_map());
5154 context->set_closure(function);
5155 context->set_previous(previous);
5156 context->set_extension(extension);
5157 context->set_global_object(previous->global_object());
5162 MaybeObject* Heap::AllocateBlockContext(JSFunction* function,
Context* previous,
5164 ScopeInfo* scope_info) {
5166 { MaybeObject* maybe_result =
5167 AllocateFixedArrayWithHoles(scope_info->ContextLength());
5168 if (!maybe_result->ToObject(&result)) return maybe_result;
5170 Context* context = reinterpret_cast<Context*>(result);
5171 context->set_map_no_write_barrier(block_context_map());
5172 context->set_closure(function);
5173 context->set_previous(previous);
5174 context->set_extension(scope_info);
5175 context->set_global_object(previous->global_object());
5180 MaybeObject* Heap::AllocateScopeInfo(int length) {
5181 FixedArray* scope_info;
5182 MaybeObject* maybe_scope_info = AllocateFixedArray(length, TENURED);
5183 if (!maybe_scope_info->To(&scope_info)) return maybe_scope_info;
5184 scope_info->set_map_no_write_barrier(scope_info_map());
5189 MaybeObject* Heap::AllocateExternal(void* value) {
5191 { MaybeObject* maybe_result = AllocateForeign(static_cast<Address>(value));
5192 if (!maybe_result->To(&foreign)) return maybe_result;
5195 { MaybeObject* maybe_result = AllocateJSObjectFromMap(external_map());
5196 if (!maybe_result->To(&external)) return maybe_result;
5198 external->SetInternalField(0, foreign);
5203 MaybeObject* Heap::AllocateStruct(InstanceType type) {
5206 #define MAKE_CASE(NAME, Name, name) \
5207 case NAME##_TYPE: map = name##_map(); break;
5208 STRUCT_LIST(MAKE_CASE)
5212 return Failure::InternalError();
5214 int size = map->instance_size();
5215 AllocationSpace space =
5216 (size > Page::kMaxNonCodeHeapObjectSize) ? LO_SPACE : OLD_POINTER_SPACE;
5218 { MaybeObject* maybe_result = Allocate(map, space);
5219 if (!maybe_result->ToObject(&result)) return maybe_result;
5221 Struct::cast(result)->InitializeBody(size);
5226 bool Heap::IsHeapIterable() {
5227 return (!old_pointer_space()->was_swept_conservatively() &&
5228 !old_data_space()->was_swept_conservatively());
5232 void Heap::EnsureHeapIsIterable() {
5233 ASSERT(IsAllocationAllowed());
5234 if (!IsHeapIterable()) {
5235 CollectAllGarbage(kMakeHeapIterableMask, "Heap::EnsureHeapIsIterable");
5237 ASSERT(IsHeapIterable());
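// Performs one incremental marking step of the given size and, once marking
// has completed, finalizes it with a full garbage collection, optionally
// shrinking and uncommitting the unused part of new space.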
5241 void Heap::AdvanceIdleIncrementalMarking(intptr_t step_size) {
5242 incremental_marking()->Step(step_size,
5243 IncrementalMarking::NO_GC_VIA_STACK_GUARD);
5245 if (incremental_marking()->IsComplete()) {
5246 bool uncommit = false;
5247 if (gc_count_at_last_idle_gc_ == gc_count_) {
5248 // No GC since the last full GC, the mutator is probably not active.
5249 isolate_->compilation_cache()->Clear();
uncommit = true;
}
5252 CollectAllGarbage(kNoGCFlags, "idle notification: finalize incremental");
5253 gc_count_at_last_idle_gc_ = gc_count_;
if (uncommit) {
5255 new_space_.Shrink();
5256 UncommitFromSpace();
}
5262 bool Heap::IdleNotification(int hint) {
5263 // Hints greater than this value indicate that
5264 // the embedder is requesting a lot of GC work.
5265 const int kMaxHint = 1000;
5266 // Minimal hint that allows a full GC to be performed.
5267 const int kMinHintForFullGC = 100;
5268 intptr_t size_factor = Min(Max(hint, 20), kMaxHint) / 4;
5269 // The size factor is in range [5..250]. The numbers here are chosen from
5270 // experiments. If you change them, make sure to test with
5271 // chrome/performance_ui_tests --gtest_filter="GeneralMixMemoryTest.*".
5272 intptr_t step_size =
5273 size_factor * IncrementalMarking::kAllocatedThreshold;
5275 if (contexts_disposed_ > 0) {
5276 if (hint >= kMaxHint) {
5277 // The embedder is requesting a lot of GC work after context disposal;
5278 // we age inline caches so that they don't keep objects from
5279 // the old context alive.
5282 int mark_sweep_time = Min(TimeMarkSweepWouldTakeInMs(), 1000);
5283 if (hint >= mark_sweep_time && !FLAG_expose_gc &&
5284 incremental_marking()->IsStopped()) {
5285 HistogramTimerScope scope(isolate_->counters()->gc_context());
5286 CollectAllGarbage(kReduceMemoryFootprintMask,
5287 "idle notification: contexts disposed");
} else {
5289 AdvanceIdleIncrementalMarking(step_size);
5290 contexts_disposed_ = 0;
5292 // After context disposal there is likely a lot of garbage remaining, reset
5293 // the idle notification counters in order to trigger more incremental GCs
5294 // on subsequent idle notifications.
5299 if (!FLAG_incremental_marking || FLAG_expose_gc || Serializer::enabled()) {
5300 return IdleGlobalGC();
5303 // By doing small chunks of GC work in each IdleNotification,
5304 // perform a round of incremental GCs and after that wait until
5305 // the mutator creates enough garbage to justify a new round.
5306 // An incremental GC progresses as follows:
5307 // 1. many incremental marking steps,
5308 // 2. one old space mark-sweep-compact,
5309 // 3. many lazy sweep steps.
5310 // Use mark-sweep-compact events to count incremental GCs in a round.
5313 if (incremental_marking()->IsStopped()) {
5314 if (!IsSweepingComplete() &&
5315 !AdvanceSweepers(static_cast<int>(step_size))) {
5320 if (mark_sweeps_since_idle_round_started_ >= kMaxMarkSweepsInIdleRound) {
5321 if (EnoughGarbageSinceLastIdleRound()) {
5328 int new_mark_sweeps = ms_count_ - ms_count_at_last_idle_notification_;
5329 mark_sweeps_since_idle_round_started_ += new_mark_sweeps;
5330 ms_count_at_last_idle_notification_ = ms_count_;
5332 int remaining_mark_sweeps = kMaxMarkSweepsInIdleRound -
5333 mark_sweeps_since_idle_round_started_;
5335 if (remaining_mark_sweeps <= 0) {
5340 if (incremental_marking()->IsStopped()) {
5341 // If there are no more than two GCs left in this idle round and we are
5342 // allowed to do a full GC, then make those GCs full in order to compact
5344 // TODO(ulan): Once we enable code compaction for incremental marking,
5345 // we can get rid of this special case and always start incremental marking.
5346 if (remaining_mark_sweeps <= 2 && hint >= kMinHintForFullGC) {
5347 CollectAllGarbage(kReduceMemoryFootprintMask,
5348 "idle notification: finalize idle round");
5350 incremental_marking()->Start();
5353 if (!incremental_marking()->IsStopped()) {
5354 AdvanceIdleIncrementalMarking(step_size);
5360 bool Heap::IdleGlobalGC() {
5361 static const int kIdlesBeforeScavenge = 4;
5362 static const int kIdlesBeforeMarkSweep = 7;
5363 static const int kIdlesBeforeMarkCompact = 8;
5364 static const int kMaxIdleCount = kIdlesBeforeMarkCompact + 1;
5365 static const unsigned int kGCsBetweenCleanup = 4;
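// With these constants, a run of consecutive idle notifications roughly
// plays out as follows: the 4th triggers a new-space scavenge, the 7th
// clears the compilation cache and performs a full collection, and the 8th
// performs another full collection and resets the counter; kGCsBetweenCleanup
// or more intervening regular GCs also reset the counter.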
5367 if (!last_idle_notification_gc_count_init_) {
5368 last_idle_notification_gc_count_ = gc_count_;
5369 last_idle_notification_gc_count_init_ = true;
5372 bool uncommit = true;
5373 bool finished = false;
5375 // Reset the number of idle notifications received when a number of
5376 // GCs have taken place. This allows another round of cleanup based
5377 // on idle notifications if enough work has been carried out to
5378 // provoke a number of garbage collections.
5379 if (gc_count_ - last_idle_notification_gc_count_ < kGCsBetweenCleanup) {
5380 number_idle_notifications_ =
5381 Min(number_idle_notifications_ + 1, kMaxIdleCount);
5383 number_idle_notifications_ = 0;
5384 last_idle_notification_gc_count_ = gc_count_;
5387 if (number_idle_notifications_ == kIdlesBeforeScavenge) {
5388 CollectGarbage(NEW_SPACE, "idle notification");
5389 new_space_.Shrink();
5390 last_idle_notification_gc_count_ = gc_count_;
5391 } else if (number_idle_notifications_ == kIdlesBeforeMarkSweep) {
5392 // Before doing the mark-sweep collections we clear the
5393 // compilation cache to avoid hanging on to source code and
5394 // generated code for cached functions.
5395 isolate_->compilation_cache()->Clear();
5397 CollectAllGarbage(kReduceMemoryFootprintMask, "idle notification");
5398 new_space_.Shrink();
5399 last_idle_notification_gc_count_ = gc_count_;
5401 } else if (number_idle_notifications_ == kIdlesBeforeMarkCompact) {
5402 CollectAllGarbage(kReduceMemoryFootprintMask, "idle notification");
5403 new_space_.Shrink();
5404 last_idle_notification_gc_count_ = gc_count_;
5405 number_idle_notifications_ = 0;
5407 } else if (number_idle_notifications_ > kIdlesBeforeMarkCompact) {
5408 // If we have received more than kIdlesBeforeMarkCompact idle
5409 // notifications we do not perform any cleanup because we don't
5410 // expect to gain much by doing so.
5414 if (uncommit) UncommitFromSpace();
5422 void Heap::Print() {
5423 if (!HasBeenSetUp()) return;
5424 isolate()->PrintStack();
5426 for (Space* space = spaces.next(); space != NULL; space = spaces.next())
5431 void Heap::ReportCodeStatistics(const char* title) {
5432 PrintF(">>>>>> Code Stats (%s) >>>>>>\n", title);
5433 PagedSpace::ResetCodeStatistics();
5434 // We do not look for code in new space, map space, or old space. If code
5435 // somehow ends up in those spaces, we would miss it here.
5436 code_space_->CollectCodeStatistics();
5437 lo_space_->CollectCodeStatistics();
5438 PagedSpace::ReportCodeStatistics();
5442 // This function expects that NewSpace's allocated objects histogram is
5443 // populated (via a call to CollectStatistics or else as a side effect of a
5444 // just-completed scavenge collection).
5445 void Heap::ReportHeapStatistics(const char* title) {
5447 PrintF(">>>>>> =============== %s (%d) =============== >>>>>>\n",
5449 PrintF("old_gen_promotion_limit_ %" V8_PTR_PREFIX "d\n",
5450 old_gen_promotion_limit_);
5451 PrintF("old_gen_allocation_limit_ %" V8_PTR_PREFIX "d\n",
5452 old_gen_allocation_limit_);
5453 PrintF("old_gen_limit_factor_ %d\n", old_gen_limit_factor_);
5456 PrintF("Number of handles : %d\n", HandleScope::NumberOfHandles());
5457 isolate_->global_handles()->PrintStats();
5460 PrintF("Heap statistics : ");
5461 isolate_->memory_allocator()->ReportStatistics();
5462 PrintF("To space : ");
5463 new_space_.ReportStatistics();
5464 PrintF("Old pointer space : ");
5465 old_pointer_space_->ReportStatistics();
5466 PrintF("Old data space : ");
5467 old_data_space_->ReportStatistics();
5468 PrintF("Code space : ");
5469 code_space_->ReportStatistics();
5470 PrintF("Map space : ");
5471 map_space_->ReportStatistics();
5472 PrintF("Cell space : ");
5473 cell_space_->ReportStatistics();
5474 PrintF("Large object space : ");
5475 lo_space_->ReportStatistics();
5476 PrintF(">>>>>> ========================================= >>>>>>\n");
5481 bool Heap::Contains(HeapObject* value) {
5482 return Contains(value->address());
5486 bool Heap::Contains(Address addr) {
5487 if (OS::IsOutsideAllocatedSpace(addr)) return false;
5488 return HasBeenSetUp() &&
5489 (new_space_.ToSpaceContains(addr) ||
5490 old_pointer_space_->Contains(addr) ||
5491 old_data_space_->Contains(addr) ||
5492 code_space_->Contains(addr) ||
5493 map_space_->Contains(addr) ||
5494 cell_space_->Contains(addr) ||
5495 lo_space_->SlowContains(addr));
5499 bool Heap::InSpace(HeapObject* value, AllocationSpace space) {
5500 return InSpace(value->address(), space);
5504 bool Heap::InSpace(Address addr, AllocationSpace space) {
5505 if (OS::IsOutsideAllocatedSpace(addr)) return false;
5506 if (!HasBeenSetUp()) return false;
5510 return new_space_.ToSpaceContains(addr);
5511 case OLD_POINTER_SPACE:
5512 return old_pointer_space_->Contains(addr);
5513 case OLD_DATA_SPACE:
5514 return old_data_space_->Contains(addr);
5516 return code_space_->Contains(addr);
5518 return map_space_->Contains(addr);
5520 return cell_space_->Contains(addr);
5522 return lo_space_->SlowContains(addr);
5530 void Heap::Verify() {
5531 CHECK(HasBeenSetUp());
5533 store_buffer()->Verify();
5535 VerifyPointersVisitor visitor;
5536 IterateRoots(&visitor, VISIT_ONLY_STRONG);
5538 new_space_.Verify();
5540 old_pointer_space_->Verify(&visitor);
5541 map_space_->Verify(&visitor);
5543 VerifyPointersVisitor no_dirty_regions_visitor;
5544 old_data_space_->Verify(&no_dirty_regions_visitor);
5545 code_space_->Verify(&no_dirty_regions_visitor);
5546 cell_space_->Verify(&no_dirty_regions_visitor);
5548 lo_space_->Verify();
5553 MaybeObject* Heap::LookupSymbol(Vector<const char> string) {
5554 Object* symbol = NULL;
5556 { MaybeObject* maybe_new_table =
5557 symbol_table()->LookupSymbol(string, &symbol);
5558 if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
5560 // Can't use set_symbol_table because SymbolTable::cast knows that
5561 // SymbolTable is a singleton and checks for identity.
5562 roots_[kSymbolTableRootIndex] = new_table;
5563 ASSERT(symbol != NULL);
5568 MaybeObject* Heap::LookupAsciiSymbol(Vector<const char> string) {
5569 Object* symbol = NULL;
5571 { MaybeObject* maybe_new_table =
5572 symbol_table()->LookupAsciiSymbol(string, &symbol);
5573 if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
5575 // Can't use set_symbol_table because SymbolTable::cast knows that
5576 // SymbolTable is a singleton and checks for identity.
5577 roots_[kSymbolTableRootIndex] = new_table;
5578 ASSERT(symbol != NULL);
5583 MaybeObject* Heap::LookupAsciiSymbol(Handle<SeqOneByteString> string,
5586 Object* symbol = NULL;
5588 { MaybeObject* maybe_new_table =
5589 symbol_table()->LookupSubStringAsciiSymbol(string,
5593 if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
5595 // Can't use set_symbol_table because SymbolTable::cast knows that
5596 // SymbolTable is a singleton and checks for identity.
5597 roots_[kSymbolTableRootIndex] = new_table;
5598 ASSERT(symbol != NULL);
5603 MaybeObject* Heap::LookupTwoByteSymbol(Vector<const uc16> string) {
5604 Object* symbol = NULL;
5606 { MaybeObject* maybe_new_table =
5607 symbol_table()->LookupTwoByteSymbol(string, &symbol);
5608 if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
5610 // Can't use set_symbol_table because SymbolTable::cast knows that
5611 // SymbolTable is a singleton and checks for identity.
5612 roots_[kSymbolTableRootIndex] = new_table;
5613 ASSERT(symbol != NULL);
5618 MaybeObject* Heap::LookupSymbol(String* string) {
5619 if (string->IsSymbol()) return string;
5620 Object* symbol = NULL;
5622 { MaybeObject* maybe_new_table =
5623 symbol_table()->LookupString(string, &symbol);
5624 if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
5626 // Can't use set_symbol_table because SymbolTable::cast knows that
5627 // SymbolTable is a singleton and checks for identity.
5628 roots_[kSymbolTableRootIndex] = new_table;
5629 ASSERT(symbol != NULL);
5634 bool Heap::LookupSymbolIfExists(String* string, String** symbol) {
5635 if (string->IsSymbol()) {
5639 return symbol_table()->LookupSymbolIfExists(string, symbol);
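// Overwrite every pointer slot in from-space with a recognizable zap value so
// that stale references into the evacuated semispace stand out in crash dumps
// and debug checks.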
5643 void Heap::ZapFromSpace() {
5644 NewSpacePageIterator it(new_space_.FromSpaceStart(),
5645 new_space_.FromSpaceEnd());
5646 while (it.has_next()) {
5647 NewSpacePage* page = it.next();
5648 for (Address cursor = page->area_start(), limit = page->area_end();
5650 cursor += kPointerSize) {
5651 Memory::Address_at(cursor) = kFromSpaceZapValue;
5657 void Heap::IterateAndMarkPointersToFromSpace(Address start,
5659 ObjectSlotCallback callback) {
5660 Address slot_address = start;
5662 // We are not collecting slots on new space objects during mutation,
5663 // thus we have to scan for pointers to evacuation candidates when we
5664 // promote objects. But we should not record any slots in non-black
5665 // objects. Grey objects' slots would be rescanned anyway.
5666 // A white object might not survive until the end of the collection, so
5667 // it would be a violation of the invariant to record its slots.
5668 bool record_slots = false;
5669 if (incremental_marking()->IsCompacting()) {
5670 MarkBit mark_bit = Marking::MarkBitFrom(HeapObject::FromAddress(start));
5671 record_slots = Marking::IsBlack(mark_bit);
5674 while (slot_address < end) {
5675 Object** slot = reinterpret_cast<Object**>(slot_address);
5676 Object* object = *slot;
5677 // If the store buffer becomes overfull we mark pages as being exempt from
5678 // the store buffer. These pages are scanned to find pointers that point
5679 // to the new space. In that case we may hit newly promoted objects and
5680 // fix the pointers before the promotion queue gets to them. Thus the 'if'.
5681 if (object->IsHeapObject()) {
5682 if (Heap::InFromSpace(object)) {
5683 callback(reinterpret_cast<HeapObject**>(slot),
5684 HeapObject::cast(object));
5685 Object* new_object = *slot;
5686 if (InNewSpace(new_object)) {
5687 SLOW_ASSERT(Heap::InToSpace(new_object));
5688 SLOW_ASSERT(new_object->IsHeapObject());
5689 store_buffer_.EnterDirectlyIntoStoreBuffer(
5690 reinterpret_cast<Address>(slot));
5692 SLOW_ASSERT(!MarkCompactCollector::IsOnEvacuationCandidate(new_object));
5693 } else if (record_slots &&
5694 MarkCompactCollector::IsOnEvacuationCandidate(object)) {
5695 mark_compact_collector()->RecordSlot(slot, slot, object);
5698 slot_address += kPointerSize;
5704 typedef bool (*CheckStoreBufferFilter)(Object** addr);
5707 bool IsAMapPointerAddress(Object** addr) {
5708 uintptr_t a = reinterpret_cast<uintptr_t>(addr);
5709 int mod = a % Map::kSize;
5710 return mod >= Map::kPointerFieldsBeginOffset &&
5711 mod < Map::kPointerFieldsEndOffset;
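// Heuristic used when verifying the map space: treat the address modulo
// Map::kSize as an offset within a Map (maps are allocated back to back
// there) and accept only offsets inside the Map's pointer-field range.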
5715 bool EverythingsAPointer(Object** addr) {
5720 static void CheckStoreBuffer(Heap* heap,
5723 Object**** store_buffer_position,
5724 Object*** store_buffer_top,
5725 CheckStoreBufferFilter filter,
5726 Address special_garbage_start,
5727 Address special_garbage_end) {
5728 Map* free_space_map = heap->free_space_map();
5729 for ( ; current < limit; current++) {
5730 Object* o = *current;
5731 Address current_address = reinterpret_cast<Address>(current);
5733 if (o == free_space_map) {
5734 Address current_address = reinterpret_cast<Address>(current);
5735 FreeSpace* free_space =
5736 FreeSpace::cast(HeapObject::FromAddress(current_address));
5737 int skip = free_space->Size();
5738 ASSERT(current_address + skip <= reinterpret_cast<Address>(limit));
5740 current_address += skip - kPointerSize;
5741 current = reinterpret_cast<Object**>(current_address);
5744 // Skip the current linear allocation space between top and limit which is
5745 // unmarked with the free space map, but can contain junk.
5746 if (current_address == special_garbage_start &&
5747 special_garbage_end != special_garbage_start) {
5748 current_address = special_garbage_end - kPointerSize;
5749 current = reinterpret_cast<Object**>(current_address);
5752 if (!(*filter)(current)) continue;
5753 ASSERT(current_address < special_garbage_start ||
5754 current_address >= special_garbage_end);
5755 ASSERT(reinterpret_cast<uintptr_t>(o) != kFreeListZapValue);
5756 // We have to check that the pointer does not point into new space
5757 // without trying to cast it to a heap object since the hash field of
5758 // a string can contain values like 1 and 3 which are tagged null
5760 if (!heap->InNewSpace(o)) continue;
5761 while (**store_buffer_position < current &&
5762 *store_buffer_position < store_buffer_top) {
5763 (*store_buffer_position)++;
5765 if (**store_buffer_position != current ||
5766 *store_buffer_position == store_buffer_top) {
5767 Object** obj_start = current;
5768 while (!(*obj_start)->IsMap()) obj_start--;
5775 // Check that the store buffer contains all intergenerational pointers by
5776 // scanning a page and ensuring that all pointers to young space are in the store buffer.
5778 void Heap::OldPointerSpaceCheckStoreBuffer() {
5779 OldSpace* space = old_pointer_space();
5780 PageIterator pages(space);
5782 store_buffer()->SortUniq();
5784 while (pages.has_next()) {
5785 Page* page = pages.next();
5786 Object** current = reinterpret_cast<Object**>(page->area_start());
5788 Address end = page->area_end();
5790 Object*** store_buffer_position = store_buffer()->Start();
5791 Object*** store_buffer_top = store_buffer()->Top();
5793 Object** limit = reinterpret_cast<Object**>(end);
5794 CheckStoreBuffer(this,
5797 &store_buffer_position,
5799 &EverythingsAPointer,
5806 void Heap::MapSpaceCheckStoreBuffer() {
5807 MapSpace* space = map_space();
5808 PageIterator pages(space);
5810 store_buffer()->SortUniq();
5812 while (pages.has_next()) {
5813 Page* page = pages.next();
5814 Object** current = reinterpret_cast<Object**>(page->area_start());
5816 Address end = page->area_end();
5818 Object*** store_buffer_position = store_buffer()->Start();
5819 Object*** store_buffer_top = store_buffer()->Top();
5821 Object** limit = reinterpret_cast<Object**>(end);
5822 CheckStoreBuffer(this,
5825 &store_buffer_position,
5827 &IsAMapPointerAddress,
5834 void Heap::LargeObjectSpaceCheckStoreBuffer() {
5835 LargeObjectIterator it(lo_space());
5836 for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
5837 // We only have code, sequential strings, or fixed arrays in large
5838 // object space, and only fixed arrays can possibly contain pointers to
5839 // the young generation.
5840 if (object->IsFixedArray()) {
5841 Object*** store_buffer_position = store_buffer()->Start();
5842 Object*** store_buffer_top = store_buffer()->Top();
5843 Object** current = reinterpret_cast<Object**>(object->address());
5845 reinterpret_cast<Object**>(object->address() + object->Size());
5846 CheckStoreBuffer(this,
5849 &store_buffer_position,
5851 &EverythingsAPointer,
5860 void Heap::IterateRoots(ObjectVisitor* v, VisitMode mode) {
5861 IterateStrongRoots(v, mode);
5862 IterateWeakRoots(v, mode);
5866 void Heap::IterateWeakRoots(ObjectVisitor* v, VisitMode mode) {
5867 v->VisitPointer(reinterpret_cast<Object**>(&roots_[kSymbolTableRootIndex]));
5868 v->Synchronize(VisitorSynchronization::kSymbolTable);
5869 if (mode != VISIT_ALL_IN_SCAVENGE &&
5870 mode != VISIT_ALL_IN_SWEEP_NEWSPACE) {
5871 // Scavenge collections have special processing for this.
5872 external_string_table_.Iterate(v);
5874 v->Synchronize(VisitorSynchronization::kExternalStringsTable);
5878 void Heap::IterateStrongRoots(ObjectVisitor* v, VisitMode mode) {
5879 v->VisitPointers(&roots_[0], &roots_[kStrongRootListLength]);
5880 v->Synchronize(VisitorSynchronization::kStrongRootList);
5882 v->VisitPointer(BitCast<Object**>(&hidden_symbol_));
5883 v->Synchronize(VisitorSynchronization::kSymbol);
5885 isolate_->bootstrapper()->Iterate(v);
5886 v->Synchronize(VisitorSynchronization::kBootstrapper);
5887 isolate_->Iterate(v);
5888 v->Synchronize(VisitorSynchronization::kTop);
5889 Relocatable::Iterate(v);
5890 v->Synchronize(VisitorSynchronization::kRelocatable);
5892 #ifdef ENABLE_DEBUGGER_SUPPORT
5893 isolate_->debug()->Iterate(v);
5894 if (isolate_->deoptimizer_data() != NULL) {
5895 isolate_->deoptimizer_data()->Iterate(v);
5898 v->Synchronize(VisitorSynchronization::kDebug);
5899 isolate_->compilation_cache()->Iterate(v);
5900 v->Synchronize(VisitorSynchronization::kCompilationCache);
5902 // Iterate over local handles in handle scopes.
5903 isolate_->handle_scope_implementer()->Iterate(v);
5904 isolate_->IterateDeferredHandles(v);
5905 v->Synchronize(VisitorSynchronization::kHandleScope);
5907 // Iterate over the builtin code objects and code stubs in the
5908 // heap. Note that it is not necessary to iterate over code objects
5909 // on scavenge collections.
5910 if (mode != VISIT_ALL_IN_SCAVENGE) {
5911 isolate_->builtins()->IterateBuiltins(v);
5913 v->Synchronize(VisitorSynchronization::kBuiltins);
5915 // Iterate over global handles.
5917 case VISIT_ONLY_STRONG:
5918 isolate_->global_handles()->IterateStrongRoots(v);
5920 case VISIT_ALL_IN_SCAVENGE:
5921 isolate_->global_handles()->IterateNewSpaceStrongAndDependentRoots(v);
5923 case VISIT_ALL_IN_SWEEP_NEWSPACE:
5925 isolate_->global_handles()->IterateAllRoots(v);
5928 v->Synchronize(VisitorSynchronization::kGlobalHandles);
5930 // Iterate over pointers being held by inactive threads.
5931 isolate_->thread_manager()->Iterate(v);
5932 v->Synchronize(VisitorSynchronization::kThreadManager);
5934 // Iterate over the pointers the Serialization/Deserialization code is
5936 // During garbage collection this keeps the partial snapshot cache alive.
5937 // During deserialization of the startup snapshot this creates the partial
5938 // snapshot cache and deserializes the objects it refers to. During
5939 // serialization this does nothing, since the partial snapshot cache is
5940 // empty. However the next thing we do is create the partial snapshot,
5941 // filling up the partial snapshot cache with objects it needs as we go.
5942 SerializerDeserializer::Iterate(v);
5943 // We don't do a v->Synchronize call here, because in debug mode that will
5944 // output a flag to the snapshot. However at this point the serializer and
5945 // deserializer are deliberately a little unsynchronized (see above) so the
5946 // checking of the sync flag in the snapshot would fail.
5950 // TODO(1236194): Since the heap size is configurable on the command line
5951 // and through the API, we should gracefully handle the case that the heap
5952 // size is not big enough to fit all the initial objects.
5953 bool Heap::ConfigureHeap(int max_semispace_size,
5954 intptr_t max_old_gen_size,
5955 intptr_t max_executable_size) {
5956 if (HasBeenSetUp()) return false;
5958 if (FLAG_stress_compaction) {
5959 // This will cause more frequent GCs when stressing.
5960 max_semispace_size_ = Page::kPageSize;
5963 if (max_semispace_size > 0) {
5964 if (max_semispace_size < Page::kPageSize) {
5965 max_semispace_size = Page::kPageSize;
5966 if (FLAG_trace_gc) {
5967 PrintPID("Max semispace size cannot be less than %dkbytes\n",
5968 Page::kPageSize >> 10);
5971 max_semispace_size_ = max_semispace_size;
5974 if (Snapshot::IsEnabled()) {
5975 // If we are using a snapshot we always reserve the default amount
5976 // of memory for each semispace because code in the snapshot has
5977 // write-barrier code that relies on the size and alignment of new
5978 // space. We therefore cannot use a larger max semispace size
5979 // than the default reserved semispace size.
5980 if (max_semispace_size_ > reserved_semispace_size_) {
5981 max_semispace_size_ = reserved_semispace_size_;
5982 if (FLAG_trace_gc) {
5983 PrintPID("Max semispace size cannot be more than %dkbytes\n",
5984 reserved_semispace_size_ >> 10);
5988 // If we are not using snapshots we reserve space for the actual
5989 // max semispace size.
5990 reserved_semispace_size_ = max_semispace_size_;
5993 if (max_old_gen_size > 0) max_old_generation_size_ = max_old_gen_size;
5994 if (max_executable_size > 0) {
5995 max_executable_size_ = RoundUp(max_executable_size, Page::kPageSize);
5998 // The max executable size must be less than or equal to the max old
6000 if (max_executable_size_ > max_old_generation_size_) {
6001 max_executable_size_ = max_old_generation_size_;
6004 // The new space size must be a power of two to support single-bit testing
6006 max_semispace_size_ = RoundUpToPowerOf2(max_semispace_size_);
6007 reserved_semispace_size_ = RoundUpToPowerOf2(reserved_semispace_size_);
6008 initial_semispace_size_ = Min(initial_semispace_size_, max_semispace_size_);
6009 external_allocation_limit_ = 16 * max_semispace_size_;
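// Example: a requested max semispace size of 3 MB is rounded up to 4 MB
// above, and the external allocation limit then becomes 16 * 4 MB = 64 MB.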
6011 // The old generation is paged and needs at least one page for each space.
6012 int paged_space_count = LAST_PAGED_SPACE - FIRST_PAGED_SPACE + 1;
6013 max_old_generation_size_ = Max(static_cast<intptr_t>(paged_space_count *
6015 RoundUp(max_old_generation_size_,
6023 bool Heap::ConfigureHeapDefault() {
6024 return ConfigureHeap(static_cast<intptr_t>(FLAG_max_new_space_size / 2) * KB,
6025 static_cast<intptr_t>(FLAG_max_old_space_size) * MB,
6026 static_cast<intptr_t>(FLAG_max_executable_size) * MB);
6030 void Heap::RecordStats(HeapStats* stats, bool take_snapshot) {
6031 *stats->start_marker = HeapStats::kStartMarker;
6032 *stats->end_marker = HeapStats::kEndMarker;
6033 *stats->new_space_size = new_space_.SizeAsInt();
6034 *stats->new_space_capacity = static_cast<int>(new_space_.Capacity());
6035 *stats->old_pointer_space_size = old_pointer_space_->SizeOfObjects();
6036 *stats->old_pointer_space_capacity = old_pointer_space_->Capacity();
6037 *stats->old_data_space_size = old_data_space_->SizeOfObjects();
6038 *stats->old_data_space_capacity = old_data_space_->Capacity();
6039 *stats->code_space_size = code_space_->SizeOfObjects();
6040 *stats->code_space_capacity = code_space_->Capacity();
6041 *stats->map_space_size = map_space_->SizeOfObjects();
6042 *stats->map_space_capacity = map_space_->Capacity();
6043 *stats->cell_space_size = cell_space_->SizeOfObjects();
6044 *stats->cell_space_capacity = cell_space_->Capacity();
6045 *stats->lo_space_size = lo_space_->Size();
6046 isolate_->global_handles()->RecordStats(stats);
6047 *stats->memory_allocator_size = isolate()->memory_allocator()->Size();
6048 *stats->memory_allocator_capacity =
6049 isolate()->memory_allocator()->Size() +
6050 isolate()->memory_allocator()->Available();
6051 *stats->os_error = OS::GetLastError();
6053 if (take_snapshot) {
6054 HeapIterator iterator;
6055 for (HeapObject* obj = iterator.next();
6057 obj = iterator.next()) {
6058 InstanceType type = obj->map()->instance_type();
6059 ASSERT(0 <= type && type <= LAST_TYPE);
6060 stats->objects_per_type[type]++;
6061 stats->size_per_type[type] += obj->Size();
6067 intptr_t Heap::PromotedSpaceSizeOfObjects() {
6068 return old_pointer_space_->SizeOfObjects()
6069 + old_data_space_->SizeOfObjects()
6070 + code_space_->SizeOfObjects()
6071 + map_space_->SizeOfObjects()
6072 + cell_space_->SizeOfObjects()
6073 + lo_space_->SizeOfObjects();
6077 intptr_t Heap::PromotedExternalMemorySize() {
6078 if (amount_of_external_allocated_memory_
6079 <= amount_of_external_allocated_memory_at_last_global_gc_) return 0;
6080 return amount_of_external_allocated_memory_
6081 - amount_of_external_allocated_memory_at_last_global_gc_;
6085 V8_DECLARE_ONCE(initialize_gc_once);
6087 static void InitializeGCOnce() {
6088 InitializeScavengingVisitorsTables();
6089 NewSpaceScavenger::Initialize();
6090 MarkCompactCollector::Initialize();
6093 bool Heap::SetUp(bool create_heap_objects) {
6095 allocation_timeout_ = FLAG_gc_interval;
6098 // Initialize heap spaces and initial maps and objects. Whenever something
6099 // goes wrong, just return false. The caller should check the results and
6100 // call Heap::TearDown() to release allocated memory.
6102 // If the heap is not yet configured (e.g. through the API), configure it.
6103 // Configuration is based on the flags new-space-size (really the semispace
6104 // size) and old-space-size if set or the initial values of semispace_size_
6105 // and old_generation_size_ otherwise.
6107 if (!ConfigureHeapDefault()) return false;
6110 CallOnce(&initialize_gc_once, &InitializeGCOnce);
6112 MarkMapPointersAsEncoded(false);
6114 // Set up memory allocator.
6115 if (!isolate_->memory_allocator()->SetUp(MaxReserved(), MaxExecutableSize()))
6118 // Set up new space.
6119 if (!new_space_.SetUp(reserved_semispace_size_, max_semispace_size_)) {
6123 // Initialize old pointer space.
6124 old_pointer_space_ =
6126 max_old_generation_size_,
6129 if (old_pointer_space_ == NULL) return false;
6130 if (!old_pointer_space_->SetUp()) return false;
6132 // Initialize old data space.
6135 max_old_generation_size_,
6138 if (old_data_space_ == NULL) return false;
6139 if (!old_data_space_->SetUp()) return false;
6141 // Initialize the code space, set its maximum capacity to the old
6142 // generation size. It needs executable memory.
6143 // On 64-bit platform(s), we put all code objects in a 2 GB range of
6144 // virtual address space, so that they can call each other with near calls.
6145 if (code_range_size_ > 0) {
6146 if (!isolate_->code_range()->SetUp(code_range_size_)) {
6152 new OldSpace(this, max_old_generation_size_, CODE_SPACE, EXECUTABLE);
6153 if (code_space_ == NULL) return false;
6154 if (!code_space_->SetUp()) return false;
6156 // Initialize map space.
6157 map_space_ = new MapSpace(this, max_old_generation_size_, MAP_SPACE);
6158 if (map_space_ == NULL) return false;
6159 if (!map_space_->SetUp()) return false;
6161 // Initialize global property cell space.
6162 cell_space_ = new CellSpace(this, max_old_generation_size_, CELL_SPACE);
6163 if (cell_space_ == NULL) return false;
6164 if (!cell_space_->SetUp()) return false;
6166 // The large object space may contain code or data. We set the memory
6167 // to be non-executable here for safety, but this means we need to enable it
6168 // explicitly when allocating large code objects.
6169 lo_space_ = new LargeObjectSpace(this, max_old_generation_size_, LO_SPACE);
6170 if (lo_space_ == NULL) return false;
6171 if (!lo_space_->SetUp()) return false;
6173 // Set up the seed that is used to randomize the string hash function.
6174 ASSERT(hash_seed() == 0);
6175 if (FLAG_randomize_hashes) {
6176 if (FLAG_hash_seed == 0) {
6178 Smi::FromInt(V8::RandomPrivate(isolate()) & 0x3fffffff));
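// Masking with 0x3fffffff keeps the random seed a non-negative 30-bit value,
// which is safely within Smi range on all platforms.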
6180 set_hash_seed(Smi::FromInt(FLAG_hash_seed));
6184 if (create_heap_objects) {
6185 // Create initial maps.
6186 if (!CreateInitialMaps()) return false;
6187 if (!CreateApiObjects()) return false;
6189 // Create initial objects
6190 if (!CreateInitialObjects()) return false;
6192 native_contexts_list_ = undefined_value();
6195 LOG(isolate_, IntPtrTEvent("heap-capacity", Capacity()));
6196 LOG(isolate_, IntPtrTEvent("heap-available", Available()));
6198 store_buffer()->SetUp();
6200 if (FLAG_parallel_recompilation) relocation_mutex_ = OS::CreateMutex();
6206 void Heap::SetStackLimits() {
6207 ASSERT(isolate_ != NULL);
6208 ASSERT(isolate_ == isolate());
6209 // On 64 bit machines, pointers are generally out of range of Smis. We write
6210 // something that looks like an out of range Smi to the GC.
6212 // Set up the special root array entries containing the stack limits.
6213 // These are actually addresses, but the tag makes the GC ignore them.
6214 roots_[kStackLimitRootIndex] =
6215 reinterpret_cast<Object*>(
6216 (isolate_->stack_guard()->jslimit() & ~kSmiTagMask) | kSmiTag);
6217 roots_[kRealStackLimitRootIndex] =
6218 reinterpret_cast<Object*>(
6219 (isolate_->stack_guard()->real_jslimit() & ~kSmiTagMask) | kSmiTag);
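// Example: with kSmiTagMask == 1 and kSmiTag == 0 this simply clears the low
// bit, so a limit address of 0x12345679 is stored as 0x12345678, which the GC
// treats as an ordinary Smi and skips.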
6223 void Heap::TearDown() {
6225 if (FLAG_verify_heap) {
6230 if (FLAG_print_cumulative_gc_stat) {
6232 PrintF("gc_count=%d ", gc_count_);
6233 PrintF("mark_sweep_count=%d ", ms_count_);
6234 PrintF("max_gc_pause=%d ", get_max_gc_pause());
6235 PrintF("total_gc_time=%d ", total_gc_time_ms_);
6236 PrintF("min_in_mutator=%d ", get_min_in_mutator());
6237 PrintF("max_alive_after_gc=%" V8_PTR_PREFIX "d ",
6238 get_max_alive_after_gc());
6242 isolate_->global_handles()->TearDown();
6244 external_string_table_.TearDown();
6246 new_space_.TearDown();
6248 if (old_pointer_space_ != NULL) {
6249 old_pointer_space_->TearDown();
6250 delete old_pointer_space_;
6251 old_pointer_space_ = NULL;
6254 if (old_data_space_ != NULL) {
6255 old_data_space_->TearDown();
6256 delete old_data_space_;
6257 old_data_space_ = NULL;
6260 if (code_space_ != NULL) {
6261 code_space_->TearDown();
6266 if (map_space_ != NULL) {
6267 map_space_->TearDown();
6272 if (cell_space_ != NULL) {
6273 cell_space_->TearDown();
6278 if (lo_space_ != NULL) {
6279 lo_space_->TearDown();
6284 store_buffer()->TearDown();
6285 incremental_marking()->TearDown();
6287 isolate_->memory_allocator()->TearDown();
6289 delete relocation_mutex_;
6293 void Heap::Shrink() {
6294 // Try to shrink all paged spaces.
6296 for (PagedSpace* space = spaces.next();
6298 space = spaces.next()) {
6299 space->ReleaseAllUnusedPages();
6304 void Heap::AddGCPrologueCallback(GCPrologueCallback callback, GCType gc_type) {
6305 ASSERT(callback != NULL);
6306 GCPrologueCallbackPair pair(callback, gc_type);
6307 ASSERT(!gc_prologue_callbacks_.Contains(pair));
6308 return gc_prologue_callbacks_.Add(pair);
6312 void Heap::RemoveGCPrologueCallback(GCPrologueCallback callback) {
6313 ASSERT(callback != NULL);
6314 for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
6315 if (gc_prologue_callbacks_[i].callback == callback) {
6316 gc_prologue_callbacks_.Remove(i);
6324 void Heap::AddGCEpilogueCallback(GCEpilogueCallback callback, GCType gc_type) {
6325 ASSERT(callback != NULL);
6326 GCEpilogueCallbackPair pair(callback, gc_type);
6327 ASSERT(!gc_epilogue_callbacks_.Contains(pair));
6328 return gc_epilogue_callbacks_.Add(pair);
6332 void Heap::RemoveGCEpilogueCallback(GCEpilogueCallback callback) {
6333 ASSERT(callback != NULL);
6334 for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
6335 if (gc_epilogue_callbacks_[i].callback == callback) {
6336 gc_epilogue_callbacks_.Remove(i);
6346 class PrintHandleVisitor: public ObjectVisitor {
6348 void VisitPointers(Object** start, Object** end) {
6349 for (Object** p = start; p < end; p++)
6350 PrintF(" handle %p to %p\n",
6351 reinterpret_cast<void*>(p),
6352 reinterpret_cast<void*>(*p));
6356 void Heap::PrintHandles() {
6357 PrintF("Handles:\n");
6358 PrintHandleVisitor v;
6359 isolate_->handle_scope_implementer()->Iterate(&v);
6365 Space* AllSpaces::next() {
6366 switch (counter_++) {
6368 return HEAP->new_space();
6369 case OLD_POINTER_SPACE:
6370 return HEAP->old_pointer_space();
6371 case OLD_DATA_SPACE:
6372 return HEAP->old_data_space();
6374 return HEAP->code_space();
6376 return HEAP->map_space();
6378 return HEAP->cell_space();
6380 return HEAP->lo_space();
6387 PagedSpace* PagedSpaces::next() {
6388 switch (counter_++) {
6389 case OLD_POINTER_SPACE:
6390 return HEAP->old_pointer_space();
6391 case OLD_DATA_SPACE:
6392 return HEAP->old_data_space();
6394 return HEAP->code_space();
6396 return HEAP->map_space();
6398 return HEAP->cell_space();
6406 OldSpace* OldSpaces::next() {
6407 switch (counter_++) {
6408 case OLD_POINTER_SPACE:
6409 return HEAP->old_pointer_space();
6410 case OLD_DATA_SPACE:
6411 return HEAP->old_data_space();
6413 return HEAP->code_space();
6420 SpaceIterator::SpaceIterator()
6421 : current_space_(FIRST_SPACE),
6427 SpaceIterator::SpaceIterator(HeapObjectCallback size_func)
6428 : current_space_(FIRST_SPACE),
6430 size_func_(size_func) {
6434 SpaceIterator::~SpaceIterator() {
6435 // Delete active iterator if any.
6440 bool SpaceIterator::has_next() {
6441 // Iterate until no more spaces.
6442 return current_space_ != LAST_SPACE;
6446 ObjectIterator* SpaceIterator::next() {
6447 if (iterator_ != NULL) {
6450 // Move to the next space
6452 if (current_space_ > LAST_SPACE) {
6457 // Return iterator for the new current space.
6458 return CreateIterator();
6462 // Create an iterator for the space to iterate.
6463 ObjectIterator* SpaceIterator::CreateIterator() {
6464 ASSERT(iterator_ == NULL);
6466 switch (current_space_) {
6468 iterator_ = new SemiSpaceIterator(HEAP->new_space(), size_func_);
6470 case OLD_POINTER_SPACE:
6471 iterator_ = new HeapObjectIterator(HEAP->old_pointer_space(), size_func_);
6473 case OLD_DATA_SPACE:
6474 iterator_ = new HeapObjectIterator(HEAP->old_data_space(), size_func_);
6477 iterator_ = new HeapObjectIterator(HEAP->code_space(), size_func_);
6480 iterator_ = new HeapObjectIterator(HEAP->map_space(), size_func_);
6483 iterator_ = new HeapObjectIterator(HEAP->cell_space(), size_func_);
6486 iterator_ = new LargeObjectIterator(HEAP->lo_space(), size_func_);
6490 // Return the newly allocated iterator.
6491 ASSERT(iterator_ != NULL);
6496 class HeapObjectsFilter {
6498 virtual ~HeapObjectsFilter() {}
6499 virtual bool SkipObject(HeapObject* object) = 0;
6503 class UnreachableObjectsFilter : public HeapObjectsFilter {
6505 UnreachableObjectsFilter() {
6506 MarkReachableObjects();
6509 ~UnreachableObjectsFilter() {
6510 Isolate::Current()->heap()->mark_compact_collector()->ClearMarkbits();
6513 bool SkipObject(HeapObject* object) {
6514 MarkBit mark_bit = Marking::MarkBitFrom(object);
6515 return !mark_bit.Get();
6519 class MarkingVisitor : public ObjectVisitor {
6521 MarkingVisitor() : marking_stack_(10) {}
6523 void VisitPointers(Object** start, Object** end) {
6524 for (Object** p = start; p < end; p++) {
6525 if (!(*p)->IsHeapObject()) continue;
6526 HeapObject* obj = HeapObject::cast(*p);
6527 MarkBit mark_bit = Marking::MarkBitFrom(obj);
6528 if (!mark_bit.Get()) {
6530 marking_stack_.Add(obj);
6535 void TransitiveClosure() {
6536 while (!marking_stack_.is_empty()) {
6537 HeapObject* obj = marking_stack_.RemoveLast();
6543 List<HeapObject*> marking_stack_;
6546 void MarkReachableObjects() {
6547 Heap* heap = Isolate::Current()->heap();
6548 MarkingVisitor visitor;
6549 heap->IterateRoots(&visitor, VISIT_ALL);
6550 visitor.TransitiveClosure();
6553 AssertNoAllocation no_alloc;
6557 HeapIterator::HeapIterator()
6558 : filtering_(HeapIterator::kNoFiltering),
6564 HeapIterator::HeapIterator(HeapIterator::HeapObjectsFiltering filtering)
6565 : filtering_(filtering),
6571 HeapIterator::~HeapIterator() {
6576 void HeapIterator::Init() {
6577 // Start the iteration.
6578 space_iterator_ = new SpaceIterator;
6579 switch (filtering_) {
6580 case kFilterUnreachable:
6581 filter_ = new UnreachableObjectsFilter;
6586 object_iterator_ = space_iterator_->next();
6590 void HeapIterator::Shutdown() {
6592 // Assert that in filtering mode we have iterated through all
6593 // objects. Otherwise, the heap will be left in an inconsistent state.
6594 if (filtering_ != kNoFiltering) {
6595 ASSERT(object_iterator_ == NULL);
6598 // Make sure the last iterator is deallocated.
6599 delete space_iterator_;
6600 space_iterator_ = NULL;
6601 object_iterator_ = NULL;
6607 HeapObject* HeapIterator::next() {
6608 if (filter_ == NULL) return NextObject();
6610 HeapObject* obj = NextObject();
6611 while (obj != NULL && filter_->SkipObject(obj)) obj = NextObject();
6616 HeapObject* HeapIterator::NextObject() {
6617 // No iterator means we are done.
6618 if (object_iterator_ == NULL) return NULL;
6620 if (HeapObject* obj = object_iterator_->next_object()) {
6621 // If the current iterator has more objects we are fine.
6624 // Go through the spaces looking for one that has objects.
6625 while (space_iterator_->has_next()) {
6626 object_iterator_ = space_iterator_->next();
6627 if (HeapObject* obj = object_iterator_->next_object()) {
6632 // Done with the last space.
6633 object_iterator_ = NULL;
6638 void HeapIterator::reset() {
6639 // Restart the iterator.
6645 #if defined(DEBUG) || defined(LIVE_OBJECT_LIST)
6647 Object* const PathTracer::kAnyGlobalObject = reinterpret_cast<Object*>(NULL);
6649 class PathTracer::MarkVisitor: public ObjectVisitor {
6651 explicit MarkVisitor(PathTracer* tracer) : tracer_(tracer) {}
6652 void VisitPointers(Object** start, Object** end) {
6653 // Scan all HeapObject pointers in [start, end)
6654 for (Object** p = start; !tracer_->found() && (p < end); p++) {
6655 if ((*p)->IsHeapObject())
6656 tracer_->MarkRecursively(p, this);
6661 PathTracer* tracer_;
6665 class PathTracer::UnmarkVisitor: public ObjectVisitor {
6667 explicit UnmarkVisitor(PathTracer* tracer) : tracer_(tracer) {}
6668 void VisitPointers(Object** start, Object** end) {
6669 // Scan all HeapObject pointers in [start, end)
6670 for (Object** p = start; p < end; p++) {
6671 if ((*p)->IsHeapObject())
6672 tracer_->UnmarkRecursively(p, this);
6677 PathTracer* tracer_;
6681 void PathTracer::VisitPointers(Object** start, Object** end) {
6682 bool done = ((what_to_find_ == FIND_FIRST) && found_target_);
6683 // Visit all HeapObject pointers in [start, end)
6684 for (Object** p = start; !done && (p < end); p++) {
6685 if ((*p)->IsHeapObject()) {
6687 done = ((what_to_find_ == FIND_FIRST) && found_target_);
6693 void PathTracer::Reset() {
6694 found_target_ = false;
6695 object_stack_.Clear();
6699 void PathTracer::TracePathFrom(Object** root) {
6700 ASSERT((search_target_ == kAnyGlobalObject) ||
6701 search_target_->IsHeapObject());
6702 found_target_in_trace_ = false;
6705 MarkVisitor mark_visitor(this);
6706 MarkRecursively(root, &mark_visitor);
6708 UnmarkVisitor unmark_visitor(this);
6709 UnmarkRecursively(root, &unmark_visitor);
6715 static bool SafeIsNativeContext(HeapObject* obj) {
6716 return obj->map() == obj->GetHeap()->raw_unchecked_native_context_map();
6720 void PathTracer::MarkRecursively(Object** p, MarkVisitor* mark_visitor) {
6721 if (!(*p)->IsHeapObject()) return;
6723 HeapObject* obj = HeapObject::cast(*p);
6725 Object* map = obj->map();
6727 if (!map->IsHeapObject()) return; // visited before
6729 if (found_target_in_trace_) return; // stop if target found
6730 object_stack_.Add(obj);
6731 if (((search_target_ == kAnyGlobalObject) && obj->IsJSGlobalObject()) ||
6732 (obj == search_target_)) {
6733 found_target_in_trace_ = true;
6734 found_target_ = true;
6738 bool is_native_context = SafeIsNativeContext(obj);
6741 Map* map_p = reinterpret_cast<Map*>(HeapObject::cast(map));
6743 Address map_addr = map_p->address();
6745 obj->set_map_no_write_barrier(reinterpret_cast<Map*>(map_addr + kMarkTag));
6747 // Scan the object body.
6748 if (is_native_context && (visit_mode_ == VISIT_ONLY_STRONG)) {
6749 // This is specialized to scan Contexts properly.
6750 Object** start = reinterpret_cast<Object**>(obj->address() +
6751 Context::kHeaderSize);
6752 Object** end = reinterpret_cast<Object**>(obj->address() +
6753 Context::kHeaderSize + Context::FIRST_WEAK_SLOT * kPointerSize);
6754 mark_visitor->VisitPointers(start, end);
6756 obj->IterateBody(map_p->instance_type(),
6757 obj->SizeFromMap(map_p),
6761 // Scan the map after the body because the body is a lot more interesting
6762 // when doing leak detection.
6763 MarkRecursively(&map, mark_visitor);
6765 if (!found_target_in_trace_) // don't pop if found the target
6766 object_stack_.RemoveLast();
6770 void PathTracer::UnmarkRecursively(Object** p, UnmarkVisitor* unmark_visitor) {
6771 if (!(*p)->IsHeapObject()) return;
6773 HeapObject* obj = HeapObject::cast(*p);
6775 Object* map = obj->map();
6777 if (map->IsHeapObject()) return; // unmarked already
6779 Address map_addr = reinterpret_cast<Address>(map);
6781 map_addr -= kMarkTag;
6783 ASSERT_TAG_ALIGNED(map_addr);
6785 HeapObject* map_p = HeapObject::FromAddress(map_addr);
6787 obj->set_map_no_write_barrier(reinterpret_cast<Map*>(map_p));
6789 UnmarkRecursively(reinterpret_cast<Object**>(&map_p), unmark_visitor);
6791 obj->IterateBody(Map::cast(map_p)->instance_type(),
6792 obj->SizeFromMap(Map::cast(map_p)),
6797 void PathTracer::ProcessResults() {
6798 if (found_target_) {
6799 PrintF("=====================================\n");
6800 PrintF("==== Path to object ====\n");
6801 PrintF("=====================================\n\n");
6803 ASSERT(!object_stack_.is_empty());
6804 for (int i = 0; i < object_stack_.length(); i++) {
6805 if (i > 0) PrintF("\n |\n |\n V\n\n");
6806 Object* obj = object_stack_[i];
6809 PrintF("=====================================\n");
6812 #endif // DEBUG || LIVE_OBJECT_LIST
6816 // Triggers a depth-first traversal of reachable objects from one
6817 // given root object and finds a path to a specific heap object and
6819 void Heap::TracePathToObjectFrom(Object* target, Object* root) {
6820 PathTracer tracer(target, PathTracer::FIND_ALL, VISIT_ALL);
6821 tracer.VisitPointer(&root);
6825 // Triggers a depth-first traversal of reachable objects from roots
6826 // and finds a path to a specific heap object and prints it.
6827 void Heap::TracePathToObject(Object* target) {
6828 PathTracer tracer(target, PathTracer::FIND_ALL, VISIT_ALL);
6829 IterateRoots(&tracer, VISIT_ONLY_STRONG);
6833 // Triggers a depth-first traversal of reachable objects from roots
6834 // and finds a path to any global object and prints it. Useful for
6835 // determining the source for leaks of global objects.
6836 void Heap::TracePathToGlobal() {
6837 PathTracer tracer(PathTracer::kAnyGlobalObject,
6838 PathTracer::FIND_ALL,
6840 IterateRoots(&tracer, VISIT_ONLY_STRONG);
6845 static intptr_t CountTotalHolesSize() {
6846 intptr_t holes_size = 0;
6848 for (OldSpace* space = spaces.next();
6850 space = spaces.next()) {
6851 holes_size += space->Waste() + space->Available();
6857 GCTracer::GCTracer(Heap* heap,
6858 const char* gc_reason,
6859 const char* collector_reason)
6861 start_object_size_(0),
6862 start_memory_size_(0),
6865 allocated_since_last_gc_(0),
6866 spent_in_mutator_(0),
6867 promoted_objects_size_(0),
6868 nodes_died_in_new_space_(0),
6869 nodes_copied_in_new_space_(0),
6872 gc_reason_(gc_reason),
6873 collector_reason_(collector_reason) {
6874 if (!FLAG_trace_gc && !FLAG_print_cumulative_gc_stat) return;
6875 start_time_ = OS::TimeCurrentMillis();
6876 start_object_size_ = heap_->SizeOfObjects();
6877 start_memory_size_ = heap_->isolate()->memory_allocator()->Size();
6879 for (int i = 0; i < Scope::kNumberOfScopes; i++) {
6883 in_free_list_or_wasted_before_gc_ = CountTotalHolesSize();
6885 allocated_since_last_gc_ =
6886 heap_->SizeOfObjects() - heap_->alive_after_last_gc_;
6888 if (heap_->last_gc_end_timestamp_ > 0) {
6889 spent_in_mutator_ = Max(start_time_ - heap_->last_gc_end_timestamp_, 0.0);
6892 steps_count_ = heap_->incremental_marking()->steps_count();
6893 steps_took_ = heap_->incremental_marking()->steps_took();
6894 longest_step_ = heap_->incremental_marking()->longest_step();
6895 steps_count_since_last_gc_ =
6896 heap_->incremental_marking()->steps_count_since_last_gc();
6897 steps_took_since_last_gc_ =
6898 heap_->incremental_marking()->steps_took_since_last_gc();
6902 GCTracer::~GCTracer() {
6903 // Printf ONE line iff flag is set.
6904 if (!FLAG_trace_gc && !FLAG_print_cumulative_gc_stat) return;
6906 bool first_gc = (heap_->last_gc_end_timestamp_ == 0);
6908 heap_->alive_after_last_gc_ = heap_->SizeOfObjects();
6909 heap_->last_gc_end_timestamp_ = OS::TimeCurrentMillis();
6911 int time = static_cast<int>(heap_->last_gc_end_timestamp_ - start_time_);
6913 // Update cumulative GC statistics if required.
6914 if (FLAG_print_cumulative_gc_stat) {
6915 heap_->total_gc_time_ms_ += time;
6916 heap_->max_gc_pause_ = Max(heap_->max_gc_pause_, time);
6917 heap_->max_alive_after_gc_ = Max(heap_->max_alive_after_gc_,
6918 heap_->alive_after_last_gc_);
6920 heap_->min_in_mutator_ = Min(heap_->min_in_mutator_,
6921 static_cast<int>(spent_in_mutator_));
6923 } else if (FLAG_trace_gc_verbose) {
6924 heap_->total_gc_time_ms_ += time;
6927 if (collector_ == SCAVENGER && FLAG_trace_gc_ignore_scavenger) return;
6929 PrintPID("%8.0f ms: ", heap_->isolate()->time_millis_since_init());
6931 if (!FLAG_trace_gc_nvp) {
6932 int external_time = static_cast<int>(scopes_[Scope::EXTERNAL]);
6934 double end_memory_size_mb =
6935 static_cast<double>(heap_->isolate()->memory_allocator()->Size()) / MB;
6937 PrintF("%s %.1f (%.1f) -> %.1f (%.1f) MB, ",
6939 static_cast<double>(start_object_size_) / MB,
6940 static_cast<double>(start_memory_size_) / MB,
6941 SizeOfHeapObjects(),
6942 end_memory_size_mb);
6944 if (external_time > 0) PrintF("%d / ", external_time);
6945 PrintF("%d ms", time);
6946 if (steps_count_ > 0) {
6947 if (collector_ == SCAVENGER) {
6948 PrintF(" (+ %d ms in %d steps since last GC)",
6949 static_cast<int>(steps_took_since_last_gc_),
6950 steps_count_since_last_gc_);
6952 PrintF(" (+ %d ms in %d steps since start of marking, "
6953 "biggest step %f ms)",
6954 static_cast<int>(steps_took_),
6960 if (gc_reason_ != NULL) {
6961 PrintF(" [%s]", gc_reason_);
6964 if (collector_reason_ != NULL) {
6965 PrintF(" [%s]", collector_reason_);
6970 PrintF("pause=%d ", time);
6971 PrintF("mutator=%d ", static_cast<int>(spent_in_mutator_));
6973 switch (collector_) {
6977 case MARK_COMPACTOR:
6985 PrintF("external=%d ", static_cast<int>(scopes_[Scope::EXTERNAL]));
6986 PrintF("mark=%d ", static_cast<int>(scopes_[Scope::MC_MARK]));
6987 PrintF("sweep=%d ", static_cast<int>(scopes_[Scope::MC_SWEEP]));
6988 PrintF("sweepns=%d ", static_cast<int>(scopes_[Scope::MC_SWEEP_NEWSPACE]));
6989 PrintF("evacuate=%d ", static_cast<int>(scopes_[Scope::MC_EVACUATE_PAGES]));
6990 PrintF("new_new=%d ",
6991 static_cast<int>(scopes_[Scope::MC_UPDATE_NEW_TO_NEW_POINTERS]));
6992 PrintF("root_new=%d ",
6993 static_cast<int>(scopes_[Scope::MC_UPDATE_ROOT_TO_NEW_POINTERS]));
6994 PrintF("old_new=%d ",
6995 static_cast<int>(scopes_[Scope::MC_UPDATE_OLD_TO_NEW_POINTERS]));
6996 PrintF("compaction_ptrs=%d ",
6997 static_cast<int>(scopes_[Scope::MC_UPDATE_POINTERS_TO_EVACUATED]));
6998 PrintF("intracompaction_ptrs=%d ", static_cast<int>(scopes_[
6999 Scope::MC_UPDATE_POINTERS_BETWEEN_EVACUATED]));
7000 PrintF("misc_compaction=%d ",
7001 static_cast<int>(scopes_[Scope::MC_UPDATE_MISC_POINTERS]));
7003 PrintF("total_size_before=%" V8_PTR_PREFIX "d ", start_object_size_);
7004 PrintF("total_size_after=%" V8_PTR_PREFIX "d ", heap_->SizeOfObjects());
7005 PrintF("holes_size_before=%" V8_PTR_PREFIX "d ",
7006 in_free_list_or_wasted_before_gc_);
7007 PrintF("holes_size_after=%" V8_PTR_PREFIX "d ", CountTotalHolesSize());
7009 PrintF("allocated=%" V8_PTR_PREFIX "d ", allocated_since_last_gc_);
7010 PrintF("promoted=%" V8_PTR_PREFIX "d ", promoted_objects_size_);
7011 PrintF("nodes_died_in_new=%d ", nodes_died_in_new_space_);
7012 PrintF("nodes_copied_in_new=%d ", nodes_copied_in_new_space_);
7013 PrintF("nodes_promoted=%d ", nodes_promoted_);
7015 if (collector_ == SCAVENGER) {
7016 PrintF("stepscount=%d ", steps_count_since_last_gc_);
7017 PrintF("stepstook=%d ", static_cast<int>(steps_took_since_last_gc_));
7019 PrintF("stepscount=%d ", steps_count_);
7020 PrintF("stepstook=%d ", static_cast<int>(steps_took_));
7021 PrintF("longeststep=%.f ", longest_step_);
7027 heap_->PrintShortHeapStatistics();
7031 const char* GCTracer::CollectorString() {
7032 switch (collector_) {
7035 case MARK_COMPACTOR:
7036 return "Mark-sweep";
7038 return "Unknown GC";
7042 int KeyedLookupCache::Hash(Map* map, String* name) {
7043 // Uses only lower 32 bits if pointers are larger.
7044 uintptr_t addr_hash =
7045 static_cast<uint32_t>(reinterpret_cast<uintptr_t>(map)) >> kMapHashShift;
7046 return static_cast<uint32_t>((addr_hash ^ name->Hash()) & kCapacityMask);
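// A lookup probes the kEntriesPerBucket consecutive entries of the bucket
// selected by Hash() and returns the cached field offset on a (map, name)
// match; otherwise it falls through to the not-found result.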
7050 int KeyedLookupCache::Lookup(Map* map, String* name) {
7051 int index = (Hash(map, name) & kHashMask);
7052 for (int i = 0; i < kEntriesPerBucket; i++) {
7053 Key& key = keys_[index + i];
7054 if ((key.map == map) && key.name->Equals(name)) {
7055 return field_offsets_[index + i];
7062 void KeyedLookupCache::Update(Map* map, String* name, int field_offset) {
7064 if (HEAP->LookupSymbolIfExists(name, &symbol)) {
7065 int index = (Hash(map, symbol) & kHashMask);
7066 // After a GC there will be free slots, so we use them in order (this may
7067 // help to get the most frequently used one in position 0).
7068 for (int i = 0; i < kEntriesPerBucket; i++) {
7069 Key& key = keys_[index];
7070 Object* free_entry_indicator = NULL;
7071 if (key.map == free_entry_indicator) {
7074 field_offsets_[index + i] = field_offset;
7078 // No free entry found in this bucket, so we move them all down one and
7079 // put the new entry at position zero.
7080 for (int i = kEntriesPerBucket - 1; i > 0; i--) {
7081 Key& key = keys_[index + i];
7082 Key& key2 = keys_[index + i - 1];
7084 field_offsets_[index + i] = field_offsets_[index + i - 1];
7087 // Write the new first entry.
7088 Key& key = keys_[index];
7091 field_offsets_[index] = field_offset;
7096 void KeyedLookupCache::Clear() {
7097 for (int index = 0; index < kLength; index++) keys_[index].map = NULL;
7101 void DescriptorLookupCache::Clear() {
7102 for (int index = 0; index < kLength; index++) keys_[index].source = NULL;
7107 void Heap::GarbageCollectionGreedyCheck() {
7108 ASSERT(FLAG_gc_greedy);
7109 if (isolate_->bootstrapper()->IsActive()) return;
7110 if (disallow_allocation_failure()) return;
7111 CollectGarbage(NEW_SPACE);
7116 TranscendentalCache::SubCache::SubCache(Type t)
7118 isolate_(Isolate::Current()) {
7119 uint32_t in0 = 0xffffffffu; // Bit-pattern for a NaN that isn't
7120 uint32_t in1 = 0xffffffffu; // generated by the FPU.
7121 for (int i = 0; i < kCacheSize; i++) {
7122 elements_[i].in[0] = in0;
7123 elements_[i].in[1] = in1;
7124 elements_[i].output = NULL;
7129 void TranscendentalCache::Clear() {
7130 for (int i = 0; i < kNumberOfCaches; i++) {
7131 if (caches_[i] != NULL) {
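// CleanUp compacts both external string lists: entries that were replaced by
// the hole value (strings that died in the last GC) are dropped, and new
// space strings that have been promoted are moved to the old space list.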
7139 void ExternalStringTable::CleanUp() {
7141 for (int i = 0; i < new_space_strings_.length(); ++i) {
7142 if (new_space_strings_[i] == heap_->the_hole_value()) {
7145 if (heap_->InNewSpace(new_space_strings_[i])) {
7146 new_space_strings_[last++] = new_space_strings_[i];
7148 old_space_strings_.Add(new_space_strings_[i]);
7151 new_space_strings_.Rewind(last);
7153 for (int i = 0; i < old_space_strings_.length(); ++i) {
7154 if (old_space_strings_[i] == heap_->the_hole_value()) {
7157 ASSERT(!heap_->InNewSpace(old_space_strings_[i]));
7158 old_space_strings_[last++] = old_space_strings_[i];
7160 old_space_strings_.Rewind(last);
7162 if (FLAG_verify_heap) {
7169 void ExternalStringTable::TearDown() {
7170 new_space_strings_.Free();
7171 old_space_strings_.Free();
7175 void Heap::QueueMemoryChunkForFree(MemoryChunk* chunk) {
7176 chunk->set_next_chunk(chunks_queued_for_free_);
7177 chunks_queued_for_free_ = chunk;
7181 void Heap::FreeQueuedChunks() {
7182 if (chunks_queued_for_free_ == NULL) return;
7185 for (chunk = chunks_queued_for_free_; chunk != NULL; chunk = next) {
7186 next = chunk->next_chunk();
7187 chunk->SetFlag(MemoryChunk::ABOUT_TO_BE_FREED);
7189 if (chunk->owner()->identity() == LO_SPACE) {
7190 // StoreBuffer::Filter relies on MemoryChunk::FromAnyPointerAddress.
7191 // If FromAnyPointerAddress encounters a slot that belongs to a large
7192 // chunk queued for deletion it will fail to find the chunk because
7193 // it tries to perform a search in the list of pages owned by the large
7194 // object space, and the queued chunks were detached from that list.
7195 // To work around this we split large chunk into normal kPageSize aligned
7196 // pieces and initialize size, owner and flags field of every piece.
7197 // If FromAnyPointerAddress encounters a slot that belongs to one of
7198 // these smaller pieces it will treat it as a slot on a normal Page.
7199 Address chunk_end = chunk->address() + chunk->size();
7200 MemoryChunk* inner = MemoryChunk::FromAddress(
7201 chunk->address() + Page::kPageSize);
7202 MemoryChunk* inner_last = MemoryChunk::FromAddress(chunk_end - 1);
7203 while (inner <= inner_last) {
7204 // Size of a large chunk is always a multiple of
7205 // OS::AllocateAlignment() so there is always
7206 // enough space for a fake MemoryChunk header.
7207 Address area_end = Min(inner->address() + Page::kPageSize, chunk_end);
7208 // Guard against overflow.
7209 if (area_end < inner->address()) area_end = chunk_end;
7210 inner->SetArea(inner->address(), area_end);
7211 inner->set_size(Page::kPageSize);
7212 inner->set_owner(lo_space());
7213 inner->SetFlag(MemoryChunk::ABOUT_TO_BE_FREED);
7214 inner = MemoryChunk::FromAddress(
7215 inner->address() + Page::kPageSize);
7219 isolate_->heap()->store_buffer()->Compact();
7220 isolate_->heap()->store_buffer()->Filter(MemoryChunk::ABOUT_TO_BE_FREED);
7221 for (chunk = chunks_queued_for_free_; chunk != NULL; chunk = next) {
7222 next = chunk->next_chunk();
7223 isolate_->memory_allocator()->Free(chunk);
7225 chunks_queued_for_free_ = NULL;
7229 void Heap::RememberUnmappedPage(Address page, bool compacted) {
7230 uintptr_t p = reinterpret_cast<uintptr_t>(page);
7231 // Tag the page pointer to make it findable in the dump file.
7233 p ^= 0xc1ead & (Page::kPageSize - 1); // Cleared.
7235 p ^= 0x1d1ed & (Page::kPageSize - 1); // I died.
7237 remembered_unmapped_pages_[remembered_unmapped_pages_index_] =
7238 reinterpret_cast<Address>(p);
7239 remembered_unmapped_pages_index_++;
7240 remembered_unmapped_pages_index_ %= kRememberedUnmappedPages;
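// The hex-speak markers above ('c1ead' for cleared, '1d1ed' for 'I died')
// make it easy to recognize why a remembered page address shows up in a
// crash dump; the table itself is a small ring buffer holding the last
// kRememberedUnmappedPages unmapped pages.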
7244 void Heap::ClearObjectStats(bool clear_last_time_stats) {
7245 memset(object_counts_, 0, sizeof(object_counts_));
7246 memset(object_sizes_, 0, sizeof(object_sizes_));
7247 if (clear_last_time_stats) {
7248 memset(object_counts_last_time_, 0, sizeof(object_counts_last_time_));
7249 memset(object_sizes_last_time_, 0, sizeof(object_sizes_last_time_));
7254 static LazyMutex checkpoint_object_stats_mutex = LAZY_MUTEX_INITIALIZER;
7257 void Heap::CheckpointObjectStats() {
7258 ScopedLock lock(checkpoint_object_stats_mutex.Pointer());
7259 Counters* counters = isolate()->counters();
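// For each instance type the counter is incremented by the current count and
// decremented by the count recorded at the previous checkpoint, so the
// externally visible counters always reflect the latest heap composition
// rather than an ever-growing total.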
7260 #define ADJUST_LAST_TIME_OBJECT_COUNT(name) \
7261 counters->count_of_##name()->Increment( \
7262 static_cast<int>(object_counts_[name])); \
7263 counters->count_of_##name()->Decrement( \
7264 static_cast<int>(object_counts_last_time_[name])); \
7265 counters->size_of_##name()->Increment( \
7266 static_cast<int>(object_sizes_[name])); \
7267 counters->size_of_##name()->Decrement( \
7268 static_cast<int>(object_sizes_last_time_[name]));
7269 INSTANCE_TYPE_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
7270 #undef ADJUST_LAST_TIME_OBJECT_COUNT
7272 #define ADJUST_LAST_TIME_OBJECT_COUNT(name) \
7273 index = FIRST_CODE_KIND_SUB_TYPE + Code::name; \
7274 counters->count_of_CODE_TYPE_##name()->Increment( \
7275 static_cast<int>(object_counts_[index])); \
7276 counters->count_of_CODE_TYPE_##name()->Decrement( \
7277 static_cast<int>(object_counts_last_time_[index])); \
7278 counters->size_of_CODE_TYPE_##name()->Increment( \
7279 static_cast<int>(object_sizes_[index])); \
7280 counters->size_of_CODE_TYPE_##name()->Decrement( \
7281 static_cast<int>(object_sizes_last_time_[index]));
7282 CODE_KIND_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
7283 #undef ADJUST_LAST_TIME_OBJECT_COUNT
7284 #define ADJUST_LAST_TIME_OBJECT_COUNT(name) \
7285 index = FIRST_FIXED_ARRAY_SUB_TYPE + name; \
7286 counters->count_of_FIXED_ARRAY_##name()->Increment( \
7287 static_cast<int>(object_counts_[index])); \
7288 counters->count_of_FIXED_ARRAY_##name()->Decrement( \
7289 static_cast<int>(object_counts_last_time_[index])); \
7290 counters->size_of_FIXED_ARRAY_##name()->Increment( \
7291 static_cast<int>(object_sizes_[index])); \
7292 counters->size_of_FIXED_ARRAY_##name()->Decrement( \
7293 static_cast<int>(object_sizes_last_time_[index]));
7294 FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
7295 #undef ADJUST_LAST_TIME_OBJECT_COUNT
7297 memcpy(object_counts_last_time_, object_counts_, sizeof(object_counts_));
7298 memcpy(object_sizes_last_time_, object_sizes_, sizeof(object_sizes_));
7302 } } // namespace v8::internal