1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
7 #include "src/accessors.h"
9 #include "src/base/bits.h"
10 #include "src/base/once.h"
11 #include "src/base/utils/random-number-generator.h"
12 #include "src/bootstrapper.h"
13 #include "src/codegen.h"
14 #include "src/compilation-cache.h"
15 #include "src/conversions.h"
16 #include "src/cpu-profiler.h"
17 #include "src/debug.h"
18 #include "src/deoptimizer.h"
19 #include "src/global-handles.h"
20 #include "src/heap/gc-idle-time-handler.h"
21 #include "src/heap/incremental-marking.h"
22 #include "src/heap/mark-compact.h"
23 #include "src/heap/objects-visiting-inl.h"
24 #include "src/heap/objects-visiting.h"
25 #include "src/heap/store-buffer.h"
26 #include "src/heap-profiler.h"
27 #include "src/isolate-inl.h"
28 #include "src/natives.h"
29 #include "src/runtime-profiler.h"
30 #include "src/scopeinfo.h"
31 #include "src/serialize.h"
32 #include "src/snapshot.h"
33 #include "src/utils.h"
34 #include "src/v8threads.h"
35 #include "src/vm-state-inl.h"
#if V8_TARGET_ARCH_PPC && !V8_INTERPRETED_REGEXP
#include "src/regexp-macro-assembler.h"          // NOLINT
#include "src/ppc/regexp-macro-assembler-ppc.h"  // NOLINT
#endif
#if V8_TARGET_ARCH_ARM && !V8_INTERPRETED_REGEXP
#include "src/regexp-macro-assembler.h"          // NOLINT
#include "src/arm/regexp-macro-assembler-arm.h"  // NOLINT
#endif
#if V8_TARGET_ARCH_MIPS && !V8_INTERPRETED_REGEXP
#include "src/regexp-macro-assembler.h"            // NOLINT
#include "src/mips/regexp-macro-assembler-mips.h"  // NOLINT
#endif
#if V8_TARGET_ARCH_MIPS64 && !V8_INTERPRETED_REGEXP
#include "src/regexp-macro-assembler.h"
#include "src/mips64/regexp-macro-assembler-mips64.h"
#endif

namespace v8 {
namespace internal {

Heap::Heap()
    : amount_of_external_allocated_memory_(0),
60 amount_of_external_allocated_memory_at_last_global_gc_(0),
63 // semispace_size_ should be a power of 2 and old_generation_size_ should
64 // be a multiple of Page::kPageSize.
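      // Note: the (kPointerSize / 4) factor below scales the defaults with the
      // target word size, so 8 * (kPointerSize / 4) * MB is 8 MB on 32-bit
      // targets and 16 MB on 64-bit targets.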
65 reserved_semispace_size_(8 * (kPointerSize / 4) * MB),
66 max_semi_space_size_(8 * (kPointerSize / 4) * MB),
67 initial_semispace_size_(Page::kPageSize),
68 target_semispace_size_(Page::kPageSize),
69 max_old_generation_size_(700ul * (kPointerSize / 4) * MB),
70 initial_old_generation_size_(max_old_generation_size_ /
71 kInitalOldGenerationLimitFactor),
72 old_generation_size_configured_(false),
73 max_executable_size_(256ul * (kPointerSize / 4) * MB),
74 // Variables set based on semispace_size_ and old_generation_size_ in
76 // Will be 4 * reserved_semispace_size_ to ensure that young
77 // generation can be aligned to its size.
78 maximum_committed_(0),
79 survived_since_last_expansion_(0),
80 survived_last_scavenge_(0),
82 always_allocate_scope_depth_(0),
83 contexts_disposed_(0),
85 scan_on_scavenge_pages_(0),
87 old_pointer_space_(NULL),
88 old_data_space_(NULL),
92 property_cell_space_(NULL),
95 gc_post_processing_depth_(0),
96 allocations_count_(0),
97 raw_allocations_hash_(0),
98 dump_allocations_hash_countdown_(FLAG_dump_allocations_digest_at_alloc),
101 remembered_unmapped_pages_index_(0),
102 unflattened_strings_length_(0),
104 allocation_timeout_(0),
106 old_generation_allocation_limit_(initial_old_generation_size_),
107 idle_old_generation_allocation_limit_(
108 kMinimumOldGenerationAllocationLimit),
109 old_gen_exhausted_(false),
110 inline_allocation_disabled_(false),
111 store_buffer_rebuilder_(store_buffer()),
112 hidden_string_(NULL),
113 gc_safe_size_of_old_object_(NULL),
114 total_regexp_code_generated_(0),
116 high_survival_rate_period_length_(0),
117 promoted_objects_size_(0),
119 semi_space_copied_object_size_(0),
120 previous_semi_space_copied_object_size_(0),
121 semi_space_copied_rate_(0),
122 nodes_died_in_new_space_(0),
123 nodes_copied_in_new_space_(0),
125 maximum_size_scavenges_(0),
127 total_gc_time_ms_(0.0),
128 max_alive_after_gc_(0),
129 min_in_mutator_(kMaxInt),
132 last_idle_notification_time_(0.0),
133 mark_compact_collector_(this),
136 incremental_marking_(this),
137 gc_count_at_last_idle_gc_(0),
138 full_codegen_bytes_generated_(0),
139 crankshaft_codegen_bytes_generated_(0),
140 gcs_since_last_deopt_(0),
141 allocation_sites_scratchpad_length_(0),
142 promotion_queue_(this),
144 external_string_table_(this),
145 chunks_queued_for_free_(NULL),
146 gc_callbacks_depth_(0),
147 deserialization_complete_(false) {
148 // Allow build-time customization of the max semispace size. Building
149 // V8 with snapshots and a non-default max semispace size is much
150 // easier if you can define it as part of the build environment.
151 #if defined(V8_MAX_SEMISPACE_SIZE)
  max_semi_space_size_ = reserved_semispace_size_ = V8_MAX_SEMISPACE_SIZE;
#endif
155 // Ensure old_generation_size_ is a multiple of kPageSize.
156 DCHECK(MB >= Page::kPageSize);
158 memset(roots_, 0, sizeof(roots_[0]) * kRootListLength);
159 set_native_contexts_list(NULL);
160 set_array_buffers_list(Smi::FromInt(0));
161 set_allocation_sites_list(Smi::FromInt(0));
162 set_encountered_weak_collections(Smi::FromInt(0));
163 set_encountered_weak_cells(Smi::FromInt(0));
  // Put a dummy entry in the remembered pages so we can find the list in the
  // minidump even if there are no real unmapped pages.
166 RememberUnmappedPage(NULL, false);
  ClearObjectStats(true);
}
172 intptr_t Heap::Capacity() {
173 if (!HasBeenSetUp()) return 0;
175 return new_space_.Capacity() + old_pointer_space_->Capacity() +
176 old_data_space_->Capacity() + code_space_->Capacity() +
177 map_space_->Capacity() + cell_space_->Capacity() +
178 property_cell_space_->Capacity();
182 intptr_t Heap::CommittedMemory() {
183 if (!HasBeenSetUp()) return 0;
185 return new_space_.CommittedMemory() + old_pointer_space_->CommittedMemory() +
186 old_data_space_->CommittedMemory() + code_space_->CommittedMemory() +
187 map_space_->CommittedMemory() + cell_space_->CommittedMemory() +
188 property_cell_space_->CommittedMemory() + lo_space_->Size();
192 size_t Heap::CommittedPhysicalMemory() {
193 if (!HasBeenSetUp()) return 0;
195 return new_space_.CommittedPhysicalMemory() +
196 old_pointer_space_->CommittedPhysicalMemory() +
197 old_data_space_->CommittedPhysicalMemory() +
198 code_space_->CommittedPhysicalMemory() +
199 map_space_->CommittedPhysicalMemory() +
200 cell_space_->CommittedPhysicalMemory() +
201 property_cell_space_->CommittedPhysicalMemory() +
202 lo_space_->CommittedPhysicalMemory();
206 intptr_t Heap::CommittedMemoryExecutable() {
207 if (!HasBeenSetUp()) return 0;
209 return isolate()->memory_allocator()->SizeExecutable();
213 void Heap::UpdateMaximumCommitted() {
214 if (!HasBeenSetUp()) return;
216 intptr_t current_committed_memory = CommittedMemory();
217 if (current_committed_memory > maximum_committed_) {
218 maximum_committed_ = current_committed_memory;
223 intptr_t Heap::Available() {
224 if (!HasBeenSetUp()) return 0;
226 return new_space_.Available() + old_pointer_space_->Available() +
227 old_data_space_->Available() + code_space_->Available() +
228 map_space_->Available() + cell_space_->Available() +
229 property_cell_space_->Available();
233 bool Heap::HasBeenSetUp() {
234 return old_pointer_space_ != NULL && old_data_space_ != NULL &&
235 code_space_ != NULL && map_space_ != NULL && cell_space_ != NULL &&
236 property_cell_space_ != NULL && lo_space_ != NULL;
240 int Heap::GcSafeSizeOfOldObject(HeapObject* object) {
241 if (IntrusiveMarking::IsMarked(object)) {
242 return IntrusiveMarking::SizeOfMarkedObject(object);
244 return object->SizeFromMap(object->map());
248 GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space,
249 const char** reason) {
250 // Is global GC requested?
251 if (space != NEW_SPACE) {
252 isolate_->counters()->gc_compactor_caused_by_request()->Increment();
253 *reason = "GC in old space requested";
254 return MARK_COMPACTOR;
257 if (FLAG_gc_global || (FLAG_stress_compaction && (gc_count_ & 1) != 0)) {
258 *reason = "GC in old space forced by flags";
259 return MARK_COMPACTOR;
262 // Is enough data promoted to justify a global GC?
263 if (OldGenerationAllocationLimitReached()) {
264 isolate_->counters()->gc_compactor_caused_by_promoted_data()->Increment();
265 *reason = "promotion limit reached";
266 return MARK_COMPACTOR;
269 // Have allocation in OLD and LO failed?
270 if (old_gen_exhausted_) {
272 ->gc_compactor_caused_by_oldspace_exhaustion()
274 *reason = "old generations exhausted";
275 return MARK_COMPACTOR;
  // Is there enough space left in OLD to guarantee that a scavenge can
  // succeed?
  //
281 // Note that MemoryAllocator->MaxAvailable() undercounts the memory available
282 // for object promotion. It counts only the bytes that the memory
283 // allocator has not yet allocated from the OS and assigned to any space,
284 // and does not count available bytes already in the old space or code
285 // space. Undercounting is safe---we may get an unrequested full GC when
286 // a scavenge would have succeeded.
287 if (isolate_->memory_allocator()->MaxAvailable() <= new_space_.Size()) {
289 ->gc_compactor_caused_by_oldspace_exhaustion()
291 *reason = "scavenge might not succeed";
    return MARK_COMPACTOR;
  }

  // Default
  *reason = NULL;
  return SCAVENGER;
}

301 // TODO(1238405): Combine the infrastructure for --heap-stats and
302 // --log-gc to avoid the complicated preprocessor and flag testing.
303 void Heap::ReportStatisticsBeforeGC() {
  // Heap::ReportHeapStatistics will also log NewSpace statistics when
  // --log-gc is set.  The following logic is used to avoid double logging.
#ifdef DEBUG
  if (FLAG_heap_stats || FLAG_log_gc) new_space_.CollectStatistics();
  if (FLAG_heap_stats) {
    ReportHeapStatistics("Before GC");
  } else if (FLAG_log_gc) {
    new_space_.ReportStatistics();
  }
  if (FLAG_heap_stats || FLAG_log_gc) new_space_.ClearHistograms();
#else
  if (FLAG_log_gc) {
    new_space_.CollectStatistics();
    new_space_.ReportStatistics();
    new_space_.ClearHistograms();
  }
#endif  // DEBUG
}
325 void Heap::PrintShortHeapStatistics() {
326 if (!FLAG_trace_gc_verbose) return;
327 PrintPID("Memory allocator, used: %6" V8_PTR_PREFIX
329 ", available: %6" V8_PTR_PREFIX "d KB\n",
330 isolate_->memory_allocator()->Size() / KB,
331 isolate_->memory_allocator()->Available() / KB);
332 PrintPID("New space, used: %6" V8_PTR_PREFIX
334 ", available: %6" V8_PTR_PREFIX
336 ", committed: %6" V8_PTR_PREFIX "d KB\n",
337 new_space_.Size() / KB, new_space_.Available() / KB,
338 new_space_.CommittedMemory() / KB);
339 PrintPID("Old pointers, used: %6" V8_PTR_PREFIX
341 ", available: %6" V8_PTR_PREFIX
343 ", committed: %6" V8_PTR_PREFIX "d KB\n",
344 old_pointer_space_->SizeOfObjects() / KB,
345 old_pointer_space_->Available() / KB,
346 old_pointer_space_->CommittedMemory() / KB);
347 PrintPID("Old data space, used: %6" V8_PTR_PREFIX
349 ", available: %6" V8_PTR_PREFIX
351 ", committed: %6" V8_PTR_PREFIX "d KB\n",
352 old_data_space_->SizeOfObjects() / KB,
353 old_data_space_->Available() / KB,
354 old_data_space_->CommittedMemory() / KB);
355 PrintPID("Code space, used: %6" V8_PTR_PREFIX
357 ", available: %6" V8_PTR_PREFIX
359 ", committed: %6" V8_PTR_PREFIX "d KB\n",
360 code_space_->SizeOfObjects() / KB, code_space_->Available() / KB,
361 code_space_->CommittedMemory() / KB);
362 PrintPID("Map space, used: %6" V8_PTR_PREFIX
364 ", available: %6" V8_PTR_PREFIX
366 ", committed: %6" V8_PTR_PREFIX "d KB\n",
367 map_space_->SizeOfObjects() / KB, map_space_->Available() / KB,
368 map_space_->CommittedMemory() / KB);
369 PrintPID("Cell space, used: %6" V8_PTR_PREFIX
371 ", available: %6" V8_PTR_PREFIX
373 ", committed: %6" V8_PTR_PREFIX "d KB\n",
374 cell_space_->SizeOfObjects() / KB, cell_space_->Available() / KB,
375 cell_space_->CommittedMemory() / KB);
376 PrintPID("PropertyCell space, used: %6" V8_PTR_PREFIX
378 ", available: %6" V8_PTR_PREFIX
380 ", committed: %6" V8_PTR_PREFIX "d KB\n",
381 property_cell_space_->SizeOfObjects() / KB,
382 property_cell_space_->Available() / KB,
383 property_cell_space_->CommittedMemory() / KB);
384 PrintPID("Large object space, used: %6" V8_PTR_PREFIX
386 ", available: %6" V8_PTR_PREFIX
388 ", committed: %6" V8_PTR_PREFIX "d KB\n",
389 lo_space_->SizeOfObjects() / KB, lo_space_->Available() / KB,
390 lo_space_->CommittedMemory() / KB);
391 PrintPID("All spaces, used: %6" V8_PTR_PREFIX
393 ", available: %6" V8_PTR_PREFIX
395 ", committed: %6" V8_PTR_PREFIX "d KB\n",
396 this->SizeOfObjects() / KB, this->Available() / KB,
397 this->CommittedMemory() / KB);
398 PrintPID("External memory reported: %6" V8_PTR_PREFIX "d KB\n",
399 static_cast<intptr_t>(amount_of_external_allocated_memory_ / KB));
400 PrintPID("Total time spent in GC : %.1f ms\n", total_gc_time_ms_);
404 // TODO(1238405): Combine the infrastructure for --heap-stats and
405 // --log-gc to avoid the complicated preprocessor and flag testing.
406 void Heap::ReportStatisticsAfterGC() {
  // Similar to the code before GC, we use some complicated logic to ensure
  // that NewSpace statistics are logged exactly once when --log-gc is turned
  // on.
#ifdef DEBUG
  if (FLAG_heap_stats) {
    new_space_.CollectStatistics();
    ReportHeapStatistics("After GC");
  } else if (FLAG_log_gc) {
    new_space_.ReportStatistics();
  }
#else
  if (FLAG_log_gc) new_space_.ReportStatistics();
#endif  // DEBUG
}
422 void Heap::GarbageCollectionPrologue() {
424 AllowHeapAllocation for_the_first_part_of_prologue;
425 ClearJSFunctionResultCaches();
427 unflattened_strings_length_ = 0;
429 if (FLAG_flush_code && FLAG_flush_code_incrementally) {
    mark_compact_collector()->EnableCodeFlushing(true);
  }

#ifdef VERIFY_HEAP
  if (FLAG_verify_heap) {
    Verify();
  }
#endif
440 // Reset GC statistics.
441 promoted_objects_size_ = 0;
442 previous_semi_space_copied_object_size_ = semi_space_copied_object_size_;
443 semi_space_copied_object_size_ = 0;
444 nodes_died_in_new_space_ = 0;
445 nodes_copied_in_new_space_ = 0;
448 UpdateMaximumCommitted();
451 DCHECK(!AllowHeapAllocation::IsAllowed() && gc_state_ == NOT_IN_GC);
453 if (FLAG_gc_verbose) Print();
455 ReportStatisticsBeforeGC();
458 store_buffer()->GCPrologue();
460 if (isolate()->concurrent_osr_enabled()) {
    isolate()->optimizing_compiler_thread()->AgeBufferedOsrJobs();
  }

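  // Track whether this collection starts with new space already at its
  // maximum capacity; ProcessPretenuringFeedback() consults this (via
  // MaximumSizeScavenge()) when digesting pretenuring feedback.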
464 if (new_space_.IsAtMaximumCapacity()) {
    maximum_size_scavenges_++;
  } else {
    maximum_size_scavenges_ = 0;
  }
469 CheckNewSpaceExpansionCriteria();
473 intptr_t Heap::SizeOfObjects() {
475 AllSpaces spaces(this);
476 for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
477 total += space->SizeOfObjects();
483 void Heap::ClearAllICsByKind(Code::Kind kind) {
484 HeapObjectIterator it(code_space());
486 for (Object* object = it.Next(); object != NULL; object = it.Next()) {
487 Code* code = Code::cast(object);
488 Code::Kind current_kind = code->kind();
489 if (current_kind == Code::FUNCTION ||
490 current_kind == Code::OPTIMIZED_FUNCTION) {
491 code->ClearInlineCaches(kind);
497 void Heap::RepairFreeListsAfterDeserialization() {
498 PagedSpaces spaces(this);
499 for (PagedSpace* space = spaces.next(); space != NULL;
500 space = spaces.next()) {
501 space->RepairFreeListsAfterDeserialization();
506 void Heap::ProcessPretenuringFeedback() {
507 if (FLAG_allocation_site_pretenuring) {
508 int tenure_decisions = 0;
509 int dont_tenure_decisions = 0;
510 int allocation_mementos_found = 0;
511 int allocation_sites = 0;
512 int active_allocation_sites = 0;
    // If the scratchpad overflowed, we have to iterate over the allocation
    // sites list.
516 // TODO(hpayer): We iterate over the whole list of allocation sites when
517 // we grew to the maximum semi-space size to deopt maybe tenured
518 // allocation sites. We could hold the maybe tenured allocation sites
    // in a separate data structure if this is a performance problem.
520 bool deopt_maybe_tenured = DeoptMaybeTenuredAllocationSites();
521 bool use_scratchpad =
522 allocation_sites_scratchpad_length_ < kAllocationSiteScratchpadSize &&
523 !deopt_maybe_tenured;
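    // When the scratchpad has not overflowed and no maybe-tenured sites need
    // to be deoptimized, only the scratchpad entries are visited below;
    // otherwise the full weak list of allocation sites is walked.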
526 Object* list_element = allocation_sites_list();
527 bool trigger_deoptimization = false;
528 bool maximum_size_scavenge = MaximumSizeScavenge();
529 while (use_scratchpad ? i < allocation_sites_scratchpad_length_
530 : list_element->IsAllocationSite()) {
531 AllocationSite* site =
533 ? AllocationSite::cast(allocation_sites_scratchpad()->get(i))
534 : AllocationSite::cast(list_element);
535 allocation_mementos_found += site->memento_found_count();
536 if (site->memento_found_count() > 0) {
537 active_allocation_sites++;
538 if (site->DigestPretenuringFeedback(maximum_size_scavenge)) {
539 trigger_deoptimization = true;
          if (site->GetPretenureMode() == TENURED) {
            tenure_decisions++;
          } else {
            dont_tenure_decisions++;
          }
549 if (deopt_maybe_tenured && site->IsMaybeTenure()) {
550 site->set_deopt_dependent_code(true);
551 trigger_deoptimization = true;
      if (use_scratchpad) {
        i++;
      } else {
        list_element = site->weak_next();
      }
561 if (trigger_deoptimization) {
562 isolate_->stack_guard()->RequestDeoptMarkedAllocationSites();
565 FlushAllocationSitesScratchpad();
567 if (FLAG_trace_pretenuring_statistics &&
568 (allocation_mementos_found > 0 || tenure_decisions > 0 ||
569 dont_tenure_decisions > 0)) {
571 "GC: (mode, #visited allocation sites, #active allocation sites, "
572 "#mementos, #tenure decisions, #donttenure decisions) "
573 "(%s, %d, %d, %d, %d, %d)\n",
574 use_scratchpad ? "use scratchpad" : "use list", allocation_sites,
575 active_allocation_sites, allocation_mementos_found, tenure_decisions,
576 dont_tenure_decisions);
582 void Heap::DeoptMarkedAllocationSites() {
583 // TODO(hpayer): If iterating over the allocation sites list becomes a
584 // performance issue, use a cache heap data structure instead (similar to the
585 // allocation sites scratchpad).
586 Object* list_element = allocation_sites_list();
587 while (list_element->IsAllocationSite()) {
588 AllocationSite* site = AllocationSite::cast(list_element);
589 if (site->deopt_dependent_code()) {
590 site->dependent_code()->MarkCodeForDeoptimization(
591 isolate_, DependentCode::kAllocationSiteTenuringChangedGroup);
      site->set_deopt_dependent_code(false);
    }
    list_element = site->weak_next();
  }
  Deoptimizer::DeoptimizeMarkedCode(isolate_);
}
600 void Heap::GarbageCollectionEpilogue() {
601 store_buffer()->GCEpilogue();
603 // In release mode, we only zap the from space under heap verification.
  if (Heap::ShouldZapGarbage()) {
    ZapFromSpace();
  }
608 // Process pretenuring feedback and update allocation sites.
609 ProcessPretenuringFeedback();
#ifdef VERIFY_HEAP
  if (FLAG_verify_heap) {
    Verify();
  }
#endif
617 AllowHeapAllocation for_the_rest_of_the_epilogue;
620 if (FLAG_print_global_handles) isolate_->global_handles()->Print();
621 if (FLAG_print_handles) PrintHandles();
622 if (FLAG_gc_verbose) Print();
623 if (FLAG_code_stats) ReportCodeStatistics("After GC");
625 if (FLAG_deopt_every_n_garbage_collections > 0) {
626 // TODO(jkummerow/ulan/jarin): This is not safe! We can't assume that
627 // the topmost optimized frame can be deoptimized safely, because it
628 // might not have a lazy bailout point right after its current PC.
629 if (++gcs_since_last_deopt_ == FLAG_deopt_every_n_garbage_collections) {
630 Deoptimizer::DeoptimizeAll(isolate());
631 gcs_since_last_deopt_ = 0;
635 UpdateMaximumCommitted();
637 isolate_->counters()->alive_after_last_gc()->Set(
638 static_cast<int>(SizeOfObjects()));
640 isolate_->counters()->string_table_capacity()->Set(
641 string_table()->Capacity());
642 isolate_->counters()->number_of_symbols()->Set(
643 string_table()->NumberOfElements());
645 if (full_codegen_bytes_generated_ + crankshaft_codegen_bytes_generated_ > 0) {
646 isolate_->counters()->codegen_fraction_crankshaft()->AddSample(
647 static_cast<int>((crankshaft_codegen_bytes_generated_ * 100.0) /
648 (crankshaft_codegen_bytes_generated_ +
649 full_codegen_bytes_generated_)));
652 if (CommittedMemory() > 0) {
653 isolate_->counters()->external_fragmentation_total()->AddSample(
654 static_cast<int>(100 - (SizeOfObjects() * 100.0) / CommittedMemory()));
656 isolate_->counters()->heap_fraction_new_space()->AddSample(static_cast<int>(
657 (new_space()->CommittedMemory() * 100.0) / CommittedMemory()));
658 isolate_->counters()->heap_fraction_old_pointer_space()->AddSample(
659 static_cast<int>((old_pointer_space()->CommittedMemory() * 100.0) /
661 isolate_->counters()->heap_fraction_old_data_space()->AddSample(
662 static_cast<int>((old_data_space()->CommittedMemory() * 100.0) /
664 isolate_->counters()->heap_fraction_code_space()->AddSample(
665 static_cast<int>((code_space()->CommittedMemory() * 100.0) /
667 isolate_->counters()->heap_fraction_map_space()->AddSample(static_cast<int>(
668 (map_space()->CommittedMemory() * 100.0) / CommittedMemory()));
669 isolate_->counters()->heap_fraction_cell_space()->AddSample(
670 static_cast<int>((cell_space()->CommittedMemory() * 100.0) /
672 isolate_->counters()->heap_fraction_property_cell_space()->AddSample(
673 static_cast<int>((property_cell_space()->CommittedMemory() * 100.0) /
675 isolate_->counters()->heap_fraction_lo_space()->AddSample(static_cast<int>(
676 (lo_space()->CommittedMemory() * 100.0) / CommittedMemory()));
678 isolate_->counters()->heap_sample_total_committed()->AddSample(
679 static_cast<int>(CommittedMemory() / KB));
680 isolate_->counters()->heap_sample_total_used()->AddSample(
681 static_cast<int>(SizeOfObjects() / KB));
682 isolate_->counters()->heap_sample_map_space_committed()->AddSample(
683 static_cast<int>(map_space()->CommittedMemory() / KB));
684 isolate_->counters()->heap_sample_cell_space_committed()->AddSample(
685 static_cast<int>(cell_space()->CommittedMemory() / KB));
687 ->heap_sample_property_cell_space_committed()
689 static_cast<int>(property_cell_space()->CommittedMemory() / KB));
690 isolate_->counters()->heap_sample_code_space_committed()->AddSample(
691 static_cast<int>(code_space()->CommittedMemory() / KB));
693 isolate_->counters()->heap_sample_maximum_committed()->AddSample(
        static_cast<int>(MaximumCommittedMemory() / KB));
  }

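// The macros below update the per-space "available / committed / used" byte
// counters and, for spaces that can fragment, sample external fragmentation
// as 100 - used * 100 / committed.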
697 #define UPDATE_COUNTERS_FOR_SPACE(space) \
698 isolate_->counters()->space##_bytes_available()->Set( \
699 static_cast<int>(space()->Available())); \
700 isolate_->counters()->space##_bytes_committed()->Set( \
701 static_cast<int>(space()->CommittedMemory())); \
702 isolate_->counters()->space##_bytes_used()->Set( \
703 static_cast<int>(space()->SizeOfObjects()));
704 #define UPDATE_FRAGMENTATION_FOR_SPACE(space) \
705 if (space()->CommittedMemory() > 0) { \
706 isolate_->counters()->external_fragmentation_##space()->AddSample( \
707 static_cast<int>(100 - \
708 (space()->SizeOfObjects() * 100.0) / \
                             space()->CommittedMemory()));             \
  }
711 #define UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(space) \
712 UPDATE_COUNTERS_FOR_SPACE(space) \
713 UPDATE_FRAGMENTATION_FOR_SPACE(space)
715 UPDATE_COUNTERS_FOR_SPACE(new_space)
716 UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(old_pointer_space)
717 UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(old_data_space)
718 UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(code_space)
719 UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(map_space)
720 UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(cell_space)
721 UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(property_cell_space)
722 UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(lo_space)
723 #undef UPDATE_COUNTERS_FOR_SPACE
724 #undef UPDATE_FRAGMENTATION_FOR_SPACE
725 #undef UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE
728 ReportStatisticsAfterGC();
731 // Remember the last top pointer so that we can later find out
732 // whether we allocated in new space since the last GC.
733 new_space_top_after_last_gc_ = new_space()->top();
737 void Heap::HandleGCRequest() {
738 if (incremental_marking()->request_type() ==
739 IncrementalMarking::COMPLETE_MARKING) {
    CollectAllGarbage(Heap::kNoGCFlags, "GC interrupt");
    return;
  }
743 DCHECK(FLAG_overapproximate_weak_closure);
744 DCHECK(!incremental_marking()->weak_closure_was_overapproximated());
745 OverApproximateWeakClosure("GC interrupt");
749 void Heap::OverApproximateWeakClosure(const char* gc_reason) {
750 if (FLAG_trace_incremental_marking) {
751 PrintF("[IncrementalMarking] Overapproximate weak closure (%s).\n",
755 GCCallbacksScope scope(this);
756 if (scope.CheckReenter()) {
757 AllowHeapAllocation allow_allocation;
758 GCTracer::Scope scope(tracer(), GCTracer::Scope::EXTERNAL);
759 VMState<EXTERNAL> state(isolate_);
760 HandleScope handle_scope(isolate_);
761 CallGCPrologueCallbacks(kGCTypeMarkSweepCompact, kNoGCCallbackFlags);
764 mark_compact_collector()->OverApproximateWeakClosure();
765 incremental_marking()->set_should_hurry(false);
766 incremental_marking()->set_weak_closure_was_overapproximated(true);
768 GCCallbacksScope scope(this);
769 if (scope.CheckReenter()) {
770 AllowHeapAllocation allow_allocation;
771 GCTracer::Scope scope(tracer(), GCTracer::Scope::EXTERNAL);
772 VMState<EXTERNAL> state(isolate_);
773 HandleScope handle_scope(isolate_);
774 CallGCEpilogueCallbacks(kGCTypeMarkSweepCompact, kNoGCCallbackFlags);
780 void Heap::CollectAllGarbage(int flags, const char* gc_reason,
781 const v8::GCCallbackFlags gc_callback_flags) {
  // Since we are ignoring the return value, the exact choice of space does
  // not matter, so long as we do not specify NEW_SPACE, which would not
  // cause a full GC.
785 mark_compact_collector_.SetFlags(flags);
786 CollectGarbage(OLD_POINTER_SPACE, gc_reason, gc_callback_flags);
787 mark_compact_collector_.SetFlags(kNoGCFlags);
791 void Heap::CollectAllAvailableGarbage(const char* gc_reason) {
792 // Since we are ignoring the return value, the exact choice of space does
  // not matter, so long as we do not specify NEW_SPACE, which would not
  // cause a full GC.
795 // Major GC would invoke weak handle callbacks on weakly reachable
796 // handles, but won't collect weakly reachable objects until next
797 // major GC. Therefore if we collect aggressively and weak handle callback
  // has been invoked, we rerun major GC to release objects which become
  // garbage.
800 // Note: as weak callbacks can execute arbitrary code, we cannot
801 // hope that eventually there will be no weak callbacks invocations.
802 // Therefore stop recollecting after several attempts.
803 if (isolate()->concurrent_recompilation_enabled()) {
804 // The optimizing compiler may be unnecessarily holding on to memory.
805 DisallowHeapAllocation no_recursive_gc;
806 isolate()->optimizing_compiler_thread()->Flush();
808 mark_compact_collector()->SetFlags(kMakeHeapIterableMask |
809 kReduceMemoryFootprintMask);
810 isolate_->compilation_cache()->Clear();
811 const int kMaxNumberOfAttempts = 7;
812 const int kMinNumberOfAttempts = 2;
813 for (int attempt = 0; attempt < kMaxNumberOfAttempts; attempt++) {
814 if (!CollectGarbage(MARK_COMPACTOR, gc_reason, NULL) &&
        attempt + 1 >= kMinNumberOfAttempts) {
      break;
    }
  }
819 mark_compact_collector()->SetFlags(kNoGCFlags);
825 void Heap::EnsureFillerObjectAtTop() {
826 // There may be an allocation memento behind every object in new space.
827 // If we evacuate a not full new space or if we are on the last page of
828 // the new space, then there may be uninitialized memory behind the top
829 // pointer of the new space page. We store a filler object there to
830 // identify the unused space.
831 Address from_top = new_space_.top();
832 Address from_limit = new_space_.limit();
833 if (from_top < from_limit) {
834 int remaining_in_page = static_cast<int>(from_limit - from_top);
835 CreateFillerObjectAt(from_top, remaining_in_page);
840 bool Heap::CollectGarbage(GarbageCollector collector, const char* gc_reason,
841 const char* collector_reason,
842 const v8::GCCallbackFlags gc_callback_flags) {
843 // The VM is in the GC state until exiting this function.
844 VMState<GC> state(isolate_);
847 // Reset the allocation timeout to the GC interval, but make sure to
848 // allow at least a few allocations after a collection. The reason
849 // for this is that we have a lot of allocation sequences and we
850 // assume that a garbage collection will allow the subsequent
851 // allocation attempts to go through.
852 allocation_timeout_ = Max(6, FLAG_gc_interval);
855 EnsureFillerObjectAtTop();
857 if (collector == SCAVENGER && !incremental_marking()->IsStopped()) {
858 if (FLAG_trace_incremental_marking) {
859 PrintF("[IncrementalMarking] Scavenge during marking.\n");
863 if (collector == MARK_COMPACTOR &&
864 !mark_compact_collector()->abort_incremental_marking() &&
865 !incremental_marking()->IsStopped() &&
866 !incremental_marking()->should_hurry() &&
867 FLAG_incremental_marking_steps) {
868 // Make progress in incremental marking.
869 const intptr_t kStepSizeWhenDelayedByScavenge = 1 * MB;
870 incremental_marking()->Step(kStepSizeWhenDelayedByScavenge,
871 IncrementalMarking::NO_GC_VIA_STACK_GUARD);
872 if (!incremental_marking()->IsComplete() &&
873 !mark_compact_collector_.marking_deque_.IsEmpty() && !FLAG_gc_global) {
874 if (FLAG_trace_incremental_marking) {
875 PrintF("[IncrementalMarking] Delaying MarkSweep.\n");
877 collector = SCAVENGER;
878 collector_reason = "incremental marking delaying mark-sweep";
882 bool next_gc_likely_to_collect_more = false;
885 tracer()->Start(collector, gc_reason, collector_reason);
886 DCHECK(AllowHeapAllocation::IsAllowed());
887 DisallowHeapAllocation no_allocation_during_gc;
888 GarbageCollectionPrologue();
891 HistogramTimerScope histogram_timer_scope(
892 (collector == SCAVENGER) ? isolate_->counters()->gc_scavenger()
893 : isolate_->counters()->gc_compactor());
894 next_gc_likely_to_collect_more =
895 PerformGarbageCollection(collector, gc_callback_flags);
898 GarbageCollectionEpilogue();
899 if (collector == MARK_COMPACTOR && FLAG_track_detached_contexts) {
900 isolate()->CheckDetachedContextsAfterGC();
902 tracer()->Stop(collector);
905 // Start incremental marking for the next cycle. The heap snapshot
906 // generator needs incremental marking to stay off after it aborted.
907 if (!mark_compact_collector()->abort_incremental_marking() &&
908 WorthActivatingIncrementalMarking()) {
909 incremental_marking()->Start();
912 return next_gc_likely_to_collect_more;
916 int Heap::NotifyContextDisposed(bool dependant_context) {
917 if (!dependant_context) {
918 tracer()->ResetSurvivalEvents();
919 old_generation_size_configured_ = false;
921 if (isolate()->concurrent_recompilation_enabled()) {
922 // Flush the queued recompilation tasks.
923 isolate()->optimizing_compiler_thread()->Flush();
926 tracer()->AddContextDisposalTime(base::OS::TimeCurrentMillis());
927 return ++contexts_disposed_;
void Heap::MoveElements(FixedArray* array, int dst_index, int src_index,
                        int len) {
933 if (len == 0) return;
935 DCHECK(array->map() != fixed_cow_array_map());
936 Object** dst_objects = array->data_start() + dst_index;
937 MemMove(dst_objects, array->data_start() + src_index, len * kPointerSize);
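  // MemMove bypasses the write barrier, so for arrays outside new space
  // re-record any moved element that now points into new space, and notify
  // incremental marking of the writes into the array.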
938 if (!InNewSpace(array)) {
939 for (int i = 0; i < len; i++) {
940 // TODO(hpayer): check store buffer for entries
941 if (InNewSpace(dst_objects[i])) {
942 RecordWrite(array->address(), array->OffsetOfElementAt(dst_index + i));
946 incremental_marking()->RecordWrites(array);
#ifdef VERIFY_HEAP
// Helper class for verifying the string table.
class StringTableVerifier : public ObjectVisitor {
 public:
954 void VisitPointers(Object** start, Object** end) {
955 // Visit all HeapObject pointers in [start, end).
956 for (Object** p = start; p < end; p++) {
957 if ((*p)->IsHeapObject()) {
958 // Check that the string is actually internalized.
959 CHECK((*p)->IsTheHole() || (*p)->IsUndefined() ||
960 (*p)->IsInternalizedString());
967 static void VerifyStringTable(Heap* heap) {
968 StringTableVerifier verifier;
969 heap->string_table()->IterateElements(&verifier);
971 #endif // VERIFY_HEAP
974 bool Heap::ReserveSpace(Reservation* reservations) {
975 bool gc_performed = true;
  int counter = 0;
  static const int kThreshold = 20;
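  // Retry for up to kThreshold rounds until every space can satisfy its
  // reservation without needing a GC; the return value tells the caller
  // whether that eventually succeeded.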
978 while (gc_performed && counter++ < kThreshold) {
979 gc_performed = false;
980 for (int space = NEW_SPACE; space < Serializer::kNumberOfSpaces; space++) {
981 Reservation* reservation = &reservations[space];
982 DCHECK_LE(1, reservation->length());
983 if (reservation->at(0).size == 0) continue;
984 bool perform_gc = false;
985 if (space == LO_SPACE) {
986 DCHECK_EQ(1, reservation->length());
987 perform_gc = !lo_space()->CanAllocateSize(reservation->at(0).size);
      } else {
        for (auto& chunk : *reservation) {
990 AllocationResult allocation;
991 int size = chunk.size;
992 DCHECK_LE(size, MemoryAllocator::PageAreaSize(
993 static_cast<AllocationSpace>(space)));
994 if (space == NEW_SPACE) {
995 allocation = new_space()->AllocateRaw(size);
          } else {
            allocation = paged_space(space)->AllocateRaw(size);
          }
999 HeapObject* free_space;
1000 if (allocation.To(&free_space)) {
            // Mark with a free list node, in case we have a GC before
            // deserializing.
1003 Address free_space_address = free_space->address();
1004 CreateFillerObjectAt(free_space_address, size);
1005 DCHECK(space < Serializer::kNumberOfPreallocatedSpaces);
1006 chunk.start = free_space_address;
1007 chunk.end = free_space_address + size;
1015 if (space == NEW_SPACE) {
1016 CollectGarbage(NEW_SPACE, "failed to reserve space in the new space");
1020 kReduceMemoryFootprintMask,
1021 "failed to reserve space in paged or large "
1022 "object space, trying to reduce memory footprint");
1025 kAbortIncrementalMarkingMask,
1026 "failed to reserve space in paged or large object space");
1029 gc_performed = true;
1030 break; // Abort for-loop over spaces and retry.
1035 return !gc_performed;
1039 void Heap::EnsureFromSpaceIsCommitted() {
1040 if (new_space_.CommitFromSpaceIfNeeded()) return;
1042 // Committing memory to from space failed.
1043 // Memory is exhausted and we will die.
1044 V8::FatalProcessOutOfMemory("Committing semi space failed.");
1048 void Heap::ClearJSFunctionResultCaches() {
1049 if (isolate_->bootstrapper()->IsActive()) return;
1051 Object* context = native_contexts_list();
1052 while (!context->IsUndefined()) {
1053 // Get the caches for this context. GC can happen when the context
1054 // is not fully initialized, so the caches can be undefined.
1055 Object* caches_or_undefined =
1056 Context::cast(context)->get(Context::JSFUNCTION_RESULT_CACHES_INDEX);
1057 if (!caches_or_undefined->IsUndefined()) {
1058 FixedArray* caches = FixedArray::cast(caches_or_undefined);
1059 // Clear the caches:
1060 int length = caches->length();
1061 for (int i = 0; i < length; i++) {
1062 JSFunctionResultCache::cast(caches->get(i))->Clear();
1065 // Get the next context:
1066 context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
1071 void Heap::ClearNormalizedMapCaches() {
1072 if (isolate_->bootstrapper()->IsActive() &&
      !incremental_marking()->IsMarking()) {
    return;
  }
1077 Object* context = native_contexts_list();
1078 while (!context->IsUndefined()) {
1079 // GC can happen when the context is not fully initialized,
1080 // so the cache can be undefined.
    Object* cache =
        Context::cast(context)->get(Context::NORMALIZED_MAP_CACHE_INDEX);
1083 if (!cache->IsUndefined()) {
1084 NormalizedMapCache::cast(cache)->Clear();
1086 context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
1091 void Heap::UpdateSurvivalStatistics(int start_new_space_size) {
1092 if (start_new_space_size == 0) return;
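  // promotion_ratio_:        bytes promoted to old space relative to the size
  //                          of new space at the start of this GC.
  // promotion_rate_:         bytes promoted relative to the bytes copied
  //                          within the semispaces by the previous GC.
  // semi_space_copied_rate_: bytes copied within the semispaces relative to
  //                          the size of new space at the start of this GC.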
1094 promotion_ratio_ = (static_cast<double>(promoted_objects_size_) /
1095 static_cast<double>(start_new_space_size) * 100);
  if (previous_semi_space_copied_object_size_ > 0) {
    promotion_rate_ =
        (static_cast<double>(promoted_objects_size_) /
         static_cast<double>(previous_semi_space_copied_object_size_) * 100);
  } else {
    promotion_rate_ = 0;
  }
1105 semi_space_copied_rate_ =
1106 (static_cast<double>(semi_space_copied_object_size_) /
1107 static_cast<double>(start_new_space_size) * 100);
1109 double survival_rate = promotion_ratio_ + semi_space_copied_rate_;
1110 tracer()->AddSurvivalRatio(survival_rate);
1111 if (survival_rate > kYoungSurvivalRateHighThreshold) {
    high_survival_rate_period_length_++;
  } else {
    high_survival_rate_period_length_ = 0;
  }
}
1118 bool Heap::PerformGarbageCollection(
1119 GarbageCollector collector, const v8::GCCallbackFlags gc_callback_flags) {
1120 int freed_global_handles = 0;
1122 if (collector != SCAVENGER) {
1123 PROFILE(isolate_, CodeMovingGCEvent());
  }

#ifdef VERIFY_HEAP
  if (FLAG_verify_heap) {
    VerifyStringTable(this);
  }
#endif

  GCType gc_type =
      collector == MARK_COMPACTOR ? kGCTypeMarkSweepCompact : kGCTypeScavenge;
1136 GCCallbacksScope scope(this);
1137 if (scope.CheckReenter()) {
1138 AllowHeapAllocation allow_allocation;
1139 GCTracer::Scope scope(tracer(), GCTracer::Scope::EXTERNAL);
1140 VMState<EXTERNAL> state(isolate_);
1141 HandleScope handle_scope(isolate_);
1142 CallGCPrologueCallbacks(gc_type, kNoGCCallbackFlags);
1146 EnsureFromSpaceIsCommitted();
1148 int start_new_space_size = Heap::new_space()->SizeAsInt();
1150 if (IsHighSurvivalRate()) {
1151 // We speed up the incremental marker if it is running so that it
1152 // does not fall behind the rate of promotion, which would cause a
1153 // constantly growing old space.
1154 incremental_marking()->NotifyOfHighPromotionRate();
1157 if (collector == MARK_COMPACTOR) {
    // Perform mark-sweep with optional compaction.
    MarkCompact();
1160 sweep_generation_++;
1161 // Temporarily set the limit for case when PostGarbageCollectionProcessing
1162 // allocates and triggers GC. The real limit is set at after
1163 // PostGarbageCollectionProcessing.
1164 SetOldGenerationAllocationLimit(PromotedSpaceSizeOfObjects(), 0);
1165 old_gen_exhausted_ = false;
    old_generation_size_configured_ = true;
  } else {
    Scavenge();
  }
1171 UpdateSurvivalStatistics(start_new_space_size);
1172 ConfigureInitialOldGenerationSize();
1174 isolate_->counters()->objs_since_last_young()->Set(0);
1176 // Callbacks that fire after this point might trigger nested GCs and
1177 // restart incremental marking, the assertion can't be moved down.
1178 DCHECK(collector == SCAVENGER || incremental_marking()->IsStopped());
1180 gc_post_processing_depth_++;
1182 AllowHeapAllocation allow_allocation;
1183 GCTracer::Scope scope(tracer(), GCTracer::Scope::EXTERNAL);
1184 freed_global_handles =
1185 isolate_->global_handles()->PostGarbageCollectionProcessing(collector);
1187 gc_post_processing_depth_--;
1189 isolate_->eternal_handles()->PostGarbageCollectionProcessing(this);
1191 // Update relocatables.
1192 Relocatable::PostGarbageCollectionProcessing(isolate_);
1194 if (collector == MARK_COMPACTOR) {
1195 // Register the amount of external allocated memory.
1196 amount_of_external_allocated_memory_at_last_global_gc_ =
1197 amount_of_external_allocated_memory_;
1198 SetOldGenerationAllocationLimit(PromotedSpaceSizeOfObjects(),
1199 freed_global_handles);
1200 // We finished a marking cycle. We can uncommit the marking deque until
1201 // we start marking again.
1202 mark_compact_collector_.UncommitMarkingDeque();
1206 GCCallbacksScope scope(this);
1207 if (scope.CheckReenter()) {
1208 AllowHeapAllocation allow_allocation;
1209 GCTracer::Scope scope(tracer(), GCTracer::Scope::EXTERNAL);
1210 VMState<EXTERNAL> state(isolate_);
1211 HandleScope handle_scope(isolate_);
1212 CallGCEpilogueCallbacks(gc_type, gc_callback_flags);
#ifdef VERIFY_HEAP
  if (FLAG_verify_heap) {
    VerifyStringTable(this);
  }
#endif
1222 return freed_global_handles > 0;
1226 void Heap::CallGCPrologueCallbacks(GCType gc_type, GCCallbackFlags flags) {
1227 for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
1228 if (gc_type & gc_prologue_callbacks_[i].gc_type) {
1229 if (!gc_prologue_callbacks_[i].pass_isolate_) {
1230 v8::GCPrologueCallback callback =
1231 reinterpret_cast<v8::GCPrologueCallback>(
1232 gc_prologue_callbacks_[i].callback);
1233 callback(gc_type, flags);
1235 v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(this->isolate());
1236 gc_prologue_callbacks_[i].callback(isolate, gc_type, flags);
1243 void Heap::CallGCEpilogueCallbacks(GCType gc_type,
1244 GCCallbackFlags gc_callback_flags) {
1245 for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
1246 if (gc_type & gc_epilogue_callbacks_[i].gc_type) {
1247 if (!gc_epilogue_callbacks_[i].pass_isolate_) {
1248 v8::GCPrologueCallback callback =
1249 reinterpret_cast<v8::GCPrologueCallback>(
1250 gc_epilogue_callbacks_[i].callback);
1251 callback(gc_type, gc_callback_flags);
1253 v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(this->isolate());
1254 gc_epilogue_callbacks_[i].callback(isolate, gc_type, gc_callback_flags);
1261 void Heap::MarkCompact() {
1262 gc_state_ = MARK_COMPACT;
1263 LOG(isolate_, ResourceEvent("markcompact", "begin"));
1265 uint64_t size_of_objects_before_gc = SizeOfObjects();
1267 mark_compact_collector_.Prepare();
1271 MarkCompactPrologue();
1273 mark_compact_collector_.CollectGarbage();
1275 LOG(isolate_, ResourceEvent("markcompact", "end"));
1277 MarkCompactEpilogue();
1279 if (FLAG_allocation_site_pretenuring) {
1280 EvaluateOldSpaceLocalPretenuring(size_of_objects_before_gc);
1285 void Heap::MarkCompactEpilogue() {
1286 gc_state_ = NOT_IN_GC;
1288 isolate_->counters()->objs_since_last_full()->Set(0);
1290 incremental_marking()->Epilogue();
1294 void Heap::MarkCompactPrologue() {
  // At any old GC clear the keyed lookup cache to enable collection of unused
  // maps.
1297 isolate_->keyed_lookup_cache()->Clear();
1298 isolate_->context_slot_cache()->Clear();
1299 isolate_->descriptor_lookup_cache()->Clear();
1300 RegExpResultsCache::Clear(string_split_cache());
1301 RegExpResultsCache::Clear(regexp_multiple_cache());
1303 isolate_->compilation_cache()->MarkCompactPrologue();
1305 CompletelyClearInstanceofCache();
1307 FlushNumberStringCache();
1308 if (FLAG_cleanup_code_caches_at_gc) {
1309 polymorphic_code_cache()->set_cache(undefined_value());
1312 ClearNormalizedMapCaches();
1316 // Helper class for copying HeapObjects
1317 class ScavengeVisitor : public ObjectVisitor {
1319 explicit ScavengeVisitor(Heap* heap) : heap_(heap) {}
1321 void VisitPointer(Object** p) { ScavengePointer(p); }
1323 void VisitPointers(Object** start, Object** end) {
1324 // Copy all HeapObject pointers in [start, end)
1325 for (Object** p = start; p < end; p++) ScavengePointer(p);
1329 void ScavengePointer(Object** p) {
1330 Object* object = *p;
1331 if (!heap_->InNewSpace(object)) return;
1332 Heap::ScavengeObject(reinterpret_cast<HeapObject**>(p),
1333 reinterpret_cast<HeapObject*>(object));
#ifdef VERIFY_HEAP
// Visitor class to verify pointers in code or data space do not point into
// new space.
class VerifyNonPointerSpacePointersVisitor : public ObjectVisitor {
 public:
  explicit VerifyNonPointerSpacePointersVisitor(Heap* heap) : heap_(heap) {}
1346 void VisitPointers(Object** start, Object** end) {
1347 for (Object** current = start; current < end; current++) {
1348 if ((*current)->IsHeapObject()) {
1349 CHECK(!heap_->InNewSpace(HeapObject::cast(*current)));
1359 static void VerifyNonPointerSpacePointers(Heap* heap) {
1360 // Verify that there are no pointers to new space in spaces where we
1361 // do not expect them.
1362 VerifyNonPointerSpacePointersVisitor v(heap);
1363 HeapObjectIterator code_it(heap->code_space());
1364 for (HeapObject* object = code_it.Next(); object != NULL;
1365 object = code_it.Next())
1366 object->Iterate(&v);
1368 HeapObjectIterator data_it(heap->old_data_space());
1369 for (HeapObject* object = data_it.Next(); object != NULL;
1370 object = data_it.Next())
1371 object->Iterate(&v);
1373 #endif // VERIFY_HEAP
1376 void Heap::CheckNewSpaceExpansionCriteria() {
1377 if (FLAG_experimental_new_space_growth_heuristic) {
1378 if (new_space_.TotalCapacity() < new_space_.MaximumCapacity() &&
1379 survived_last_scavenge_ * 100 / new_space_.TotalCapacity() >= 10) {
1380 // Grow the size of new space if there is room to grow, and more than 10%
      // have survived the last scavenge.
      new_space_.Grow();
      survived_since_last_expansion_ = 0;
1385 } else if (new_space_.TotalCapacity() < new_space_.MaximumCapacity() &&
1386 survived_since_last_expansion_ > new_space_.TotalCapacity()) {
1387 // Grow the size of new space if there is room to grow, and enough data
1388 // has survived scavenge since the last expansion.
    new_space_.Grow();
    survived_since_last_expansion_ = 0;
  }
}
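

// An object in new space counts as unscavenged if it has not been copied yet,
// i.e. its map word does not yet hold a forwarding address.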
1395 static bool IsUnscavengedHeapObject(Heap* heap, Object** p) {
1396 return heap->InNewSpace(*p) &&
1397 !HeapObject::cast(*p)->map_word().IsForwardingAddress();
1401 void Heap::ScavengeStoreBufferCallback(Heap* heap, MemoryChunk* page,
1402 StoreBufferEvent event) {
1403 heap->store_buffer_rebuilder_.Callback(page, event);
1407 void StoreBufferRebuilder::Callback(MemoryChunk* page, StoreBufferEvent event) {
1408 if (event == kStoreBufferStartScanningPagesEvent) {
1409 start_of_current_page_ = NULL;
1410 current_page_ = NULL;
1411 } else if (event == kStoreBufferScanningPageEvent) {
1412 if (current_page_ != NULL) {
1413 // If this page already overflowed the store buffer during this iteration.
1414 if (current_page_->scan_on_scavenge()) {
1415 // Then we should wipe out the entries that have been added for it.
1416 store_buffer_->SetTop(start_of_current_page_);
1417 } else if (store_buffer_->Top() - start_of_current_page_ >=
1418 (store_buffer_->Limit() - store_buffer_->Top()) >> 2) {
        // Did we find too many pointers in the previous page?  The heuristic
        // is that no page can take more than 1/5 the remaining slots in the
        // store buffer.
1422 current_page_->set_scan_on_scavenge(true);
1423 store_buffer_->SetTop(start_of_current_page_);
1425 // In this case the page we scanned took a reasonable number of slots in
1426 // the store buffer. It has now been rehabilitated and is no longer
1427 // marked scan_on_scavenge.
1428 DCHECK(!current_page_->scan_on_scavenge());
1431 start_of_current_page_ = store_buffer_->Top();
1432 current_page_ = page;
1433 } else if (event == kStoreBufferFullEvent) {
1434 // The current page overflowed the store buffer again. Wipe out its entries
1435 // in the store buffer and mark it scan-on-scavenge again. This may happen
1436 // several times while scanning.
1437 if (current_page_ == NULL) {
1438 // Store Buffer overflowed while scanning promoted objects. These are not
1439 // in any particular page, though they are likely to be clustered by the
1440 // allocation routines.
1441 store_buffer_->EnsureSpace(StoreBuffer::kStoreBufferSize / 2);
1443 // Store Buffer overflowed while scanning a particular old space page for
1444 // pointers to new space.
1445 DCHECK(current_page_ == page);
1446 DCHECK(page != NULL);
1447 current_page_->set_scan_on_scavenge(true);
1448 DCHECK(start_of_current_page_ != store_buffer_->Top());
1449 store_buffer_->SetTop(start_of_current_page_);
1457 void PromotionQueue::Initialize() {
1458 // The last to-space page may be used for promotion queue. On promotion
1459 // conflict, we use the emergency stack.
  DCHECK((Page::kPageSize - MemoryChunk::kBodyOffset) % (2 * kPointerSize) ==
         0);
  front_ = rear_ =
      reinterpret_cast<intptr_t*>(heap_->new_space()->ToSpaceEnd());
1464 limit_ = reinterpret_cast<intptr_t*>(
1465 Page::FromAllocationTop(reinterpret_cast<Address>(rear_))->area_start());
1466 emergency_stack_ = NULL;
1470 void PromotionQueue::RelocateQueueHead() {
1471 DCHECK(emergency_stack_ == NULL);
1473 Page* p = Page::FromAllocationTop(reinterpret_cast<Address>(rear_));
1474 intptr_t* head_start = rear_;
1475 intptr_t* head_end = Min(front_, reinterpret_cast<intptr_t*>(p->area_end()));
  int entries_count =
      static_cast<int>(head_end - head_start) / kEntrySizeInWords;
1480 emergency_stack_ = new List<Entry>(2 * entries_count);
1482 while (head_start != head_end) {
1483 int size = static_cast<int>(*(head_start++));
1484 HeapObject* obj = reinterpret_cast<HeapObject*>(*(head_start++));
1485 // New space allocation in SemiSpaceCopyObject marked the region
1486 // overlapping with promotion queue as uninitialized.
1487 MSAN_MEMORY_IS_INITIALIZED(&size, sizeof(size));
1488 MSAN_MEMORY_IS_INITIALIZED(&obj, sizeof(obj));
1489 emergency_stack_->Add(Entry(obj, size));
1495 class ScavengeWeakObjectRetainer : public WeakObjectRetainer {
1497 explicit ScavengeWeakObjectRetainer(Heap* heap) : heap_(heap) {}
1499 virtual Object* RetainAs(Object* object) {
1500 if (!heap_->InFromSpace(object)) {
1504 MapWord map_word = HeapObject::cast(object)->map_word();
1505 if (map_word.IsForwardingAddress()) {
1506 return map_word.ToForwardingAddress();
1516 void Heap::Scavenge() {
1517 RelocationLock relocation_lock(this);
1520 if (FLAG_verify_heap) VerifyNonPointerSpacePointers(this);
1523 gc_state_ = SCAVENGE;
1525 // Implements Cheney's copying algorithm
1526 LOG(isolate_, ResourceEvent("scavenge", "begin"));
1528 // Clear descriptor cache.
1529 isolate_->descriptor_lookup_cache()->Clear();
1531 // Used for updating survived_since_last_expansion_ at function end.
1532 intptr_t survived_watermark = PromotedSpaceSizeOfObjects();
1534 SelectScavengingVisitorsTable();
1536 incremental_marking()->PrepareForScavenge();
  // Flip the semispaces.  After flipping, to space is empty, from space has
  // live objects.
  new_space_.Flip();
  new_space_.ResetAllocationInfo();
1543 // We need to sweep newly copied objects which can be either in the
1544 // to space or promoted to the old generation. For to-space
1545 // objects, we treat the bottom of the to space as a queue. Newly
1546 // copied and unswept objects lie between a 'front' mark and the
1547 // allocation pointer.
1549 // Promoted objects can go into various old-generation spaces, and
1550 // can be allocated internally in the spaces (from the free list).
1551 // We treat the top of the to space as a queue of addresses of
1552 // promoted objects. The addresses of newly promoted and unswept
1553 // objects lie between a 'front' mark and a 'rear' mark that is
1554 // updated as a side effect of promoting an object.
1556 // There is guaranteed to be enough room at the top of the to space
1557 // for the addresses of promoted objects: every object promoted
1558 // frees up its size in bytes from the top of the new space, and
1559 // objects are at least one pointer in size.
1560 Address new_space_front = new_space_.ToSpaceStart();
1561 promotion_queue_.Initialize();
1564 store_buffer()->Clean();
1567 ScavengeVisitor scavenge_visitor(this);
1569 IterateRoots(&scavenge_visitor, VISIT_ALL_IN_SCAVENGE);
1571 // Copy objects reachable from the old generation.
1573 StoreBufferRebuildScope scope(this, store_buffer(),
1574 &ScavengeStoreBufferCallback);
1575 store_buffer()->IteratePointersToNewSpace(&ScavengeObject);
  // Copy objects reachable from simple cells by scavenging cell values
  // directly.
1580 HeapObjectIterator cell_iterator(cell_space_);
1581 for (HeapObject* heap_object = cell_iterator.Next(); heap_object != NULL;
1582 heap_object = cell_iterator.Next()) {
1583 if (heap_object->IsCell()) {
1584 Cell* cell = Cell::cast(heap_object);
1585 Address value_address = cell->ValueAddress();
1586 scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(value_address));
1590 // Copy objects reachable from global property cells by scavenging global
1591 // property cell values directly.
1592 HeapObjectIterator js_global_property_cell_iterator(property_cell_space_);
1593 for (HeapObject* heap_object = js_global_property_cell_iterator.Next();
1594 heap_object != NULL;
1595 heap_object = js_global_property_cell_iterator.Next()) {
1596 if (heap_object->IsPropertyCell()) {
1597 PropertyCell* cell = PropertyCell::cast(heap_object);
1598 Address value_address = cell->ValueAddress();
1599 scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(value_address));
1600 Address type_address = cell->TypeAddress();
1601 scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(type_address));
1605 // Copy objects reachable from the encountered weak collections list.
1606 scavenge_visitor.VisitPointer(&encountered_weak_collections_);
1607 // Copy objects reachable from the encountered weak cells.
1608 scavenge_visitor.VisitPointer(&encountered_weak_cells_);
1610 // Copy objects reachable from the code flushing candidates list.
1611 MarkCompactCollector* collector = mark_compact_collector();
1612 if (collector->is_code_flushing_enabled()) {
1613 collector->code_flusher()->IteratePointersToFromSpace(&scavenge_visitor);
1616 new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
1618 while (isolate()->global_handles()->IterateObjectGroups(
1619 &scavenge_visitor, &IsUnscavengedHeapObject)) {
1620 new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
1622 isolate()->global_handles()->RemoveObjectGroups();
1623 isolate()->global_handles()->RemoveImplicitRefGroups();
1625 isolate()->global_handles()->IdentifyNewSpaceWeakIndependentHandles(
1626 &IsUnscavengedHeapObject);
  isolate()->global_handles()->IterateNewSpaceWeakIndependentRoots(
      &scavenge_visitor);
1630 new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
1632 UpdateNewSpaceReferencesInExternalStringTable(
1633 &UpdateNewSpaceReferenceInExternalStringTableEntry);
1635 promotion_queue_.Destroy();
1637 incremental_marking()->UpdateMarkingDequeAfterScavenge();
1639 ScavengeWeakObjectRetainer weak_object_retainer(this);
1640 ProcessYoungWeakReferences(&weak_object_retainer);
1642 // Collects callback info for handles referenced by young generation that are
1643 // pending (about to be collected) and either phantom or internal-fields.
1644 // Releases the global handles. See also PostGarbageCollectionProcessing.
1645 isolate()->global_handles()->CollectYoungPhantomCallbackData();
1647 DCHECK(new_space_front == new_space_.top());
1650 new_space_.set_age_mark(new_space_.top());
1652 new_space_.LowerInlineAllocationLimit(
1653 new_space_.inline_allocation_limit_step());
1655 // Update how much has survived scavenge.
1656 IncrementYoungSurvivorsCounter(static_cast<int>(
1657 (PromotedSpaceSizeOfObjects() - survived_watermark) + new_space_.Size()));
1659 LOG(isolate_, ResourceEvent("scavenge", "end"));
1661 gc_state_ = NOT_IN_GC;
1663 gc_idle_time_handler_.NotifyScavenge();
String* Heap::UpdateNewSpaceReferenceInExternalStringTableEntry(Heap* heap,
                                                                Object** p) {
1669 MapWord first_word = HeapObject::cast(*p)->map_word();
1671 if (!first_word.IsForwardingAddress()) {
1672 // Unreachable external string can be finalized.
    heap->FinalizeExternalString(String::cast(*p));
    return NULL;
  }
1677 // String is still reachable.
1678 return String::cast(first_word.ToForwardingAddress());
1682 void Heap::UpdateNewSpaceReferencesInExternalStringTable(
1683 ExternalStringTableUpdaterCallback updater_func) {
1685 if (FLAG_verify_heap) {
1686 external_string_table_.Verify();
1690 if (external_string_table_.new_space_strings_.is_empty()) return;
1692 Object** start = &external_string_table_.new_space_strings_[0];
1693 Object** end = start + external_string_table_.new_space_strings_.length();
1694 Object** last = start;
1696 for (Object** p = start; p < end; ++p) {
1697 DCHECK(InFromSpace(*p));
1698 String* target = updater_func(this, p);
1700 if (target == NULL) continue;
1702 DCHECK(target->IsExternalString());
1704 if (InNewSpace(target)) {
      // String is still in new space.  Update the table entry.
      *last = target;
      ++last;
    } else {
1709 // String got promoted. Move it to the old string list.
1710 external_string_table_.AddOldString(target);
1714 DCHECK(last <= end);
1715 external_string_table_.ShrinkNewStrings(static_cast<int>(last - start));
1719 void Heap::UpdateReferencesInExternalStringTable(
1720 ExternalStringTableUpdaterCallback updater_func) {
1721 // Update old space string references.
1722 if (external_string_table_.old_space_strings_.length() > 0) {
1723 Object** start = &external_string_table_.old_space_strings_[0];
1724 Object** end = start + external_string_table_.old_space_strings_.length();
1725 for (Object** p = start; p < end; ++p) *p = updater_func(this, p);
1728 UpdateNewSpaceReferencesInExternalStringTable(updater_func);
1732 void Heap::ProcessAllWeakReferences(WeakObjectRetainer* retainer) {
1733 ProcessArrayBuffers(retainer);
1734 ProcessNativeContexts(retainer);
1735 ProcessAllocationSites(retainer);
1739 void Heap::ProcessYoungWeakReferences(WeakObjectRetainer* retainer) {
1740 ProcessArrayBuffers(retainer);
1741 ProcessNativeContexts(retainer);
1745 void Heap::ProcessNativeContexts(WeakObjectRetainer* retainer) {
1746 Object* head = VisitWeakList<Context>(this, native_contexts_list(), retainer);
1747 // Update the head of the list of contexts.
1748 set_native_contexts_list(head);
1752 void Heap::ProcessArrayBuffers(WeakObjectRetainer* retainer) {
1753 Object* array_buffer_obj =
1754 VisitWeakList<JSArrayBuffer>(this, array_buffers_list(), retainer);
1755 set_array_buffers_list(array_buffer_obj);
1759 void Heap::TearDownArrayBuffers() {
1760 Object* undefined = undefined_value();
1761 for (Object* o = array_buffers_list(); o != undefined;) {
1762 JSArrayBuffer* buffer = JSArrayBuffer::cast(o);
1763 Runtime::FreeArrayBuffer(isolate(), buffer);
1764 o = buffer->weak_next();
1766 set_array_buffers_list(undefined);
1770 void Heap::ProcessAllocationSites(WeakObjectRetainer* retainer) {
1771 Object* allocation_site_obj =
1772 VisitWeakList<AllocationSite>(this, allocation_sites_list(), retainer);
1773 set_allocation_sites_list(allocation_site_obj);
1777 void Heap::ResetAllAllocationSitesDependentCode(PretenureFlag flag) {
1778 DisallowHeapAllocation no_allocation_scope;
1779 Object* cur = allocation_sites_list();
1780 bool marked = false;
1781 while (cur->IsAllocationSite()) {
1782 AllocationSite* casted = AllocationSite::cast(cur);
1783 if (casted->GetPretenureMode() == flag) {
1784 casted->ResetPretenureDecision();
1785 casted->set_deopt_dependent_code(true);
1788 cur = casted->weak_next();
1790 if (marked) isolate_->stack_guard()->RequestDeoptMarkedAllocationSites();
1794 void Heap::EvaluateOldSpaceLocalPretenuring(
1795 uint64_t size_of_objects_before_gc) {
1796 uint64_t size_of_objects_after_gc = SizeOfObjects();
1797 double old_generation_survival_rate =
1798 (static_cast<double>(size_of_objects_after_gc) * 100) /
1799 static_cast<double>(size_of_objects_before_gc);
1801 if (old_generation_survival_rate < kOldSurvivalRateLowThreshold) {
1802     // Too many objects died in the old generation; pretenuring of the wrong
1803     // allocation sites may be the cause. We have to deopt all
1804 // dependent code registered in the allocation sites to re-evaluate
1805 // our pretenuring decisions.
1806 ResetAllAllocationSitesDependentCode(TENURED);
1807 if (FLAG_trace_pretenuring) {
1809 "Deopt all allocation sites dependent code due to low survival "
1810 "rate in the old generation %f\n",
1811 old_generation_survival_rate);
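// Hands every string registered in the external string table to |visitor|,
// adapting the internal ObjectVisitor interface to
// v8::ExternalResourceVisitor.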
1817 void Heap::VisitExternalResources(v8::ExternalResourceVisitor* visitor) {
1818 DisallowHeapAllocation no_allocation;
1819 // All external strings are listed in the external string table.
1821 class ExternalStringTableVisitorAdapter : public ObjectVisitor {
1823 explicit ExternalStringTableVisitorAdapter(
1824 v8::ExternalResourceVisitor* visitor)
1825 : visitor_(visitor) {}
1826 virtual void VisitPointers(Object** start, Object** end) {
1827 for (Object** p = start; p < end; p++) {
1828 DCHECK((*p)->IsExternalString());
1829 visitor_->VisitExternalString(
1830 Utils::ToLocal(Handle<String>(String::cast(*p))));
1835 v8::ExternalResourceVisitor* visitor_;
1836 } external_string_table_visitor(visitor);
1838 external_string_table_.Iterate(&external_string_table_visitor);
1842 class NewSpaceScavenger : public StaticNewSpaceVisitor<NewSpaceScavenger> {
1844 static inline void VisitPointer(Heap* heap, Object** p) {
1845 Object* object = *p;
1846 if (!heap->InNewSpace(object)) return;
1847 Heap::ScavengeObject(reinterpret_cast<HeapObject**>(p),
1848 reinterpret_cast<HeapObject*>(object));
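// Drains the scavenger work lists: the implicit queue of objects copied to
// to-space (delimited by new_space_front and new_space_.top()) and the
// explicit promotion queue, repeating until no unprocessed objects remain.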
1853 Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
1854 Address new_space_front) {
1856 SemiSpace::AssertValidRange(new_space_front, new_space_.top());
1857 // The addresses new_space_front and new_space_.top() define a
1858   // queue of unprocessed copied objects. Process them until the queue is empty.
1860 while (new_space_front != new_space_.top()) {
1861 if (!NewSpacePage::IsAtEnd(new_space_front)) {
1862 HeapObject* object = HeapObject::FromAddress(new_space_front);
1864 NewSpaceScavenger::IterateBody(object->map(), object);
1867 NewSpacePage::FromLimit(new_space_front)->next_page()->area_start();
1871 // Promote and process all the to-be-promoted objects.
1873 StoreBufferRebuildScope scope(this, store_buffer(),
1874 &ScavengeStoreBufferCallback);
1875 while (!promotion_queue()->is_empty()) {
1878 promotion_queue()->remove(&target, &size);
1880         // The promoted object might already be partially visited
1881         // during old space pointer iteration. Thus we search specifically
1882         // for pointers to from semispace instead of looking for pointers to new space.
1884 DCHECK(!target->IsMap());
1885 Address obj_address = target->address();
1886 #if V8_DOUBLE_FIELDS_UNBOXING
1887 LayoutDescriptorHelper helper(target->map());
1888 bool has_only_tagged_fields = helper.all_fields_tagged();
1890 if (!has_only_tagged_fields) {
1891 for (int offset = 0; offset < size;) {
1892 int end_of_region_offset;
1893 if (helper.IsTagged(offset, size, &end_of_region_offset)) {
1894 IterateAndMarkPointersToFromSpace(
1895 obj_address + offset, obj_address + end_of_region_offset,
1898 offset = end_of_region_offset;
1902 IterateAndMarkPointersToFromSpace(obj_address, obj_address + size,
1904 #if V8_DOUBLE_FIELDS_UNBOXING
1910 // Take another spin if there are now unswept objects in new space
1911 // (there are currently no more unswept promoted objects).
1912 } while (new_space_front != new_space_.top());
1914 return new_space_front;
1918 STATIC_ASSERT((FixedDoubleArray::kHeaderSize & kDoubleAlignmentMask) ==
1920 STATIC_ASSERT((ConstantPoolArray::kFirstEntryOffset & kDoubleAlignmentMask) ==
1922 STATIC_ASSERT((ConstantPoolArray::kExtendedFirstOffset &
1923 kDoubleAlignmentMask) == 0); // NOLINT
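// Ensures the payload of |object| is double-aligned by placing a one-word
// filler either in front of the object (returning the shifted address) or in
// the extra word that was over-allocated at its end.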
1926 INLINE(static HeapObject* EnsureDoubleAligned(Heap* heap, HeapObject* object,
1929 static HeapObject* EnsureDoubleAligned(Heap* heap, HeapObject* object,
1931 if ((OffsetFrom(object->address()) & kDoubleAlignmentMask) != 0) {
1932 heap->CreateFillerObjectAt(object->address(), kPointerSize);
1933 return HeapObject::FromAddress(object->address() + kPointerSize);
1935 heap->CreateFillerObjectAt(object->address() + size - kPointerSize,
1942 HeapObject* Heap::DoubleAlignForDeserialization(HeapObject* object, int size) {
1943 return EnsureDoubleAligned(this, object, size);
1947 enum LoggingAndProfiling {
1948 LOGGING_AND_PROFILING_ENABLED,
1949 LOGGING_AND_PROFILING_DISABLED
1953 enum MarksHandling { TRANSFER_MARKS, IGNORE_MARKS };
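// The scavenging visitor is specialized on whether mark bits have to be
// transferred to the copied object (required while incremental marking is
// active) and on whether logging/profiling hooks must fire, so the common
// fast path avoids those checks.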
1956 template <MarksHandling marks_handling,
1957 LoggingAndProfiling logging_and_profiling_mode>
1958 class ScavengingVisitor : public StaticVisitorBase {
1960 static void Initialize() {
1961 table_.Register(kVisitSeqOneByteString, &EvacuateSeqOneByteString);
1962 table_.Register(kVisitSeqTwoByteString, &EvacuateSeqTwoByteString);
1963 table_.Register(kVisitShortcutCandidate, &EvacuateShortcutCandidate);
1964 table_.Register(kVisitByteArray, &EvacuateByteArray);
1965 table_.Register(kVisitFixedArray, &EvacuateFixedArray);
1966 table_.Register(kVisitFixedDoubleArray, &EvacuateFixedDoubleArray);
1967 table_.Register(kVisitFixedTypedArray, &EvacuateFixedTypedArray);
1968 table_.Register(kVisitFixedFloat64Array, &EvacuateFixedFloat64Array);
1971 kVisitNativeContext,
1972 &ObjectEvacuationStrategy<POINTER_OBJECT>::template VisitSpecialized<
1977 &ObjectEvacuationStrategy<POINTER_OBJECT>::template VisitSpecialized<
1978 ConsString::kSize>);
1982 &ObjectEvacuationStrategy<POINTER_OBJECT>::template VisitSpecialized<
1983 SlicedString::kSize>);
1987 &ObjectEvacuationStrategy<POINTER_OBJECT>::template VisitSpecialized<
1991 kVisitSharedFunctionInfo,
1992 &ObjectEvacuationStrategy<POINTER_OBJECT>::template VisitSpecialized<
1993 SharedFunctionInfo::kSize>);
1995 table_.Register(kVisitJSWeakCollection,
1996 &ObjectEvacuationStrategy<POINTER_OBJECT>::Visit);
1998 table_.Register(kVisitJSArrayBuffer,
1999 &ObjectEvacuationStrategy<POINTER_OBJECT>::Visit);
2001 table_.Register(kVisitJSTypedArray,
2002 &ObjectEvacuationStrategy<POINTER_OBJECT>::Visit);
2004 table_.Register(kVisitJSDataView,
2005 &ObjectEvacuationStrategy<POINTER_OBJECT>::Visit);
2007 table_.Register(kVisitJSRegExp,
2008 &ObjectEvacuationStrategy<POINTER_OBJECT>::Visit);
2010 if (marks_handling == IGNORE_MARKS) {
2013 &ObjectEvacuationStrategy<POINTER_OBJECT>::template VisitSpecialized<
2014 JSFunction::kSize>);
2016 table_.Register(kVisitJSFunction, &EvacuateJSFunction);
2019 table_.RegisterSpecializations<ObjectEvacuationStrategy<DATA_OBJECT>,
2020 kVisitDataObject, kVisitDataObjectGeneric>();
2022 table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>,
2023 kVisitJSObject, kVisitJSObjectGeneric>();
2025 table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>,
2026 kVisitStruct, kVisitStructGeneric>();
2029 static VisitorDispatchTable<ScavengingCallback>* GetTable() {
2034 enum ObjectContents { DATA_OBJECT, POINTER_OBJECT };
2036 static void RecordCopiedObject(Heap* heap, HeapObject* obj) {
2037 bool should_record = false;
2039 should_record = FLAG_heap_stats;
2041 should_record = should_record || FLAG_log_gc;
2042 if (should_record) {
2043 if (heap->new_space()->Contains(obj)) {
2044 heap->new_space()->RecordAllocation(obj);
2046 heap->new_space()->RecordPromotion(obj);
2051 // Helper function used by CopyObject to copy a source object to an
2052 // allocated target object and update the forwarding pointer in the source
2053 // object. Returns the target object.
2054 INLINE(static void MigrateObject(Heap* heap, HeapObject* source,
2055 HeapObject* target, int size)) {
2056 // If we migrate into to-space, then the to-space top pointer should be
2057     // right after the target object. Incorporate double alignment over-allocation.
2059 DCHECK(!heap->InToSpace(target) ||
2060 target->address() + size == heap->new_space()->top() ||
2061 target->address() + size + kPointerSize == heap->new_space()->top());
2063 // Make sure that we do not overwrite the promotion queue which is at
2064 // the end of to-space.
2065 DCHECK(!heap->InToSpace(target) ||
2066 heap->promotion_queue()->IsBelowPromotionQueue(
2067 heap->new_space()->top()));
2069 // Copy the content of source to target.
2070 heap->CopyBlock(target->address(), source->address(), size);
2072 // Set the forwarding address.
2073 source->set_map_word(MapWord::FromForwardingAddress(target));
2075 if (logging_and_profiling_mode == LOGGING_AND_PROFILING_ENABLED) {
2076 // Update NewSpace stats if necessary.
2077 RecordCopiedObject(heap, target);
2078 heap->OnMoveEvent(target, source, size);
2081 if (marks_handling == TRANSFER_MARKS) {
2082 if (Marking::TransferColor(source, target)) {
2083 MemoryChunk::IncrementLiveBytesFromGC(target->address(), size);
2088 template <int alignment>
2089 static inline bool SemiSpaceCopyObject(Map* map, HeapObject** slot,
2090 HeapObject* object, int object_size) {
2091 Heap* heap = map->GetHeap();
2093 int allocation_size = object_size;
2094 if (alignment != kObjectAlignment) {
2095 DCHECK(alignment == kDoubleAlignment);
2096 allocation_size += kPointerSize;
2099 DCHECK(heap->AllowedToBeMigrated(object, NEW_SPACE));
2100 AllocationResult allocation =
2101 heap->new_space()->AllocateRaw(allocation_size);
2103 HeapObject* target = NULL; // Initialization to please compiler.
2104 if (allocation.To(&target)) {
2105 // Order is important here: Set the promotion limit before storing a
2106 // filler for double alignment or migrating the object. Otherwise we
2107       // may end up overwriting promotion queue entries when we migrate the object.
2109 heap->promotion_queue()->SetNewLimit(heap->new_space()->top());
2111 if (alignment != kObjectAlignment) {
2112 target = EnsureDoubleAligned(heap, target, allocation_size);
2115 // Order is important: slot might be inside of the target if target
2116       // was allocated over a dead object and slot comes from the store buffer.
2119 MigrateObject(heap, object, target, object_size);
2121 heap->IncrementSemiSpaceCopiedObjectSize(object_size);
2128 template <ObjectContents object_contents, int alignment>
2129 static inline bool PromoteObject(Map* map, HeapObject** slot,
2130 HeapObject* object, int object_size) {
2131 Heap* heap = map->GetHeap();
2133 int allocation_size = object_size;
2134 if (alignment != kObjectAlignment) {
2135 DCHECK(alignment == kDoubleAlignment);
2136 allocation_size += kPointerSize;
2139 AllocationResult allocation;
2140 if (object_contents == DATA_OBJECT) {
2141 DCHECK(heap->AllowedToBeMigrated(object, OLD_DATA_SPACE));
2142 allocation = heap->old_data_space()->AllocateRaw(allocation_size);
2144 DCHECK(heap->AllowedToBeMigrated(object, OLD_POINTER_SPACE));
2145 allocation = heap->old_pointer_space()->AllocateRaw(allocation_size);
2148 HeapObject* target = NULL; // Initialization to please compiler.
2149 if (allocation.To(&target)) {
2150 if (alignment != kObjectAlignment) {
2151 target = EnsureDoubleAligned(heap, target, allocation_size);
2154 // Order is important: slot might be inside of the target if target
2155       // was allocated over a dead object and slot comes from the store buffer.
2158 // Unfortunately, the allocation can also write over the slot if the slot
2159 // was in free space and the allocation wrote free list data (such as the
2160 // free list map or entry size) over the slot. We guard against this by
2161 // checking that the slot still points to the object being moved. This
2162 // should be sufficient because neither the free list map nor the free
2163 // list entry size should look like a new space pointer (the former is an
2164 // old space pointer, the latter is word-aligned).
2165 if (*slot == object) {
2168 MigrateObject(heap, object, target, object_size);
2170 if (object_contents == POINTER_OBJECT) {
2171 if (map->instance_type() == JS_FUNCTION_TYPE) {
2172 heap->promotion_queue()->insert(target,
2173 JSFunction::kNonWeakFieldsEndOffset);
2175 heap->promotion_queue()->insert(target, object_size);
2178 heap->IncrementPromotedObjectsSize(object_size);
2185 template <ObjectContents object_contents, int alignment>
2186 static inline void EvacuateObject(Map* map, HeapObject** slot,
2187 HeapObject* object, int object_size) {
2188 SLOW_DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
2189 SLOW_DCHECK(object->Size() == object_size);
2190 Heap* heap = map->GetHeap();
2192 if (!heap->ShouldBePromoted(object->address(), object_size)) {
2193 // A semi-space copy may fail due to fragmentation. In that case, we
2194 // try to promote the object.
2195 if (SemiSpaceCopyObject<alignment>(map, slot, object, object_size)) {
2200 if (PromoteObject<object_contents, alignment>(map, slot, object,
2205 // If promotion failed, we try to copy the object to the other semi-space
2206 if (SemiSpaceCopyObject<alignment>(map, slot, object, object_size)) return;
2212 static inline void EvacuateJSFunction(Map* map, HeapObject** slot,
2213 HeapObject* object) {
2214 ObjectEvacuationStrategy<POINTER_OBJECT>::template VisitSpecialized<
2215 JSFunction::kSize>(map, slot, object);
2217 MapWord map_word = object->map_word();
2218 DCHECK(map_word.IsForwardingAddress());
2219 HeapObject* target = map_word.ToForwardingAddress();
2221 MarkBit mark_bit = Marking::MarkBitFrom(target);
2222 if (Marking::IsBlack(mark_bit)) {
2223       // This object is black and it might not be rescanned by the marker.
2224       // We should explicitly record the code entry slot for compaction because
2225 // promotion queue processing (IterateAndMarkPointersToFromSpace) will
2226 // miss it as it is not HeapObject-tagged.
2227 Address code_entry_slot =
2228 target->address() + JSFunction::kCodeEntryOffset;
2229 Code* code = Code::cast(Code::GetObjectFromEntryAddress(code_entry_slot));
2230 map->GetHeap()->mark_compact_collector()->RecordCodeEntrySlot(
2231 code_entry_slot, code);
2236 static inline void EvacuateFixedArray(Map* map, HeapObject** slot,
2237 HeapObject* object) {
2238 int object_size = FixedArray::BodyDescriptor::SizeOf(map, object);
2239 EvacuateObject<POINTER_OBJECT, kObjectAlignment>(map, slot, object,
2244 static inline void EvacuateFixedDoubleArray(Map* map, HeapObject** slot,
2245 HeapObject* object) {
2246 int length = reinterpret_cast<FixedDoubleArray*>(object)->length();
2247 int object_size = FixedDoubleArray::SizeFor(length);
2248 EvacuateObject<DATA_OBJECT, kDoubleAlignment>(map, slot, object,
2253 static inline void EvacuateFixedTypedArray(Map* map, HeapObject** slot,
2254 HeapObject* object) {
2255 int object_size = reinterpret_cast<FixedTypedArrayBase*>(object)->size();
2256 EvacuateObject<DATA_OBJECT, kObjectAlignment>(map, slot, object,
2261 static inline void EvacuateFixedFloat64Array(Map* map, HeapObject** slot,
2262 HeapObject* object) {
2263 int object_size = reinterpret_cast<FixedFloat64Array*>(object)->size();
2264 EvacuateObject<DATA_OBJECT, kDoubleAlignment>(map, slot, object,
2269 static inline void EvacuateByteArray(Map* map, HeapObject** slot,
2270 HeapObject* object) {
2271 int object_size = reinterpret_cast<ByteArray*>(object)->ByteArraySize();
2272 EvacuateObject<DATA_OBJECT, kObjectAlignment>(map, slot, object,
2277 static inline void EvacuateSeqOneByteString(Map* map, HeapObject** slot,
2278 HeapObject* object) {
2279 int object_size = SeqOneByteString::cast(object)
2280 ->SeqOneByteStringSize(map->instance_type());
2281 EvacuateObject<DATA_OBJECT, kObjectAlignment>(map, slot, object,
2286 static inline void EvacuateSeqTwoByteString(Map* map, HeapObject** slot,
2287 HeapObject* object) {
2288 int object_size = SeqTwoByteString::cast(object)
2289 ->SeqTwoByteStringSize(map->instance_type());
2290 EvacuateObject<DATA_OBJECT, kObjectAlignment>(map, slot, object,
2295 static inline void EvacuateShortcutCandidate(Map* map, HeapObject** slot,
2296 HeapObject* object) {
2297 DCHECK(IsShortcutCandidate(map->instance_type()));
2299 Heap* heap = map->GetHeap();
2301 if (marks_handling == IGNORE_MARKS &&
2302 ConsString::cast(object)->unchecked_second() == heap->empty_string()) {
2304 HeapObject::cast(ConsString::cast(object)->unchecked_first());
2308 if (!heap->InNewSpace(first)) {
2309 object->set_map_word(MapWord::FromForwardingAddress(first));
2313 MapWord first_word = first->map_word();
2314 if (first_word.IsForwardingAddress()) {
2315 HeapObject* target = first_word.ToForwardingAddress();
2318 object->set_map_word(MapWord::FromForwardingAddress(target));
2322 heap->DoScavengeObject(first->map(), slot, first);
2323 object->set_map_word(MapWord::FromForwardingAddress(*slot));
2327 int object_size = ConsString::kSize;
2328 EvacuateObject<POINTER_OBJECT, kObjectAlignment>(map, slot, object,
2332 template <ObjectContents object_contents>
2333 class ObjectEvacuationStrategy {
2335 template <int object_size>
2336 static inline void VisitSpecialized(Map* map, HeapObject** slot,
2337 HeapObject* object) {
2338 EvacuateObject<object_contents, kObjectAlignment>(map, slot, object,
2342 static inline void Visit(Map* map, HeapObject** slot, HeapObject* object) {
2343 int object_size = map->instance_size();
2344 EvacuateObject<object_contents, kObjectAlignment>(map, slot, object,
2349 static VisitorDispatchTable<ScavengingCallback> table_;
2353 template <MarksHandling marks_handling,
2354 LoggingAndProfiling logging_and_profiling_mode>
2355 VisitorDispatchTable<ScavengingCallback>
2356 ScavengingVisitor<marks_handling, logging_and_profiling_mode>::table_;
2359 static void InitializeScavengingVisitorsTables() {
2360 ScavengingVisitor<TRANSFER_MARKS,
2361 LOGGING_AND_PROFILING_DISABLED>::Initialize();
2362 ScavengingVisitor<IGNORE_MARKS, LOGGING_AND_PROFILING_DISABLED>::Initialize();
2363 ScavengingVisitor<TRANSFER_MARKS,
2364 LOGGING_AND_PROFILING_ENABLED>::Initialize();
2365 ScavengingVisitor<IGNORE_MARKS, LOGGING_AND_PROFILING_ENABLED>::Initialize();
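// Picks one of the four statically initialized dispatch tables above,
// depending on whether incremental marking is active and whether any logger
// or profiler needs to observe object moves.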
2369 void Heap::SelectScavengingVisitorsTable() {
2370 bool logging_and_profiling =
2371 FLAG_verify_predictable || isolate()->logger()->is_logging() ||
2372 isolate()->cpu_profiler()->is_profiling() ||
2373 (isolate()->heap_profiler() != NULL &&
2374 isolate()->heap_profiler()->is_tracking_object_moves());
2376 if (!incremental_marking()->IsMarking()) {
2377 if (!logging_and_profiling) {
2378 scavenging_visitors_table_.CopyFrom(ScavengingVisitor<
2379 IGNORE_MARKS, LOGGING_AND_PROFILING_DISABLED>::GetTable());
2381 scavenging_visitors_table_.CopyFrom(ScavengingVisitor<
2382 IGNORE_MARKS, LOGGING_AND_PROFILING_ENABLED>::GetTable());
2385 if (!logging_and_profiling) {
2386 scavenging_visitors_table_.CopyFrom(ScavengingVisitor<
2387 TRANSFER_MARKS, LOGGING_AND_PROFILING_DISABLED>::GetTable());
2389 scavenging_visitors_table_.CopyFrom(ScavengingVisitor<
2390 TRANSFER_MARKS, LOGGING_AND_PROFILING_ENABLED>::GetTable());
2393 if (incremental_marking()->IsCompacting()) {
2395     // When compacting, forbid short-circuiting of cons-strings.
2396     // Scavenging code relies on the fact that new space objects
2397     // can't be evacuated into an evacuation candidate, but
2397 // short-circuiting violates this assumption.
2398 scavenging_visitors_table_.Register(
2399 StaticVisitorBase::kVisitShortcutCandidate,
2400 scavenging_visitors_table_.GetVisitorById(
2401 StaticVisitorBase::kVisitConsString));
2407 void Heap::ScavengeObjectSlow(HeapObject** p, HeapObject* object) {
2408 SLOW_DCHECK(object->GetIsolate()->heap()->InFromSpace(object));
2409 MapWord first_word = object->map_word();
2410 SLOW_DCHECK(!first_word.IsForwardingAddress());
2411 Map* map = first_word.ToMap();
2412 map->GetHeap()->DoScavengeObject(map, p, object);
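// Once the tracer has recorded survival statistics, scales the initial
// old-generation allocation limit by the average survival ratio, but never
// below the minimum allocation limit.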
2416 void Heap::ConfigureInitialOldGenerationSize() {
2417 if (!old_generation_size_configured_ && tracer()->SurvivalEventsRecorded()) {
2418 old_generation_allocation_limit_ =
2419 Max(kMinimumOldGenerationAllocationLimit,
2420 static_cast<intptr_t>(
2421 static_cast<double>(old_generation_allocation_limit_) *
2422 (tracer()->AverageSurvivalRatio() / 100)));
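// Allocates a map during bootstrapping, before the meta map and other roots
// exist. Only a minimal set of fields is initialized here; prototype,
// constructor and instance descriptors are fixed up later in
// CreateInitialMaps.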
2427 AllocationResult Heap::AllocatePartialMap(InstanceType instance_type,
2428 int instance_size) {
2430 AllocationResult allocation = AllocateRaw(Map::kSize, MAP_SPACE, MAP_SPACE);
2431 if (!allocation.To(&result)) return allocation;
2433 // Map::cast cannot be used due to uninitialized map field.
2434 reinterpret_cast<Map*>(result)->set_map(raw_unchecked_meta_map());
2435 reinterpret_cast<Map*>(result)->set_instance_type(instance_type);
2436 reinterpret_cast<Map*>(result)->set_instance_size(instance_size);
2437 // Initialize to only containing tagged fields.
2438 reinterpret_cast<Map*>(result)->set_visitor_id(
2439 StaticVisitorBase::GetVisitorId(instance_type, instance_size, false));
2440 if (FLAG_unbox_double_fields) {
2441 reinterpret_cast<Map*>(result)
2442 ->set_layout_descriptor(LayoutDescriptor::FastPointerLayout());
2444 reinterpret_cast<Map*>(result)->set_inobject_properties(0);
2445 reinterpret_cast<Map*>(result)->set_pre_allocated_property_fields(0);
2446 reinterpret_cast<Map*>(result)->set_unused_property_fields(0);
2447 reinterpret_cast<Map*>(result)->set_bit_field(0);
2448 reinterpret_cast<Map*>(result)->set_bit_field2(0);
2449 int bit_field3 = Map::EnumLengthBits::encode(kInvalidEnumCacheSentinel) |
2450 Map::OwnsDescriptors::encode(true) |
2451 Map::Counter::encode(Map::kRetainingCounterStart);
2452 reinterpret_cast<Map*>(result)->set_bit_field3(bit_field3);
2457 AllocationResult Heap::AllocateMap(InstanceType instance_type,
2459 ElementsKind elements_kind) {
2461 AllocationResult allocation = AllocateRaw(Map::kSize, MAP_SPACE, MAP_SPACE);
2462 if (!allocation.To(&result)) return allocation;
2464 result->set_map_no_write_barrier(meta_map());
2465 Map* map = Map::cast(result);
2466 map->set_instance_type(instance_type);
2467 map->set_prototype(null_value(), SKIP_WRITE_BARRIER);
2468 map->set_constructor(null_value(), SKIP_WRITE_BARRIER);
2469 map->set_instance_size(instance_size);
2470 map->set_inobject_properties(0);
2471 map->set_pre_allocated_property_fields(0);
2472 map->set_code_cache(empty_fixed_array(), SKIP_WRITE_BARRIER);
2473 map->set_dependent_code(DependentCode::cast(empty_fixed_array()),
2474 SKIP_WRITE_BARRIER);
2475 map->init_back_pointer(undefined_value());
2476 map->set_unused_property_fields(0);
2477 map->set_instance_descriptors(empty_descriptor_array());
2478 if (FLAG_unbox_double_fields) {
2479 map->set_layout_descriptor(LayoutDescriptor::FastPointerLayout());
2481 // Must be called only after |instance_type|, |instance_size| and
2482 // |layout_descriptor| are set.
2483 map->set_visitor_id(StaticVisitorBase::GetVisitorId(map));
2484 map->set_bit_field(0);
2485 map->set_bit_field2(1 << Map::kIsExtensible);
2486 int bit_field3 = Map::EnumLengthBits::encode(kInvalidEnumCacheSentinel) |
2487 Map::OwnsDescriptors::encode(true) |
2488 Map::Counter::encode(Map::kRetainingCounterStart);
2489 map->set_bit_field3(bit_field3);
2490 map->set_elements_kind(elements_kind);
2496 AllocationResult Heap::AllocateFillerObject(int size, bool double_align,
2497 AllocationSpace space) {
2500 AllocationResult allocation = AllocateRaw(size, space, space);
2501 if (!allocation.To(&obj)) return allocation;
2504 MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
2505 DCHECK(chunk->owner()->identity() == space);
2507 CreateFillerObjectAt(obj->address(), size);
2512 const Heap::StringTypeTable Heap::string_type_table[] = {
2513 #define STRING_TYPE_ELEMENT(type, size, name, camel_name) \
2514 { type, size, k##camel_name##MapRootIndex } \
2516 STRING_TYPE_LIST(STRING_TYPE_ELEMENT)
2517 #undef STRING_TYPE_ELEMENT
2521 const Heap::ConstantStringTable Heap::constant_string_table[] = {
2522 #define CONSTANT_STRING_ELEMENT(name, contents) \
2523 { contents, k##name##RootIndex } \
2525 INTERNALIZED_STRING_LIST(CONSTANT_STRING_ELEMENT)
2526 #undef CONSTANT_STRING_ELEMENT
2530 const Heap::StructTable Heap::struct_table[] = {
2531 #define STRUCT_TABLE_ELEMENT(NAME, Name, name) \
2532 { NAME##_TYPE, Name::kSize, k##Name##MapRootIndex } \
2534 STRUCT_LIST(STRUCT_TABLE_ELEMENT)
2535 #undef STRUCT_TABLE_ELEMENT
2539 bool Heap::CreateInitialMaps() {
2542 AllocationResult allocation = AllocatePartialMap(MAP_TYPE, Map::kSize);
2543 if (!allocation.To(&obj)) return false;
2545 // Map::cast cannot be used due to uninitialized map field.
2546 Map* new_meta_map = reinterpret_cast<Map*>(obj);
2547 set_meta_map(new_meta_map);
2548 new_meta_map->set_map(new_meta_map);
2550 { // Partial map allocation
2551 #define ALLOCATE_PARTIAL_MAP(instance_type, size, field_name) \
2554 if (!AllocatePartialMap((instance_type), (size)).To(&map)) return false; \
2555 set_##field_name##_map(map); \
2558 ALLOCATE_PARTIAL_MAP(FIXED_ARRAY_TYPE, kVariableSizeSentinel, fixed_array);
2559 ALLOCATE_PARTIAL_MAP(ODDBALL_TYPE, Oddball::kSize, undefined);
2560 ALLOCATE_PARTIAL_MAP(ODDBALL_TYPE, Oddball::kSize, null);
2561 ALLOCATE_PARTIAL_MAP(CONSTANT_POOL_ARRAY_TYPE, kVariableSizeSentinel,
2562 constant_pool_array);
2564 #undef ALLOCATE_PARTIAL_MAP
2567 // Allocate the empty array.
2569 AllocationResult allocation = AllocateEmptyFixedArray();
2570 if (!allocation.To(&obj)) return false;
2572 set_empty_fixed_array(FixedArray::cast(obj));
2575 AllocationResult allocation = Allocate(null_map(), OLD_POINTER_SPACE);
2576 if (!allocation.To(&obj)) return false;
2578 set_null_value(Oddball::cast(obj));
2579 Oddball::cast(obj)->set_kind(Oddball::kNull);
2582 AllocationResult allocation = Allocate(undefined_map(), OLD_POINTER_SPACE);
2583 if (!allocation.To(&obj)) return false;
2585 set_undefined_value(Oddball::cast(obj));
2586 Oddball::cast(obj)->set_kind(Oddball::kUndefined);
2587 DCHECK(!InNewSpace(undefined_value()));
2589 // Set preliminary exception sentinel value before actually initializing it.
2590 set_exception(null_value());
2592 // Allocate the empty descriptor array.
2594 AllocationResult allocation = AllocateEmptyFixedArray();
2595 if (!allocation.To(&obj)) return false;
2597 set_empty_descriptor_array(DescriptorArray::cast(obj));
2599 // Allocate the constant pool array.
2601 AllocationResult allocation = AllocateEmptyConstantPoolArray();
2602 if (!allocation.To(&obj)) return false;
2604 set_empty_constant_pool_array(ConstantPoolArray::cast(obj));
2606 // Fix the instance_descriptors for the existing maps.
2607 meta_map()->set_code_cache(empty_fixed_array());
2608 meta_map()->set_dependent_code(DependentCode::cast(empty_fixed_array()));
2609 meta_map()->init_back_pointer(undefined_value());
2610 meta_map()->set_instance_descriptors(empty_descriptor_array());
2611 if (FLAG_unbox_double_fields) {
2612 meta_map()->set_layout_descriptor(LayoutDescriptor::FastPointerLayout());
2615 fixed_array_map()->set_code_cache(empty_fixed_array());
2616 fixed_array_map()->set_dependent_code(
2617 DependentCode::cast(empty_fixed_array()));
2618 fixed_array_map()->init_back_pointer(undefined_value());
2619 fixed_array_map()->set_instance_descriptors(empty_descriptor_array());
2620 if (FLAG_unbox_double_fields) {
2621 fixed_array_map()->set_layout_descriptor(
2622 LayoutDescriptor::FastPointerLayout());
2625 undefined_map()->set_code_cache(empty_fixed_array());
2626 undefined_map()->set_dependent_code(DependentCode::cast(empty_fixed_array()));
2627 undefined_map()->init_back_pointer(undefined_value());
2628 undefined_map()->set_instance_descriptors(empty_descriptor_array());
2629 if (FLAG_unbox_double_fields) {
2630 undefined_map()->set_layout_descriptor(
2631 LayoutDescriptor::FastPointerLayout());
2634 null_map()->set_code_cache(empty_fixed_array());
2635 null_map()->set_dependent_code(DependentCode::cast(empty_fixed_array()));
2636 null_map()->init_back_pointer(undefined_value());
2637 null_map()->set_instance_descriptors(empty_descriptor_array());
2638 if (FLAG_unbox_double_fields) {
2639 null_map()->set_layout_descriptor(LayoutDescriptor::FastPointerLayout());
2642 constant_pool_array_map()->set_code_cache(empty_fixed_array());
2643 constant_pool_array_map()->set_dependent_code(
2644 DependentCode::cast(empty_fixed_array()));
2645 constant_pool_array_map()->init_back_pointer(undefined_value());
2646 constant_pool_array_map()->set_instance_descriptors(empty_descriptor_array());
2647 if (FLAG_unbox_double_fields) {
2648 constant_pool_array_map()->set_layout_descriptor(
2649 LayoutDescriptor::FastPointerLayout());
2652 // Fix prototype object for existing maps.
2653 meta_map()->set_prototype(null_value());
2654 meta_map()->set_constructor(null_value());
2656 fixed_array_map()->set_prototype(null_value());
2657 fixed_array_map()->set_constructor(null_value());
2659 undefined_map()->set_prototype(null_value());
2660 undefined_map()->set_constructor(null_value());
2662 null_map()->set_prototype(null_value());
2663 null_map()->set_constructor(null_value());
2665 constant_pool_array_map()->set_prototype(null_value());
2666 constant_pool_array_map()->set_constructor(null_value());
2669 #define ALLOCATE_MAP(instance_type, size, field_name) \
2672 if (!AllocateMap((instance_type), size).To(&map)) return false; \
2673 set_##field_name##_map(map); \
2676 #define ALLOCATE_VARSIZE_MAP(instance_type, field_name) \
2677 ALLOCATE_MAP(instance_type, kVariableSizeSentinel, field_name)
2679 ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, fixed_cow_array)
2680 DCHECK(fixed_array_map() != fixed_cow_array_map());
2682 ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, scope_info)
2683 ALLOCATE_MAP(HEAP_NUMBER_TYPE, HeapNumber::kSize, heap_number)
2684 ALLOCATE_MAP(MUTABLE_HEAP_NUMBER_TYPE, HeapNumber::kSize,
2685 mutable_heap_number)
2686 ALLOCATE_MAP(SYMBOL_TYPE, Symbol::kSize, symbol)
2687 ALLOCATE_MAP(FOREIGN_TYPE, Foreign::kSize, foreign)
2689 ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, the_hole);
2690 ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, boolean);
2691 ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, uninitialized);
2692 ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, arguments_marker);
2693 ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, no_interceptor_result_sentinel);
2694 ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, exception);
2695 ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, termination_exception);
2697 for (unsigned i = 0; i < arraysize(string_type_table); i++) {
2698 const StringTypeTable& entry = string_type_table[i];
2700 AllocationResult allocation = AllocateMap(entry.type, entry.size);
2701 if (!allocation.To(&obj)) return false;
2703       // Mark cons string maps as unstable, because their objects can change maps during GC.
2705 Map* map = Map::cast(obj);
2706 if (StringShape(entry.type).IsCons()) map->mark_unstable();
2707 roots_[entry.index] = map;
2710 { // Create a separate external one byte string map for native sources.
2711 AllocationResult allocation = AllocateMap(EXTERNAL_ONE_BYTE_STRING_TYPE,
2712 ExternalOneByteString::kSize);
2713 if (!allocation.To(&obj)) return false;
2714 set_native_source_string_map(Map::cast(obj));
2717 ALLOCATE_VARSIZE_MAP(FIXED_DOUBLE_ARRAY_TYPE, fixed_double_array)
2718 ALLOCATE_VARSIZE_MAP(BYTE_ARRAY_TYPE, byte_array)
2719 ALLOCATE_VARSIZE_MAP(FREE_SPACE_TYPE, free_space)
2721 #define ALLOCATE_EXTERNAL_ARRAY_MAP(Type, type, TYPE, ctype, size) \
2722 ALLOCATE_MAP(EXTERNAL_##TYPE##_ARRAY_TYPE, ExternalArray::kAlignedSize, \
2723 external_##type##_array)
2725 TYPED_ARRAYS(ALLOCATE_EXTERNAL_ARRAY_MAP)
2726 #undef ALLOCATE_EXTERNAL_ARRAY_MAP
2728 #define ALLOCATE_FIXED_TYPED_ARRAY_MAP(Type, type, TYPE, ctype, size) \
2729 ALLOCATE_VARSIZE_MAP(FIXED_##TYPE##_ARRAY_TYPE, fixed_##type##_array)
2731 TYPED_ARRAYS(ALLOCATE_FIXED_TYPED_ARRAY_MAP)
2732 #undef ALLOCATE_FIXED_TYPED_ARRAY_MAP
2734 ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, sloppy_arguments_elements)
2736 ALLOCATE_VARSIZE_MAP(CODE_TYPE, code)
2738 ALLOCATE_MAP(CELL_TYPE, Cell::kSize, cell)
2739 ALLOCATE_MAP(PROPERTY_CELL_TYPE, PropertyCell::kSize, global_property_cell)
2740 ALLOCATE_MAP(WEAK_CELL_TYPE, WeakCell::kSize, weak_cell)
2741 ALLOCATE_MAP(FILLER_TYPE, kPointerSize, one_pointer_filler)
2742 ALLOCATE_MAP(FILLER_TYPE, 2 * kPointerSize, two_pointer_filler)
2745 for (unsigned i = 0; i < arraysize(struct_table); i++) {
2746 const StructTable& entry = struct_table[i];
2748 if (!AllocateMap(entry.type, entry.size).To(&map)) return false;
2749 roots_[entry.index] = map;
2752 ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, hash_table)
2753 ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, ordered_hash_table)
2755 ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, function_context)
2756 ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, catch_context)
2757 ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, with_context)
2758 ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, block_context)
2759 ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, module_context)
2760 ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, script_context)
2761 ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, script_context_table)
2763 ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, native_context)
2764 native_context_map()->set_dictionary_map(true);
2765 native_context_map()->set_visitor_id(
2766 StaticVisitorBase::kVisitNativeContext);
2768 ALLOCATE_MAP(SHARED_FUNCTION_INFO_TYPE, SharedFunctionInfo::kAlignedSize,
2769 shared_function_info)
2771 ALLOCATE_MAP(JS_MESSAGE_OBJECT_TYPE, JSMessageObject::kSize, message_object)
2772 ALLOCATE_MAP(JS_OBJECT_TYPE, JSObject::kHeaderSize + kPointerSize, external)
2773 external_map()->set_is_extensible(false);
2774 #undef ALLOCATE_VARSIZE_MAP
2780 ByteArray* byte_array;
2781 if (!AllocateByteArray(0, TENURED).To(&byte_array)) return false;
2782 set_empty_byte_array(byte_array);
2785 #define ALLOCATE_EMPTY_EXTERNAL_ARRAY(Type, type, TYPE, ctype, size) \
2787 ExternalArray* obj; \
2788 if (!AllocateEmptyExternalArray(kExternal##Type##Array).To(&obj)) \
2790 set_empty_external_##type##_array(obj); \
2793 TYPED_ARRAYS(ALLOCATE_EMPTY_EXTERNAL_ARRAY)
2794 #undef ALLOCATE_EMPTY_EXTERNAL_ARRAY
2796 #define ALLOCATE_EMPTY_FIXED_TYPED_ARRAY(Type, type, TYPE, ctype, size) \
2798 FixedTypedArrayBase* obj; \
2799 if (!AllocateEmptyFixedTypedArray(kExternal##Type##Array).To(&obj)) \
2801 set_empty_fixed_##type##_array(obj); \
2804 TYPED_ARRAYS(ALLOCATE_EMPTY_FIXED_TYPED_ARRAY)
2805 #undef ALLOCATE_EMPTY_FIXED_TYPED_ARRAY
2807 DCHECK(!InNewSpace(empty_fixed_array()));
2812 AllocationResult Heap::AllocateHeapNumber(double value, MutableMode mode,
2813 PretenureFlag pretenure) {
2814   // Statically ensure that it is safe to allocate heap numbers in paged spaces.
2816 int size = HeapNumber::kSize;
2817 STATIC_ASSERT(HeapNumber::kSize <= Page::kMaxRegularHeapObjectSize);
2819 AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
2823 AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
2824 if (!allocation.To(&result)) return allocation;
2827 Map* map = mode == MUTABLE ? mutable_heap_number_map() : heap_number_map();
2828 HeapObject::cast(result)->set_map_no_write_barrier(map);
2829 HeapNumber::cast(result)->set_value(value);
2834 AllocationResult Heap::AllocateCell(Object* value) {
2835 int size = Cell::kSize;
2836 STATIC_ASSERT(Cell::kSize <= Page::kMaxRegularHeapObjectSize);
2840 AllocationResult allocation = AllocateRaw(size, CELL_SPACE, CELL_SPACE);
2841 if (!allocation.To(&result)) return allocation;
2843 result->set_map_no_write_barrier(cell_map());
2844 Cell::cast(result)->set_value(value);
2849 AllocationResult Heap::AllocatePropertyCell() {
2850 int size = PropertyCell::kSize;
2851 STATIC_ASSERT(PropertyCell::kSize <= Page::kMaxRegularHeapObjectSize);
2854 AllocationResult allocation =
2855 AllocateRaw(size, PROPERTY_CELL_SPACE, PROPERTY_CELL_SPACE);
2856 if (!allocation.To(&result)) return allocation;
2858 result->set_map_no_write_barrier(global_property_cell_map());
2859 PropertyCell* cell = PropertyCell::cast(result);
2860 cell->set_dependent_code(DependentCode::cast(empty_fixed_array()),
2861 SKIP_WRITE_BARRIER);
2862 cell->set_value(the_hole_value());
2863 cell->set_type(HeapType::None());
2868 AllocationResult Heap::AllocateWeakCell(HeapObject* value) {
2869 int size = WeakCell::kSize;
2870 STATIC_ASSERT(WeakCell::kSize <= Page::kMaxRegularHeapObjectSize);
2873 AllocationResult allocation =
2874 AllocateRaw(size, OLD_POINTER_SPACE, OLD_POINTER_SPACE);
2875 if (!allocation.To(&result)) return allocation;
2877 result->set_map_no_write_barrier(weak_cell_map());
2878 WeakCell::cast(result)->initialize(value);
2879 WeakCell::cast(result)->set_next(undefined_value(), SKIP_WRITE_BARRIER);
2884 void Heap::CreateApiObjects() {
2885 HandleScope scope(isolate());
2886 Factory* factory = isolate()->factory();
2887 Handle<Map> new_neander_map =
2888 factory->NewMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
2890 // Don't use Smi-only elements optimizations for objects with the neander
2891 // map. There are too many cases where element values are set directly with a
2892 // bottleneck to trap the Smi-only -> fast elements transition, and there
2893   // appears to be no benefit from optimizing this case.
2894 new_neander_map->set_elements_kind(TERMINAL_FAST_ELEMENTS_KIND);
2895 set_neander_map(*new_neander_map);
2897 Handle<JSObject> listeners = factory->NewNeanderObject();
2898 Handle<FixedArray> elements = factory->NewFixedArray(2);
2899 elements->set(0, Smi::FromInt(0));
2900 listeners->set_elements(*elements);
2901 set_message_listeners(*listeners);
2905 void Heap::CreateJSEntryStub() {
2906 JSEntryStub stub(isolate(), StackFrame::ENTRY);
2907 set_js_entry_code(*stub.GetCode());
2911 void Heap::CreateJSConstructEntryStub() {
2912 JSEntryStub stub(isolate(), StackFrame::ENTRY_CONSTRUCT);
2913 set_js_construct_entry_code(*stub.GetCode());
2917 void Heap::CreateFixedStubs() {
2918 // Here we create roots for fixed stubs. They are needed at GC
2919 // for cooking and uncooking (check out frames.cc).
2920   // This eliminates the need for doing a dictionary lookup in the
2921 // stub cache for these stubs.
2922 HandleScope scope(isolate());
2924 // Create stubs that should be there, so we don't unexpectedly have to
2925 // create them if we need them during the creation of another stub.
2926 // Stub creation mixes raw pointers and handles in an unsafe manner so
2927 // we cannot create stubs while we are creating stubs.
2928 CodeStub::GenerateStubsAheadOfTime(isolate());
2930 // MacroAssembler::Abort calls (usually enabled with --debug-code) depend on
2931   // CEntryStub, so we need to call GenerateStubsAheadOfTime before JSEntryStub is created.
2934   // gcc-4.4 has a problem generating correct code for the following snippet:
2935 // { JSEntryStub stub;
2936 // js_entry_code_ = *stub.GetCode();
2938 // { JSConstructEntryStub stub;
2939 // js_construct_entry_code_ = *stub.GetCode();
2941   // To work around the problem, make separate functions without inlining.
2942 Heap::CreateJSEntryStub();
2943 Heap::CreateJSConstructEntryStub();
2947 void Heap::CreateInitialObjects() {
2948 HandleScope scope(isolate());
2949 Factory* factory = isolate()->factory();
2951 // The -0 value must be set before NewNumber works.
2952 set_minus_zero_value(*factory->NewHeapNumber(-0.0, IMMUTABLE, TENURED));
2953 DCHECK(std::signbit(minus_zero_value()->Number()) != 0);
2955 set_nan_value(*factory->NewHeapNumber(
2956 std::numeric_limits<double>::quiet_NaN(), IMMUTABLE, TENURED));
2957 set_infinity_value(*factory->NewHeapNumber(V8_INFINITY, IMMUTABLE, TENURED));
2959 // The hole has not been created yet, but we want to put something
2960   // predictable in the gaps in the string table, so let's make that Smi zero.
2961 set_the_hole_value(reinterpret_cast<Oddball*>(Smi::FromInt(0)));
2963 // Allocate initial string table.
2964 set_string_table(*StringTable::New(isolate(), kInitialStringTableSize));
2966 // Finish initializing oddballs after creating the string table.
2967 Oddball::Initialize(isolate(), factory->undefined_value(), "undefined",
2968 factory->nan_value(), Oddball::kUndefined);
2970 // Initialize the null_value.
2971 Oddball::Initialize(isolate(), factory->null_value(), "null",
2972 handle(Smi::FromInt(0), isolate()), Oddball::kNull);
2974 set_true_value(*factory->NewOddball(factory->boolean_map(), "true",
2975 handle(Smi::FromInt(1), isolate()),
2978 set_false_value(*factory->NewOddball(factory->boolean_map(), "false",
2979 handle(Smi::FromInt(0), isolate()),
2982 set_the_hole_value(*factory->NewOddball(factory->the_hole_map(), "hole",
2983 handle(Smi::FromInt(-1), isolate()),
2984 Oddball::kTheHole));
2986 set_uninitialized_value(*factory->NewOddball(
2987 factory->uninitialized_map(), "uninitialized",
2988 handle(Smi::FromInt(-1), isolate()), Oddball::kUninitialized));
2990 set_arguments_marker(*factory->NewOddball(
2991 factory->arguments_marker_map(), "arguments_marker",
2992 handle(Smi::FromInt(-4), isolate()), Oddball::kArgumentMarker));
2994 set_no_interceptor_result_sentinel(*factory->NewOddball(
2995 factory->no_interceptor_result_sentinel_map(),
2996 "no_interceptor_result_sentinel", handle(Smi::FromInt(-2), isolate()),
2999 set_termination_exception(*factory->NewOddball(
3000 factory->termination_exception_map(), "termination_exception",
3001 handle(Smi::FromInt(-3), isolate()), Oddball::kOther));
3003 set_exception(*factory->NewOddball(factory->exception_map(), "exception",
3004 handle(Smi::FromInt(-5), isolate()),
3005 Oddball::kException));
3007 for (unsigned i = 0; i < arraysize(constant_string_table); i++) {
3008 Handle<String> str =
3009 factory->InternalizeUtf8String(constant_string_table[i].contents);
3010 roots_[constant_string_table[i].index] = *str;
3013 // Allocate the hidden string which is used to identify the hidden properties
3014 // in JSObjects. The hash code has a special value so that it will not match
3015 // the empty string when searching for the property. It cannot be part of the
3016 // loop above because it needs to be allocated manually with the special
3017 // hash code in place. The hash code for the hidden_string is zero to ensure
3018 // that it will always be at the first entry in property descriptors.
3019 hidden_string_ = *factory->NewOneByteInternalizedString(
3020 OneByteVector("", 0), String::kEmptyStringHash);
3022 // Create the code_stubs dictionary. The initial size is set to avoid
3023 // expanding the dictionary during bootstrapping.
3024 set_code_stubs(*UnseededNumberDictionary::New(isolate(), 128));
3026 // Create the non_monomorphic_cache used in stub-cache.cc. The initial size
3027 // is set to avoid expanding the dictionary during bootstrapping.
3028 set_non_monomorphic_cache(*UnseededNumberDictionary::New(isolate(), 64));
3030 set_polymorphic_code_cache(PolymorphicCodeCache::cast(
3031 *factory->NewStruct(POLYMORPHIC_CODE_CACHE_TYPE)));
3033 set_instanceof_cache_function(Smi::FromInt(0));
3034 set_instanceof_cache_map(Smi::FromInt(0));
3035 set_instanceof_cache_answer(Smi::FromInt(0));
3038 HandleScope scope(isolate());
3039 #define SYMBOL_INIT(name) \
3040 Handle<Symbol> name = factory->NewPrivateOwnSymbol(); \
3041 roots_[k##name##RootIndex] = *name;
3042 PRIVATE_SYMBOL_LIST(SYMBOL_INIT)
3047 HandleScope scope(isolate());
3048 #define SYMBOL_INIT(name, varname, description) \
3049 Handle<Symbol> name = factory->NewSymbol(); \
3050 Handle<String> name##d = factory->NewStringFromStaticChars(#description); \
3051 name->set_name(*name##d); \
3052 roots_[k##name##RootIndex] = *name;
3053 PUBLIC_SYMBOL_LIST(SYMBOL_INIT)
3059 // Allocate the dictionary of intrinsic function names.
3060 Handle<NameDictionary> intrinsic_names =
3061 NameDictionary::New(isolate(), Runtime::kNumFunctions, TENURED);
3062 Runtime::InitializeIntrinsicFunctionNames(isolate(), intrinsic_names);
3063 set_intrinsic_function_names(*intrinsic_names);
3065 set_number_string_cache(
3066 *factory->NewFixedArray(kInitialNumberStringCacheSize * 2, TENURED));
3068 // Allocate cache for single character one byte strings.
3069 set_single_character_string_cache(
3070 *factory->NewFixedArray(String::kMaxOneByteCharCode + 1, TENURED));
3072 // Allocate cache for string split and regexp-multiple.
3073 set_string_split_cache(*factory->NewFixedArray(
3074 RegExpResultsCache::kRegExpResultsCacheSize, TENURED));
3075 set_regexp_multiple_cache(*factory->NewFixedArray(
3076 RegExpResultsCache::kRegExpResultsCacheSize, TENURED));
3078 // Allocate cache for external strings pointing to native source code.
3079 set_natives_source_cache(
3080 *factory->NewFixedArray(Natives::GetBuiltinsCount()));
3082 set_undefined_cell(*factory->NewCell(factory->undefined_value()));
3084 // The symbol registry is initialized lazily.
3085 set_symbol_registry(Smi::FromInt(0));
3087 // Allocate object to hold object observation state.
3088 set_observation_state(*factory->NewJSObjectFromMap(
3089 factory->NewMap(JS_OBJECT_TYPE, JSObject::kHeaderSize)));
3091 // Microtask queue uses the empty fixed array as a sentinel for "empty".
3092   // The number of queued microtasks is stored in Isolate::pending_microtask_count().
3093 set_microtask_queue(empty_fixed_array());
3095 if (FLAG_vector_ics) {
3096 FeedbackVectorSpec spec(0, 1);
3097 spec.SetKind(0, Code::KEYED_LOAD_IC);
3098 Handle<TypeFeedbackVector> dummy_vector =
3099 factory->NewTypeFeedbackVector(spec);
3100 dummy_vector->Set(FeedbackVectorICSlot(0),
3101 *TypeFeedbackVector::MegamorphicSentinel(isolate()),
3102 SKIP_WRITE_BARRIER);
3103 set_keyed_load_dummy_vector(*dummy_vector);
3105 set_keyed_load_dummy_vector(empty_fixed_array());
3108 set_detached_contexts(empty_fixed_array());
3110 set_weak_object_to_code_table(
3111 *WeakHashTable::New(isolate(), 16, USE_DEFAULT_MINIMUM_CAPACITY,
3114 Handle<SeededNumberDictionary> slow_element_dictionary =
3115 SeededNumberDictionary::New(isolate(), 0, TENURED);
3116 slow_element_dictionary->set_requires_slow_elements();
3117 set_empty_slow_element_dictionary(*slow_element_dictionary);
3119 set_materialized_objects(*factory->NewFixedArray(0, TENURED));
3121 // Handling of script id generation is in Factory::NewScript.
3122 set_last_script_id(Smi::FromInt(v8::UnboundScript::kNoScriptId));
3124 set_allocation_sites_scratchpad(
3125 *factory->NewFixedArray(kAllocationSiteScratchpadSize, TENURED));
3126 InitializeAllocationSitesScratchpad();
3128 // Initialize keyed lookup cache.
3129 isolate_->keyed_lookup_cache()->Clear();
3131 // Initialize context slot cache.
3132 isolate_->context_slot_cache()->Clear();
3134 // Initialize descriptor cache.
3135 isolate_->descriptor_lookup_cache()->Clear();
3137 // Initialize compilation cache.
3138 isolate_->compilation_cache()->Clear();
3142 bool Heap::RootCanBeWrittenAfterInitialization(Heap::RootListIndex root_index) {
3143 RootListIndex writable_roots[] = {
3144 kStoreBufferTopRootIndex,
3145 kStackLimitRootIndex,
3146 kNumberStringCacheRootIndex,
3147 kInstanceofCacheFunctionRootIndex,
3148 kInstanceofCacheMapRootIndex,
3149 kInstanceofCacheAnswerRootIndex,
3150 kCodeStubsRootIndex,
3151 kNonMonomorphicCacheRootIndex,
3152 kPolymorphicCodeCacheRootIndex,
3153 kLastScriptIdRootIndex,
3154 kEmptyScriptRootIndex,
3155 kRealStackLimitRootIndex,
3156 kArgumentsAdaptorDeoptPCOffsetRootIndex,
3157 kConstructStubDeoptPCOffsetRootIndex,
3158 kGetterStubDeoptPCOffsetRootIndex,
3159 kSetterStubDeoptPCOffsetRootIndex,
3160 kStringTableRootIndex,
3163 for (unsigned int i = 0; i < arraysize(writable_roots); i++) {
3164 if (root_index == writable_roots[i]) return true;
3170 bool Heap::RootCanBeTreatedAsConstant(RootListIndex root_index) {
3171 return !RootCanBeWrittenAfterInitialization(root_index) &&
3172 !InNewSpace(roots_array_start()[root_index]);
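// Looks up a cached result for a string-split or regexp-multiple operation.
// The cache is probed at two slots derived from the key string's hash;
// Smi zero signals a miss.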
3176 Object* RegExpResultsCache::Lookup(Heap* heap, String* key_string,
3177 Object* key_pattern, ResultsCacheType type) {
3179 if (!key_string->IsInternalizedString()) return Smi::FromInt(0);
3180 if (type == STRING_SPLIT_SUBSTRINGS) {
3181 DCHECK(key_pattern->IsString());
3182 if (!key_pattern->IsInternalizedString()) return Smi::FromInt(0);
3183 cache = heap->string_split_cache();
3185 DCHECK(type == REGEXP_MULTIPLE_INDICES);
3186 DCHECK(key_pattern->IsFixedArray());
3187 cache = heap->regexp_multiple_cache();
3190 uint32_t hash = key_string->Hash();
3191 uint32_t index = ((hash & (kRegExpResultsCacheSize - 1)) &
3192 ~(kArrayEntriesPerCacheEntry - 1));
3193 if (cache->get(index + kStringOffset) == key_string &&
3194 cache->get(index + kPatternOffset) == key_pattern) {
3195 return cache->get(index + kArrayOffset);
3198 ((index + kArrayEntriesPerCacheEntry) & (kRegExpResultsCacheSize - 1));
3199 if (cache->get(index + kStringOffset) == key_string &&
3200 cache->get(index + kPatternOffset) == key_pattern) {
3201 return cache->get(index + kArrayOffset);
3203 return Smi::FromInt(0);
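// Inserts a result into the cache: the primary slot is used if it is empty,
// otherwise the secondary slot; if both are occupied, the secondary entry is
// cleared and the new entry overwrites the primary one.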
3207 void RegExpResultsCache::Enter(Isolate* isolate, Handle<String> key_string,
3208 Handle<Object> key_pattern,
3209 Handle<FixedArray> value_array,
3210 ResultsCacheType type) {
3211 Factory* factory = isolate->factory();
3212 Handle<FixedArray> cache;
3213 if (!key_string->IsInternalizedString()) return;
3214 if (type == STRING_SPLIT_SUBSTRINGS) {
3215 DCHECK(key_pattern->IsString());
3216 if (!key_pattern->IsInternalizedString()) return;
3217 cache = factory->string_split_cache();
3219 DCHECK(type == REGEXP_MULTIPLE_INDICES);
3220 DCHECK(key_pattern->IsFixedArray());
3221 cache = factory->regexp_multiple_cache();
3224 uint32_t hash = key_string->Hash();
3225 uint32_t index = ((hash & (kRegExpResultsCacheSize - 1)) &
3226 ~(kArrayEntriesPerCacheEntry - 1));
3227 if (cache->get(index + kStringOffset) == Smi::FromInt(0)) {
3228 cache->set(index + kStringOffset, *key_string);
3229 cache->set(index + kPatternOffset, *key_pattern);
3230 cache->set(index + kArrayOffset, *value_array);
3233 ((index + kArrayEntriesPerCacheEntry) & (kRegExpResultsCacheSize - 1));
3234 if (cache->get(index2 + kStringOffset) == Smi::FromInt(0)) {
3235 cache->set(index2 + kStringOffset, *key_string);
3236 cache->set(index2 + kPatternOffset, *key_pattern);
3237 cache->set(index2 + kArrayOffset, *value_array);
3239 cache->set(index2 + kStringOffset, Smi::FromInt(0));
3240 cache->set(index2 + kPatternOffset, Smi::FromInt(0));
3241 cache->set(index2 + kArrayOffset, Smi::FromInt(0));
3242 cache->set(index + kStringOffset, *key_string);
3243 cache->set(index + kPatternOffset, *key_pattern);
3244 cache->set(index + kArrayOffset, *value_array);
3247 // If the array is a reasonably short list of substrings, convert it into a
3248 // list of internalized strings.
3249 if (type == STRING_SPLIT_SUBSTRINGS && value_array->length() < 100) {
3250 for (int i = 0; i < value_array->length(); i++) {
3251 Handle<String> str(String::cast(value_array->get(i)), isolate);
3252 Handle<String> internalized_str = factory->InternalizeString(str);
3253 value_array->set(i, *internalized_str);
3256 // Convert backing store to a copy-on-write array.
3257 value_array->set_map_no_write_barrier(*factory->fixed_cow_array_map());
3261 void RegExpResultsCache::Clear(FixedArray* cache) {
3262 for (int i = 0; i < kRegExpResultsCacheSize; i++) {
3263 cache->set(i, Smi::FromInt(0));
3268 int Heap::FullSizeNumberStringCacheLength() {
3269 // Compute the size of the number string cache based on the max newspace size.
3270 // The number string cache has a minimum size based on twice the initial cache
3271 // size to ensure that it is bigger after being made 'full size'.
3272 int number_string_cache_size = max_semi_space_size_ / 512;
3273 number_string_cache_size = Max(kInitialNumberStringCacheSize * 2,
3274 Min(0x4000, number_string_cache_size));
3275   // There is a string and a number per entry, so the length is twice the number of entries.
3277 return number_string_cache_size * 2;
3281 void Heap::FlushNumberStringCache() {
3282 // Flush the number to string cache.
3283 int len = number_string_cache()->length();
3284 for (int i = 0; i < len; i++) {
3285 number_string_cache()->set_undefined(i);
3290 void Heap::FlushAllocationSitesScratchpad() {
3291 for (int i = 0; i < allocation_sites_scratchpad_length_; i++) {
3292 allocation_sites_scratchpad()->set_undefined(i);
3294 allocation_sites_scratchpad_length_ = 0;
3298 void Heap::InitializeAllocationSitesScratchpad() {
3299 DCHECK(allocation_sites_scratchpad()->length() ==
3300 kAllocationSiteScratchpadSize);
3301 for (int i = 0; i < kAllocationSiteScratchpadSize; i++) {
3302 allocation_sites_scratchpad()->set_undefined(i);
3307 void Heap::AddAllocationSiteToScratchpad(AllocationSite* site,
3308 ScratchpadSlotMode mode) {
3309 if (allocation_sites_scratchpad_length_ < kAllocationSiteScratchpadSize) {
3310 // We cannot use the normal write-barrier because slots need to be
3311 // recorded with non-incremental marking as well. We have to explicitly
3312 // record the slot to take evacuation candidates into account.
3313 allocation_sites_scratchpad()->set(allocation_sites_scratchpad_length_,
3314 site, SKIP_WRITE_BARRIER);
3315 Object** slot = allocation_sites_scratchpad()->RawFieldOfElementAt(
3316 allocation_sites_scratchpad_length_);
3318 if (mode == RECORD_SCRATCHPAD_SLOT) {
3319 // We need to allow slots buffer overflow here since the evacuation
3320 // candidates are not part of the global list of old space pages and
3321 // releasing an evacuation candidate due to a slots buffer overflow
3322 // results in lost pages.
3323 mark_compact_collector()->RecordSlot(slot, slot, *slot,
3324 SlotsBuffer::IGNORE_OVERFLOW);
3326 allocation_sites_scratchpad_length_++;
3331 Map* Heap::MapForExternalArrayType(ExternalArrayType array_type) {
3332 return Map::cast(roots_[RootIndexForExternalArrayType(array_type)]);
3336 Heap::RootListIndex Heap::RootIndexForExternalArrayType(
3337 ExternalArrayType array_type) {
3338 switch (array_type) {
3339 #define ARRAY_TYPE_TO_ROOT_INDEX(Type, type, TYPE, ctype, size) \
3340 case kExternal##Type##Array: \
3341 return kExternal##Type##ArrayMapRootIndex;
3343 TYPED_ARRAYS(ARRAY_TYPE_TO_ROOT_INDEX)
3344 #undef ARRAY_TYPE_TO_ROOT_INDEX
3348 return kUndefinedValueRootIndex;
3353 Map* Heap::MapForFixedTypedArray(ExternalArrayType array_type) {
3354 return Map::cast(roots_[RootIndexForFixedTypedArray(array_type)]);
3358 Heap::RootListIndex Heap::RootIndexForFixedTypedArray(
3359 ExternalArrayType array_type) {
3360 switch (array_type) {
3361 #define ARRAY_TYPE_TO_ROOT_INDEX(Type, type, TYPE, ctype, size) \
3362 case kExternal##Type##Array: \
3363 return kFixed##Type##ArrayMapRootIndex;
3365 TYPED_ARRAYS(ARRAY_TYPE_TO_ROOT_INDEX)
3366 #undef ARRAY_TYPE_TO_ROOT_INDEX
3370 return kUndefinedValueRootIndex;
3375 Heap::RootListIndex Heap::RootIndexForEmptyExternalArray(
3376 ElementsKind elementsKind) {
3377 switch (elementsKind) {
3378 #define ELEMENT_KIND_TO_ROOT_INDEX(Type, type, TYPE, ctype, size) \
3379 case EXTERNAL_##TYPE##_ELEMENTS: \
3380 return kEmptyExternal##Type##ArrayRootIndex;
3382 TYPED_ARRAYS(ELEMENT_KIND_TO_ROOT_INDEX)
3383 #undef ELEMENT_KIND_TO_ROOT_INDEX
3387 return kUndefinedValueRootIndex;
3392 Heap::RootListIndex Heap::RootIndexForEmptyFixedTypedArray(
3393 ElementsKind elementsKind) {
3394 switch (elementsKind) {
3395 #define ELEMENT_KIND_TO_ROOT_INDEX(Type, type, TYPE, ctype, size) \
3396 case TYPE##_ELEMENTS: \
3397 return kEmptyFixed##Type##ArrayRootIndex;
3399 TYPED_ARRAYS(ELEMENT_KIND_TO_ROOT_INDEX)
3400 #undef ELEMENT_KIND_TO_ROOT_INDEX
3403 return kUndefinedValueRootIndex;
3408 ExternalArray* Heap::EmptyExternalArrayForMap(Map* map) {
3409 return ExternalArray::cast(
3410 roots_[RootIndexForEmptyExternalArray(map->elements_kind())]);
3414 FixedTypedArrayBase* Heap::EmptyFixedTypedArrayForMap(Map* map) {
3415 return FixedTypedArrayBase::cast(
3416 roots_[RootIndexForEmptyFixedTypedArray(map->elements_kind())]);
3420 AllocationResult Heap::AllocateForeign(Address address,
3421 PretenureFlag pretenure) {
3422 // Statically ensure that it is safe to allocate foreigns in paged spaces.
3423 STATIC_ASSERT(Foreign::kSize <= Page::kMaxRegularHeapObjectSize);
3424 AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
3426 AllocationResult allocation = Allocate(foreign_map(), space);
3427 if (!allocation.To(&result)) return allocation;
3428 result->set_foreign_address(address);
3433 AllocationResult Heap::AllocateByteArray(int length, PretenureFlag pretenure) {
3434 if (length < 0 || length > ByteArray::kMaxLength) {
3435 v8::internal::Heap::FatalProcessOutOfMemory("invalid array length", true);
3437 int size = ByteArray::SizeFor(length);
3438 AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
3441 AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
3442 if (!allocation.To(&result)) return allocation;
3445 result->set_map_no_write_barrier(byte_array_map());
3446 ByteArray::cast(result)->set_length(length);
3451 void Heap::CreateFillerObjectAt(Address addr, int size) {
3452 if (size == 0) return;
3453 HeapObject* filler = HeapObject::FromAddress(addr);
3454 // At this point, we may be deserializing the heap from a snapshot, so
3455 // none of the maps have been created yet; they are still NULL.
3456 if (size == kPointerSize) {
3457 filler->set_map_no_write_barrier(raw_unchecked_one_pointer_filler_map());
3458 DCHECK(filler->map() == NULL || filler->map() == one_pointer_filler_map());
3459 } else if (size == 2 * kPointerSize) {
3460 filler->set_map_no_write_barrier(raw_unchecked_two_pointer_filler_map());
3460 DCHECK(filler->map() == NULL || filler->map() == two_pointer_filler_map());
  } else {
3463 filler->set_map_no_write_barrier(raw_unchecked_free_space_map());
3464 DCHECK(filler->map() == NULL || filler->map() == free_space_map());
3465 FreeSpace::cast(filler)->nobarrier_set_size(size);
  }
}
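// Filler objects keep the heap iterable: every address range that does not
// hold a live object is covered by a one-pointer filler, a two-pointer
// filler, or a FreeSpace object, so heap iterators can parse right over it.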
3470 bool Heap::CanMoveObjectStart(HeapObject* object) {
3471 Address address = object->address();
3472 bool is_in_old_pointer_space = InOldPointerSpace(address);
3473 bool is_in_old_data_space = InOldDataSpace(address);
3475 if (lo_space()->Contains(object)) return false;
3477 Page* page = Page::FromAddress(address);
3478 // We can move the object start if:
3479 // (1) the object is not in old pointer or old data space,
3480 // (2) the page of the object was already swept,
3481 // (3) the page was already concurrently swept. This case is an optimization
3482 // for concurrent sweeping. The WasSwept predicate for concurrently swept
3483 // pages is set after sweeping all pages.
3484 return (!is_in_old_pointer_space && !is_in_old_data_space) ||
3485 page->WasSwept() || page->SweepingCompleted();
3489 void Heap::AdjustLiveBytes(Address address, int by, InvocationMode mode) {
3490 if (incremental_marking()->IsMarking() &&
3491 Marking::IsBlack(Marking::MarkBitFrom(address))) {
3492 if (mode == FROM_GC) {
3493 MemoryChunk::IncrementLiveBytesFromGC(address, by);
    } else {
3495 MemoryChunk::IncrementLiveBytesFromMutator(address, by);
    }
  }
}
3501 FixedArrayBase* Heap::LeftTrimFixedArray(FixedArrayBase* object,
3502 int elements_to_trim) {
3503 const int element_size = object->IsFixedArray() ? kPointerSize : kDoubleSize;
3504 const int bytes_to_trim = elements_to_trim * element_size;
3505 Map* map = object->map();
3507 // For now this trick is only applied to objects in new and paged space.
3508 // In large object space the object's start must coincide with chunk
3509 // and thus the trick is just not applicable.
3510 DCHECK(!lo_space()->Contains(object));
3511 DCHECK(object->map() != fixed_cow_array_map());
3513 STATIC_ASSERT(FixedArrayBase::kMapOffset == 0);
3514 STATIC_ASSERT(FixedArrayBase::kLengthOffset == kPointerSize);
3515 STATIC_ASSERT(FixedArrayBase::kHeaderSize == 2 * kPointerSize);
3517 const int len = object->length();
3518 DCHECK(elements_to_trim <= len);
3520 // Calculate location of new array start.
3521 Address new_start = object->address() + bytes_to_trim;
3523 // Technically in new space this write might be omitted (except for
3524 // debug mode which iterates through the heap), but to play safer
  // we still do it.
3526 CreateFillerObjectAt(object->address(), bytes_to_trim);
3528 // Initialize header of the trimmed array. Since left trimming is only
3529 // performed on pages which are not concurrently swept creating a filler
3530 // object does not require synchronization.
3531 DCHECK(CanMoveObjectStart(object));
3532 Object** former_start = HeapObject::RawField(object, 0);
3533 int new_start_index = elements_to_trim * (element_size / kPointerSize);
3534 former_start[new_start_index] = map;
3535 former_start[new_start_index + 1] = Smi::FromInt(len - elements_to_trim);
3536 FixedArrayBase* new_object =
3537 FixedArrayBase::cast(HeapObject::FromAddress(new_start));
3539 // Maintain consistency of live bytes during incremental marking
3540 marking()->TransferMark(object->address(), new_start);
3541 AdjustLiveBytes(new_start, -bytes_to_trim, Heap::FROM_MUTATOR);
3543 // Notify the heap profiler of change in object layout.
3544 OnMoveEvent(new_object, object, new_object->Size());
  return new_object;
}
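// Roughly, left trimming turns
//   [ map | length = len | e0 | e1 | ... ]
// into
//   [ filler of bytes_to_trim bytes | map | length = len - elements_to_trim | ... ],
// and the returned object starts at the old address plus bytes_to_trim.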
3549 // Force instantiation of templatized method.
template
3551 void Heap::RightTrimFixedArray<Heap::FROM_GC>(FixedArrayBase*, int);
template
3553 void Heap::RightTrimFixedArray<Heap::FROM_MUTATOR>(FixedArrayBase*, int);
3556 template<Heap::InvocationMode mode>
3557 void Heap::RightTrimFixedArray(FixedArrayBase* object, int elements_to_trim) {
3558 const int element_size = object->IsFixedArray() ? kPointerSize : kDoubleSize;
3559 const int bytes_to_trim = elements_to_trim * element_size;
3561 // For now this trick is only applied to objects in new and paged space.
3562 DCHECK(object->map() != fixed_cow_array_map());
3564 const int len = object->length();
3565 DCHECK(elements_to_trim < len);
3567 // Calculate location of new array end.
3568 Address new_end = object->address() + object->Size() - bytes_to_trim;
3570 // Technically in new space this write might be omitted (except for
3571 // debug mode which iterates through the heap), but to play safer
  // we still do it.
3573 // We do not create a filler for objects in large object space.
3574 // TODO(hpayer): We should shrink the large object page if the size
3575 // of the object changed significantly.
3576 if (!lo_space()->Contains(object)) {
3577 CreateFillerObjectAt(new_end, bytes_to_trim);
  }
3580 // Initialize header of the trimmed array. We are storing the new length
3581 // using release store after creating a filler for the left-over space to
3582 // avoid races with the sweeper thread.
3583 object->synchronized_set_length(len - elements_to_trim);
3585 // Maintain consistency of live bytes during incremental marking
3586 AdjustLiveBytes(object->address(), -bytes_to_trim, mode);
3588 // Notify the heap profiler of change in object layout. The array may not be
3589 // moved during GC, and size has to be adjusted nevertheless.
3590 HeapProfiler* profiler = isolate()->heap_profiler();
3591 if (profiler->is_tracking_allocations()) {
3592 profiler->UpdateObjectSizeEvent(object->address(), object->Size());
3597 AllocationResult Heap::AllocateExternalArray(int length,
3598 ExternalArrayType array_type,
3599 void* external_pointer,
3600 PretenureFlag pretenure) {
3601 int size = ExternalArray::kAlignedSize;
3602 AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
  HeapObject* result;
3605 AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
3606 if (!allocation.To(&result)) return allocation;
3609 result->set_map_no_write_barrier(MapForExternalArrayType(array_type));
3610 ExternalArray::cast(result)->set_length(length);
3611 ExternalArray::cast(result)->set_external_pointer(external_pointer);
  return result;
}
3615 static void ForFixedTypedArray(ExternalArrayType array_type, int* element_size,
3616 ElementsKind* element_kind) {
3617 switch (array_type) {
3618 #define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
3619 case kExternal##Type##Array: \
3620 *element_size = size; \
3621 *element_kind = TYPE##_ELEMENTS; \
    return;

3624 TYPED_ARRAYS(TYPED_ARRAY_CASE)
3625 #undef TYPED_ARRAY_CASE
    default:
3628 *element_size = 0; // Bogus
3629 *element_kind = UINT8_ELEMENTS; // Bogus
      UNREACHABLE();
  }
}
3635 AllocationResult Heap::AllocateFixedTypedArray(int length,
3636 ExternalArrayType array_type,
3637 PretenureFlag pretenure) {
  int element_size;
3639 ElementsKind elements_kind;
3640 ForFixedTypedArray(array_type, &element_size, &elements_kind);
3641 int size = OBJECT_POINTER_ALIGN(length * element_size +
3642 FixedTypedArrayBase::kDataOffset);
3643 #ifndef V8_HOST_ARCH_64_BIT
3644 if (array_type == kExternalFloat64Array) {
3645 size += kPointerSize;
  }
#endif
3648 AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
  HeapObject* object;
3651 AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
3652 if (!allocation.To(&object)) return allocation;
3654 if (array_type == kExternalFloat64Array) {
3655 object = EnsureDoubleAligned(this, object, size);
  }
3658 object->set_map(MapForFixedTypedArray(array_type));
3659 FixedTypedArrayBase* elements = FixedTypedArrayBase::cast(object);
3660 elements->set_length(length);
3661 memset(elements->DataPtr(), 0, elements->DataSize());
  return elements;
}
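// On 32-bit hosts the allocation above is padded by one pointer size so that
// EnsureDoubleAligned can slide a Float64 array onto an 8-byte boundary; the
// unused word is turned into a filler.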
3666 AllocationResult Heap::AllocateCode(int object_size, bool immovable) {
3667 DCHECK(IsAligned(static_cast<intptr_t>(object_size), kCodeAlignment));
3668 AllocationResult allocation =
3669 AllocateRaw(object_size, CODE_SPACE, CODE_SPACE);
  HeapObject* result;
3672 if (!allocation.To(&result)) return allocation;
  if (immovable) {
3675 Address address = result->address();
3676 // Code objects which should stay at a fixed address are allocated either
3677 // in the first page of code space (objects on the first page of each space
3678 // are never moved) or in large object space.
3679 if (!code_space_->FirstPage()->Contains(address) &&
3680 MemoryChunk::FromAddress(address)->owner()->identity() != LO_SPACE) {
3681 // Discard the first code allocation, which was on a page where it could
      // be moved.
3683 CreateFillerObjectAt(result->address(), object_size);
3684 allocation = lo_space_->AllocateRaw(object_size, EXECUTABLE);
3685 if (!allocation.To(&result)) return allocation;
3686 OnAllocationEvent(result, object_size);
    }
  }
3690 result->set_map_no_write_barrier(code_map());
3691 Code* code = Code::cast(result);
3692 DCHECK(isolate_->code_range() == NULL || !isolate_->code_range()->valid() ||
3693 isolate_->code_range()->contains(code->address()));
3694 code->set_gc_metadata(Smi::FromInt(0));
3695 code->set_ic_age(global_ic_age_);
  return code;
}
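// Code that must not move is only valid on the first page of code space
// (whose objects are never relocated) or in large object space; an initial
// allocation that lands anywhere else is abandoned, overwritten with a
// filler, and retried in large object space.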
3700 AllocationResult Heap::CopyCode(Code* code) {
3701 AllocationResult allocation;
3702 HeapObject* new_constant_pool;
3703 if (FLAG_enable_ool_constant_pool &&
3704 code->constant_pool() != empty_constant_pool_array()) {
3705 // Copy the constant pool, since edits to the copied code may modify
3706 // the constant pool.
3707 allocation = CopyConstantPoolArray(code->constant_pool());
3708 if (!allocation.To(&new_constant_pool)) return allocation;
  } else {
3710 new_constant_pool = empty_constant_pool_array();
  }

  HeapObject* result;
3714 // Allocate an object the same size as the code object.
3715 int obj_size = code->Size();
3716 allocation = AllocateRaw(obj_size, CODE_SPACE, CODE_SPACE);
3717 if (!allocation.To(&result)) return allocation;
3719 // Copy code object.
3720 Address old_addr = code->address();
3721 Address new_addr = result->address();
3722 CopyBlock(new_addr, old_addr, obj_size);
3723 Code* new_code = Code::cast(result);
3725 // Update the constant pool.
3726 new_code->set_constant_pool(new_constant_pool);
3728 // Relocate the copy.
3729 DCHECK(isolate_->code_range() == NULL || !isolate_->code_range()->valid() ||
3730 isolate_->code_range()->contains(code->address()));
3731 new_code->Relocate(new_addr - old_addr);
  return new_code;
}
3736 AllocationResult Heap::CopyCode(Code* code, Vector<byte> reloc_info) {
3737 // Allocate ByteArray and ConstantPoolArray before the Code object, so that we
3738 // do not risk leaving an uninitialized Code object (and breaking the heap).
3739 ByteArray* reloc_info_array;
  {
3741 AllocationResult allocation =
3742 AllocateByteArray(reloc_info.length(), TENURED);
3743 if (!allocation.To(&reloc_info_array)) return allocation;
  }
3745 HeapObject* new_constant_pool;
3746 if (FLAG_enable_ool_constant_pool &&
3747 code->constant_pool() != empty_constant_pool_array()) {
3748 // Copy the constant pool, since edits to the copied code may modify
3749 // the constant pool.
3750 AllocationResult allocation = CopyConstantPoolArray(code->constant_pool());
3751 if (!allocation.To(&new_constant_pool)) return allocation;
  } else {
3753 new_constant_pool = empty_constant_pool_array();
  }
3756 int new_body_size = RoundUp(code->instruction_size(), kObjectAlignment);
3758 int new_obj_size = Code::SizeFor(new_body_size);
3760 Address old_addr = code->address();
3762 size_t relocation_offset =
3763 static_cast<size_t>(code->instruction_end() - old_addr);

  HeapObject* result;
3766 AllocationResult allocation =
3767 AllocateRaw(new_obj_size, CODE_SPACE, CODE_SPACE);
3768 if (!allocation.To(&result)) return allocation;
3770 // Copy code object.
3771 Address new_addr = result->address();
3773 // Copy header and instructions.
3774 CopyBytes(new_addr, old_addr, relocation_offset);
3776 Code* new_code = Code::cast(result);
3777 new_code->set_relocation_info(reloc_info_array);
3779 // Update constant pool.
3780 new_code->set_constant_pool(new_constant_pool);
3782 // Copy patched rinfo.
3783 CopyBytes(new_code->relocation_start(), reloc_info.start(),
3784 static_cast<size_t>(reloc_info.length()));
3786 // Relocate the copy.
3787 DCHECK(isolate_->code_range() == NULL || !isolate_->code_range()->valid() ||
3788 isolate_->code_range()->contains(code->address()));
3789 new_code->Relocate(new_addr - old_addr);
3792 if (FLAG_verify_heap) code->ObjectVerify();
  return new_code;
}
3798 void Heap::InitializeAllocationMemento(AllocationMemento* memento,
3799 AllocationSite* allocation_site) {
3800 memento->set_map_no_write_barrier(allocation_memento_map());
3801 DCHECK(allocation_site->map() == allocation_site_map());
3802 memento->set_allocation_site(allocation_site, SKIP_WRITE_BARRIER);
3803 if (FLAG_allocation_site_pretenuring) {
3804 allocation_site->IncrementMementoCreateCount();
  }
}
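// The create count bumped here is later compared against the number of
// mementos actually found behind surviving objects, which is what feeds the
// allocation site's pretenuring decision.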
3809 AllocationResult Heap::Allocate(Map* map, AllocationSpace space,
3810 AllocationSite* allocation_site) {
3811 DCHECK(gc_state_ == NOT_IN_GC);
3812 DCHECK(map->instance_type() != MAP_TYPE);
3813 // If allocation failures are disallowed, we may allocate in a different
3814 // space when new space is full and the object is not a large object.
3815 AllocationSpace retry_space =
3816 (space != NEW_SPACE) ? space : TargetSpaceId(map->instance_type());
3817 int size = map->instance_size();
3818 if (allocation_site != NULL) {
3819 size += AllocationMemento::kSize;
  }
  HeapObject* result;
3822 AllocationResult allocation = AllocateRaw(size, space, retry_space);
3823 if (!allocation.To(&result)) return allocation;
3824 // No need for write barrier since object is white and map is in old space.
3825 result->set_map_no_write_barrier(map);
3826 if (allocation_site != NULL) {
3827 AllocationMemento* alloc_memento = reinterpret_cast<AllocationMemento*>(
3828 reinterpret_cast<Address>(result) + map->instance_size());
3829 InitializeAllocationMemento(alloc_memento, allocation_site);
  }
  return result;
}
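// When an allocation site is supplied, the memento sits directly behind the
// object:
//   [ object (map->instance_size() bytes) ][ AllocationMemento ]
// which is why the raw allocation above was enlarged by AllocationMemento::kSize.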
3835 void Heap::InitializeJSObjectFromMap(JSObject* obj, FixedArray* properties,
                                     Map* map) {
3837 obj->set_properties(properties);
3838 obj->initialize_elements();
3839 // TODO(1240798): Initialize the object's body using valid initial values
3840 // according to the object's initial map. For example, if the map's
3841 // instance type is JS_ARRAY_TYPE, the length field should be initialized
3842 // to a number (e.g. Smi::FromInt(0)) and the elements initialized to a
3843 // fixed array (e.g. Heap::empty_fixed_array()). Currently, the object
3844 // verification code has to cope with (temporarily) invalid objects. See
3845 // for example, JSArray::JSArrayVerify.
  Object* filler;
3847 // We cannot always fill with one_pointer_filler_map because objects
3848 // created from API functions expect their internal fields to be initialized
3849 // with undefined_value.
3850 // Pre-allocated fields need to be initialized with undefined_value as well
3851 // so that object accesses before the constructor completes (e.g. in the
3852 // debugger) will not cause a crash.
3853 if (map->constructor()->IsJSFunction() &&
3854 JSFunction::cast(map->constructor())
3855 ->IsInobjectSlackTrackingInProgress()) {
3856 // We might want to shrink the object later.
3857 DCHECK(obj->GetInternalFieldCount() == 0);
3858 filler = Heap::one_pointer_filler_map();
  } else {
3860 filler = Heap::undefined_value();
  }
3862 obj->InitializeBody(map, Heap::undefined_value(), filler);
}
3866 AllocationResult Heap::AllocateJSObjectFromMap(
3867 Map* map, PretenureFlag pretenure, bool allocate_properties,
3868 AllocationSite* allocation_site) {
3869 // JSFunctions should be allocated using AllocateFunction to be
3870 // properly initialized.
3871 DCHECK(map->instance_type() != JS_FUNCTION_TYPE);
3873 // Both types of global objects should be allocated using
3874 // AllocateGlobalObject to be properly initialized.
3875 DCHECK(map->instance_type() != JS_GLOBAL_OBJECT_TYPE);
3876 DCHECK(map->instance_type() != JS_BUILTINS_OBJECT_TYPE);
3878 // Allocate the backing storage for the properties.
3879 FixedArray* properties;
3880 if (allocate_properties) {
3881 int prop_size = map->InitialPropertiesLength();
3882 DCHECK(prop_size >= 0);
    {
3884 AllocationResult allocation = AllocateFixedArray(prop_size, pretenure);
3885 if (!allocation.To(&properties)) return allocation;
    }
  } else {
3888 properties = empty_fixed_array();
  }
3891 // Allocate the JSObject.
3892 int size = map->instance_size();
3893 AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, pretenure);
  JSObject* js_obj;
3895 AllocationResult allocation = Allocate(map, space, allocation_site);
3896 if (!allocation.To(&js_obj)) return allocation;
3898 // Initialize the JSObject.
3899 InitializeJSObjectFromMap(js_obj, properties, map);
3900 DCHECK(js_obj->HasFastElements() || js_obj->HasExternalArrayElements() ||
3901 js_obj->HasFixedTypedArrayElements());
  return js_obj;
}
3906 AllocationResult Heap::AllocateJSObject(JSFunction* constructor,
3907 PretenureFlag pretenure,
3908 AllocationSite* allocation_site) {
3909 DCHECK(constructor->has_initial_map());
3911 // Allocate the object based on the constructor's initial map.
3912 AllocationResult allocation = AllocateJSObjectFromMap(
3913 constructor->initial_map(), pretenure, true, allocation_site);
3915 // Make sure result is NOT a global object if valid.
  JSObject* obj;
3917 DCHECK(!allocation.To(&obj) || !obj->IsGlobalObject());
  return allocation;
}
3923 AllocationResult Heap::CopyJSObject(JSObject* source, AllocationSite* site) {
3925 Map* map = source->map();
3927 // We can only clone normal objects or arrays. Copying anything else
3928 // will break invariants.
3929 CHECK(map->instance_type() == JS_OBJECT_TYPE ||
3930 map->instance_type() == JS_ARRAY_TYPE);
3932 int object_size = map->instance_size();
  HeapObject* clone;
3935 DCHECK(site == NULL || AllocationSite::CanTrack(map->instance_type()));
3937 WriteBarrierMode wb_mode = UPDATE_WRITE_BARRIER;
3939 // If we're forced to always allocate, we use the general allocation
3940 // functions which may leave us with an object in old space.
3941 if (always_allocate()) {
3943 AllocationResult allocation =
3944 AllocateRaw(object_size, NEW_SPACE, OLD_POINTER_SPACE);
3945 if (!allocation.To(&clone)) return allocation;
3947 Address clone_address = clone->address();
3948 CopyBlock(clone_address, source->address(), object_size);
3950 // Update write barrier for all tagged fields that lie beyond the header.
3951 const int start_offset = JSObject::kHeaderSize;
3952 const int end_offset = object_size;
3954 #if V8_DOUBLE_FIELDS_UNBOXING
3955 LayoutDescriptorHelper helper(map);
3956 bool has_only_tagged_fields = helper.all_fields_tagged();
3958 if (!has_only_tagged_fields) {
3959 for (int offset = start_offset; offset < end_offset;) {
3960 int end_of_region_offset;
3961 if (helper.IsTagged(offset, end_offset, &end_of_region_offset)) {
3962 RecordWrites(clone_address, offset,
3963 (end_of_region_offset - offset) / kPointerSize);
        }
3965 offset = end_of_region_offset;
      }
    } else {
#endif
3969 // Object has only tagged fields.
3970 RecordWrites(clone_address, start_offset,
3971 (end_offset - start_offset) / kPointerSize);
3972 #if V8_DOUBLE_FIELDS_UNBOXING
    }
#endif
  } else {
3977 wb_mode = SKIP_WRITE_BARRIER;
3980 int adjusted_object_size =
3981 site != NULL ? object_size + AllocationMemento::kSize : object_size;
3982 AllocationResult allocation =
3983 AllocateRaw(adjusted_object_size, NEW_SPACE, NEW_SPACE);
3984 if (!allocation.To(&clone)) return allocation;
3986 SLOW_DCHECK(InNewSpace(clone));
3987 // Since we know the clone is allocated in new space, we can copy
3988 // the contents without worrying about updating the write barrier.
3989 CopyBlock(clone->address(), source->address(), object_size);
    if (site != NULL) {
3992 AllocationMemento* alloc_memento = reinterpret_cast<AllocationMemento*>(
3993 reinterpret_cast<Address>(clone) + object_size);
3994 InitializeAllocationMemento(alloc_memento, site);
    }
  }
3998 SLOW_DCHECK(JSObject::cast(clone)->GetElementsKind() ==
3999 source->GetElementsKind());
4000 FixedArrayBase* elements = FixedArrayBase::cast(source->elements());
4001 FixedArray* properties = FixedArray::cast(source->properties());
4002 // Update elements if necessary.
4003 if (elements->length() > 0) {
4004 FixedArrayBase* elem;
4006 AllocationResult allocation;
4007 if (elements->map() == fixed_cow_array_map()) {
4008 allocation = FixedArray::cast(elements);
4009 } else if (source->HasFastDoubleElements()) {
4010 allocation = CopyFixedDoubleArray(FixedDoubleArray::cast(elements));
    } else {
4012 allocation = CopyFixedArray(FixedArray::cast(elements));
    }
4014 if (!allocation.To(&elem)) return allocation;
4016 JSObject::cast(clone)->set_elements(elem, wb_mode);
  }
4018 // Update properties if necessary.
4019 if (properties->length() > 0) {
    FixedArray* prop;
4022 AllocationResult allocation = CopyFixedArray(properties);
4023 if (!allocation.To(&prop)) return allocation;
4025 JSObject::cast(clone)->set_properties(prop, wb_mode);
  }
4027 // Return the new clone.
  return clone;
}
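// Write barrier mode recap: clones forced into old space keep
// UPDATE_WRITE_BARRIER and have their tagged fields recorded explicitly,
// while clones known to be in new space can use SKIP_WRITE_BARRIER, since
// new-space objects never need old-to-new remembered-set entries.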
4032 static inline void WriteOneByteData(Vector<const char> vector, uint8_t* chars,
                                    int len) {
4034 // Only works for one byte strings.
4035 DCHECK(vector.length() == len);
4036 MemCopy(chars, vector.start(), len);
}
4039 static inline void WriteTwoByteData(Vector<const char> vector, uint16_t* chars,
                                    int len) {
4041 const uint8_t* stream = reinterpret_cast<const uint8_t*>(vector.start());
4042 size_t stream_length = vector.length();
4043 while (stream_length != 0) {
4044 size_t consumed = 0;
4045 uint32_t c = unibrow::Utf8::ValueOf(stream, stream_length, &consumed);
4046 DCHECK(c != unibrow::Utf8::kBadChar);
4047 DCHECK(consumed <= stream_length);
4048 stream_length -= consumed;
  stream += consumed;
4050 if (c > unibrow::Utf16::kMaxNonSurrogateCharCode) {
      len -= 2;
      if (len < 0) break;
4053 *chars++ = unibrow::Utf16::LeadSurrogate(c);
4054 *chars++ = unibrow::Utf16::TrailSurrogate(c);
    } else {
      len -= 1;
      if (len < 0) break;
      *chars++ = c;
    }
  }
4061 DCHECK(stream_length == 0);
  DCHECK(len == 0);
}
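// Characters above U+FFFF do not fit in one UTF-16 code unit, so they take
// two slots of the output (a lead and a trail surrogate); that is why the
// bookkeeping above subtracts two from len for supplementary-plane characters
// and one otherwise.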
4066 static inline void WriteOneByteData(String* s, uint8_t* chars, int len) {
4067 DCHECK(s->length() == len);
4068 String::WriteToFlat(s, chars, 0, len);
4072 static inline void WriteTwoByteData(String* s, uint16_t* chars, int len) {
4073 DCHECK(s->length() == len);
4074 String::WriteToFlat(s, chars, 0, len);
4078 template <bool is_one_byte, typename T>
4079 AllocationResult Heap::AllocateInternalizedStringImpl(T t, int chars,
4080 uint32_t hash_field) {
4082 // Compute map and object size.
  Map* map;
  int size;
4086 DCHECK_LE(0, chars);
4087 DCHECK_GE(String::kMaxLength, chars);
  if (is_one_byte) {
4089 map = one_byte_internalized_string_map();
4090 size = SeqOneByteString::SizeFor(chars);
  } else {
4092 map = internalized_string_map();
4093 size = SeqTwoByteString::SizeFor(chars);
  }
4095 AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, TENURED);
  HeapObject* result;
4100 AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
4101 if (!allocation.To(&result)) return allocation;
4104 result->set_map_no_write_barrier(map);
4105 // Set length and hash fields of the allocated string.
4106 String* answer = String::cast(result);
4107 answer->set_length(chars);
4108 answer->set_hash_field(hash_field);
4110 DCHECK_EQ(size, answer->Size());
  if (is_one_byte) {
4113 WriteOneByteData(t, SeqOneByteString::cast(answer)->GetChars(), chars);
  } else {
4115 WriteTwoByteData(t, SeqTwoByteString::cast(answer)->GetChars(), chars);
  }
  return answer;
}
4121 // Need explicit instantiations.
4122 template AllocationResult Heap::AllocateInternalizedStringImpl<true>(String*,
                                                                     int, uint32_t);
4125 template AllocationResult Heap::AllocateInternalizedStringImpl<false>(String*,
                                                                      int, uint32_t);
4128 template AllocationResult Heap::AllocateInternalizedStringImpl<false>(
4129 Vector<const char>, int, uint32_t);
4132 AllocationResult Heap::AllocateRawOneByteString(int length,
4133 PretenureFlag pretenure) {
4134 DCHECK_LE(0, length);
4135 DCHECK_GE(String::kMaxLength, length);
4136 int size = SeqOneByteString::SizeFor(length);
4137 DCHECK(size <= SeqOneByteString::kMaxSize);
4138 AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
4142 AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
4143 if (!allocation.To(&result)) return allocation;
4146 // Partially initialize the object.
4147 result->set_map_no_write_barrier(one_byte_string_map());
4148 String::cast(result)->set_length(length);
4149 String::cast(result)->set_hash_field(String::kEmptyHashField);
4150 DCHECK_EQ(size, HeapObject::cast(result)->Size());
4156 AllocationResult Heap::AllocateRawTwoByteString(int length,
4157 PretenureFlag pretenure) {
4158 DCHECK_LE(0, length);
4159 DCHECK_GE(String::kMaxLength, length);
4160 int size = SeqTwoByteString::SizeFor(length);
4161 DCHECK(size <= SeqTwoByteString::kMaxSize);
4162 AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
4166 AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
4167 if (!allocation.To(&result)) return allocation;
4170 // Partially initialize the object.
4171 result->set_map_no_write_barrier(string_map());
4172 String::cast(result)->set_length(length);
4173 String::cast(result)->set_hash_field(String::kEmptyHashField);
4174 DCHECK_EQ(size, HeapObject::cast(result)->Size());
4179 AllocationResult Heap::AllocateEmptyFixedArray() {
4180 int size = FixedArray::SizeFor(0);
4183 AllocationResult allocation =
4184 AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE);
4185 if (!allocation.To(&result)) return allocation;
4187 // Initialize the object.
4188 result->set_map_no_write_barrier(fixed_array_map());
4189 FixedArray::cast(result)->set_length(0);
4194 AllocationResult Heap::AllocateEmptyExternalArray(
4195 ExternalArrayType array_type) {
4196 return AllocateExternalArray(0, array_type, NULL, TENURED);
4200 AllocationResult Heap::CopyAndTenureFixedCOWArray(FixedArray* src) {
4201 if (!InNewSpace(src)) {
    return src;
  }
4205 int len = src->length();
  HeapObject* obj;
4208 AllocationResult allocation = AllocateRawFixedArray(len, TENURED);
4209 if (!allocation.To(&obj)) return allocation;
4211 obj->set_map_no_write_barrier(fixed_array_map());
4212 FixedArray* result = FixedArray::cast(obj);
4213 result->set_length(len);
4216 DisallowHeapAllocation no_gc;
4217 WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
4218 for (int i = 0; i < len; i++) result->set(i, src->get(i), mode);
4220 // TODO(mvstanton): The map is set twice because of protection against calling
4221 // set() on a COW FixedArray. Issue v8:3221 was created to track this, and
4222 // we might then be able to remove this whole method.
4223 HeapObject::cast(obj)->set_map_no_write_barrier(fixed_cow_array_map());
  return result;
}
4228 AllocationResult Heap::AllocateEmptyFixedTypedArray(
4229 ExternalArrayType array_type) {
4230 return AllocateFixedTypedArray(0, array_type, TENURED);
4234 AllocationResult Heap::CopyFixedArrayWithMap(FixedArray* src, Map* map) {
4235 int len = src->length();
4238 AllocationResult allocation = AllocateRawFixedArray(len, NOT_TENURED);
4239 if (!allocation.To(&obj)) return allocation;
4241 if (InNewSpace(obj)) {
4242 obj->set_map_no_write_barrier(map);
4243 CopyBlock(obj->address() + kPointerSize, src->address() + kPointerSize,
4244 FixedArray::SizeFor(len) - kPointerSize);
4247 obj->set_map_no_write_barrier(map);
4248 FixedArray* result = FixedArray::cast(obj);
4249 result->set_length(len);
4252 DisallowHeapAllocation no_gc;
4253 WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
4254 for (int i = 0; i < len; i++) result->set(i, src->get(i), mode);
4259 AllocationResult Heap::CopyFixedDoubleArrayWithMap(FixedDoubleArray* src,
                                                    Map* map) {
4261 int len = src->length();
  HeapObject* obj;
4264 AllocationResult allocation = AllocateRawFixedDoubleArray(len, NOT_TENURED);
4265 if (!allocation.To(&obj)) return allocation;
4267 obj->set_map_no_write_barrier(map);
4268 CopyBlock(obj->address() + FixedDoubleArray::kLengthOffset,
4269 src->address() + FixedDoubleArray::kLengthOffset,
4270 FixedDoubleArray::SizeFor(len) - FixedDoubleArray::kLengthOffset);
4275 AllocationResult Heap::CopyConstantPoolArrayWithMap(ConstantPoolArray* src,
                                                     Map* map) {
  HeapObject* obj;
4278 if (src->is_extended_layout()) {
4279 ConstantPoolArray::NumberOfEntries small(src,
4280 ConstantPoolArray::SMALL_SECTION);
4281 ConstantPoolArray::NumberOfEntries extended(
4282 src, ConstantPoolArray::EXTENDED_SECTION);
4283 AllocationResult allocation =
4284 AllocateExtendedConstantPoolArray(small, extended);
4285 if (!allocation.To(&obj)) return allocation;
  } else {
4287 ConstantPoolArray::NumberOfEntries small(src,
4288 ConstantPoolArray::SMALL_SECTION);
4289 AllocationResult allocation = AllocateConstantPoolArray(small);
4290 if (!allocation.To(&obj)) return allocation;
4292 obj->set_map_no_write_barrier(map);
4293 CopyBlock(obj->address() + ConstantPoolArray::kFirstEntryOffset,
4294 src->address() + ConstantPoolArray::kFirstEntryOffset,
4295 src->size() - ConstantPoolArray::kFirstEntryOffset);
4300 AllocationResult Heap::AllocateRawFixedArray(int length,
4301 PretenureFlag pretenure) {
4302 if (length < 0 || length > FixedArray::kMaxLength) {
4303 v8::internal::Heap::FatalProcessOutOfMemory("invalid array length", true);
  }
4305 int size = FixedArray::SizeFor(length);
4306 AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, pretenure);
4308 return AllocateRaw(size, space, OLD_POINTER_SPACE);
4312 AllocationResult Heap::AllocateFixedArrayWithFiller(int length,
4313 PretenureFlag pretenure,
                                                     Object* filler) {
4315 DCHECK(length >= 0);
4316 DCHECK(empty_fixed_array()->IsFixedArray());
4317 if (length == 0) return empty_fixed_array();
4319 DCHECK(!InNewSpace(filler));
4322 AllocationResult allocation = AllocateRawFixedArray(length, pretenure);
4323 if (!allocation.To(&result)) return allocation;
4326 result->set_map_no_write_barrier(fixed_array_map());
4327 FixedArray* array = FixedArray::cast(result);
4328 array->set_length(length);
4329 MemsetPointer(array->data_start(), filler, length);
4334 AllocationResult Heap::AllocateFixedArray(int length, PretenureFlag pretenure) {
4335 return AllocateFixedArrayWithFiller(length, pretenure, undefined_value());
4339 AllocationResult Heap::AllocateUninitializedFixedArray(int length) {
4340 if (length == 0) return empty_fixed_array();
4344 AllocationResult allocation = AllocateRawFixedArray(length, NOT_TENURED);
4345 if (!allocation.To(&obj)) return allocation;
4348 obj->set_map_no_write_barrier(fixed_array_map());
4349 FixedArray::cast(obj)->set_length(length);
4354 AllocationResult Heap::AllocateUninitializedFixedDoubleArray(
4355 int length, PretenureFlag pretenure) {
4356 if (length == 0) return empty_fixed_array();
4358 HeapObject* elements;
4359 AllocationResult allocation = AllocateRawFixedDoubleArray(length, pretenure);
4360 if (!allocation.To(&elements)) return allocation;
4362 elements->set_map_no_write_barrier(fixed_double_array_map());
4363 FixedDoubleArray::cast(elements)->set_length(length);
4368 AllocationResult Heap::AllocateRawFixedDoubleArray(int length,
4369 PretenureFlag pretenure) {
4370 if (length < 0 || length > FixedDoubleArray::kMaxLength) {
4371 v8::internal::Heap::FatalProcessOutOfMemory("invalid array length", true);
  }
4373 int size = FixedDoubleArray::SizeFor(length);
4374 #ifndef V8_HOST_ARCH_64_BIT
4375 size += kPointerSize;
#endif
4377 AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
  HeapObject* object;
4381 AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
4382 if (!allocation.To(&object)) return allocation;
4385 return EnsureDoubleAligned(this, object, size);
}
4389 AllocationResult Heap::AllocateConstantPoolArray(
4390 const ConstantPoolArray::NumberOfEntries& small) {
4391 CHECK(small.are_in_range(0, ConstantPoolArray::kMaxSmallEntriesPerType));
4392 int size = ConstantPoolArray::SizeFor(small);
4393 #ifndef V8_HOST_ARCH_64_BIT
4394 size += kPointerSize;
#endif
4396 AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, TENURED);
  HeapObject* object;
4400 AllocationResult allocation = AllocateRaw(size, space, OLD_POINTER_SPACE);
4401 if (!allocation.To(&object)) return allocation;
4403 object = EnsureDoubleAligned(this, object, size);
4404 object->set_map_no_write_barrier(constant_pool_array_map());
4406 ConstantPoolArray* constant_pool = ConstantPoolArray::cast(object);
4407 constant_pool->Init(small);
4408 constant_pool->ClearPtrEntries(isolate());
4409 return constant_pool;
4413 AllocationResult Heap::AllocateExtendedConstantPoolArray(
4414 const ConstantPoolArray::NumberOfEntries& small,
4415 const ConstantPoolArray::NumberOfEntries& extended) {
4416 CHECK(small.are_in_range(0, ConstantPoolArray::kMaxSmallEntriesPerType));
4417 CHECK(extended.are_in_range(0, kMaxInt));
4418 int size = ConstantPoolArray::SizeForExtended(small, extended);
4419 #ifndef V8_HOST_ARCH_64_BIT
4420 size += kPointerSize;
#endif
4422 AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, TENURED);
  HeapObject* object;
4426 AllocationResult allocation = AllocateRaw(size, space, OLD_POINTER_SPACE);
4427 if (!allocation.To(&object)) return allocation;
4429 object = EnsureDoubleAligned(this, object, size);
4430 object->set_map_no_write_barrier(constant_pool_array_map());
4432 ConstantPoolArray* constant_pool = ConstantPoolArray::cast(object);
4433 constant_pool->InitExtended(small, extended);
4434 constant_pool->ClearPtrEntries(isolate());
4435 return constant_pool;
4439 AllocationResult Heap::AllocateEmptyConstantPoolArray() {
4440 ConstantPoolArray::NumberOfEntries small(0, 0, 0, 0);
4441 int size = ConstantPoolArray::SizeFor(small);
4444 AllocationResult allocation =
4445 AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE);
4446 if (!allocation.To(&result)) return allocation;
4448 result->set_map_no_write_barrier(constant_pool_array_map());
4449 ConstantPoolArray::cast(result)->Init(small);
4454 AllocationResult Heap::AllocateSymbol() {
4455 // Statically ensure that it is safe to allocate symbols in paged spaces.
4456 STATIC_ASSERT(Symbol::kSize <= Page::kMaxRegularHeapObjectSize);
4459 AllocationResult allocation =
4460 AllocateRaw(Symbol::kSize, OLD_POINTER_SPACE, OLD_POINTER_SPACE);
4461 if (!allocation.To(&result)) return allocation;
4463 result->set_map_no_write_barrier(symbol_map());
4465 // Generate a random hash value.
  int hash;
  int attempts = 0;
  do {
4469 hash = isolate()->random_number_generator()->NextInt() & Name::kHashBitMask;
    attempts++;
4471 } while (hash == 0 && attempts < 30);
4472 if (hash == 0) hash = 1; // never return 0
4474 Symbol::cast(result)
4475 ->set_hash_field(Name::kIsNotArrayIndexMask | (hash << Name::kHashShift));
4476 Symbol::cast(result)->set_name(undefined_value());
4477 Symbol::cast(result)->set_flags(Smi::FromInt(0));
4479 DCHECK(!Symbol::cast(result)->is_private());
  return result;
}
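// The hash field packs the generated hash into the upper bits (shifted by
// Name::kHashShift) and ORs in kIsNotArrayIndexMask so a symbol's hash can
// never be mistaken for a cached array index; the retry loop above keeps the
// raw hash non-zero.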
4484 AllocationResult Heap::AllocateStruct(InstanceType type) {
  Map* map;
  switch (type) {
4487 #define MAKE_CASE(NAME, Name, name) \
  case NAME##_TYPE:                   \
4489 map = name##_map(); \
    break;
4491 STRUCT_LIST(MAKE_CASE)
#undef MAKE_CASE
    default:
      UNREACHABLE();
      return exception();
  }
4497 int size = map->instance_size();
4498 AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, TENURED);
  Struct* result;
4501 AllocationResult allocation = Allocate(map, space);
4502 if (!allocation.To(&result)) return allocation;
4504 result->InitializeBody(size);
  return result;
}
4509 bool Heap::IsHeapIterable() {
4510 // TODO(hpayer): This function is not correct. Allocation folding in old
4511 // space breaks the iterability.
4512 return new_space_top_after_last_gc_ == new_space()->top();
4516 void Heap::MakeHeapIterable() {
4517 DCHECK(AllowHeapAllocation::IsAllowed());
4518 if (!IsHeapIterable()) {
4519 CollectAllGarbage(kMakeHeapIterableMask, "Heap::MakeHeapIterable");
4521 if (mark_compact_collector()->sweeping_in_progress()) {
4522 mark_compact_collector()->EnsureSweepingCompleted();
4524 DCHECK(IsHeapIterable());
4528 void Heap::IdleMarkCompact(const char* message) {
4529 bool uncommit = false;
4530 if (gc_count_at_last_idle_gc_ == gc_count_) {
4531 // No GC since the last full GC, the mutator is probably not active.
4532 isolate_->compilation_cache()->Clear();
    uncommit = true;
  }
4535 CollectAllGarbage(kReduceMemoryFootprintMask, message);
4536 gc_idle_time_handler_.NotifyIdleMarkCompact();
4537 gc_count_at_last_idle_gc_ = gc_count_;
  if (uncommit) {
4539 new_space_.Shrink();
4540 UncommitFromSpace();
  }
}
4545 bool Heap::TryFinalizeIdleIncrementalMarking(
4546 double idle_time_in_ms, size_t size_of_objects,
4547 size_t final_incremental_mark_compact_speed_in_bytes_per_ms) {
4548 if (incremental_marking()->IsComplete() ||
4549 (mark_compact_collector_.marking_deque()->IsEmpty() &&
4550 gc_idle_time_handler_.ShouldDoFinalIncrementalMarkCompact(
4551 static_cast<size_t>(idle_time_in_ms), size_of_objects,
4552 final_incremental_mark_compact_speed_in_bytes_per_ms))) {
4553 CollectAllGarbage(kNoGCFlags, "idle notification: finalize incremental");
    return true;
  }
  return false;
}
4560 bool Heap::WorthActivatingIncrementalMarking() {
4561 return incremental_marking()->IsStopped() &&
4562 incremental_marking()->ShouldActivate();
4566 static double MonotonicallyIncreasingTimeInMs() {
4567 return V8::GetCurrentPlatform()->MonotonicallyIncreasingTime() *
4568 static_cast<double>(base::Time::kMillisecondsPerSecond);
4572 bool Heap::IdleNotification(int idle_time_in_ms) {
4573 return IdleNotification(
4574 V8::GetCurrentPlatform()->MonotonicallyIncreasingTime() +
4575 (static_cast<double>(idle_time_in_ms) /
4576 static_cast<double>(base::Time::kMillisecondsPerSecond)));
}
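// For example, an embedder calling IdleNotification(16) hands the GC a 16 ms
// budget, which is converted here into an absolute deadline of now + 0.016 s
// before delegating to the deadline-based overload below.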
4580 bool Heap::IdleNotification(double deadline_in_seconds) {
4581 CHECK(HasBeenSetUp()); // http://crbug.com/425035
4582 double deadline_in_ms =
4583 deadline_in_seconds *
4584 static_cast<double>(base::Time::kMillisecondsPerSecond);
4585 HistogramTimerScope idle_notification_scope(
4586 isolate_->counters()->gc_idle_notification());
4587 double idle_time_in_ms = deadline_in_ms - MonotonicallyIncreasingTimeInMs();
4589 GCIdleTimeHandler::HeapState heap_state;
4590 heap_state.contexts_disposed = contexts_disposed_;
4591 heap_state.contexts_disposal_rate =
4592 tracer()->ContextDisposalRateInMilliseconds();
4593 heap_state.size_of_objects = static_cast<size_t>(SizeOfObjects());
4594 heap_state.incremental_marking_stopped = incremental_marking()->IsStopped();
4595 // TODO(ulan): Start incremental marking only for large heaps.
4596 intptr_t limit = old_generation_allocation_limit_;
4597 if (static_cast<size_t>(idle_time_in_ms) >
4598 GCIdleTimeHandler::kMinIdleTimeToStartIncrementalMarking) {
4599 limit = idle_old_generation_allocation_limit_;
4602 heap_state.can_start_incremental_marking =
4603 incremental_marking()->WorthActivating() &&
4604 NextGCIsLikelyToBeFull(limit) && FLAG_incremental_marking;
4605 heap_state.sweeping_in_progress =
4606 mark_compact_collector()->sweeping_in_progress();
4607 heap_state.mark_compact_speed_in_bytes_per_ms =
4608 static_cast<size_t>(tracer()->MarkCompactSpeedInBytesPerMillisecond());
4609 heap_state.incremental_marking_speed_in_bytes_per_ms = static_cast<size_t>(
4610 tracer()->IncrementalMarkingSpeedInBytesPerMillisecond());
4611 heap_state.final_incremental_mark_compact_speed_in_bytes_per_ms =
4612 static_cast<size_t>(
4613 tracer()->FinalIncrementalMarkCompactSpeedInBytesPerMillisecond());
4614 heap_state.scavenge_speed_in_bytes_per_ms =
4615 static_cast<size_t>(tracer()->ScavengeSpeedInBytesPerMillisecond());
4616 heap_state.used_new_space_size = new_space_.Size();
4617 heap_state.new_space_capacity = new_space_.Capacity();
4618 heap_state.new_space_allocation_throughput_in_bytes_per_ms =
4619 static_cast<size_t>(
4620 tracer()->NewSpaceAllocationThroughputInBytesPerMillisecond());
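// All of the rates sampled above feed GCIdleTimeHandler::Compute, which picks
// one of the idle actions handled in the switch below: incremental marking
// steps, a full GC, a scavenge, finalizing sweeping, or nothing at all.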
4622 GCIdleTimeAction action =
4623 gc_idle_time_handler_.Compute(idle_time_in_ms, heap_state);
4624 isolate()->counters()->gc_idle_time_allotted_in_ms()->AddSample(
4625 static_cast<int>(idle_time_in_ms));
4627 bool result = false;
4628 switch (action.type) {
    case DONE:
      result = true;
      break;
4632 case DO_INCREMENTAL_MARKING: {
4633 if (incremental_marking()->IsStopped()) {
4634 incremental_marking()->Start();
      }
4636 double remaining_idle_time_in_ms = 0.0;
      do {
4638 incremental_marking()->Step(
4639 action.parameter, IncrementalMarking::NO_GC_VIA_STACK_GUARD,
4640 IncrementalMarking::FORCE_MARKING,
4641 IncrementalMarking::DO_NOT_FORCE_COMPLETION);
4642 remaining_idle_time_in_ms =
4643 deadline_in_ms - MonotonicallyIncreasingTimeInMs();
4644 } while (remaining_idle_time_in_ms >=
4645 2.0 * GCIdleTimeHandler::kIncrementalMarkingStepTimeInMs &&
4646 !incremental_marking()->IsComplete() &&
4647 !mark_compact_collector_.marking_deque()->IsEmpty());
4648 if (remaining_idle_time_in_ms > 0.0) {
4649 action.additional_work = TryFinalizeIdleIncrementalMarking(
4650 remaining_idle_time_in_ms, heap_state.size_of_objects,
4651 heap_state.final_incremental_mark_compact_speed_in_bytes_per_ms);
      }
      break;
    }
    case DO_FULL_GC: {
4656 if (contexts_disposed_) {
4657 HistogramTimerScope scope(isolate_->counters()->gc_context());
4658 CollectAllGarbage(kNoGCFlags, "idle notification: contexts disposed");
4659 gc_idle_time_handler_.NotifyIdleMarkCompact();
4660 gc_count_at_last_idle_gc_ = gc_count_;
      } else {
4662 IdleMarkCompact("idle notification: finalize idle round");
      }
      break;
    }
    case DO_SCAVENGE:
4667 CollectGarbage(NEW_SPACE, "idle notification: scavenge");
      break;
4669 case DO_FINALIZE_SWEEPING:
4670 mark_compact_collector()->EnsureSweepingCompleted();
      break;
    case DO_NOTHING:
      break;
  }
4676 double current_time = MonotonicallyIncreasingTimeInMs();
4677 last_idle_notification_time_ = current_time;
4678 double deadline_difference = deadline_in_ms - current_time;
4680 if (deadline_difference >= 0) {
4681 if (action.type != DONE && action.type != DO_NOTHING) {
4682 isolate()->counters()->gc_idle_time_limit_undershot()->AddSample(
4683 static_cast<int>(deadline_difference));
    }
  } else {
4686 isolate()->counters()->gc_idle_time_limit_overshot()->AddSample(
4687 static_cast<int>(-deadline_difference));
  }
4690 if ((FLAG_trace_idle_notification && action.type > DO_NOTHING) ||
4691 FLAG_trace_idle_notification_verbose) {
    PrintPID(
4693 "Idle notification: requested idle time %.2f ms, used idle time %.2f "
4694 "ms, deadline usage %.2f ms [",
4695 idle_time_in_ms, idle_time_in_ms - deadline_difference,
4696 deadline_difference);
    action.Print();
4699 if (FLAG_trace_idle_notification_verbose) {
      heap_state.Print();
    }
    PrintF("\n");
  }
4707 contexts_disposed_ = 0;
  return result;
}
4712 bool Heap::RecentIdleNotificationHappened() {
4713 return (last_idle_notification_time_ +
4714 GCIdleTimeHandler::kMaxFrameRenderingIdleTime) >
4715 MonotonicallyIncreasingTimeInMs();
4721 void Heap::Print() {
4722 if (!HasBeenSetUp()) return;
4723 isolate()->PrintStack(stdout);
4724 AllSpaces spaces(this);
4725 for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
    space->Print();
  }
}
4731 void Heap::ReportCodeStatistics(const char* title) {
4732 PrintF(">>>>>> Code Stats (%s) >>>>>>\n", title);
4733 PagedSpace::ResetCodeStatistics(isolate());
4734 // We do not look for code in new space, map space, or old space. If code
4735 // somehow ends up in those spaces, we would miss it here.
4736 code_space_->CollectCodeStatistics();
4737 lo_space_->CollectCodeStatistics();
4738 PagedSpace::ReportCodeStatistics(isolate());
4742 // This function expects that NewSpace's allocated objects histogram is
4743 // populated (via a call to CollectStatistics or else as a side effect of a
4744 // just-completed scavenge collection).
4745 void Heap::ReportHeapStatistics(const char* title) {
4747 PrintF(">>>>>> =============== %s (%d) =============== >>>>>>\n", title,
         gc_count_);
4749 PrintF("old_generation_allocation_limit_ %" V8_PTR_PREFIX "d\n",
4750 old_generation_allocation_limit_);
4753 PrintF("Number of handles : %d\n", HandleScope::NumberOfHandles(isolate_));
4754 isolate_->global_handles()->PrintStats();
4757 PrintF("Heap statistics : ");
4758 isolate_->memory_allocator()->ReportStatistics();
4759 PrintF("To space : ");
4760 new_space_.ReportStatistics();
4761 PrintF("Old pointer space : ");
4762 old_pointer_space_->ReportStatistics();
4763 PrintF("Old data space : ");
4764 old_data_space_->ReportStatistics();
4765 PrintF("Code space : ");
4766 code_space_->ReportStatistics();
4767 PrintF("Map space : ");
4768 map_space_->ReportStatistics();
4769 PrintF("Cell space : ");
4770 cell_space_->ReportStatistics();
4771 PrintF("PropertyCell space : ");
4772 property_cell_space_->ReportStatistics();
4773 PrintF("Large object space : ");
4774 lo_space_->ReportStatistics();
4775 PrintF(">>>>>> ========================================= >>>>>>\n");
4780 bool Heap::Contains(HeapObject* value) { return Contains(value->address()); }
4783 bool Heap::Contains(Address addr) {
4784 if (isolate_->memory_allocator()->IsOutsideAllocatedSpace(addr)) return false;
4785 return HasBeenSetUp() &&
4786 (new_space_.ToSpaceContains(addr) ||
4787 old_pointer_space_->Contains(addr) ||
4788 old_data_space_->Contains(addr) || code_space_->Contains(addr) ||
4789 map_space_->Contains(addr) || cell_space_->Contains(addr) ||
4790 property_cell_space_->Contains(addr) ||
4791 lo_space_->SlowContains(addr));
4795 bool Heap::InSpace(HeapObject* value, AllocationSpace space) {
4796 return InSpace(value->address(), space);
4800 bool Heap::InSpace(Address addr, AllocationSpace space) {
4801 if (isolate_->memory_allocator()->IsOutsideAllocatedSpace(addr)) return false;
4802 if (!HasBeenSetUp()) return false;

  switch (space) {
    case NEW_SPACE:
4806 return new_space_.ToSpaceContains(addr);
4807 case OLD_POINTER_SPACE:
4808 return old_pointer_space_->Contains(addr);
4809 case OLD_DATA_SPACE:
4810 return old_data_space_->Contains(addr);
    case CODE_SPACE:
4812 return code_space_->Contains(addr);
    case MAP_SPACE:
4814 return map_space_->Contains(addr);
    case CELL_SPACE:
4816 return cell_space_->Contains(addr);
4817 case PROPERTY_CELL_SPACE:
4818 return property_cell_space_->Contains(addr);
    case LO_SPACE:
4820 return lo_space_->SlowContains(addr);
  }
  UNREACHABLE();
  return false;
}
4827 bool Heap::RootIsImmortalImmovable(int root_index) {
4828 switch (root_index) {
4829 #define CASE(name) \
4830 case Heap::k##name##RootIndex: \
    return true;
4832 IMMORTAL_IMMOVABLE_ROOT_LIST(CASE);
#undef CASE
    default:
      return false;
  }
}
4841 void Heap::Verify() {
4842 CHECK(HasBeenSetUp());
4843 HandleScope scope(isolate());
4845 store_buffer()->Verify();
4847 if (mark_compact_collector()->sweeping_in_progress()) {
4848 // We have to wait here for the sweeper threads to have an iterable heap.
4849 mark_compact_collector()->EnsureSweepingCompleted();
4852 VerifyPointersVisitor visitor;
4853 IterateRoots(&visitor, VISIT_ONLY_STRONG);
4855 VerifySmisVisitor smis_visitor;
4856 IterateSmiRoots(&smis_visitor);
4858 new_space_.Verify();
4860 old_pointer_space_->Verify(&visitor);
4861 map_space_->Verify(&visitor);
4863 VerifyPointersVisitor no_dirty_regions_visitor;
4864 old_data_space_->Verify(&no_dirty_regions_visitor);
4865 code_space_->Verify(&no_dirty_regions_visitor);
4866 cell_space_->Verify(&no_dirty_regions_visitor);
4867 property_cell_space_->Verify(&no_dirty_regions_visitor);
4869 lo_space_->Verify();
4874 void Heap::ZapFromSpace() {
4875 NewSpacePageIterator it(new_space_.FromSpaceStart(),
4876 new_space_.FromSpaceEnd());
4877 while (it.has_next()) {
4878 NewSpacePage* page = it.next();
4879 for (Address cursor = page->area_start(), limit = page->area_end();
4880 cursor < limit; cursor += kPointerSize) {
4881 Memory::Address_at(cursor) = kFromSpaceZapValue;
4887 void Heap::IterateAndMarkPointersToFromSpace(Address start, Address end,
4888 ObjectSlotCallback callback) {
4889 Address slot_address = start;
4891 // We are not collecting slots on new space objects during mutation
4892 // thus we have to scan for pointers to evacuation candidates when we
4893 // promote objects. But we should not record any slots in non-black
4894 // objects. Grey object's slots would be rescanned.
4895 // A white object might not survive until the end of the collection, so it
4896 // would be a violation of the invariant to record its slots.
4897 bool record_slots = false;
4898 if (incremental_marking()->IsCompacting()) {
4899 MarkBit mark_bit = Marking::MarkBitFrom(HeapObject::FromAddress(start));
4900 record_slots = Marking::IsBlack(mark_bit);
  }
4903 while (slot_address < end) {
4904 Object** slot = reinterpret_cast<Object**>(slot_address);
4905 Object* object = *slot;
4906 // If the store buffer becomes overfull we mark pages as being exempt from
4907 // the store buffer. These pages are scanned to find pointers that point
4908 // to the new space. In that case we may hit newly promoted objects and
4909 // fix the pointers before the promotion queue gets to them. Thus the 'if'.
4910 if (object->IsHeapObject()) {
4911 if (Heap::InFromSpace(object)) {
4912 callback(reinterpret_cast<HeapObject**>(slot),
4913 HeapObject::cast(object));
4914 Object* new_object = *slot;
4915 if (InNewSpace(new_object)) {
4916 SLOW_DCHECK(Heap::InToSpace(new_object));
4917 SLOW_DCHECK(new_object->IsHeapObject());
4918 store_buffer_.EnterDirectlyIntoStoreBuffer(
4919 reinterpret_cast<Address>(slot));
4921 SLOW_DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(new_object));
4922 } else if (record_slots &&
4923 MarkCompactCollector::IsOnEvacuationCandidate(object)) {
4924 mark_compact_collector()->RecordSlot(slot, slot, object);
      }
    }
4927 slot_address += kPointerSize;
  }
}
4933 typedef bool (*CheckStoreBufferFilter)(Object** addr);
4936 bool IsAMapPointerAddress(Object** addr) {
4937 uintptr_t a = reinterpret_cast<uintptr_t>(addr);
4938 int mod = a % Map::kSize;
4939 return mod >= Map::kPointerFieldsBeginOffset &&
4940 mod < Map::kPointerFieldsEndOffset;
4944 bool EverythingsAPointer(Object** addr) { return true; }
4947 static void CheckStoreBuffer(Heap* heap, Object** current, Object** limit,
4948 Object**** store_buffer_position,
4949 Object*** store_buffer_top,
4950 CheckStoreBufferFilter filter,
4951 Address special_garbage_start,
4952 Address special_garbage_end) {
4953 Map* free_space_map = heap->free_space_map();
4954 for (; current < limit; current++) {
4955 Object* o = *current;
4956 Address current_address = reinterpret_cast<Address>(current);
4958 if (o == free_space_map) {
4959 Address current_address = reinterpret_cast<Address>(current);
4960 FreeSpace* free_space =
4961 FreeSpace::cast(HeapObject::FromAddress(current_address));
4962 int skip = free_space->Size();
4963 DCHECK(current_address + skip <= reinterpret_cast<Address>(limit));
4965 current_address += skip - kPointerSize;
4966 current = reinterpret_cast<Object**>(current_address);
      continue;
    }
4969 // Skip the current linear allocation space between top and limit which is
4970 // unmarked with the free space map, but can contain junk.
4971 if (current_address == special_garbage_start &&
4972 special_garbage_end != special_garbage_start) {
4973 current_address = special_garbage_end - kPointerSize;
4974 current = reinterpret_cast<Object**>(current_address);
      continue;
    }
4977 if (!(*filter)(current)) continue;
4978 DCHECK(current_address < special_garbage_start ||
4979 current_address >= special_garbage_end);
4980 DCHECK(reinterpret_cast<uintptr_t>(o) != kFreeListZapValue);
4981 // We have to check that the pointer does not point into new space
4982 // without trying to cast it to a heap object since the hash field of
4983 // a string can contain values like 1 and 3 which are tagged null
  // pointers.
4985 if (!heap->InNewSpace(o)) continue;
4986 while (**store_buffer_position < current &&
4987 *store_buffer_position < store_buffer_top) {
4988 (*store_buffer_position)++;
4990 if (**store_buffer_position != current ||
4991 *store_buffer_position == store_buffer_top) {
4992 Object** obj_start = current;
4993 while (!(*obj_start)->IsMap()) obj_start--;
5000 // Check that the store buffer contains all intergenerational pointers by
5001 // scanning a page and ensuring that all pointers to young space are in the
// store buffer.
5003 void Heap::OldPointerSpaceCheckStoreBuffer() {
5004 OldSpace* space = old_pointer_space();
5005 PageIterator pages(space);
5007 store_buffer()->SortUniq();
5009 while (pages.has_next()) {
5010 Page* page = pages.next();
5011 Object** current = reinterpret_cast<Object**>(page->area_start());
5013 Address end = page->area_end();
5015 Object*** store_buffer_position = store_buffer()->Start();
5016 Object*** store_buffer_top = store_buffer()->Top();
5018 Object** limit = reinterpret_cast<Object**>(end);
5019 CheckStoreBuffer(this, current, limit, &store_buffer_position,
5020 store_buffer_top, &EverythingsAPointer, space->top(),
                     space->limit());
  }
}
5026 void Heap::MapSpaceCheckStoreBuffer() {
5027 MapSpace* space = map_space();
5028 PageIterator pages(space);
5030 store_buffer()->SortUniq();
5032 while (pages.has_next()) {
5033 Page* page = pages.next();
5034 Object** current = reinterpret_cast<Object**>(page->area_start());
5036 Address end = page->area_end();
5038 Object*** store_buffer_position = store_buffer()->Start();
5039 Object*** store_buffer_top = store_buffer()->Top();
5041 Object** limit = reinterpret_cast<Object**>(end);
5042 CheckStoreBuffer(this, current, limit, &store_buffer_position,
5043 store_buffer_top, &IsAMapPointerAddress, space->top(),
                     space->limit());
  }
}
5049 void Heap::LargeObjectSpaceCheckStoreBuffer() {
5050 LargeObjectIterator it(lo_space());
5051 for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
5052 // We only have code, sequential strings, or fixed arrays in large
5053 // object space, and only fixed arrays can possibly contain pointers to
5054 // the young generation.
5055 if (object->IsFixedArray()) {
5056 Object*** store_buffer_position = store_buffer()->Start();
5057 Object*** store_buffer_top = store_buffer()->Top();
5058 Object** current = reinterpret_cast<Object**>(object->address());
      Object** limit =
5060 reinterpret_cast<Object**>(object->address() + object->Size());
5061 CheckStoreBuffer(this, current, limit, &store_buffer_position,
5062 store_buffer_top, &EverythingsAPointer, NULL, NULL);
5069 void Heap::IterateRoots(ObjectVisitor* v, VisitMode mode) {
5070 IterateStrongRoots(v, mode);
5071 IterateWeakRoots(v, mode);
5075 void Heap::IterateWeakRoots(ObjectVisitor* v, VisitMode mode) {
5076 v->VisitPointer(reinterpret_cast<Object**>(&roots_[kStringTableRootIndex]));
5077 v->Synchronize(VisitorSynchronization::kStringTable);
5078 if (mode != VISIT_ALL_IN_SCAVENGE && mode != VISIT_ALL_IN_SWEEP_NEWSPACE) {
5079 // Scavenge collections have special processing for this.
5080 external_string_table_.Iterate(v);
5082 v->Synchronize(VisitorSynchronization::kExternalStringsTable);
5086 void Heap::IterateSmiRoots(ObjectVisitor* v) {
5087 // Acquire execution access since we are going to read stack limit values.
5088 ExecutionAccess access(isolate());
5089 v->VisitPointers(&roots_[kSmiRootsStart], &roots_[kRootListLength]);
5090 v->Synchronize(VisitorSynchronization::kSmiRootList);
5094 void Heap::IterateStrongRoots(ObjectVisitor* v, VisitMode mode) {
5095 v->VisitPointers(&roots_[0], &roots_[kStrongRootListLength]);
5096 v->Synchronize(VisitorSynchronization::kStrongRootList);
5098 v->VisitPointer(bit_cast<Object**>(&hidden_string_));
5099 v->Synchronize(VisitorSynchronization::kInternalizedString);
5101 isolate_->bootstrapper()->Iterate(v);
5102 v->Synchronize(VisitorSynchronization::kBootstrapper);
5103 isolate_->Iterate(v);
5104 v->Synchronize(VisitorSynchronization::kTop);
5105 Relocatable::Iterate(isolate_, v);
5106 v->Synchronize(VisitorSynchronization::kRelocatable);
5108 if (isolate_->deoptimizer_data() != NULL) {
5109 isolate_->deoptimizer_data()->Iterate(v);
5111 v->Synchronize(VisitorSynchronization::kDebug);
5112 isolate_->compilation_cache()->Iterate(v);
5113 v->Synchronize(VisitorSynchronization::kCompilationCache);
5115 // Iterate over local handles in handle scopes.
5116 isolate_->handle_scope_implementer()->Iterate(v);
5117 isolate_->IterateDeferredHandles(v);
5118 v->Synchronize(VisitorSynchronization::kHandleScope);
5120 // Iterate over the builtin code objects and code stubs in the
5121 // heap. Note that it is not necessary to iterate over code objects
5122 // on scavenge collections.
5123 if (mode != VISIT_ALL_IN_SCAVENGE) {
5124 isolate_->builtins()->IterateBuiltins(v);
5126 v->Synchronize(VisitorSynchronization::kBuiltins);
5128 // Iterate over global handles.
  switch (mode) {
5130 case VISIT_ONLY_STRONG:
5131 isolate_->global_handles()->IterateStrongRoots(v);
      break;
5133 case VISIT_ALL_IN_SCAVENGE:
5134 isolate_->global_handles()->IterateNewSpaceStrongAndDependentRoots(v);
      break;
5136 case VISIT_ALL_IN_SWEEP_NEWSPACE:
    case VISIT_ALL:
5138 isolate_->global_handles()->IterateAllRoots(v);
      break;
  }
5141 v->Synchronize(VisitorSynchronization::kGlobalHandles);
5143 // Iterate over eternal handles.
5144 if (mode == VISIT_ALL_IN_SCAVENGE) {
5145 isolate_->eternal_handles()->IterateNewSpaceRoots(v);
  } else {
5147 isolate_->eternal_handles()->IterateAllRoots(v);
  }
5149 v->Synchronize(VisitorSynchronization::kEternalHandles);
5151 // Iterate over pointers being held by inactive threads.
5152 isolate_->thread_manager()->Iterate(v);
5153 v->Synchronize(VisitorSynchronization::kThreadManager);
5155 // Iterate over the pointers the Serialization/Deserialization code is
  // holding.
5157 // During garbage collection this keeps the partial snapshot cache alive.
5158 // During deserialization of the startup snapshot this creates the partial
5159 // snapshot cache and deserializes the objects it refers to. During
5160 // serialization this does nothing, since the partial snapshot cache is
5161 // empty. However the next thing we do is create the partial snapshot,
5162 // filling up the partial snapshot cache with objects it needs as we go.
5163 SerializerDeserializer::Iterate(isolate_, v);
5164 // We don't do a v->Synchronize call here, because in debug mode that will
5165 // output a flag to the snapshot. However at this point the serializer and
5166 // deserializer are deliberately a little unsynchronized (see above) so the
5167 // checking of the sync flag in the snapshot would fail.
5171 // TODO(1236194): Since the heap size is configurable on the command line
5172 // and through the API, we should gracefully handle the case that the heap
5173 // size is not big enough to fit all the initial objects.
5174 bool Heap::ConfigureHeap(int max_semi_space_size, int max_old_space_size,
5175 int max_executable_size, size_t code_range_size) {
5176 if (HasBeenSetUp()) return false;
5178 // Overwrite default configuration.
5179 if (max_semi_space_size > 0) {
5180 max_semi_space_size_ = max_semi_space_size * MB;
5182 if (max_old_space_size > 0) {
5183 max_old_generation_size_ = static_cast<intptr_t>(max_old_space_size) * MB;
5185 if (max_executable_size > 0) {
5186 max_executable_size_ = static_cast<intptr_t>(max_executable_size) * MB;
5189 // If max space size flags are specified overwrite the configuration.
5190 if (FLAG_max_semi_space_size > 0) {
5191 max_semi_space_size_ = FLAG_max_semi_space_size * MB;
5193 if (FLAG_max_old_space_size > 0) {
5194 max_old_generation_size_ =
5195 static_cast<intptr_t>(FLAG_max_old_space_size) * MB;
5197 if (FLAG_max_executable_size > 0) {
5198 max_executable_size_ = static_cast<intptr_t>(FLAG_max_executable_size) * MB;
5201 if (FLAG_stress_compaction) {
5202 // This will cause more frequent GCs when stressing.
5203 max_semi_space_size_ = Page::kPageSize;
5206 if (Snapshot::HaveASnapshotToStartFrom()) {
5207 // If we are using a snapshot we always reserve the default amount
5208 // of memory for each semispace because code in the snapshot has
5209 // write-barrier code that relies on the size and alignment of new
5210 // space. We therefore cannot use a larger max semispace size
5211 // than the default reserved semispace size.
5212 if (max_semi_space_size_ > reserved_semispace_size_) {
5213 max_semi_space_size_ = reserved_semispace_size_;
5214 if (FLAG_trace_gc) {
5215 PrintPID("Max semi-space size cannot be more than %d kbytes\n",
5216 reserved_semispace_size_ >> 10);
5220 // If we are not using snapshots we reserve space for the actual
5221 // max semispace size.
5222 reserved_semispace_size_ = max_semi_space_size_;
5225 // The max executable size must be less than or equal to the max old generation size.
5227 if (max_executable_size_ > max_old_generation_size_) {
5228 max_executable_size_ = max_old_generation_size_;
5231 // The new space size must be a power of two to support single-bit testing
5233 max_semi_space_size_ =
5234 base::bits::RoundUpToPowerOfTwo32(max_semi_space_size_);
5235 reserved_semispace_size_ =
5236 base::bits::RoundUpToPowerOfTwo32(reserved_semispace_size_);
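// Illustrative example (assumed request, not taken from the source): a 3 MB
// max semi-space request is rounded up by base::bits::RoundUpToPowerOfTwo32
// to 4 MB, while an 8 MB request, already a power of two, passes through
// unchanged.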
5238 if (FLAG_min_semi_space_size > 0) {
5239 int initial_semispace_size = FLAG_min_semi_space_size * MB;
5240 if (initial_semispace_size > max_semi_space_size_) {
5241 initial_semispace_size_ = max_semi_space_size_;
5242 if (FLAG_trace_gc) {
5244 "Min semi-space size cannot be more than the maximum "
5245 "semi-space size of %d MB\n",
5246 max_semi_space_size_ / MB);
5249 initial_semispace_size_ = initial_semispace_size;
5253 initial_semispace_size_ = Min(initial_semispace_size_, max_semi_space_size_);
5255 if (FLAG_target_semi_space_size > 0) {
5256 int target_semispace_size = FLAG_target_semi_space_size * MB;
5257 if (target_semispace_size < initial_semispace_size_) {
5258 target_semispace_size_ = initial_semispace_size_;
5259 if (FLAG_trace_gc) {
5261 "Target semi-space size cannot be less than the minimum "
5262 "semi-space size of %d MB\n",
5263 initial_semispace_size_ / MB);
5265 } else if (target_semispace_size > max_semi_space_size_) {
5266 target_semispace_size_ = max_semi_space_size_;
5267 if (FLAG_trace_gc) {
5269 "Target semi-space size cannot be more than the maximum "
5270 "semi-space size of %d MB\n",
5271 max_semi_space_size_ / MB);
5274 target_semispace_size_ = target_semispace_size;
5278 target_semispace_size_ = Max(initial_semispace_size_, target_semispace_size_);
5280 if (FLAG_semi_space_growth_factor < 2) {
5281 FLAG_semi_space_growth_factor = 2;
5284 // The old generation is paged and needs at least one page for each space.
5285 int paged_space_count = LAST_PAGED_SPACE - FIRST_PAGED_SPACE + 1;
5286 max_old_generation_size_ =
5287 Max(static_cast<intptr_t>(paged_space_count * Page::kPageSize),
5288 max_old_generation_size_);
5290 if (FLAG_initial_old_space_size > 0) {
5291 initial_old_generation_size_ = FLAG_initial_old_space_size * MB;
5293 initial_old_generation_size_ =
5294 max_old_generation_size_ / kInitalOldGenerationLimitFactor;
5296 old_generation_allocation_limit_ = initial_old_generation_size_;
5298 // We rely on being able to allocate new arrays in paged spaces.
5299 DCHECK(Page::kMaxRegularHeapObjectSize >=
5301 FixedArray::SizeFor(JSObject::kInitialMaxFastElementArray) +
5302 AllocationMemento::kSize));
5304 code_range_size_ = code_range_size * MB;
5311 bool Heap::ConfigureHeapDefault() { return ConfigureHeap(0, 0, 0, 0); }
5314 void Heap::RecordStats(HeapStats* stats, bool take_snapshot) {
5315 *stats->start_marker = HeapStats::kStartMarker;
5316 *stats->end_marker = HeapStats::kEndMarker;
5317 *stats->new_space_size = new_space_.SizeAsInt();
5318 *stats->new_space_capacity = static_cast<int>(new_space_.Capacity());
5319 *stats->old_pointer_space_size = old_pointer_space_->SizeOfObjects();
5320 *stats->old_pointer_space_capacity = old_pointer_space_->Capacity();
5321 *stats->old_data_space_size = old_data_space_->SizeOfObjects();
5322 *stats->old_data_space_capacity = old_data_space_->Capacity();
5323 *stats->code_space_size = code_space_->SizeOfObjects();
5324 *stats->code_space_capacity = code_space_->Capacity();
5325 *stats->map_space_size = map_space_->SizeOfObjects();
5326 *stats->map_space_capacity = map_space_->Capacity();
5327 *stats->cell_space_size = cell_space_->SizeOfObjects();
5328 *stats->cell_space_capacity = cell_space_->Capacity();
5329 *stats->property_cell_space_size = property_cell_space_->SizeOfObjects();
5330 *stats->property_cell_space_capacity = property_cell_space_->Capacity();
5331 *stats->lo_space_size = lo_space_->Size();
5332 isolate_->global_handles()->RecordStats(stats);
5333 *stats->memory_allocator_size = isolate()->memory_allocator()->Size();
5334 *stats->memory_allocator_capacity =
5335 isolate()->memory_allocator()->Size() +
5336 isolate()->memory_allocator()->Available();
5337 *stats->os_error = base::OS::GetLastError();
5339 if (take_snapshot) {
5340 HeapIterator iterator(this);
5341 for (HeapObject* obj = iterator.next(); obj != NULL;
5342 obj = iterator.next()) {
5343 InstanceType type = obj->map()->instance_type();
5344 DCHECK(0 <= type && type <= LAST_TYPE);
5345 stats->objects_per_type[type]++;
5346 stats->size_per_type[type] += obj->Size();
5352 intptr_t Heap::PromotedSpaceSizeOfObjects() {
5353 return old_pointer_space_->SizeOfObjects() +
5354 old_data_space_->SizeOfObjects() + code_space_->SizeOfObjects() +
5355 map_space_->SizeOfObjects() + cell_space_->SizeOfObjects() +
5356 property_cell_space_->SizeOfObjects() + lo_space_->SizeOfObjects();
5360 int64_t Heap::PromotedExternalMemorySize() {
5361 if (amount_of_external_allocated_memory_ <=
5362 amount_of_external_allocated_memory_at_last_global_gc_)
5364 return amount_of_external_allocated_memory_ -
5365 amount_of_external_allocated_memory_at_last_global_gc_;
5369 intptr_t Heap::CalculateOldGenerationAllocationLimit(double factor,
5370 intptr_t old_gen_size) {
5371 CHECK(factor > 1.0);
5372 CHECK(old_gen_size > 0);
5373 intptr_t limit = static_cast<intptr_t>(old_gen_size * factor);
5374 limit = Max(limit, kMinimumOldGenerationAllocationLimit);
5375 limit += new_space_.Capacity();
5376 intptr_t halfway_to_the_max = (old_gen_size + max_old_generation_size_) / 2;
5377 return Min(limit, halfway_to_the_max);
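// Worked example with illustrative numbers only: for old_gen_size = 100 MB,
// factor = 2.0, a 16 MB new space and max_old_generation_size_ = 700 MB
// (and assuming 200 MB already exceeds kMinimumOldGenerationAllocationLimit),
// limit = 200 MB + 16 MB = 216 MB while halfway_to_the_max = 400 MB, so the
// function returns 216 MB.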
5381 void Heap::SetOldGenerationAllocationLimit(intptr_t old_gen_size,
5382 int freed_global_handles) {
5383 const int kMaxHandles = 1000;
5384 const int kMinHandles = 100;
5385 const double min_factor = 1.1;
5386 double max_factor = 4;
5387 const double idle_max_factor = 1.5;
5388 // We set the old generation growing factor to 2 to grow the heap slower on
5389 // memory-constrained devices.
5390 if (max_old_generation_size_ <= kMaxOldSpaceSizeMediumMemoryDevice) {
5394 // If there are many freed global handles, then the next full GC will
5395 // likely collect a lot of garbage. Choose the heap growing factor
5396 // depending on freed global handles.
5397 // TODO(ulan, hpayer): Take into account mutator utilization.
5398 // TODO(hpayer): The idle factor could make the handles heuristic obsolete.
5401 if (freed_global_handles <= kMinHandles) {
5402 factor = max_factor;
5403 } else if (freed_global_handles >= kMaxHandles) {
5404 factor = min_factor;
5406 // Compute factor using linear interpolation between points
5407 // (kMinHandles, max_factor) and (kMaxHandles, min_factor).
5408 factor = max_factor -
5409 (freed_global_handles - kMinHandles) * (max_factor - min_factor) /
5410 (kMaxHandles - kMinHandles);
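// Worked example (illustrative, assuming the default max_factor of 4):
// freeing 550 global handles gives
// factor = 4 - (550 - 100) * (4 - 1.1) / (1000 - 100) = 2.55,
// i.e. roughly halfway between min_factor and max_factor.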
5413 if (FLAG_stress_compaction ||
5414 mark_compact_collector()->reduce_memory_footprint_) {
5415 factor = min_factor;
5418 old_generation_allocation_limit_ =
5419 CalculateOldGenerationAllocationLimit(factor, old_gen_size);
5420 idle_old_generation_allocation_limit_ = CalculateOldGenerationAllocationLimit(
5421 Min(factor, idle_max_factor), old_gen_size);
5425 void Heap::EnableInlineAllocation() {
5426 if (!inline_allocation_disabled_) return;
5427 inline_allocation_disabled_ = false;
5429 // Update inline allocation limit for new space.
5430 new_space()->UpdateInlineAllocationLimit(0);
5434 void Heap::DisableInlineAllocation() {
5435 if (inline_allocation_disabled_) return;
5436 inline_allocation_disabled_ = true;
5438 // Update inline allocation limit for new space.
5439 new_space()->UpdateInlineAllocationLimit(0);
5441 // Update inline allocation limit for old spaces.
5442 PagedSpaces spaces(this);
5443 for (PagedSpace* space = spaces.next(); space != NULL;
5444 space = spaces.next()) {
5445 space->EmptyAllocationInfo();
5450 V8_DECLARE_ONCE(initialize_gc_once);
5452 static void InitializeGCOnce() {
5453 InitializeScavengingVisitorsTables();
5454 NewSpaceScavenger::Initialize();
5455 MarkCompactCollector::Initialize();
5459 bool Heap::SetUp() {
5461 allocation_timeout_ = FLAG_gc_interval;
5464 // Initialize heap spaces and initial maps and objects. Whenever something
5465 // goes wrong, just return false. The caller should check the results and
5466 // call Heap::TearDown() to release allocated memory.
5468 // If the heap is not yet configured (e.g. through the API), configure it.
5469 // Configuration is based on the flags new-space-size (really the semispace
5470 // size) and old-space-size if set, or the initial values of semispace_size_
5471 // and old_generation_size_ otherwise.
5473 if (!ConfigureHeapDefault()) return false;
5476 base::CallOnce(&initialize_gc_once, &InitializeGCOnce);
5478 MarkMapPointersAsEncoded(false);
5480 // Set up memory allocator.
5481 if (!isolate_->memory_allocator()->SetUp(MaxReserved(), MaxExecutableSize()))
5484 // Set up new space.
5485 if (!new_space_.SetUp(reserved_semispace_size_, max_semi_space_size_)) {
5488 new_space_top_after_last_gc_ = new_space()->top();
5490 // Initialize old pointer space.
5491 old_pointer_space_ = new OldSpace(this, max_old_generation_size_,
5492 OLD_POINTER_SPACE, NOT_EXECUTABLE);
5493 if (old_pointer_space_ == NULL) return false;
5494 if (!old_pointer_space_->SetUp()) return false;
5496 // Initialize old data space.
5497 old_data_space_ = new OldSpace(this, max_old_generation_size_, OLD_DATA_SPACE,
5499 if (old_data_space_ == NULL) return false;
5500 if (!old_data_space_->SetUp()) return false;
5502 if (!isolate_->code_range()->SetUp(code_range_size_)) return false;
5504 // Initialize the code space, set its maximum capacity to the old
5505 // generation size. It needs executable memory.
5507 new OldSpace(this, max_old_generation_size_, CODE_SPACE, EXECUTABLE);
5508 if (code_space_ == NULL) return false;
5509 if (!code_space_->SetUp()) return false;
5511 // Initialize map space.
5512 map_space_ = new MapSpace(this, max_old_generation_size_, MAP_SPACE);
5513 if (map_space_ == NULL) return false;
5514 if (!map_space_->SetUp()) return false;
5516 // Initialize simple cell space.
5517 cell_space_ = new CellSpace(this, max_old_generation_size_, CELL_SPACE);
5518 if (cell_space_ == NULL) return false;
5519 if (!cell_space_->SetUp()) return false;
5521 // Initialize global property cell space.
5522 property_cell_space_ = new PropertyCellSpace(this, max_old_generation_size_,
5523 PROPERTY_CELL_SPACE);
5524 if (property_cell_space_ == NULL) return false;
5525 if (!property_cell_space_->SetUp()) return false;
5527 // The large object code space may contain code or data. We set the memory
5528 // to be non-executable here for safety, but this means we need to enable it
5529 // explicitly when allocating large code objects.
5530 lo_space_ = new LargeObjectSpace(this, max_old_generation_size_, LO_SPACE);
5531 if (lo_space_ == NULL) return false;
5532 if (!lo_space_->SetUp()) return false;
5534 // Set up the seed that is used to randomize the string hash function.
5535 DCHECK(hash_seed() == 0);
5536 if (FLAG_randomize_hashes) {
5537 if (FLAG_hash_seed == 0) {
5538 int rnd = isolate()->random_number_generator()->NextInt();
5539 set_hash_seed(Smi::FromInt(rnd & Name::kHashBitMask));
5541 set_hash_seed(Smi::FromInt(FLAG_hash_seed));
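// Illustrative behaviour (assumed flag values): with the default
// FLAG_hash_seed of 0 every isolate draws a fresh random seed, masked with
// Name::kHashBitMask so it fits in the hash field; passing e.g.
// --hash_seed=42 makes string hash values reproducible across runs.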
5545 LOG(isolate_, IntPtrTEvent("heap-capacity", Capacity()));
5546 LOG(isolate_, IntPtrTEvent("heap-available", Available()));
5548 store_buffer()->SetUp();
5550 mark_compact_collector()->SetUp();
5556 bool Heap::CreateHeapObjects() {
5557 // Create initial maps.
5558 if (!CreateInitialMaps()) return false;
5561 // Create initial objects
5562 CreateInitialObjects();
5563 CHECK_EQ(0u, gc_count_);
5565 set_native_contexts_list(undefined_value());
5566 set_array_buffers_list(undefined_value());
5567 set_allocation_sites_list(undefined_value());
5572 void Heap::SetStackLimits() {
5573 DCHECK(isolate_ != NULL);
5574 DCHECK(isolate_ == isolate());
5575 // On 64 bit machines, pointers are generally out of range of Smis. We write
5576 // something that looks like an out of range Smi to the GC.
5578 // Set up the special root array entries containing the stack limits.
5579 // These are actually addresses, but the tag makes the GC ignore it.
5580 roots_[kStackLimitRootIndex] = reinterpret_cast<Object*>(
5581 (isolate_->stack_guard()->jslimit() & ~kSmiTagMask) | kSmiTag);
5582 roots_[kRealStackLimitRootIndex] = reinterpret_cast<Object*>(
5583 (isolate_->stack_guard()->real_jslimit() & ~kSmiTagMask) | kSmiTag);
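// Illustrative sketch (hypothetical 64-bit address): with kSmiTagMask = 1
// and kSmiTag = 0, a jslimit of 0x7fff5fbff4c1 is stored as
// (0x7fff5fbff4c1 & ~1) | 0 = 0x7fff5fbff4c0; the cleared low bit makes the
// entry look like a Smi, so root visitors skip it even though it is really
// an address.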
5587 void Heap::NotifyDeserializationComplete() {
5588 deserialization_complete_ = true;
5590 // All pages right after bootstrapping must be marked as never-evacuate.
5591 PagedSpaces spaces(this);
5592 for (PagedSpace* s = spaces.next(); s != NULL; s = spaces.next()) {
5594 while (it.has_next()) CHECK(it.next()->NeverEvacuate());
5600 void Heap::TearDown() {
5602 if (FLAG_verify_heap) {
5607 UpdateMaximumCommitted();
5609 if (FLAG_print_cumulative_gc_stat) {
5611 PrintF("gc_count=%d ", gc_count_);
5612 PrintF("mark_sweep_count=%d ", ms_count_);
5613 PrintF("max_gc_pause=%.1f ", get_max_gc_pause());
5614 PrintF("total_gc_time=%.1f ", total_gc_time_ms_);
5615 PrintF("min_in_mutator=%.1f ", get_min_in_mutator());
5616 PrintF("max_alive_after_gc=%" V8_PTR_PREFIX "d ", get_max_alive_after_gc());
5617 PrintF("total_marking_time=%.1f ", tracer_.cumulative_marking_duration());
5618 PrintF("total_sweeping_time=%.1f ", tracer_.cumulative_sweeping_duration());
5622 if (FLAG_print_max_heap_committed) {
5624 PrintF("maximum_committed_by_heap=%" V8_PTR_PREFIX "d ",
5625 MaximumCommittedMemory());
5626 PrintF("maximum_committed_by_new_space=%" V8_PTR_PREFIX "d ",
5627 new_space_.MaximumCommittedMemory());
5628 PrintF("maximum_committed_by_old_pointer_space=%" V8_PTR_PREFIX "d ",
5629 old_pointer_space_->MaximumCommittedMemory());
5630 PrintF("maximum_committed_by_old_data_space=%" V8_PTR_PREFIX "d ",
5631 old_data_space_->MaximumCommittedMemory());
5634 PrintF("maximum_committed_by_code_space=%" V8_PTR_PREFIX "d ",
5635 code_space_->MaximumCommittedMemory());
5636 PrintF("maximum_committed_by_map_space=%" V8_PTR_PREFIX "d ",
5637 map_space_->MaximumCommittedMemory());
5638 PrintF("maximum_committed_by_cell_space=%" V8_PTR_PREFIX "d ",
5639 cell_space_->MaximumCommittedMemory());
5640 PrintF("maximum_committed_by_property_space=%" V8_PTR_PREFIX "d ",
5641 property_cell_space_->MaximumCommittedMemory());
5642 PrintF("maximum_committed_by_lo_space=%" V8_PTR_PREFIX "d ",
5643 lo_space_->MaximumCommittedMemory());
5647 if (FLAG_verify_predictable) {
5648 PrintAlloctionsHash();
5651 TearDownArrayBuffers();
5653 isolate_->global_handles()->TearDown();
5655 external_string_table_.TearDown();
5657 mark_compact_collector()->TearDown();
5659 new_space_.TearDown();
5661 if (old_pointer_space_ != NULL) {
5662 old_pointer_space_->TearDown();
5663 delete old_pointer_space_;
5664 old_pointer_space_ = NULL;
5667 if (old_data_space_ != NULL) {
5668 old_data_space_->TearDown();
5669 delete old_data_space_;
5670 old_data_space_ = NULL;
5673 if (code_space_ != NULL) {
5674 code_space_->TearDown();
5679 if (map_space_ != NULL) {
5680 map_space_->TearDown();
5685 if (cell_space_ != NULL) {
5686 cell_space_->TearDown();
5691 if (property_cell_space_ != NULL) {
5692 property_cell_space_->TearDown();
5693 delete property_cell_space_;
5694 property_cell_space_ = NULL;
5697 if (lo_space_ != NULL) {
5698 lo_space_->TearDown();
5703 store_buffer()->TearDown();
5705 isolate_->memory_allocator()->TearDown();
5709 void Heap::AddGCPrologueCallback(v8::Isolate::GCPrologueCallback callback,
5710 GCType gc_type, bool pass_isolate) {
5711 DCHECK(callback != NULL);
5712 GCPrologueCallbackPair pair(callback, gc_type, pass_isolate);
5713 DCHECK(!gc_prologue_callbacks_.Contains(pair));
5714 return gc_prologue_callbacks_.Add(pair);
5718 void Heap::RemoveGCPrologueCallback(v8::Isolate::GCPrologueCallback callback) {
5719 DCHECK(callback != NULL);
5720 for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
5721 if (gc_prologue_callbacks_[i].callback == callback) {
5722 gc_prologue_callbacks_.Remove(i);
5730 void Heap::AddGCEpilogueCallback(v8::Isolate::GCEpilogueCallback callback,
5731 GCType gc_type, bool pass_isolate) {
5732 DCHECK(callback != NULL);
5733 GCEpilogueCallbackPair pair(callback, gc_type, pass_isolate);
5734 DCHECK(!gc_epilogue_callbacks_.Contains(pair));
5735 return gc_epilogue_callbacks_.Add(pair);
5739 void Heap::RemoveGCEpilogueCallback(v8::Isolate::GCEpilogueCallback callback) {
5740 DCHECK(callback != NULL);
5741 for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
5742 if (gc_epilogue_callbacks_[i].callback == callback) {
5743 gc_epilogue_callbacks_.Remove(i);
5751 // TODO(ishell): Find a better place for this.
5752 void Heap::AddWeakObjectToCodeDependency(Handle<HeapObject> obj,
5753 Handle<DependentCode> dep) {
5754 DCHECK(!InNewSpace(*obj));
5755 DCHECK(!InNewSpace(*dep));
5756 Handle<WeakHashTable> table(weak_object_to_code_table(), isolate());
5757 table = WeakHashTable::Put(table, obj, dep);
5758 if (*table != weak_object_to_code_table())
5759 set_weak_object_to_code_table(*table);
5760 DCHECK_EQ(*dep, LookupWeakObjectToCodeDependency(obj));
5764 DependentCode* Heap::LookupWeakObjectToCodeDependency(Handle<HeapObject> obj) {
5765 Object* dep = weak_object_to_code_table()->Lookup(obj);
5766 if (dep->IsDependentCode()) return DependentCode::cast(dep);
5767 return DependentCode::cast(empty_fixed_array());
5771 void Heap::FatalProcessOutOfMemory(const char* location, bool take_snapshot) {
5772 v8::internal::V8::FatalProcessOutOfMemory(location, take_snapshot);
5777 class PrintHandleVisitor : public ObjectVisitor {
5779 void VisitPointers(Object** start, Object** end) {
5780 for (Object** p = start; p < end; p++)
5781 PrintF(" handle %p to %p\n", reinterpret_cast<void*>(p),
5782 reinterpret_cast<void*>(*p));
5787 void Heap::PrintHandles() {
5788 PrintF("Handles:\n");
5789 PrintHandleVisitor v;
5790 isolate_->handle_scope_implementer()->Iterate(&v);
5796 Space* AllSpaces::next() {
5797 switch (counter_++) {
5799 return heap_->new_space();
5800 case OLD_POINTER_SPACE:
5801 return heap_->old_pointer_space();
5802 case OLD_DATA_SPACE:
5803 return heap_->old_data_space();
5805 return heap_->code_space();
5807 return heap_->map_space();
5809 return heap_->cell_space();
5810 case PROPERTY_CELL_SPACE:
5811 return heap_->property_cell_space();
5813 return heap_->lo_space();
5820 PagedSpace* PagedSpaces::next() {
5821 switch (counter_++) {
5822 case OLD_POINTER_SPACE:
5823 return heap_->old_pointer_space();
5824 case OLD_DATA_SPACE:
5825 return heap_->old_data_space();
5827 return heap_->code_space();
5829 return heap_->map_space();
5831 return heap_->cell_space();
5832 case PROPERTY_CELL_SPACE:
5833 return heap_->property_cell_space();
5840 OldSpace* OldSpaces::next() {
5841 switch (counter_++) {
5842 case OLD_POINTER_SPACE:
5843 return heap_->old_pointer_space();
5844 case OLD_DATA_SPACE:
5845 return heap_->old_data_space();
5847 return heap_->code_space();
5854 SpaceIterator::SpaceIterator(Heap* heap)
5856 current_space_(FIRST_SPACE),
5861 SpaceIterator::SpaceIterator(Heap* heap, HeapObjectCallback size_func)
5863 current_space_(FIRST_SPACE),
5865 size_func_(size_func) {}
5868 SpaceIterator::~SpaceIterator() {
5869 // Delete active iterator if any.
5874 bool SpaceIterator::has_next() {
5875 // Iterate until no more spaces.
5876 return current_space_ != LAST_SPACE;
5880 ObjectIterator* SpaceIterator::next() {
5881 if (iterator_ != NULL) {
5884 // Move to the next space
5886 if (current_space_ > LAST_SPACE) {
5891 // Return iterator for the new current space.
5892 return CreateIterator();
5896 // Create an iterator for the space to iterate.
5897 ObjectIterator* SpaceIterator::CreateIterator() {
5898 DCHECK(iterator_ == NULL);
5900 switch (current_space_) {
5902 iterator_ = new SemiSpaceIterator(heap_->new_space(), size_func_);
5904 case OLD_POINTER_SPACE:
5906 new HeapObjectIterator(heap_->old_pointer_space(), size_func_);
5908 case OLD_DATA_SPACE:
5909 iterator_ = new HeapObjectIterator(heap_->old_data_space(), size_func_);
5912 iterator_ = new HeapObjectIterator(heap_->code_space(), size_func_);
5915 iterator_ = new HeapObjectIterator(heap_->map_space(), size_func_);
5918 iterator_ = new HeapObjectIterator(heap_->cell_space(), size_func_);
5920 case PROPERTY_CELL_SPACE:
5922 new HeapObjectIterator(heap_->property_cell_space(), size_func_);
5925 iterator_ = new LargeObjectIterator(heap_->lo_space(), size_func_);
5929 // Return the newly allocated iterator.
5930 DCHECK(iterator_ != NULL);
5935 class HeapObjectsFilter {
5937 virtual ~HeapObjectsFilter() {}
5938 virtual bool SkipObject(HeapObject* object) = 0;
5942 class UnreachableObjectsFilter : public HeapObjectsFilter {
5944 explicit UnreachableObjectsFilter(Heap* heap) : heap_(heap) {
5945 MarkReachableObjects();
5948 ~UnreachableObjectsFilter() {
5949 heap_->mark_compact_collector()->ClearMarkbits();
5952 bool SkipObject(HeapObject* object) {
5953 MarkBit mark_bit = Marking::MarkBitFrom(object);
5954 return !mark_bit.Get();
5958 class MarkingVisitor : public ObjectVisitor {
5960 MarkingVisitor() : marking_stack_(10) {}
5962 void VisitPointers(Object** start, Object** end) {
5963 for (Object** p = start; p < end; p++) {
5964 if (!(*p)->IsHeapObject()) continue;
5965 HeapObject* obj = HeapObject::cast(*p);
5966 MarkBit mark_bit = Marking::MarkBitFrom(obj);
5967 if (!mark_bit.Get()) {
5969 marking_stack_.Add(obj);
5974 void TransitiveClosure() {
5975 while (!marking_stack_.is_empty()) {
5976 HeapObject* obj = marking_stack_.RemoveLast();
5982 List<HeapObject*> marking_stack_;
5985 void MarkReachableObjects() {
5986 MarkingVisitor visitor;
5987 heap_->IterateRoots(&visitor, VISIT_ALL);
5988 visitor.TransitiveClosure();
5992 DisallowHeapAllocation no_allocation_;
5996 HeapIterator::HeapIterator(Heap* heap)
5997 : make_heap_iterable_helper_(heap),
5998 no_heap_allocation_(),
6000 filtering_(HeapIterator::kNoFiltering),
6006 HeapIterator::HeapIterator(Heap* heap,
6007 HeapIterator::HeapObjectsFiltering filtering)
6008 : make_heap_iterable_helper_(heap),
6009 no_heap_allocation_(),
6011 filtering_(filtering),
6017 HeapIterator::~HeapIterator() { Shutdown(); }
6020 void HeapIterator::Init() {
6021 // Start the iteration.
6022 space_iterator_ = new SpaceIterator(heap_);
6023 switch (filtering_) {
6024 case kFilterUnreachable:
6025 filter_ = new UnreachableObjectsFilter(heap_);
6030 object_iterator_ = space_iterator_->next();
6034 void HeapIterator::Shutdown() {
6036 // Assert that in filtering mode we have iterated through all
6037 // objects. Otherwise, heap will be left in an inconsistent state.
6038 if (filtering_ != kNoFiltering) {
6039 DCHECK(object_iterator_ == NULL);
6042 // Make sure the last iterator is deallocated.
6043 delete space_iterator_;
6044 space_iterator_ = NULL;
6045 object_iterator_ = NULL;
6051 HeapObject* HeapIterator::next() {
6052 if (filter_ == NULL) return NextObject();
6054 HeapObject* obj = NextObject();
6055 while (obj != NULL && filter_->SkipObject(obj)) obj = NextObject();
6060 HeapObject* HeapIterator::NextObject() {
6061 // No iterator means we are done.
6062 if (object_iterator_ == NULL) return NULL;
6064 if (HeapObject* obj = object_iterator_->next_object()) {
6065 // If the current iterator has more objects we are fine.
6068 // Go through the spaces looking for one that has objects.
6069 while (space_iterator_->has_next()) {
6070 object_iterator_ = space_iterator_->next();
6071 if (HeapObject* obj = object_iterator_->next_object()) {
6076 // Done with the last space.
6077 object_iterator_ = NULL;
6082 void HeapIterator::reset() {
6083 // Restart the iterator.
6091 Object* const PathTracer::kAnyGlobalObject = NULL;
6093 class PathTracer::MarkVisitor : public ObjectVisitor {
6095 explicit MarkVisitor(PathTracer* tracer) : tracer_(tracer) {}
6096 void VisitPointers(Object** start, Object** end) {
6097 // Scan all HeapObject pointers in [start, end)
6098 for (Object** p = start; !tracer_->found() && (p < end); p++) {
6099 if ((*p)->IsHeapObject()) tracer_->MarkRecursively(p, this);
6104 PathTracer* tracer_;
6108 class PathTracer::UnmarkVisitor : public ObjectVisitor {
6110 explicit UnmarkVisitor(PathTracer* tracer) : tracer_(tracer) {}
6111 void VisitPointers(Object** start, Object** end) {
6112 // Scan all HeapObject pointers in [start, end)
6113 for (Object** p = start; p < end; p++) {
6114 if ((*p)->IsHeapObject()) tracer_->UnmarkRecursively(p, this);
6119 PathTracer* tracer_;
6123 void PathTracer::VisitPointers(Object** start, Object** end) {
6124 bool done = ((what_to_find_ == FIND_FIRST) && found_target_);
6125 // Visit all HeapObject pointers in [start, end)
6126 for (Object** p = start; !done && (p < end); p++) {
6127 if ((*p)->IsHeapObject()) {
6129 done = ((what_to_find_ == FIND_FIRST) && found_target_);
6135 void PathTracer::Reset() {
6136 found_target_ = false;
6137 object_stack_.Clear();
6141 void PathTracer::TracePathFrom(Object** root) {
6142 DCHECK((search_target_ == kAnyGlobalObject) ||
6143 search_target_->IsHeapObject());
6144 found_target_in_trace_ = false;
6147 MarkVisitor mark_visitor(this);
6148 MarkRecursively(root, &mark_visitor);
6150 UnmarkVisitor unmark_visitor(this);
6151 UnmarkRecursively(root, &unmark_visitor);
6157 static bool SafeIsNativeContext(HeapObject* obj) {
6158 return obj->map() == obj->GetHeap()->raw_unchecked_native_context_map();
6162 void PathTracer::MarkRecursively(Object** p, MarkVisitor* mark_visitor) {
6163 if (!(*p)->IsHeapObject()) return;
6165 HeapObject* obj = HeapObject::cast(*p);
6167 MapWord map_word = obj->map_word();
6168 if (!map_word.ToMap()->IsHeapObject()) return; // visited before
6170 if (found_target_in_trace_) return; // stop if target found
6171 object_stack_.Add(obj);
6172 if (((search_target_ == kAnyGlobalObject) && obj->IsJSGlobalObject()) ||
6173 (obj == search_target_)) {
6174 found_target_in_trace_ = true;
6175 found_target_ = true;
6179 bool is_native_context = SafeIsNativeContext(obj);
6182 Map* map = Map::cast(map_word.ToMap());
6184 MapWord marked_map_word =
6185 MapWord::FromRawValue(obj->map_word().ToRawValue() + kMarkTag);
6186 obj->set_map_word(marked_map_word);
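// Descriptive note (inferred from the surrounding code): adding kMarkTag to
// the raw map word makes map_word.ToMap()->IsHeapObject() fail on a second
// visit, which is what the "visited before" check above relies on;
// UnmarkRecursively later subtracts the same tag to restore the map word.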
6188 // Scan the object body.
6189 if (is_native_context && (visit_mode_ == VISIT_ONLY_STRONG)) {
6190 // This is specialized to scan Context's properly.
6192 reinterpret_cast<Object**>(obj->address() + Context::kHeaderSize);
6194 reinterpret_cast<Object**>(obj->address() + Context::kHeaderSize +
6195 Context::FIRST_WEAK_SLOT * kPointerSize);
6196 mark_visitor->VisitPointers(start, end);
6198 obj->IterateBody(map->instance_type(), obj->SizeFromMap(map), mark_visitor);
6201 // Scan the map after the body because the body is a lot more interesting
6202 // when doing leak detection.
6203 MarkRecursively(reinterpret_cast<Object**>(&map), mark_visitor);
6205 if (!found_target_in_trace_) { // don't pop if found the target
6206 object_stack_.RemoveLast();
6211 void PathTracer::UnmarkRecursively(Object** p, UnmarkVisitor* unmark_visitor) {
6212 if (!(*p)->IsHeapObject()) return;
6214 HeapObject* obj = HeapObject::cast(*p);
6216 MapWord map_word = obj->map_word();
6217 if (map_word.ToMap()->IsHeapObject()) return; // unmarked already
6219 MapWord unmarked_map_word =
6220 MapWord::FromRawValue(map_word.ToRawValue() - kMarkTag);
6221 obj->set_map_word(unmarked_map_word);
6223 Map* map = Map::cast(unmarked_map_word.ToMap());
6225 UnmarkRecursively(reinterpret_cast<Object**>(&map), unmark_visitor);
6227 obj->IterateBody(map->instance_type(), obj->SizeFromMap(map), unmark_visitor);
6231 void PathTracer::ProcessResults() {
6232 if (found_target_) {
6233 OFStream os(stdout);
6234 os << "=====================================\n"
6235 << "==== Path to object ====\n"
6236 << "=====================================\n\n";
6238 DCHECK(!object_stack_.is_empty());
6239 for (int i = 0; i < object_stack_.length(); i++) {
6240 if (i > 0) os << "\n |\n |\n V\n\n";
6241 object_stack_[i]->Print(os);
6243 os << "=====================================\n";
6248 // Triggers a depth-first traversal of reachable objects from one
6249 // given root object and finds a path to a specific heap object and
6251 void Heap::TracePathToObjectFrom(Object* target, Object* root) {
6252 PathTracer tracer(target, PathTracer::FIND_ALL, VISIT_ALL);
6253 tracer.VisitPointer(&root);
6257 // Triggers a depth-first traversal of reachable objects from roots
6258 // and finds a path to a specific heap object and prints it.
6259 void Heap::TracePathToObject(Object* target) {
6260 PathTracer tracer(target, PathTracer::FIND_ALL, VISIT_ALL);
6261 IterateRoots(&tracer, VISIT_ONLY_STRONG);
6265 // Triggers a depth-first traversal of reachable objects from roots
6266 // and finds a path to any global object and prints it. Useful for
6267 // determining the source for leaks of global objects.
6268 void Heap::TracePathToGlobal() {
6269 PathTracer tracer(PathTracer::kAnyGlobalObject, PathTracer::FIND_ALL,
6271 IterateRoots(&tracer, VISIT_ONLY_STRONG);
6276 void Heap::UpdateCumulativeGCStatistics(double duration,
6277 double spent_in_mutator,
6278 double marking_time) {
6279 if (FLAG_print_cumulative_gc_stat) {
6280 total_gc_time_ms_ += duration;
6281 max_gc_pause_ = Max(max_gc_pause_, duration);
6282 max_alive_after_gc_ = Max(max_alive_after_gc_, SizeOfObjects());
6283 min_in_mutator_ = Min(min_in_mutator_, spent_in_mutator);
6284 } else if (FLAG_trace_gc_verbose) {
6285 total_gc_time_ms_ += duration;
6288 marking_time_ += marking_time;
6292 int KeyedLookupCache::Hash(Handle<Map> map, Handle<Name> name) {
6293 DisallowHeapAllocation no_gc;
6294 // Uses only lower 32 bits if pointers are larger.
6295 uintptr_t addr_hash =
6296 static_cast<uint32_t>(reinterpret_cast<uintptr_t>(*map)) >> kMapHashShift;
6297 return static_cast<uint32_t>((addr_hash ^ name->Hash()) & kCapacityMask);
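// Illustrative sketch (hypothetical values): the map address is truncated
// to 32 bits, shifted right by kMapHashShift and XOR-ed with the name's
// hash; only the low bits kept by kCapacityMask survive, so two maps whose
// shifted addresses differ only above those bits land in the same bucket
// and compete for its kEntriesPerBucket slots.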
6301 int KeyedLookupCache::Lookup(Handle<Map> map, Handle<Name> name) {
6302 DisallowHeapAllocation no_gc;
6303 int index = (Hash(map, name) & kHashMask);
6304 for (int i = 0; i < kEntriesPerBucket; i++) {
6305 Key& key = keys_[index + i];
6306 if ((key.map == *map) && key.name->Equals(*name)) {
6307 return field_offsets_[index + i];
6314 void KeyedLookupCache::Update(Handle<Map> map, Handle<Name> name,
6316 DisallowHeapAllocation no_gc;
6317 if (!name->IsUniqueName()) {
6318 if (!StringTable::InternalizeStringIfExists(
6319 name->GetIsolate(), Handle<String>::cast(name)).ToHandle(&name)) {
6323 // This cache is cleared only between mark compact passes, so we expect the
6324 // cache to only contain old space names.
6325 DCHECK(!map->GetIsolate()->heap()->InNewSpace(*name));
6327 int index = (Hash(map, name) & kHashMask);
6328 // After a GC there will be free slots, so we use them in order (this may
6329 // help to get the most frequently used one in position 0).
6330 for (int i = 0; i < kEntriesPerBucket; i++) {
6331 Key& key = keys_[index];
6332 Object* free_entry_indicator = NULL;
6333 if (key.map == free_entry_indicator) {
6336 field_offsets_[index + i] = field_offset;
6340 // No free entry found in this bucket, so we move them all down one and
6341 // put the new entry at position zero.
6342 for (int i = kEntriesPerBucket - 1; i > 0; i--) {
6343 Key& key = keys_[index + i];
6344 Key& key2 = keys_[index + i - 1];
6346 field_offsets_[index + i] = field_offsets_[index + i - 1];
6349 // Write the new first entry.
6350 Key& key = keys_[index];
6353 field_offsets_[index] = field_offset;
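// Illustrative behaviour (assuming, for the sake of example, a bucket with
// two entries {A, B}): inserting a new pair C when no slot is free shifts A
// down to position 1, evicts B, and writes C at position 0, so each bucket
// acts as a small recency-ordered list.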
6357 void KeyedLookupCache::Clear() {
6358 for (int index = 0; index < kLength; index++) keys_[index].map = NULL;
6362 void DescriptorLookupCache::Clear() {
6363 for (int index = 0; index < kLength; index++) keys_[index].source = NULL;
6367 void ExternalStringTable::CleanUp() {
6369 for (int i = 0; i < new_space_strings_.length(); ++i) {
6370 if (new_space_strings_[i] == heap_->the_hole_value()) {
6373 DCHECK(new_space_strings_[i]->IsExternalString());
6374 if (heap_->InNewSpace(new_space_strings_[i])) {
6375 new_space_strings_[last++] = new_space_strings_[i];
6377 old_space_strings_.Add(new_space_strings_[i]);
6380 new_space_strings_.Rewind(last);
6381 new_space_strings_.Trim();
6384 for (int i = 0; i < old_space_strings_.length(); ++i) {
6385 if (old_space_strings_[i] == heap_->the_hole_value()) {
6388 DCHECK(old_space_strings_[i]->IsExternalString());
6389 DCHECK(!heap_->InNewSpace(old_space_strings_[i]));
6390 old_space_strings_[last++] = old_space_strings_[i];
6392 old_space_strings_.Rewind(last);
6393 old_space_strings_.Trim();
6395 if (FLAG_verify_heap) {
6402 void ExternalStringTable::TearDown() {
6403 for (int i = 0; i < new_space_strings_.length(); ++i) {
6404 heap_->FinalizeExternalString(ExternalString::cast(new_space_strings_[i]));
6406 new_space_strings_.Free();
6407 for (int i = 0; i < old_space_strings_.length(); ++i) {
6408 heap_->FinalizeExternalString(ExternalString::cast(old_space_strings_[i]));
6410 old_space_strings_.Free();
6414 void Heap::QueueMemoryChunkForFree(MemoryChunk* chunk) {
6415 chunk->set_next_chunk(chunks_queued_for_free_);
6416 chunks_queued_for_free_ = chunk;
6420 void Heap::FreeQueuedChunks() {
6421 if (chunks_queued_for_free_ == NULL) return;
6424 for (chunk = chunks_queued_for_free_; chunk != NULL; chunk = next) {
6425 next = chunk->next_chunk();
6426 chunk->SetFlag(MemoryChunk::ABOUT_TO_BE_FREED);
6428 if (chunk->owner()->identity() == LO_SPACE) {
6429 // StoreBuffer::Filter relies on MemoryChunk::FromAnyPointerAddress.
6430 // If FromAnyPointerAddress encounters a slot that belongs to a large
6431 // chunk queued for deletion it will fail to find the chunk because
6432 // it tries to perform a search in the list of pages owned by the large
6433 // object space, and queued chunks were detached from that list.
6434 // To work around this we split the large chunk into normal kPageSize-aligned
6435 // pieces and initialize the size, owner and flags fields of every piece.
6436 // If FromAnyPointerAddress encounters a slot that belongs to one of
6437 // these smaller pieces it will treat it as a slot on a normal Page.
6438 Address chunk_end = chunk->address() + chunk->size();
6439 MemoryChunk* inner =
6440 MemoryChunk::FromAddress(chunk->address() + Page::kPageSize);
6441 MemoryChunk* inner_last = MemoryChunk::FromAddress(chunk_end - 1);
6442 while (inner <= inner_last) {
6443 // Size of a large chunk is always a multiple of
6444 // OS::AllocateAlignment() so there is always
6445 // enough space for a fake MemoryChunk header.
6446 Address area_end = Min(inner->address() + Page::kPageSize, chunk_end);
6447 // Guard against overflow.
6448 if (area_end < inner->address()) area_end = chunk_end;
6449 inner->SetArea(inner->address(), area_end);
6450 inner->set_size(Page::kPageSize);
6451 inner->set_owner(lo_space());
6452 inner->SetFlag(MemoryChunk::ABOUT_TO_BE_FREED);
6453 inner = MemoryChunk::FromAddress(inner->address() + Page::kPageSize);
6457 isolate_->heap()->store_buffer()->Compact();
6458 isolate_->heap()->store_buffer()->Filter(MemoryChunk::ABOUT_TO_BE_FREED);
6459 for (chunk = chunks_queued_for_free_; chunk != NULL; chunk = next) {
6460 next = chunk->next_chunk();
6461 isolate_->memory_allocator()->Free(chunk);
6463 chunks_queued_for_free_ = NULL;
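// Illustrative example (assumed sizes): a large object chunk spanning 3.5
// pages starting at address P gets fake MemoryChunk headers at
// P + kPageSize, P + 2 * kPageSize and P + 3 * kPageSize, each claiming
// Page::kPageSize with its area clamped to the real chunk end, so
// StoreBuffer::Filter can treat any interior slot as lying on a normal page
// flagged ABOUT_TO_BE_FREED.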
6467 void Heap::RememberUnmappedPage(Address page, bool compacted) {
6468 uintptr_t p = reinterpret_cast<uintptr_t>(page);
6469 // Tag the page pointer to make it findable in the dump file.
6471 p ^= 0xc1ead & (Page::kPageSize - 1); // Cleared.
6473 p ^= 0x1d1ed & (Page::kPageSize - 1); // I died.
6475 remembered_unmapped_pages_[remembered_unmapped_pages_index_] =
6476 reinterpret_cast<Address>(p);
6477 remembered_unmapped_pages_index_++;
6478 remembered_unmapped_pages_index_ %= kRememberedUnmappedPages;
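// Illustrative example (hypothetical page address): a compacted page at
// 0x3a2b00000000 has its low bits XOR-ed with 0xc1ead ("cleared"), so the
// value recorded in remembered_unmapped_pages_ ends in ...c1ead and is easy
// to spot in a minidump; non-compacted pages get the 0x1d1ed ("I died")
// marker instead.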
6482 void Heap::ClearObjectStats(bool clear_last_time_stats) {
6483 memset(object_counts_, 0, sizeof(object_counts_));
6484 memset(object_sizes_, 0, sizeof(object_sizes_));
6485 if (clear_last_time_stats) {
6486 memset(object_counts_last_time_, 0, sizeof(object_counts_last_time_));
6487 memset(object_sizes_last_time_, 0, sizeof(object_sizes_last_time_));
6492 static base::LazyMutex checkpoint_object_stats_mutex = LAZY_MUTEX_INITIALIZER;
6495 void Heap::CheckpointObjectStats() {
6496 base::LockGuard<base::Mutex> lock_guard(
6497 checkpoint_object_stats_mutex.Pointer());
6498 Counters* counters = isolate()->counters();
6499 #define ADJUST_LAST_TIME_OBJECT_COUNT(name) \
6500 counters->count_of_##name()->Increment( \
6501 static_cast<int>(object_counts_[name])); \
6502 counters->count_of_##name()->Decrement( \
6503 static_cast<int>(object_counts_last_time_[name])); \
6504 counters->size_of_##name()->Increment( \
6505 static_cast<int>(object_sizes_[name])); \
6506 counters->size_of_##name()->Decrement( \
6507 static_cast<int>(object_sizes_last_time_[name]));
6508 INSTANCE_TYPE_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
6509 #undef ADJUST_LAST_TIME_OBJECT_COUNT
6511 #define ADJUST_LAST_TIME_OBJECT_COUNT(name) \
6512 index = FIRST_CODE_KIND_SUB_TYPE + Code::name; \
6513 counters->count_of_CODE_TYPE_##name()->Increment( \
6514 static_cast<int>(object_counts_[index])); \
6515 counters->count_of_CODE_TYPE_##name()->Decrement( \
6516 static_cast<int>(object_counts_last_time_[index])); \
6517 counters->size_of_CODE_TYPE_##name()->Increment( \
6518 static_cast<int>(object_sizes_[index])); \
6519 counters->size_of_CODE_TYPE_##name()->Decrement( \
6520 static_cast<int>(object_sizes_last_time_[index]));
6521 CODE_KIND_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
6522 #undef ADJUST_LAST_TIME_OBJECT_COUNT
6523 #define ADJUST_LAST_TIME_OBJECT_COUNT(name) \
6524 index = FIRST_FIXED_ARRAY_SUB_TYPE + name; \
6525 counters->count_of_FIXED_ARRAY_##name()->Increment( \
6526 static_cast<int>(object_counts_[index])); \
6527 counters->count_of_FIXED_ARRAY_##name()->Decrement( \
6528 static_cast<int>(object_counts_last_time_[index])); \
6529 counters->size_of_FIXED_ARRAY_##name()->Increment( \
6530 static_cast<int>(object_sizes_[index])); \
6531 counters->size_of_FIXED_ARRAY_##name()->Decrement( \
6532 static_cast<int>(object_sizes_last_time_[index]));
6533 FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
6534 #undef ADJUST_LAST_TIME_OBJECT_COUNT
6535 #define ADJUST_LAST_TIME_OBJECT_COUNT(name) \
6537 FIRST_CODE_AGE_SUB_TYPE + Code::k##name##CodeAge - Code::kFirstCodeAge; \
6538 counters->count_of_CODE_AGE_##name()->Increment( \
6539 static_cast<int>(object_counts_[index])); \
6540 counters->count_of_CODE_AGE_##name()->Decrement( \
6541 static_cast<int>(object_counts_last_time_[index])); \
6542 counters->size_of_CODE_AGE_##name()->Increment( \
6543 static_cast<int>(object_sizes_[index])); \
6544 counters->size_of_CODE_AGE_##name()->Decrement( \
6545 static_cast<int>(object_sizes_last_time_[index]));
6546 CODE_AGE_LIST_COMPLETE(ADJUST_LAST_TIME_OBJECT_COUNT)
6547 #undef ADJUST_LAST_TIME_OBJECT_COUNT
6549 MemCopy(object_counts_last_time_, object_counts_, sizeof(object_counts_));
6550 MemCopy(object_sizes_last_time_, object_sizes_, sizeof(object_sizes_));
6554 } // namespace v8::internal