// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/v8.h"

#include "src/accessors.h"
#include "src/base/once.h"
#include "src/bootstrapper.h"
#include "src/codegen.h"
#include "src/compilation-cache.h"
#include "src/conversions.h"
#include "src/cpu-profiler.h"
#include "src/debug.h"
#include "src/deoptimizer.h"
#include "src/global-handles.h"
#include "src/heap-profiler.h"
#include "src/incremental-marking.h"
#include "src/isolate-inl.h"
#include "src/mark-compact.h"
#include "src/natives.h"
#include "src/objects-visiting.h"
#include "src/objects-visiting-inl.h"
#include "src/runtime-profiler.h"
#include "src/scopeinfo.h"
#include "src/snapshot.h"
#include "src/store-buffer.h"
#include "src/utils/random-number-generator.h"
#include "src/utils.h"
#include "src/v8threads.h"
#include "src/vm-state-inl.h"

#if V8_TARGET_ARCH_ARM && !V8_INTERPRETED_REGEXP
#include "src/regexp-macro-assembler.h"
#include "src/arm/regexp-macro-assembler-arm.h"
#endif

#if V8_TARGET_ARCH_MIPS && !V8_INTERPRETED_REGEXP
#include "src/regexp-macro-assembler.h"
#include "src/mips/regexp-macro-assembler-mips.h"
#endif

namespace v8 {
namespace internal {
Heap::Heap()
    : amount_of_external_allocated_memory_(0),
      amount_of_external_allocated_memory_at_last_global_gc_(0),
      // semispace_size_ should be a power of 2 and old_generation_size_ should
      // be a multiple of Page::kPageSize.
      reserved_semispace_size_(8 * (kPointerSize / 4) * MB),
      max_semi_space_size_(8 * (kPointerSize / 4) * MB),
      initial_semispace_size_(Page::kPageSize),
      max_old_generation_size_(700ul * (kPointerSize / 4) * MB),
      max_executable_size_(256ul * (kPointerSize / 4) * MB),
      // Variables set based on semispace_size_ and old_generation_size_ in
      // ConfigureHeap.
      // Will be 4 * reserved_semispace_size_ to ensure that young
      // generation can be aligned to its size.
      maximum_committed_(0),
      survived_since_last_expansion_(0),
      sweep_generation_(0),
      always_allocate_scope_depth_(0),
      linear_allocation_scope_depth_(0),
      contexts_disposed_(0),
      flush_monomorphic_ics_(false),
      scan_on_scavenge_pages_(0),
      new_space_(this),
      old_pointer_space_(NULL),
      old_data_space_(NULL),
      property_cell_space_(NULL),
      lo_space_(NULL),
      gc_state_(NOT_IN_GC),
      gc_post_processing_depth_(0),
      remembered_unmapped_pages_index_(0),
      unflattened_strings_length_(0),
#ifdef DEBUG
      allocation_timeout_(0),
#endif
      old_generation_allocation_limit_(kMinimumOldGenerationAllocationLimit),
      old_gen_exhausted_(false),
      inline_allocation_disabled_(false),
      store_buffer_rebuilder_(store_buffer()),
      gc_safe_size_of_old_object_(NULL),
      total_regexp_code_generated_(0),
      high_survival_rate_period_length_(0),
      promoted_objects_size_(0),
      semi_space_copied_object_size_(0),
      semi_space_copied_rate_(0),
      maximum_size_scavenges_(0),
      total_gc_time_ms_(0.0),
      max_alive_after_gc_(0),
      min_in_mutator_(kMaxInt),
      alive_after_last_gc_(0),
      last_gc_end_timestamp_(0.0),
      mark_compact_collector_(this),
      incremental_marking_(this),
      number_idle_notifications_(0),
      last_idle_notification_gc_count_(0),
      last_idle_notification_gc_count_init_(false),
      mark_sweeps_since_idle_round_started_(0),
      gc_count_at_last_idle_gc_(0),
      scavenges_since_last_idle_round_(kIdleScavengeThreshold),
      full_codegen_bytes_generated_(0),
      crankshaft_codegen_bytes_generated_(0),
      gcs_since_last_deopt_(0),
#ifdef VERIFY_HEAP
      no_weak_object_verification_scope_depth_(0),
#endif
      allocation_sites_scratchpad_length_(0),
      promotion_queue_(this),
      external_string_table_(this),
      chunks_queued_for_free_(NULL),
      gc_callbacks_depth_(0) {
  // Allow build-time customization of the max semispace size. Building
  // V8 with snapshots and a non-default max semispace size is much
  // easier if you can define it as part of the build environment.
#if defined(V8_MAX_SEMISPACE_SIZE)
  max_semi_space_size_ = reserved_semispace_size_ = V8_MAX_SEMISPACE_SIZE;
#endif

  // Ensure old_generation_size_ is a multiple of kPageSize.
  ASSERT(MB >= Page::kPageSize);

  memset(roots_, 0, sizeof(roots_[0]) * kRootListLength);
  set_native_contexts_list(NULL);
  set_array_buffers_list(Smi::FromInt(0));
  set_allocation_sites_list(Smi::FromInt(0));
  set_encountered_weak_collections(Smi::FromInt(0));
  // Put a dummy entry in the remembered pages so we can find the list in a
  // minidump even if there are no real unmapped pages.
  RememberUnmappedPage(NULL, false);

  ClearObjectStats(true);
}
intptr_t Heap::Capacity() {
  if (!HasBeenSetUp()) return 0;

  return new_space_.Capacity() +
      old_pointer_space_->Capacity() +
      old_data_space_->Capacity() +
      code_space_->Capacity() +
      map_space_->Capacity() +
      cell_space_->Capacity() +
      property_cell_space_->Capacity();
}


intptr_t Heap::CommittedMemory() {
  if (!HasBeenSetUp()) return 0;

  return new_space_.CommittedMemory() +
      old_pointer_space_->CommittedMemory() +
      old_data_space_->CommittedMemory() +
      code_space_->CommittedMemory() +
      map_space_->CommittedMemory() +
      cell_space_->CommittedMemory() +
      property_cell_space_->CommittedMemory() +
      lo_space_->Size();
}


size_t Heap::CommittedPhysicalMemory() {
  if (!HasBeenSetUp()) return 0;

  return new_space_.CommittedPhysicalMemory() +
      old_pointer_space_->CommittedPhysicalMemory() +
      old_data_space_->CommittedPhysicalMemory() +
      code_space_->CommittedPhysicalMemory() +
      map_space_->CommittedPhysicalMemory() +
      cell_space_->CommittedPhysicalMemory() +
      property_cell_space_->CommittedPhysicalMemory() +
      lo_space_->CommittedPhysicalMemory();
}


intptr_t Heap::CommittedMemoryExecutable() {
  if (!HasBeenSetUp()) return 0;

  return isolate()->memory_allocator()->SizeExecutable();
}


void Heap::UpdateMaximumCommitted() {
  if (!HasBeenSetUp()) return;

  intptr_t current_committed_memory = CommittedMemory();
  if (current_committed_memory > maximum_committed_) {
    maximum_committed_ = current_committed_memory;
  }
}


intptr_t Heap::Available() {
  if (!HasBeenSetUp()) return 0;

  return new_space_.Available() +
      old_pointer_space_->Available() +
      old_data_space_->Available() +
      code_space_->Available() +
      map_space_->Available() +
      cell_space_->Available() +
      property_cell_space_->Available();
}


bool Heap::HasBeenSetUp() {
  return old_pointer_space_ != NULL &&
         old_data_space_ != NULL &&
         code_space_ != NULL &&
         map_space_ != NULL &&
         cell_space_ != NULL &&
         property_cell_space_ != NULL &&
         lo_space_ != NULL;
}


int Heap::GcSafeSizeOfOldObject(HeapObject* object) {
  if (IntrusiveMarking::IsMarked(object)) {
    return IntrusiveMarking::SizeOfMarkedObject(object);
  }
  return object->SizeFromMap(object->map());
}
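// A quick note on how the accessors above relate (hypothetical numbers, not
// part of this file): CommittedMemory() is what the heap has claimed from
// the OS, SizeOfObjects() is what is live, and the gap between them is what
// GarbageCollectionEpilogue() samples as external fragmentation:
//
//   intptr_t committed = heap->CommittedMemory();  // e.g. 80 MB
//   intptr_t used = heap->SizeOfObjects();         // e.g. 60 MB
//   int fragmentation =
//       static_cast<int>(100 - (used * 100.0) / committed);  // => 25 percent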
GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space,
                                              const char** reason) {
  // Is global GC requested?
  if (space != NEW_SPACE) {
    isolate_->counters()->gc_compactor_caused_by_request()->Increment();
    *reason = "GC in old space requested";
    return MARK_COMPACTOR;
  }

  if (FLAG_gc_global || (FLAG_stress_compaction && (gc_count_ & 1) != 0)) {
    *reason = "GC in old space forced by flags";
    return MARK_COMPACTOR;
  }

  // Is enough data promoted to justify a global GC?
  if (OldGenerationAllocationLimitReached()) {
    isolate_->counters()->gc_compactor_caused_by_promoted_data()->Increment();
    *reason = "promotion limit reached";
    return MARK_COMPACTOR;
  }

  // Have allocation in OLD and LO failed?
  if (old_gen_exhausted_) {
    isolate_->counters()->
        gc_compactor_caused_by_oldspace_exhaustion()->Increment();
    *reason = "old generations exhausted";
    return MARK_COMPACTOR;
  }

  // Is there enough space left in OLD to guarantee that a scavenge can
  // succeed?
  //
  // Note that MemoryAllocator->MaxAvailable() undercounts the memory available
  // for object promotion. It counts only the bytes that the memory
  // allocator has not yet allocated from the OS and assigned to any space,
  // and does not count available bytes already in the old space or code
  // space. Undercounting is safe---we may get an unrequested full GC when
  // a scavenge would have succeeded.
  if (isolate_->memory_allocator()->MaxAvailable() <= new_space_.Size()) {
    isolate_->counters()->
        gc_compactor_caused_by_oldspace_exhaustion()->Increment();
    *reason = "scavenge might not succeed";
    return MARK_COMPACTOR;
  }

  // Default
  *reason = NULL;
  return SCAVENGER;
}
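// Sketch of the intended calling pattern (condensed from the CollectGarbage()
// entry points; variable names are illustrative):
//
//   const char* collector_reason = NULL;
//   GarbageCollector collector =
//       SelectGarbageCollector(space, &collector_reason);
//   // For any space other than NEW_SPACE this yields MARK_COMPACTOR with
//   // collector_reason == "GC in old space requested".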
// TODO(1238405): Combine the infrastructure for --heap-stats and
// --log-gc to avoid the complicated preprocessor and flag testing.
void Heap::ReportStatisticsBeforeGC() {
  // Heap::ReportHeapStatistics will also log NewSpace statistics when
  // --log-gc is set. The following logic is used to avoid double logging.
#ifdef DEBUG
  if (FLAG_heap_stats || FLAG_log_gc) new_space_.CollectStatistics();
  if (FLAG_heap_stats) {
    ReportHeapStatistics("Before GC");
  } else if (FLAG_log_gc) {
    new_space_.ReportStatistics();
  }
  if (FLAG_heap_stats || FLAG_log_gc) new_space_.ClearHistograms();
#else
  if (FLAG_log_gc) {
    new_space_.CollectStatistics();
    new_space_.ReportStatistics();
    new_space_.ClearHistograms();
  }
#endif  // DEBUG
}
void Heap::PrintShortHeapStatistics() {
  if (!FLAG_trace_gc_verbose) return;
  PrintPID("Memory allocator, used: %6" V8_PTR_PREFIX "d KB"
           ", available: %6" V8_PTR_PREFIX "d KB\n",
           isolate_->memory_allocator()->Size() / KB,
           isolate_->memory_allocator()->Available() / KB);
  PrintPID("New space, used: %6" V8_PTR_PREFIX "d KB"
           ", available: %6" V8_PTR_PREFIX "d KB"
           ", committed: %6" V8_PTR_PREFIX "d KB\n",
           new_space_.Size() / KB,
           new_space_.Available() / KB,
           new_space_.CommittedMemory() / KB);
  PrintPID("Old pointers, used: %6" V8_PTR_PREFIX "d KB"
           ", available: %6" V8_PTR_PREFIX "d KB"
           ", committed: %6" V8_PTR_PREFIX "d KB\n",
           old_pointer_space_->SizeOfObjects() / KB,
           old_pointer_space_->Available() / KB,
           old_pointer_space_->CommittedMemory() / KB);
  PrintPID("Old data space, used: %6" V8_PTR_PREFIX "d KB"
           ", available: %6" V8_PTR_PREFIX "d KB"
           ", committed: %6" V8_PTR_PREFIX "d KB\n",
           old_data_space_->SizeOfObjects() / KB,
           old_data_space_->Available() / KB,
           old_data_space_->CommittedMemory() / KB);
  PrintPID("Code space, used: %6" V8_PTR_PREFIX "d KB"
           ", available: %6" V8_PTR_PREFIX "d KB"
           ", committed: %6" V8_PTR_PREFIX "d KB\n",
           code_space_->SizeOfObjects() / KB,
           code_space_->Available() / KB,
           code_space_->CommittedMemory() / KB);
  PrintPID("Map space, used: %6" V8_PTR_PREFIX "d KB"
           ", available: %6" V8_PTR_PREFIX "d KB"
           ", committed: %6" V8_PTR_PREFIX "d KB\n",
           map_space_->SizeOfObjects() / KB,
           map_space_->Available() / KB,
           map_space_->CommittedMemory() / KB);
  PrintPID("Cell space, used: %6" V8_PTR_PREFIX "d KB"
           ", available: %6" V8_PTR_PREFIX "d KB"
           ", committed: %6" V8_PTR_PREFIX "d KB\n",
           cell_space_->SizeOfObjects() / KB,
           cell_space_->Available() / KB,
           cell_space_->CommittedMemory() / KB);
  PrintPID("PropertyCell space, used: %6" V8_PTR_PREFIX "d KB"
           ", available: %6" V8_PTR_PREFIX "d KB"
           ", committed: %6" V8_PTR_PREFIX "d KB\n",
           property_cell_space_->SizeOfObjects() / KB,
           property_cell_space_->Available() / KB,
           property_cell_space_->CommittedMemory() / KB);
  PrintPID("Large object space, used: %6" V8_PTR_PREFIX "d KB"
           ", available: %6" V8_PTR_PREFIX "d KB"
           ", committed: %6" V8_PTR_PREFIX "d KB\n",
           lo_space_->SizeOfObjects() / KB,
           lo_space_->Available() / KB,
           lo_space_->CommittedMemory() / KB);
  PrintPID("All spaces, used: %6" V8_PTR_PREFIX "d KB"
           ", available: %6" V8_PTR_PREFIX "d KB"
           ", committed: %6" V8_PTR_PREFIX "d KB\n",
           this->SizeOfObjects() / KB,
           this->Available() / KB,
           this->CommittedMemory() / KB);
  PrintPID("External memory reported: %6" V8_PTR_PREFIX "d KB\n",
           static_cast<intptr_t>(amount_of_external_allocated_memory_ / KB));
  PrintPID("Total time spent in GC : %.1f ms\n", total_gc_time_ms_);
}
// TODO(1238405): Combine the infrastructure for --heap-stats and
// --log-gc to avoid the complicated preprocessor and flag testing.
void Heap::ReportStatisticsAfterGC() {
  // Similar to before GC, we use some complicated logic to ensure that
  // NewSpace statistics are logged exactly once when --log-gc is turned on.
#ifdef DEBUG
  if (FLAG_heap_stats) {
    new_space_.CollectStatistics();
    ReportHeapStatistics("After GC");
  } else if (FLAG_log_gc) {
    new_space_.ReportStatistics();
  }
#else
  if (FLAG_log_gc) new_space_.ReportStatistics();
#endif  // DEBUG
}
void Heap::GarbageCollectionPrologue() {
  { AllowHeapAllocation for_the_first_part_of_prologue;
    ClearJSFunctionResultCaches();
    gc_count_++;
    unflattened_strings_length_ = 0;

    if (FLAG_flush_code && FLAG_flush_code_incrementally) {
      mark_compact_collector()->EnableCodeFlushing(true);
    }

#ifdef VERIFY_HEAP
    if (FLAG_verify_heap) {
      Verify();
    }
#endif
  }

  // Reset GC statistics.
  promoted_objects_size_ = 0;
  semi_space_copied_object_size_ = 0;

  UpdateMaximumCommitted();

#ifdef DEBUG
  ASSERT(!AllowHeapAllocation::IsAllowed() && gc_state_ == NOT_IN_GC);

  if (FLAG_gc_verbose) Print();

  ReportStatisticsBeforeGC();
#endif  // DEBUG

  store_buffer()->GCPrologue();

  if (isolate()->concurrent_osr_enabled()) {
    isolate()->optimizing_compiler_thread()->AgeBufferedOsrJobs();
  }

  if (new_space_.IsAtMaximumCapacity()) {
    maximum_size_scavenges_++;
  } else {
    maximum_size_scavenges_ = 0;
  }
  CheckNewSpaceExpansionCriteria();
}
intptr_t Heap::SizeOfObjects() {
  intptr_t total = 0;
  AllSpaces spaces(this);
  for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
    total += space->SizeOfObjects();
  }
  return total;
}
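// The same iteration pattern works for any per-space aggregate. A minimal
// sketch (using only Space::SizeOfObjects(), as above, and Max() from
// src/utils.h) that finds the largest space by live size:
//
//   intptr_t max_space_size = 0;
//   AllSpaces spaces(this);
//   for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
//     max_space_size = Max(max_space_size, space->SizeOfObjects());
//   }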
void Heap::ClearAllICsByKind(Code::Kind kind) {
  HeapObjectIterator it(code_space());

  for (Object* object = it.Next(); object != NULL; object = it.Next()) {
    Code* code = Code::cast(object);
    Code::Kind current_kind = code->kind();
    if (current_kind == Code::FUNCTION ||
        current_kind == Code::OPTIMIZED_FUNCTION) {
      code->ClearInlineCaches(kind);
    }
  }
}
void Heap::RepairFreeListsAfterBoot() {
  PagedSpaces spaces(this);
  for (PagedSpace* space = spaces.next();
       space != NULL;
       space = spaces.next()) {
    space->RepairFreeListsAfterBoot();
  }
}
void Heap::ProcessPretenuringFeedback() {
  if (FLAG_allocation_site_pretenuring) {
    int tenure_decisions = 0;
    int dont_tenure_decisions = 0;
    int allocation_mementos_found = 0;
    int allocation_sites = 0;
    int active_allocation_sites = 0;

    // If the scratchpad overflowed, we have to iterate over the allocation
    // sites list.
    // TODO(hpayer): We iterate over the whole list of allocation sites when
    // we grew to the maximum semi-space size to deopt maybe tenured
    // allocation sites. We could hold the maybe tenured allocation sites
    // in a separate data structure if this is a performance problem.
    bool deopt_maybe_tenured = DeoptMaybeTenuredAllocationSites();
    bool use_scratchpad =
        allocation_sites_scratchpad_length_ < kAllocationSiteScratchpadSize &&
        !deopt_maybe_tenured;

    int i = 0;
    Object* list_element = allocation_sites_list();
    bool trigger_deoptimization = false;
    bool maximum_size_scavenge = MaximumSizeScavenge();
    while (use_scratchpad ?
               i < allocation_sites_scratchpad_length_ :
               list_element->IsAllocationSite()) {
      AllocationSite* site = use_scratchpad ?
          AllocationSite::cast(allocation_sites_scratchpad()->get(i)) :
          AllocationSite::cast(list_element);
      allocation_mementos_found += site->memento_found_count();
      if (site->memento_found_count() > 0) {
        active_allocation_sites++;
        if (site->DigestPretenuringFeedback(maximum_size_scavenge)) {
          trigger_deoptimization = true;
        }
        if (site->GetPretenureMode() == TENURED) {
          tenure_decisions++;
        } else {
          dont_tenure_decisions++;
        }
        allocation_sites++;
      }

      if (deopt_maybe_tenured && site->IsMaybeTenure()) {
        site->set_deopt_dependent_code(true);
        trigger_deoptimization = true;
      }

      if (use_scratchpad) {
        i++;
      } else {
        list_element = site->weak_next();
      }
    }

    if (trigger_deoptimization) {
      isolate_->stack_guard()->RequestDeoptMarkedAllocationSites();
    }

    FlushAllocationSitesScratchpad();

    if (FLAG_trace_pretenuring_statistics &&
        (allocation_mementos_found > 0 ||
         tenure_decisions > 0 ||
         dont_tenure_decisions > 0)) {
      PrintF("GC: (mode, #visited allocation sites, #active allocation sites, "
             "#mementos, #tenure decisions, #donttenure decisions) "
             "(%s, %d, %d, %d, %d, %d)\n",
             use_scratchpad ? "use scratchpad" : "use list",
             allocation_sites,
             active_allocation_sites,
             allocation_mementos_found,
             tenure_decisions,
             dont_tenure_decisions);
    }
  }
}
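// The per-site decision happens in AllocationSite::DigestPretenuringFeedback()
// (defined with AllocationSite, not in this file). In rough sketch form, a
// site is tenured once the fraction of its allocations whose mementos were
// found by a scavenge crosses a ratio:
//
//   double ratio =
//       site->memento_found_count() /
//       static_cast<double>(site->memento_create_count());
//   // tenure if ratio >= kPretenureRatio, otherwise don't.
//
// Treat the create-count accessor and the constant name as illustrative
// here; the authoritative logic lives with AllocationSite in objects-inl.h.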
void Heap::DeoptMarkedAllocationSites() {
  // TODO(hpayer): If iterating over the allocation sites list becomes a
  // performance issue, use a cached data structure in the heap instead
  // (similar to the allocation sites scratchpad).
  Object* list_element = allocation_sites_list();
  while (list_element->IsAllocationSite()) {
    AllocationSite* site = AllocationSite::cast(list_element);
    if (site->deopt_dependent_code()) {
      site->dependent_code()->MarkCodeForDeoptimization(
          isolate_,
          DependentCode::kAllocationSiteTenuringChangedGroup);
      site->set_deopt_dependent_code(false);
    }
    list_element = site->weak_next();
  }
  Deoptimizer::DeoptimizeMarkedCode(isolate_);
}
void Heap::GarbageCollectionEpilogue() {
  store_buffer()->GCEpilogue();

  // In release mode, we only zap the from space under heap verification.
  if (Heap::ShouldZapGarbage()) {
    ZapFromSpace();
  }

  // Process pretenuring feedback and update allocation sites.
  ProcessPretenuringFeedback();

#ifdef VERIFY_HEAP
  if (FLAG_verify_heap) {
    Verify();
  }
#endif

  AllowHeapAllocation for_the_rest_of_the_epilogue;

#ifdef DEBUG
  if (FLAG_print_global_handles) isolate_->global_handles()->Print();
  if (FLAG_print_handles) PrintHandles();
  if (FLAG_gc_verbose) Print();
  if (FLAG_code_stats) ReportCodeStatistics("After GC");
#endif
  if (FLAG_deopt_every_n_garbage_collections > 0) {
    // TODO(jkummerow/ulan/jarin): This is not safe! We can't assume that
    // the topmost optimized frame can be deoptimized safely, because it
    // might not have a lazy bailout point right after its current PC.
    if (++gcs_since_last_deopt_ == FLAG_deopt_every_n_garbage_collections) {
      Deoptimizer::DeoptimizeAll(isolate());
      gcs_since_last_deopt_ = 0;
    }
  }

  UpdateMaximumCommitted();

  isolate_->counters()->alive_after_last_gc()->Set(
      static_cast<int>(SizeOfObjects()));

  isolate_->counters()->string_table_capacity()->Set(
      string_table()->Capacity());
  isolate_->counters()->number_of_symbols()->Set(
      string_table()->NumberOfElements());

  if (full_codegen_bytes_generated_ + crankshaft_codegen_bytes_generated_ > 0) {
    isolate_->counters()->codegen_fraction_crankshaft()->AddSample(
        static_cast<int>((crankshaft_codegen_bytes_generated_ * 100.0) /
                         (crankshaft_codegen_bytes_generated_
                          + full_codegen_bytes_generated_)));
  }

  if (CommittedMemory() > 0) {
    isolate_->counters()->external_fragmentation_total()->AddSample(
        static_cast<int>(100 - (SizeOfObjects() * 100.0) / CommittedMemory()));

    isolate_->counters()->heap_fraction_new_space()->
        AddSample(static_cast<int>(
            (new_space()->CommittedMemory() * 100.0) / CommittedMemory()));
    isolate_->counters()->heap_fraction_old_pointer_space()->AddSample(
        static_cast<int>(
            (old_pointer_space()->CommittedMemory() * 100.0) /
            CommittedMemory()));
    isolate_->counters()->heap_fraction_old_data_space()->AddSample(
        static_cast<int>(
            (old_data_space()->CommittedMemory() * 100.0) /
            CommittedMemory()));
    isolate_->counters()->heap_fraction_code_space()->
        AddSample(static_cast<int>(
            (code_space()->CommittedMemory() * 100.0) / CommittedMemory()));
    isolate_->counters()->heap_fraction_map_space()->AddSample(
        static_cast<int>(
            (map_space()->CommittedMemory() * 100.0) / CommittedMemory()));
    isolate_->counters()->heap_fraction_cell_space()->AddSample(
        static_cast<int>(
            (cell_space()->CommittedMemory() * 100.0) / CommittedMemory()));
    isolate_->counters()->heap_fraction_property_cell_space()->
        AddSample(static_cast<int>(
            (property_cell_space()->CommittedMemory() * 100.0) /
            CommittedMemory()));
    isolate_->counters()->heap_fraction_lo_space()->
        AddSample(static_cast<int>(
            (lo_space()->CommittedMemory() * 100.0) / CommittedMemory()));

    isolate_->counters()->heap_sample_total_committed()->AddSample(
        static_cast<int>(CommittedMemory() / KB));
    isolate_->counters()->heap_sample_total_used()->AddSample(
        static_cast<int>(SizeOfObjects() / KB));
    isolate_->counters()->heap_sample_map_space_committed()->AddSample(
        static_cast<int>(map_space()->CommittedMemory() / KB));
    isolate_->counters()->heap_sample_cell_space_committed()->AddSample(
        static_cast<int>(cell_space()->CommittedMemory() / KB));
    isolate_->counters()->
        heap_sample_property_cell_space_committed()->
            AddSample(static_cast<int>(
                property_cell_space()->CommittedMemory() / KB));
    isolate_->counters()->heap_sample_code_space_committed()->AddSample(
        static_cast<int>(code_space()->CommittedMemory() / KB));

    isolate_->counters()->heap_sample_maximum_committed()->AddSample(
        static_cast<int>(MaximumCommittedMemory() / KB));
  }

#define UPDATE_COUNTERS_FOR_SPACE(space) \
  isolate_->counters()->space##_bytes_available()->Set( \
      static_cast<int>(space()->Available())); \
  isolate_->counters()->space##_bytes_committed()->Set( \
      static_cast<int>(space()->CommittedMemory())); \
  isolate_->counters()->space##_bytes_used()->Set( \
      static_cast<int>(space()->SizeOfObjects()));
#define UPDATE_FRAGMENTATION_FOR_SPACE(space) \
  if (space()->CommittedMemory() > 0) { \
    isolate_->counters()->external_fragmentation_##space()->AddSample( \
        static_cast<int>(100 - \
            (space()->SizeOfObjects() * 100.0) / space()->CommittedMemory())); \
  }
#define UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(space) \
  UPDATE_COUNTERS_FOR_SPACE(space) \
  UPDATE_FRAGMENTATION_FOR_SPACE(space)

  UPDATE_COUNTERS_FOR_SPACE(new_space)
  UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(old_pointer_space)
  UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(old_data_space)
  UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(code_space)
  UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(map_space)
  UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(cell_space)
  UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(property_cell_space)
  UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(lo_space)
#undef UPDATE_COUNTERS_FOR_SPACE
#undef UPDATE_FRAGMENTATION_FOR_SPACE
#undef UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE

#ifdef DEBUG
  ReportStatisticsAfterGC();
#endif  // DEBUG
  isolate_->debug()->AfterGarbageCollection();

  // Remember the last top pointer so that we can later find out
  // whether we allocated in new space since the last GC.
  new_space_top_after_last_gc_ = new_space()->top();
}
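// For reference, a single expansion of the macro block above, written out by
// hand (the token-pasting ## builds the counter name):
//
//   UPDATE_COUNTERS_FOR_SPACE(new_space)
//   // becomes:
//   isolate_->counters()->new_space_bytes_available()->Set(
//       static_cast<int>(new_space()->Available()));
//   isolate_->counters()->new_space_bytes_committed()->Set(
//       static_cast<int>(new_space()->CommittedMemory()));
//   isolate_->counters()->new_space_bytes_used()->Set(
//       static_cast<int>(new_space()->SizeOfObjects()));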
void Heap::CollectAllGarbage(int flags,
                             const char* gc_reason,
                             const v8::GCCallbackFlags gc_callback_flags) {
  // Since we are ignoring the return value, the exact choice of space does
  // not matter, so long as we do not specify NEW_SPACE, which would not
  // cause a full GC.
  mark_compact_collector_.SetFlags(flags);
  CollectGarbage(OLD_POINTER_SPACE, gc_reason, gc_callback_flags);
  mark_compact_collector_.SetFlags(kNoGCFlags);
}
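// Typical internal usage, as a sketch (both flag constants are declared on
// Heap; the reason strings are illustrative):
//
//   heap->CollectAllGarbage(Heap::kNoGCFlags, "low memory notification");
//   // Or, to also abort any in-progress incremental marking:
//   heap->CollectAllGarbage(Heap::kAbortIncrementalMarkingMask, "testing");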
void Heap::CollectAllAvailableGarbage(const char* gc_reason) {
  // Since we are ignoring the return value, the exact choice of space does
  // not matter, so long as we do not specify NEW_SPACE, which would not
  // cause a full GC.
  // Major GC would invoke weak handle callbacks on weakly reachable
  // handles, but won't collect weakly reachable objects until next
  // major GC. Therefore if we collect aggressively and weak handle callback
  // has been invoked, we rerun major GC to release objects which become
  // garbage.
  // Note: as weak callbacks can execute arbitrary code, we cannot
  // hope that eventually there will be no weak callback invocations.
  // Therefore stop recollecting after several attempts.
  if (isolate()->concurrent_recompilation_enabled()) {
    // The optimizing compiler may be unnecessarily holding on to memory.
    DisallowHeapAllocation no_recursive_gc;
    isolate()->optimizing_compiler_thread()->Flush();
  }
  mark_compact_collector()->SetFlags(kMakeHeapIterableMask |
                                     kReduceMemoryFootprintMask);
  isolate_->compilation_cache()->Clear();
  const int kMaxNumberOfAttempts = 7;
  const int kMinNumberOfAttempts = 2;
  for (int attempt = 0; attempt < kMaxNumberOfAttempts; attempt++) {
    if (!CollectGarbage(MARK_COMPACTOR, gc_reason, NULL) &&
        attempt + 1 >= kMinNumberOfAttempts) {
      break;
    }
  }
  mark_compact_collector()->SetFlags(kNoGCFlags);
  new_space_.Shrink();
  UncommitFromSpace();
  incremental_marking()->UncommitMarkingDeque();
}
void Heap::EnsureFillerObjectAtTop() {
  // There may be an allocation memento behind every object in new space.
  // If we evacuate a new space that is not full, or if we are on the last
  // page of the new space, then there may be uninitialized memory behind
  // the top pointer of the new space page. We store a filler object there
  // to identify the unused space.
  Address from_top = new_space_.top();
  Address from_limit = new_space_.limit();
  if (from_top < from_limit) {
    int remaining_in_page = static_cast<int>(from_limit - from_top);
    CreateFillerObjectAt(from_top, remaining_in_page);
  }
}
bool Heap::CollectGarbage(GarbageCollector collector,
                          const char* gc_reason,
                          const char* collector_reason,
                          const v8::GCCallbackFlags gc_callback_flags) {
  // The VM is in the GC state until exiting this function.
  VMState<GC> state(isolate_);

#ifdef DEBUG
  // Reset the allocation timeout to the GC interval, but make sure to
  // allow at least a few allocations after a collection. The reason
  // for this is that we have a lot of allocation sequences and we
  // assume that a garbage collection will allow the subsequent
  // allocation attempts to go through.
  allocation_timeout_ = Max(6, FLAG_gc_interval);
#endif

  EnsureFillerObjectAtTop();

  if (collector == SCAVENGER && !incremental_marking()->IsStopped()) {
    if (FLAG_trace_incremental_marking) {
      PrintF("[IncrementalMarking] Scavenge during marking.\n");
    }
  }

  if (collector == MARK_COMPACTOR &&
      !mark_compact_collector()->abort_incremental_marking() &&
      !incremental_marking()->IsStopped() &&
      !incremental_marking()->should_hurry() &&
      FLAG_incremental_marking_steps) {
    // Make progress in incremental marking.
    const intptr_t kStepSizeWhenDelayedByScavenge = 1 * MB;
    incremental_marking()->Step(kStepSizeWhenDelayedByScavenge,
                                IncrementalMarking::NO_GC_VIA_STACK_GUARD);
    if (!incremental_marking()->IsComplete() && !FLAG_gc_global) {
      if (FLAG_trace_incremental_marking) {
        PrintF("[IncrementalMarking] Delaying MarkSweep.\n");
      }
      collector = SCAVENGER;
      collector_reason = "incremental marking delaying mark-sweep";
    }
  }

  bool next_gc_likely_to_collect_more = false;

  { GCTracer tracer(this, gc_reason, collector_reason);
    ASSERT(AllowHeapAllocation::IsAllowed());
    DisallowHeapAllocation no_allocation_during_gc;
    GarbageCollectionPrologue();
    // The GC count was incremented in the prologue. Tell the tracer about
    // it.
    tracer.set_gc_count(gc_count_);

    // Tell the tracer which collector we've selected.
    tracer.set_collector(collector);

    {
      HistogramTimerScope histogram_timer_scope(
          (collector == SCAVENGER) ? isolate_->counters()->gc_scavenger()
                                   : isolate_->counters()->gc_compactor());
      next_gc_likely_to_collect_more =
          PerformGarbageCollection(collector, &tracer, gc_callback_flags);
    }

    GarbageCollectionEpilogue();
  }

  // Start incremental marking for the next cycle. The heap snapshot
  // generator needs incremental marking to stay off after it aborted.
  if (!mark_compact_collector()->abort_incremental_marking() &&
      incremental_marking()->IsStopped() &&
      incremental_marking()->WorthActivating() &&
      NextGCIsLikelyToBeFull()) {
    incremental_marking()->Start();
  }

  return next_gc_likely_to_collect_more;
}
int Heap::NotifyContextDisposed() {
  if (isolate()->concurrent_recompilation_enabled()) {
    // Flush the queued recompilation tasks.
    isolate()->optimizing_compiler_thread()->Flush();
  }
  flush_monomorphic_ics_ = true;
  return ++contexts_disposed_;
}
void Heap::MoveElements(FixedArray* array,
                        int dst_index,
                        int src_index,
                        int len) {
  if (len == 0) return;

  ASSERT(array->map() != fixed_cow_array_map());
  Object** dst_objects = array->data_start() + dst_index;
  MemMove(dst_objects, array->data_start() + src_index, len * kPointerSize);
  if (!InNewSpace(array)) {
    for (int i = 0; i < len; i++) {
      // TODO(hpayer): check store buffer for entries
      if (InNewSpace(dst_objects[i])) {
        RecordWrite(array->address(), array->OffsetOfElementAt(dst_index + i));
      }
    }
  }
  incremental_marking()->RecordWrites(array);
}
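// Why the write barrier above matters (illustrative scenario): if `array`
// lives in old space and the move copies a new-space pointer into slot
// dst_index + i, the scavenger can only discover that slot through the store
// buffer, so RecordWrite() must log it; otherwise a scavenge could relocate
// the target and leave the old-space slot pointing at a stale address. The
// RecordWrites() call maintains the analogous invariant for incremental
// marking.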
#ifdef VERIFY_HEAP
// Helper class for verifying the string table.
class StringTableVerifier : public ObjectVisitor {
 public:
  void VisitPointers(Object** start, Object** end) {
    // Visit all HeapObject pointers in [start, end).
    for (Object** p = start; p < end; p++) {
      if ((*p)->IsHeapObject()) {
        // Check that the string is actually internalized.
        CHECK((*p)->IsTheHole() || (*p)->IsUndefined() ||
              (*p)->IsInternalizedString());
      }
    }
  }
};


static void VerifyStringTable(Heap* heap) {
  StringTableVerifier verifier;
  heap->string_table()->IterateElements(&verifier);
}
#endif  // VERIFY_HEAP
static bool AbortIncrementalMarkingAndCollectGarbage(
    Heap* heap,
    AllocationSpace space,
    const char* gc_reason = NULL) {
  heap->mark_compact_collector()->SetFlags(Heap::kAbortIncrementalMarkingMask);
  bool result = heap->CollectGarbage(space, gc_reason);
  heap->mark_compact_collector()->SetFlags(Heap::kNoGCFlags);
  return result;
}
void Heap::ReserveSpace(int *sizes, Address *locations_out) {
  bool gc_performed = true;
  int counter = 0;
  static const int kThreshold = 20;
  while (gc_performed && counter++ < kThreshold) {
    gc_performed = false;
    ASSERT(NEW_SPACE == FIRST_PAGED_SPACE - 1);
    for (int space = NEW_SPACE; space <= LAST_PAGED_SPACE; space++) {
      if (sizes[space] != 0) {
        AllocationResult allocation;
        if (space == NEW_SPACE) {
          allocation = new_space()->AllocateRaw(sizes[space]);
        } else {
          allocation = paged_space(space)->AllocateRaw(sizes[space]);
        }
        FreeListNode* node;
        if (!allocation.To(&node)) {
          if (space == NEW_SPACE) {
            Heap::CollectGarbage(NEW_SPACE,
                                 "failed to reserve space in the new space");
          } else {
            AbortIncrementalMarkingAndCollectGarbage(
                this,
                static_cast<AllocationSpace>(space),
                "failed to reserve space in paged space");
          }
          gc_performed = true;
          break;
        } else {
          // Mark with a free list node, in case we have a GC before
          // allocating.
          node->set_size(this, sizes[space]);
          locations_out[space] = node->address();
        }
      }
    }
  }

  if (gc_performed) {
    // Failed to reserve the space after several attempts.
    V8::FatalProcessOutOfMemory("Heap::ReserveSpace");
  }
}
void Heap::EnsureFromSpaceIsCommitted() {
  if (new_space_.CommitFromSpaceIfNeeded()) return;

  // Committing memory to from space failed.
  // Memory is exhausted and we will die.
  V8::FatalProcessOutOfMemory("Committing semi space failed.");
}
void Heap::ClearJSFunctionResultCaches() {
  if (isolate_->bootstrapper()->IsActive()) return;

  Object* context = native_contexts_list();
  while (!context->IsUndefined()) {
    // Get the caches for this context. GC can happen when the context
    // is not fully initialized, so the caches can be undefined.
    Object* caches_or_undefined =
        Context::cast(context)->get(Context::JSFUNCTION_RESULT_CACHES_INDEX);
    if (!caches_or_undefined->IsUndefined()) {
      FixedArray* caches = FixedArray::cast(caches_or_undefined);
      // Clear the caches:
      int length = caches->length();
      for (int i = 0; i < length; i++) {
        JSFunctionResultCache::cast(caches->get(i))->Clear();
      }
    }
    // Get the next context:
    context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
  }
}
void Heap::ClearNormalizedMapCaches() {
  if (isolate_->bootstrapper()->IsActive() &&
      !incremental_marking()->IsMarking()) {
    return;
  }

  Object* context = native_contexts_list();
  while (!context->IsUndefined()) {
    // GC can happen when the context is not fully initialized,
    // so the cache can be undefined.
    Object* cache =
        Context::cast(context)->get(Context::NORMALIZED_MAP_CACHE_INDEX);
    if (!cache->IsUndefined()) {
      NormalizedMapCache::cast(cache)->Clear();
    }
    context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
  }
}
void Heap::UpdateSurvivalStatistics(int start_new_space_size) {
  if (start_new_space_size == 0) return;

  promotion_rate_ =
      (static_cast<double>(promoted_objects_size_) /
       static_cast<double>(start_new_space_size) * 100);

  semi_space_copied_rate_ =
      (static_cast<double>(semi_space_copied_object_size_) /
       static_cast<double>(start_new_space_size) * 100);

  double survival_rate = promotion_rate_ + semi_space_copied_rate_;

  if (survival_rate > kYoungSurvivalRateHighThreshold) {
    high_survival_rate_period_length_++;
  } else {
    high_survival_rate_period_length_ = 0;
  }
}
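// Worked example with illustrative numbers: with a 4 MB new space at
// scavenge start, promoted_objects_size_ == 1 MB and
// semi_space_copied_object_size_ == 1.5 MB give
//
//   promotion_rate_         = 1.0 / 4.0 * 100 == 25.0
//   semi_space_copied_rate_ = 1.5 / 4.0 * 100 == 37.5
//   survival_rate           = 62.5
//
// which only counts toward a high-survival-rate period if it exceeds
// kYoungSurvivalRateHighThreshold.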
bool Heap::PerformGarbageCollection(
    GarbageCollector collector,
    GCTracer* tracer,
    const v8::GCCallbackFlags gc_callback_flags) {
  int freed_global_handles = 0;

  if (collector != SCAVENGER) {
    PROFILE(isolate_, CodeMovingGCEvent());
  }

#ifdef VERIFY_HEAP
  if (FLAG_verify_heap) {
    VerifyStringTable(this);
  }
#endif

  GCType gc_type =
      collector == MARK_COMPACTOR ? kGCTypeMarkSweepCompact : kGCTypeScavenge;

  { GCCallbacksScope scope(this);
    if (scope.CheckReenter()) {
      AllowHeapAllocation allow_allocation;
      GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
      VMState<EXTERNAL> state(isolate_);
      HandleScope handle_scope(isolate_);
      CallGCPrologueCallbacks(gc_type, kNoGCCallbackFlags);
    }
  }

  EnsureFromSpaceIsCommitted();

  int start_new_space_size = Heap::new_space()->SizeAsInt();

  if (IsHighSurvivalRate()) {
    // We speed up the incremental marker if it is running so that it
    // does not fall behind the rate of promotion, which would cause a
    // constantly growing old space.
    incremental_marking()->NotifyOfHighPromotionRate();
  }

  if (collector == MARK_COMPACTOR) {
    // Perform mark-sweep with optional compaction.
    MarkCompact(tracer);
    sweep_generation_++;
    // Temporarily set the limit for case when PostGarbageCollectionProcessing
    // allocates and triggers GC. The real limit is set after
    // PostGarbageCollectionProcessing.
    old_generation_allocation_limit_ =
        OldGenerationAllocationLimit(PromotedSpaceSizeOfObjects(), 0);
    old_gen_exhausted_ = false;
  } else {
    Scavenge();
  }

  UpdateSurvivalStatistics(start_new_space_size);

  isolate_->counters()->objs_since_last_young()->Set(0);

  // Callbacks that fire after this point might trigger nested GCs and
  // restart incremental marking, the assertion can't be moved down.
  ASSERT(collector == SCAVENGER || incremental_marking()->IsStopped());

  gc_post_processing_depth_++;
  { AllowHeapAllocation allow_allocation;
    GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
    freed_global_handles =
        isolate_->global_handles()->PostGarbageCollectionProcessing(
            collector, tracer);
  }
  gc_post_processing_depth_--;

  isolate_->eternal_handles()->PostGarbageCollectionProcessing(this);

  // Update relocatables.
  Relocatable::PostGarbageCollectionProcessing(isolate_);

  if (collector == MARK_COMPACTOR) {
    // Register the amount of external allocated memory.
    amount_of_external_allocated_memory_at_last_global_gc_ =
        amount_of_external_allocated_memory_;
    old_generation_allocation_limit_ =
        OldGenerationAllocationLimit(PromotedSpaceSizeOfObjects(),
                                     freed_global_handles);
  }

  { GCCallbacksScope scope(this);
    if (scope.CheckReenter()) {
      AllowHeapAllocation allow_allocation;
      GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
      VMState<EXTERNAL> state(isolate_);
      HandleScope handle_scope(isolate_);
      CallGCEpilogueCallbacks(gc_type, gc_callback_flags);
    }
  }

#ifdef VERIFY_HEAP
  if (FLAG_verify_heap) {
    VerifyStringTable(this);
  }
#endif

  return freed_global_handles > 0;
}
void Heap::CallGCPrologueCallbacks(GCType gc_type, GCCallbackFlags flags) {
  for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
    if (gc_type & gc_prologue_callbacks_[i].gc_type) {
      if (!gc_prologue_callbacks_[i].pass_isolate_) {
        v8::GCPrologueCallback callback =
            reinterpret_cast<v8::GCPrologueCallback>(
                gc_prologue_callbacks_[i].callback);
        callback(gc_type, flags);
      } else {
        v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(this->isolate());
        gc_prologue_callbacks_[i].callback(isolate, gc_type, flags);
      }
    }
  }
}


void Heap::CallGCEpilogueCallbacks(GCType gc_type,
                                   GCCallbackFlags gc_callback_flags) {
  for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
    if (gc_type & gc_epilogue_callbacks_[i].gc_type) {
      if (!gc_epilogue_callbacks_[i].pass_isolate_) {
        v8::GCPrologueCallback callback =
            reinterpret_cast<v8::GCPrologueCallback>(
                gc_epilogue_callbacks_[i].callback);
        callback(gc_type, gc_callback_flags);
      } else {
        v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(this->isolate());
        gc_epilogue_callbacks_[i].callback(
            isolate, gc_type, gc_callback_flags);
      }
    }
  }
}
void Heap::MarkCompact(GCTracer* tracer) {
  gc_state_ = MARK_COMPACT;
  LOG(isolate_, ResourceEvent("markcompact", "begin"));

  uint64_t size_of_objects_before_gc = SizeOfObjects();

  mark_compact_collector_.Prepare(tracer);

  ms_count_++;
  tracer->set_full_gc_count(ms_count_);

  MarkCompactPrologue();

  mark_compact_collector_.CollectGarbage();

  LOG(isolate_, ResourceEvent("markcompact", "end"));

  gc_state_ = NOT_IN_GC;

  isolate_->counters()->objs_since_last_full()->Set(0);

  flush_monomorphic_ics_ = false;

  if (FLAG_allocation_site_pretenuring) {
    EvaluateOldSpaceLocalPretenuring(size_of_objects_before_gc);
  }
}
void Heap::MarkCompactPrologue() {
  // At any old GC clear the keyed lookup cache to enable collection of unused
  // maps.
  isolate_->keyed_lookup_cache()->Clear();
  isolate_->context_slot_cache()->Clear();
  isolate_->descriptor_lookup_cache()->Clear();
  RegExpResultsCache::Clear(string_split_cache());
  RegExpResultsCache::Clear(regexp_multiple_cache());

  isolate_->compilation_cache()->MarkCompactPrologue();

  CompletelyClearInstanceofCache();

  FlushNumberStringCache();
  if (FLAG_cleanup_code_caches_at_gc) {
    polymorphic_code_cache()->set_cache(undefined_value());
  }

  ClearNormalizedMapCaches();
}
// Helper class for copying HeapObjects
class ScavengeVisitor: public ObjectVisitor {
 public:
  explicit ScavengeVisitor(Heap* heap) : heap_(heap) {}

  void VisitPointer(Object** p) { ScavengePointer(p); }

  void VisitPointers(Object** start, Object** end) {
    // Copy all HeapObject pointers in [start, end)
    for (Object** p = start; p < end; p++) ScavengePointer(p);
  }

 private:
  void ScavengePointer(Object** p) {
    Object* object = *p;
    if (!heap_->InNewSpace(object)) return;
    Heap::ScavengeObject(reinterpret_cast<HeapObject**>(p),
                         reinterpret_cast<HeapObject*>(object));
  }

  Heap* heap_;
};
#ifdef VERIFY_HEAP
// Visitor class to verify pointers in code or data space do not point into
// new space.
class VerifyNonPointerSpacePointersVisitor: public ObjectVisitor {
 public:
  explicit VerifyNonPointerSpacePointersVisitor(Heap* heap) : heap_(heap) {}
  void VisitPointers(Object** start, Object** end) {
    for (Object** current = start; current < end; current++) {
      if ((*current)->IsHeapObject()) {
        CHECK(!heap_->InNewSpace(HeapObject::cast(*current)));
      }
    }
  }

 private:
  Heap* heap_;
};


static void VerifyNonPointerSpacePointers(Heap* heap) {
  // Verify that there are no pointers to new space in spaces where we
  // do not expect them.
  VerifyNonPointerSpacePointersVisitor v(heap);
  HeapObjectIterator code_it(heap->code_space());
  for (HeapObject* object = code_it.Next();
       object != NULL; object = code_it.Next())
    object->Iterate(&v);

  // The old data space is normally swept conservatively, in which case the
  // iterator does not work, so we skip the check in that case.
  if (!heap->old_data_space()->was_swept_conservatively()) {
    HeapObjectIterator data_it(heap->old_data_space());
    for (HeapObject* object = data_it.Next();
         object != NULL; object = data_it.Next())
      object->Iterate(&v);
  }
}
#endif  // VERIFY_HEAP
void Heap::CheckNewSpaceExpansionCriteria() {
  if (new_space_.Capacity() < new_space_.MaximumCapacity() &&
      survived_since_last_expansion_ > new_space_.Capacity()) {
    // Grow the size of new space if there is room to grow, enough data
    // has survived scavenge since the last expansion and we are not in
    // high promotion mode.
    new_space_.Grow();
    survived_since_last_expansion_ = 0;
  }
}
static bool IsUnscavengedHeapObject(Heap* heap, Object** p) {
  return heap->InNewSpace(*p) &&
         !HeapObject::cast(*p)->map_word().IsForwardingAddress();
}


void Heap::ScavengeStoreBufferCallback(
    Heap* heap,
    MemoryChunk* page,
    StoreBufferEvent event) {
  heap->store_buffer_rebuilder_.Callback(page, event);
}
void StoreBufferRebuilder::Callback(MemoryChunk* page, StoreBufferEvent event) {
  if (event == kStoreBufferStartScanningPagesEvent) {
    start_of_current_page_ = NULL;
    current_page_ = NULL;
  } else if (event == kStoreBufferScanningPageEvent) {
    if (current_page_ != NULL) {
      // If this page already overflowed the store buffer during this
      // iteration, then we should wipe out the entries that have been added
      // for it.
      if (current_page_->scan_on_scavenge()) {
        store_buffer_->SetTop(start_of_current_page_);
      } else if (store_buffer_->Top() - start_of_current_page_ >=
                 (store_buffer_->Limit() - store_buffer_->Top()) >> 2) {
        // Did we find too many pointers in the previous page? The heuristic is
        // that no page can take more than 1/5 the remaining slots in the store
        // buffer.
        current_page_->set_scan_on_scavenge(true);
        store_buffer_->SetTop(start_of_current_page_);
      } else {
        // In this case the page we scanned took a reasonable number of slots
        // in the store buffer. It has now been rehabilitated and is no longer
        // marked scan_on_scavenge.
        ASSERT(!current_page_->scan_on_scavenge());
      }
    }
    start_of_current_page_ = store_buffer_->Top();
    current_page_ = page;
  } else if (event == kStoreBufferFullEvent) {
    // The current page overflowed the store buffer again. Wipe out its entries
    // in the store buffer and mark it scan-on-scavenge again. This may happen
    // several times while scanning.
    if (current_page_ == NULL) {
      // Store Buffer overflowed while scanning promoted objects. These are not
      // in any particular page, though they are likely to be clustered by the
      // allocation routines.
      store_buffer_->EnsureSpace(StoreBuffer::kStoreBufferSize / 2);
    } else {
      // Store Buffer overflowed while scanning a particular old space page for
      // pointers to new space.
      ASSERT(current_page_ == page);
      ASSERT(page != NULL);
      current_page_->set_scan_on_scavenge(true);
      ASSERT(start_of_current_page_ != store_buffer_->Top());
      store_buffer_->SetTop(start_of_current_page_);
    }
  }
}
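// The shift above encodes the 1/5 heuristic: with used = Top() - start and
// remaining = Limit() - Top(), the condition used >= remaining >> 2 is
// used >= remaining / 4, i.e. used / (used + remaining) >= 1/5. Concretely
// (illustrative numbers): with 1000 slots free at page start, a page that
// contributed 200 entries leaves 800 remaining; 200 >= 800 / 4 holds, so the
// page is switched to scan-on-scavenge and its entries are wiped.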
void PromotionQueue::Initialize() {
  // Assumes that a NewSpacePage exactly fits a number of promotion queue
  // entries (where each is a pair of intptr_t). This allows us to simplify
  // the test for when to switch pages.
  ASSERT((Page::kPageSize - MemoryChunk::kBodyOffset) % (2 * kPointerSize)
         == 0);
  limit_ = reinterpret_cast<intptr_t*>(heap_->new_space()->ToSpaceStart());
  front_ = rear_ =
      reinterpret_cast<intptr_t*>(heap_->new_space()->ToSpaceEnd());
  emergency_stack_ = NULL;
}
void PromotionQueue::RelocateQueueHead() {
  ASSERT(emergency_stack_ == NULL);

  Page* p = Page::FromAllocationTop(reinterpret_cast<Address>(rear_));
  intptr_t* head_start = rear_;
  intptr_t* head_end =
      Min(front_, reinterpret_cast<intptr_t*>(p->area_end()));

  int entries_count =
      static_cast<int>(head_end - head_start) / kEntrySizeInWords;

  emergency_stack_ = new List<Entry>(2 * entries_count);

  while (head_start != head_end) {
    int size = static_cast<int>(*(head_start++));
    HeapObject* obj = reinterpret_cast<HeapObject*>(*(head_start++));
    emergency_stack_->Add(Entry(obj, size));
  }
}
class ScavengeWeakObjectRetainer : public WeakObjectRetainer {
 public:
  explicit ScavengeWeakObjectRetainer(Heap* heap) : heap_(heap) { }

  virtual Object* RetainAs(Object* object) {
    if (!heap_->InFromSpace(object)) {
      return object;
    }

    MapWord map_word = HeapObject::cast(object)->map_word();
    if (map_word.IsForwardingAddress()) {
      return map_word.ToForwardingAddress();
    }
    return NULL;
  }

 private:
  Heap* heap_;
};
void Heap::Scavenge() {
  RelocationLock relocation_lock(this);

#ifdef VERIFY_HEAP
  if (FLAG_verify_heap) VerifyNonPointerSpacePointers(this);
#endif

  gc_state_ = SCAVENGE;

  // Implements Cheney's copying algorithm
  LOG(isolate_, ResourceEvent("scavenge", "begin"));

  // Clear descriptor cache.
  isolate_->descriptor_lookup_cache()->Clear();

  // Used for updating survived_since_last_expansion_ at function end.
  intptr_t survived_watermark = PromotedSpaceSizeOfObjects();

  SelectScavengingVisitorsTable();

  incremental_marking()->PrepareForScavenge();

  // Flip the semispaces. After flipping, to space is empty, from space has
  // live objects.
  new_space_.Flip();
  new_space_.ResetAllocationInfo();

  // We need to sweep newly copied objects which can be either in the
  // to space or promoted to the old generation. For to-space
  // objects, we treat the bottom of the to space as a queue. Newly
  // copied and unswept objects lie between a 'front' mark and the
  // allocation pointer.
  //
  // Promoted objects can go into various old-generation spaces, and
  // can be allocated internally in the spaces (from the free list).
  // We treat the top of the to space as a queue of addresses of
  // promoted objects. The addresses of newly promoted and unswept
  // objects lie between a 'front' mark and a 'rear' mark that is
  // updated as a side effect of promoting an object.
  //
  // There is guaranteed to be enough room at the top of the to space
  // for the addresses of promoted objects: every object promoted
  // frees up its size in bytes from the top of the new space, and
  // objects are at least one pointer in size.
  Address new_space_front = new_space_.ToSpaceStart();
  promotion_queue_.Initialize();

#ifdef DEBUG
  store_buffer()->Clean();
#endif

  ScavengeVisitor scavenge_visitor(this);
  // Copy roots.
  IterateRoots(&scavenge_visitor, VISIT_ALL_IN_SCAVENGE);

  // Copy objects reachable from the old generation.
  {
    StoreBufferRebuildScope scope(this,
                                  store_buffer(),
                                  &ScavengeStoreBufferCallback);
    store_buffer()->IteratePointersToNewSpace(&ScavengeObject);
  }

  // Copy objects reachable from simple cells by scavenging cell values
  // directly.
  HeapObjectIterator cell_iterator(cell_space_);
  for (HeapObject* heap_object = cell_iterator.Next();
       heap_object != NULL;
       heap_object = cell_iterator.Next()) {
    if (heap_object->IsCell()) {
      Cell* cell = Cell::cast(heap_object);
      Address value_address = cell->ValueAddress();
      scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(value_address));
    }
  }

  // Copy objects reachable from global property cells by scavenging global
  // property cell values directly.
  HeapObjectIterator js_global_property_cell_iterator(property_cell_space_);
  for (HeapObject* heap_object = js_global_property_cell_iterator.Next();
       heap_object != NULL;
       heap_object = js_global_property_cell_iterator.Next()) {
    if (heap_object->IsPropertyCell()) {
      PropertyCell* cell = PropertyCell::cast(heap_object);
      Address value_address = cell->ValueAddress();
      scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(value_address));
      Address type_address = cell->TypeAddress();
      scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(type_address));
    }
  }

  // Copy objects reachable from the encountered weak collections list.
  scavenge_visitor.VisitPointer(&encountered_weak_collections_);

  // Copy objects reachable from the code flushing candidates list.
  MarkCompactCollector* collector = mark_compact_collector();
  if (collector->is_code_flushing_enabled()) {
    collector->code_flusher()->IteratePointersToFromSpace(&scavenge_visitor);
  }

  new_space_front = DoScavenge(&scavenge_visitor, new_space_front);

  while (isolate()->global_handles()->IterateObjectGroups(
             &scavenge_visitor, &IsUnscavengedHeapObject)) {
    new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
  }
  isolate()->global_handles()->RemoveObjectGroups();
  isolate()->global_handles()->RemoveImplicitRefGroups();

  isolate_->global_handles()->IdentifyNewSpaceWeakIndependentHandles(
      &IsUnscavengedHeapObject);
  isolate_->global_handles()->IterateNewSpaceWeakIndependentRoots(
      &scavenge_visitor);
  new_space_front = DoScavenge(&scavenge_visitor, new_space_front);

  UpdateNewSpaceReferencesInExternalStringTable(
      &UpdateNewSpaceReferenceInExternalStringTableEntry);

  promotion_queue_.Destroy();

  incremental_marking()->UpdateMarkingDequeAfterScavenge();

  ScavengeWeakObjectRetainer weak_object_retainer(this);
  ProcessWeakReferences(&weak_object_retainer);

  ASSERT(new_space_front == new_space_.top());

  // Set age mark.
  new_space_.set_age_mark(new_space_.top());

  new_space_.LowerInlineAllocationLimit(
      new_space_.inline_allocation_limit_step());

  // Update how much has survived scavenge.
  IncrementYoungSurvivorsCounter(static_cast<int>(
      (PromotedSpaceSizeOfObjects() - survived_watermark) +
      new_space_.Size()));

  LOG(isolate_, ResourceEvent("scavenge", "end"));

  gc_state_ = NOT_IN_GC;

  scavenges_since_last_idle_round_++;
}
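// The survivor arithmetic above, spelled out (illustrative numbers): if the
// promoted-space size grew from a watermark of 10 MB to 11 MB during this
// scavenge, and 1.5 MB of objects remain in to-space afterwards, then
//
//   survivors = (11 MB - 10 MB) + 1.5 MB = 2.5 MB
//
// is credited to survived_since_last_expansion_, which
// CheckNewSpaceExpansionCriteria() compares against the new-space capacity
// when deciding whether to grow the semispaces.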
String* Heap::UpdateNewSpaceReferenceInExternalStringTableEntry(Heap* heap,
                                                                Object** p) {
  MapWord first_word = HeapObject::cast(*p)->map_word();

  if (!first_word.IsForwardingAddress()) {
    // Unreachable external string can be finalized.
    heap->FinalizeExternalString(String::cast(*p));
    return NULL;
  }

  // String is still reachable.
  return String::cast(first_word.ToForwardingAddress());
}
void Heap::UpdateNewSpaceReferencesInExternalStringTable(
    ExternalStringTableUpdaterCallback updater_func) {
#ifdef VERIFY_HEAP
  if (FLAG_verify_heap) {
    external_string_table_.Verify();
  }
#endif

  if (external_string_table_.new_space_strings_.is_empty()) return;

  Object** start = &external_string_table_.new_space_strings_[0];
  Object** end = start + external_string_table_.new_space_strings_.length();
  Object** last = start;

  for (Object** p = start; p < end; ++p) {
    ASSERT(InFromSpace(*p));
    String* target = updater_func(this, p);

    if (target == NULL) continue;

    ASSERT(target->IsExternalString());

    if (InNewSpace(target)) {
      // String is still in new space. Update the table entry.
      *last = target;
      ++last;
    } else {
      // String got promoted. Move it to the old string list.
      external_string_table_.AddOldString(target);
    }
  }

  ASSERT(last <= end);
  external_string_table_.ShrinkNewStrings(static_cast<int>(last - start));
}
void Heap::UpdateReferencesInExternalStringTable(
    ExternalStringTableUpdaterCallback updater_func) {
  // Update old space string references.
  if (external_string_table_.old_space_strings_.length() > 0) {
    Object** start = &external_string_table_.old_space_strings_[0];
    Object** end = start + external_string_table_.old_space_strings_.length();
    for (Object** p = start; p < end; ++p) *p = updater_func(this, p);
  }

  UpdateNewSpaceReferencesInExternalStringTable(updater_func);
}
void Heap::ProcessWeakReferences(WeakObjectRetainer* retainer) {
  ProcessArrayBuffers(retainer);
  ProcessNativeContexts(retainer);
  // TODO(mvstanton): AllocationSites only need to be processed during
  // MARK_COMPACT, as they live in old space. Verify and address.
  ProcessAllocationSites(retainer);
}


void Heap::ProcessNativeContexts(WeakObjectRetainer* retainer) {
  Object* head = VisitWeakList<Context>(this, native_contexts_list(), retainer);
  // Update the head of the list of contexts.
  set_native_contexts_list(head);
}


void Heap::ProcessArrayBuffers(WeakObjectRetainer* retainer) {
  Object* array_buffer_obj =
      VisitWeakList<JSArrayBuffer>(this, array_buffers_list(), retainer);
  set_array_buffers_list(array_buffer_obj);
}


void Heap::TearDownArrayBuffers() {
  Object* undefined = undefined_value();
  for (Object* o = array_buffers_list(); o != undefined;) {
    JSArrayBuffer* buffer = JSArrayBuffer::cast(o);
    Runtime::FreeArrayBuffer(isolate(), buffer);
    o = buffer->weak_next();
  }
  set_array_buffers_list(undefined);
}


void Heap::ProcessAllocationSites(WeakObjectRetainer* retainer) {
  Object* allocation_site_obj =
      VisitWeakList<AllocationSite>(this, allocation_sites_list(), retainer);
  set_allocation_sites_list(allocation_site_obj);
}
void Heap::ResetAllAllocationSitesDependentCode(PretenureFlag flag) {
  DisallowHeapAllocation no_allocation_scope;
  Object* cur = allocation_sites_list();
  bool marked = false;
  while (cur->IsAllocationSite()) {
    AllocationSite* casted = AllocationSite::cast(cur);
    if (casted->GetPretenureMode() == flag) {
      casted->ResetPretenureDecision();
      casted->set_deopt_dependent_code(true);
      marked = true;
    }
    cur = casted->weak_next();
  }
  if (marked) isolate_->stack_guard()->RequestDeoptMarkedAllocationSites();
}
void Heap::EvaluateOldSpaceLocalPretenuring(
    uint64_t size_of_objects_before_gc) {
  uint64_t size_of_objects_after_gc = SizeOfObjects();
  double old_generation_survival_rate =
      (static_cast<double>(size_of_objects_after_gc) * 100) /
      static_cast<double>(size_of_objects_before_gc);

  if (old_generation_survival_rate < kOldSurvivalRateLowThreshold) {
    // Too many objects died in the old generation, pretenuring of wrong
    // allocation sites may be the cause for that. We have to deopt all
    // dependent code registered in the allocation sites to re-evaluate
    // our pretenuring decisions.
    ResetAllAllocationSitesDependentCode(TENURED);
    if (FLAG_trace_pretenuring) {
      PrintF("Deopt all allocation sites dependent code due to low survival "
             "rate in the old generation %f\n", old_generation_survival_rate);
    }
  }
}
void Heap::VisitExternalResources(v8::ExternalResourceVisitor* visitor) {
  DisallowHeapAllocation no_allocation;
  // All external strings are listed in the external string table.

  class ExternalStringTableVisitorAdapter : public ObjectVisitor {
   public:
    explicit ExternalStringTableVisitorAdapter(
        v8::ExternalResourceVisitor* visitor) : visitor_(visitor) {}
    virtual void VisitPointers(Object** start, Object** end) {
      for (Object** p = start; p < end; p++) {
        ASSERT((*p)->IsExternalString());
        visitor_->VisitExternalString(Utils::ToLocal(
            Handle<String>(String::cast(*p))));
      }
    }
   private:
    v8::ExternalResourceVisitor* visitor_;
  } external_string_table_visitor(visitor);

  external_string_table_.Iterate(&external_string_table_visitor);
}
class NewSpaceScavenger : public StaticNewSpaceVisitor<NewSpaceScavenger> {
 public:
  static inline void VisitPointer(Heap* heap, Object** p) {
    Object* object = *p;
    if (!heap->InNewSpace(object)) return;
    Heap::ScavengeObject(reinterpret_cast<HeapObject**>(p),
                         reinterpret_cast<HeapObject*>(object));
  }
};
Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
                         Address new_space_front) {
  do {
    SemiSpace::AssertValidRange(new_space_front, new_space_.top());
    // The addresses new_space_front and new_space_.top() define a
    // queue of unprocessed copied objects. Process them until the
    // queue is empty.
    while (new_space_front != new_space_.top()) {
      if (!NewSpacePage::IsAtEnd(new_space_front)) {
        HeapObject* object = HeapObject::FromAddress(new_space_front);
        new_space_front +=
            NewSpaceScavenger::IterateBody(object->map(), object);
      } else {
        new_space_front =
            NewSpacePage::FromLimit(new_space_front)->next_page()->
                area_start();
      }
    }

    // Promote and process all the to-be-promoted objects.
    {
      StoreBufferRebuildScope scope(this,
                                    store_buffer(),
                                    &ScavengeStoreBufferCallback);
      while (!promotion_queue()->is_empty()) {
        HeapObject* target;
        int size;
        promotion_queue()->remove(&target, &size);

        // Promoted object might be already partially visited
        // during old space pointer iteration. Thus we search specifically
        // for pointers to from semispace instead of looking for pointers
        // to new space.
        ASSERT(!target->IsMap());
        IterateAndMarkPointersToFromSpace(target->address(),
                                          target->address() + size,
                                          &ScavengeObject);
      }
    }

    // Take another spin if there are now unswept objects in new space
    // (there are currently no more unswept promoted objects).
  } while (new_space_front != new_space_.top());

  return new_space_front;
}
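// DoScavenge() is the driver loop of Cheney's algorithm: new_space_front is
// the scan pointer, new_space_.top() the allocation pointer, and the region
// between them the implicit breadth-first queue. A minimal sketch of the
// invariant (pseudocode in comments, not part of the build):
//
//   while (scan != alloc) {   // queue non-empty
//     obj = object_at(scan);
//     visit(obj);             // copying children bumps alloc
//     scan += size_of(obj);
//   }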
1812 STATIC_ASSERT((FixedDoubleArray::kHeaderSize &
1813 kDoubleAlignmentMask) == 0); // NOLINT
1814 STATIC_ASSERT((ConstantPoolArray::kFirstEntryOffset &
1815 kDoubleAlignmentMask) == 0); // NOLINT
1816 STATIC_ASSERT((ConstantPoolArray::kExtendedFirstOffset &
1817 kDoubleAlignmentMask) == 0); // NOLINT
1820 INLINE(static HeapObject* EnsureDoubleAligned(Heap* heap,
1821 HeapObject* object,
1822 int size));
1824 static HeapObject* EnsureDoubleAligned(Heap* heap,
1825 HeapObject* object,
1826 int size) {
1827 if ((OffsetFrom(object->address()) & kDoubleAlignmentMask) != 0) {
1828 heap->CreateFillerObjectAt(object->address(), kPointerSize);
1829 return HeapObject::FromAddress(object->address() + kPointerSize);
1830 } else {
1831 heap->CreateFillerObjectAt(object->address() + size - kPointerSize,
1832 kPointerSize);
1833 return object;
1834 }
1835 }
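// A standalone sketch of the alignment trick above, assuming the 32-bit
// configuration (4-byte pointers, 8-byte double alignment): one extra word
// is over-allocated, and whichever word the object does not use becomes a
// one-word filler.
#include <stdint.h>

static const uintptr_t kSketchPointerSize = 4;
static const uintptr_t kSketchDoubleAlignmentMask = 8 - 1;

static uintptr_t AlignAddressForDoubles(uintptr_t address) {
  if ((address & kSketchDoubleAlignmentMask) != 0) {
    // The leading word becomes a filler; the object starts one word later.
    return address + kSketchPointerSize;
  }
  // Already aligned; the trailing over-allocated word becomes the filler.
  return address;
}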
1838 enum LoggingAndProfiling {
1839 LOGGING_AND_PROFILING_ENABLED,
1840 LOGGING_AND_PROFILING_DISABLED
1844 enum MarksHandling { TRANSFER_MARKS, IGNORE_MARKS };
1847 template<MarksHandling marks_handling,
1848 LoggingAndProfiling logging_and_profiling_mode>
1849 class ScavengingVisitor : public StaticVisitorBase {
1851 static void Initialize() {
1852 table_.Register(kVisitSeqOneByteString, &EvacuateSeqOneByteString);
1853 table_.Register(kVisitSeqTwoByteString, &EvacuateSeqTwoByteString);
1854 table_.Register(kVisitShortcutCandidate, &EvacuateShortcutCandidate);
1855 table_.Register(kVisitByteArray, &EvacuateByteArray);
1856 table_.Register(kVisitFixedArray, &EvacuateFixedArray);
1857 table_.Register(kVisitFixedDoubleArray, &EvacuateFixedDoubleArray);
1858 table_.Register(kVisitFixedTypedArray, &EvacuateFixedTypedArray);
1859 table_.Register(kVisitFixedFloat64Array, &EvacuateFixedFloat64Array);
1861 table_.Register(kVisitNativeContext,
1862 &ObjectEvacuationStrategy<POINTER_OBJECT>::
1863 template VisitSpecialized<Context::kSize>);
1865 table_.Register(kVisitConsString,
1866 &ObjectEvacuationStrategy<POINTER_OBJECT>::
1867 template VisitSpecialized<ConsString::kSize>);
1869 table_.Register(kVisitSlicedString,
1870 &ObjectEvacuationStrategy<POINTER_OBJECT>::
1871 template VisitSpecialized<SlicedString::kSize>);
1873 table_.Register(kVisitSymbol,
1874 &ObjectEvacuationStrategy<POINTER_OBJECT>::
1875 template VisitSpecialized<Symbol::kSize>);
1877 table_.Register(kVisitSharedFunctionInfo,
1878 &ObjectEvacuationStrategy<POINTER_OBJECT>::
1879 template VisitSpecialized<SharedFunctionInfo::kSize>);
1881 table_.Register(kVisitJSWeakCollection,
1882 &ObjectEvacuationStrategy<POINTER_OBJECT>::
1885 table_.Register(kVisitJSArrayBuffer,
1886 &ObjectEvacuationStrategy<POINTER_OBJECT>::
1889 table_.Register(kVisitJSTypedArray,
1890 &ObjectEvacuationStrategy<POINTER_OBJECT>::
1893 table_.Register(kVisitJSDataView,
1894 &ObjectEvacuationStrategy<POINTER_OBJECT>::
1897 table_.Register(kVisitJSRegExp,
1898 &ObjectEvacuationStrategy<POINTER_OBJECT>::
1901 if (marks_handling == IGNORE_MARKS) {
1902 table_.Register(kVisitJSFunction,
1903 &ObjectEvacuationStrategy<POINTER_OBJECT>::
1904 template VisitSpecialized<JSFunction::kSize>);
1905 } else {
1906 table_.Register(kVisitJSFunction, &EvacuateJSFunction);
1907 }
1909 table_.RegisterSpecializations<ObjectEvacuationStrategy<DATA_OBJECT>,
1911 kVisitDataObjectGeneric>();
1913 table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>,
1915 kVisitJSObjectGeneric>();
1917 table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>,
1919 kVisitStructGeneric>();
1922 static VisitorDispatchTable<ScavengingCallback>* GetTable() {
1923 return &table_;
1924 }
1926 private:
1927 enum ObjectContents { DATA_OBJECT, POINTER_OBJECT };
1929 static void RecordCopiedObject(Heap* heap, HeapObject* obj) {
1930 bool should_record = false;
1931 #ifdef DEBUG
1932 should_record = FLAG_heap_stats;
1933 #endif
1934 should_record = should_record || FLAG_log_gc;
1935 if (should_record) {
1936 if (heap->new_space()->Contains(obj)) {
1937 heap->new_space()->RecordAllocation(obj);
1938 } else {
1939 heap->new_space()->RecordPromotion(obj);
1940 }
1941 }
1942 }
1944 // Helper function used during scavenging to copy a source object to a
1945 // freshly allocated target object and to update the forwarding pointer
1946 // in the source object.
1947 INLINE(static void MigrateObject(Heap* heap,
1948 HeapObject* source,
1949 HeapObject* target,
1950 int size)) {
1951 // Copy the content of source to target.
1952 heap->CopyBlock(target->address(), source->address(), size);
1954 // Set the forwarding address.
1955 source->set_map_word(MapWord::FromForwardingAddress(target));
1957 if (logging_and_profiling_mode == LOGGING_AND_PROFILING_ENABLED) {
1958 // Update NewSpace stats if necessary.
1959 RecordCopiedObject(heap, target);
1960 Isolate* isolate = heap->isolate();
1961 HeapProfiler* heap_profiler = isolate->heap_profiler();
1962 if (heap_profiler->is_tracking_object_moves()) {
1963 heap_profiler->ObjectMoveEvent(source->address(), target->address(),
1964 size);
1965 }
1966 if (isolate->logger()->is_logging_code_events() ||
1967 isolate->cpu_profiler()->is_profiling()) {
1968 if (target->IsSharedFunctionInfo()) {
1969 PROFILE(isolate, SharedFunctionInfoMoveEvent(
1970 source->address(), target->address()));
1971 }
1972 }
1973 }
1975 if (marks_handling == TRANSFER_MARKS) {
1976 if (Marking::TransferColor(source, target)) {
1977 MemoryChunk::IncrementLiveBytesFromGC(target->address(), size);
1978 }
1979 }
1980 }
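// A compact illustration (assumed layout, not V8's real object header) of
// the forwarding-pointer protocol MigrateObject implements: once the payload
// is copied, the source's first word is overwritten with the target address,
// so any visitor that later reaches the stale copy can be redirected.
#include <cstring>

struct SketchHeapObject {
  void* map_word;  // normally points to the map; becomes the forwarding ptr
  char payload[24];
};

static SketchHeapObject* SketchMigrate(SketchHeapObject* source,
                                       SketchHeapObject* target) {
  std::memcpy(target, source, sizeof(SketchHeapObject));  // copy the content
  source->map_word = target;  // install the forwarding address
  return target;
}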
1983 template<ObjectContents object_contents, int alignment>
1984 static inline void EvacuateObject(Map* map,
1985 HeapObject** slot,
1986 HeapObject* object,
1987 int object_size) {
1988 SLOW_ASSERT(object_size <= Page::kMaxRegularHeapObjectSize);
1989 SLOW_ASSERT(object->Size() == object_size);
1991 int allocation_size = object_size;
1992 if (alignment != kObjectAlignment) {
1993 ASSERT(alignment == kDoubleAlignment);
1994 allocation_size += kPointerSize;
1997 Heap* heap = map->GetHeap();
1998 if (heap->ShouldBePromoted(object->address(), object_size)) {
1999 AllocationResult allocation;
2001 if (object_contents == DATA_OBJECT) {
2002 ASSERT(heap->AllowedToBeMigrated(object, OLD_DATA_SPACE));
2003 allocation = heap->old_data_space()->AllocateRaw(allocation_size);
2004 } else {
2005 ASSERT(heap->AllowedToBeMigrated(object, OLD_POINTER_SPACE));
2006 allocation = heap->old_pointer_space()->AllocateRaw(allocation_size);
2008 }
2009 HeapObject* target = NULL; // Initialization to please compiler.
2010 if (allocation.To(&target)) {
2011 if (alignment != kObjectAlignment) {
2012 target = EnsureDoubleAligned(heap, target, allocation_size);
2013 }
2015 // Order is important: slot might be inside of the target if target
2016 // was allocated over a dead object and slot comes from the store
2017 // buffer.
2019 MigrateObject(heap, object, target, object_size);
2021 if (object_contents == POINTER_OBJECT) {
2022 if (map->instance_type() == JS_FUNCTION_TYPE) {
2023 heap->promotion_queue()->insert(
2024 target, JSFunction::kNonWeakFieldsEndOffset);
2025 } else {
2026 heap->promotion_queue()->insert(target, object_size);
2027 }
2028 }
2030 heap->IncrementPromotedObjectsSize(object_size);
2031 return;
2032 }
2033 }
2034 ASSERT(heap->AllowedToBeMigrated(object, NEW_SPACE));
2035 AllocationResult allocation =
2036 heap->new_space()->AllocateRaw(allocation_size);
2037 heap->promotion_queue()->SetNewLimit(heap->new_space()->top());
2038 HeapObject* target = HeapObject::cast(allocation.ToObjectChecked());
2040 if (alignment != kObjectAlignment) {
2041 target = EnsureDoubleAligned(heap, target, allocation_size);
2042 }
2044 // Order is important: slot might be inside of the target if target
2045 // was allocated over a dead object and slot comes from the store
2046 // buffer.
2048 MigrateObject(heap, object, target, object_size);
2049 heap->IncrementSemiSpaceCopiedObjectSize(object_size);
2050 }
2054 static inline void EvacuateJSFunction(Map* map,
2055 HeapObject** slot,
2056 HeapObject* object) {
2057 ObjectEvacuationStrategy<POINTER_OBJECT>::
2058 template VisitSpecialized<JSFunction::kSize>(map, slot, object);
2060 HeapObject* target = *slot;
2061 MarkBit mark_bit = Marking::MarkBitFrom(target);
2062 if (Marking::IsBlack(mark_bit)) {
2063 // This object is black, and it might not be rescanned by the marker.
2064 // We should explicitly record the code entry slot for compaction, because
2065 // promotion queue processing (IterateAndMarkPointersToFromSpace) will
2066 // miss it as it is not HeapObject-tagged.
2067 Address code_entry_slot =
2068 target->address() + JSFunction::kCodeEntryOffset;
2069 Code* code = Code::cast(Code::GetObjectFromEntryAddress(code_entry_slot));
2070 map->GetHeap()->mark_compact_collector()->
2071 RecordCodeEntrySlot(code_entry_slot, code);
2076 static inline void EvacuateFixedArray(Map* map,
2077 HeapObject** slot,
2078 HeapObject* object) {
2079 int object_size = FixedArray::BodyDescriptor::SizeOf(map, object);
2080 EvacuateObject<POINTER_OBJECT, kObjectAlignment>(
2081 map, slot, object, object_size);
2085 static inline void EvacuateFixedDoubleArray(Map* map,
2086 HeapObject** slot,
2087 HeapObject* object) {
2088 int length = reinterpret_cast<FixedDoubleArray*>(object)->length();
2089 int object_size = FixedDoubleArray::SizeFor(length);
2090 EvacuateObject<DATA_OBJECT, kDoubleAlignment>(
2091 map, slot, object, object_size);
2095 static inline void EvacuateFixedTypedArray(Map* map,
2096 HeapObject** slot,
2097 HeapObject* object) {
2098 int object_size = reinterpret_cast<FixedTypedArrayBase*>(object)->size();
2099 EvacuateObject<DATA_OBJECT, kObjectAlignment>(
2100 map, slot, object, object_size);
2104 static inline void EvacuateFixedFloat64Array(Map* map,
2105 HeapObject** slot,
2106 HeapObject* object) {
2107 int object_size = reinterpret_cast<FixedFloat64Array*>(object)->size();
2108 EvacuateObject<DATA_OBJECT, kDoubleAlignment>(
2109 map, slot, object, object_size);
2113 static inline void EvacuateByteArray(Map* map,
2114 HeapObject** slot,
2115 HeapObject* object) {
2116 int object_size = reinterpret_cast<ByteArray*>(object)->ByteArraySize();
2117 EvacuateObject<DATA_OBJECT, kObjectAlignment>(
2118 map, slot, object, object_size);
2122 static inline void EvacuateSeqOneByteString(Map* map,
2123 HeapObject** slot,
2124 HeapObject* object) {
2125 int object_size = SeqOneByteString::cast(object)->
2126 SeqOneByteStringSize(map->instance_type());
2127 EvacuateObject<DATA_OBJECT, kObjectAlignment>(
2128 map, slot, object, object_size);
2132 static inline void EvacuateSeqTwoByteString(Map* map,
2133 HeapObject** slot,
2134 HeapObject* object) {
2135 int object_size = SeqTwoByteString::cast(object)->
2136 SeqTwoByteStringSize(map->instance_type());
2137 EvacuateObject<DATA_OBJECT, kObjectAlignment>(
2138 map, slot, object, object_size);
2142 static inline bool IsShortcutCandidate(int type) {
2143 return ((type & kShortcutTypeMask) == kShortcutTypeTag);
2146 static inline void EvacuateShortcutCandidate(Map* map,
2147 HeapObject** slot,
2148 HeapObject* object) {
2149 ASSERT(IsShortcutCandidate(map->instance_type()));
2151 Heap* heap = map->GetHeap();
2153 if (marks_handling == IGNORE_MARKS &&
2154 ConsString::cast(object)->unchecked_second() ==
2155 heap->empty_string()) {
2156 HeapObject* first =
2157 HeapObject::cast(ConsString::cast(object)->unchecked_first());
2159 *slot = first;
2161 if (!heap->InNewSpace(first)) {
2162 object->set_map_word(MapWord::FromForwardingAddress(first));
2163 return;
2164 }
2166 MapWord first_word = first->map_word();
2167 if (first_word.IsForwardingAddress()) {
2168 HeapObject* target = first_word.ToForwardingAddress();
2170 *slot = target;
2171 object->set_map_word(MapWord::FromForwardingAddress(target));
2172 return;
2173 }
2175 heap->DoScavengeObject(first->map(), slot, first);
2176 object->set_map_word(MapWord::FromForwardingAddress(*slot));
2177 return;
2178 }
2180 int object_size = ConsString::kSize;
2181 EvacuateObject<POINTER_OBJECT, kObjectAlignment>(
2182 map, slot, object, object_size);
2183 }
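// A self-contained sketch of cons-string short-circuiting (hypothetical
// representation, not V8's ConsString layout): a cons node whose second half
// is the empty string contributes nothing, so references can be forwarded
// straight to its first half instead of evacuating the degenerate node.
#include <cstddef>

struct SketchString {
  const SketchString* first;   // set for cons nodes, NULL for flat strings
  const SketchString* second;  // empty-string sentinel marks a shortcut
};

static const SketchString* ShortcutConsString(const SketchString* s,
                                              const SketchString* empty) {
  while (s->first != NULL && s->second == empty) {
    s = s->first;  // skip degenerate cons nodes
  }
  return s;
}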
2185 template<ObjectContents object_contents>
2186 class ObjectEvacuationStrategy {
2187 public:
2188 template<int object_size>
2189 static inline void VisitSpecialized(Map* map,
2190 HeapObject** slot,
2191 HeapObject* object) {
2192 EvacuateObject<object_contents, kObjectAlignment>(
2193 map, slot, object, object_size);
2196 static inline void Visit(Map* map,
2197 HeapObject** slot,
2198 HeapObject* object) {
2199 int object_size = map->instance_size();
2200 EvacuateObject<object_contents, kObjectAlignment>(
2201 map, slot, object, object_size);
2205 static VisitorDispatchTable<ScavengingCallback> table_;
2209 template<MarksHandling marks_handling,
2210 LoggingAndProfiling logging_and_profiling_mode>
2211 VisitorDispatchTable<ScavengingCallback>
2212 ScavengingVisitor<marks_handling, logging_and_profiling_mode>::table_;
2215 static void InitializeScavengingVisitorsTables() {
2216 ScavengingVisitor<TRANSFER_MARKS,
2217 LOGGING_AND_PROFILING_DISABLED>::Initialize();
2218 ScavengingVisitor<IGNORE_MARKS, LOGGING_AND_PROFILING_DISABLED>::Initialize();
2219 ScavengingVisitor<TRANSFER_MARKS,
2220 LOGGING_AND_PROFILING_ENABLED>::Initialize();
2221 ScavengingVisitor<IGNORE_MARKS, LOGGING_AND_PROFILING_ENABLED>::Initialize();
2222 }
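// A minimal sketch of the technique above (illustrative names, not V8's
// types): each combination of compile-time flags gets its own statically
// initialized dispatch table, so the per-object hot path is a single indexed
// call with no runtime flag checks.
#include <cstdio>

typedef void (*SketchCallback)(const char* what);

template <bool logging_enabled>
class SketchVisitor {
 public:
  static void Initialize() { table_[0] = &VisitString; }
  static SketchCallback* GetTable() { return table_; }

 private:
  static void VisitString(const char* what) {
    if (logging_enabled) std::printf("visited %s\n", what);
  }
  static SketchCallback table_[1];
};

template <bool logging_enabled>
SketchCallback SketchVisitor<logging_enabled>::table_[1];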
2225 void Heap::SelectScavengingVisitorsTable() {
2226 bool logging_and_profiling =
2227 isolate()->logger()->is_logging() ||
2228 isolate()->cpu_profiler()->is_profiling() ||
2229 (isolate()->heap_profiler() != NULL &&
2230 isolate()->heap_profiler()->is_tracking_object_moves());
2232 if (!incremental_marking()->IsMarking()) {
2233 if (!logging_and_profiling) {
2234 scavenging_visitors_table_.CopyFrom(
2235 ScavengingVisitor<IGNORE_MARKS,
2236 LOGGING_AND_PROFILING_DISABLED>::GetTable());
2238 scavenging_visitors_table_.CopyFrom(
2239 ScavengingVisitor<IGNORE_MARKS,
2240 LOGGING_AND_PROFILING_ENABLED>::GetTable());
2243 if (!logging_and_profiling) {
2244 scavenging_visitors_table_.CopyFrom(
2245 ScavengingVisitor<TRANSFER_MARKS,
2246 LOGGING_AND_PROFILING_DISABLED>::GetTable());
2248 scavenging_visitors_table_.CopyFrom(
2249 ScavengingVisitor<TRANSFER_MARKS,
2250 LOGGING_AND_PROFILING_ENABLED>::GetTable());
2253 if (incremental_marking()->IsCompacting()) {
2254 // When compacting, forbid short-circuiting of cons strings:
2255 // the scavenging code relies on the fact that a new space object
2256 // cannot be evacuated into an evacuation candidate, and
2257 // short-circuiting would violate this assumption.
2258 scavenging_visitors_table_.Register(
2259 StaticVisitorBase::kVisitShortcutCandidate,
2260 scavenging_visitors_table_.GetVisitorById(
2261 StaticVisitorBase::kVisitConsString));
2267 void Heap::ScavengeObjectSlow(HeapObject** p, HeapObject* object) {
2268 SLOW_ASSERT(object->GetIsolate()->heap()->InFromSpace(object));
2269 MapWord first_word = object->map_word();
2270 SLOW_ASSERT(!first_word.IsForwardingAddress());
2271 Map* map = first_word.ToMap();
2272 map->GetHeap()->DoScavengeObject(map, p, object);
2276 AllocationResult Heap::AllocatePartialMap(InstanceType instance_type,
2277 int instance_size) {
2278 HeapObject* result;
2279 AllocationResult allocation = AllocateRaw(Map::kSize, MAP_SPACE, MAP_SPACE);
2280 if (!allocation.To(&result)) return allocation;
2282 // Map::cast cannot be used due to uninitialized map field.
2283 reinterpret_cast<Map*>(result)->set_map(raw_unchecked_meta_map());
2284 reinterpret_cast<Map*>(result)->set_instance_type(instance_type);
2285 reinterpret_cast<Map*>(result)->set_instance_size(instance_size);
2286 reinterpret_cast<Map*>(result)->set_visitor_id(
2287 StaticVisitorBase::GetVisitorId(instance_type, instance_size));
2288 reinterpret_cast<Map*>(result)->set_inobject_properties(0);
2289 reinterpret_cast<Map*>(result)->set_pre_allocated_property_fields(0);
2290 reinterpret_cast<Map*>(result)->set_unused_property_fields(0);
2291 reinterpret_cast<Map*>(result)->set_bit_field(0);
2292 reinterpret_cast<Map*>(result)->set_bit_field2(0);
2293 int bit_field3 = Map::EnumLengthBits::encode(kInvalidEnumCacheSentinel) |
2294 Map::OwnsDescriptors::encode(true);
2295 reinterpret_cast<Map*>(result)->set_bit_field3(bit_field3);
2296 return result;
2297 }
2300 AllocationResult Heap::AllocateMap(InstanceType instance_type,
2301 int instance_size,
2302 ElementsKind elements_kind) {
2303 HeapObject* result;
2304 AllocationResult allocation = AllocateRaw(Map::kSize, MAP_SPACE, MAP_SPACE);
2305 if (!allocation.To(&result)) return allocation;
2307 result->set_map_no_write_barrier(meta_map());
2308 Map* map = Map::cast(result);
2309 map->set_instance_type(instance_type);
2310 map->set_visitor_id(
2311 StaticVisitorBase::GetVisitorId(instance_type, instance_size));
2312 map->set_prototype(null_value(), SKIP_WRITE_BARRIER);
2313 map->set_constructor(null_value(), SKIP_WRITE_BARRIER);
2314 map->set_instance_size(instance_size);
2315 map->set_inobject_properties(0);
2316 map->set_pre_allocated_property_fields(0);
2317 map->set_code_cache(empty_fixed_array(), SKIP_WRITE_BARRIER);
2318 map->set_dependent_code(DependentCode::cast(empty_fixed_array()),
2319 SKIP_WRITE_BARRIER);
2320 map->init_back_pointer(undefined_value());
2321 map->set_unused_property_fields(0);
2322 map->set_instance_descriptors(empty_descriptor_array());
2323 map->set_bit_field(0);
2324 map->set_bit_field2(1 << Map::kIsExtensible);
2325 int bit_field3 = Map::EnumLengthBits::encode(kInvalidEnumCacheSentinel) |
2326 Map::OwnsDescriptors::encode(true);
2327 map->set_bit_field3(bit_field3);
2328 map->set_elements_kind(elements_kind);
2330 return map;
2331 }
2334 AllocationResult Heap::AllocateFillerObject(int size,
2335 bool double_align,
2336 AllocationSpace space) {
2337 HeapObject* obj;
2338 { AllocationResult allocation = AllocateRaw(size, space, space);
2339 if (!allocation.To(&obj)) return allocation;
2340 }
2341 #ifdef DEBUG
2342 MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
2343 ASSERT(chunk->owner()->identity() == space);
2344 #endif
2345 CreateFillerObjectAt(obj->address(), size);
2346 return obj;
2347 }
2350 const Heap::StringTypeTable Heap::string_type_table[] = {
2351 #define STRING_TYPE_ELEMENT(type, size, name, camel_name) \
2352 {type, size, k##camel_name##MapRootIndex},
2353 STRING_TYPE_LIST(STRING_TYPE_ELEMENT)
2354 #undef STRING_TYPE_ELEMENT
2355 };
2358 const Heap::ConstantStringTable Heap::constant_string_table[] = {
2359 #define CONSTANT_STRING_ELEMENT(name, contents) \
2360 {contents, k##name##RootIndex},
2361 INTERNALIZED_STRING_LIST(CONSTANT_STRING_ELEMENT)
2362 #undef CONSTANT_STRING_ELEMENT
2363 };
2366 const Heap::StructTable Heap::struct_table[] = {
2367 #define STRUCT_TABLE_ELEMENT(NAME, Name, name) \
2368 { NAME##_TYPE, Name::kSize, k##Name##MapRootIndex },
2369 STRUCT_LIST(STRUCT_TABLE_ELEMENT)
2370 #undef STRUCT_TABLE_ELEMENT
2371 };
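// The three tables above are built with the "X macro" pattern: a list macro
// enumerates the entries once, and each consumer re-expands the list with
// its own element macro. A tiny self-contained example of the same pattern
// (hypothetical names):
#define SKETCH_COLOR_LIST(V) \
  V(RED, 0xFF0000)           \
  V(GREEN, 0x00FF00)         \
  V(BLUE, 0x0000FF)

struct SketchColorEntry { const char* name; int rgb; };

static const SketchColorEntry kSketchColors[] = {
#define SKETCH_COLOR_ELEMENT(name, rgb) { #name, rgb },
  SKETCH_COLOR_LIST(SKETCH_COLOR_ELEMENT)
#undef SKETCH_COLOR_ELEMENT
};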
2374 bool Heap::CreateInitialMaps() {
2376 { AllocationResult allocation = AllocatePartialMap(MAP_TYPE, Map::kSize);
2377 if (!allocation.To(&obj)) return false;
2379 // Map::cast cannot be used due to uninitialized map field.
2380 Map* new_meta_map = reinterpret_cast<Map*>(obj);
2381 set_meta_map(new_meta_map);
2382 new_meta_map->set_map(new_meta_map);
2384 { // Partial map allocation
2385 #define ALLOCATE_PARTIAL_MAP(instance_type, size, field_name) \
2386 { Map* map; \
2387 if (!AllocatePartialMap((instance_type), (size)).To(&map)) return false; \
2388 set_##field_name##_map(map); \
2389 }
2391 ALLOCATE_PARTIAL_MAP(FIXED_ARRAY_TYPE, kVariableSizeSentinel, fixed_array);
2392 ALLOCATE_PARTIAL_MAP(ODDBALL_TYPE, Oddball::kSize, undefined);
2393 ALLOCATE_PARTIAL_MAP(ODDBALL_TYPE, Oddball::kSize, null);
2394 ALLOCATE_PARTIAL_MAP(CONSTANT_POOL_ARRAY_TYPE, kVariableSizeSentinel,
2395 constant_pool_array);
2397 #undef ALLOCATE_PARTIAL_MAP
2400 // Allocate the empty array.
2401 { AllocationResult allocation = AllocateEmptyFixedArray();
2402 if (!allocation.To(&obj)) return false;
2404 set_empty_fixed_array(FixedArray::cast(obj));
2406 { AllocationResult allocation = Allocate(null_map(), OLD_POINTER_SPACE);
2407 if (!allocation.To(&obj)) return false;
2409 set_null_value(Oddball::cast(obj));
2410 Oddball::cast(obj)->set_kind(Oddball::kNull);
2412 { AllocationResult allocation = Allocate(undefined_map(), OLD_POINTER_SPACE);
2413 if (!allocation.To(&obj)) return false;
2415 set_undefined_value(Oddball::cast(obj));
2416 Oddball::cast(obj)->set_kind(Oddball::kUndefined);
2417 ASSERT(!InNewSpace(undefined_value()));
2419 // Set preliminary exception sentinel value before actually initializing it.
2420 set_exception(null_value());
2422 // Allocate the empty descriptor array.
2423 { AllocationResult allocation = AllocateEmptyFixedArray();
2424 if (!allocation.To(&obj)) return false;
2426 set_empty_descriptor_array(DescriptorArray::cast(obj));
2428 // Allocate the constant pool array.
2429 { AllocationResult allocation = AllocateEmptyConstantPoolArray();
2430 if (!allocation.To(&obj)) return false;
2432 set_empty_constant_pool_array(ConstantPoolArray::cast(obj));
2434 // Fix the instance_descriptors for the existing maps.
2435 meta_map()->set_code_cache(empty_fixed_array());
2436 meta_map()->set_dependent_code(DependentCode::cast(empty_fixed_array()));
2437 meta_map()->init_back_pointer(undefined_value());
2438 meta_map()->set_instance_descriptors(empty_descriptor_array());
2440 fixed_array_map()->set_code_cache(empty_fixed_array());
2441 fixed_array_map()->set_dependent_code(
2442 DependentCode::cast(empty_fixed_array()));
2443 fixed_array_map()->init_back_pointer(undefined_value());
2444 fixed_array_map()->set_instance_descriptors(empty_descriptor_array());
2446 undefined_map()->set_code_cache(empty_fixed_array());
2447 undefined_map()->set_dependent_code(DependentCode::cast(empty_fixed_array()));
2448 undefined_map()->init_back_pointer(undefined_value());
2449 undefined_map()->set_instance_descriptors(empty_descriptor_array());
2451 null_map()->set_code_cache(empty_fixed_array());
2452 null_map()->set_dependent_code(DependentCode::cast(empty_fixed_array()));
2453 null_map()->init_back_pointer(undefined_value());
2454 null_map()->set_instance_descriptors(empty_descriptor_array());
2456 constant_pool_array_map()->set_code_cache(empty_fixed_array());
2457 constant_pool_array_map()->set_dependent_code(
2458 DependentCode::cast(empty_fixed_array()));
2459 constant_pool_array_map()->init_back_pointer(undefined_value());
2460 constant_pool_array_map()->set_instance_descriptors(empty_descriptor_array());
2462 // Fix prototype object for existing maps.
2463 meta_map()->set_prototype(null_value());
2464 meta_map()->set_constructor(null_value());
2466 fixed_array_map()->set_prototype(null_value());
2467 fixed_array_map()->set_constructor(null_value());
2469 undefined_map()->set_prototype(null_value());
2470 undefined_map()->set_constructor(null_value());
2472 null_map()->set_prototype(null_value());
2473 null_map()->set_constructor(null_value());
2475 constant_pool_array_map()->set_prototype(null_value());
2476 constant_pool_array_map()->set_constructor(null_value());
2479 #define ALLOCATE_MAP(instance_type, size, field_name) \
2480 { Map* map; \
2481 if (!AllocateMap((instance_type), size).To(&map)) return false; \
2482 set_##field_name##_map(map); \
2483 }
2485 #define ALLOCATE_VARSIZE_MAP(instance_type, field_name) \
2486 ALLOCATE_MAP(instance_type, kVariableSizeSentinel, field_name)
2488 ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, fixed_cow_array)
2489 ASSERT(fixed_array_map() != fixed_cow_array_map());
2491 ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, scope_info)
2492 ALLOCATE_MAP(HEAP_NUMBER_TYPE, HeapNumber::kSize, heap_number)
2493 ALLOCATE_MAP(SYMBOL_TYPE, Symbol::kSize, symbol)
2494 ALLOCATE_MAP(FOREIGN_TYPE, Foreign::kSize, foreign)
2496 ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, the_hole);
2497 ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, boolean);
2498 ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, uninitialized);
2499 ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, arguments_marker);
2500 ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, no_interceptor_result_sentinel);
2501 ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, exception);
2502 ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, termination_exception);
2504 for (unsigned i = 0; i < ARRAY_SIZE(string_type_table); i++) {
2505 const StringTypeTable& entry = string_type_table[i];
2506 { AllocationResult allocation = AllocateMap(entry.type, entry.size);
2507 if (!allocation.To(&obj)) return false;
2509 // Mark cons string maps as unstable, because their objects can change
2510 // maps during GC.
2511 Map* map = Map::cast(obj);
2512 if (StringShape(entry.type).IsCons()) map->mark_unstable();
2513 roots_[entry.index] = map;
2516 ALLOCATE_VARSIZE_MAP(STRING_TYPE, undetectable_string)
2517 undetectable_string_map()->set_is_undetectable();
2519 ALLOCATE_VARSIZE_MAP(ASCII_STRING_TYPE, undetectable_ascii_string);
2520 undetectable_ascii_string_map()->set_is_undetectable();
2522 ALLOCATE_VARSIZE_MAP(FIXED_DOUBLE_ARRAY_TYPE, fixed_double_array)
2523 ALLOCATE_VARSIZE_MAP(BYTE_ARRAY_TYPE, byte_array)
2524 ALLOCATE_VARSIZE_MAP(FREE_SPACE_TYPE, free_space)
2526 #define ALLOCATE_EXTERNAL_ARRAY_MAP(Type, type, TYPE, ctype, size) \
2527 ALLOCATE_MAP(EXTERNAL_##TYPE##_ARRAY_TYPE, ExternalArray::kAlignedSize, \
2528 external_##type##_array)
2530 TYPED_ARRAYS(ALLOCATE_EXTERNAL_ARRAY_MAP)
2531 #undef ALLOCATE_EXTERNAL_ARRAY_MAP
2533 #define ALLOCATE_FIXED_TYPED_ARRAY_MAP(Type, type, TYPE, ctype, size) \
2534 ALLOCATE_VARSIZE_MAP(FIXED_##TYPE##_ARRAY_TYPE, \
2535 fixed_##type##_array)
2537 TYPED_ARRAYS(ALLOCATE_FIXED_TYPED_ARRAY_MAP)
2538 #undef ALLOCATE_FIXED_TYPED_ARRAY_MAP
2540 ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, sloppy_arguments_elements)
2542 ALLOCATE_VARSIZE_MAP(CODE_TYPE, code)
2544 ALLOCATE_MAP(CELL_TYPE, Cell::kSize, cell)
2545 ALLOCATE_MAP(PROPERTY_CELL_TYPE, PropertyCell::kSize, global_property_cell)
2546 ALLOCATE_MAP(FILLER_TYPE, kPointerSize, one_pointer_filler)
2547 ALLOCATE_MAP(FILLER_TYPE, 2 * kPointerSize, two_pointer_filler)
2550 for (unsigned i = 0; i < ARRAY_SIZE(struct_table); i++) {
2551 const StructTable& entry = struct_table[i];
2552 Map* map;
2553 if (!AllocateMap(entry.type, entry.size).To(&map))
2554 return false;
2555 roots_[entry.index] = map;
2556 }
2558 ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, hash_table)
2559 ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, ordered_hash_table)
2561 ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, function_context)
2562 ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, catch_context)
2563 ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, with_context)
2564 ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, block_context)
2565 ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, module_context)
2566 ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, global_context)
2568 ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, native_context)
2569 native_context_map()->set_dictionary_map(true);
2570 native_context_map()->set_visitor_id(
2571 StaticVisitorBase::kVisitNativeContext);
2573 ALLOCATE_MAP(SHARED_FUNCTION_INFO_TYPE, SharedFunctionInfo::kAlignedSize,
2574 shared_function_info)
2576 ALLOCATE_MAP(JS_MESSAGE_OBJECT_TYPE, JSMessageObject::kSize,
2577 message_object)
2578 ALLOCATE_MAP(JS_OBJECT_TYPE, JSObject::kHeaderSize + kPointerSize,
2579 external)
2580 external_map()->set_is_extensible(false);
2581 #undef ALLOCATE_VARSIZE_MAP
2582 #undef ALLOCATE_MAP
2583 }
2586 { ByteArray* byte_array;
2587 if (!AllocateByteArray(0, TENURED).To(&byte_array)) return false;
2588 set_empty_byte_array(byte_array);
2591 #define ALLOCATE_EMPTY_EXTERNAL_ARRAY(Type, type, TYPE, ctype, size) \
2592 { ExternalArray* obj; \
2593 if (!AllocateEmptyExternalArray(kExternal##Type##Array).To(&obj)) \
2595 set_empty_external_##type##_array(obj); \
2598 TYPED_ARRAYS(ALLOCATE_EMPTY_EXTERNAL_ARRAY)
2599 #undef ALLOCATE_EMPTY_EXTERNAL_ARRAY
2601 #define ALLOCATE_EMPTY_FIXED_TYPED_ARRAY(Type, type, TYPE, ctype, size) \
2602 { FixedTypedArrayBase* obj; \
2603 if (!AllocateEmptyFixedTypedArray(kExternal##Type##Array).To(&obj)) \
2605 set_empty_fixed_##type##_array(obj); \
2608 TYPED_ARRAYS(ALLOCATE_EMPTY_FIXED_TYPED_ARRAY)
2609 #undef ALLOCATE_EMPTY_FIXED_TYPED_ARRAY
2611 ASSERT(!InNewSpace(empty_fixed_array()));
2612 return true;
2613 }
2616 AllocationResult Heap::AllocateHeapNumber(double value,
2617 PretenureFlag pretenure) {
2618 // Statically ensure that it is safe to allocate heap numbers in paged
2619 // spaces.
2620 int size = HeapNumber::kSize;
2621 STATIC_ASSERT(HeapNumber::kSize <= Page::kMaxRegularHeapObjectSize);
2623 AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
2625 HeapObject* result;
2626 { AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
2627 if (!allocation.To(&result)) return allocation;
2628 }
2630 result->set_map_no_write_barrier(heap_number_map());
2631 HeapNumber::cast(result)->set_value(value);
2632 return result;
2633 }
2636 #define SIMD128_HEAP_ALLOCATE_FUNCTIONS(V) \
2637 V(Float32x4, float32x4) \
2638 V(Float64x2, float64x2) \
2642 #define DECLARE_SIMD_HEAP_ALLOCATE_FUNCTION(TYPE, type) \
2643 AllocationResult Heap::Allocate##TYPE(type##_value_t value, \
2644 PretenureFlag pretenure) { \
2645 STATIC_ASSERT(TYPE::kSize <= Page::kMaxRegularHeapObjectSize); \
2647 AllocationSpace space = \
2648 SelectSpace(TYPE::kSize, OLD_DATA_SPACE, pretenure); \
2650 HeapObject* result; \
2651 { AllocationResult allocation = \
2652 AllocateRaw(TYPE::kSize, space, OLD_DATA_SPACE); \
2653 if (!allocation.To(&result)) return allocation; \
2656 result->set_map_no_write_barrier( \
2657 isolate()->native_context()->type##_function()->initial_map()); \
2658 JSObject::cast(result)->set_properties(empty_fixed_array()); \
2659 JSObject::cast(result)->set_elements(empty_fixed_array()); \
2661 HeapObject* storage; \
2662 int storage_size = \
2663 FixedTypedArrayBase::kDataOffset + k##TYPE##Size; \
2664 space = SelectSpace(storage_size, OLD_DATA_SPACE, pretenure); \
2665 { AllocationResult allocation = \
2666 AllocateRaw(storage_size, space, OLD_DATA_SPACE); \
2667 if (!allocation.To(&storage)) return allocation; \
2671 *isolate()->factory()->fixed_##type##_array_map()); \
2672 FixedTypedArrayBase* elements = FixedTypedArrayBase::cast(storage); \
2673 elements->set_length(static_cast<int>(1)); \
2674 memset(elements->DataPtr(), 0, elements->DataSize()); \
2675 Fixed##TYPE##Array::cast(storage)->set(0, value); \
2676 TYPE::cast(result)->set_value(storage); \
2681 SIMD128_HEAP_ALLOCATE_FUNCTIONS(DECLARE_SIMD_HEAP_ALLOCATE_FUNCTION)
2684 AllocationResult Heap::AllocateCell(Object* value) {
2685 int size = Cell::kSize;
2686 STATIC_ASSERT(Cell::kSize <= Page::kMaxRegularHeapObjectSize);
2688 HeapObject* result;
2689 { AllocationResult allocation = AllocateRaw(size, CELL_SPACE, CELL_SPACE);
2690 if (!allocation.To(&result)) return allocation;
2691 }
2692 result->set_map_no_write_barrier(cell_map());
2693 Cell::cast(result)->set_value(value);
2694 return result;
2695 }
2698 AllocationResult Heap::AllocatePropertyCell() {
2699 int size = PropertyCell::kSize;
2700 STATIC_ASSERT(PropertyCell::kSize <= Page::kMaxRegularHeapObjectSize);
2702 HeapObject* result;
2703 AllocationResult allocation =
2704 AllocateRaw(size, PROPERTY_CELL_SPACE, PROPERTY_CELL_SPACE);
2705 if (!allocation.To(&result)) return allocation;
2707 result->set_map_no_write_barrier(global_property_cell_map());
2708 PropertyCell* cell = PropertyCell::cast(result);
2709 cell->set_dependent_code(DependentCode::cast(empty_fixed_array()),
2710 SKIP_WRITE_BARRIER);
2711 cell->set_value(the_hole_value());
2712 cell->set_type(HeapType::None());
2713 return result;
2714 }
2717 void Heap::CreateApiObjects() {
2718 HandleScope scope(isolate());
2719 Factory* factory = isolate()->factory();
2720 Handle<Map> new_neander_map =
2721 factory->NewMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
2723 // Don't use Smi-only elements optimizations for objects with the neander
2724 // map. There are too many cases where element values are set directly,
2725 // with no bottleneck to trap the Smi-only -> fast elements transition, and
2726 // there appears to be no benefit from optimizing this case.
2727 new_neander_map->set_elements_kind(TERMINAL_FAST_ELEMENTS_KIND);
2728 set_neander_map(*new_neander_map);
2730 Handle<JSObject> listeners = factory->NewNeanderObject();
2731 Handle<FixedArray> elements = factory->NewFixedArray(2);
2732 elements->set(0, Smi::FromInt(0));
2733 listeners->set_elements(*elements);
2734 set_message_listeners(*listeners);
2738 void Heap::CreateJSEntryStub() {
2739 JSEntryStub stub(isolate());
2740 set_js_entry_code(*stub.GetCode());
2744 void Heap::CreateJSConstructEntryStub() {
2745 JSConstructEntryStub stub(isolate());
2746 set_js_construct_entry_code(*stub.GetCode());
2750 void Heap::CreateFixedStubs() {
2751 // Here we create roots for fixed stubs. They are needed at GC
2752 // for cooking and uncooking (check out frames.cc).
2753 // This eliminates the need for doing dictionary lookups in the
2754 // stub cache for these stubs.
2755 HandleScope scope(isolate());
2757 // Create stubs that should be there, so we don't unexpectedly have to
2758 // create them if we need them during the creation of another stub.
2759 // Stub creation mixes raw pointers and handles in an unsafe manner so
2760 // we cannot create stubs while we are creating stubs.
2761 CodeStub::GenerateStubsAheadOfTime(isolate());
2763 // MacroAssembler::Abort calls (usually enabled with --debug-code) depend on
2764 // CEntryStub, so we need to call GenerateStubsAheadOfTime before JSEntryStub
2765 // is created.
2767 // gcc-4.4 has a problem generating correct code for the following snippet:
2768 // { JSEntryStub stub;
2769 // js_entry_code_ = *stub.GetCode();
2771 // { JSConstructEntryStub stub;
2772 // js_construct_entry_code_ = *stub.GetCode();
2774 // To work around the problem, make separate functions without inlining.
2775 Heap::CreateJSEntryStub();
2776 Heap::CreateJSConstructEntryStub();
2780 void Heap::CreateInitialObjects() {
2781 HandleScope scope(isolate());
2782 Factory* factory = isolate()->factory();
2784 // The -0 value must be set before NumberFromDouble works.
2785 set_minus_zero_value(*factory->NewHeapNumber(-0.0, TENURED));
2786 ASSERT(std::signbit(minus_zero_value()->Number()) != 0);
2788 set_nan_value(*factory->NewHeapNumber(OS::nan_value(), TENURED));
2789 set_infinity_value(*factory->NewHeapNumber(V8_INFINITY, TENURED));
2791 // The hole has not been created yet, but we want to put something
2792 // predictable in the gaps in the string table, so let's make that Smi zero.
2793 set_the_hole_value(reinterpret_cast<Oddball*>(Smi::FromInt(0)));
2795 // Allocate initial string table.
2796 set_string_table(*StringTable::New(isolate(), kInitialStringTableSize));
2798 // Finish initializing oddballs after creating the string table.
2799 Oddball::Initialize(isolate(),
2800 factory->undefined_value(),
2802 factory->nan_value(),
2803 Oddball::kUndefined);
2805 // Initialize the null_value.
2806 Oddball::Initialize(isolate(),
2807 factory->null_value(),
2809 handle(Smi::FromInt(0), isolate()),
2812 set_true_value(*factory->NewOddball(factory->boolean_map(),
2814 handle(Smi::FromInt(1), isolate()),
2817 set_false_value(*factory->NewOddball(factory->boolean_map(),
2819 handle(Smi::FromInt(0), isolate()),
2822 set_the_hole_value(*factory->NewOddball(factory->the_hole_map(),
2824 handle(Smi::FromInt(-1), isolate()),
2825 Oddball::kTheHole));
2827 set_uninitialized_value(
2828 *factory->NewOddball(factory->uninitialized_map(),
2830 handle(Smi::FromInt(-1), isolate()),
2831 Oddball::kUninitialized));
2833 set_arguments_marker(*factory->NewOddball(factory->arguments_marker_map(),
2835 handle(Smi::FromInt(-4), isolate()),
2836 Oddball::kArgumentMarker));
2838 set_no_interceptor_result_sentinel(
2839 *factory->NewOddball(factory->no_interceptor_result_sentinel_map(),
2840 "no_interceptor_result_sentinel",
2841 handle(Smi::FromInt(-2), isolate()),
2844 set_termination_exception(
2845 *factory->NewOddball(factory->termination_exception_map(),
2846 "termination_exception",
2847 handle(Smi::FromInt(-3), isolate()),
2851 *factory->NewOddball(factory->exception_map(),
2853 handle(Smi::FromInt(-5), isolate()),
2854 Oddball::kException));
2856 for (unsigned i = 0; i < ARRAY_SIZE(constant_string_table); i++) {
2857 Handle<String> str =
2858 factory->InternalizeUtf8String(constant_string_table[i].contents);
2859 roots_[constant_string_table[i].index] = *str;
2862 // Allocate the hidden string which is used to identify the hidden properties
2863 // in JSObjects. The hash code has a special value so that it will not match
2864 // the empty string when searching for the property. It cannot be part of the
2865 // loop above because it needs to be allocated manually with the special
2866 // hash code in place. The hash code for the hidden_string is zero to ensure
2867 // that it will always be at the first entry in property descriptors.
2868 hidden_string_ = *factory->NewOneByteInternalizedString(
2869 OneByteVector("", 0), String::kEmptyStringHash);
2871 // Create the code_stubs dictionary. The initial size is set to avoid
2872 // expanding the dictionary during bootstrapping.
2873 set_code_stubs(*UnseededNumberDictionary::New(isolate(), 128));
2875 // Create the non_monomorphic_cache used in stub-cache.cc. The initial size
2876 // is set to avoid expanding the dictionary during bootstrapping.
2877 set_non_monomorphic_cache(*UnseededNumberDictionary::New(isolate(), 64));
2879 set_polymorphic_code_cache(PolymorphicCodeCache::cast(
2880 *factory->NewStruct(POLYMORPHIC_CODE_CACHE_TYPE)));
2882 set_instanceof_cache_function(Smi::FromInt(0));
2883 set_instanceof_cache_map(Smi::FromInt(0));
2884 set_instanceof_cache_answer(Smi::FromInt(0));
2888 // Allocate the dictionary of intrinsic function names.
2889 Handle<NameDictionary> intrinsic_names =
2890 NameDictionary::New(isolate(), Runtime::kNumFunctions);
2891 Runtime::InitializeIntrinsicFunctionNames(isolate(), intrinsic_names);
2892 set_intrinsic_function_names(*intrinsic_names);
2894 set_number_string_cache(*factory->NewFixedArray(
2895 kInitialNumberStringCacheSize * 2, TENURED));
2897 // Allocate cache for single character one byte strings.
2898 set_single_character_string_cache(*factory->NewFixedArray(
2899 String::kMaxOneByteCharCode + 1, TENURED));
2901 // Allocate cache for string split and regexp-multiple.
2902 set_string_split_cache(*factory->NewFixedArray(
2903 RegExpResultsCache::kRegExpResultsCacheSize, TENURED));
2904 set_regexp_multiple_cache(*factory->NewFixedArray(
2905 RegExpResultsCache::kRegExpResultsCacheSize, TENURED));
2907 // Allocate cache for external strings pointing to native source code.
2908 set_natives_source_cache(*factory->NewFixedArray(
2909 Natives::GetBuiltinsCount()));
2911 set_undefined_cell(*factory->NewCell(factory->undefined_value()));
2913 // The symbol registry is initialized lazily.
2914 set_symbol_registry(undefined_value());
2916 // Allocate object to hold object observation state.
2917 set_observation_state(*factory->NewJSObjectFromMap(
2918 factory->NewMap(JS_OBJECT_TYPE, JSObject::kHeaderSize)));
2920 // Microtask queue uses the empty fixed array as a sentinel for "empty".
2921 // Number of queued microtasks stored in Isolate::pending_microtask_count().
2922 set_microtask_queue(empty_fixed_array());
2924 set_frozen_symbol(*factory->NewPrivateSymbol());
2925 set_nonexistent_symbol(*factory->NewPrivateSymbol());
2926 set_elements_transition_symbol(*factory->NewPrivateSymbol());
2927 set_uninitialized_symbol(*factory->NewPrivateSymbol());
2928 set_megamorphic_symbol(*factory->NewPrivateSymbol());
2929 set_observed_symbol(*factory->NewPrivateSymbol());
2931 Handle<SeededNumberDictionary> slow_element_dictionary =
2932 SeededNumberDictionary::New(isolate(), 0, TENURED);
2933 slow_element_dictionary->set_requires_slow_elements();
2934 set_empty_slow_element_dictionary(*slow_element_dictionary);
2936 set_materialized_objects(*factory->NewFixedArray(0, TENURED));
2938 // Handling of script id generation is in Factory::NewScript.
2939 set_last_script_id(Smi::FromInt(v8::UnboundScript::kNoScriptId));
2941 set_allocation_sites_scratchpad(*factory->NewFixedArray(
2942 kAllocationSiteScratchpadSize, TENURED));
2943 InitializeAllocationSitesScratchpad();
2945 // Initialize keyed lookup cache.
2946 isolate_->keyed_lookup_cache()->Clear();
2948 // Initialize context slot cache.
2949 isolate_->context_slot_cache()->Clear();
2951 // Initialize descriptor cache.
2952 isolate_->descriptor_lookup_cache()->Clear();
2954 // Initialize compilation cache.
2955 isolate_->compilation_cache()->Clear();
2959 bool Heap::RootCanBeWrittenAfterInitialization(Heap::RootListIndex root_index) {
2960 RootListIndex writable_roots[] = {
2961 kStoreBufferTopRootIndex,
2962 kStackLimitRootIndex,
2963 kNumberStringCacheRootIndex,
2964 kInstanceofCacheFunctionRootIndex,
2965 kInstanceofCacheMapRootIndex,
2966 kInstanceofCacheAnswerRootIndex,
2967 kCodeStubsRootIndex,
2968 kNonMonomorphicCacheRootIndex,
2969 kPolymorphicCodeCacheRootIndex,
2970 kLastScriptIdRootIndex,
2971 kEmptyScriptRootIndex,
2972 kRealStackLimitRootIndex,
2973 kArgumentsAdaptorDeoptPCOffsetRootIndex,
2974 kConstructStubDeoptPCOffsetRootIndex,
2975 kGetterStubDeoptPCOffsetRootIndex,
2976 kSetterStubDeoptPCOffsetRootIndex,
2977 kStringTableRootIndex,
2978 };
2980 for (unsigned int i = 0; i < ARRAY_SIZE(writable_roots); i++) {
2981 if (root_index == writable_roots[i])
2982 return true;
2983 }
2984 return false;
2985 }
2988 bool Heap::RootCanBeTreatedAsConstant(RootListIndex root_index) {
2989 return !RootCanBeWrittenAfterInitialization(root_index) &&
2990 !InNewSpace(roots_array_start()[root_index]);
2994 Object* RegExpResultsCache::Lookup(Heap* heap,
2995 String* key_string,
2996 Object* key_pattern,
2997 ResultsCacheType type) {
2998 FixedArray* cache;
2999 if (!key_string->IsInternalizedString()) return Smi::FromInt(0);
3000 if (type == STRING_SPLIT_SUBSTRINGS) {
3001 ASSERT(key_pattern->IsString());
3002 if (!key_pattern->IsInternalizedString()) return Smi::FromInt(0);
3003 cache = heap->string_split_cache();
3004 } else {
3005 ASSERT(type == REGEXP_MULTIPLE_INDICES);
3006 ASSERT(key_pattern->IsFixedArray());
3007 cache = heap->regexp_multiple_cache();
3008 }
3010 uint32_t hash = key_string->Hash();
3011 uint32_t index = ((hash & (kRegExpResultsCacheSize - 1)) &
3012 ~(kArrayEntriesPerCacheEntry - 1));
3013 if (cache->get(index + kStringOffset) == key_string &&
3014 cache->get(index + kPatternOffset) == key_pattern) {
3015 return cache->get(index + kArrayOffset);
3016 }
3017 index =
3018 ((index + kArrayEntriesPerCacheEntry) & (kRegExpResultsCacheSize - 1));
3019 if (cache->get(index + kStringOffset) == key_string &&
3020 cache->get(index + kPatternOffset) == key_pattern) {
3021 return cache->get(index + kArrayOffset);
3023 return Smi::FromInt(0);
3024 }
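// A standalone sketch of the two-bucket probing scheme used by the results
// cache: the hash picks a bucket-aligned primary index, and exactly one
// alternate bucket, a fixed stride away, is probed before giving up. The
// constants mirror kRegExpResultsCacheSize and kArrayEntriesPerCacheEntry,
// but their values here are assumptions.
#include <stdint.h>

static const uint32_t kSketchCacheSize = 256;       // must be a power of two
static const uint32_t kSketchEntriesPerBucket = 4;  // string/pattern/array

static uint32_t SketchPrimaryIndex(uint32_t hash) {
  return (hash & (kSketchCacheSize - 1)) & ~(kSketchEntriesPerBucket - 1);
}

static uint32_t SketchSecondaryIndex(uint32_t primary) {
  return (primary + kSketchEntriesPerBucket) & (kSketchCacheSize - 1);
}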
3027 void RegExpResultsCache::Enter(Isolate* isolate,
3028 Handle<String> key_string,
3029 Handle<Object> key_pattern,
3030 Handle<FixedArray> value_array,
3031 ResultsCacheType type) {
3032 Factory* factory = isolate->factory();
3033 Handle<FixedArray> cache;
3034 if (!key_string->IsInternalizedString()) return;
3035 if (type == STRING_SPLIT_SUBSTRINGS) {
3036 ASSERT(key_pattern->IsString());
3037 if (!key_pattern->IsInternalizedString()) return;
3038 cache = factory->string_split_cache();
3039 } else {
3040 ASSERT(type == REGEXP_MULTIPLE_INDICES);
3041 ASSERT(key_pattern->IsFixedArray());
3042 cache = factory->regexp_multiple_cache();
3043 }
3045 uint32_t hash = key_string->Hash();
3046 uint32_t index = ((hash & (kRegExpResultsCacheSize - 1)) &
3047 ~(kArrayEntriesPerCacheEntry - 1));
3048 if (cache->get(index + kStringOffset) == Smi::FromInt(0)) {
3049 cache->set(index + kStringOffset, *key_string);
3050 cache->set(index + kPatternOffset, *key_pattern);
3051 cache->set(index + kArrayOffset, *value_array);
3052 } else {
3053 uint32_t index2 =
3054 ((index + kArrayEntriesPerCacheEntry) & (kRegExpResultsCacheSize - 1));
3055 if (cache->get(index2 + kStringOffset) == Smi::FromInt(0)) {
3056 cache->set(index2 + kStringOffset, *key_string);
3057 cache->set(index2 + kPatternOffset, *key_pattern);
3058 cache->set(index2 + kArrayOffset, *value_array);
3059 } else {
3060 cache->set(index2 + kStringOffset, Smi::FromInt(0));
3061 cache->set(index2 + kPatternOffset, Smi::FromInt(0));
3062 cache->set(index2 + kArrayOffset, Smi::FromInt(0));
3063 cache->set(index + kStringOffset, *key_string);
3064 cache->set(index + kPatternOffset, *key_pattern);
3065 cache->set(index + kArrayOffset, *value_array);
3066 }
3067 }
3068 // If the array is a reasonably short list of substrings, convert it into a
3069 // list of internalized strings.
3070 if (type == STRING_SPLIT_SUBSTRINGS && value_array->length() < 100) {
3071 for (int i = 0; i < value_array->length(); i++) {
3072 Handle<String> str(String::cast(value_array->get(i)), isolate);
3073 Handle<String> internalized_str = factory->InternalizeString(str);
3074 value_array->set(i, *internalized_str);
3077 // Convert backing store to a copy-on-write array.
3078 value_array->set_map_no_write_barrier(*factory->fixed_cow_array_map());
3082 void RegExpResultsCache::Clear(FixedArray* cache) {
3083 for (int i = 0; i < kRegExpResultsCacheSize; i++) {
3084 cache->set(i, Smi::FromInt(0));
3089 int Heap::FullSizeNumberStringCacheLength() {
3090 // Compute the size of the number string cache based on the maximum
3091 // semispace size. The cache has a minimum size, twice the initial cache
3092 // size, to ensure that it is bigger after being made 'full size'.
3093 int number_string_cache_size = max_semi_space_size_ / 512;
3094 number_string_cache_size = Max(kInitialNumberStringCacheSize * 2,
3095 Min(0x4000, number_string_cache_size));
3096 // There is a string and a number per entry, so the length is twice the
3097 // number of entries.
3098 return number_string_cache_size * 2;
3099 }
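// A worked example of the sizing formula above, assuming a 16 MB maximum
// semispace and kInitialNumberStringCacheSize == 128 (assumed value):
//   16 MB / 512 = 32768, clamped by Min(0x4000, .) to 16384 entries;
//   Max(2 * 128, 16384) = 16384 entries;
//   returned length = 16384 * 2 = 32768 slots (a number and a string each).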
3102 void Heap::FlushNumberStringCache() {
3103 // Flush the number to string cache.
3104 int len = number_string_cache()->length();
3105 for (int i = 0; i < len; i++) {
3106 number_string_cache()->set_undefined(i);
3111 void Heap::FlushAllocationSitesScratchpad() {
3112 for (int i = 0; i < allocation_sites_scratchpad_length_; i++) {
3113 allocation_sites_scratchpad()->set_undefined(i);
3115 allocation_sites_scratchpad_length_ = 0;
3119 void Heap::InitializeAllocationSitesScratchpad() {
3120 ASSERT(allocation_sites_scratchpad()->length() ==
3121 kAllocationSiteScratchpadSize);
3122 for (int i = 0; i < kAllocationSiteScratchpadSize; i++) {
3123 allocation_sites_scratchpad()->set_undefined(i);
3128 void Heap::AddAllocationSiteToScratchpad(AllocationSite* site,
3129 ScratchpadSlotMode mode) {
3130 if (allocation_sites_scratchpad_length_ < kAllocationSiteScratchpadSize) {
3131 // We cannot use the normal write-barrier because slots need to be
3132 // recorded with non-incremental marking as well. We have to explicitly
3133 // record the slot to take evacuation candidates into account.
3134 allocation_sites_scratchpad()->set(
3135 allocation_sites_scratchpad_length_, site, SKIP_WRITE_BARRIER);
3136 Object** slot = allocation_sites_scratchpad()->RawFieldOfElementAt(
3137 allocation_sites_scratchpad_length_);
3139 if (mode == RECORD_SCRATCHPAD_SLOT) {
3140 // We need to allow slots buffer overflow here since the evacuation
3141 // candidates are not part of the global list of old space pages and
3142 // releasing an evacuation candidate due to a slots buffer overflow
3143 // results in lost pages.
3144 mark_compact_collector()->RecordSlot(
3145 slot, slot, *slot, SlotsBuffer::IGNORE_OVERFLOW);
3147 allocation_sites_scratchpad_length_++;
3152 Map* Heap::MapForExternalArrayType(ExternalArrayType array_type) {
3153 return Map::cast(roots_[RootIndexForExternalArrayType(array_type)]);
3157 Heap::RootListIndex Heap::RootIndexForExternalArrayType(
3158 ExternalArrayType array_type) {
3159 switch (array_type) {
3160 #define ARRAY_TYPE_TO_ROOT_INDEX(Type, type, TYPE, ctype, size) \
3161 case kExternal##Type##Array: \
3162 return kExternal##Type##ArrayMapRootIndex;
3164 TYPED_ARRAYS(ARRAY_TYPE_TO_ROOT_INDEX)
3165 #undef ARRAY_TYPE_TO_ROOT_INDEX
3167 default:
3168 UNREACHABLE();
3169 return kUndefinedValueRootIndex;
3170 }
3171 }
3174 Map* Heap::MapForFixedTypedArray(ExternalArrayType array_type) {
3175 return Map::cast(roots_[RootIndexForFixedTypedArray(array_type)]);
3179 Heap::RootListIndex Heap::RootIndexForFixedTypedArray(
3180 ExternalArrayType array_type) {
3181 switch (array_type) {
3182 #define ARRAY_TYPE_TO_ROOT_INDEX(Type, type, TYPE, ctype, size) \
3183 case kExternal##Type##Array: \
3184 return kFixed##Type##ArrayMapRootIndex;
3186 TYPED_ARRAYS(ARRAY_TYPE_TO_ROOT_INDEX)
3187 #undef ARRAY_TYPE_TO_ROOT_INDEX
3189 default:
3190 UNREACHABLE();
3191 return kUndefinedValueRootIndex;
3192 }
3193 }
3196 Heap::RootListIndex Heap::RootIndexForEmptyExternalArray(
3197 ElementsKind elementsKind) {
3198 switch (elementsKind) {
3199 #define ELEMENT_KIND_TO_ROOT_INDEX(Type, type, TYPE, ctype, size) \
3200 case EXTERNAL_##TYPE##_ELEMENTS: \
3201 return kEmptyExternal##Type##ArrayRootIndex;
3203 TYPED_ARRAYS(ELEMENT_KIND_TO_ROOT_INDEX)
3204 #undef ELEMENT_KIND_TO_ROOT_INDEX
3206 default:
3207 UNREACHABLE();
3208 return kUndefinedValueRootIndex;
3209 }
3210 }
3213 Heap::RootListIndex Heap::RootIndexForEmptyFixedTypedArray(
3214 ElementsKind elementsKind) {
3215 switch (elementsKind) {
3216 #define ELEMENT_KIND_TO_ROOT_INDEX(Type, type, TYPE, ctype, size) \
3217 case TYPE##_ELEMENTS: \
3218 return kEmptyFixed##Type##ArrayRootIndex;
3220 TYPED_ARRAYS(ELEMENT_KIND_TO_ROOT_INDEX)
3221 #undef ELEMENT_KIND_TO_ROOT_INDEX
3222 default:
3223 UNREACHABLE();
3224 return kUndefinedValueRootIndex;
3225 }
3226 }
3229 ExternalArray* Heap::EmptyExternalArrayForMap(Map* map) {
3230 return ExternalArray::cast(
3231 roots_[RootIndexForEmptyExternalArray(map->elements_kind())]);
3235 FixedTypedArrayBase* Heap::EmptyFixedTypedArrayForMap(Map* map) {
3236 return FixedTypedArrayBase::cast(
3237 roots_[RootIndexForEmptyFixedTypedArray(map->elements_kind())]);
3241 AllocationResult Heap::AllocateForeign(Address address,
3242 PretenureFlag pretenure) {
3243 // Statically ensure that it is safe to allocate foreigns in paged spaces.
3244 STATIC_ASSERT(Foreign::kSize <= Page::kMaxRegularHeapObjectSize);
3245 AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
3246 Foreign* result;
3247 AllocationResult allocation = Allocate(foreign_map(), space);
3248 if (!allocation.To(&result)) return allocation;
3249 result->set_foreign_address(address);
3250 return result;
3251 }
3254 AllocationResult Heap::AllocateByteArray(int length, PretenureFlag pretenure) {
3255 if (length < 0 || length > ByteArray::kMaxLength) {
3256 v8::internal::Heap::FatalProcessOutOfMemory("invalid array length", true);
3257 }
3258 int size = ByteArray::SizeFor(length);
3259 AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
3260 HeapObject* result;
3261 { AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
3262 if (!allocation.To(&result)) return allocation;
3263 }
3265 result->set_map_no_write_barrier(byte_array_map());
3266 ByteArray::cast(result)->set_length(length);
3267 return result;
3268 }
3271 void Heap::CreateFillerObjectAt(Address addr, int size) {
3272 if (size == 0) return;
3273 HeapObject* filler = HeapObject::FromAddress(addr);
3274 if (size == kPointerSize) {
3275 filler->set_map_no_write_barrier(one_pointer_filler_map());
3276 } else if (size == 2 * kPointerSize) {
3277 filler->set_map_no_write_barrier(two_pointer_filler_map());
3279 filler->set_map_no_write_barrier(free_space_map());
3280 FreeSpace::cast(filler)->set_size(size);
3281 }
3282 }
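// A condensed restatement of the filler selection above (illustrative
// helper, not V8 API): one- and two-word gaps get dedicated map-only
// fillers, while larger gaps need a FreeSpace object that records its size.
enum SketchFillerKind {
  SKETCH_ONE_POINTER_FILLER,
  SKETCH_TWO_POINTER_FILLER,
  SKETCH_FREE_SPACE_FILLER
};

static SketchFillerKind ChooseSketchFiller(int size, int pointer_size) {
  if (size == pointer_size) return SKETCH_ONE_POINTER_FILLER;
  if (size == 2 * pointer_size) return SKETCH_TWO_POINTER_FILLER;
  return SKETCH_FREE_SPACE_FILLER;  // stores its size in the object body
}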
3285 bool Heap::CanMoveObjectStart(HeapObject* object) {
3286 Address address = object->address();
3287 bool is_in_old_pointer_space = InOldPointerSpace(address);
3288 bool is_in_old_data_space = InOldDataSpace(address);
3290 if (lo_space()->Contains(object)) return false;
3292 Page* page = Page::FromAddress(address);
3293 // We can move the object start if:
3294 // (1) the object is not in old pointer or old data space,
3295 // (2) the page of the object was already swept,
3296 // (3) the page was already concurrently swept. This case is an optimization
3297 // for concurrent sweeping. The WasSwept predicate for concurrently swept
3298 // pages is set after sweeping all pages.
3299 return (!is_in_old_pointer_space && !is_in_old_data_space) ||
3300 page->WasSwept() ||
3301 (mark_compact_collector()->AreSweeperThreadsActivated() &&
3302 page->parallel_sweeping() <=
3303 MemoryChunk::PARALLEL_SWEEPING_FINALIZE);
3307 void Heap::AdjustLiveBytes(Address address, int by, InvocationMode mode) {
3308 if (incremental_marking()->IsMarking() &&
3309 Marking::IsBlack(Marking::MarkBitFrom(address))) {
3310 if (mode == FROM_GC) {
3311 MemoryChunk::IncrementLiveBytesFromGC(address, by);
3312 } else {
3313 MemoryChunk::IncrementLiveBytesFromMutator(address, by);
3314 }
3315 }
3316 }
3319 AllocationResult Heap::AllocateExternalArray(int length,
3320 ExternalArrayType array_type,
3321 void* external_pointer,
3322 PretenureFlag pretenure) {
3323 int size = ExternalArray::kAlignedSize;
3324 AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
3325 HeapObject* result;
3326 { AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
3327 if (!allocation.To(&result)) return allocation;
3328 }
3330 result->set_map_no_write_barrier(
3331 MapForExternalArrayType(array_type));
3332 ExternalArray::cast(result)->set_length(length);
3333 ExternalArray::cast(result)->set_external_pointer(external_pointer);
3334 return result;
3335 }
3337 static void ForFixedTypedArray(ExternalArrayType array_type,
3338 int* element_size,
3339 ElementsKind* element_kind) {
3340 switch (array_type) {
3341 #define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
3342 case kExternal##Type##Array: \
3343 *element_size = size; \
3344 *element_kind = TYPE##_ELEMENTS; \
3345 return;
3347 TYPED_ARRAYS(TYPED_ARRAY_CASE)
3348 #undef TYPED_ARRAY_CASE
3349 default:
3350 UNREACHABLE();
3351 *element_size = 0; // Bogus
3352 *element_kind = UINT8_ELEMENTS; // Bogus
3353 break;
3354 }
3355 }
3358 AllocationResult Heap::AllocateFixedTypedArray(int length,
3359 ExternalArrayType array_type,
3360 PretenureFlag pretenure) {
3361 int element_size;
3362 ElementsKind elements_kind;
3363 ForFixedTypedArray(array_type, &element_size, &elements_kind);
3364 int size = OBJECT_POINTER_ALIGN(
3365 length * element_size + FixedTypedArrayBase::kDataOffset);
3366 #ifndef V8_HOST_ARCH_64_BIT
3367 if (array_type == kExternalFloat64Array) {
3368 size += kPointerSize;
3369 }
3370 #endif
3371 AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
3373 HeapObject* object;
3374 AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
3375 if (!allocation.To(&object)) return allocation;
3377 if (array_type == kExternalFloat64Array) {
3378 object = EnsureDoubleAligned(this, object, size);
3379 }
3381 object->set_map(MapForFixedTypedArray(array_type));
3382 FixedTypedArrayBase* elements = FixedTypedArrayBase::cast(object);
3383 elements->set_length(length);
3384 memset(elements->DataPtr(), 0, elements->DataSize());
3385 return elements;
3386 }
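// A worked size example for the allocation above (hypothetical numbers,
// 32-bit layout assumed, with kDataOffset taken to be 16): a Float64 array
// of length 3 needs OBJECT_POINTER_ALIGN(16 + 3 * 8) = 40 bytes, plus one
// kPointerSize of slack on 32-bit targets so EnsureDoubleAligned can shift
// the object to an 8-byte boundary if the raw allocation is misaligned.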
3389 AllocationResult Heap::AllocateCode(int object_size,
3390 bool immovable) {
3391 ASSERT(IsAligned(static_cast<intptr_t>(object_size), kCodeAlignment));
3392 AllocationResult allocation;
3393 // Large code objects and code objects which should stay at a fixed address
3394 // are allocated in large object space.
3395 HeapObject* result;
3396 bool force_lo_space = object_size > code_space()->AreaSize();
3397 if (force_lo_space) {
3398 allocation = lo_space_->AllocateRaw(object_size, EXECUTABLE);
3399 } else {
3400 allocation = AllocateRaw(object_size, CODE_SPACE, CODE_SPACE);
3401 }
3402 if (!allocation.To(&result)) return allocation;
3404 if (immovable && !force_lo_space &&
3405 // Objects on the first page of each space are never moved.
3406 !code_space_->FirstPage()->Contains(result->address())) {
3407 // Discard the first code allocation, which was on a page where it could be
3408 // moved.
3409 CreateFillerObjectAt(result->address(), object_size);
3410 allocation = lo_space_->AllocateRaw(object_size, EXECUTABLE);
3411 if (!allocation.To(&result)) return allocation;
3414 result->set_map_no_write_barrier(code_map());
3415 Code* code = Code::cast(result);
3416 ASSERT(isolate_->code_range() == NULL ||
3417 !isolate_->code_range()->valid() ||
3418 isolate_->code_range()->contains(code->address()));
3419 code->set_gc_metadata(Smi::FromInt(0));
3420 code->set_ic_age(global_ic_age_);
3421 return code;
3422 }
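// A condensed restatement of the placement rule in AllocateCode
// (illustrative helper, not V8 API): immovable code is already safe if it
// went to large object space or landed on the never-moved first page of
// code space; otherwise it must be re-allocated in large object space.
static bool SketchNeedsLargeObjectRetry(bool immovable,
                                        bool in_large_object_space,
                                        bool on_first_code_page) {
  return immovable && !in_large_object_space && !on_first_code_page;
}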
3425 AllocationResult Heap::CopyCode(Code* code) {
3426 AllocationResult allocation;
3427 HeapObject* new_constant_pool;
3428 if (FLAG_enable_ool_constant_pool &&
3429 code->constant_pool() != empty_constant_pool_array()) {
3430 // Copy the constant pool, since edits to the copied code may modify
3431 // the constant pool.
3432 allocation = CopyConstantPoolArray(code->constant_pool());
3433 if (!allocation.To(&new_constant_pool)) return allocation;
3434 } else {
3435 new_constant_pool = empty_constant_pool_array();
3436 }
3438 // Allocate an object the same size as the code object.
3439 int obj_size = code->Size();
3440 if (obj_size > code_space()->AreaSize()) {
3441 allocation = lo_space_->AllocateRaw(obj_size, EXECUTABLE);
3442 } else {
3443 allocation = AllocateRaw(obj_size, CODE_SPACE, CODE_SPACE);
3444 }
3446 HeapObject* result;
3447 if (!allocation.To(&result)) return allocation;
3449 // Copy code object.
3450 Address old_addr = code->address();
3451 Address new_addr = result->address();
3452 CopyBlock(new_addr, old_addr, obj_size);
3453 Code* new_code = Code::cast(result);
3455 // Update the constant pool.
3456 new_code->set_constant_pool(new_constant_pool);
3458 // Relocate the copy.
3459 ASSERT(isolate_->code_range() == NULL ||
3460 !isolate_->code_range()->valid() ||
3461 isolate_->code_range()->contains(code->address()));
3462 new_code->Relocate(new_addr - old_addr);
3463 return new_code;
3464 }
3467 AllocationResult Heap::CopyCode(Code* code, Vector<byte> reloc_info) {
3468 // Allocate ByteArray and ConstantPoolArray before the Code object, so that we
3469 // do not risk leaving an uninitialized Code object (and breaking the heap).
3470 ByteArray* reloc_info_array;
3471 { AllocationResult allocation =
3472 AllocateByteArray(reloc_info.length(), TENURED);
3473 if (!allocation.To(&reloc_info_array)) return allocation;
3475 HeapObject* new_constant_pool;
3476 if (FLAG_enable_ool_constant_pool &&
3477 code->constant_pool() != empty_constant_pool_array()) {
3478 // Copy the constant pool, since edits to the copied code may modify
3479 // the constant pool.
3480 AllocationResult allocation =
3481 CopyConstantPoolArray(code->constant_pool());
3482 if (!allocation.To(&new_constant_pool)) return allocation;
3483 } else {
3484 new_constant_pool = empty_constant_pool_array();
3485 }
3487 int new_body_size = RoundUp(code->instruction_size(), kObjectAlignment);
3489 int new_obj_size = Code::SizeFor(new_body_size);
3491 Address old_addr = code->address();
3493 size_t relocation_offset =
3494 static_cast<size_t>(code->instruction_end() - old_addr);
3496 AllocationResult allocation;
3497 if (new_obj_size > code_space()->AreaSize()) {
3498 allocation = lo_space_->AllocateRaw(new_obj_size, EXECUTABLE);
3499 } else {
3500 allocation = AllocateRaw(new_obj_size, CODE_SPACE, CODE_SPACE);
3501 }
3503 HeapObject* result;
3504 if (!allocation.To(&result)) return allocation;
3506 // Copy code object.
3507 Address new_addr = result->address();
3509 // Copy header and instructions.
3510 CopyBytes(new_addr, old_addr, relocation_offset);
3512 Code* new_code = Code::cast(result);
3513 new_code->set_relocation_info(reloc_info_array);
3515 // Update constant pool.
3516 new_code->set_constant_pool(new_constant_pool);
3518 // Copy patched rinfo.
3519 CopyBytes(new_code->relocation_start(),
3521 static_cast<size_t>(reloc_info.length()));
3523 // Relocate the copy.
3524 ASSERT(isolate_->code_range() == NULL ||
3525 !isolate_->code_range()->valid() ||
3526 isolate_->code_range()->contains(code->address()));
3527 new_code->Relocate(new_addr - old_addr);
3530 if (FLAG_verify_heap) code->ObjectVerify();
3536 void Heap::InitializeAllocationMemento(AllocationMemento* memento,
3537 AllocationSite* allocation_site) {
3538 memento->set_map_no_write_barrier(allocation_memento_map());
3539 ASSERT(allocation_site->map() == allocation_site_map());
3540 memento->set_allocation_site(allocation_site, SKIP_WRITE_BARRIER);
3541 if (FLAG_allocation_site_pretenuring) {
3542 allocation_site->IncrementMementoCreateCount();
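// The memento is not a separate allocation: Allocate() below reserves
// AllocationMemento::kSize extra bytes, giving the layout
//
//   | object (map->instance_size() bytes) | AllocationMemento |
//
// so the memento address is simply the object address plus
// map->instance_size().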
3547 AllocationResult Heap::Allocate(Map* map, AllocationSpace space,
3548 AllocationSite* allocation_site) {
3549 ASSERT(gc_state_ == NOT_IN_GC);
3550 ASSERT(map->instance_type() != MAP_TYPE);
3551 // If allocation failures are disallowed, we may allocate in a different
3552 // space when new space is full and the object is not a large object.
3553 AllocationSpace retry_space =
3554 (space != NEW_SPACE) ? space : TargetSpaceId(map->instance_type());
3555 int size = map->instance_size();
3556 if (allocation_site != NULL) {
3557 size += AllocationMemento::kSize;
3560 AllocationResult allocation = AllocateRaw(size, space, retry_space);
3561 if (!allocation.To(&result)) return allocation;
3562 // No need for write barrier since object is white and map is in old space.
3563 result->set_map_no_write_barrier(map);
3564 if (allocation_site != NULL) {
3565 AllocationMemento* alloc_memento = reinterpret_cast<AllocationMemento*>(
3566 reinterpret_cast<Address>(result) + map->instance_size());
3567 InitializeAllocationMemento(alloc_memento, allocation_site);
3573 AllocationResult Heap::AllocateArgumentsObject(Object* callee, int length) {
3574 // To get fast allocation and map sharing for arguments objects we
3575 // allocate them based on an arguments boilerplate.
3577 JSObject* boilerplate;
3578 int arguments_object_size;
3579 bool strict_mode_callee = callee->IsJSFunction() &&
3580 JSFunction::cast(callee)->shared()->strict_mode() == STRICT;
3581 if (strict_mode_callee) {
3583 isolate()->context()->native_context()->strict_arguments_boilerplate();
3584 arguments_object_size = kStrictArgumentsObjectSize;
3587 isolate()->context()->native_context()->sloppy_arguments_boilerplate();
3588 arguments_object_size = kSloppyArgumentsObjectSize;
3591 // Check that the size of the boilerplate matches our
3592 // expectations. The ArgumentsAccessStub::GenerateNewObject relies
3593 // on the size being a known constant.
3594 ASSERT(arguments_object_size == boilerplate->map()->instance_size());
3596 // Do the allocation.
3598 { AllocationResult allocation =
3599 AllocateRaw(arguments_object_size, NEW_SPACE, OLD_POINTER_SPACE);
3600 if (!allocation.To(&result)) return allocation;
3603 // Copy the content. The arguments boilerplate doesn't have any
3604 // fields that point to new space, so it's safe to skip the write
3605 // barrier here.
3606 CopyBlock(result->address(), boilerplate->address(), JSObject::kHeaderSize);
3608 // Set the length property.
3609 JSObject* js_obj = JSObject::cast(result);
3610 js_obj->InObjectPropertyAtPut(
3611 kArgumentsLengthIndex, Smi::FromInt(length), SKIP_WRITE_BARRIER);
3612 // Set the callee property for sloppy mode arguments object only.
3613 if (!strict_mode_callee) {
3614 js_obj->InObjectPropertyAtPut(kArgumentsCalleeIndex, callee);
3617 // Check the state of the object
3618 ASSERT(js_obj->HasFastProperties());
3619 ASSERT(js_obj->HasFastObjectElements());
3625 void Heap::InitializeJSObjectFromMap(JSObject* obj,
3626 FixedArray* properties,
3628 obj->set_properties(properties);
3629 obj->initialize_elements();
3630 // TODO(1240798): Initialize the object's body using valid initial values
3631 // according to the object's initial map. For example, if the map's
3632 // instance type is JS_ARRAY_TYPE, the length field should be initialized
3633 // to a number (e.g. Smi::FromInt(0)) and the elements initialized to a
3634 // fixed array (e.g. Heap::empty_fixed_array()). Currently, the object
3635 // verification code has to cope with (temporarily) invalid objects. See,
3636 // for example, JSArray::JSArrayVerify.
3638 // We cannot always fill with one_pointer_filler_map because objects
3639 // created from API functions expect their internal fields to be initialized
3640 // with undefined_value.
3641 // Pre-allocated fields need to be initialized with undefined_value as well
3642 // so that object accesses before the constructor completes (e.g. in the
3643 // debugger) will not cause a crash.
3644 if (map->constructor()->IsJSFunction() &&
3645 JSFunction::cast(map->constructor())->
3646 IsInobjectSlackTrackingInProgress()) {
3647 // We might want to shrink the object later.
3648 ASSERT(obj->GetInternalFieldCount() == 0);
3649 filler = Heap::one_pointer_filler_map();
3651 filler = Heap::undefined_value();
3653 obj->InitializeBody(map, Heap::undefined_value(), filler);
3657 AllocationResult Heap::AllocateJSObjectFromMap(
3659 PretenureFlag pretenure,
3660 bool allocate_properties,
3661 AllocationSite* allocation_site) {
3662 // JSFunctions should be allocated using AllocateFunction to be
3663 // properly initialized.
3664 ASSERT(map->instance_type() != JS_FUNCTION_TYPE);
3666 // Both types of global objects should be allocated using
3667 // AllocateGlobalObject to be properly initialized.
3668 ASSERT(map->instance_type() != JS_GLOBAL_OBJECT_TYPE);
3669 ASSERT(map->instance_type() != JS_BUILTINS_OBJECT_TYPE);
3671 // Allocate the backing storage for the properties.
3672 FixedArray* properties;
3673 if (allocate_properties) {
3674 int prop_size = map->InitialPropertiesLength();
3675 ASSERT(prop_size >= 0);
3676 { AllocationResult allocation = AllocateFixedArray(prop_size, pretenure);
3677 if (!allocation.To(&properties)) return allocation;
3680 properties = empty_fixed_array();
3683 // Allocate the JSObject.
3684 int size = map->instance_size();
3685 AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, pretenure);
3687 AllocationResult allocation = Allocate(map, space, allocation_site);
3688 if (!allocation.To(&js_obj)) return allocation;
3690 // Initialize the JSObject.
3691 InitializeJSObjectFromMap(js_obj, properties, map);
3692 ASSERT(js_obj->HasFastElements() ||
3693 js_obj->HasExternalArrayElements() ||
3694 js_obj->HasFixedTypedArrayElements());
3699 AllocationResult Heap::AllocateJSObject(JSFunction* constructor,
3700 PretenureFlag pretenure,
3701 AllocationSite* allocation_site) {
3702 ASSERT(constructor->has_initial_map());
3704 // Allocate the object based on the constructor's initial map.
3705 AllocationResult allocation = AllocateJSObjectFromMap(
3706 constructor->initial_map(), pretenure, true, allocation_site);
3708 // Make sure result is NOT a global object if valid.
3710 ASSERT(!allocation.To(&obj) || !obj->IsGlobalObject());
3716 AllocationResult Heap::CopyJSObject(JSObject* source, AllocationSite* site) {
3717 // Never used to copy functions. If functions need to be copied we
3718 // have to be careful to clear the literals array.
3719 SLOW_ASSERT(!source->IsJSFunction());
3722 Map* map = source->map();
3723 int object_size = map->instance_size();
3726 ASSERT(site == NULL || AllocationSite::CanTrack(map->instance_type()));
3728 WriteBarrierMode wb_mode = UPDATE_WRITE_BARRIER;
3730 // If we're forced to always allocate, we use the general allocation
3731 // functions which may leave us with an object in old space.
3732 if (always_allocate()) {
3733 { AllocationResult allocation =
3734 AllocateRaw(object_size, NEW_SPACE, OLD_POINTER_SPACE);
3735 if (!allocation.To(&clone)) return allocation;
3737 Address clone_address = clone->address();
3738 CopyBlock(clone_address,
3741 // Update write barrier for all fields that lie beyond the header.
3742 RecordWrites(clone_address,
3743 JSObject::kHeaderSize,
3744 (object_size - JSObject::kHeaderSize) / kPointerSize);
3746 wb_mode = SKIP_WRITE_BARRIER;
3748 { int adjusted_object_size = site != NULL
3749 ? object_size + AllocationMemento::kSize
3751 AllocationResult allocation =
3752 AllocateRaw(adjusted_object_size, NEW_SPACE, NEW_SPACE);
3753 if (!allocation.To(&clone)) return allocation;
3755 SLOW_ASSERT(InNewSpace(clone));
3756 // Since we know the clone is allocated in new space, we can copy
3757 // the contents without worrying about updating the write barrier.
3758 CopyBlock(clone->address(),
3763 AllocationMemento* alloc_memento = reinterpret_cast<AllocationMemento*>(
3764 reinterpret_cast<Address>(clone) + object_size);
3765 InitializeAllocationMemento(alloc_memento, site);
3770 JSObject::cast(clone)->GetElementsKind() == source->GetElementsKind());
3771 FixedArrayBase* elements = FixedArrayBase::cast(source->elements());
3772 FixedArray* properties = FixedArray::cast(source->properties());
3773 // Update elements if necessary.
3774 if (elements->length() > 0) {
3775 FixedArrayBase* elem;
3776 { AllocationResult allocation;
3777 if (elements->map() == fixed_cow_array_map()) {
3778 allocation = FixedArray::cast(elements);
3779 } else if (source->HasFastDoubleElements()) {
3780 allocation = CopyFixedDoubleArray(FixedDoubleArray::cast(elements));
3782 allocation = CopyFixedArray(FixedArray::cast(elements));
3784 if (!allocation.To(&elem)) return allocation;
3786 JSObject::cast(clone)->set_elements(elem, wb_mode);
3788 // Update properties if necessary.
3789 if (properties->length() > 0) {
3791 { AllocationResult allocation = CopyFixedArray(properties);
3792 if (!allocation.To(&prop)) return allocation;
3794 JSObject::cast(clone)->set_properties(prop, wb_mode);
3796 // Return the new clone.
3801 static inline void WriteOneByteData(Vector<const char> vector,
3804 // Only works for ASCII (one-byte) data.
3805 ASSERT(vector.length() == len);
3806 MemCopy(chars, vector.start(), len);
3809 static inline void WriteTwoByteData(Vector<const char> vector,
3812 const uint8_t* stream = reinterpret_cast<const uint8_t*>(vector.start());
3813 unsigned stream_length = vector.length();
3814 while (stream_length != 0) {
3815 unsigned consumed = 0;
3816 uint32_t c = unibrow::Utf8::ValueOf(stream, stream_length, &consumed);
3817 ASSERT(c != unibrow::Utf8::kBadChar);
3818 ASSERT(consumed <= stream_length);
3819 stream_length -= consumed;
3821 if (c > unibrow::Utf16::kMaxNonSurrogateCharCode) {
3824 *chars++ = unibrow::Utf16::LeadSurrogate(c);
3825 *chars++ = unibrow::Utf16::TrailSurrogate(c);
3832 ASSERT(stream_length == 0);
3837 static inline void WriteOneByteData(String* s, uint8_t* chars, int len) {
3838 ASSERT(s->length() == len);
3839 String::WriteToFlat(s, chars, 0, len);
3843 static inline void WriteTwoByteData(String* s, uint16_t* chars, int len) {
3844 ASSERT(s->length() == len);
3845 String::WriteToFlat(s, chars, 0, len);
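// The overload pairs above give AllocateInternalizedStringImpl below a
// uniform interface: T is either Vector<const char> (UTF-8 input, decoded
// in WriteTwoByteData via unibrow::Utf8) or String* (flattened via
// String::WriteToFlat), with the one-/two-byte choice made at compile time.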
3849 template<bool is_one_byte, typename T>
3850 AllocationResult Heap::AllocateInternalizedStringImpl(
3851 T t, int chars, uint32_t hash_field) {
3853 // Compute map and object size.
3857 ASSERT_LE(0, chars);
3858 ASSERT_GE(String::kMaxLength, chars);
3860 map = ascii_internalized_string_map();
3861 size = SeqOneByteString::SizeFor(chars);
3863 map = internalized_string_map();
3864 size = SeqTwoByteString::SizeFor(chars);
3866 AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, TENURED);
3870 { AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
3871 if (!allocation.To(&result)) return allocation;
3874 result->set_map_no_write_barrier(map);
3875 // Set length and hash fields of the allocated string.
3876 String* answer = String::cast(result);
3877 answer->set_length(chars);
3878 answer->set_hash_field(hash_field);
3880 ASSERT_EQ(size, answer->Size());
3883 WriteOneByteData(t, SeqOneByteString::cast(answer)->GetChars(), chars);
3885 WriteTwoByteData(t, SeqTwoByteString::cast(answer)->GetChars(), chars);
3891 // Need explicit instantiations.
3892 template
3893 AllocationResult Heap::AllocateInternalizedStringImpl<true>(
3894 String*, int, uint32_t);
3895 template
3896 AllocationResult Heap::AllocateInternalizedStringImpl<false>(
3897 String*, int, uint32_t);
3898 template
3899 AllocationResult Heap::AllocateInternalizedStringImpl<false>(
3900 Vector<const char>, int, uint32_t);
3903 AllocationResult Heap::AllocateRawOneByteString(int length,
3904 PretenureFlag pretenure) {
3905 ASSERT_LE(0, length);
3906 ASSERT_GE(String::kMaxLength, length);
3907 int size = SeqOneByteString::SizeFor(length);
3908 ASSERT(size <= SeqOneByteString::kMaxSize);
3909 AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
3912 { AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
3913 if (!allocation.To(&result)) return allocation;
3916 // Partially initialize the object.
3917 result->set_map_no_write_barrier(ascii_string_map());
3918 String::cast(result)->set_length(length);
3919 String::cast(result)->set_hash_field(String::kEmptyHashField);
3920 ASSERT_EQ(size, HeapObject::cast(result)->Size());
3926 AllocationResult Heap::AllocateRawTwoByteString(int length,
3927 PretenureFlag pretenure) {
3928 ASSERT_LE(0, length);
3929 ASSERT_GE(String::kMaxLength, length);
3930 int size = SeqTwoByteString::SizeFor(length);
3931 ASSERT(size <= SeqTwoByteString::kMaxSize);
3932 AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
3935 { AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
3936 if (!allocation.To(&result)) return allocation;
3939 // Partially initialize the object.
3940 result->set_map_no_write_barrier(string_map());
3941 String::cast(result)->set_length(length);
3942 String::cast(result)->set_hash_field(String::kEmptyHashField);
3943 ASSERT_EQ(size, HeapObject::cast(result)->Size());
3948 AllocationResult Heap::AllocateEmptyFixedArray() {
3949 int size = FixedArray::SizeFor(0);
3951 { AllocationResult allocation =
3952 AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE);
3953 if (!allocation.To(&result)) return allocation;
3955 // Initialize the object.
3956 result->set_map_no_write_barrier(fixed_array_map());
3957 FixedArray::cast(result)->set_length(0);
3962 AllocationResult Heap::AllocateEmptyExternalArray(
3963 ExternalArrayType array_type) {
3964 return AllocateExternalArray(0, array_type, NULL, TENURED);
3968 AllocationResult Heap::CopyAndTenureFixedCOWArray(FixedArray* src) {
3969 if (!InNewSpace(src)) {
3973 int len = src->length();
3975 { AllocationResult allocation = AllocateRawFixedArray(len, TENURED);
3976 if (!allocation.To(&obj)) return allocation;
3978 obj->set_map_no_write_barrier(fixed_array_map());
3979 FixedArray* result = FixedArray::cast(obj);
3980 result->set_length(len);
3983 DisallowHeapAllocation no_gc;
3984 WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
3985 for (int i = 0; i < len; i++) result->set(i, src->get(i), mode);
3987 // TODO(mvstanton): The map is set twice because of protection against calling
3988 // set() on a COW FixedArray. Issue v8:3221 created to track this, and
3989 // we might then be able to remove this whole method.
3990 HeapObject::cast(obj)->set_map_no_write_barrier(fixed_cow_array_map());
3995 AllocationResult Heap::AllocateEmptyFixedTypedArray(
3996 ExternalArrayType array_type) {
3997 return AllocateFixedTypedArray(0, array_type, TENURED);
4001 AllocationResult Heap::CopyFixedArrayWithMap(FixedArray* src, Map* map) {
4002 int len = src->length();
4004 { AllocationResult allocation = AllocateRawFixedArray(len, NOT_TENURED);
4005 if (!allocation.To(&obj)) return allocation;
4007 if (InNewSpace(obj)) {
4008 obj->set_map_no_write_barrier(map);
4009 CopyBlock(obj->address() + kPointerSize,
4010 src->address() + kPointerSize,
4011 FixedArray::SizeFor(len) - kPointerSize);
4014 obj->set_map_no_write_barrier(map);
4015 FixedArray* result = FixedArray::cast(obj);
4016 result->set_length(len);
4019 DisallowHeapAllocation no_gc;
4020 WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
4021 for (int i = 0; i < len; i++) result->set(i, src->get(i), mode);
4026 AllocationResult Heap::CopyFixedDoubleArrayWithMap(FixedDoubleArray* src,
4028 int len = src->length();
4030 { AllocationResult allocation = AllocateRawFixedDoubleArray(len, NOT_TENURED);
4031 if (!allocation.To(&obj)) return allocation;
4033 obj->set_map_no_write_barrier(map);
4035 obj->address() + FixedDoubleArray::kLengthOffset,
4036 src->address() + FixedDoubleArray::kLengthOffset,
4037 FixedDoubleArray::SizeFor(len) - FixedDoubleArray::kLengthOffset);
4042 AllocationResult Heap::CopyConstantPoolArrayWithMap(ConstantPoolArray* src,
4045 if (src->is_extended_layout()) {
4046 ConstantPoolArray::NumberOfEntries small(src,
4047 ConstantPoolArray::SMALL_SECTION);
4048 ConstantPoolArray::NumberOfEntries extended(src,
4049 ConstantPoolArray::EXTENDED_SECTION);
4050 AllocationResult allocation =
4051 AllocateExtendedConstantPoolArray(small, extended);
4052 if (!allocation.To(&obj)) return allocation;
4054 ConstantPoolArray::NumberOfEntries small(src,
4055 ConstantPoolArray::SMALL_SECTION);
4056 AllocationResult allocation = AllocateConstantPoolArray(small);
4057 if (!allocation.To(&obj)) return allocation;
4059 obj->set_map_no_write_barrier(map);
4061 obj->address() + ConstantPoolArray::kFirstEntryOffset,
4062 src->address() + ConstantPoolArray::kFirstEntryOffset,
4063 src->size() - ConstantPoolArray::kFirstEntryOffset);
4068 AllocationResult Heap::AllocateRawFixedArray(int length,
4069 PretenureFlag pretenure) {
4070 if (length < 0 || length > FixedArray::kMaxLength) {
4071 v8::internal::Heap::FatalProcessOutOfMemory("invalid array length", true);
4073 int size = FixedArray::SizeFor(length);
4074 AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, pretenure);
4076 return AllocateRaw(size, space, OLD_POINTER_SPACE);
4080 AllocationResult Heap::AllocateFixedArrayWithFiller(int length,
4081 PretenureFlag pretenure,
4083 ASSERT(length >= 0);
4084 ASSERT(empty_fixed_array()->IsFixedArray());
4085 if (length == 0) return empty_fixed_array();
4087 ASSERT(!InNewSpace(filler));
4089 { AllocationResult allocation = AllocateRawFixedArray(length, pretenure);
4090 if (!allocation.To(&result)) return allocation;
4093 result->set_map_no_write_barrier(fixed_array_map());
4094 FixedArray* array = FixedArray::cast(result);
4095 array->set_length(length);
4096 MemsetPointer(array->data_start(), filler, length);
4101 AllocationResult Heap::AllocateFixedArray(int length, PretenureFlag pretenure) {
4102 return AllocateFixedArrayWithFiller(length, pretenure, undefined_value());
4106 AllocationResult Heap::AllocateUninitializedFixedArray(int length) {
4107 if (length == 0) return empty_fixed_array();
4110 { AllocationResult allocation = AllocateRawFixedArray(length, NOT_TENURED);
4111 if (!allocation.To(&obj)) return allocation;
4114 obj->set_map_no_write_barrier(fixed_array_map());
4115 FixedArray::cast(obj)->set_length(length);
4120 AllocationResult Heap::AllocateUninitializedFixedDoubleArray(
4122 PretenureFlag pretenure) {
4123 if (length == 0) return empty_fixed_array();
4125 HeapObject* elements;
4126 AllocationResult allocation = AllocateRawFixedDoubleArray(length, pretenure);
4127 if (!allocation.To(&elements)) return allocation;
4129 elements->set_map_no_write_barrier(fixed_double_array_map());
4130 FixedDoubleArray::cast(elements)->set_length(length);
4135 AllocationResult Heap::AllocateRawFixedDoubleArray(int length,
4136 PretenureFlag pretenure) {
4137 if (length < 0 || length > FixedDoubleArray::kMaxLength) {
4138 v8::internal::Heap::FatalProcessOutOfMemory("invalid array length", true);
4140 int size = FixedDoubleArray::SizeFor(length);
4141 #ifndef V8_HOST_ARCH_64_BIT
4142 size += kPointerSize;
4143 #endif
4144 AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
4147 { AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
4148 if (!allocation.To(&object)) return allocation;
4151 return EnsureDoubleAligned(this, object, size);
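// A worked example of the padding above (32-bit targets): if the raw
// allocation lands at an address that is 4 modulo 8, EnsureDoubleAligned
// can drop a one-word filler in front and still have room for the array,
// because the extra kPointerSize was already included in size.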
4155 AllocationResult Heap::AllocateConstantPoolArray(
4156 const ConstantPoolArray::NumberOfEntries& small) {
4157 CHECK(small.are_in_range(0, ConstantPoolArray::kMaxSmallEntriesPerType));
4158 int size = ConstantPoolArray::SizeFor(small);
4159 #ifndef V8_HOST_ARCH_64_BIT
4160 size += kPointerSize;
4161 #endif
4162 AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, TENURED);
4165 { AllocationResult allocation = AllocateRaw(size, space, OLD_POINTER_SPACE);
4166 if (!allocation.To(&object)) return allocation;
4168 object = EnsureDoubleAligned(this, object, size);
4169 object->set_map_no_write_barrier(constant_pool_array_map());
4171 ConstantPoolArray* constant_pool = ConstantPoolArray::cast(object);
4172 constant_pool->Init(small);
4173 constant_pool->ClearPtrEntries(isolate());
4174 return constant_pool;
4178 AllocationResult Heap::AllocateExtendedConstantPoolArray(
4179 const ConstantPoolArray::NumberOfEntries& small,
4180 const ConstantPoolArray::NumberOfEntries& extended) {
4181 CHECK(small.are_in_range(0, ConstantPoolArray::kMaxSmallEntriesPerType));
4182 CHECK(extended.are_in_range(0, kMaxInt));
4183 int size = ConstantPoolArray::SizeForExtended(small, extended);
4184 #ifndef V8_HOST_ARCH_64_BIT
4185 size += kPointerSize;
4186 #endif
4187 AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, TENURED);
4190 { AllocationResult allocation = AllocateRaw(size, space, OLD_POINTER_SPACE);
4191 if (!allocation.To(&object)) return allocation;
4193 object = EnsureDoubleAligned(this, object, size);
4194 object->set_map_no_write_barrier(constant_pool_array_map());
4196 ConstantPoolArray* constant_pool = ConstantPoolArray::cast(object);
4197 constant_pool->InitExtended(small, extended);
4198 constant_pool->ClearPtrEntries(isolate());
4199 return constant_pool;
4203 AllocationResult Heap::AllocateEmptyConstantPoolArray() {
4204 ConstantPoolArray::NumberOfEntries small(0, 0, 0, 0);
4205 int size = ConstantPoolArray::SizeFor(small);
4207 { AllocationResult allocation =
4208 AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE);
4209 if (!allocation.To(&result)) return allocation;
4211 result->set_map_no_write_barrier(constant_pool_array_map());
4212 ConstantPoolArray::cast(result)->Init(small);
4217 AllocationResult Heap::AllocateSymbol() {
4218 // Statically ensure that it is safe to allocate symbols in paged spaces.
4219 STATIC_ASSERT(Symbol::kSize <= Page::kMaxRegularHeapObjectSize);
4222 AllocationResult allocation =
4223 AllocateRaw(Symbol::kSize, OLD_POINTER_SPACE, OLD_POINTER_SPACE);
4224 if (!allocation.To(&result)) return allocation;
4226 result->set_map_no_write_barrier(symbol_map());
4228 // Generate a random hash value.
4229 int hash;
4230 int attempts = 0;
4231 do {
4232 hash = isolate()->random_number_generator()->NextInt() & Name::kHashBitMask;
4233 attempts++;
4234 } while (hash == 0 && attempts < 30);
4235 if (hash == 0) hash = 1; // never return 0
4237 Symbol::cast(result)->set_hash_field(
4238 Name::kIsNotArrayIndexMask | (hash << Name::kHashShift));
4239 Symbol::cast(result)->set_name(undefined_value());
4240 Symbol::cast(result)->set_flags(Smi::FromInt(0));
4242 ASSERT(!Symbol::cast(result)->is_private());
4247 AllocationResult Heap::AllocateStruct(InstanceType type) {
4250 #define MAKE_CASE(NAME, Name, name) \
4251 case NAME##_TYPE: map = name##_map(); break;
4252 STRUCT_LIST(MAKE_CASE)
4258 int size = map->instance_size();
4259 AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, TENURED);
4261 { AllocationResult allocation = Allocate(map, space);
4262 if (!allocation.To(&result)) return allocation;
4264 result->InitializeBody(size);
4269 bool Heap::IsHeapIterable() {
4270 return (!old_pointer_space()->was_swept_conservatively() &&
4271 !old_data_space()->was_swept_conservatively() &&
4272 new_space_top_after_last_gc_ == new_space()->top());
4276 void Heap::MakeHeapIterable() {
4277 ASSERT(AllowHeapAllocation::IsAllowed());
4278 if (!IsHeapIterable()) {
4279 CollectAllGarbage(kMakeHeapIterableMask, "Heap::MakeHeapIterable");
4281 ASSERT(IsHeapIterable());
4285 void Heap::AdvanceIdleIncrementalMarking(intptr_t step_size) {
4286 incremental_marking()->Step(step_size,
4287 IncrementalMarking::NO_GC_VIA_STACK_GUARD);
4289 if (incremental_marking()->IsComplete()) {
4290 bool uncommit = false;
4291 if (gc_count_at_last_idle_gc_ == gc_count_) {
4292 // No GC since the last full GC, the mutator is probably not active.
4293 isolate_->compilation_cache()->Clear();
4296 CollectAllGarbage(kNoGCFlags, "idle notification: finalize incremental");
4297 mark_sweeps_since_idle_round_started_++;
4298 gc_count_at_last_idle_gc_ = gc_count_;
4300 new_space_.Shrink();
4301 UncommitFromSpace();
4307 bool Heap::IdleNotification(int hint) {
4308 // Hints greater than this value indicate that
4309 // the embedder is requesting a lot of GC work.
4310 const int kMaxHint = 1000;
4311 const int kMinHintForIncrementalMarking = 10;
4312 // Minimal hint that allows a full GC.
4313 const int kMinHintForFullGC = 100;
4314 intptr_t size_factor = Min(Max(hint, 20), kMaxHint) / 4;
4315 // The size factor is in range [5..250]. The numbers here are chosen from
4316 // experiments. If you change them, make sure to test with
4317 // chrome/performance_ui_tests --gtest_filter="GeneralMixMemoryTest.*
4318 intptr_t step_size =
4319 size_factor * IncrementalMarking::kAllocatedThreshold;
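// Worked example: hint = 20 clamps to 20 and gives size_factor 5; the
// maximal hint of 1000 gives 250, matching the [5..250] range above.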
4321 if (contexts_disposed_ > 0) {
4322 contexts_disposed_ = 0;
4323 int mark_sweep_time = Min(TimeMarkSweepWouldTakeInMs(), 1000);
4324 if (hint >= mark_sweep_time && !FLAG_expose_gc &&
4325 incremental_marking()->IsStopped()) {
4326 HistogramTimerScope scope(isolate_->counters()->gc_context());
4327 CollectAllGarbage(kReduceMemoryFootprintMask,
4328 "idle notification: contexts disposed");
4330 AdvanceIdleIncrementalMarking(step_size);
4333 // After context disposal there is likely a lot of garbage remaining, reset
4334 // the idle notification counters in order to trigger more incremental GCs
4335 // on subsequent idle notifications.
4340 if (!FLAG_incremental_marking || isolate_->serializer_enabled()) {
4341 return IdleGlobalGC();
4344 // By doing small chunks of GC work in each IdleNotification,
4345 // perform a round of incremental GCs and after that wait until
4346 // the mutator creates enough garbage to justify a new round.
4347 // An incremental GC progresses as follows:
4348 // 1. many incremental marking steps,
4349 // 2. one old space mark-sweep-compact,
4350 // Use mark-sweep-compact events to count incremental GCs in a round.
4352 if (mark_sweeps_since_idle_round_started_ >= kMaxMarkSweepsInIdleRound) {
4353 if (EnoughGarbageSinceLastIdleRound()) {
4360 int remaining_mark_sweeps = kMaxMarkSweepsInIdleRound -
4361 mark_sweeps_since_idle_round_started_;
4363 if (incremental_marking()->IsStopped()) {
4364 // If there are no more than two GCs left in this idle round and we are
4365 // allowed to do a full GC, then make those GCs full in order to compact
4366 // the code space.
4367 // TODO(ulan): Once we enable code compaction for incremental marking,
4368 // we can get rid of this special case and always start incremental marking.
4369 if (remaining_mark_sweeps <= 2 && hint >= kMinHintForFullGC) {
4370 CollectAllGarbage(kReduceMemoryFootprintMask,
4371 "idle notification: finalize idle round");
4372 mark_sweeps_since_idle_round_started_++;
4373 } else if (hint > kMinHintForIncrementalMarking) {
4374 incremental_marking()->Start();
4377 if (!incremental_marking()->IsStopped() &&
4378 hint > kMinHintForIncrementalMarking) {
4379 AdvanceIdleIncrementalMarking(step_size);
4382 if (mark_sweeps_since_idle_round_started_ >= kMaxMarkSweepsInIdleRound) {
4387 // If the IdleNotification is called with a large hint we will wait for
4388 // the sweeper threads here.
4389 if (hint >= kMinHintForFullGC &&
4390 mark_compact_collector()->IsConcurrentSweepingInProgress()) {
4391 mark_compact_collector()->WaitUntilSweepingCompleted();
4398 bool Heap::IdleGlobalGC() {
4399 static const int kIdlesBeforeScavenge = 4;
4400 static const int kIdlesBeforeMarkSweep = 7;
4401 static const int kIdlesBeforeMarkCompact = 8;
4402 static const int kMaxIdleCount = kIdlesBeforeMarkCompact + 1;
4403 static const unsigned int kGCsBetweenCleanup = 4;
4405 if (!last_idle_notification_gc_count_init_) {
4406 last_idle_notification_gc_count_ = gc_count_;
4407 last_idle_notification_gc_count_init_ = true;
4410 bool uncommit = true;
4411 bool finished = false;
4413 // Reset the number of idle notifications received when a number of
4414 // GCs have taken place. This allows another round of cleanup based
4415 // on idle notifications if enough work has been carried out to
4416 // provoke a number of garbage collections.
4417 if (gc_count_ - last_idle_notification_gc_count_ < kGCsBetweenCleanup) {
4418 number_idle_notifications_ =
4419 Min(number_idle_notifications_ + 1, kMaxIdleCount);
4421 number_idle_notifications_ = 0;
4422 last_idle_notification_gc_count_ = gc_count_;
4425 if (number_idle_notifications_ == kIdlesBeforeScavenge) {
4426 CollectGarbage(NEW_SPACE, "idle notification");
4427 new_space_.Shrink();
4428 last_idle_notification_gc_count_ = gc_count_;
4429 } else if (number_idle_notifications_ == kIdlesBeforeMarkSweep) {
4430 // Before doing the mark-sweep collections we clear the
4431 // compilation cache to avoid hanging on to source code and
4432 // generated code for cached functions.
4433 isolate_->compilation_cache()->Clear();
4435 CollectAllGarbage(kReduceMemoryFootprintMask, "idle notification");
4436 new_space_.Shrink();
4437 last_idle_notification_gc_count_ = gc_count_;
4439 } else if (number_idle_notifications_ == kIdlesBeforeMarkCompact) {
4440 CollectAllGarbage(kReduceMemoryFootprintMask, "idle notification");
4441 new_space_.Shrink();
4442 last_idle_notification_gc_count_ = gc_count_;
4443 number_idle_notifications_ = 0;
4445 } else if (number_idle_notifications_ > kIdlesBeforeMarkCompact) {
4446 // If we have received more than kIdlesBeforeMarkCompact idle
4447 // notifications we do not perform any cleanup because we don't
4448 // expect to gain much by doing so.
4452 if (uncommit) UncommitFromSpace();
4460 void Heap::Print() {
4461 if (!HasBeenSetUp()) return;
4462 isolate()->PrintStack(stdout);
4463 AllSpaces spaces(this);
4464 for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
4470 void Heap::ReportCodeStatistics(const char* title) {
4471 PrintF(">>>>>> Code Stats (%s) >>>>>>\n", title);
4472 PagedSpace::ResetCodeStatistics(isolate());
4473 // We do not look for code in new space, map space, or old space. If code
4474 // somehow ends up in those spaces, we would miss it here.
4475 code_space_->CollectCodeStatistics();
4476 lo_space_->CollectCodeStatistics();
4477 PagedSpace::ReportCodeStatistics(isolate());
4481 // This function expects that NewSpace's allocated objects histogram is
4482 // populated (via a call to CollectStatistics or else as a side effect of a
4483 // just-completed scavenge collection).
4484 void Heap::ReportHeapStatistics(const char* title) {
4486 PrintF(">>>>>> =============== %s (%d) =============== >>>>>>\n",
4488 PrintF("old_generation_allocation_limit_ %" V8_PTR_PREFIX "d\n",
4489 old_generation_allocation_limit_);
4492 PrintF("Number of handles : %d\n", HandleScope::NumberOfHandles(isolate_));
4493 isolate_->global_handles()->PrintStats();
4496 PrintF("Heap statistics : ");
4497 isolate_->memory_allocator()->ReportStatistics();
4498 PrintF("To space : ");
4499 new_space_.ReportStatistics();
4500 PrintF("Old pointer space : ");
4501 old_pointer_space_->ReportStatistics();
4502 PrintF("Old data space : ");
4503 old_data_space_->ReportStatistics();
4504 PrintF("Code space : ");
4505 code_space_->ReportStatistics();
4506 PrintF("Map space : ");
4507 map_space_->ReportStatistics();
4508 PrintF("Cell space : ");
4509 cell_space_->ReportStatistics();
4510 PrintF("PropertyCell space : ");
4511 property_cell_space_->ReportStatistics();
4512 PrintF("Large object space : ");
4513 lo_space_->ReportStatistics();
4514 PrintF(">>>>>> ========================================= >>>>>>\n");
4519 bool Heap::Contains(HeapObject* value) {
4520 return Contains(value->address());
4524 bool Heap::Contains(Address addr) {
4525 if (isolate_->memory_allocator()->IsOutsideAllocatedSpace(addr)) return false;
4526 return HasBeenSetUp() &&
4527 (new_space_.ToSpaceContains(addr) ||
4528 old_pointer_space_->Contains(addr) ||
4529 old_data_space_->Contains(addr) ||
4530 code_space_->Contains(addr) ||
4531 map_space_->Contains(addr) ||
4532 cell_space_->Contains(addr) ||
4533 property_cell_space_->Contains(addr) ||
4534 lo_space_->SlowContains(addr));
4538 bool Heap::InSpace(HeapObject* value, AllocationSpace space) {
4539 return InSpace(value->address(), space);
4543 bool Heap::InSpace(Address addr, AllocationSpace space) {
4544 if (isolate_->memory_allocator()->IsOutsideAllocatedSpace(addr)) return false;
4545 if (!HasBeenSetUp()) return false;
4549 return new_space_.ToSpaceContains(addr);
4550 case OLD_POINTER_SPACE:
4551 return old_pointer_space_->Contains(addr);
4552 case OLD_DATA_SPACE:
4553 return old_data_space_->Contains(addr);
4555 return code_space_->Contains(addr);
4557 return map_space_->Contains(addr);
4559 return cell_space_->Contains(addr);
4560 case PROPERTY_CELL_SPACE:
4561 return property_cell_space_->Contains(addr);
4563 return lo_space_->SlowContains(addr);
4573 void Heap::Verify() {
4574 CHECK(HasBeenSetUp());
4575 HandleScope scope(isolate());
4577 store_buffer()->Verify();
4579 VerifyPointersVisitor visitor;
4580 IterateRoots(&visitor, VISIT_ONLY_STRONG);
4582 VerifySmisVisitor smis_visitor;
4583 IterateSmiRoots(&smis_visitor);
4585 new_space_.Verify();
4587 old_pointer_space_->Verify(&visitor);
4588 map_space_->Verify(&visitor);
4590 VerifyPointersVisitor no_dirty_regions_visitor;
4591 old_data_space_->Verify(&no_dirty_regions_visitor);
4592 code_space_->Verify(&no_dirty_regions_visitor);
4593 cell_space_->Verify(&no_dirty_regions_visitor);
4594 property_cell_space_->Verify(&no_dirty_regions_visitor);
4596 lo_space_->Verify();
4601 void Heap::ZapFromSpace() {
4602 NewSpacePageIterator it(new_space_.FromSpaceStart(),
4603 new_space_.FromSpaceEnd());
4604 while (it.has_next()) {
4605 NewSpacePage* page = it.next();
4606 for (Address cursor = page->area_start(), limit = page->area_end();
4608 cursor += kPointerSize) {
4609 Memory::Address_at(cursor) = kFromSpaceZapValue;
4615 void Heap::IterateAndMarkPointersToFromSpace(Address start,
4617 ObjectSlotCallback callback) {
4618 Address slot_address = start;
4620 // We are not collecting slots on new space objects during mutation,
4621 // thus we have to scan for pointers to evacuation candidates when we
4622 // promote objects. But we should not record any slots in non-black
4623 // objects. Grey objects' slots would be rescanned.
4624 // White objects might not survive until the end of the collection, so
4625 // it would be a violation of the invariant to record their slots.
4626 bool record_slots = false;
4627 if (incremental_marking()->IsCompacting()) {
4628 MarkBit mark_bit = Marking::MarkBitFrom(HeapObject::FromAddress(start));
4629 record_slots = Marking::IsBlack(mark_bit);
4632 while (slot_address < end) {
4633 Object** slot = reinterpret_cast<Object**>(slot_address);
4634 Object* object = *slot;
4635 // If the store buffer becomes overfull we mark pages as being exempt from
4636 // the store buffer. These pages are scanned to find pointers that point
4637 // to the new space. In that case we may hit newly promoted objects and
4638 // fix the pointers before the promotion queue gets to them. Thus the 'if'.
4639 if (object->IsHeapObject()) {
4640 if (Heap::InFromSpace(object)) {
4641 callback(reinterpret_cast<HeapObject**>(slot),
4642 HeapObject::cast(object));
4643 Object* new_object = *slot;
4644 if (InNewSpace(new_object)) {
4645 SLOW_ASSERT(Heap::InToSpace(new_object));
4646 SLOW_ASSERT(new_object->IsHeapObject());
4647 store_buffer_.EnterDirectlyIntoStoreBuffer(
4648 reinterpret_cast<Address>(slot));
4650 SLOW_ASSERT(!MarkCompactCollector::IsOnEvacuationCandidate(new_object));
4651 } else if (record_slots &&
4652 MarkCompactCollector::IsOnEvacuationCandidate(object)) {
4653 mark_compact_collector()->RecordSlot(slot, slot, object);
4656 slot_address += kPointerSize;
4662 typedef bool (*CheckStoreBufferFilter)(Object** addr);
4665 bool IsAMapPointerAddress(Object** addr) {
4666 uintptr_t a = reinterpret_cast<uintptr_t>(addr);
4667 int mod = a % Map::kSize;
4668 return mod >= Map::kPointerFieldsBeginOffset &&
4669 mod < Map::kPointerFieldsEndOffset;
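// This appears to assume that maps are packed in map space at
// Map::kSize-aligned addresses, so that addr % Map::kSize recovers the
// field offset within the containing map; the range test then asks
// whether the slot lies among the map's pointer fields.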
4673 bool EverythingsAPointer(Object** addr) {
4678 static void CheckStoreBuffer(Heap* heap,
4681 Object**** store_buffer_position,
4682 Object*** store_buffer_top,
4683 CheckStoreBufferFilter filter,
4684 Address special_garbage_start,
4685 Address special_garbage_end) {
4686 Map* free_space_map = heap->free_space_map();
4687 for ( ; current < limit; current++) {
4688 Object* o = *current;
4689 Address current_address = reinterpret_cast<Address>(current);
4691 if (o == free_space_map) {
4692 Address current_address = reinterpret_cast<Address>(current);
4693 FreeSpace* free_space =
4694 FreeSpace::cast(HeapObject::FromAddress(current_address));
4695 int skip = free_space->Size();
4696 ASSERT(current_address + skip <= reinterpret_cast<Address>(limit));
4698 current_address += skip - kPointerSize;
4699 current = reinterpret_cast<Object**>(current_address);
4702 // Skip the current linear allocation space between top and limit, which
4703 // is not marked with the free space map but can contain junk.
4704 if (current_address == special_garbage_start &&
4705 special_garbage_end != special_garbage_start) {
4706 current_address = special_garbage_end - kPointerSize;
4707 current = reinterpret_cast<Object**>(current_address);
4710 if (!(*filter)(current)) continue;
4711 ASSERT(current_address < special_garbage_start ||
4712 current_address >= special_garbage_end);
4713 ASSERT(reinterpret_cast<uintptr_t>(o) != kFreeListZapValue);
4714 // We have to check that the pointer does not point into new space
4715 // without trying to cast it to a heap object since the hash field of
4716 // a string can contain values like 1 and 3 which are tagged null
4717 // pointers.
4718 if (!heap->InNewSpace(o)) continue;
4719 while (**store_buffer_position < current &&
4720 *store_buffer_position < store_buffer_top) {
4721 (*store_buffer_position)++;
4723 if (**store_buffer_position != current ||
4724 *store_buffer_position == store_buffer_top) {
4725 Object** obj_start = current;
4726 while (!(*obj_start)->IsMap()) obj_start--;
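// Failure path: scanning backwards from the offending slot until a map
// word is found recovers the start of the holding object, so the object
// whose slot is missing from the store buffer can be reported.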
4733 // Check that the store buffer contains all intergenerational pointers by
4734 // scanning a page and ensuring that all pointers to young space are in the
4735 // store buffer.
4736 void Heap::OldPointerSpaceCheckStoreBuffer() {
4737 OldSpace* space = old_pointer_space();
4738 PageIterator pages(space);
4740 store_buffer()->SortUniq();
4742 while (pages.has_next()) {
4743 Page* page = pages.next();
4744 Object** current = reinterpret_cast<Object**>(page->area_start());
4746 Address end = page->area_end();
4748 Object*** store_buffer_position = store_buffer()->Start();
4749 Object*** store_buffer_top = store_buffer()->Top();
4751 Object** limit = reinterpret_cast<Object**>(end);
4752 CheckStoreBuffer(this,
4755 &store_buffer_position,
4757 &EverythingsAPointer,
4764 void Heap::MapSpaceCheckStoreBuffer() {
4765 MapSpace* space = map_space();
4766 PageIterator pages(space);
4768 store_buffer()->SortUniq();
4770 while (pages.has_next()) {
4771 Page* page = pages.next();
4772 Object** current = reinterpret_cast<Object**>(page->area_start());
4774 Address end = page->area_end();
4776 Object*** store_buffer_position = store_buffer()->Start();
4777 Object*** store_buffer_top = store_buffer()->Top();
4779 Object** limit = reinterpret_cast<Object**>(end);
4780 CheckStoreBuffer(this,
4783 &store_buffer_position,
4785 &IsAMapPointerAddress,
4792 void Heap::LargeObjectSpaceCheckStoreBuffer() {
4793 LargeObjectIterator it(lo_space());
4794 for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
4795 // We only have code, sequential strings, or fixed arrays in large
4796 // object space, and only fixed arrays can possibly contain pointers to
4797 // the young generation.
4798 if (object->IsFixedArray()) {
4799 Object*** store_buffer_position = store_buffer()->Start();
4800 Object*** store_buffer_top = store_buffer()->Top();
4801 Object** current = reinterpret_cast<Object**>(object->address());
4803 reinterpret_cast<Object**>(object->address() + object->Size());
4804 CheckStoreBuffer(this,
4807 &store_buffer_position,
4809 &EverythingsAPointer,
4818 void Heap::IterateRoots(ObjectVisitor* v, VisitMode mode) {
4819 IterateStrongRoots(v, mode);
4820 IterateWeakRoots(v, mode);
4824 void Heap::IterateWeakRoots(ObjectVisitor* v, VisitMode mode) {
4825 v->VisitPointer(reinterpret_cast<Object**>(&roots_[kStringTableRootIndex]));
4826 v->Synchronize(VisitorSynchronization::kStringTable);
4827 if (mode != VISIT_ALL_IN_SCAVENGE &&
4828 mode != VISIT_ALL_IN_SWEEP_NEWSPACE) {
4829 // Scavenge collections have special processing for this.
4830 external_string_table_.Iterate(v);
4832 v->Synchronize(VisitorSynchronization::kExternalStringsTable);
4836 void Heap::IterateSmiRoots(ObjectVisitor* v) {
4837 // Acquire execution access since we are going to read stack limit values.
4838 ExecutionAccess access(isolate());
4839 v->VisitPointers(&roots_[kSmiRootsStart], &roots_[kRootListLength]);
4840 v->Synchronize(VisitorSynchronization::kSmiRootList);
4844 void Heap::IterateStrongRoots(ObjectVisitor* v, VisitMode mode) {
4845 v->VisitPointers(&roots_[0], &roots_[kStrongRootListLength]);
4846 v->Synchronize(VisitorSynchronization::kStrongRootList);
4848 v->VisitPointer(BitCast<Object**>(&hidden_string_));
4849 v->Synchronize(VisitorSynchronization::kInternalizedString);
4851 isolate_->bootstrapper()->Iterate(v);
4852 v->Synchronize(VisitorSynchronization::kBootstrapper);
4853 isolate_->Iterate(v);
4854 v->Synchronize(VisitorSynchronization::kTop);
4855 Relocatable::Iterate(isolate_, v);
4856 v->Synchronize(VisitorSynchronization::kRelocatable);
4858 if (isolate_->deoptimizer_data() != NULL) {
4859 isolate_->deoptimizer_data()->Iterate(v);
4861 v->Synchronize(VisitorSynchronization::kDebug);
4862 isolate_->compilation_cache()->Iterate(v);
4863 v->Synchronize(VisitorSynchronization::kCompilationCache);
4865 // Iterate over local handles in handle scopes.
4866 isolate_->handle_scope_implementer()->Iterate(v);
4867 isolate_->IterateDeferredHandles(v);
4868 v->Synchronize(VisitorSynchronization::kHandleScope);
4870 // Iterate over the builtin code objects and code stubs in the
4871 // heap. Note that it is not necessary to iterate over code objects
4872 // on scavenge collections.
4873 if (mode != VISIT_ALL_IN_SCAVENGE) {
4874 isolate_->builtins()->IterateBuiltins(v);
4876 v->Synchronize(VisitorSynchronization::kBuiltins);
4878 // Iterate over global handles.
4880 case VISIT_ONLY_STRONG:
4881 isolate_->global_handles()->IterateStrongRoots(v);
4883 case VISIT_ALL_IN_SCAVENGE:
4884 isolate_->global_handles()->IterateNewSpaceStrongAndDependentRoots(v);
4886 case VISIT_ALL_IN_SWEEP_NEWSPACE:
4888 isolate_->global_handles()->IterateAllRoots(v);
4891 v->Synchronize(VisitorSynchronization::kGlobalHandles);
4893 // Iterate over eternal handles.
4894 if (mode == VISIT_ALL_IN_SCAVENGE) {
4895 isolate_->eternal_handles()->IterateNewSpaceRoots(v);
4897 isolate_->eternal_handles()->IterateAllRoots(v);
4899 v->Synchronize(VisitorSynchronization::kEternalHandles);
4901 // Iterate over pointers being held by inactive threads.
4902 isolate_->thread_manager()->Iterate(v);
4903 v->Synchronize(VisitorSynchronization::kThreadManager);
4905 // Iterate over the pointers the Serialization/Deserialization code is
4906 // holding.
4907 // During garbage collection this keeps the partial snapshot cache alive.
4908 // During deserialization of the startup snapshot this creates the partial
4909 // snapshot cache and deserializes the objects it refers to. During
4910 // serialization this does nothing, since the partial snapshot cache is
4911 // empty. However the next thing we do is create the partial snapshot,
4912 // filling up the partial snapshot cache with objects it needs as we go.
4913 SerializerDeserializer::Iterate(isolate_, v);
4914 // We don't do a v->Synchronize call here, because in debug mode that will
4915 // output a flag to the snapshot. However at this point the serializer and
4916 // deserializer are deliberately a little unsynchronized (see above) so the
4917 // checking of the sync flag in the snapshot would fail.
4921 // TODO(1236194): Since the heap size is configurable on the command line
4922 // and through the API, we should gracefully handle the case that the heap
4923 // size is not big enough to fit all the initial objects.
4924 bool Heap::ConfigureHeap(int max_semi_space_size,
4925 int max_old_space_size,
4926 int max_executable_size,
4927 size_t code_range_size) {
4928 if (HasBeenSetUp()) return false;
4930 // Overwrite default configuration.
4931 if (max_semi_space_size > 0) {
4932 max_semi_space_size_ = max_semi_space_size * MB;
4934 if (max_old_space_size > 0) {
4935 max_old_generation_size_ = max_old_space_size * MB;
4937 if (max_executable_size > 0) {
4938 max_executable_size_ = max_executable_size * MB;
4941 // If max space size flags are specified overwrite the configuration.
4942 if (FLAG_max_semi_space_size > 0) {
4943 max_semi_space_size_ = FLAG_max_semi_space_size * MB;
4945 if (FLAG_max_old_space_size > 0) {
4946 max_old_generation_size_ = FLAG_max_old_space_size * MB;
4948 if (FLAG_max_executable_size > 0) {
4949 max_executable_size_ = FLAG_max_executable_size * MB;
4952 if (FLAG_stress_compaction) {
4953 // This will cause more frequent GCs when stressing.
4954 max_semi_space_size_ = Page::kPageSize;
4957 if (Snapshot::IsEnabled()) {
4958 // If we are using a snapshot we always reserve the default amount
4959 // of memory for each semispace because code in the snapshot has
4960 // write-barrier code that relies on the size and alignment of new
4961 // space. We therefore cannot use a larger max semispace size
4962 // than the default reserved semispace size.
4963 if (max_semi_space_size_ > reserved_semispace_size_) {
4964 max_semi_space_size_ = reserved_semispace_size_;
4965 if (FLAG_trace_gc) {
4966 PrintPID("Max semi-space size cannot be more than %d kbytes\n",
4967 reserved_semispace_size_ >> 10);
4971 // If we are not using snapshots we reserve space for the actual
4972 // max semispace size.
4973 reserved_semispace_size_ = max_semi_space_size_;
4976 // The max executable size must be less than or equal to the max old
4977 // generation size.
4978 if (max_executable_size_ > max_old_generation_size_) {
4979 max_executable_size_ = max_old_generation_size_;
4982 // The new space size must be a power of two to support single-bit testing
4983 // for containment.
4984 max_semi_space_size_ = RoundUpToPowerOf2(max_semi_space_size_);
4985 reserved_semispace_size_ = RoundUpToPowerOf2(reserved_semispace_size_);
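// E.g. a 3 MB max semispace request rounds up to 4 MB here, which keeps
// the single-bit containment test mentioned above valid and cheap.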
4987 if (FLAG_min_semi_space_size > 0) {
4988 int initial_semispace_size = FLAG_min_semi_space_size * MB;
4989 if (initial_semispace_size > max_semi_space_size_) {
4990 initial_semispace_size_ = max_semi_space_size_;
4991 if (FLAG_trace_gc) {
4992 PrintPID("Min semi-space size cannot be more than the maximum "
4993 "semi-space size of %d MB\n", max_semi_space_size_);
4996 initial_semispace_size_ = initial_semispace_size;
5000 initial_semispace_size_ = Min(initial_semispace_size_, max_semi_space_size_);
5002 // The old generation is paged and needs at least one page for each space.
5003 int paged_space_count = LAST_PAGED_SPACE - FIRST_PAGED_SPACE + 1;
5004 max_old_generation_size_ =
5005 Max(static_cast<intptr_t>(paged_space_count * Page::kPageSize),
5006 max_old_generation_size_);
5008 // We rely on being able to allocate new arrays in paged spaces.
5009 ASSERT(Page::kMaxRegularHeapObjectSize >=
5011 FixedArray::SizeFor(JSObject::kInitialMaxFastElementArray) +
5012 AllocationMemento::kSize));
5014 code_range_size_ = code_range_size * MB;
5021 bool Heap::ConfigureHeapDefault() {
5022 return ConfigureHeap(0, 0, 0, 0);
5026 void Heap::RecordStats(HeapStats* stats, bool take_snapshot) {
5027 *stats->start_marker = HeapStats::kStartMarker;
5028 *stats->end_marker = HeapStats::kEndMarker;
5029 *stats->new_space_size = new_space_.SizeAsInt();
5030 *stats->new_space_capacity = static_cast<int>(new_space_.Capacity());
5031 *stats->old_pointer_space_size = old_pointer_space_->SizeOfObjects();
5032 *stats->old_pointer_space_capacity = old_pointer_space_->Capacity();
5033 *stats->old_data_space_size = old_data_space_->SizeOfObjects();
5034 *stats->old_data_space_capacity = old_data_space_->Capacity();
5035 *stats->code_space_size = code_space_->SizeOfObjects();
5036 *stats->code_space_capacity = code_space_->Capacity();
5037 *stats->map_space_size = map_space_->SizeOfObjects();
5038 *stats->map_space_capacity = map_space_->Capacity();
5039 *stats->cell_space_size = cell_space_->SizeOfObjects();
5040 *stats->cell_space_capacity = cell_space_->Capacity();
5041 *stats->property_cell_space_size = property_cell_space_->SizeOfObjects();
5042 *stats->property_cell_space_capacity = property_cell_space_->Capacity();
5043 *stats->lo_space_size = lo_space_->Size();
5044 isolate_->global_handles()->RecordStats(stats);
5045 *stats->memory_allocator_size = isolate()->memory_allocator()->Size();
5046 *stats->memory_allocator_capacity =
5047 isolate()->memory_allocator()->Size() +
5048 isolate()->memory_allocator()->Available();
5049 *stats->os_error = OS::GetLastError();
5051 if (take_snapshot) {
5052 HeapIterator iterator(this);
5053 for (HeapObject* obj = iterator.next();
5055 obj = iterator.next()) {
5056 InstanceType type = obj->map()->instance_type();
5057 ASSERT(0 <= type && type <= LAST_TYPE);
5058 stats->objects_per_type[type]++;
5059 stats->size_per_type[type] += obj->Size();
5065 intptr_t Heap::PromotedSpaceSizeOfObjects() {
5066 return old_pointer_space_->SizeOfObjects()
5067 + old_data_space_->SizeOfObjects()
5068 + code_space_->SizeOfObjects()
5069 + map_space_->SizeOfObjects()
5070 + cell_space_->SizeOfObjects()
5071 + property_cell_space_->SizeOfObjects()
5072 + lo_space_->SizeOfObjects();
5076 int64_t Heap::PromotedExternalMemorySize() {
5077 if (amount_of_external_allocated_memory_
5078 <= amount_of_external_allocated_memory_at_last_global_gc_) return 0;
5079 return amount_of_external_allocated_memory_
5080 - amount_of_external_allocated_memory_at_last_global_gc_;
5084 intptr_t Heap::OldGenerationAllocationLimit(intptr_t old_gen_size,
5085 int freed_global_handles) {
5086 const int kMaxHandles = 1000;
5087 const int kMinHandles = 100;
5088 double min_factor = 1.1;
5089 double max_factor = 4;
5090 // We set the old generation growing factor to 2 to grow the heap slower on
5091 // memory-constrained devices.
5092 if (max_old_generation_size_ <= kMaxOldSpaceSizeMediumMemoryDevice) {
5095 // If there are many freed global handles, then the next full GC will
5096 // likely collect a lot of garbage. Choose the heap growing factor
5097 // depending on freed global handles.
5098 // TODO(ulan, hpayer): Take into account mutator utilization.
5100 if (freed_global_handles <= kMinHandles) {
5101 factor = max_factor;
5102 } else if (freed_global_handles >= kMaxHandles) {
5103 factor = min_factor;
5105 // Compute factor using linear interpolation between points
5106 // (kMinHandles, max_factor) and (kMaxHandles, min_factor).
5107 factor = max_factor -
5108 (freed_global_handles - kMinHandles) * (max_factor - min_factor) /
5109 (kMaxHandles - kMinHandles);
5112 if (FLAG_stress_compaction ||
5113 mark_compact_collector()->reduce_memory_footprint_) {
5114 factor = min_factor;
5117 intptr_t limit = static_cast<intptr_t>(old_gen_size * factor);
5118 limit = Max(limit, kMinimumOldGenerationAllocationLimit);
5119 limit += new_space_.Capacity();
5120 intptr_t halfway_to_the_max = (old_gen_size + max_old_generation_size_) / 2;
5121 return Min(limit, halfway_to_the_max);
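// Worked example of the interpolation above, with the default max_factor
// of 4: freed_global_handles = 550 (midway between kMinHandles = 100 and
// kMaxHandles = 1000) gives factor = 4 - 450 * 2.9 / 900 = 2.55, before
// the limit is clamped against kMinimumOldGenerationAllocationLimit and
// halfway_to_the_max.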
5125 void Heap::EnableInlineAllocation() {
5126 if (!inline_allocation_disabled_) return;
5127 inline_allocation_disabled_ = false;
5129 // Update inline allocation limit for new space.
5130 new_space()->UpdateInlineAllocationLimit(0);
5134 void Heap::DisableInlineAllocation() {
5135 if (inline_allocation_disabled_) return;
5136 inline_allocation_disabled_ = true;
5138 // Update inline allocation limit for new space.
5139 new_space()->UpdateInlineAllocationLimit(0);
5141 // Update inline allocation limit for old spaces.
5142 PagedSpaces spaces(this);
5143 for (PagedSpace* space = spaces.next();
5145 space = spaces.next()) {
5146 space->EmptyAllocationInfo();
5151 V8_DECLARE_ONCE(initialize_gc_once);
5153 static void InitializeGCOnce() {
5154 InitializeScavengingVisitorsTables();
5155 NewSpaceScavenger::Initialize();
5156 MarkCompactCollector::Initialize();
5160 bool Heap::SetUp() {
5162 allocation_timeout_ = FLAG_gc_interval;
5165 // Initialize heap spaces and initial maps and objects. Whenever something
5166 // goes wrong, just return false. The caller should check the results and
5167 // call Heap::TearDown() to release allocated memory.
5169 // If the heap is not yet configured (e.g. through the API), configure it.
5170 // Configuration is based on the flags new-space-size (really the semispace
5171 // size) and old-space-size if set or the initial values of semispace_size_
5172 // and old_generation_size_ otherwise.
5174 if (!ConfigureHeapDefault()) return false;
5177 base::CallOnce(&initialize_gc_once, &InitializeGCOnce);
5179 MarkMapPointersAsEncoded(false);
5181 // Set up memory allocator.
5182 if (!isolate_->memory_allocator()->SetUp(MaxReserved(), MaxExecutableSize()))
5185 // Set up new space.
5186 if (!new_space_.SetUp(reserved_semispace_size_, max_semi_space_size_)) {
5189 new_space_top_after_last_gc_ = new_space()->top();
5191 // Initialize old pointer space.
5192 old_pointer_space_ =
5194 max_old_generation_size_,
5197 if (old_pointer_space_ == NULL) return false;
5198 if (!old_pointer_space_->SetUp()) return false;
5200 // Initialize old data space.
5203 max_old_generation_size_,
5206 if (old_data_space_ == NULL) return false;
5207 if (!old_data_space_->SetUp()) return false;
5209 if (!isolate_->code_range()->SetUp(code_range_size_)) return false;
5211 // Initialize the code space, set its maximum capacity to the old
5212 // generation size. It needs executable memory.
5214 new OldSpace(this, max_old_generation_size_, CODE_SPACE, EXECUTABLE);
5215 if (code_space_ == NULL) return false;
5216 if (!code_space_->SetUp()) return false;
5218 // Initialize map space.
5219 map_space_ = new MapSpace(this, max_old_generation_size_, MAP_SPACE);
5220 if (map_space_ == NULL) return false;
5221 if (!map_space_->SetUp()) return false;
5223 // Initialize simple cell space.
5224 cell_space_ = new CellSpace(this, max_old_generation_size_, CELL_SPACE);
5225 if (cell_space_ == NULL) return false;
5226 if (!cell_space_->SetUp()) return false;
5228 // Initialize global property cell space.
5229 property_cell_space_ = new PropertyCellSpace(this, max_old_generation_size_,
5230 PROPERTY_CELL_SPACE);
5231 if (property_cell_space_ == NULL) return false;
5232 if (!property_cell_space_->SetUp()) return false;
5234 // The large object code space may contain code or data. We set the memory
5235 // to be non-executable here for safety, but this means we need to enable it
5236 // explicitly when allocating large code objects.
5237 lo_space_ = new LargeObjectSpace(this, max_old_generation_size_, LO_SPACE);
5238 if (lo_space_ == NULL) return false;
5239 if (!lo_space_->SetUp()) return false;
5241 // Set up the seed that is used to randomize the string hash function.
5242 ASSERT(hash_seed() == 0);
5243 if (FLAG_randomize_hashes) {
5244 if (FLAG_hash_seed == 0) {
5245 int rnd = isolate()->random_number_generator()->NextInt();
5246 set_hash_seed(Smi::FromInt(rnd & Name::kHashBitMask));
5248 set_hash_seed(Smi::FromInt(FLAG_hash_seed));
5252 LOG(isolate_, IntPtrTEvent("heap-capacity", Capacity()));
5253 LOG(isolate_, IntPtrTEvent("heap-available", Available()));
5255 store_buffer()->SetUp();
5257 mark_compact_collector()->SetUp();
5263 bool Heap::CreateHeapObjects() {
5264 // Create initial maps.
5265 if (!CreateInitialMaps()) return false;
5268 // Create initial objects
5269 CreateInitialObjects();
5270 CHECK_EQ(0, gc_count_);
5272 set_native_contexts_list(undefined_value());
5273 set_array_buffers_list(undefined_value());
5274 set_allocation_sites_list(undefined_value());
5275 weak_object_to_code_table_ = undefined_value();


void Heap::SetStackLimits() {
  ASSERT(isolate_ != NULL);
  ASSERT(isolate_ == isolate());
  // On 64 bit machines, pointers are generally out of range of Smis. We write
  // something that looks like an out of range Smi to the GC.

  // Set up the special root array entries containing the stack limits.
  // These are actually addresses, but the tag makes the GC ignore it.
  roots_[kStackLimitRootIndex] =
      reinterpret_cast<Object*>(
          (isolate_->stack_guard()->jslimit() & ~kSmiTagMask) | kSmiTag);
  roots_[kRealStackLimitRootIndex] =
      reinterpret_cast<Object*>(
          (isolate_->stack_guard()->real_jslimit() & ~kSmiTagMask) | kSmiTag);
}
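

// Illustrative sketch (not part of the original source): the address-to-Smi
// disguise used above. Clearing the low tag bits and installing the Smi tag
// makes an arbitrary address look like a small integer, so the GC skips it
// instead of treating it as a heap pointer. The tag constants here are
// hypothetical stand-ins for kSmiTagMask and kSmiTag.
namespace example {

static const unsigned long kTagMask = 1;  // Low bit distinguishes Smis.
static const unsigned long kTag = 0;      // Smi tag value.

static unsigned long DisguiseAsSmi(unsigned long address) {
  // The result differs from the address only in the tag bits; anything that
  // carries the Smi tag is ignored by the collector.
  return (address & ~kTagMask) | kTag;
}

}  // namespace example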


void Heap::TearDown() {
  if (FLAG_verify_heap) {
    Verify();
  }

  UpdateMaximumCommitted();

  if (FLAG_print_cumulative_gc_stat) {
    PrintF("gc_count=%d ", gc_count_);
    PrintF("mark_sweep_count=%d ", ms_count_);
    PrintF("max_gc_pause=%.1f ", get_max_gc_pause());
    PrintF("total_gc_time=%.1f ", total_gc_time_ms_);
    PrintF("min_in_mutator=%.1f ", get_min_in_mutator());
    PrintF("max_alive_after_gc=%" V8_PTR_PREFIX "d ",
           get_max_alive_after_gc());
    PrintF("total_marking_time=%.1f ", marking_time());
    PrintF("total_sweeping_time=%.1f ", sweeping_time());
  }

  if (FLAG_print_max_heap_committed) {
    PrintF("maximum_committed_by_heap=%" V8_PTR_PREFIX "d ",
           MaximumCommittedMemory());
    PrintF("maximum_committed_by_new_space=%" V8_PTR_PREFIX "d ",
           new_space_.MaximumCommittedMemory());
    PrintF("maximum_committed_by_old_pointer_space=%" V8_PTR_PREFIX "d ",
           old_pointer_space_->MaximumCommittedMemory());
    PrintF("maximum_committed_by_old_data_space=%" V8_PTR_PREFIX "d ",
           old_data_space_->MaximumCommittedMemory());
    PrintF("maximum_committed_by_code_space=%" V8_PTR_PREFIX "d ",
           code_space_->MaximumCommittedMemory());
    PrintF("maximum_committed_by_map_space=%" V8_PTR_PREFIX "d ",
           map_space_->MaximumCommittedMemory());
    PrintF("maximum_committed_by_cell_space=%" V8_PTR_PREFIX "d ",
           cell_space_->MaximumCommittedMemory());
    PrintF("maximum_committed_by_property_space=%" V8_PTR_PREFIX "d ",
           property_cell_space_->MaximumCommittedMemory());
    PrintF("maximum_committed_by_lo_space=%" V8_PTR_PREFIX "d ",
           lo_space_->MaximumCommittedMemory());
  }

  TearDownArrayBuffers();

  isolate_->global_handles()->TearDown();

  external_string_table_.TearDown();

  mark_compact_collector()->TearDown();

  new_space_.TearDown();

  if (old_pointer_space_ != NULL) {
    old_pointer_space_->TearDown();
    delete old_pointer_space_;
    old_pointer_space_ = NULL;
  }

  if (old_data_space_ != NULL) {
    old_data_space_->TearDown();
    delete old_data_space_;
    old_data_space_ = NULL;
  }

  if (code_space_ != NULL) {
    code_space_->TearDown();
    delete code_space_;
    code_space_ = NULL;
  }

  if (map_space_ != NULL) {
    map_space_->TearDown();
    delete map_space_;
    map_space_ = NULL;
  }

  if (cell_space_ != NULL) {
    cell_space_->TearDown();
    delete cell_space_;
    cell_space_ = NULL;
  }

  if (property_cell_space_ != NULL) {
    property_cell_space_->TearDown();
    delete property_cell_space_;
    property_cell_space_ = NULL;
  }

  if (lo_space_ != NULL) {
    lo_space_->TearDown();
    delete lo_space_;
    lo_space_ = NULL;
  }

  store_buffer()->TearDown();
  incremental_marking()->TearDown();

  isolate_->memory_allocator()->TearDown();
}


void Heap::AddGCPrologueCallback(v8::Isolate::GCPrologueCallback callback,
                                 GCType gc_type,
                                 bool pass_isolate) {
  ASSERT(callback != NULL);
  GCPrologueCallbackPair pair(callback, gc_type, pass_isolate);
  ASSERT(!gc_prologue_callbacks_.Contains(pair));
  return gc_prologue_callbacks_.Add(pair);
}


void Heap::RemoveGCPrologueCallback(v8::Isolate::GCPrologueCallback callback) {
  ASSERT(callback != NULL);
  for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
    if (gc_prologue_callbacks_[i].callback == callback) {
      gc_prologue_callbacks_.Remove(i);
      return;
    }
  }
  UNREACHABLE();
}


void Heap::AddGCEpilogueCallback(v8::Isolate::GCEpilogueCallback callback,
                                 GCType gc_type,
                                 bool pass_isolate) {
  ASSERT(callback != NULL);
  GCEpilogueCallbackPair pair(callback, gc_type, pass_isolate);
  ASSERT(!gc_epilogue_callbacks_.Contains(pair));
  return gc_epilogue_callbacks_.Add(pair);
}


void Heap::RemoveGCEpilogueCallback(v8::Isolate::GCEpilogueCallback callback) {
  ASSERT(callback != NULL);
  for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
    if (gc_epilogue_callbacks_[i].callback == callback) {
      gc_epilogue_callbacks_.Remove(i);
      return;
    }
  }
  UNREACHABLE();
}


// TODO(ishell): Find a better place for this.
void Heap::AddWeakObjectToCodeDependency(Handle<Object> obj,
                                         Handle<DependentCode> dep) {
  ASSERT(!InNewSpace(*obj));
  ASSERT(!InNewSpace(*dep));
  // This handle scope keeps the table handle local to this function, which
  // allows us to safely skip write barriers in table update operations.
  HandleScope scope(isolate());
  Handle<WeakHashTable> table(WeakHashTable::cast(weak_object_to_code_table_),
                              isolate());
  table = WeakHashTable::Put(table, obj, dep);

  if (ShouldZapGarbage() && weak_object_to_code_table_ != *table) {
    WeakHashTable::cast(weak_object_to_code_table_)->Zap(the_hole_value());
  }
  set_weak_object_to_code_table(*table);
  ASSERT_EQ(*dep, table->Lookup(obj));
}
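

// Illustrative sketch (not part of the original source): the swap-and-zap
// pattern above in miniature. An update may reallocate the backing store; if
// it did, the stale store is poisoned before the new one is installed, so any
// dangling access fails loudly. All names here are hypothetical.
namespace example {

struct Store { int* data; int capacity; };

static Store* UpdateStore(Store* current, Store* replacement, int zap_value) {
  if (replacement != current) {
    // Poison the old backing store before dropping it, as Zap() does above.
    for (int i = 0; i < current->capacity; i++) current->data[i] = zap_value;
  }
  return replacement;  // Install the (possibly new) store.
}

}  // namespace example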


DependentCode* Heap::LookupWeakObjectToCodeDependency(Handle<Object> obj) {
  Object* dep = WeakHashTable::cast(weak_object_to_code_table_)->Lookup(obj);
  if (dep->IsDependentCode()) return DependentCode::cast(dep);
  return DependentCode::cast(empty_fixed_array());
}


void Heap::EnsureWeakObjectToCodeTable() {
  if (!weak_object_to_code_table()->IsHashTable()) {
    set_weak_object_to_code_table(*WeakHashTable::New(
        isolate(), 16, USE_DEFAULT_MINIMUM_CAPACITY, TENURED));
  }
}


void Heap::FatalProcessOutOfMemory(const char* location, bool take_snapshot) {
  v8::internal::V8::FatalProcessOutOfMemory(location, take_snapshot);
}


class PrintHandleVisitor: public ObjectVisitor {
 public:
  void VisitPointers(Object** start, Object** end) {
    for (Object** p = start; p < end; p++)
      PrintF("  handle %p to %p\n",
             reinterpret_cast<void*>(p),
             reinterpret_cast<void*>(*p));
  }
};


void Heap::PrintHandles() {
  PrintF("Handles:\n");
  PrintHandleVisitor v;
  isolate_->handle_scope_implementer()->Iterate(&v);
}


Space* AllSpaces::next() {
  switch (counter_++) {
    case NEW_SPACE:
      return heap_->new_space();
    case OLD_POINTER_SPACE:
      return heap_->old_pointer_space();
    case OLD_DATA_SPACE:
      return heap_->old_data_space();
    case CODE_SPACE:
      return heap_->code_space();
    case MAP_SPACE:
      return heap_->map_space();
    case CELL_SPACE:
      return heap_->cell_space();
    case PROPERTY_CELL_SPACE:
      return heap_->property_cell_space();
    case LO_SPACE:
      return heap_->lo_space();
    default:
      return NULL;
  }
}


PagedSpace* PagedSpaces::next() {
  switch (counter_++) {
    case OLD_POINTER_SPACE:
      return heap_->old_pointer_space();
    case OLD_DATA_SPACE:
      return heap_->old_data_space();
    case CODE_SPACE:
      return heap_->code_space();
    case MAP_SPACE:
      return heap_->map_space();
    case CELL_SPACE:
      return heap_->cell_space();
    case PROPERTY_CELL_SPACE:
      return heap_->property_cell_space();
    default:
      return NULL;
  }
}


OldSpace* OldSpaces::next() {
  switch (counter_++) {
    case OLD_POINTER_SPACE:
      return heap_->old_pointer_space();
    case OLD_DATA_SPACE:
      return heap_->old_data_space();
    case CODE_SPACE:
      return heap_->code_space();
    default:
      return NULL;
  }
}


SpaceIterator::SpaceIterator(Heap* heap)
    : heap_(heap),
      current_space_(FIRST_SPACE),
      iterator_(NULL),
      size_func_(NULL) {
}


SpaceIterator::SpaceIterator(Heap* heap, HeapObjectCallback size_func)
    : heap_(heap),
      current_space_(FIRST_SPACE),
      iterator_(NULL),
      size_func_(size_func) {
}


SpaceIterator::~SpaceIterator() {
  // Delete active iterator if any.
  delete iterator_;
}


bool SpaceIterator::has_next() {
  // Iterate until no more spaces.
  return current_space_ != LAST_SPACE;
}


ObjectIterator* SpaceIterator::next() {
  if (iterator_ != NULL) {
    delete iterator_;
    iterator_ = NULL;
    // Move to the next space.
    current_space_++;
    if (current_space_ > LAST_SPACE) {
      return NULL;
    }
  }

  // Return iterator for the new current space.
  return CreateIterator();
}


// Create an iterator for the space to iterate.
ObjectIterator* SpaceIterator::CreateIterator() {
  ASSERT(iterator_ == NULL);

  switch (current_space_) {
    case NEW_SPACE:
      iterator_ = new SemiSpaceIterator(heap_->new_space(), size_func_);
      break;
    case OLD_POINTER_SPACE:
      iterator_ =
          new HeapObjectIterator(heap_->old_pointer_space(), size_func_);
      break;
    case OLD_DATA_SPACE:
      iterator_ = new HeapObjectIterator(heap_->old_data_space(), size_func_);
      break;
    case CODE_SPACE:
      iterator_ = new HeapObjectIterator(heap_->code_space(), size_func_);
      break;
    case MAP_SPACE:
      iterator_ = new HeapObjectIterator(heap_->map_space(), size_func_);
      break;
    case CELL_SPACE:
      iterator_ = new HeapObjectIterator(heap_->cell_space(), size_func_);
      break;
    case PROPERTY_CELL_SPACE:
      iterator_ = new HeapObjectIterator(heap_->property_cell_space(),
                                         size_func_);
      break;
    case LO_SPACE:
      iterator_ = new LargeObjectIterator(heap_->lo_space(), size_func_);
      break;
  }

  // Return the newly allocated iterator.
  ASSERT(iterator_ != NULL);
  return iterator_;
}


class HeapObjectsFilter {
 public:
  virtual ~HeapObjectsFilter() {}
  virtual bool SkipObject(HeapObject* object) = 0;
};


class UnreachableObjectsFilter : public HeapObjectsFilter {
 public:
  explicit UnreachableObjectsFilter(Heap* heap) : heap_(heap) {
    MarkReachableObjects();
  }

  ~UnreachableObjectsFilter() {
    heap_->mark_compact_collector()->ClearMarkbits();
  }

  bool SkipObject(HeapObject* object) {
    MarkBit mark_bit = Marking::MarkBitFrom(object);
    return !mark_bit.Get();
  }

 private:
  class MarkingVisitor : public ObjectVisitor {
   public:
    MarkingVisitor() : marking_stack_(10) {}

    void VisitPointers(Object** start, Object** end) {
      for (Object** p = start; p < end; p++) {
        if (!(*p)->IsHeapObject()) continue;
        HeapObject* obj = HeapObject::cast(*p);
        MarkBit mark_bit = Marking::MarkBitFrom(obj);
        if (!mark_bit.Get()) {
          mark_bit.Set();
          marking_stack_.Add(obj);
        }
      }
    }

    void TransitiveClosure() {
      while (!marking_stack_.is_empty()) {
        HeapObject* obj = marking_stack_.RemoveLast();
        obj->Iterate(this);
      }
    }

   private:
    List<HeapObject*> marking_stack_;
  };

  void MarkReachableObjects() {
    MarkingVisitor visitor;
    heap_->IterateRoots(&visitor, VISIT_ALL);
    visitor.TransitiveClosure();
  }

  Heap* heap_;
  DisallowHeapAllocation no_allocation_;
};
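

// Illustrative sketch (not part of the original source): the transitive
// closure above is a plain worklist algorithm. Here is a minimal version over
// a generic object graph, assuming each node exposes its outgoing edges; all
// names and sizes are hypothetical.
namespace example {

struct Node {
  bool marked;
  int edge_count;
  Node* edges[4];  // The first edge_count entries are valid.
};

static void MarkReachable(Node** roots, int root_count,
                          Node** worklist, int worklist_capacity) {
  int top = 0;
  // Seed the worklist with the unmarked roots.
  for (int i = 0; i < root_count; i++) {
    if (!roots[i]->marked && top < worklist_capacity) {
      roots[i]->marked = true;
      worklist[top++] = roots[i];
    }
  }
  // Pop a node, push its unmarked successors; done when the list drains.
  while (top > 0) {
    Node* n = worklist[--top];
    for (int i = 0; i < n->edge_count; i++) {
      Node* s = n->edges[i];
      if (s != NULL && !s->marked && top < worklist_capacity) {
        s->marked = true;
        worklist[top++] = s;
      }
    }
  }
}

}  // namespace example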


HeapIterator::HeapIterator(Heap* heap)
    : make_heap_iterable_helper_(heap),
      no_heap_allocation_(),
      heap_(heap),
      filtering_(HeapIterator::kNoFiltering),
      filter_(NULL) {
  Init();
}


HeapIterator::HeapIterator(Heap* heap,
                           HeapIterator::HeapObjectsFiltering filtering)
    : make_heap_iterable_helper_(heap),
      no_heap_allocation_(),
      heap_(heap),
      filtering_(filtering),
      filter_(NULL) {
  Init();
}


HeapIterator::~HeapIterator() {
  Shutdown();
}


void HeapIterator::Init() {
  // Start the iteration.
  space_iterator_ = new SpaceIterator(heap_);
  switch (filtering_) {
    case kFilterUnreachable:
      filter_ = new UnreachableObjectsFilter(heap_);
      break;
    default:
      break;
  }
  object_iterator_ = space_iterator_->next();
}


void HeapIterator::Shutdown() {
  // Assert that in filtering mode we have iterated through all
  // objects. Otherwise, the heap will be left in an inconsistent state.
  if (filtering_ != kNoFiltering) {
    ASSERT(object_iterator_ == NULL);
  }
  // Make sure the last iterator is deallocated.
  delete space_iterator_;
  space_iterator_ = NULL;
  object_iterator_ = NULL;
  delete filter_;
  filter_ = NULL;
}


HeapObject* HeapIterator::next() {
  if (filter_ == NULL) return NextObject();

  HeapObject* obj = NextObject();
  while (obj != NULL && filter_->SkipObject(obj)) obj = NextObject();
  return obj;
}


HeapObject* HeapIterator::NextObject() {
  // No iterator means we are done.
  if (object_iterator_ == NULL) return NULL;

  if (HeapObject* obj = object_iterator_->next_object()) {
    // If the current iterator has more objects we are fine.
    return obj;
  } else {
    // Go through the spaces looking for one that has objects.
    while (space_iterator_->has_next()) {
      object_iterator_ = space_iterator_->next();
      if (HeapObject* obj = object_iterator_->next_object()) {
        return obj;
      }
    }
  }
  // Done with the last space.
  object_iterator_ = NULL;
  return NULL;
}


void HeapIterator::reset() {
  // Restart the iterator.
  Shutdown();
  Init();
}


Object* const PathTracer::kAnyGlobalObject = NULL;


class PathTracer::MarkVisitor: public ObjectVisitor {
 public:
  explicit MarkVisitor(PathTracer* tracer) : tracer_(tracer) {}
  void VisitPointers(Object** start, Object** end) {
    // Scan all HeapObject pointers in [start, end).
    for (Object** p = start; !tracer_->found() && (p < end); p++) {
      if ((*p)->IsHeapObject())
        tracer_->MarkRecursively(p, this);
    }
  }

 private:
  PathTracer* tracer_;
};


class PathTracer::UnmarkVisitor: public ObjectVisitor {
 public:
  explicit UnmarkVisitor(PathTracer* tracer) : tracer_(tracer) {}
  void VisitPointers(Object** start, Object** end) {
    // Scan all HeapObject pointers in [start, end).
    for (Object** p = start; p < end; p++) {
      if ((*p)->IsHeapObject())
        tracer_->UnmarkRecursively(p, this);
    }
  }

 private:
  PathTracer* tracer_;
};


void PathTracer::VisitPointers(Object** start, Object** end) {
  bool done = ((what_to_find_ == FIND_FIRST) && found_target_);
  // Visit all HeapObject pointers in [start, end).
  for (Object** p = start; !done && (p < end); p++) {
    if ((*p)->IsHeapObject()) {
      TracePathFrom(p);
      done = ((what_to_find_ == FIND_FIRST) && found_target_);
    }
  }
}


void PathTracer::Reset() {
  found_target_ = false;
  object_stack_.Clear();
}


void PathTracer::TracePathFrom(Object** root) {
  ASSERT((search_target_ == kAnyGlobalObject) ||
         search_target_->IsHeapObject());
  found_target_in_trace_ = false;
  Reset();

  MarkVisitor mark_visitor(this);
  MarkRecursively(root, &mark_visitor);

  UnmarkVisitor unmark_visitor(this);
  UnmarkRecursively(root, &unmark_visitor);

  ProcessResults();
}


static bool SafeIsNativeContext(HeapObject* obj) {
  return obj->map() == obj->GetHeap()->raw_unchecked_native_context_map();
}


void PathTracer::MarkRecursively(Object** p, MarkVisitor* mark_visitor) {
  if (!(*p)->IsHeapObject()) return;

  HeapObject* obj = HeapObject::cast(*p);

  MapWord map_word = obj->map_word();
  if (!map_word.ToMap()->IsHeapObject()) return;  // Visited before.

  if (found_target_in_trace_) return;  // Stop if target found.
  object_stack_.Add(obj);
  if (((search_target_ == kAnyGlobalObject) && obj->IsJSGlobalObject()) ||
      (obj == search_target_)) {
    found_target_in_trace_ = true;
    found_target_ = true;
    return;
  }

  bool is_native_context = SafeIsNativeContext(obj);

  Map* map = Map::cast(map_word.ToMap());

  MapWord marked_map_word =
      MapWord::FromRawValue(obj->map_word().ToRawValue() + kMarkTag);
  obj->set_map_word(marked_map_word);

  // Scan the object body.
  if (is_native_context && (visit_mode_ == VISIT_ONLY_STRONG)) {
    // This is specialized to scan Contexts properly.
    Object** start = reinterpret_cast<Object**>(obj->address() +
                                                Context::kHeaderSize);
    Object** end = reinterpret_cast<Object**>(obj->address() +
        Context::kHeaderSize + Context::FIRST_WEAK_SLOT * kPointerSize);
    mark_visitor->VisitPointers(start, end);
  } else {
    obj->IterateBody(map->instance_type(), obj->SizeFromMap(map), mark_visitor);
  }

  // Scan the map after the body because the body is a lot more interesting
  // when doing leak detection.
  MarkRecursively(reinterpret_cast<Object**>(&map), mark_visitor);

  if (!found_target_in_trace_) {  // Don't pop if we found the target.
    object_stack_.RemoveLast();
  }
}


void PathTracer::UnmarkRecursively(Object** p, UnmarkVisitor* unmark_visitor) {
  if (!(*p)->IsHeapObject()) return;

  HeapObject* obj = HeapObject::cast(*p);

  MapWord map_word = obj->map_word();
  if (map_word.ToMap()->IsHeapObject()) return;  // Unmarked already.

  MapWord unmarked_map_word =
      MapWord::FromRawValue(map_word.ToRawValue() - kMarkTag);
  obj->set_map_word(unmarked_map_word);

  Map* map = Map::cast(unmarked_map_word.ToMap());

  UnmarkRecursively(reinterpret_cast<Object**>(&map), unmark_visitor);

  obj->IterateBody(map->instance_type(), obj->SizeFromMap(map), unmark_visitor);
}
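

// Illustrative sketch (not part of the original source): marking by biasing
// the map word, as the pair of functions above does. Adding the tag makes the
// stored "map pointer" stop looking like a valid heap pointer, which doubles
// as the mark bit; subtracting it restores the original word exactly. The tag
// value is a hypothetical stand-in for kMarkTag.
namespace example {

static const unsigned kExampleMarkTag = 2;

static unsigned MarkMapWord(unsigned raw) { return raw + kExampleMarkTag; }
static unsigned UnmarkMapWord(unsigned raw) { return raw - kExampleMarkTag; }
static bool RoundTripsExactly(unsigned raw) {
  // Mark followed by unmark must be the identity, or the trace would corrupt
  // the object's map word.
  return UnmarkMapWord(MarkMapWord(raw)) == raw;
}

}  // namespace example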


void PathTracer::ProcessResults() {
  if (found_target_) {
    PrintF("=====================================\n");
    PrintF("====        Path to object       ====\n");
    PrintF("=====================================\n\n");

    ASSERT(!object_stack_.is_empty());
    for (int i = 0; i < object_stack_.length(); i++) {
      if (i > 0) PrintF("\n     |\n     |\n     V\n\n");
      Object* obj = object_stack_[i];
      obj->Print();
    }
    PrintF("=====================================\n");
  }
}


// Triggers a depth-first traversal of reachable objects from one
// given root object and finds a path to a specific heap object and
// prints it.
void Heap::TracePathToObjectFrom(Object* target, Object* root) {
  PathTracer tracer(target, PathTracer::FIND_ALL, VISIT_ALL);
  tracer.VisitPointer(&root);
}


// Triggers a depth-first traversal of reachable objects from roots
// and finds a path to a specific heap object and prints it.
void Heap::TracePathToObject(Object* target) {
  PathTracer tracer(target, PathTracer::FIND_ALL, VISIT_ALL);
  IterateRoots(&tracer, VISIT_ONLY_STRONG);
}


// Triggers a depth-first traversal of reachable objects from roots
// and finds a path to any global object and prints it. Useful for
// determining the source for leaks of global objects.
void Heap::TracePathToGlobal() {
  PathTracer tracer(PathTracer::kAnyGlobalObject,
                    PathTracer::FIND_ALL,
                    VISIT_ALL);
  IterateRoots(&tracer, VISIT_ONLY_STRONG);
}


static intptr_t CountTotalHolesSize(Heap* heap) {
  intptr_t holes_size = 0;
  OldSpaces spaces(heap);
  for (OldSpace* space = spaces.next();
       space != NULL;
       space = spaces.next()) {
    holes_size += space->Waste() + space->Available();
  }
  return holes_size;
}


GCTracer::GCTracer(Heap* heap,
                   const char* gc_reason,
                   const char* collector_reason)
    : start_time_(0.0),
      start_object_size_(0),
      start_memory_size_(0),
      allocated_since_last_gc_(0),
      spent_in_mutator_(0),
      nodes_died_in_new_space_(0),
      nodes_copied_in_new_space_(0),
      nodes_promoted_(0),
      heap_(heap),
      gc_reason_(gc_reason),
      collector_reason_(collector_reason) {
  if (!FLAG_trace_gc && !FLAG_print_cumulative_gc_stat) return;
  start_time_ = OS::TimeCurrentMillis();
  start_object_size_ = heap_->SizeOfObjects();
  start_memory_size_ = heap_->isolate()->memory_allocator()->Size();

  for (int i = 0; i < Scope::kNumberOfScopes; i++) {
    scopes_[i] = 0;
  }

  in_free_list_or_wasted_before_gc_ = CountTotalHolesSize(heap);

  allocated_since_last_gc_ =
      heap_->SizeOfObjects() - heap_->alive_after_last_gc_;

  if (heap_->last_gc_end_timestamp_ > 0) {
    spent_in_mutator_ = Max(start_time_ - heap_->last_gc_end_timestamp_, 0.0);
  }

  steps_count_ = heap_->incremental_marking()->steps_count();
  steps_took_ = heap_->incremental_marking()->steps_took();
  longest_step_ = heap_->incremental_marking()->longest_step();
  steps_count_since_last_gc_ =
      heap_->incremental_marking()->steps_count_since_last_gc();
  steps_took_since_last_gc_ =
      heap_->incremental_marking()->steps_took_since_last_gc();
}


GCTracer::~GCTracer() {
  // Print one trace line iff a tracing flag is set.
  if (!FLAG_trace_gc && !FLAG_print_cumulative_gc_stat) return;

  bool first_gc = (heap_->last_gc_end_timestamp_ == 0);

  heap_->alive_after_last_gc_ = heap_->SizeOfObjects();
  heap_->last_gc_end_timestamp_ = OS::TimeCurrentMillis();

  double time = heap_->last_gc_end_timestamp_ - start_time_;

  // Update cumulative GC statistics if required.
  if (FLAG_print_cumulative_gc_stat) {
    heap_->total_gc_time_ms_ += time;
    heap_->max_gc_pause_ = Max(heap_->max_gc_pause_, time);
    heap_->max_alive_after_gc_ = Max(heap_->max_alive_after_gc_,
                                     heap_->alive_after_last_gc_);
    if (!first_gc) {
      heap_->min_in_mutator_ = Min(heap_->min_in_mutator_,
                                   spent_in_mutator_);
    }
  } else if (FLAG_trace_gc_verbose) {
    heap_->total_gc_time_ms_ += time;
  }

  if (collector_ == SCAVENGER && FLAG_trace_gc_ignore_scavenger) return;

  heap_->AddMarkingTime(scopes_[Scope::MC_MARK]);

  if (FLAG_print_cumulative_gc_stat && !FLAG_trace_gc) return;
  PrintPID("%8.0f ms: ", heap_->isolate()->time_millis_since_init());

  if (!FLAG_trace_gc_nvp) {
    int external_time = static_cast<int>(scopes_[Scope::EXTERNAL]);

    double end_memory_size_mb =
        static_cast<double>(heap_->isolate()->memory_allocator()->Size()) / MB;

    PrintF("%s %.1f (%.1f) -> %.1f (%.1f) MB, ",
           CollectorString(),
           static_cast<double>(start_object_size_) / MB,
           static_cast<double>(start_memory_size_) / MB,
           SizeOfHeapObjects(),
           end_memory_size_mb);

    if (external_time > 0) PrintF("%d / ", external_time);
    PrintF("%.1f ms", time);
    if (steps_count_ > 0) {
      if (collector_ == SCAVENGER) {
        PrintF(" (+ %.1f ms in %d steps since last GC)",
               steps_took_since_last_gc_,
               steps_count_since_last_gc_);
      } else {
        PrintF(" (+ %.1f ms in %d steps since start of marking, "
               "biggest step %.1f ms)",
               steps_took_,
               steps_count_,
               longest_step_);
      }
    }

    if (gc_reason_ != NULL) {
      PrintF(" [%s]", gc_reason_);
    }

    if (collector_reason_ != NULL) {
      PrintF(" [%s]", collector_reason_);
    }

    PrintF(".\n");
  } else {
    PrintF("pause=%.1f ", time);
    PrintF("mutator=%.1f ", spent_in_mutator_);
    PrintF("gc=");
    switch (collector_) {
      case SCAVENGER:
        PrintF("s");
        break;
      case MARK_COMPACTOR:
        PrintF("ms");
        break;
      default:
        UNREACHABLE();
    }
    PrintF(" ");

    PrintF("external=%.1f ", scopes_[Scope::EXTERNAL]);
    PrintF("mark=%.1f ", scopes_[Scope::MC_MARK]);
    PrintF("sweep=%.2f ", scopes_[Scope::MC_SWEEP]);
    PrintF("sweepns=%.2f ", scopes_[Scope::MC_SWEEP_NEWSPACE]);
    PrintF("sweepos=%.2f ", scopes_[Scope::MC_SWEEP_OLDSPACE]);
    PrintF("evacuate=%.1f ", scopes_[Scope::MC_EVACUATE_PAGES]);
    PrintF("new_new=%.1f ", scopes_[Scope::MC_UPDATE_NEW_TO_NEW_POINTERS]);
    PrintF("root_new=%.1f ", scopes_[Scope::MC_UPDATE_ROOT_TO_NEW_POINTERS]);
    PrintF("old_new=%.1f ", scopes_[Scope::MC_UPDATE_OLD_TO_NEW_POINTERS]);
    PrintF("compaction_ptrs=%.1f ",
           scopes_[Scope::MC_UPDATE_POINTERS_TO_EVACUATED]);
    PrintF("intracompaction_ptrs=%.1f ",
           scopes_[Scope::MC_UPDATE_POINTERS_BETWEEN_EVACUATED]);
    PrintF("misc_compaction=%.1f ", scopes_[Scope::MC_UPDATE_MISC_POINTERS]);
    PrintF("weakcollection_process=%.1f ",
           scopes_[Scope::MC_WEAKCOLLECTION_PROCESS]);
    PrintF("weakcollection_clear=%.1f ",
           scopes_[Scope::MC_WEAKCOLLECTION_CLEAR]);

    PrintF("total_size_before=%" V8_PTR_PREFIX "d ", start_object_size_);
    PrintF("total_size_after=%" V8_PTR_PREFIX "d ", heap_->SizeOfObjects());
    PrintF("holes_size_before=%" V8_PTR_PREFIX "d ",
           in_free_list_or_wasted_before_gc_);
    PrintF("holes_size_after=%" V8_PTR_PREFIX "d ", CountTotalHolesSize(heap_));

    PrintF("allocated=%" V8_PTR_PREFIX "d ", allocated_since_last_gc_);
    PrintF("promoted=%" V8_PTR_PREFIX "d ", heap_->promoted_objects_size_);
    PrintF("semi_space_copied=%" V8_PTR_PREFIX "d ",
           heap_->semi_space_copied_object_size_);
    PrintF("nodes_died_in_new=%d ", nodes_died_in_new_space_);
    PrintF("nodes_copied_in_new=%d ", nodes_copied_in_new_space_);
    PrintF("nodes_promoted=%d ", nodes_promoted_);
    PrintF("promotion_rate=%.1f%% ", heap_->promotion_rate_);
    PrintF("semi_space_copy_rate=%.1f%% ", heap_->semi_space_copied_rate_);

    if (collector_ == SCAVENGER) {
      PrintF("stepscount=%d ", steps_count_since_last_gc_);
      PrintF("stepstook=%.1f ", steps_took_since_last_gc_);
    } else {
      PrintF("stepscount=%d ", steps_count_);
      PrintF("stepstook=%.1f ", steps_took_);
      PrintF("longeststep=%.1f ", longest_step_);
    }

    PrintF("\n");
  }

  heap_->PrintShortHeapStatistics();
}


const char* GCTracer::CollectorString() {
  switch (collector_) {
    case SCAVENGER:
      return "Scavenge";
    case MARK_COMPACTOR:
      return "Mark-sweep";
  }
  return "Unknown GC";
}


int KeyedLookupCache::Hash(Handle<Map> map, Handle<Name> name) {
  DisallowHeapAllocation no_gc;
  // Uses only lower 32 bits if pointers are larger.
  uintptr_t addr_hash =
      static_cast<uint32_t>(reinterpret_cast<uintptr_t>(*map)) >> kMapHashShift;
  return static_cast<uint32_t>((addr_hash ^ name->Hash()) & kCapacityMask);
}
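

// Illustrative sketch (not part of the original source): how a map address
// and a name hash combine into a bucket index, as in Hash() above. The shift
// discards low bits that are identical for all aligned pointers, XOR mixes in
// the name hash, and the mask wraps the result into table range. The
// constants here are hypothetical stand-ins for kMapHashShift/kCapacityMask.
namespace example {

static unsigned ExampleCacheIndex(const void* map, unsigned name_hash) {
  const unsigned kShift = 2;       // Alignment bits to drop.
  const unsigned kCapacity = 128;  // Power of two, as a capacity mask implies.
  unsigned addr_hash =
      static_cast<unsigned>(reinterpret_cast<unsigned long>(map)) >> kShift;
  return (addr_hash ^ name_hash) & (kCapacity - 1);
}

}  // namespace example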


int KeyedLookupCache::Lookup(Handle<Map> map, Handle<Name> name) {
  DisallowHeapAllocation no_gc;
  int index = (Hash(map, name) & kHashMask);
  for (int i = 0; i < kEntriesPerBucket; i++) {
    Key& key = keys_[index + i];
    if ((key.map == *map) && key.name->Equals(*name)) {
      return field_offsets_[index + i];
    }
  }
  return kNotFound;
}


void KeyedLookupCache::Update(Handle<Map> map,
                              Handle<Name> name,
                              int field_offset) {
  DisallowHeapAllocation no_gc;
  if (!name->IsUniqueName()) {
    if (!StringTable::InternalizeStringIfExists(name->GetIsolate(),
                                                Handle<String>::cast(name)).
            ToHandle(&name)) {
      return;
    }
  }
  // This cache is cleared only between mark compact passes, so we expect the
  // cache to only contain old space names.
  ASSERT(!map->GetIsolate()->heap()->InNewSpace(*name));

  int index = (Hash(map, name) & kHashMask);
  // After a GC there will be free slots, so we use them in order (this may
  // help to get the most frequently used one in position 0).
  for (int i = 0; i < kEntriesPerBucket; i++) {
    Key& key = keys_[index + i];
    Object* free_entry_indicator = NULL;
    if (key.map == free_entry_indicator) {
      key.map = *map;
      key.name = *name;
      field_offsets_[index + i] = field_offset;
      return;
    }
  }

  // No free entry found in this bucket, so we move them all down one and
  // put the new entry at position zero.
  for (int i = kEntriesPerBucket - 1; i > 0; i--) {
    Key& key = keys_[index + i];
    Key& key2 = keys_[index + i - 1];
    key = key2;
    field_offsets_[index + i] = field_offsets_[index + i - 1];
  }

  // Write the new first entry.
  Key& key = keys_[index];
  key.map = *map;
  key.name = *name;
  field_offsets_[index] = field_offset;
}
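

// Illustrative sketch (not part of the original source): the bucket insertion
// policy above in miniature. Try to fill a free slot; otherwise shift every
// entry down one place and write the new entry at position zero, so the most
// recently inserted entry is probed first. Names and sizes are hypothetical.
namespace example {

static const int kWays = 2;  // Entries per bucket, like kEntriesPerBucket.

struct Entry { const void* key; int value; };

static void BucketInsert(Entry* bucket, const void* key, int value) {
  // Use a free slot if one exists.
  for (int i = 0; i < kWays; i++) {
    if (bucket[i].key == 0) {
      bucket[i].key = key;
      bucket[i].value = value;
      return;
    }
  }
  // Otherwise age everything by one position and insert at the front.
  for (int i = kWays - 1; i > 0; i--) bucket[i] = bucket[i - 1];
  bucket[0].key = key;
  bucket[0].value = value;
}

}  // namespace example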


void KeyedLookupCache::Clear() {
  for (int index = 0; index < kLength; index++) keys_[index].map = NULL;
}


void DescriptorLookupCache::Clear() {
  for (int index = 0; index < kLength; index++) keys_[index].source = NULL;
}


void ExternalStringTable::CleanUp() {
  int last = 0;
  for (int i = 0; i < new_space_strings_.length(); ++i) {
    if (new_space_strings_[i] == heap_->the_hole_value()) {
      continue;
    }
    ASSERT(new_space_strings_[i]->IsExternalString());
    if (heap_->InNewSpace(new_space_strings_[i])) {
      new_space_strings_[last++] = new_space_strings_[i];
    } else {
      old_space_strings_.Add(new_space_strings_[i]);
    }
  }
  new_space_strings_.Rewind(last);
  new_space_strings_.Trim();

  last = 0;
  for (int i = 0; i < old_space_strings_.length(); ++i) {
    if (old_space_strings_[i] == heap_->the_hole_value()) {
      continue;
    }
    ASSERT(old_space_strings_[i]->IsExternalString());
    ASSERT(!heap_->InNewSpace(old_space_strings_[i]));
    old_space_strings_[last++] = old_space_strings_[i];
  }
  old_space_strings_.Rewind(last);
  old_space_strings_.Trim();

  if (FLAG_verify_heap) {
    Verify();
  }
}
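

// Illustrative sketch (not part of the original source): the in-place
// compaction idiom used above. A write cursor trails the read cursor;
// surviving elements are copied down in order and the tail is discarded.
// Names are hypothetical.
namespace example {

// Returns the new length after dropping entries equal to hole_value.
static int CompactInPlace(int* items, int length, int hole_value) {
  int last = 0;
  for (int i = 0; i < length; ++i) {
    if (items[i] == hole_value) continue;  // Skip cleared slots.
    items[last++] = items[i];              // Keep survivors, preserving order.
  }
  return last;  // Caller rewinds the list to this length.
}

}  // namespace example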


void ExternalStringTable::TearDown() {
  for (int i = 0; i < new_space_strings_.length(); ++i) {
    heap_->FinalizeExternalString(ExternalString::cast(new_space_strings_[i]));
  }
  new_space_strings_.Free();
  for (int i = 0; i < old_space_strings_.length(); ++i) {
    heap_->FinalizeExternalString(ExternalString::cast(old_space_strings_[i]));
  }
  old_space_strings_.Free();
}


void Heap::QueueMemoryChunkForFree(MemoryChunk* chunk) {
  chunk->set_next_chunk(chunks_queued_for_free_);
  chunks_queued_for_free_ = chunk;
}


void Heap::FreeQueuedChunks() {
  if (chunks_queued_for_free_ == NULL) return;
  MemoryChunk* next;
  MemoryChunk* chunk;
  for (chunk = chunks_queued_for_free_; chunk != NULL; chunk = next) {
    next = chunk->next_chunk();
    chunk->SetFlag(MemoryChunk::ABOUT_TO_BE_FREED);

    if (chunk->owner()->identity() == LO_SPACE) {
      // StoreBuffer::Filter relies on MemoryChunk::FromAnyPointerAddress.
      // If FromAnyPointerAddress encounters a slot that belongs to a large
      // chunk queued for deletion, it will fail to find the chunk because
      // it tries to perform a search in the list of pages owned by the large
      // object space, and queued chunks were detached from that list.
      // To work around this we split the large chunk into normal kPageSize
      // aligned pieces and initialize the size, owner and flags field of
      // every piece. If FromAnyPointerAddress encounters a slot that belongs
      // to one of these smaller pieces it will treat it as a slot on a
      // normal Page.
      Address chunk_end = chunk->address() + chunk->size();
      MemoryChunk* inner = MemoryChunk::FromAddress(
          chunk->address() + Page::kPageSize);
      MemoryChunk* inner_last = MemoryChunk::FromAddress(chunk_end - 1);
      while (inner <= inner_last) {
        // Size of a large chunk is always a multiple of
        // OS::AllocateAlignment() so there is always
        // enough space for a fake MemoryChunk header.
        Address area_end = Min(inner->address() + Page::kPageSize, chunk_end);
        // Guard against overflow.
        if (area_end < inner->address()) area_end = chunk_end;
        inner->SetArea(inner->address(), area_end);
        inner->set_size(Page::kPageSize);
        inner->set_owner(lo_space());
        inner->SetFlag(MemoryChunk::ABOUT_TO_BE_FREED);
        inner = MemoryChunk::FromAddress(
            inner->address() + Page::kPageSize);
      }
    }
  }
  isolate_->heap()->store_buffer()->Compact();
  isolate_->heap()->store_buffer()->Filter(MemoryChunk::ABOUT_TO_BE_FREED);
  for (chunk = chunks_queued_for_free_; chunk != NULL; chunk = next) {
    next = chunk->next_chunk();
    isolate_->memory_allocator()->Free(chunk);
  }
  chunks_queued_for_free_ = NULL;
}
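

// Illustrative sketch (not part of the original source): carving a large
// region into page-sized strides, as the loop above does when it stamps fake
// MemoryChunk headers. Each stride covers [cursor, cursor + page) clamped to
// the region end. Names and the page size are hypothetical.
namespace example {

static const unsigned long kExamplePageSize = 4096;

static int CountPageStrides(unsigned long begin, unsigned long end) {
  int strides = 0;
  for (unsigned long cursor = begin; cursor < end;
       cursor += kExamplePageSize) {
    // A real implementation would stamp a header at cursor and clamp the
    // covered area to the smaller of cursor + kExamplePageSize and end.
    strides++;
  }
  return strides;
}

}  // namespace example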


void Heap::RememberUnmappedPage(Address page, bool compacted) {
  uintptr_t p = reinterpret_cast<uintptr_t>(page);
  // Tag the page pointer to make it findable in the dump file.
  if (compacted) {
    p ^= 0xc1ead & (Page::kPageSize - 1);  // Cleared.
  } else {
    p ^= 0x1d1ed & (Page::kPageSize - 1);  // I died.
  }
  remembered_unmapped_pages_[remembered_unmapped_pages_index_] =
      reinterpret_cast<Address>(p);
  remembered_unmapped_pages_index_++;
  remembered_unmapped_pages_index_ %= kRememberedUnmappedPages;
}
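

// Illustrative sketch (not part of the original source): why the XOR above is
// safe. Page addresses are page-aligned, so their low bits are zero; XOR-ing
// a marker masked to those low bits leaves the page-base bits intact while
// making the value recognizable in a crash dump. The index then advances
// through a fixed-size ring, overwriting the oldest entry. Constants here are
// hypothetical.
namespace example {

static const unsigned long kPageSize = 4096;  // Power of two.
static const int kRingSize = 16;

static unsigned long TagPageAddress(unsigned long page, unsigned long marker) {
  // Only the offset bits change; page & ~(kPageSize - 1) stays recoverable.
  return page ^ (marker & (kPageSize - 1));
}

static int AdvanceRingIndex(int index) {
  return (index + 1) % kRingSize;  // Wrap around; oldest entries are reused.
}

}  // namespace example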


void Heap::ClearObjectStats(bool clear_last_time_stats) {
  memset(object_counts_, 0, sizeof(object_counts_));
  memset(object_sizes_, 0, sizeof(object_sizes_));
  if (clear_last_time_stats) {
    memset(object_counts_last_time_, 0, sizeof(object_counts_last_time_));
    memset(object_sizes_last_time_, 0, sizeof(object_sizes_last_time_));
  }
}


static LazyMutex checkpoint_object_stats_mutex = LAZY_MUTEX_INITIALIZER;


void Heap::CheckpointObjectStats() {
  LockGuard<Mutex> lock_guard(checkpoint_object_stats_mutex.Pointer());
  Counters* counters = isolate()->counters();
#define ADJUST_LAST_TIME_OBJECT_COUNT(name)              \
  counters->count_of_##name()->Increment(                \
      static_cast<int>(object_counts_[name]));           \
  counters->count_of_##name()->Decrement(                \
      static_cast<int>(object_counts_last_time_[name])); \
  counters->size_of_##name()->Increment(                 \
      static_cast<int>(object_sizes_[name]));            \
  counters->size_of_##name()->Decrement(                 \
      static_cast<int>(object_sizes_last_time_[name]));
  INSTANCE_TYPE_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
#undef ADJUST_LAST_TIME_OBJECT_COUNT
  int index;
#define ADJUST_LAST_TIME_OBJECT_COUNT(name)               \
  index = FIRST_CODE_KIND_SUB_TYPE + Code::name;          \
  counters->count_of_CODE_TYPE_##name()->Increment(       \
      static_cast<int>(object_counts_[index]));           \
  counters->count_of_CODE_TYPE_##name()->Decrement(       \
      static_cast<int>(object_counts_last_time_[index])); \
  counters->size_of_CODE_TYPE_##name()->Increment(        \
      static_cast<int>(object_sizes_[index]));            \
  counters->size_of_CODE_TYPE_##name()->Decrement(        \
      static_cast<int>(object_sizes_last_time_[index]));
  CODE_KIND_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
#undef ADJUST_LAST_TIME_OBJECT_COUNT
#define ADJUST_LAST_TIME_OBJECT_COUNT(name)               \
  index = FIRST_FIXED_ARRAY_SUB_TYPE + name;              \
  counters->count_of_FIXED_ARRAY_##name()->Increment(     \
      static_cast<int>(object_counts_[index]));           \
  counters->count_of_FIXED_ARRAY_##name()->Decrement(     \
      static_cast<int>(object_counts_last_time_[index])); \
  counters->size_of_FIXED_ARRAY_##name()->Increment(      \
      static_cast<int>(object_sizes_[index]));            \
  counters->size_of_FIXED_ARRAY_##name()->Decrement(      \
      static_cast<int>(object_sizes_last_time_[index]));
  FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
#undef ADJUST_LAST_TIME_OBJECT_COUNT
#define ADJUST_LAST_TIME_OBJECT_COUNT(name)                                   \
  index =                                                                     \
      FIRST_CODE_AGE_SUB_TYPE + Code::k##name##CodeAge - Code::kFirstCodeAge; \
  counters->count_of_CODE_AGE_##name()->Increment(                            \
      static_cast<int>(object_counts_[index]));                               \
  counters->count_of_CODE_AGE_##name()->Decrement(                            \
      static_cast<int>(object_counts_last_time_[index]));                     \
  counters->size_of_CODE_AGE_##name()->Increment(                             \
      static_cast<int>(object_sizes_[index]));                                \
  counters->size_of_CODE_AGE_##name()->Decrement(                             \
      static_cast<int>(object_sizes_last_time_[index]));
  CODE_AGE_LIST_COMPLETE(ADJUST_LAST_TIME_OBJECT_COUNT)
#undef ADJUST_LAST_TIME_OBJECT_COUNT

  MemCopy(object_counts_last_time_, object_counts_, sizeof(object_counts_));
  MemCopy(object_sizes_last_time_, object_sizes_, sizeof(object_sizes_));
}
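

// Illustrative sketch (not part of the original source): the delta-checkpoint
// pattern above. An external counter is nudged by the difference between the
// current census and the previous one, then the census is snapshotted for the
// next checkpoint. Names are hypothetical.
namespace example {

struct Counter { int value; };

static void Checkpoint(Counter* counter, int* current, int* last_time) {
  counter->value += *current;    // Add this census...
  counter->value -= *last_time;  // ...and retract the previous one, leaving
                                 // the counter at the new absolute value.
  *last_time = *current;         // Snapshot for the next checkpoint.
}

}  // namespace example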


} }  // namespace v8::internal