1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
#include "src/v8.h"

7 #include "src/accessors.h"
9 #include "src/base/once.h"
10 #include "src/base/utils/random-number-generator.h"
11 #include "src/bootstrapper.h"
12 #include "src/codegen.h"
13 #include "src/compilation-cache.h"
14 #include "src/conversions.h"
15 #include "src/cpu-profiler.h"
16 #include "src/debug.h"
17 #include "src/deoptimizer.h"
18 #include "src/global-handles.h"
19 #include "src/heap/incremental-marking.h"
20 #include "src/heap/mark-compact.h"
21 #include "src/heap/objects-visiting-inl.h"
22 #include "src/heap/objects-visiting.h"
23 #include "src/heap/store-buffer.h"
24 #include "src/heap-profiler.h"
25 #include "src/isolate-inl.h"
26 #include "src/natives.h"
27 #include "src/runtime-profiler.h"
28 #include "src/scopeinfo.h"
29 #include "src/snapshot.h"
30 #include "src/utils.h"
31 #include "src/v8threads.h"
32 #include "src/vm-state-inl.h"
34 #if V8_TARGET_ARCH_ARM && !V8_INTERPRETED_REGEXP
35 #include "src/regexp-macro-assembler.h" // NOLINT
36 #include "src/arm/regexp-macro-assembler-arm.h"  // NOLINT
#endif
38 #if V8_TARGET_ARCH_MIPS && !V8_INTERPRETED_REGEXP
39 #include "src/regexp-macro-assembler.h" // NOLINT
40 #include "src/mips/regexp-macro-assembler-mips.h"  // NOLINT
#endif
42 #if V8_TARGET_ARCH_MIPS64 && !V8_INTERPRETED_REGEXP
43 #include "src/regexp-macro-assembler.h"
44 #include "src/mips64/regexp-macro-assembler-mips64.h"
#endif

namespace v8 {
namespace internal {


Heap::Heap()
52 : amount_of_external_allocated_memory_(0),
53 amount_of_external_allocated_memory_at_last_global_gc_(0),
56 // semispace_size_ should be a power of 2 and old_generation_size_ should
57 // be a multiple of Page::kPageSize.
58 reserved_semispace_size_(8 * (kPointerSize / 4) * MB),
59 max_semi_space_size_(8 * (kPointerSize / 4) * MB),
60 initial_semispace_size_(Page::kPageSize),
61 max_old_generation_size_(700ul * (kPointerSize / 4) * MB),
62 max_executable_size_(256ul * (kPointerSize / 4) * MB),
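// Worked example of the defaults above (added note, not in the original
// source): with kPointerSize == 8 (64-bit builds) they evaluate to 16 MB
// semispaces, a 1400 MB old generation and a 512 MB executable limit; with
// kPointerSize == 4 they are 8 MB, 700 MB and 256 MB respectively.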
63 // Variables set based on semispace_size_ and old_generation_size_ in
// ConfigureHeap.
65 // Will be 4 * reserved_semispace_size_ to ensure that young
66 // generation can be aligned to its size.
67 maximum_committed_(0),
68 survived_since_last_expansion_(0),
70 always_allocate_scope_depth_(0),
71 contexts_disposed_(0),
73 flush_monomorphic_ics_(false),
74 scan_on_scavenge_pages_(0),
76 old_pointer_space_(NULL),
77 old_data_space_(NULL),
81 property_cell_space_(NULL),
84 gc_post_processing_depth_(0),
85 allocations_count_(0),
86 raw_allocations_hash_(0),
87 dump_allocations_hash_countdown_(FLAG_dump_allocations_digest_at_alloc),
90 remembered_unmapped_pages_index_(0),
91 unflattened_strings_length_(0),
93 allocation_timeout_(0),
95 old_generation_allocation_limit_(kMinimumOldGenerationAllocationLimit),
96 old_gen_exhausted_(false),
97 inline_allocation_disabled_(false),
98 store_buffer_rebuilder_(store_buffer()),
100 gc_safe_size_of_old_object_(NULL),
101 total_regexp_code_generated_(0),
103 high_survival_rate_period_length_(0),
104 promoted_objects_size_(0),
106 semi_space_copied_object_size_(0),
107 semi_space_copied_rate_(0),
108 nodes_died_in_new_space_(0),
109 nodes_copied_in_new_space_(0),
111 maximum_size_scavenges_(0),
113 total_gc_time_ms_(0.0),
114 max_alive_after_gc_(0),
115 min_in_mutator_(kMaxInt),
118 mark_compact_collector_(this),
121 incremental_marking_(this),
122 number_idle_notifications_(0),
123 last_idle_notification_gc_count_(0),
124 last_idle_notification_gc_count_init_(false),
125 mark_sweeps_since_idle_round_started_(0),
126 gc_count_at_last_idle_gc_(0),
127 scavenges_since_last_idle_round_(kIdleScavengeThreshold),
128 full_codegen_bytes_generated_(0),
129 crankshaft_codegen_bytes_generated_(0),
130 gcs_since_last_deopt_(0),
132 no_weak_object_verification_scope_depth_(0),
134 allocation_sites_scratchpad_length_(0),
135 promotion_queue_(this),
137 external_string_table_(this),
138 chunks_queued_for_free_(NULL),
139 gc_callbacks_depth_(0) {
140 // Allow build-time customization of the max semispace size. Building
141 // V8 with snapshots and a non-default max semispace size is much
142 // easier if you can define it as part of the build environment.
143 #if defined(V8_MAX_SEMISPACE_SIZE)
144 max_semi_space_size_ = reserved_semispace_size_ = V8_MAX_SEMISPACE_SIZE;
#endif
147 // Ensure old_generation_size_ is a multiple of kPageSize.
148 DCHECK(MB >= Page::kPageSize);
150 memset(roots_, 0, sizeof(roots_[0]) * kRootListLength);
151 set_native_contexts_list(NULL);
152 set_array_buffers_list(Smi::FromInt(0));
153 set_allocation_sites_list(Smi::FromInt(0));
154 set_encountered_weak_collections(Smi::FromInt(0));
155 // Put a dummy entry in the remembered pages so we can find the list in the
156 // minidump even if there are no real unmapped pages.
157 RememberUnmappedPage(NULL, false);
159 ClearObjectStats(true);
163 intptr_t Heap::Capacity() {
164 if (!HasBeenSetUp()) return 0;
166 return new_space_.Capacity() + old_pointer_space_->Capacity() +
167 old_data_space_->Capacity() + code_space_->Capacity() +
168 map_space_->Capacity() + cell_space_->Capacity() +
169 property_cell_space_->Capacity();
173 intptr_t Heap::CommittedMemory() {
174 if (!HasBeenSetUp()) return 0;
176 return new_space_.CommittedMemory() + old_pointer_space_->CommittedMemory() +
177 old_data_space_->CommittedMemory() + code_space_->CommittedMemory() +
178 map_space_->CommittedMemory() + cell_space_->CommittedMemory() +
179 property_cell_space_->CommittedMemory() + lo_space_->Size();
183 size_t Heap::CommittedPhysicalMemory() {
184 if (!HasBeenSetUp()) return 0;
186 return new_space_.CommittedPhysicalMemory() +
187 old_pointer_space_->CommittedPhysicalMemory() +
188 old_data_space_->CommittedPhysicalMemory() +
189 code_space_->CommittedPhysicalMemory() +
190 map_space_->CommittedPhysicalMemory() +
191 cell_space_->CommittedPhysicalMemory() +
192 property_cell_space_->CommittedPhysicalMemory() +
193 lo_space_->CommittedPhysicalMemory();
197 intptr_t Heap::CommittedMemoryExecutable() {
198 if (!HasBeenSetUp()) return 0;
200 return isolate()->memory_allocator()->SizeExecutable();
204 void Heap::UpdateMaximumCommitted() {
205 if (!HasBeenSetUp()) return;
207 intptr_t current_committed_memory = CommittedMemory();
208 if (current_committed_memory > maximum_committed_) {
209 maximum_committed_ = current_committed_memory;
214 intptr_t Heap::Available() {
215 if (!HasBeenSetUp()) return 0;
217 return new_space_.Available() + old_pointer_space_->Available() +
218 old_data_space_->Available() + code_space_->Available() +
219 map_space_->Available() + cell_space_->Available() +
220 property_cell_space_->Available();
224 bool Heap::HasBeenSetUp() {
225 return old_pointer_space_ != NULL && old_data_space_ != NULL &&
226 code_space_ != NULL && map_space_ != NULL && cell_space_ != NULL &&
227 property_cell_space_ != NULL && lo_space_ != NULL;
231 int Heap::GcSafeSizeOfOldObject(HeapObject* object) {
232 if (IntrusiveMarking::IsMarked(object)) {
233 return IntrusiveMarking::SizeOfMarkedObject(object);
235 return object->SizeFromMap(object->map());
239 GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space,
240 const char** reason) {
241 // Is global GC requested?
242 if (space != NEW_SPACE) {
243 isolate_->counters()->gc_compactor_caused_by_request()->Increment();
244 *reason = "GC in old space requested";
245 return MARK_COMPACTOR;
248 if (FLAG_gc_global || (FLAG_stress_compaction && (gc_count_ & 1) != 0)) {
249 *reason = "GC in old space forced by flags";
250 return MARK_COMPACTOR;
253 // Is enough data promoted to justify a global GC?
254 if (OldGenerationAllocationLimitReached()) {
255 isolate_->counters()->gc_compactor_caused_by_promoted_data()->Increment();
256 *reason = "promotion limit reached";
257 return MARK_COMPACTOR;
260 // Have allocation in OLD and LO failed?
261 if (old_gen_exhausted_) {
isolate_->counters()
263 ->gc_compactor_caused_by_oldspace_exhaustion()
->Increment();
265 *reason = "old generations exhausted";
266 return MARK_COMPACTOR;
269 // Is there enough space left in OLD to guarantee that a scavenge can
// succeed?
272 // Note that MemoryAllocator->MaxAvailable() undercounts the memory available
273 // for object promotion. It counts only the bytes that the memory
274 // allocator has not yet allocated from the OS and assigned to any space,
275 // and does not count available bytes already in the old space or code
276 // space. Undercounting is safe---we may get an unrequested full GC when
277 // a scavenge would have succeeded.
278 if (isolate_->memory_allocator()->MaxAvailable() <= new_space_.Size()) {
isolate_->counters()
280 ->gc_compactor_caused_by_oldspace_exhaustion()
->Increment();
282 *reason = "scavenge might not succeed";
283 return MARK_COMPACTOR;
}

// Default
*reason = NULL;
return SCAVENGER;
}
292 // TODO(1238405): Combine the infrastructure for --heap-stats and
293 // --log-gc to avoid the complicated preprocessor and flag testing.
294 void Heap::ReportStatisticsBeforeGC() {
295 // Heap::ReportHeapStatistics will also log NewSpace statistics when
296 // --log-gc is set. The following logic is used to avoid
// double logging.
299 if (FLAG_heap_stats || FLAG_log_gc) new_space_.CollectStatistics();
300 if (FLAG_heap_stats) {
301 ReportHeapStatistics("Before GC");
302 } else if (FLAG_log_gc) {
303 new_space_.ReportStatistics();
305 if (FLAG_heap_stats || FLAG_log_gc) new_space_.ClearHistograms();
308 new_space_.CollectStatistics();
309 new_space_.ReportStatistics();
310 new_space_.ClearHistograms();
316 void Heap::PrintShortHeapStatistics() {
317 if (!FLAG_trace_gc_verbose) return;
318 PrintPID("Memory allocator, used: %6" V8_PTR_PREFIX
320 ", available: %6" V8_PTR_PREFIX "d KB\n",
321 isolate_->memory_allocator()->Size() / KB,
322 isolate_->memory_allocator()->Available() / KB);
323 PrintPID("New space, used: %6" V8_PTR_PREFIX
325 ", available: %6" V8_PTR_PREFIX
327 ", committed: %6" V8_PTR_PREFIX "d KB\n",
328 new_space_.Size() / KB, new_space_.Available() / KB,
329 new_space_.CommittedMemory() / KB);
330 PrintPID("Old pointers, used: %6" V8_PTR_PREFIX
332 ", available: %6" V8_PTR_PREFIX
334 ", committed: %6" V8_PTR_PREFIX "d KB\n",
335 old_pointer_space_->SizeOfObjects() / KB,
336 old_pointer_space_->Available() / KB,
337 old_pointer_space_->CommittedMemory() / KB);
338 PrintPID("Old data space, used: %6" V8_PTR_PREFIX
340 ", available: %6" V8_PTR_PREFIX
342 ", committed: %6" V8_PTR_PREFIX "d KB\n",
343 old_data_space_->SizeOfObjects() / KB,
344 old_data_space_->Available() / KB,
345 old_data_space_->CommittedMemory() / KB);
346 PrintPID("Code space, used: %6" V8_PTR_PREFIX
348 ", available: %6" V8_PTR_PREFIX
350 ", committed: %6" V8_PTR_PREFIX "d KB\n",
351 code_space_->SizeOfObjects() / KB, code_space_->Available() / KB,
352 code_space_->CommittedMemory() / KB);
353 PrintPID("Map space, used: %6" V8_PTR_PREFIX
355 ", available: %6" V8_PTR_PREFIX
357 ", committed: %6" V8_PTR_PREFIX "d KB\n",
358 map_space_->SizeOfObjects() / KB, map_space_->Available() / KB,
359 map_space_->CommittedMemory() / KB);
360 PrintPID("Cell space, used: %6" V8_PTR_PREFIX
362 ", available: %6" V8_PTR_PREFIX
364 ", committed: %6" V8_PTR_PREFIX "d KB\n",
365 cell_space_->SizeOfObjects() / KB, cell_space_->Available() / KB,
366 cell_space_->CommittedMemory() / KB);
367 PrintPID("PropertyCell space, used: %6" V8_PTR_PREFIX
369 ", available: %6" V8_PTR_PREFIX
371 ", committed: %6" V8_PTR_PREFIX "d KB\n",
372 property_cell_space_->SizeOfObjects() / KB,
373 property_cell_space_->Available() / KB,
374 property_cell_space_->CommittedMemory() / KB);
375 PrintPID("Large object space, used: %6" V8_PTR_PREFIX
377 ", available: %6" V8_PTR_PREFIX
379 ", committed: %6" V8_PTR_PREFIX "d KB\n",
380 lo_space_->SizeOfObjects() / KB, lo_space_->Available() / KB,
381 lo_space_->CommittedMemory() / KB);
382 PrintPID("All spaces, used: %6" V8_PTR_PREFIX
384 ", available: %6" V8_PTR_PREFIX
386 ", committed: %6" V8_PTR_PREFIX "d KB\n",
387 this->SizeOfObjects() / KB, this->Available() / KB,
388 this->CommittedMemory() / KB);
389 PrintPID("External memory reported: %6" V8_PTR_PREFIX "d KB\n",
390 static_cast<intptr_t>(amount_of_external_allocated_memory_ / KB));
391 PrintPID("Total time spent in GC : %.1f ms\n", total_gc_time_ms_);
395 // TODO(1238405): Combine the infrastructure for --heap-stats and
396 // --log-gc to avoid the complicated preprocessor and flag testing.
397 void Heap::ReportStatisticsAfterGC() {
398 // Similar to the case before GC, we use some complicated logic to ensure that
399 // NewSpace statistics are logged exactly once when --log-gc is turned on.
401 if (FLAG_heap_stats) {
402 new_space_.CollectStatistics();
403 ReportHeapStatistics("After GC");
404 } else if (FLAG_log_gc) {
405 new_space_.ReportStatistics();
408 if (FLAG_log_gc) new_space_.ReportStatistics();
413 void Heap::GarbageCollectionPrologue() {
415 AllowHeapAllocation for_the_first_part_of_prologue;
416 ClearJSFunctionResultCaches();
418 unflattened_strings_length_ = 0;
420 if (FLAG_flush_code && FLAG_flush_code_incrementally) {
421 mark_compact_collector()->EnableCodeFlushing(true);
425 if (FLAG_verify_heap) {
Verify();
}
431 // Reset GC statistics.
432 promoted_objects_size_ = 0;
433 semi_space_copied_object_size_ = 0;
434 nodes_died_in_new_space_ = 0;
435 nodes_copied_in_new_space_ = 0;
438 UpdateMaximumCommitted();
441 DCHECK(!AllowHeapAllocation::IsAllowed() && gc_state_ == NOT_IN_GC);
443 if (FLAG_gc_verbose) Print();
445 ReportStatisticsBeforeGC();
448 store_buffer()->GCPrologue();
450 if (isolate()->concurrent_osr_enabled()) {
451 isolate()->optimizing_compiler_thread()->AgeBufferedOsrJobs();
454 if (new_space_.IsAtMaximumCapacity()) {
455 maximum_size_scavenges_++;
} else {
457 maximum_size_scavenges_ = 0;
}
459 CheckNewSpaceExpansionCriteria();
463 intptr_t Heap::SizeOfObjects() {
intptr_t total = 0;
465 AllSpaces spaces(this);
466 for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
467 total += space->SizeOfObjects();
}
return total;
473 void Heap::ClearAllICsByKind(Code::Kind kind) {
474 HeapObjectIterator it(code_space());
476 for (Object* object = it.Next(); object != NULL; object = it.Next()) {
477 Code* code = Code::cast(object);
478 Code::Kind current_kind = code->kind();
479 if (current_kind == Code::FUNCTION ||
480 current_kind == Code::OPTIMIZED_FUNCTION) {
481 code->ClearInlineCaches(kind);
487 void Heap::RepairFreeListsAfterBoot() {
488 PagedSpaces spaces(this);
489 for (PagedSpace* space = spaces.next(); space != NULL;
490 space = spaces.next()) {
491 space->RepairFreeListsAfterBoot();
496 void Heap::ProcessPretenuringFeedback() {
497 if (FLAG_allocation_site_pretenuring) {
498 int tenure_decisions = 0;
499 int dont_tenure_decisions = 0;
500 int allocation_mementos_found = 0;
501 int allocation_sites = 0;
502 int active_allocation_sites = 0;
504 // If the scratchpad overflowed, we have to iterate over the allocation
// sites list.
506 // TODO(hpayer): We iterate over the whole list of allocation sites when
507 // we grew to the maximum semi-space size to deopt maybe tenured
508 // allocation sites. We could hold the maybe tenured allocation sites
509 // in a separate data structure if this is a performance problem.
510 bool deopt_maybe_tenured = DeoptMaybeTenuredAllocationSites();
511 bool use_scratchpad =
512 allocation_sites_scratchpad_length_ < kAllocationSiteScratchpadSize &&
513 !deopt_maybe_tenured;
int i = 0;
516 Object* list_element = allocation_sites_list();
517 bool trigger_deoptimization = false;
518 bool maximum_size_scavenge = MaximumSizeScavenge();
519 while (use_scratchpad ? i < allocation_sites_scratchpad_length_
520 : list_element->IsAllocationSite()) {
521 AllocationSite* site =
523 ? AllocationSite::cast(allocation_sites_scratchpad()->get(i))
524 : AllocationSite::cast(list_element);
525 allocation_mementos_found += site->memento_found_count();
526 if (site->memento_found_count() > 0) {
527 active_allocation_sites++;
528 if (site->DigestPretenuringFeedback(maximum_size_scavenge)) {
529 trigger_deoptimization = true;
531 if (site->GetPretenureMode() == TENURED) {
tenure_decisions++;
} else {
534 dont_tenure_decisions++;
}
539 if (deopt_maybe_tenured && site->IsMaybeTenure()) {
540 site->set_deopt_dependent_code(true);
541 trigger_deoptimization = true;
544 if (use_scratchpad) {
i++;
} else {
547 list_element = site->weak_next();
}
551 if (trigger_deoptimization) {
552 isolate_->stack_guard()->RequestDeoptMarkedAllocationSites();
555 FlushAllocationSitesScratchpad();
557 if (FLAG_trace_pretenuring_statistics &&
558 (allocation_mementos_found > 0 || tenure_decisions > 0 ||
559 dont_tenure_decisions > 0)) {
PrintF(
561 "GC: (mode, #visited allocation sites, #active allocation sites, "
562 "#mementos, #tenure decisions, #donttenure decisions) "
563 "(%s, %d, %d, %d, %d, %d)\n",
564 use_scratchpad ? "use scratchpad" : "use list", allocation_sites,
565 active_allocation_sites, allocation_mementos_found, tenure_decisions,
566 dont_tenure_decisions);
572 void Heap::DeoptMarkedAllocationSites() {
573 // TODO(hpayer): If iterating over the allocation sites list becomes a
574 // performance issue, use a cache heap data structure instead (similar to the
575 // allocation sites scratchpad).
576 Object* list_element = allocation_sites_list();
577 while (list_element->IsAllocationSite()) {
578 AllocationSite* site = AllocationSite::cast(list_element);
579 if (site->deopt_dependent_code()) {
580 site->dependent_code()->MarkCodeForDeoptimization(
581 isolate_, DependentCode::kAllocationSiteTenuringChangedGroup);
582 site->set_deopt_dependent_code(false);
584 list_element = site->weak_next();
586 Deoptimizer::DeoptimizeMarkedCode(isolate_);
590 void Heap::GarbageCollectionEpilogue() {
591 store_buffer()->GCEpilogue();
593 // In release mode, we only zap the from space under heap verification.
594 if (Heap::ShouldZapGarbage()) {
ZapFromSpace();
}
598 // Process pretenuring feedback and update allocation sites.
599 ProcessPretenuringFeedback();
602 if (FLAG_verify_heap) {
Verify();
}
607 AllowHeapAllocation for_the_rest_of_the_epilogue;
610 if (FLAG_print_global_handles) isolate_->global_handles()->Print();
611 if (FLAG_print_handles) PrintHandles();
612 if (FLAG_gc_verbose) Print();
613 if (FLAG_code_stats) ReportCodeStatistics("After GC");
615 if (FLAG_deopt_every_n_garbage_collections > 0) {
616 // TODO(jkummerow/ulan/jarin): This is not safe! We can't assume that
617 // the topmost optimized frame can be deoptimized safely, because it
618 // might not have a lazy bailout point right after its current PC.
619 if (++gcs_since_last_deopt_ == FLAG_deopt_every_n_garbage_collections) {
620 Deoptimizer::DeoptimizeAll(isolate());
621 gcs_since_last_deopt_ = 0;
625 UpdateMaximumCommitted();
627 isolate_->counters()->alive_after_last_gc()->Set(
628 static_cast<int>(SizeOfObjects()));
630 isolate_->counters()->string_table_capacity()->Set(
631 string_table()->Capacity());
632 isolate_->counters()->number_of_symbols()->Set(
633 string_table()->NumberOfElements());
635 if (full_codegen_bytes_generated_ + crankshaft_codegen_bytes_generated_ > 0) {
636 isolate_->counters()->codegen_fraction_crankshaft()->AddSample(
637 static_cast<int>((crankshaft_codegen_bytes_generated_ * 100.0) /
638 (crankshaft_codegen_bytes_generated_ +
639 full_codegen_bytes_generated_)));
642 if (CommittedMemory() > 0) {
643 isolate_->counters()->external_fragmentation_total()->AddSample(
644 static_cast<int>(100 - (SizeOfObjects() * 100.0) / CommittedMemory()));
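// Illustration only (added): 60 MB of live objects in 100 MB of committed
// memory records an external_fragmentation_total sample of 40.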
646 isolate_->counters()->heap_fraction_new_space()->AddSample(static_cast<int>(
647 (new_space()->CommittedMemory() * 100.0) / CommittedMemory()));
648 isolate_->counters()->heap_fraction_old_pointer_space()->AddSample(
649 static_cast<int>((old_pointer_space()->CommittedMemory() * 100.0) /
CommittedMemory()));
651 isolate_->counters()->heap_fraction_old_data_space()->AddSample(
652 static_cast<int>((old_data_space()->CommittedMemory() * 100.0) /
CommittedMemory()));
654 isolate_->counters()->heap_fraction_code_space()->AddSample(
655 static_cast<int>((code_space()->CommittedMemory() * 100.0) /
CommittedMemory()));
657 isolate_->counters()->heap_fraction_map_space()->AddSample(static_cast<int>(
658 (map_space()->CommittedMemory() * 100.0) / CommittedMemory()));
659 isolate_->counters()->heap_fraction_cell_space()->AddSample(
660 static_cast<int>((cell_space()->CommittedMemory() * 100.0) /
CommittedMemory()));
662 isolate_->counters()->heap_fraction_property_cell_space()->AddSample(
663 static_cast<int>((property_cell_space()->CommittedMemory() * 100.0) /
CommittedMemory()));
665 isolate_->counters()->heap_fraction_lo_space()->AddSample(static_cast<int>(
666 (lo_space()->CommittedMemory() * 100.0) / CommittedMemory()));
668 isolate_->counters()->heap_sample_total_committed()->AddSample(
669 static_cast<int>(CommittedMemory() / KB));
670 isolate_->counters()->heap_sample_total_used()->AddSample(
671 static_cast<int>(SizeOfObjects() / KB));
672 isolate_->counters()->heap_sample_map_space_committed()->AddSample(
673 static_cast<int>(map_space()->CommittedMemory() / KB));
674 isolate_->counters()->heap_sample_cell_space_committed()->AddSample(
675 static_cast<int>(cell_space()->CommittedMemory() / KB));
isolate_->counters()
677 ->heap_sample_property_cell_space_committed()
->AddSample(
679 static_cast<int>(property_cell_space()->CommittedMemory() / KB));
680 isolate_->counters()->heap_sample_code_space_committed()->AddSample(
681 static_cast<int>(code_space()->CommittedMemory() / KB));
683 isolate_->counters()->heap_sample_maximum_committed()->AddSample(
684 static_cast<int>(MaximumCommittedMemory() / KB));
687 #define UPDATE_COUNTERS_FOR_SPACE(space) \
688 isolate_->counters()->space##_bytes_available()->Set( \
689 static_cast<int>(space()->Available())); \
690 isolate_->counters()->space##_bytes_committed()->Set( \
691 static_cast<int>(space()->CommittedMemory())); \
692 isolate_->counters()->space##_bytes_used()->Set( \
693 static_cast<int>(space()->SizeOfObjects()));
694 #define UPDATE_FRAGMENTATION_FOR_SPACE(space) \
695 if (space()->CommittedMemory() > 0) { \
696 isolate_->counters()->external_fragmentation_##space()->AddSample( \
697 static_cast<int>(100 - \
698 (space()->SizeOfObjects() * 100.0) / \
699 space()->CommittedMemory()));                                        \
}
701 #define UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(space) \
702 UPDATE_COUNTERS_FOR_SPACE(space) \
703 UPDATE_FRAGMENTATION_FOR_SPACE(space)
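// As an illustration (added, not part of the original source),
// UPDATE_COUNTERS_FOR_SPACE(new_space) expands to
//   isolate_->counters()->new_space_bytes_available()->Set(
//       static_cast<int>(new_space()->Available()));
// plus the analogous calls for committed and used bytes; the ## token pasting
// builds each counter name from the space accessor name.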
705 UPDATE_COUNTERS_FOR_SPACE(new_space)
706 UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(old_pointer_space)
707 UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(old_data_space)
708 UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(code_space)
709 UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(map_space)
710 UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(cell_space)
711 UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(property_cell_space)
712 UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(lo_space)
713 #undef UPDATE_COUNTERS_FOR_SPACE
714 #undef UPDATE_FRAGMENTATION_FOR_SPACE
715 #undef UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE
718 ReportStatisticsAfterGC();
721 // Remember the last top pointer so that we can later find out
722 // whether we allocated in new space since the last GC.
723 new_space_top_after_last_gc_ = new_space()->top();
727 void Heap::CollectAllGarbage(int flags, const char* gc_reason,
728 const v8::GCCallbackFlags gc_callback_flags) {
729 // Since we are ignoring the return value, the exact choice of space does
730 // not matter, so long as we do not specify NEW_SPACE, which would not
// cause a full GC.
732 mark_compact_collector_.SetFlags(flags);
733 CollectGarbage(OLD_POINTER_SPACE, gc_reason, gc_callback_flags);
734 mark_compact_collector_.SetFlags(kNoGCFlags);
738 void Heap::CollectAllAvailableGarbage(const char* gc_reason) {
739 // Since we are ignoring the return value, the exact choice of space does
740 // not matter, so long as we do not specify NEW_SPACE, which would not
// cause a full GC.
742 // Major GC would invoke weak handle callbacks on weakly reachable
743 // handles, but won't collect weakly reachable objects until next
744 // major GC. Therefore if we collect aggressively and weak handle callback
745 // has been invoked, we rerun major GC to release objects which become
// garbage.
747 // Note: as weak callbacks can execute arbitrary code, we cannot
748 // hope that eventually there will be no weak callbacks invocations.
749 // Therefore stop recollecting after several attempts.
750 if (isolate()->concurrent_recompilation_enabled()) {
751 // The optimizing compiler may be unnecessarily holding on to memory.
752 DisallowHeapAllocation no_recursive_gc;
753 isolate()->optimizing_compiler_thread()->Flush();
755 mark_compact_collector()->SetFlags(kMakeHeapIterableMask |
756 kReduceMemoryFootprintMask);
757 isolate_->compilation_cache()->Clear();
758 const int kMaxNumberOfAttempts = 7;
759 const int kMinNumberOfAttempts = 2;
760 for (int attempt = 0; attempt < kMaxNumberOfAttempts; attempt++) {
761 if (!CollectGarbage(MARK_COMPACTOR, gc_reason, NULL) &&
762 attempt + 1 >= kMinNumberOfAttempts) {
766 mark_compact_collector()->SetFlags(kNoGCFlags);
769 incremental_marking()->UncommitMarkingDeque();
773 void Heap::EnsureFillerObjectAtTop() {
774 // There may be an allocation memento behind every object in new space.
775 // If we evacuate a new space that is not full or if we are on the last page of
776 // the new space, then there may be uninitialized memory behind the top
777 // pointer of the new space page. We store a filler object there to
778 // identify the unused space.
779 Address from_top = new_space_.top();
780 Address from_limit = new_space_.limit();
781 if (from_top < from_limit) {
782 int remaining_in_page = static_cast<int>(from_limit - from_top);
783 CreateFillerObjectAt(from_top, remaining_in_page);
788 bool Heap::CollectGarbage(GarbageCollector collector, const char* gc_reason,
789 const char* collector_reason,
790 const v8::GCCallbackFlags gc_callback_flags) {
791 // The VM is in the GC state until exiting this function.
792 VMState<GC> state(isolate_);
795 // Reset the allocation timeout to the GC interval, but make sure to
796 // allow at least a few allocations after a collection. The reason
797 // for this is that we have a lot of allocation sequences and we
798 // assume that a garbage collection will allow the subsequent
799 // allocation attempts to go through.
800 allocation_timeout_ = Max(6, FLAG_gc_interval);
803 EnsureFillerObjectAtTop();
805 if (collector == SCAVENGER && !incremental_marking()->IsStopped()) {
806 if (FLAG_trace_incremental_marking) {
807 PrintF("[IncrementalMarking] Scavenge during marking.\n");
811 if (collector == MARK_COMPACTOR &&
812 !mark_compact_collector()->abort_incremental_marking() &&
813 !incremental_marking()->IsStopped() &&
814 !incremental_marking()->should_hurry() &&
815 FLAG_incremental_marking_steps) {
816 // Make progress in incremental marking.
817 const intptr_t kStepSizeWhenDelayedByScavenge = 1 * MB;
818 incremental_marking()->Step(kStepSizeWhenDelayedByScavenge,
819 IncrementalMarking::NO_GC_VIA_STACK_GUARD);
820 if (!incremental_marking()->IsComplete() && !FLAG_gc_global) {
821 if (FLAG_trace_incremental_marking) {
822 PrintF("[IncrementalMarking] Delaying MarkSweep.\n");
824 collector = SCAVENGER;
825 collector_reason = "incremental marking delaying mark-sweep";
829 bool next_gc_likely_to_collect_more = false;
832 tracer()->Start(collector, gc_reason, collector_reason);
833 DCHECK(AllowHeapAllocation::IsAllowed());
834 DisallowHeapAllocation no_allocation_during_gc;
835 GarbageCollectionPrologue();
838 HistogramTimerScope histogram_timer_scope(
839 (collector == SCAVENGER) ? isolate_->counters()->gc_scavenger()
840 : isolate_->counters()->gc_compactor());
841 next_gc_likely_to_collect_more =
842 PerformGarbageCollection(collector, gc_callback_flags);
845 GarbageCollectionEpilogue();
849 // Start incremental marking for the next cycle. The heap snapshot
850 // generator needs incremental marking to stay off after it aborted.
851 if (!mark_compact_collector()->abort_incremental_marking() &&
852 incremental_marking()->IsStopped() &&
853 incremental_marking()->WorthActivating() && NextGCIsLikelyToBeFull()) {
854 incremental_marking()->Start();
857 return next_gc_likely_to_collect_more;
861 int Heap::NotifyContextDisposed() {
862 if (isolate()->concurrent_recompilation_enabled()) {
863 // Flush the queued recompilation tasks.
864 isolate()->optimizing_compiler_thread()->Flush();
866 flush_monomorphic_ics_ = true;
868 return ++contexts_disposed_;
872 void Heap::MoveElements(FixedArray* array, int dst_index, int src_index,
int len) {
874 if (len == 0) return;
876 DCHECK(array->map() != fixed_cow_array_map());
877 Object** dst_objects = array->data_start() + dst_index;
878 MemMove(dst_objects, array->data_start() + src_index, len * kPointerSize);
879 if (!InNewSpace(array)) {
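// Added note: the store buffer entries recorded below are only needed when
// the array itself lives outside new space; pointers held inside new-space
// objects are found anyway when the scavenger walks new space, so they need
// no remembered-set entry.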
880 for (int i = 0; i < len; i++) {
881 // TODO(hpayer): check store buffer for entries
882 if (InNewSpace(dst_objects[i])) {
883 RecordWrite(array->address(), array->OffsetOfElementAt(dst_index + i));
887 incremental_marking()->RecordWrites(array);
892 // Helper class for verifying the string table.
893 class StringTableVerifier : public ObjectVisitor {
895 void VisitPointers(Object** start, Object** end) {
896 // Visit all HeapObject pointers in [start, end).
897 for (Object** p = start; p < end; p++) {
898 if ((*p)->IsHeapObject()) {
899 // Check that the string is actually internalized.
900 CHECK((*p)->IsTheHole() || (*p)->IsUndefined() ||
901 (*p)->IsInternalizedString());
908 static void VerifyStringTable(Heap* heap) {
909 StringTableVerifier verifier;
910 heap->string_table()->IterateElements(&verifier);
912 #endif // VERIFY_HEAP
915 static bool AbortIncrementalMarkingAndCollectGarbage(
916 Heap* heap, AllocationSpace space, const char* gc_reason = NULL) {
917 heap->mark_compact_collector()->SetFlags(Heap::kAbortIncrementalMarkingMask);
918 bool result = heap->CollectGarbage(space, gc_reason);
919 heap->mark_compact_collector()->SetFlags(Heap::kNoGCFlags);
924 void Heap::ReserveSpace(int* sizes, Address* locations_out) {
925 bool gc_performed = true;
int counter = 0;
927 static const int kThreshold = 20;
928 while (gc_performed && counter++ < kThreshold) {
929 gc_performed = false;
930 DCHECK(NEW_SPACE == FIRST_PAGED_SPACE - 1);
931 for (int space = NEW_SPACE; space <= LAST_PAGED_SPACE; space++) {
932 if (sizes[space] != 0) {
933 AllocationResult allocation;
934 if (space == NEW_SPACE) {
935 allocation = new_space()->AllocateRaw(sizes[space]);
} else {
937 allocation = paged_space(space)->AllocateRaw(sizes[space]);
}
FreeListNode* node;
940 if (!allocation.To(&node)) {
941 if (space == NEW_SPACE) {
942 Heap::CollectGarbage(NEW_SPACE,
943 "failed to reserve space in the new space");
} else {
945 AbortIncrementalMarkingAndCollectGarbage(
946 this, static_cast<AllocationSpace>(space),
947 "failed to reserve space in paged space");
952 // Mark with a free list node, in case we have a GC before
954 node->set_size(this, sizes[space]);
955 locations_out[space] = node->address();
if (gc_performed) {
962 // Failed to reserve the space after several attempts.
963 V8::FatalProcessOutOfMemory("Heap::ReserveSpace");
}
968 void Heap::EnsureFromSpaceIsCommitted() {
969 if (new_space_.CommitFromSpaceIfNeeded()) return;
971 // Committing memory to from space failed.
972 // Memory is exhausted and we will die.
973 V8::FatalProcessOutOfMemory("Committing semi space failed.");
977 void Heap::ClearJSFunctionResultCaches() {
978 if (isolate_->bootstrapper()->IsActive()) return;
980 Object* context = native_contexts_list();
981 while (!context->IsUndefined()) {
982 // Get the caches for this context. GC can happen when the context
983 // is not fully initialized, so the caches can be undefined.
984 Object* caches_or_undefined =
985 Context::cast(context)->get(Context::JSFUNCTION_RESULT_CACHES_INDEX);
986 if (!caches_or_undefined->IsUndefined()) {
987 FixedArray* caches = FixedArray::cast(caches_or_undefined);
989 int length = caches->length();
990 for (int i = 0; i < length; i++) {
991 JSFunctionResultCache::cast(caches->get(i))->Clear();
994 // Get the next context:
995 context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
1000 void Heap::ClearNormalizedMapCaches() {
1001 if (isolate_->bootstrapper()->IsActive() &&
1002 !incremental_marking()->IsMarking()) {
return;
}
1006 Object* context = native_contexts_list();
1007 while (!context->IsUndefined()) {
1008 // GC can happen when the context is not fully initialized,
1009 // so the cache can be undefined.
Object* cache =
1011 Context::cast(context)->get(Context::NORMALIZED_MAP_CACHE_INDEX);
1012 if (!cache->IsUndefined()) {
1013 NormalizedMapCache::cast(cache)->Clear();
1015 context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
1020 void Heap::UpdateSurvivalStatistics(int start_new_space_size) {
1021 if (start_new_space_size == 0) return;
1023 promotion_rate_ = (static_cast<double>(promoted_objects_size_) /
1024 static_cast<double>(start_new_space_size) * 100);
1026 semi_space_copied_rate_ =
1027 (static_cast<double>(semi_space_copied_object_size_) /
1028 static_cast<double>(start_new_space_size) * 100);
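// Illustration with hypothetical numbers (added): if 1 MB of a 10 MB new
// space was promoted and 2 MB was copied within new space, promotion_rate_
// is 10, semi_space_copied_rate_ is 20 and the combined survival rate below
// is 30.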
1030 double survival_rate = promotion_rate_ + semi_space_copied_rate_;
1032 if (survival_rate > kYoungSurvivalRateHighThreshold) {
1033 high_survival_rate_period_length_++;
} else {
1035 high_survival_rate_period_length_ = 0;
}
1039 bool Heap::PerformGarbageCollection(
1040 GarbageCollector collector, const v8::GCCallbackFlags gc_callback_flags) {
1041 int freed_global_handles = 0;
1043 if (collector != SCAVENGER) {
1044 PROFILE(isolate_, CodeMovingGCEvent());
1048 if (FLAG_verify_heap) {
1049 VerifyStringTable(this);
GCType gc_type =
1054 collector == MARK_COMPACTOR ? kGCTypeMarkSweepCompact : kGCTypeScavenge;
1057 GCCallbacksScope scope(this);
1058 if (scope.CheckReenter()) {
1059 AllowHeapAllocation allow_allocation;
1060 GCTracer::Scope scope(tracer(), GCTracer::Scope::EXTERNAL);
1061 VMState<EXTERNAL> state(isolate_);
1062 HandleScope handle_scope(isolate_);
1063 CallGCPrologueCallbacks(gc_type, kNoGCCallbackFlags);
1067 EnsureFromSpaceIsCommitted();
1069 int start_new_space_size = Heap::new_space()->SizeAsInt();
1071 if (IsHighSurvivalRate()) {
1072 // We speed up the incremental marker if it is running so that it
1073 // does not fall behind the rate of promotion, which would cause a
1074 // constantly growing old space.
1075 incremental_marking()->NotifyOfHighPromotionRate();
1078 if (collector == MARK_COMPACTOR) {
1079 // Perform mark-sweep with optional compaction.
MarkCompact();
1081 sweep_generation_++;
1082 // Temporarily set the limit for case when PostGarbageCollectionProcessing
1083 // allocates and triggers GC. The real limit is set at after
1084 // PostGarbageCollectionProcessing.
1085 old_generation_allocation_limit_ =
1086 OldGenerationAllocationLimit(PromotedSpaceSizeOfObjects(), 0);
1087 old_gen_exhausted_ = false;
} else {
Scavenge();
}
1092 UpdateSurvivalStatistics(start_new_space_size);
1094 isolate_->counters()->objs_since_last_young()->Set(0);
1096 // Callbacks that fire after this point might trigger nested GCs and
1097 // restart incremental marking, the assertion can't be moved down.
1098 DCHECK(collector == SCAVENGER || incremental_marking()->IsStopped());
1100 gc_post_processing_depth_++;
1102 AllowHeapAllocation allow_allocation;
1103 GCTracer::Scope scope(tracer(), GCTracer::Scope::EXTERNAL);
1104 freed_global_handles =
1105 isolate_->global_handles()->PostGarbageCollectionProcessing(collector);
1107 gc_post_processing_depth_--;
1109 isolate_->eternal_handles()->PostGarbageCollectionProcessing(this);
1111 // Update relocatables.
1112 Relocatable::PostGarbageCollectionProcessing(isolate_);
1114 if (collector == MARK_COMPACTOR) {
1115 // Register the amount of external allocated memory.
1116 amount_of_external_allocated_memory_at_last_global_gc_ =
1117 amount_of_external_allocated_memory_;
1118 old_generation_allocation_limit_ = OldGenerationAllocationLimit(
1119 PromotedSpaceSizeOfObjects(), freed_global_handles);
1123 GCCallbacksScope scope(this);
1124 if (scope.CheckReenter()) {
1125 AllowHeapAllocation allow_allocation;
1126 GCTracer::Scope scope(tracer(), GCTracer::Scope::EXTERNAL);
1127 VMState<EXTERNAL> state(isolate_);
1128 HandleScope handle_scope(isolate_);
1129 CallGCEpilogueCallbacks(gc_type, gc_callback_flags);
1134 if (FLAG_verify_heap) {
1135 VerifyStringTable(this);
1139 return freed_global_handles > 0;
1143 void Heap::CallGCPrologueCallbacks(GCType gc_type, GCCallbackFlags flags) {
1144 for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
1145 if (gc_type & gc_prologue_callbacks_[i].gc_type) {
1146 if (!gc_prologue_callbacks_[i].pass_isolate_) {
1147 v8::GCPrologueCallback callback =
1148 reinterpret_cast<v8::GCPrologueCallback>(
1149 gc_prologue_callbacks_[i].callback);
1150 callback(gc_type, flags);
1152 v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(this->isolate());
1153 gc_prologue_callbacks_[i].callback(isolate, gc_type, flags);
1160 void Heap::CallGCEpilogueCallbacks(GCType gc_type,
1161 GCCallbackFlags gc_callback_flags) {
1162 for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
1163 if (gc_type & gc_epilogue_callbacks_[i].gc_type) {
1164 if (!gc_epilogue_callbacks_[i].pass_isolate_) {
1165 v8::GCPrologueCallback callback =
1166 reinterpret_cast<v8::GCPrologueCallback>(
1167 gc_epilogue_callbacks_[i].callback);
1168 callback(gc_type, gc_callback_flags);
1170 v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(this->isolate());
1171 gc_epilogue_callbacks_[i].callback(isolate, gc_type, gc_callback_flags);
1178 void Heap::MarkCompact() {
1179 gc_state_ = MARK_COMPACT;
1180 LOG(isolate_, ResourceEvent("markcompact", "begin"));
1182 uint64_t size_of_objects_before_gc = SizeOfObjects();
1184 mark_compact_collector_.Prepare();
1188 MarkCompactPrologue();
1190 mark_compact_collector_.CollectGarbage();
1192 LOG(isolate_, ResourceEvent("markcompact", "end"));
1194 gc_state_ = NOT_IN_GC;
1196 isolate_->counters()->objs_since_last_full()->Set(0);
1198 flush_monomorphic_ics_ = false;
1200 if (FLAG_allocation_site_pretenuring) {
1201 EvaluateOldSpaceLocalPretenuring(size_of_objects_before_gc);
1206 void Heap::MarkCompactPrologue() {
1207 // At any old GC clear the keyed lookup cache to enable collection of unused
// maps.
1209 isolate_->keyed_lookup_cache()->Clear();
1210 isolate_->context_slot_cache()->Clear();
1211 isolate_->descriptor_lookup_cache()->Clear();
1212 RegExpResultsCache::Clear(string_split_cache());
1213 RegExpResultsCache::Clear(regexp_multiple_cache());
1215 isolate_->compilation_cache()->MarkCompactPrologue();
1217 CompletelyClearInstanceofCache();
1219 FlushNumberStringCache();
1220 if (FLAG_cleanup_code_caches_at_gc) {
1221 polymorphic_code_cache()->set_cache(undefined_value());
1224 ClearNormalizedMapCaches();
1228 // Helper class for copying HeapObjects
1229 class ScavengeVisitor : public ObjectVisitor {
1231 explicit ScavengeVisitor(Heap* heap) : heap_(heap) {}
1233 void VisitPointer(Object** p) { ScavengePointer(p); }
1235 void VisitPointers(Object** start, Object** end) {
1236 // Copy all HeapObject pointers in [start, end)
1237 for (Object** p = start; p < end; p++) ScavengePointer(p);
1241 void ScavengePointer(Object** p) {
1242 Object* object = *p;
1243 if (!heap_->InNewSpace(object)) return;
1244 Heap::ScavengeObject(reinterpret_cast<HeapObject**>(p),
1245 reinterpret_cast<HeapObject*>(object));
1253 // Visitor class to verify pointers in code or data space do not point into
1255 class VerifyNonPointerSpacePointersVisitor : public ObjectVisitor {
1257 explicit VerifyNonPointerSpacePointersVisitor(Heap* heap) : heap_(heap) {}
1258 void VisitPointers(Object** start, Object** end) {
1259 for (Object** current = start; current < end; current++) {
1260 if ((*current)->IsHeapObject()) {
1261 CHECK(!heap_->InNewSpace(HeapObject::cast(*current)));
1271 static void VerifyNonPointerSpacePointers(Heap* heap) {
1272 // Verify that there are no pointers to new space in spaces where we
1273 // do not expect them.
1274 VerifyNonPointerSpacePointersVisitor v(heap);
1275 HeapObjectIterator code_it(heap->code_space());
1276 for (HeapObject* object = code_it.Next(); object != NULL;
1277 object = code_it.Next())
1278 object->Iterate(&v);
1280 // The old data space was normally swept conservatively so that the iterator
1281 // doesn't work, so we normally skip the next bit.
1282 if (heap->old_data_space()->swept_precisely()) {
1283 HeapObjectIterator data_it(heap->old_data_space());
1284 for (HeapObject* object = data_it.Next(); object != NULL;
1285 object = data_it.Next())
1286 object->Iterate(&v);
1289 #endif // VERIFY_HEAP
1292 void Heap::CheckNewSpaceExpansionCriteria() {
1293 if (new_space_.Capacity() < new_space_.MaximumCapacity() &&
1294 survived_since_last_expansion_ > new_space_.Capacity()) {
1295 // Grow the size of new space if there is room to grow, enough data
1296 // has survived scavenge since the last expansion and we are not in
1297 // high promotion mode.
new_space_.Grow();
1299 survived_since_last_expansion_ = 0;
1304 static bool IsUnscavengedHeapObject(Heap* heap, Object** p) {
1305 return heap->InNewSpace(*p) &&
1306 !HeapObject::cast(*p)->map_word().IsForwardingAddress();
1310 void Heap::ScavengeStoreBufferCallback(Heap* heap, MemoryChunk* page,
1311 StoreBufferEvent event) {
1312 heap->store_buffer_rebuilder_.Callback(page, event);
1316 void StoreBufferRebuilder::Callback(MemoryChunk* page, StoreBufferEvent event) {
1317 if (event == kStoreBufferStartScanningPagesEvent) {
1318 start_of_current_page_ = NULL;
1319 current_page_ = NULL;
1320 } else if (event == kStoreBufferScanningPageEvent) {
1321 if (current_page_ != NULL) {
1322 // If this page already overflowed the store buffer during this iteration.
1323 if (current_page_->scan_on_scavenge()) {
1324 // Then we should wipe out the entries that have been added for it.
1325 store_buffer_->SetTop(start_of_current_page_);
1326 } else if (store_buffer_->Top() - start_of_current_page_ >=
1327 (store_buffer_->Limit() - store_buffer_->Top()) >> 2) {
1328 // Did we find too many pointers in the previous page? The heuristic is
1329 // that no page can take more than 1/5 the remaining slots in the store
// buffer.
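// Why ">> 2" means one fifth (added derivation): let p be the entries this
// page added (Top() - start_of_current_page_) and r the free slots left
// (Limit() - Top()). The test p >= r / 4 is equivalent to 5 * p >= p + r,
// i.e. the page consumed at least 1/5 of the slots that were still free when
// its scan started.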
1331 current_page_->set_scan_on_scavenge(true);
1332 store_buffer_->SetTop(start_of_current_page_);
1334 // In this case the page we scanned took a reasonable number of slots in
1335 // the store buffer. It has now been rehabilitated and is no longer
1336 // marked scan_on_scavenge.
1337 DCHECK(!current_page_->scan_on_scavenge());
1340 start_of_current_page_ = store_buffer_->Top();
1341 current_page_ = page;
1342 } else if (event == kStoreBufferFullEvent) {
1343 // The current page overflowed the store buffer again. Wipe out its entries
1344 // in the store buffer and mark it scan-on-scavenge again. This may happen
1345 // several times while scanning.
1346 if (current_page_ == NULL) {
1347 // Store Buffer overflowed while scanning promoted objects. These are not
1348 // in any particular page, though they are likely to be clustered by the
1349 // allocation routines.
1350 store_buffer_->EnsureSpace(StoreBuffer::kStoreBufferSize / 2);
1352 // Store Buffer overflowed while scanning a particular old space page for
1353 // pointers to new space.
1354 DCHECK(current_page_ == page);
1355 DCHECK(page != NULL);
1356 current_page_->set_scan_on_scavenge(true);
1357 DCHECK(start_of_current_page_ != store_buffer_->Top());
1358 store_buffer_->SetTop(start_of_current_page_);
1366 void PromotionQueue::Initialize() {
1367 // Assumes that a NewSpacePage exactly fits a number of promotion queue
1368 // entries (where each is a pair of intptr_t). This allows us to simplify
1369 // the test for when to switch pages.
1370 DCHECK((Page::kPageSize - MemoryChunk::kBodyOffset) % (2 * kPointerSize) ==
0);
1372 limit_ = reinterpret_cast<intptr_t*>(heap_->new_space()->ToSpaceStart());
front_ = rear_ =
1374 reinterpret_cast<intptr_t*>(heap_->new_space()->ToSpaceEnd());
1375 emergency_stack_ = NULL;
1380 void PromotionQueue::RelocateQueueHead() {
1381 DCHECK(emergency_stack_ == NULL);
1383 Page* p = Page::FromAllocationTop(reinterpret_cast<Address>(rear_));
1384 intptr_t* head_start = rear_;
1385 intptr_t* head_end = Min(front_, reinterpret_cast<intptr_t*>(p->area_end()));
int entries_count =
1388 static_cast<int>(head_end - head_start) / kEntrySizeInWords;
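// Added note: each queue entry is a pair of words, the object size followed
// by the object pointer, which is what the loop below reads back and what
// kEntrySizeInWords accounts for.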
1390 emergency_stack_ = new List<Entry>(2 * entries_count);
1392 while (head_start != head_end) {
1393 int size = static_cast<int>(*(head_start++));
1394 HeapObject* obj = reinterpret_cast<HeapObject*>(*(head_start++));
1395 emergency_stack_->Add(Entry(obj, size));
1401 class ScavengeWeakObjectRetainer : public WeakObjectRetainer {
1403 explicit ScavengeWeakObjectRetainer(Heap* heap) : heap_(heap) {}
1405 virtual Object* RetainAs(Object* object) {
1406 if (!heap_->InFromSpace(object)) {
return object;
}
1410 MapWord map_word = HeapObject::cast(object)->map_word();
1411 if (map_word.IsForwardingAddress()) {
1412 return map_word.ToForwardingAddress();
}
return NULL;
1422 void Heap::Scavenge() {
1423 RelocationLock relocation_lock(this);
1426 if (FLAG_verify_heap) VerifyNonPointerSpacePointers(this);
1429 gc_state_ = SCAVENGE;
1431 // Implements Cheney's copying algorithm
1432 LOG(isolate_, ResourceEvent("scavenge", "begin"));
1434 // Clear descriptor cache.
1435 isolate_->descriptor_lookup_cache()->Clear();
1437 // Used for updating survived_since_last_expansion_ at function end.
1438 intptr_t survived_watermark = PromotedSpaceSizeOfObjects();
1440 SelectScavengingVisitorsTable();
1442 incremental_marking()->PrepareForScavenge();
1444 // Flip the semispaces. After flipping, to space is empty, from space has
// live objects.
new_space_.Flip();
1447 new_space_.ResetAllocationInfo();
1449 // We need to sweep newly copied objects which can be either in the
1450 // to space or promoted to the old generation. For to-space
1451 // objects, we treat the bottom of the to space as a queue. Newly
1452 // copied and unswept objects lie between a 'front' mark and the
1453 // allocation pointer.
1455 // Promoted objects can go into various old-generation spaces, and
1456 // can be allocated internally in the spaces (from the free list).
1457 // We treat the top of the to space as a queue of addresses of
1458 // promoted objects. The addresses of newly promoted and unswept
1459 // objects lie between a 'front' mark and a 'rear' mark that is
1460 // updated as a side effect of promoting an object.
1462 // There is guaranteed to be enough room at the top of the to space
1463 // for the addresses of promoted objects: every object promoted
1464 // frees up its size in bytes from the top of the new space, and
1465 // objects are at least one pointer in size.
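// Rough sketch of to space while scavenging (added illustration, not to
// scale):
//
//   ToSpaceStart                                               ToSpaceEnd
//   | swept copied objects | unswept copied objects |  ...  | promoted addrs |
//   ^ new_space_front       ^ allocation top                 ^ promotion queue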
1466 Address new_space_front = new_space_.ToSpaceStart();
1467 promotion_queue_.Initialize();
1470 store_buffer()->Clean();
1473 ScavengeVisitor scavenge_visitor(this);
1475 IterateRoots(&scavenge_visitor, VISIT_ALL_IN_SCAVENGE);
1477 // Copy objects reachable from the old generation.
1479 StoreBufferRebuildScope scope(this, store_buffer(),
1480 &ScavengeStoreBufferCallback);
1481 store_buffer()->IteratePointersToNewSpace(&ScavengeObject);
1484 // Copy objects reachable from simple cells by scavenging cell values
// directly.
1486 HeapObjectIterator cell_iterator(cell_space_);
1487 for (HeapObject* heap_object = cell_iterator.Next(); heap_object != NULL;
1488 heap_object = cell_iterator.Next()) {
1489 if (heap_object->IsCell()) {
1490 Cell* cell = Cell::cast(heap_object);
1491 Address value_address = cell->ValueAddress();
1492 scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(value_address));
1496 // Copy objects reachable from global property cells by scavenging global
1497 // property cell values directly.
1498 HeapObjectIterator js_global_property_cell_iterator(property_cell_space_);
1499 for (HeapObject* heap_object = js_global_property_cell_iterator.Next();
1500 heap_object != NULL;
1501 heap_object = js_global_property_cell_iterator.Next()) {
1502 if (heap_object->IsPropertyCell()) {
1503 PropertyCell* cell = PropertyCell::cast(heap_object);
1504 Address value_address = cell->ValueAddress();
1505 scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(value_address));
1506 Address type_address = cell->TypeAddress();
1507 scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(type_address));
1511 // Copy objects reachable from the encountered weak collections list.
1512 scavenge_visitor.VisitPointer(&encountered_weak_collections_);
1514 // Copy objects reachable from the code flushing candidates list.
1515 MarkCompactCollector* collector = mark_compact_collector();
1516 if (collector->is_code_flushing_enabled()) {
1517 collector->code_flusher()->IteratePointersToFromSpace(&scavenge_visitor);
1520 new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
1522 while (isolate()->global_handles()->IterateObjectGroups(
1523 &scavenge_visitor, &IsUnscavengedHeapObject)) {
1524 new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
1526 isolate()->global_handles()->RemoveObjectGroups();
1527 isolate()->global_handles()->RemoveImplicitRefGroups();
1529 isolate_->global_handles()->IdentifyNewSpaceWeakIndependentHandles(
1530 &IsUnscavengedHeapObject);
1531 isolate_->global_handles()->IterateNewSpaceWeakIndependentRoots(
1533 new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
1535 UpdateNewSpaceReferencesInExternalStringTable(
1536 &UpdateNewSpaceReferenceInExternalStringTableEntry);
1538 promotion_queue_.Destroy();
1540 incremental_marking()->UpdateMarkingDequeAfterScavenge();
1542 ScavengeWeakObjectRetainer weak_object_retainer(this);
1543 ProcessWeakReferences(&weak_object_retainer);
1545 DCHECK(new_space_front == new_space_.top());
1548 new_space_.set_age_mark(new_space_.top());
1550 new_space_.LowerInlineAllocationLimit(
1551 new_space_.inline_allocation_limit_step());
1553 // Update how much has survived scavenge.
1554 IncrementYoungSurvivorsCounter(static_cast<int>(
1555 (PromotedSpaceSizeOfObjects() - survived_watermark) + new_space_.Size()));
1557 LOG(isolate_, ResourceEvent("scavenge", "end"));
1559 gc_state_ = NOT_IN_GC;
1561 scavenges_since_last_idle_round_++;
1565 String* Heap::UpdateNewSpaceReferenceInExternalStringTableEntry(Heap* heap,
Object** p) {
1567 MapWord first_word = HeapObject::cast(*p)->map_word();
1569 if (!first_word.IsForwardingAddress()) {
1570 // Unreachable external string can be finalized.
1571 heap->FinalizeExternalString(String::cast(*p));
return NULL;
}
1575 // String is still reachable.
1576 return String::cast(first_word.ToForwardingAddress());
1580 void Heap::UpdateNewSpaceReferencesInExternalStringTable(
1581 ExternalStringTableUpdaterCallback updater_func) {
1583 if (FLAG_verify_heap) {
1584 external_string_table_.Verify();
1588 if (external_string_table_.new_space_strings_.is_empty()) return;
1590 Object** start = &external_string_table_.new_space_strings_[0];
1591 Object** end = start + external_string_table_.new_space_strings_.length();
1592 Object** last = start;
1594 for (Object** p = start; p < end; ++p) {
1595 DCHECK(InFromSpace(*p));
1596 String* target = updater_func(this, p);
1598 if (target == NULL) continue;
1600 DCHECK(target->IsExternalString());
1602 if (InNewSpace(target)) {
1603 // String is still in new space. Update the table entry.
*last = target;
++last;
} else {
1607 // String got promoted. Move it to the old string list.
1608 external_string_table_.AddOldString(target);
}
1612 DCHECK(last <= end);
1613 external_string_table_.ShrinkNewStrings(static_cast<int>(last - start));
1617 void Heap::UpdateReferencesInExternalStringTable(
1618 ExternalStringTableUpdaterCallback updater_func) {
1619 // Update old space string references.
1620 if (external_string_table_.old_space_strings_.length() > 0) {
1621 Object** start = &external_string_table_.old_space_strings_[0];
1622 Object** end = start + external_string_table_.old_space_strings_.length();
1623 for (Object** p = start; p < end; ++p) *p = updater_func(this, p);
1626 UpdateNewSpaceReferencesInExternalStringTable(updater_func);
1630 void Heap::ProcessWeakReferences(WeakObjectRetainer* retainer) {
1631 ProcessArrayBuffers(retainer);
1632 ProcessNativeContexts(retainer);
1633 // TODO(mvstanton): AllocationSites only need to be processed during
1634 // MARK_COMPACT, as they live in old space. Verify and address.
1635 ProcessAllocationSites(retainer);
1639 void Heap::ProcessNativeContexts(WeakObjectRetainer* retainer) {
1640 Object* head = VisitWeakList<Context>(this, native_contexts_list(), retainer);
1641 // Update the head of the list of contexts.
1642 set_native_contexts_list(head);
1646 void Heap::ProcessArrayBuffers(WeakObjectRetainer* retainer) {
1647 Object* array_buffer_obj =
1648 VisitWeakList<JSArrayBuffer>(this, array_buffers_list(), retainer);
1649 set_array_buffers_list(array_buffer_obj);
1653 void Heap::TearDownArrayBuffers() {
1654 Object* undefined = undefined_value();
1655 for (Object* o = array_buffers_list(); o != undefined;) {
1656 JSArrayBuffer* buffer = JSArrayBuffer::cast(o);
1657 Runtime::FreeArrayBuffer(isolate(), buffer);
1658 o = buffer->weak_next();
1660 set_array_buffers_list(undefined);
1664 void Heap::ProcessAllocationSites(WeakObjectRetainer* retainer) {
1665 Object* allocation_site_obj =
1666 VisitWeakList<AllocationSite>(this, allocation_sites_list(), retainer);
1667 set_allocation_sites_list(allocation_site_obj);
1671 void Heap::ResetAllAllocationSitesDependentCode(PretenureFlag flag) {
1672 DisallowHeapAllocation no_allocation_scope;
1673 Object* cur = allocation_sites_list();
1674 bool marked = false;
1675 while (cur->IsAllocationSite()) {
1676 AllocationSite* casted = AllocationSite::cast(cur);
1677 if (casted->GetPretenureMode() == flag) {
1678 casted->ResetPretenureDecision();
1679 casted->set_deopt_dependent_code(true);
marked = true;
}
1682 cur = casted->weak_next();
1684 if (marked) isolate_->stack_guard()->RequestDeoptMarkedAllocationSites();
1688 void Heap::EvaluateOldSpaceLocalPretenuring(
1689 uint64_t size_of_objects_before_gc) {
1690 uint64_t size_of_objects_after_gc = SizeOfObjects();
1691 double old_generation_survival_rate =
1692 (static_cast<double>(size_of_objects_after_gc) * 100) /
1693 static_cast<double>(size_of_objects_before_gc);
1695 if (old_generation_survival_rate < kOldSurvivalRateLowThreshold) {
1696 // Too many objects died in the old generation, pretenuring of wrong
1697 // allocation sites may be the cause for that. We have to deopt all
1698 // dependent code registered in the allocation sites to re-evaluate
1699 // our pretenuring decisions.
1700 ResetAllAllocationSitesDependentCode(TENURED);
1701 if (FLAG_trace_pretenuring) {
PrintF(
1703 "Deopt all allocation sites dependent code due to low survival "
1704 "rate in the old generation %f\n",
1705 old_generation_survival_rate);
1711 void Heap::VisitExternalResources(v8::ExternalResourceVisitor* visitor) {
1712 DisallowHeapAllocation no_allocation;
1713 // All external strings are listed in the external string table.
1715 class ExternalStringTableVisitorAdapter : public ObjectVisitor {
1717 explicit ExternalStringTableVisitorAdapter(
1718 v8::ExternalResourceVisitor* visitor)
1719 : visitor_(visitor) {}
1720 virtual void VisitPointers(Object** start, Object** end) {
1721 for (Object** p = start; p < end; p++) {
1722 DCHECK((*p)->IsExternalString());
1723 visitor_->VisitExternalString(
1724 Utils::ToLocal(Handle<String>(String::cast(*p))));
1729 v8::ExternalResourceVisitor* visitor_;
1730 } external_string_table_visitor(visitor);
1732 external_string_table_.Iterate(&external_string_table_visitor);
1736 class NewSpaceScavenger : public StaticNewSpaceVisitor<NewSpaceScavenger> {
1738 static inline void VisitPointer(Heap* heap, Object** p) {
1739 Object* object = *p;
1740 if (!heap->InNewSpace(object)) return;
1741 Heap::ScavengeObject(reinterpret_cast<HeapObject**>(p),
1742 reinterpret_cast<HeapObject*>(object));
1747 Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
1748 Address new_space_front) {
do {
1750 SemiSpace::AssertValidRange(new_space_front, new_space_.top());
1751 // The addresses new_space_front and new_space_.top() define a
1752 // queue of unprocessed copied objects. Process them until the
1754 while (new_space_front != new_space_.top()) {
1755 if (!NewSpacePage::IsAtEnd(new_space_front)) {
1756 HeapObject* object = HeapObject::FromAddress(new_space_front);
new_space_front +=
1758 NewSpaceScavenger::IterateBody(object->map(), object);
} else {
new_space_front =
1761 NewSpacePage::FromLimit(new_space_front)->next_page()->area_start();
}
1765 // Promote and process all the to-be-promoted objects.
1767 StoreBufferRebuildScope scope(this, store_buffer(),
1768 &ScavengeStoreBufferCallback);
1769 while (!promotion_queue()->is_empty()) {
HeapObject* target;
int size;
1772 promotion_queue()->remove(&target, &size);
1774 // Promoted object might be already partially visited
1775 // during old space pointer iteration. Thus we search specifically
1776 // for pointers to from semispace instead of looking for pointers
// to new space.
1778 DCHECK(!target->IsMap());
1779 IterateAndMarkPointersToFromSpace(
1780 target->address(), target->address() + size, &ScavengeObject);
1784 // Take another spin if there are now unswept objects in new space
1785 // (there are currently no more unswept promoted objects).
1786 } while (new_space_front != new_space_.top());
1788 return new_space_front;
1792 STATIC_ASSERT((FixedDoubleArray::kHeaderSize & kDoubleAlignmentMask) ==
0);  // NOLINT
1794 STATIC_ASSERT((ConstantPoolArray::kFirstEntryOffset & kDoubleAlignmentMask) ==
0);  // NOLINT
1796 STATIC_ASSERT((ConstantPoolArray::kExtendedFirstOffset &
1797 kDoubleAlignmentMask) == 0); // NOLINT
1800 INLINE(static HeapObject* EnsureDoubleAligned(Heap* heap, HeapObject* object,
int size));
1803 static HeapObject* EnsureDoubleAligned(Heap* heap, HeapObject* object,
int size) {
1805 if ((OffsetFrom(object->address()) & kDoubleAlignmentMask) != 0) {
1806 heap->CreateFillerObjectAt(object->address(), kPointerSize);
1807 return HeapObject::FromAddress(object->address() + kPointerSize);
} else {
1809 heap->CreateFillerObjectAt(object->address() + size - kPointerSize,
kPointerSize);
return object;
}
1816 enum LoggingAndProfiling {
1817 LOGGING_AND_PROFILING_ENABLED,
1818 LOGGING_AND_PROFILING_DISABLED
1822 enum MarksHandling { TRANSFER_MARKS, IGNORE_MARKS };
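// The scavenging fast path is specialized along two axes: whether mark bits
// have to be transferred to the copied object (only needed while incremental
// marking is active) and whether logging/profiling hooks have to be invoked
// on every object move. Heap::SelectScavengingVisitorsTable() below picks one
// of the four resulting instantiations.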
1825 template <MarksHandling marks_handling,
1826 LoggingAndProfiling logging_and_profiling_mode>
1827 class ScavengingVisitor : public StaticVisitorBase {
1829 static void Initialize() {
1830 table_.Register(kVisitSeqOneByteString, &EvacuateSeqOneByteString);
1831 table_.Register(kVisitSeqTwoByteString, &EvacuateSeqTwoByteString);
1832 table_.Register(kVisitShortcutCandidate, &EvacuateShortcutCandidate);
1833 table_.Register(kVisitByteArray, &EvacuateByteArray);
1834 table_.Register(kVisitFixedArray, &EvacuateFixedArray);
1835 table_.Register(kVisitFixedDoubleArray, &EvacuateFixedDoubleArray);
1836 table_.Register(kVisitFixedTypedArray, &EvacuateFixedTypedArray);
1837 table_.Register(kVisitFixedFloat64Array, &EvacuateFixedFloat64Array);
1840 kVisitNativeContext,
1841 &ObjectEvacuationStrategy<POINTER_OBJECT>::template VisitSpecialized<
1846 &ObjectEvacuationStrategy<POINTER_OBJECT>::template VisitSpecialized<
1847 ConsString::kSize>);
1851 &ObjectEvacuationStrategy<POINTER_OBJECT>::template VisitSpecialized<
1852 SlicedString::kSize>);
1856 &ObjectEvacuationStrategy<POINTER_OBJECT>::template VisitSpecialized<
1860 kVisitSharedFunctionInfo,
1861 &ObjectEvacuationStrategy<POINTER_OBJECT>::template VisitSpecialized<
1862 SharedFunctionInfo::kSize>);
1864 table_.Register(kVisitJSWeakCollection,
1865 &ObjectEvacuationStrategy<POINTER_OBJECT>::Visit);
1867 table_.Register(kVisitJSArrayBuffer,
1868 &ObjectEvacuationStrategy<POINTER_OBJECT>::Visit);
1870 table_.Register(kVisitJSTypedArray,
1871 &ObjectEvacuationStrategy<POINTER_OBJECT>::Visit);
1873 table_.Register(kVisitJSDataView,
1874 &ObjectEvacuationStrategy<POINTER_OBJECT>::Visit);
1876 table_.Register(kVisitJSRegExp,
1877 &ObjectEvacuationStrategy<POINTER_OBJECT>::Visit);
1879 if (marks_handling == IGNORE_MARKS) {
1882 &ObjectEvacuationStrategy<POINTER_OBJECT>::template VisitSpecialized<
1883 JSFunction::kSize>);
1885 table_.Register(kVisitJSFunction, &EvacuateJSFunction);
1888 table_.RegisterSpecializations<ObjectEvacuationStrategy<DATA_OBJECT>,
1889 kVisitDataObject, kVisitDataObjectGeneric>();
1891 table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>,
1892 kVisitJSObject, kVisitJSObjectGeneric>();
1894 table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>,
1895 kVisitStruct, kVisitStructGeneric>();
1898 static VisitorDispatchTable<ScavengingCallback>* GetTable() {
1903 enum ObjectContents { DATA_OBJECT, POINTER_OBJECT };
1905 static void RecordCopiedObject(Heap* heap, HeapObject* obj) {
1906 bool should_record = false;
1908 should_record = FLAG_heap_stats;
1910 should_record = should_record || FLAG_log_gc;
1911 if (should_record) {
1912 if (heap->new_space()->Contains(obj)) {
1913 heap->new_space()->RecordAllocation(obj);
1915 heap->new_space()->RecordPromotion(obj);
1920 // Helper function used by CopyObject to copy a source object to an
1921 // allocated target object and update the forwarding pointer in the source
1922 // object.
1923 INLINE(static void MigrateObject(Heap* heap, HeapObject* source,
1924 HeapObject* target, int size)) {
1925 // If we migrate into to-space, then the to-space top pointer should be
1926 // right after the target object. Incorporate double alignment padding if needed.
1928 DCHECK(!heap->InToSpace(target) ||
1929 target->address() + size == heap->new_space()->top() ||
1930 target->address() + size + kPointerSize == heap->new_space()->top());
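// The "+ kPointerSize" case covers double-aligned allocations, where
// EnsureDoubleAligned leaves a one-word filler behind the object instead of
// in front of it.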
1932 // Make sure that we do not overwrite the promotion queue which is at
1933 // the end of to-space.
1934 DCHECK(!heap->InToSpace(target) ||
1935 heap->promotion_queue()->IsBelowPromotionQueue(
1936 heap->new_space()->top()));
1938 // Copy the content of source to target.
1939 heap->CopyBlock(target->address(), source->address(), size);
1941 // Set the forwarding address.
1942 source->set_map_word(MapWord::FromForwardingAddress(target));
1944 if (logging_and_profiling_mode == LOGGING_AND_PROFILING_ENABLED) {
1945 // Update NewSpace stats if necessary.
1946 RecordCopiedObject(heap, target);
1947 heap->OnMoveEvent(target, source, size);
1950 if (marks_handling == TRANSFER_MARKS) {
1951 if (Marking::TransferColor(source, target)) {
1952 MemoryChunk::IncrementLiveBytesFromGC(target->address(), size);
1957 template <int alignment>
1958 static inline bool SemiSpaceCopyObject(Map* map, HeapObject** slot,
1959 HeapObject* object, int object_size) {
1960 Heap* heap = map->GetHeap();
1962 int allocation_size = object_size;
1963 if (alignment != kObjectAlignment) {
1964 DCHECK(alignment == kDoubleAlignment);
1965 allocation_size += kPointerSize;
1968 DCHECK(heap->AllowedToBeMigrated(object, NEW_SPACE));
1969 AllocationResult allocation =
1970 heap->new_space()->AllocateRaw(allocation_size);
1972 HeapObject* target = NULL; // Initialization to please compiler.
1973 if (allocation.To(&target)) {
1974 if (alignment != kObjectAlignment) {
1975 target = EnsureDoubleAligned(heap, target, allocation_size);
1978 // Order is important here: Set the promotion limit before migrating
1979 // the object. Otherwise we may end up overwriting promotion queue
1980 // entries when we migrate the object.
1981 heap->promotion_queue()->SetNewLimit(heap->new_space()->top());
1983 // Order is important: slot might be inside of the target if target
1984 // was allocated over a dead object and slot comes from the store buffer.
1987 MigrateObject(heap, object, target, object_size);
1989 heap->IncrementSemiSpaceCopiedObjectSize(object_size);
1996 template <ObjectContents object_contents, int alignment>
1997 static inline bool PromoteObject(Map* map, HeapObject** slot,
1998 HeapObject* object, int object_size) {
1999 Heap* heap = map->GetHeap();
2001 int allocation_size = object_size;
2002 if (alignment != kObjectAlignment) {
2003 DCHECK(alignment == kDoubleAlignment);
2004 allocation_size += kPointerSize;
2007 AllocationResult allocation;
2008 if (object_contents == DATA_OBJECT) {
2009 DCHECK(heap->AllowedToBeMigrated(object, OLD_DATA_SPACE));
2010 allocation = heap->old_data_space()->AllocateRaw(allocation_size);
2012 DCHECK(heap->AllowedToBeMigrated(object, OLD_POINTER_SPACE));
2013 allocation = heap->old_pointer_space()->AllocateRaw(allocation_size);
2016 HeapObject* target = NULL; // Initialization to please compiler.
2017 if (allocation.To(&target)) {
2018 if (alignment != kObjectAlignment) {
2019 target = EnsureDoubleAligned(heap, target, allocation_size);
2022 // Order is important: slot might be inside of the target if target
2023 // was allocated over a dead object and slot comes from the store buffer.
2026 MigrateObject(heap, object, target, object_size);
2028 if (object_contents == POINTER_OBJECT) {
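// Pointer-carrying objects are queued so that DoScavenge can later re-scan
// their slots for from-space pointers; for a JSFunction only the prefix up
// to kNonWeakFieldsEndOffset is enqueued, so its weak fields are skipped by
// that scan.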
2029 if (map->instance_type() == JS_FUNCTION_TYPE) {
2030 heap->promotion_queue()->insert(target,
2031 JSFunction::kNonWeakFieldsEndOffset);
2033 heap->promotion_queue()->insert(target, object_size);
2036 heap->IncrementPromotedObjectsSize(object_size);
2043 template <ObjectContents object_contents, int alignment>
2044 static inline void EvacuateObject(Map* map, HeapObject** slot,
2045 HeapObject* object, int object_size) {
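// Evacuation strategy: objects that have already survived a scavenge
// (ShouldBePromoted) are moved into old space, everything else is copied
// within new space. Either step can fail when the target space is full or
// fragmented, in which case the other one is attempted.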
2046 SLOW_DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
2047 SLOW_DCHECK(object->Size() == object_size);
2048 Heap* heap = map->GetHeap();
2050 if (!heap->ShouldBePromoted(object->address(), object_size)) {
2051 // A semi-space copy may fail due to fragmentation. In that case, we
2052 // try to promote the object.
2053 if (SemiSpaceCopyObject<alignment>(map, slot, object, object_size)) {
2058 if (PromoteObject<object_contents, alignment>(map, slot, object,
2063 // If promotion failed, we try to copy the object to the other semi-space
2064 if (SemiSpaceCopyObject<alignment>(map, slot, object, object_size)) return;
2070 static inline void EvacuateJSFunction(Map* map, HeapObject** slot,
2071 HeapObject* object) {
2072 ObjectEvacuationStrategy<POINTER_OBJECT>::template VisitSpecialized<
2073 JSFunction::kSize>(map, slot, object);
2075 HeapObject* target = *slot;
2076 MarkBit mark_bit = Marking::MarkBitFrom(target);
2077 if (Marking::IsBlack(mark_bit)) {
2078 // This object is black and it might not be rescanned by the marker.
2079 // We should explicitly record the code entry slot for compaction because
2080 // promotion queue processing (IterateAndMarkPointersToFromSpace) will
2081 // miss it, as it is not HeapObject-tagged.
2082 Address code_entry_slot =
2083 target->address() + JSFunction::kCodeEntryOffset;
2084 Code* code = Code::cast(Code::GetObjectFromEntryAddress(code_entry_slot));
2085 map->GetHeap()->mark_compact_collector()->RecordCodeEntrySlot(
2086 code_entry_slot, code);
2091 static inline void EvacuateFixedArray(Map* map, HeapObject** slot,
2092 HeapObject* object) {
2093 int object_size = FixedArray::BodyDescriptor::SizeOf(map, object);
2094 EvacuateObject<POINTER_OBJECT, kObjectAlignment>(map, slot, object,
2099 static inline void EvacuateFixedDoubleArray(Map* map, HeapObject** slot,
2100 HeapObject* object) {
2101 int length = reinterpret_cast<FixedDoubleArray*>(object)->length();
2102 int object_size = FixedDoubleArray::SizeFor(length);
2103 EvacuateObject<DATA_OBJECT, kDoubleAlignment>(map, slot, object,
2108 static inline void EvacuateFixedTypedArray(Map* map, HeapObject** slot,
2109 HeapObject* object) {
2110 int object_size = reinterpret_cast<FixedTypedArrayBase*>(object)->size();
2111 EvacuateObject<DATA_OBJECT, kObjectAlignment>(map, slot, object,
2116 static inline void EvacuateFixedFloat64Array(Map* map, HeapObject** slot,
2117 HeapObject* object) {
2118 int object_size = reinterpret_cast<FixedFloat64Array*>(object)->size();
2119 EvacuateObject<DATA_OBJECT, kDoubleAlignment>(map, slot, object,
2124 static inline void EvacuateByteArray(Map* map, HeapObject** slot,
2125 HeapObject* object) {
2126 int object_size = reinterpret_cast<ByteArray*>(object)->ByteArraySize();
2127 EvacuateObject<DATA_OBJECT, kObjectAlignment>(map, slot, object,
2132 static inline void EvacuateSeqOneByteString(Map* map, HeapObject** slot,
2133 HeapObject* object) {
2134 int object_size = SeqOneByteString::cast(object)
2135 ->SeqOneByteStringSize(map->instance_type());
2136 EvacuateObject<DATA_OBJECT, kObjectAlignment>(map, slot, object,
2141 static inline void EvacuateSeqTwoByteString(Map* map, HeapObject** slot,
2142 HeapObject* object) {
2143 int object_size = SeqTwoByteString::cast(object)
2144 ->SeqTwoByteStringSize(map->instance_type());
2145 EvacuateObject<DATA_OBJECT, kObjectAlignment>(map, slot, object,
2150 static inline void EvacuateShortcutCandidate(Map* map, HeapObject** slot,
2151 HeapObject* object) {
2152 DCHECK(IsShortcutCandidate(map->instance_type()));
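// A shortcut candidate is a cons string; if its second component is the
// empty string, the scavenger does not copy the cons wrapper at all and
// instead forwards the slot directly to the first component.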
2154 Heap* heap = map->GetHeap();
2156 if (marks_handling == IGNORE_MARKS &&
2157 ConsString::cast(object)->unchecked_second() == heap->empty_string()) {
2159 HeapObject::cast(ConsString::cast(object)->unchecked_first());
2163 if (!heap->InNewSpace(first)) {
2164 object->set_map_word(MapWord::FromForwardingAddress(first));
2168 MapWord first_word = first->map_word();
2169 if (first_word.IsForwardingAddress()) {
2170 HeapObject* target = first_word.ToForwardingAddress();
2173 object->set_map_word(MapWord::FromForwardingAddress(target));
2177 heap->DoScavengeObject(first->map(), slot, first);
2178 object->set_map_word(MapWord::FromForwardingAddress(*slot));
2182 int object_size = ConsString::kSize;
2183 EvacuateObject<POINTER_OBJECT, kObjectAlignment>(map, slot, object,
2187 template <ObjectContents object_contents>
2188 class ObjectEvacuationStrategy {
2190 template <int object_size>
2191 static inline void VisitSpecialized(Map* map, HeapObject** slot,
2192 HeapObject* object) {
2193 EvacuateObject<object_contents, kObjectAlignment>(map, slot, object,
2197 static inline void Visit(Map* map, HeapObject** slot, HeapObject* object) {
2198 int object_size = map->instance_size();
2199 EvacuateObject<object_contents, kObjectAlignment>(map, slot, object,
2204 static VisitorDispatchTable<ScavengingCallback> table_;
2208 template <MarksHandling marks_handling,
2209 LoggingAndProfiling logging_and_profiling_mode>
2210 VisitorDispatchTable<ScavengingCallback>
2211 ScavengingVisitor<marks_handling, logging_and_profiling_mode>::table_;
2214 static void InitializeScavengingVisitorsTables() {
2215 ScavengingVisitor<TRANSFER_MARKS,
2216 LOGGING_AND_PROFILING_DISABLED>::Initialize();
2217 ScavengingVisitor<IGNORE_MARKS, LOGGING_AND_PROFILING_DISABLED>::Initialize();
2218 ScavengingVisitor<TRANSFER_MARKS,
2219 LOGGING_AND_PROFILING_ENABLED>::Initialize();
2220 ScavengingVisitor<IGNORE_MARKS, LOGGING_AND_PROFILING_ENABLED>::Initialize();
2224 void Heap::SelectScavengingVisitorsTable() {
2225 bool logging_and_profiling =
2226 FLAG_verify_predictable || isolate()->logger()->is_logging() ||
2227 isolate()->cpu_profiler()->is_profiling() ||
2228 (isolate()->heap_profiler() != NULL &&
2229 isolate()->heap_profiler()->is_tracking_object_moves());
2231 if (!incremental_marking()->IsMarking()) {
2232 if (!logging_and_profiling) {
2233 scavenging_visitors_table_.CopyFrom(ScavengingVisitor<
2234 IGNORE_MARKS, LOGGING_AND_PROFILING_DISABLED>::GetTable());
2236 scavenging_visitors_table_.CopyFrom(ScavengingVisitor<
2237 IGNORE_MARKS, LOGGING_AND_PROFILING_ENABLED>::GetTable());
2240 if (!logging_and_profiling) {
2241 scavenging_visitors_table_.CopyFrom(ScavengingVisitor<
2242 TRANSFER_MARKS, LOGGING_AND_PROFILING_DISABLED>::GetTable());
2244 scavenging_visitors_table_.CopyFrom(ScavengingVisitor<
2245 TRANSFER_MARKS, LOGGING_AND_PROFILING_ENABLED>::GetTable());
2248 if (incremental_marking()->IsCompacting()) {
2249 // When compacting, forbid short-circuiting of cons strings.
2250 // Scavenging code relies on the fact that a new space object
2251 // can't be evacuated into an evacuation candidate, but
2252 // short-circuiting violates this assumption.
2253 scavenging_visitors_table_.Register(
2254 StaticVisitorBase::kVisitShortcutCandidate,
2255 scavenging_visitors_table_.GetVisitorById(
2256 StaticVisitorBase::kVisitConsString));
2262 void Heap::ScavengeObjectSlow(HeapObject** p, HeapObject* object) {
2263 SLOW_DCHECK(object->GetIsolate()->heap()->InFromSpace(object));
2264 MapWord first_word = object->map_word();
2265 SLOW_DCHECK(!first_word.IsForwardingAddress());
2266 Map* map = first_word.ToMap();
2267 map->GetHeap()->DoScavengeObject(map, p, object);
2271 AllocationResult Heap::AllocatePartialMap(InstanceType instance_type,
2272 int instance_size) {
2274 AllocationResult allocation = AllocateRaw(Map::kSize, MAP_SPACE, MAP_SPACE);
2275 if (!allocation.To(&result)) return allocation;
2277 // Map::cast cannot be used due to uninitialized map field.
2278 reinterpret_cast<Map*>(result)->set_map(raw_unchecked_meta_map());
2279 reinterpret_cast<Map*>(result)->set_instance_type(instance_type);
2280 reinterpret_cast<Map*>(result)->set_instance_size(instance_size);
2281 reinterpret_cast<Map*>(result)->set_visitor_id(
2282 StaticVisitorBase::GetVisitorId(instance_type, instance_size));
2283 reinterpret_cast<Map*>(result)->set_inobject_properties(0);
2284 reinterpret_cast<Map*>(result)->set_pre_allocated_property_fields(0);
2285 reinterpret_cast<Map*>(result)->set_unused_property_fields(0);
2286 reinterpret_cast<Map*>(result)->set_bit_field(0);
2287 reinterpret_cast<Map*>(result)->set_bit_field2(0);
2288 int bit_field3 = Map::EnumLengthBits::encode(kInvalidEnumCacheSentinel) |
2289 Map::OwnsDescriptors::encode(true);
2290 reinterpret_cast<Map*>(result)->set_bit_field3(bit_field3);
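// Partial maps still lack prototype, constructor, descriptors and dependent
// code; CreateInitialMaps() patches these fields in once the objects they
// refer to have been allocated.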
2295 AllocationResult Heap::AllocateMap(InstanceType instance_type,
2297 ElementsKind elements_kind) {
2299 AllocationResult allocation = AllocateRaw(Map::kSize, MAP_SPACE, MAP_SPACE);
2300 if (!allocation.To(&result)) return allocation;
2302 result->set_map_no_write_barrier(meta_map());
2303 Map* map = Map::cast(result);
2304 map->set_instance_type(instance_type);
2305 map->set_visitor_id(
2306 StaticVisitorBase::GetVisitorId(instance_type, instance_size));
2307 map->set_prototype(null_value(), SKIP_WRITE_BARRIER);
2308 map->set_constructor(null_value(), SKIP_WRITE_BARRIER);
2309 map->set_instance_size(instance_size);
2310 map->set_inobject_properties(0);
2311 map->set_pre_allocated_property_fields(0);
2312 map->set_code_cache(empty_fixed_array(), SKIP_WRITE_BARRIER);
2313 map->set_dependent_code(DependentCode::cast(empty_fixed_array()),
2314 SKIP_WRITE_BARRIER);
2315 map->init_back_pointer(undefined_value());
2316 map->set_unused_property_fields(0);
2317 map->set_instance_descriptors(empty_descriptor_array());
2318 map->set_bit_field(0);
2319 map->set_bit_field2(1 << Map::kIsExtensible);
2320 int bit_field3 = Map::EnumLengthBits::encode(kInvalidEnumCacheSentinel) |
2321 Map::OwnsDescriptors::encode(true);
2322 map->set_bit_field3(bit_field3);
2323 map->set_elements_kind(elements_kind);
2329 AllocationResult Heap::AllocateFillerObject(int size, bool double_align,
2330 AllocationSpace space) {
2333 AllocationResult allocation = AllocateRaw(size, space, space);
2334 if (!allocation.To(&obj)) return allocation;
2337 MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
2338 DCHECK(chunk->owner()->identity() == space);
2340 CreateFillerObjectAt(obj->address(), size);
2345 const Heap::StringTypeTable Heap::string_type_table[] = {
2346 #define STRING_TYPE_ELEMENT(type, size, name, camel_name) \
2347 { type, size, k##camel_name##MapRootIndex } \
2349 STRING_TYPE_LIST(STRING_TYPE_ELEMENT)
2350 #undef STRING_TYPE_ELEMENT
2354 const Heap::ConstantStringTable Heap::constant_string_table[] = {
2355 #define CONSTANT_STRING_ELEMENT(name, contents) \
2356 { contents, k##name##RootIndex } \
2358 INTERNALIZED_STRING_LIST(CONSTANT_STRING_ELEMENT)
2359 #undef CONSTANT_STRING_ELEMENT
2363 const Heap::StructTable Heap::struct_table[] = {
2364 #define STRUCT_TABLE_ELEMENT(NAME, Name, name) \
2365 { NAME##_TYPE, Name::kSize, k##Name##MapRootIndex } \
2367 STRUCT_LIST(STRUCT_TABLE_ELEMENT)
2368 #undef STRUCT_TABLE_ELEMENT
2372 bool Heap::CreateInitialMaps() {
2375 AllocationResult allocation = AllocatePartialMap(MAP_TYPE, Map::kSize);
2376 if (!allocation.To(&obj)) return false;
2378 // Map::cast cannot be used due to uninitialized map field.
2379 Map* new_meta_map = reinterpret_cast<Map*>(obj);
2380 set_meta_map(new_meta_map);
2381 new_meta_map->set_map(new_meta_map);
2383 { // Partial map allocation
2384 #define ALLOCATE_PARTIAL_MAP(instance_type, size, field_name) \
2387 if (!AllocatePartialMap((instance_type), (size)).To(&map)) return false; \
2388 set_##field_name##_map(map); \
2391 ALLOCATE_PARTIAL_MAP(FIXED_ARRAY_TYPE, kVariableSizeSentinel, fixed_array);
2392 ALLOCATE_PARTIAL_MAP(ODDBALL_TYPE, Oddball::kSize, undefined);
2393 ALLOCATE_PARTIAL_MAP(ODDBALL_TYPE, Oddball::kSize, null);
2394 ALLOCATE_PARTIAL_MAP(CONSTANT_POOL_ARRAY_TYPE, kVariableSizeSentinel,
2395 constant_pool_array);
2397 #undef ALLOCATE_PARTIAL_MAP
2400 // Allocate the empty array.
2402 AllocationResult allocation = AllocateEmptyFixedArray();
2403 if (!allocation.To(&obj)) return false;
2405 set_empty_fixed_array(FixedArray::cast(obj));
2408 AllocationResult allocation = Allocate(null_map(), OLD_POINTER_SPACE);
2409 if (!allocation.To(&obj)) return false;
2411 set_null_value(Oddball::cast(obj));
2412 Oddball::cast(obj)->set_kind(Oddball::kNull);
2415 AllocationResult allocation = Allocate(undefined_map(), OLD_POINTER_SPACE);
2416 if (!allocation.To(&obj)) return false;
2418 set_undefined_value(Oddball::cast(obj));
2419 Oddball::cast(obj)->set_kind(Oddball::kUndefined);
2420 DCHECK(!InNewSpace(undefined_value()));
2422 // Set preliminary exception sentinel value before actually initializing it.
2423 set_exception(null_value());
2425 // Allocate the empty descriptor array.
2427 AllocationResult allocation = AllocateEmptyFixedArray();
2428 if (!allocation.To(&obj)) return false;
2430 set_empty_descriptor_array(DescriptorArray::cast(obj));
2432 // Allocate the constant pool array.
2434 AllocationResult allocation = AllocateEmptyConstantPoolArray();
2435 if (!allocation.To(&obj)) return false;
2437 set_empty_constant_pool_array(ConstantPoolArray::cast(obj));
2439 // Fix the instance_descriptors for the existing maps.
2440 meta_map()->set_code_cache(empty_fixed_array());
2441 meta_map()->set_dependent_code(DependentCode::cast(empty_fixed_array()));
2442 meta_map()->init_back_pointer(undefined_value());
2443 meta_map()->set_instance_descriptors(empty_descriptor_array());
2445 fixed_array_map()->set_code_cache(empty_fixed_array());
2446 fixed_array_map()->set_dependent_code(
2447 DependentCode::cast(empty_fixed_array()));
2448 fixed_array_map()->init_back_pointer(undefined_value());
2449 fixed_array_map()->set_instance_descriptors(empty_descriptor_array());
2451 undefined_map()->set_code_cache(empty_fixed_array());
2452 undefined_map()->set_dependent_code(DependentCode::cast(empty_fixed_array()));
2453 undefined_map()->init_back_pointer(undefined_value());
2454 undefined_map()->set_instance_descriptors(empty_descriptor_array());
2456 null_map()->set_code_cache(empty_fixed_array());
2457 null_map()->set_dependent_code(DependentCode::cast(empty_fixed_array()));
2458 null_map()->init_back_pointer(undefined_value());
2459 null_map()->set_instance_descriptors(empty_descriptor_array());
2461 constant_pool_array_map()->set_code_cache(empty_fixed_array());
2462 constant_pool_array_map()->set_dependent_code(
2463 DependentCode::cast(empty_fixed_array()));
2464 constant_pool_array_map()->init_back_pointer(undefined_value());
2465 constant_pool_array_map()->set_instance_descriptors(empty_descriptor_array());
2467 // Fix prototype object for existing maps.
2468 meta_map()->set_prototype(null_value());
2469 meta_map()->set_constructor(null_value());
2471 fixed_array_map()->set_prototype(null_value());
2472 fixed_array_map()->set_constructor(null_value());
2474 undefined_map()->set_prototype(null_value());
2475 undefined_map()->set_constructor(null_value());
2477 null_map()->set_prototype(null_value());
2478 null_map()->set_constructor(null_value());
2480 constant_pool_array_map()->set_prototype(null_value());
2481 constant_pool_array_map()->set_constructor(null_value());
2484 #define ALLOCATE_MAP(instance_type, size, field_name) \
2487 if (!AllocateMap((instance_type), size).To(&map)) return false; \
2488 set_##field_name##_map(map); \
2491 #define ALLOCATE_VARSIZE_MAP(instance_type, field_name) \
2492 ALLOCATE_MAP(instance_type, kVariableSizeSentinel, field_name)
2494 ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, fixed_cow_array)
2495 DCHECK(fixed_array_map() != fixed_cow_array_map());
2497 ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, scope_info)
2498 ALLOCATE_MAP(HEAP_NUMBER_TYPE, HeapNumber::kSize, heap_number)
2499 ALLOCATE_MAP(MUTABLE_HEAP_NUMBER_TYPE, HeapNumber::kSize,
2500 mutable_heap_number)
2501 ALLOCATE_MAP(SYMBOL_TYPE, Symbol::kSize, symbol)
2502 ALLOCATE_MAP(FOREIGN_TYPE, Foreign::kSize, foreign)
2504 ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, the_hole);
2505 ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, boolean);
2506 ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, uninitialized);
2507 ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, arguments_marker);
2508 ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, no_interceptor_result_sentinel);
2509 ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, exception);
2510 ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, termination_exception);
2512 for (unsigned i = 0; i < ARRAY_SIZE(string_type_table); i++) {
2513 const StringTypeTable& entry = string_type_table[i];
2515 AllocationResult allocation = AllocateMap(entry.type, entry.size);
2516 if (!allocation.To(&obj)) return false;
2518 // Mark cons string maps as unstable, because their objects can change maps during GC.
2520 Map* map = Map::cast(obj);
2521 if (StringShape(entry.type).IsCons()) map->mark_unstable();
2522 roots_[entry.index] = map;
2525 ALLOCATE_VARSIZE_MAP(STRING_TYPE, undetectable_string)
2526 undetectable_string_map()->set_is_undetectable();
2528 ALLOCATE_VARSIZE_MAP(ASCII_STRING_TYPE, undetectable_ascii_string);
2529 undetectable_ascii_string_map()->set_is_undetectable();
2531 ALLOCATE_VARSIZE_MAP(FIXED_DOUBLE_ARRAY_TYPE, fixed_double_array)
2532 ALLOCATE_VARSIZE_MAP(BYTE_ARRAY_TYPE, byte_array)
2533 ALLOCATE_VARSIZE_MAP(FREE_SPACE_TYPE, free_space)
2535 #define ALLOCATE_EXTERNAL_ARRAY_MAP(Type, type, TYPE, ctype, size) \
2536 ALLOCATE_MAP(EXTERNAL_##TYPE##_ARRAY_TYPE, ExternalArray::kAlignedSize, \
2537 external_##type##_array)
2539 TYPED_ARRAYS(ALLOCATE_EXTERNAL_ARRAY_MAP)
2540 #undef ALLOCATE_EXTERNAL_ARRAY_MAP
2542 #define ALLOCATE_FIXED_TYPED_ARRAY_MAP(Type, type, TYPE, ctype, size) \
2543 ALLOCATE_VARSIZE_MAP(FIXED_##TYPE##_ARRAY_TYPE, fixed_##type##_array)
2545 TYPED_ARRAYS(ALLOCATE_FIXED_TYPED_ARRAY_MAP)
2546 #undef ALLOCATE_FIXED_TYPED_ARRAY_MAP
2548 ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, sloppy_arguments_elements)
2550 ALLOCATE_VARSIZE_MAP(CODE_TYPE, code)
2552 ALLOCATE_MAP(CELL_TYPE, Cell::kSize, cell)
2553 ALLOCATE_MAP(PROPERTY_CELL_TYPE, PropertyCell::kSize, global_property_cell)
2554 ALLOCATE_MAP(FILLER_TYPE, kPointerSize, one_pointer_filler)
2555 ALLOCATE_MAP(FILLER_TYPE, 2 * kPointerSize, two_pointer_filler)
2558 for (unsigned i = 0; i < ARRAY_SIZE(struct_table); i++) {
2559 const StructTable& entry = struct_table[i];
2561 if (!AllocateMap(entry.type, entry.size).To(&map)) return false;
2562 roots_[entry.index] = map;
2565 ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, hash_table)
2566 ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, ordered_hash_table)
2568 ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, function_context)
2569 ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, catch_context)
2570 ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, with_context)
2571 ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, block_context)
2572 ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, module_context)
2573 ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, global_context)
2575 ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, native_context)
2576 native_context_map()->set_dictionary_map(true);
2577 native_context_map()->set_visitor_id(
2578 StaticVisitorBase::kVisitNativeContext);
2580 ALLOCATE_MAP(SHARED_FUNCTION_INFO_TYPE, SharedFunctionInfo::kAlignedSize,
2581 shared_function_info)
2583 ALLOCATE_MAP(JS_MESSAGE_OBJECT_TYPE, JSMessageObject::kSize, message_object)
2584 ALLOCATE_MAP(JS_OBJECT_TYPE, JSObject::kHeaderSize + kPointerSize, external)
2585 external_map()->set_is_extensible(false);
2586 #undef ALLOCATE_VARSIZE_MAP
2592 ByteArray* byte_array;
2593 if (!AllocateByteArray(0, TENURED).To(&byte_array)) return false;
2594 set_empty_byte_array(byte_array);
2597 #define ALLOCATE_EMPTY_EXTERNAL_ARRAY(Type, type, TYPE, ctype, size) \
2599 ExternalArray* obj; \
2600 if (!AllocateEmptyExternalArray(kExternal##Type##Array).To(&obj)) \
2602 set_empty_external_##type##_array(obj); \
2605 TYPED_ARRAYS(ALLOCATE_EMPTY_EXTERNAL_ARRAY)
2606 #undef ALLOCATE_EMPTY_EXTERNAL_ARRAY
2608 #define ALLOCATE_EMPTY_FIXED_TYPED_ARRAY(Type, type, TYPE, ctype, size) \
2610 FixedTypedArrayBase* obj; \
2611 if (!AllocateEmptyFixedTypedArray(kExternal##Type##Array).To(&obj)) \
2613 set_empty_fixed_##type##_array(obj); \
2616 TYPED_ARRAYS(ALLOCATE_EMPTY_FIXED_TYPED_ARRAY)
2617 #undef ALLOCATE_EMPTY_FIXED_TYPED_ARRAY
2619 DCHECK(!InNewSpace(empty_fixed_array()));
2624 AllocationResult Heap::AllocateHeapNumber(double value, MutableMode mode,
2625 PretenureFlag pretenure) {
2626 // Statically ensure that it is safe to allocate heap numbers in paged spaces.
2628 int size = HeapNumber::kSize;
2629 STATIC_ASSERT(HeapNumber::kSize <= Page::kMaxRegularHeapObjectSize);
2631 AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
2635 AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
2636 if (!allocation.To(&result)) return allocation;
2639 Map* map = mode == MUTABLE ? mutable_heap_number_map() : heap_number_map();
2640 HeapObject::cast(result)->set_map_no_write_barrier(map);
2641 HeapNumber::cast(result)->set_value(value);
2646 AllocationResult Heap::AllocateCell(Object* value) {
2647 int size = Cell::kSize;
2648 STATIC_ASSERT(Cell::kSize <= Page::kMaxRegularHeapObjectSize);
2652 AllocationResult allocation = AllocateRaw(size, CELL_SPACE, CELL_SPACE);
2653 if (!allocation.To(&result)) return allocation;
2655 result->set_map_no_write_barrier(cell_map());
2656 Cell::cast(result)->set_value(value);
2661 AllocationResult Heap::AllocatePropertyCell() {
2662 int size = PropertyCell::kSize;
2663 STATIC_ASSERT(PropertyCell::kSize <= Page::kMaxRegularHeapObjectSize);
2666 AllocationResult allocation =
2667 AllocateRaw(size, PROPERTY_CELL_SPACE, PROPERTY_CELL_SPACE);
2668 if (!allocation.To(&result)) return allocation;
2670 result->set_map_no_write_barrier(global_property_cell_map());
2671 PropertyCell* cell = PropertyCell::cast(result);
2672 cell->set_dependent_code(DependentCode::cast(empty_fixed_array()),
2673 SKIP_WRITE_BARRIER);
2674 cell->set_value(the_hole_value());
2675 cell->set_type(HeapType::None());
2680 void Heap::CreateApiObjects() {
2681 HandleScope scope(isolate());
2682 Factory* factory = isolate()->factory();
2683 Handle<Map> new_neander_map =
2684 factory->NewMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
2686 // Don't use Smi-only elements optimizations for objects with the neander
2687 // map. There are too many cases where element values are set directly with a
2688 // bottleneck to trap the Smi-only -> fast elements transition, and there
2689 // appears to be no benefit in optimizing this case.
2690 new_neander_map->set_elements_kind(TERMINAL_FAST_ELEMENTS_KIND);
2691 set_neander_map(*new_neander_map);
2693 Handle<JSObject> listeners = factory->NewNeanderObject();
2694 Handle<FixedArray> elements = factory->NewFixedArray(2);
2695 elements->set(0, Smi::FromInt(0));
2696 listeners->set_elements(*elements);
2697 set_message_listeners(*listeners);
2701 void Heap::CreateJSEntryStub() {
2702 JSEntryStub stub(isolate());
2703 set_js_entry_code(*stub.GetCode());
2707 void Heap::CreateJSConstructEntryStub() {
2708 JSConstructEntryStub stub(isolate());
2709 set_js_construct_entry_code(*stub.GetCode());
2713 void Heap::CreateFixedStubs() {
2714 // Here we create roots for fixed stubs. They are needed at GC
2715 // for cooking and uncooking (check out frames.cc).
2716 // This eliminates the need for doing a dictionary lookup in the
2717 // stub cache for these stubs.
2718 HandleScope scope(isolate());
2720 // Create stubs that should be there, so we don't unexpectedly have to
2721 // create them if we need them during the creation of another stub.
2722 // Stub creation mixes raw pointers and handles in an unsafe manner so
2723 // we cannot create stubs while we are creating stubs.
2724 CodeStub::GenerateStubsAheadOfTime(isolate());
2726 // MacroAssembler::Abort calls (usually enabled with --debug-code) depend on
2727 // CEntryStub, so we need to call GenerateStubsAheadOfTime before JSEntryStub is created.
2730 // gcc-4.4 has a problem generating correct code for the following snippet:
2731 // { JSEntryStub stub;
2732 // js_entry_code_ = *stub.GetCode();
2734 // { JSConstructEntryStub stub;
2735 // js_construct_entry_code_ = *stub.GetCode();
2737 // To work around the problem, make separate functions without inlining.
2738 Heap::CreateJSEntryStub();
2739 Heap::CreateJSConstructEntryStub();
2743 void Heap::CreateInitialObjects() {
2744 HandleScope scope(isolate());
2745 Factory* factory = isolate()->factory();
2747 // The -0 value must be set before NewNumber works.
2748 set_minus_zero_value(*factory->NewHeapNumber(-0.0, IMMUTABLE, TENURED));
2749 DCHECK(std::signbit(minus_zero_value()->Number()) != 0);
set_nan_value(
2752 *factory->NewHeapNumber(base::OS::nan_value(), IMMUTABLE, TENURED));
2753 set_infinity_value(*factory->NewHeapNumber(V8_INFINITY, IMMUTABLE, TENURED));
2755 // The hole has not been created yet, but we want to put something
2756 // predictable in the gaps in the string table, so let's make that Smi zero.
2757 set_the_hole_value(reinterpret_cast<Oddball*>(Smi::FromInt(0)));
2759 // Allocate initial string table.
2760 set_string_table(*StringTable::New(isolate(), kInitialStringTableSize));
2762 // Finish initializing oddballs after creating the string table.
2763 Oddball::Initialize(isolate(), factory->undefined_value(), "undefined",
2764 factory->nan_value(), Oddball::kUndefined);
2766 // Initialize the null_value.
2767 Oddball::Initialize(isolate(), factory->null_value(), "null",
2768 handle(Smi::FromInt(0), isolate()), Oddball::kNull);
2770 set_true_value(*factory->NewOddball(factory->boolean_map(), "true",
2771 handle(Smi::FromInt(1), isolate()),
2774 set_false_value(*factory->NewOddball(factory->boolean_map(), "false",
2775 handle(Smi::FromInt(0), isolate()),
2778 set_the_hole_value(*factory->NewOddball(factory->the_hole_map(), "hole",
2779 handle(Smi::FromInt(-1), isolate()),
2780 Oddball::kTheHole));
2782 set_uninitialized_value(*factory->NewOddball(
2783 factory->uninitialized_map(), "uninitialized",
2784 handle(Smi::FromInt(-1), isolate()), Oddball::kUninitialized));
2786 set_arguments_marker(*factory->NewOddball(
2787 factory->arguments_marker_map(), "arguments_marker",
2788 handle(Smi::FromInt(-4), isolate()), Oddball::kArgumentMarker));
2790 set_no_interceptor_result_sentinel(*factory->NewOddball(
2791 factory->no_interceptor_result_sentinel_map(),
2792 "no_interceptor_result_sentinel", handle(Smi::FromInt(-2), isolate()),
2795 set_termination_exception(*factory->NewOddball(
2796 factory->termination_exception_map(), "termination_exception",
2797 handle(Smi::FromInt(-3), isolate()), Oddball::kOther));
2799 set_exception(*factory->NewOddball(factory->exception_map(), "exception",
2800 handle(Smi::FromInt(-5), isolate()),
2801 Oddball::kException));
2803 for (unsigned i = 0; i < ARRAY_SIZE(constant_string_table); i++) {
2804 Handle<String> str =
2805 factory->InternalizeUtf8String(constant_string_table[i].contents);
2806 roots_[constant_string_table[i].index] = *str;
2809 // Allocate the hidden string which is used to identify the hidden properties
2810 // in JSObjects. The hash code has a special value so that it will not match
2811 // the empty string when searching for the property. It cannot be part of the
2812 // loop above because it needs to be allocated manually with the special
2813 // hash code in place. The hash code for the hidden_string is zero to ensure
2814 // that it will always be at the first entry in property descriptors.
2815 hidden_string_ = *factory->NewOneByteInternalizedString(
2816 OneByteVector("", 0), String::kEmptyStringHash);
2818 // Create the code_stubs dictionary. The initial size is set to avoid
2819 // expanding the dictionary during bootstrapping.
2820 set_code_stubs(*UnseededNumberDictionary::New(isolate(), 128));
2822 // Create the non_monomorphic_cache used in stub-cache.cc. The initial size
2823 // is set to avoid expanding the dictionary during bootstrapping.
2824 set_non_monomorphic_cache(*UnseededNumberDictionary::New(isolate(), 64));
2826 set_polymorphic_code_cache(PolymorphicCodeCache::cast(
2827 *factory->NewStruct(POLYMORPHIC_CODE_CACHE_TYPE)));
2829 set_instanceof_cache_function(Smi::FromInt(0));
2830 set_instanceof_cache_map(Smi::FromInt(0));
2831 set_instanceof_cache_answer(Smi::FromInt(0));
2835 // Allocate the dictionary of intrinsic function names.
2836 Handle<NameDictionary> intrinsic_names =
2837 NameDictionary::New(isolate(), Runtime::kNumFunctions);
2838 Runtime::InitializeIntrinsicFunctionNames(isolate(), intrinsic_names);
2839 set_intrinsic_function_names(*intrinsic_names);
2841 set_number_string_cache(
2842 *factory->NewFixedArray(kInitialNumberStringCacheSize * 2, TENURED));
2844 // Allocate cache for single character one byte strings.
2845 set_single_character_string_cache(
2846 *factory->NewFixedArray(String::kMaxOneByteCharCode + 1, TENURED));
2848 // Allocate cache for string split and regexp-multiple.
2849 set_string_split_cache(*factory->NewFixedArray(
2850 RegExpResultsCache::kRegExpResultsCacheSize, TENURED));
2851 set_regexp_multiple_cache(*factory->NewFixedArray(
2852 RegExpResultsCache::kRegExpResultsCacheSize, TENURED));
2854 // Allocate cache for external strings pointing to native source code.
2855 set_natives_source_cache(
2856 *factory->NewFixedArray(Natives::GetBuiltinsCount()));
2858 set_undefined_cell(*factory->NewCell(factory->undefined_value()));
2860 // The symbol registry is initialized lazily.
2861 set_symbol_registry(undefined_value());
2863 // Allocate object to hold object observation state.
2864 set_observation_state(*factory->NewJSObjectFromMap(
2865 factory->NewMap(JS_OBJECT_TYPE, JSObject::kHeaderSize)));
2867 // Microtask queue uses the empty fixed array as a sentinel for "empty".
2868 // The number of queued microtasks is stored in Isolate::pending_microtask_count().
2869 set_microtask_queue(empty_fixed_array());
2871 set_detailed_stack_trace_symbol(*factory->NewPrivateSymbol());
2872 set_elements_transition_symbol(*factory->NewPrivateSymbol());
2873 set_frozen_symbol(*factory->NewPrivateSymbol());
2874 set_megamorphic_symbol(*factory->NewPrivateSymbol());
2875 set_nonexistent_symbol(*factory->NewPrivateSymbol());
2876 set_normal_ic_symbol(*factory->NewPrivateSymbol());
2877 set_observed_symbol(*factory->NewPrivateSymbol());
2878 set_stack_trace_symbol(*factory->NewPrivateSymbol());
2879 set_uninitialized_symbol(*factory->NewPrivateSymbol());
2881 Handle<SeededNumberDictionary> slow_element_dictionary =
2882 SeededNumberDictionary::New(isolate(), 0, TENURED);
2883 slow_element_dictionary->set_requires_slow_elements();
2884 set_empty_slow_element_dictionary(*slow_element_dictionary);
2886 set_materialized_objects(*factory->NewFixedArray(0, TENURED));
2888 // Handling of script id generation is in Factory::NewScript.
2889 set_last_script_id(Smi::FromInt(v8::UnboundScript::kNoScriptId));
2891 set_allocation_sites_scratchpad(
2892 *factory->NewFixedArray(kAllocationSiteScratchpadSize, TENURED));
2893 InitializeAllocationSitesScratchpad();
2895 // Initialize keyed lookup cache.
2896 isolate_->keyed_lookup_cache()->Clear();
2898 // Initialize context slot cache.
2899 isolate_->context_slot_cache()->Clear();
2901 // Initialize descriptor cache.
2902 isolate_->descriptor_lookup_cache()->Clear();
2904 // Initialize compilation cache.
2905 isolate_->compilation_cache()->Clear();
2909 bool Heap::RootCanBeWrittenAfterInitialization(Heap::RootListIndex root_index) {
2910 RootListIndex writable_roots[] = {
2911 kStoreBufferTopRootIndex,
2912 kStackLimitRootIndex,
2913 kNumberStringCacheRootIndex,
2914 kInstanceofCacheFunctionRootIndex,
2915 kInstanceofCacheMapRootIndex,
2916 kInstanceofCacheAnswerRootIndex,
2917 kCodeStubsRootIndex,
2918 kNonMonomorphicCacheRootIndex,
2919 kPolymorphicCodeCacheRootIndex,
2920 kLastScriptIdRootIndex,
2921 kEmptyScriptRootIndex,
2922 kRealStackLimitRootIndex,
2923 kArgumentsAdaptorDeoptPCOffsetRootIndex,
2924 kConstructStubDeoptPCOffsetRootIndex,
2925 kGetterStubDeoptPCOffsetRootIndex,
2926 kSetterStubDeoptPCOffsetRootIndex,
2927 kStringTableRootIndex,
2930 for (unsigned int i = 0; i < ARRAY_SIZE(writable_roots); i++) {
2931 if (root_index == writable_roots[i]) return true;
2937 bool Heap::RootCanBeTreatedAsConstant(RootListIndex root_index) {
2938 return !RootCanBeWrittenAfterInitialization(root_index) &&
2939 !InNewSpace(roots_array_start()[root_index]);
2943 Object* RegExpResultsCache::Lookup(Heap* heap, String* key_string,
2944 Object* key_pattern, ResultsCacheType type) {
2946 if (!key_string->IsInternalizedString()) return Smi::FromInt(0);
2947 if (type == STRING_SPLIT_SUBSTRINGS) {
2948 DCHECK(key_pattern->IsString());
2949 if (!key_pattern->IsInternalizedString()) return Smi::FromInt(0);
2950 cache = heap->string_split_cache();
2952 DCHECK(type == REGEXP_MULTIPLE_INDICES);
2953 DCHECK(key_pattern->IsFixedArray());
2954 cache = heap->regexp_multiple_cache();
2957 uint32_t hash = key_string->Hash();
2958 uint32_t index = ((hash & (kRegExpResultsCacheSize - 1)) &
2959 ~(kArrayEntriesPerCacheEntry - 1));
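// Each logical cache entry spans kArrayEntriesPerCacheEntry consecutive
// slots. A lookup probes the primary entry first and then one secondary
// entry kArrayEntriesPerCacheEntry further along (modulo the cache size).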
2960 if (cache->get(index + kStringOffset) == key_string &&
2961 cache->get(index + kPatternOffset) == key_pattern) {
2962 return cache->get(index + kArrayOffset);
2965 ((index + kArrayEntriesPerCacheEntry) & (kRegExpResultsCacheSize - 1));
2966 if (cache->get(index + kStringOffset) == key_string &&
2967 cache->get(index + kPatternOffset) == key_pattern) {
2968 return cache->get(index + kArrayOffset);
2970 return Smi::FromInt(0);
2974 void RegExpResultsCache::Enter(Isolate* isolate, Handle<String> key_string,
2975 Handle<Object> key_pattern,
2976 Handle<FixedArray> value_array,
2977 ResultsCacheType type) {
2978 Factory* factory = isolate->factory();
2979 Handle<FixedArray> cache;
2980 if (!key_string->IsInternalizedString()) return;
2981 if (type == STRING_SPLIT_SUBSTRINGS) {
2982 DCHECK(key_pattern->IsString());
2983 if (!key_pattern->IsInternalizedString()) return;
2984 cache = factory->string_split_cache();
2986 DCHECK(type == REGEXP_MULTIPLE_INDICES);
2987 DCHECK(key_pattern->IsFixedArray());
2988 cache = factory->regexp_multiple_cache();
2991 uint32_t hash = key_string->Hash();
2992 uint32_t index = ((hash & (kRegExpResultsCacheSize - 1)) &
2993 ~(kArrayEntriesPerCacheEntry - 1));
2994 if (cache->get(index + kStringOffset) == Smi::FromInt(0)) {
2995 cache->set(index + kStringOffset, *key_string);
2996 cache->set(index + kPatternOffset, *key_pattern);
2997 cache->set(index + kArrayOffset, *value_array);
3000 ((index + kArrayEntriesPerCacheEntry) & (kRegExpResultsCacheSize - 1));
3001 if (cache->get(index2 + kStringOffset) == Smi::FromInt(0)) {
3002 cache->set(index2 + kStringOffset, *key_string);
3003 cache->set(index2 + kPatternOffset, *key_pattern);
3004 cache->set(index2 + kArrayOffset, *value_array);
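// Otherwise both probes are occupied: evict the secondary entry and store
// the new result in the primary slot.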
3006 cache->set(index2 + kStringOffset, Smi::FromInt(0));
3007 cache->set(index2 + kPatternOffset, Smi::FromInt(0));
3008 cache->set(index2 + kArrayOffset, Smi::FromInt(0));
3009 cache->set(index + kStringOffset, *key_string);
3010 cache->set(index + kPatternOffset, *key_pattern);
3011 cache->set(index + kArrayOffset, *value_array);
3014 // If the array is a reasonably short list of substrings, convert it into a
3015 // list of internalized strings.
3016 if (type == STRING_SPLIT_SUBSTRINGS && value_array->length() < 100) {
3017 for (int i = 0; i < value_array->length(); i++) {
3018 Handle<String> str(String::cast(value_array->get(i)), isolate);
3019 Handle<String> internalized_str = factory->InternalizeString(str);
3020 value_array->set(i, *internalized_str);
3023 // Convert backing store to a copy-on-write array.
3024 value_array->set_map_no_write_barrier(*factory->fixed_cow_array_map());
3028 void RegExpResultsCache::Clear(FixedArray* cache) {
3029 for (int i = 0; i < kRegExpResultsCacheSize; i++) {
3030 cache->set(i, Smi::FromInt(0));
3035 int Heap::FullSizeNumberStringCacheLength() {
3036 // Compute the size of the number string cache based on the max newspace size.
3037 // The number string cache has a minimum size based on twice the initial cache
3038 // size to ensure that it is bigger after being made 'full size'.
3039 int number_string_cache_size = max_semi_space_size_ / 512;
3040 number_string_cache_size = Max(kInitialNumberStringCacheSize * 2,
3041 Min(0x4000, number_string_cache_size));
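// For example, an 8 MB max semispace yields 8 MB / 512 = 16384 entries,
// which is exactly the 0x4000 cap, so the cache holds 16384 entries and the
// returned length is 32768.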
3042 // There is a string and a number per entry, so the length is twice the number of entries.
3044 return number_string_cache_size * 2;
3048 void Heap::FlushNumberStringCache() {
3049 // Flush the number to string cache.
3050 int len = number_string_cache()->length();
3051 for (int i = 0; i < len; i++) {
3052 number_string_cache()->set_undefined(i);
3057 void Heap::FlushAllocationSitesScratchpad() {
3058 for (int i = 0; i < allocation_sites_scratchpad_length_; i++) {
3059 allocation_sites_scratchpad()->set_undefined(i);
3061 allocation_sites_scratchpad_length_ = 0;
3065 void Heap::InitializeAllocationSitesScratchpad() {
3066 DCHECK(allocation_sites_scratchpad()->length() ==
3067 kAllocationSiteScratchpadSize);
3068 for (int i = 0; i < kAllocationSiteScratchpadSize; i++) {
3069 allocation_sites_scratchpad()->set_undefined(i);
3074 void Heap::AddAllocationSiteToScratchpad(AllocationSite* site,
3075 ScratchpadSlotMode mode) {
3076 if (allocation_sites_scratchpad_length_ < kAllocationSiteScratchpadSize) {
3077 // We cannot use the normal write-barrier because slots need to be
3078 // recorded with non-incremental marking as well. We have to explicitly
3079 // record the slot to take evacuation candidates into account.
3080 allocation_sites_scratchpad()->set(allocation_sites_scratchpad_length_,
3081 site, SKIP_WRITE_BARRIER);
3082 Object** slot = allocation_sites_scratchpad()->RawFieldOfElementAt(
3083 allocation_sites_scratchpad_length_);
3085 if (mode == RECORD_SCRATCHPAD_SLOT) {
3086 // We need to allow slots buffer overflow here since the evacuation
3087 // candidates are not part of the global list of old space pages and
3088 // releasing an evacuation candidate due to a slots buffer overflow
3089 // results in lost pages.
3090 mark_compact_collector()->RecordSlot(slot, slot, *slot,
3091 SlotsBuffer::IGNORE_OVERFLOW);
3093 allocation_sites_scratchpad_length_++;
3098 Map* Heap::MapForExternalArrayType(ExternalArrayType array_type) {
3099 return Map::cast(roots_[RootIndexForExternalArrayType(array_type)]);
3103 Heap::RootListIndex Heap::RootIndexForExternalArrayType(
3104 ExternalArrayType array_type) {
3105 switch (array_type) {
3106 #define ARRAY_TYPE_TO_ROOT_INDEX(Type, type, TYPE, ctype, size) \
3107 case kExternal##Type##Array: \
3108 return kExternal##Type##ArrayMapRootIndex;
3110 TYPED_ARRAYS(ARRAY_TYPE_TO_ROOT_INDEX)
3111 #undef ARRAY_TYPE_TO_ROOT_INDEX
3115 return kUndefinedValueRootIndex;
3120 Map* Heap::MapForFixedTypedArray(ExternalArrayType array_type) {
3121 return Map::cast(roots_[RootIndexForFixedTypedArray(array_type)]);
3125 Heap::RootListIndex Heap::RootIndexForFixedTypedArray(
3126 ExternalArrayType array_type) {
3127 switch (array_type) {
3128 #define ARRAY_TYPE_TO_ROOT_INDEX(Type, type, TYPE, ctype, size) \
3129 case kExternal##Type##Array: \
3130 return kFixed##Type##ArrayMapRootIndex;
3132 TYPED_ARRAYS(ARRAY_TYPE_TO_ROOT_INDEX)
3133 #undef ARRAY_TYPE_TO_ROOT_INDEX
3137 return kUndefinedValueRootIndex;
3142 Heap::RootListIndex Heap::RootIndexForEmptyExternalArray(
3143 ElementsKind elementsKind) {
3144 switch (elementsKind) {
3145 #define ELEMENT_KIND_TO_ROOT_INDEX(Type, type, TYPE, ctype, size) \
3146 case EXTERNAL_##TYPE##_ELEMENTS: \
3147 return kEmptyExternal##Type##ArrayRootIndex;
3149 TYPED_ARRAYS(ELEMENT_KIND_TO_ROOT_INDEX)
3150 #undef ELEMENT_KIND_TO_ROOT_INDEX
3154 return kUndefinedValueRootIndex;
3159 Heap::RootListIndex Heap::RootIndexForEmptyFixedTypedArray(
3160 ElementsKind elementsKind) {
3161 switch (elementsKind) {
3162 #define ELEMENT_KIND_TO_ROOT_INDEX(Type, type, TYPE, ctype, size) \
3163 case TYPE##_ELEMENTS: \
3164 return kEmptyFixed##Type##ArrayRootIndex;
3166 TYPED_ARRAYS(ELEMENT_KIND_TO_ROOT_INDEX)
3167 #undef ELEMENT_KIND_TO_ROOT_INDEX
3170 return kUndefinedValueRootIndex;
3175 ExternalArray* Heap::EmptyExternalArrayForMap(Map* map) {
3176 return ExternalArray::cast(
3177 roots_[RootIndexForEmptyExternalArray(map->elements_kind())]);
3181 FixedTypedArrayBase* Heap::EmptyFixedTypedArrayForMap(Map* map) {
3182 return FixedTypedArrayBase::cast(
3183 roots_[RootIndexForEmptyFixedTypedArray(map->elements_kind())]);
3187 AllocationResult Heap::AllocateForeign(Address address,
3188 PretenureFlag pretenure) {
3189 // Statically ensure that it is safe to allocate foreigns in paged spaces.
3190 STATIC_ASSERT(Foreign::kSize <= Page::kMaxRegularHeapObjectSize);
3191 AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
3193 AllocationResult allocation = Allocate(foreign_map(), space);
3194 if (!allocation.To(&result)) return allocation;
3195 result->set_foreign_address(address);
3200 AllocationResult Heap::AllocateByteArray(int length, PretenureFlag pretenure) {
3201 if (length < 0 || length > ByteArray::kMaxLength) {
3202 v8::internal::Heap::FatalProcessOutOfMemory("invalid array length", true);
3204 int size = ByteArray::SizeFor(length);
3205 AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
3208 AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
3209 if (!allocation.To(&result)) return allocation;
3212 result->set_map_no_write_barrier(byte_array_map());
3213 ByteArray::cast(result)->set_length(length);
3218 void Heap::CreateFillerObjectAt(Address addr, int size) {
3219 if (size == 0) return;
3220 HeapObject* filler = HeapObject::FromAddress(addr);
3221 if (size == kPointerSize) {
3222 filler->set_map_no_write_barrier(one_pointer_filler_map());
3223 } else if (size == 2 * kPointerSize) {
3224 filler->set_map_no_write_barrier(two_pointer_filler_map());
3226 filler->set_map_no_write_barrier(free_space_map());
3227 FreeSpace::cast(filler)->set_size(size);
3232 bool Heap::CanMoveObjectStart(HeapObject* object) {
3233 Address address = object->address();
3234 bool is_in_old_pointer_space = InOldPointerSpace(address);
3235 bool is_in_old_data_space = InOldDataSpace(address);
3237 if (lo_space()->Contains(object)) return false;
3239 Page* page = Page::FromAddress(address);
3240 // We can move the object start if:
3241 // (1) the object is not in old pointer or old data space,
3242 // (2) the page of the object was already swept,
3243 // (3) the page was already concurrently swept. This case is an optimization
3244 // for concurrent sweeping. The WasSwept predicate for concurrently swept
3245 // pages is set after sweeping all pages.
3246 return (!is_in_old_pointer_space && !is_in_old_data_space) ||
3247 page->WasSwept() || page->SweepingCompleted();
3251 void Heap::AdjustLiveBytes(Address address, int by, InvocationMode mode) {
3252 if (incremental_marking()->IsMarking() &&
3253 Marking::IsBlack(Marking::MarkBitFrom(address))) {
3254 if (mode == FROM_GC) {
3255 MemoryChunk::IncrementLiveBytesFromGC(address, by);
3257 MemoryChunk::IncrementLiveBytesFromMutator(address, by);
3263 FixedArrayBase* Heap::LeftTrimFixedArray(FixedArrayBase* object,
3264 int elements_to_trim) {
3265 const int element_size = object->IsFixedArray() ? kPointerSize : kDoubleSize;
3266 const int bytes_to_trim = elements_to_trim * element_size;
3267 Map* map = object->map();
3269 // For now this trick is only applied to objects in new and paged space.
3270 // In large object space the object's start must coincide with the chunk start,
3271 // and thus the trick is just not applicable.
3272 DCHECK(!lo_space()->Contains(object));
3273 DCHECK(object->map() != fixed_cow_array_map());
3275 STATIC_ASSERT(FixedArrayBase::kMapOffset == 0);
3276 STATIC_ASSERT(FixedArrayBase::kLengthOffset == kPointerSize);
3277 STATIC_ASSERT(FixedArrayBase::kHeaderSize == 2 * kPointerSize);
3279 const int len = object->length();
3280 DCHECK(elements_to_trim <= len);
3282 // Calculate location of new array start.
3283 Address new_start = object->address() + bytes_to_trim;
3285 // Technically in new space this write might be omitted (except for
3286 // debug mode, which iterates through the heap), but to play it safe we create the filler anyway.
3288 CreateFillerObjectAt(object->address(), bytes_to_trim);
3290 // Initialize header of the trimmed array. Since left trimming is only
3291 // performed on pages which are not concurrently swept, creating a filler
3292 // object does not require synchronization.
3293 DCHECK(CanMoveObjectStart(object));
3294 Object** former_start = HeapObject::RawField(object, 0);
3295 int new_start_index = elements_to_trim * (element_size / kPointerSize);
3296 former_start[new_start_index] = map;
3297 former_start[new_start_index + 1] = Smi::FromInt(len - elements_to_trim);
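// These two stores recreate the FixedArrayBase header at the new start
// address: the map word at offset 0 and the smi-encoded length at
// kPointerSize, matching the STATIC_ASSERTs above.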
3298 FixedArrayBase* new_object =
3299 FixedArrayBase::cast(HeapObject::FromAddress(new_start));
3301 // Maintain consistency of live bytes during incremental marking
3302 marking()->TransferMark(object->address(), new_start);
3303 AdjustLiveBytes(new_start, -bytes_to_trim, Heap::FROM_MUTATOR);
3305 // Notify the heap profiler of change in object layout.
3306 OnMoveEvent(new_object, object, new_object->Size());
3311 // Force instantiation of templatized method.
3313 void Heap::RightTrimFixedArray<Heap::FROM_GC>(FixedArrayBase*, int);
3315 void Heap::RightTrimFixedArray<Heap::FROM_MUTATOR>(FixedArrayBase*, int);
3318 template<Heap::InvocationMode mode>
3319 void Heap::RightTrimFixedArray(FixedArrayBase* object, int elements_to_trim) {
3320 const int element_size = object->IsFixedArray() ? kPointerSize : kDoubleSize;
3321 const int bytes_to_trim = elements_to_trim * element_size;
3323 // For now this trick is only applied to objects in new and paged space.
3324 DCHECK(!lo_space()->Contains(object));
3325 DCHECK(object->map() != fixed_cow_array_map());
3327 const int len = object->length();
3328 DCHECK(elements_to_trim < len);
3330 // Calculate location of new array end.
3331 Address new_end = object->address() + object->Size() - bytes_to_trim;
3333 // Technically in new space this write might be omitted (except for
3334 // debug mode, which iterates through the heap), but to play it safe we create the filler anyway.
3336 CreateFillerObjectAt(new_end, bytes_to_trim);
3338 // Initialize header of the trimmed array. We are storing the new length
3339 // using release store after creating a filler for the left-over space to
3340 // avoid races with the sweeper thread.
3341 object->synchronized_set_length(len - elements_to_trim);
3343 // Maintain consistency of live bytes during incremental marking
3344 AdjustLiveBytes(object->address(), -bytes_to_trim, mode);
3346 // Notify the heap profiler of change in object layout. The array may not be
3347 // moved during GC, and size has to be adjusted nevertheless.
3348 HeapProfiler* profiler = isolate()->heap_profiler();
3349 if (profiler->is_tracking_allocations()) {
3350 profiler->UpdateObjectSizeEvent(object->address(), object->Size());
3355 AllocationResult Heap::AllocateExternalArray(int length,
3356 ExternalArrayType array_type,
3357 void* external_pointer,
3358 PretenureFlag pretenure) {
3359 int size = ExternalArray::kAlignedSize;
3360 AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
3363 AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
3364 if (!allocation.To(&result)) return allocation;
3367 result->set_map_no_write_barrier(MapForExternalArrayType(array_type));
3368 ExternalArray::cast(result)->set_length(length);
3369 ExternalArray::cast(result)->set_external_pointer(external_pointer);
3373 static void ForFixedTypedArray(ExternalArrayType array_type, int* element_size,
3374 ElementsKind* element_kind) {
3375 switch (array_type) {
3376 #define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
3377 case kExternal##Type##Array: \
3378 *element_size = size; \
3379 *element_kind = TYPE##_ELEMENTS; \
3382 TYPED_ARRAYS(TYPED_ARRAY_CASE)
3383 #undef TYPED_ARRAY_CASE
3386 *element_size = 0; // Bogus
3387 *element_kind = UINT8_ELEMENTS; // Bogus
3393 AllocationResult Heap::AllocateFixedTypedArray(int length,
3394 ExternalArrayType array_type,
3395 PretenureFlag pretenure) {
3397 ElementsKind elements_kind;
3398 ForFixedTypedArray(array_type, &element_size, &elements_kind);
3399 int size = OBJECT_POINTER_ALIGN(length * element_size +
3400 FixedTypedArrayBase::kDataOffset);
3401 #ifndef V8_HOST_ARCH_64_BIT
3402 if (array_type == kExternalFloat64Array) {
3403 size += kPointerSize;
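// On 32-bit hosts one extra word is reserved so that the Float64 backing
// store can be realigned to an 8-byte boundary by EnsureDoubleAligned below.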
3406 AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
3409 AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
3410 if (!allocation.To(&object)) return allocation;
3412 if (array_type == kExternalFloat64Array) {
3413 object = EnsureDoubleAligned(this, object, size);
3416 object->set_map(MapForFixedTypedArray(array_type));
3417 FixedTypedArrayBase* elements = FixedTypedArrayBase::cast(object);
3418 elements->set_length(length);
3419 memset(elements->DataPtr(), 0, elements->DataSize());
3424 AllocationResult Heap::AllocateCode(int object_size, bool immovable) {
3425 DCHECK(IsAligned(static_cast<intptr_t>(object_size), kCodeAlignment));
3426 AllocationResult allocation =
3427 AllocateRaw(object_size, CODE_SPACE, CODE_SPACE);
3430 if (!allocation.To(&result)) return allocation;
3433 Address address = result->address();
3434 // Code objects which should stay at a fixed address are allocated either
3435 // in the first page of code space (objects on the first page of each space
3436 // are never moved) or in large object space.
3437 if (!code_space_->FirstPage()->Contains(address) &&
3438 MemoryChunk::FromAddress(address)->owner()->identity() != LO_SPACE) {
3439     // Discard the first code allocation, which was on a page where it could be moved.
3441 CreateFillerObjectAt(result->address(), object_size);
3442 allocation = lo_space_->AllocateRaw(object_size, EXECUTABLE);
3443 if (!allocation.To(&result)) return allocation;
3444 OnAllocationEvent(result, object_size);
3448 result->set_map_no_write_barrier(code_map());
3449 Code* code = Code::cast(result);
3450 DCHECK(isolate_->code_range() == NULL || !isolate_->code_range()->valid() ||
3451 isolate_->code_range()->contains(code->address()));
3452 code->set_gc_metadata(Smi::FromInt(0));
3453 code->set_ic_age(global_ic_age_);
3458 AllocationResult Heap::CopyCode(Code* code) {
3459 AllocationResult allocation;
3460 HeapObject* new_constant_pool;
3461 if (FLAG_enable_ool_constant_pool &&
3462 code->constant_pool() != empty_constant_pool_array()) {
3463 // Copy the constant pool, since edits to the copied code may modify
3464 // the constant pool.
3465 allocation = CopyConstantPoolArray(code->constant_pool());
3466 if (!allocation.To(&new_constant_pool)) return allocation;
3468 new_constant_pool = empty_constant_pool_array();
3472 // Allocate an object the same size as the code object.
3473 int obj_size = code->Size();
3474 allocation = AllocateRaw(obj_size, CODE_SPACE, CODE_SPACE);
3475 if (!allocation.To(&result)) return allocation;
3477 // Copy code object.
3478 Address old_addr = code->address();
3479 Address new_addr = result->address();
3480 CopyBlock(new_addr, old_addr, obj_size);
3481 Code* new_code = Code::cast(result);
3483 // Update the constant pool.
3484 new_code->set_constant_pool(new_constant_pool);
3486 // Relocate the copy.
3487 DCHECK(isolate_->code_range() == NULL || !isolate_->code_range()->valid() ||
3488 isolate_->code_range()->contains(code->address()));
3489 new_code->Relocate(new_addr - old_addr);
3494 AllocationResult Heap::CopyCode(Code* code, Vector<byte> reloc_info) {
3495 // Allocate ByteArray and ConstantPoolArray before the Code object, so that we
3496 // do not risk leaving an uninitialized Code object behind (and breaking the heap).
3497 ByteArray* reloc_info_array;
3499 AllocationResult allocation =
3500 AllocateByteArray(reloc_info.length(), TENURED);
3501 if (!allocation.To(&reloc_info_array)) return allocation;
3503 HeapObject* new_constant_pool;
3504 if (FLAG_enable_ool_constant_pool &&
3505 code->constant_pool() != empty_constant_pool_array()) {
3506 // Copy the constant pool, since edits to the copied code may modify
3507 // the constant pool.
3508 AllocationResult allocation = CopyConstantPoolArray(code->constant_pool());
3509 if (!allocation.To(&new_constant_pool)) return allocation;
3511 new_constant_pool = empty_constant_pool_array();
3514 int new_body_size = RoundUp(code->instruction_size(), kObjectAlignment);
3516 int new_obj_size = Code::SizeFor(new_body_size);
3518 Address old_addr = code->address();
3520 size_t relocation_offset =
3521 static_cast<size_t>(code->instruction_end() - old_addr);
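  // relocation_offset spans the Code header plus the instruction payload; only
  // that prefix is copied from the old object below, and the (possibly patched)
  // relocation info supplied by the caller is installed into the copy afterwards.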
3524 AllocationResult allocation =
3525 AllocateRaw(new_obj_size, CODE_SPACE, CODE_SPACE);
3526 if (!allocation.To(&result)) return allocation;
3528 // Copy code object.
3529 Address new_addr = result->address();
3531 // Copy header and instructions.
3532 CopyBytes(new_addr, old_addr, relocation_offset);
3534 Code* new_code = Code::cast(result);
3535 new_code->set_relocation_info(reloc_info_array);
3537 // Update constant pool.
3538 new_code->set_constant_pool(new_constant_pool);
3540 // Copy patched rinfo.
3541 CopyBytes(new_code->relocation_start(), reloc_info.start(),
3542 static_cast<size_t>(reloc_info.length()));
3544 // Relocate the copy.
3545 DCHECK(isolate_->code_range() == NULL || !isolate_->code_range()->valid() ||
3546 isolate_->code_range()->contains(code->address()));
3547 new_code->Relocate(new_addr - old_addr);
3550 if (FLAG_verify_heap) code->ObjectVerify();
3556 void Heap::InitializeAllocationMemento(AllocationMemento* memento,
3557 AllocationSite* allocation_site) {
3558 memento->set_map_no_write_barrier(allocation_memento_map());
3559 DCHECK(allocation_site->map() == allocation_site_map());
3560 memento->set_allocation_site(allocation_site, SKIP_WRITE_BARRIER);
3561 if (FLAG_allocation_site_pretenuring) {
3562 allocation_site->IncrementMementoCreateCount();
3567 AllocationResult Heap::Allocate(Map* map, AllocationSpace space,
3568 AllocationSite* allocation_site) {
3569 DCHECK(gc_state_ == NOT_IN_GC);
3570 DCHECK(map->instance_type() != MAP_TYPE);
3571 // If allocation failures are disallowed, we may allocate in a different
3572 // space when new space is full and the object is not a large object.
3573 AllocationSpace retry_space =
3574 (space != NEW_SPACE) ? space : TargetSpaceId(map->instance_type());
3575 int size = map->instance_size();
3576 if (allocation_site != NULL) {
3577 size += AllocationMemento::kSize;
3580 AllocationResult allocation = AllocateRaw(size, space, retry_space);
3581 if (!allocation.To(&result)) return allocation;
3582 // No need for write barrier since object is white and map is in old space.
3583 result->set_map_no_write_barrier(map);
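  // If an allocation site was provided, an AllocationMemento is placed right
  // behind the object proper; the extra AllocationMemento::kSize requested
  // above reserves the space for it.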
3584 if (allocation_site != NULL) {
3585 AllocationMemento* alloc_memento = reinterpret_cast<AllocationMemento*>(
3586 reinterpret_cast<Address>(result) + map->instance_size());
3587 InitializeAllocationMemento(alloc_memento, allocation_site);
3593 void Heap::InitializeJSObjectFromMap(JSObject* obj, FixedArray* properties,
3595 obj->set_properties(properties);
3596 obj->initialize_elements();
3597 // TODO(1240798): Initialize the object's body using valid initial values
3598 // according to the object's initial map. For example, if the map's
3599 // instance type is JS_ARRAY_TYPE, the length field should be initialized
3600 // to a number (e.g. Smi::FromInt(0)) and the elements initialized to a
3601 // fixed array (e.g. Heap::empty_fixed_array()). Currently, the object
3602 // verification code has to cope with (temporarily) invalid objects. See
3603 // for example, JSArray::JSArrayVerify().
3605 // We cannot always fill with one_pointer_filler_map because objects
3606 // created from API functions expect their internal fields to be initialized
3607 // with undefined_value.
3608 // Pre-allocated fields need to be initialized with undefined_value as well
3609 // so that object accesses before the constructor completes (e.g. in the
3610 // debugger) will not cause a crash.
3611 if (map->constructor()->IsJSFunction() &&
3612 JSFunction::cast(map->constructor())
3613 ->IsInobjectSlackTrackingInProgress()) {
3614 // We might want to shrink the object later.
3615 DCHECK(obj->GetInternalFieldCount() == 0);
3616 filler = Heap::one_pointer_filler_map();
3618 filler = Heap::undefined_value();
3620 obj->InitializeBody(map, Heap::undefined_value(), filler);
3624 AllocationResult Heap::AllocateJSObjectFromMap(
3625 Map* map, PretenureFlag pretenure, bool allocate_properties,
3626 AllocationSite* allocation_site) {
3627 // JSFunctions should be allocated using AllocateFunction to be
3628 // properly initialized.
3629 DCHECK(map->instance_type() != JS_FUNCTION_TYPE);
3631 // Both types of global objects should be allocated using
3632 // AllocateGlobalObject to be properly initialized.
3633 DCHECK(map->instance_type() != JS_GLOBAL_OBJECT_TYPE);
3634 DCHECK(map->instance_type() != JS_BUILTINS_OBJECT_TYPE);
3636 // Allocate the backing storage for the properties.
3637 FixedArray* properties;
3638 if (allocate_properties) {
3639 int prop_size = map->InitialPropertiesLength();
3640 DCHECK(prop_size >= 0);
3642 AllocationResult allocation = AllocateFixedArray(prop_size, pretenure);
3643 if (!allocation.To(&properties)) return allocation;
3646 properties = empty_fixed_array();
3649 // Allocate the JSObject.
3650 int size = map->instance_size();
3651 AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, pretenure);
3653 AllocationResult allocation = Allocate(map, space, allocation_site);
3654 if (!allocation.To(&js_obj)) return allocation;
3656 // Initialize the JSObject.
3657 InitializeJSObjectFromMap(js_obj, properties, map);
3658 DCHECK(js_obj->HasFastElements() || js_obj->HasExternalArrayElements() ||
3659 js_obj->HasFixedTypedArrayElements());
3664 AllocationResult Heap::AllocateJSObject(JSFunction* constructor,
3665 PretenureFlag pretenure,
3666 AllocationSite* allocation_site) {
3667 DCHECK(constructor->has_initial_map());
3669   // Allocate the object based on the constructor's initial map.
3670 AllocationResult allocation = AllocateJSObjectFromMap(
3671 constructor->initial_map(), pretenure, true, allocation_site);
3673 // Make sure result is NOT a global object if valid.
3675 DCHECK(!allocation.To(&obj) || !obj->IsGlobalObject());
3681 AllocationResult Heap::CopyJSObject(JSObject* source, AllocationSite* site) {
3682 // Never used to copy functions. If functions need to be copied we
3683 // have to be careful to clear the literals array.
3684 SLOW_DCHECK(!source->IsJSFunction());
3687 Map* map = source->map();
3688 int object_size = map->instance_size();
3691 DCHECK(site == NULL || AllocationSite::CanTrack(map->instance_type()));
3693 WriteBarrierMode wb_mode = UPDATE_WRITE_BARRIER;
3695 // If we're forced to always allocate, we use the general allocation
3696 // functions which may leave us with an object in old space.
3697 if (always_allocate()) {
3699 AllocationResult allocation =
3700 AllocateRaw(object_size, NEW_SPACE, OLD_POINTER_SPACE);
3701 if (!allocation.To(&clone)) return allocation;
3703 Address clone_address = clone->address();
3704 CopyBlock(clone_address, source->address(), object_size);
3705 // Update write barrier for all fields that lie beyond the header.
3706 RecordWrites(clone_address, JSObject::kHeaderSize,
3707 (object_size - JSObject::kHeaderSize) / kPointerSize);
3709 wb_mode = SKIP_WRITE_BARRIER;
3712 int adjusted_object_size =
3713 site != NULL ? object_size + AllocationMemento::kSize : object_size;
3714 AllocationResult allocation =
3715 AllocateRaw(adjusted_object_size, NEW_SPACE, NEW_SPACE);
3716 if (!allocation.To(&clone)) return allocation;
3718 SLOW_DCHECK(InNewSpace(clone));
3719 // Since we know the clone is allocated in new space, we can copy
3720 // the contents without worrying about updating the write barrier.
3721 CopyBlock(clone->address(), source->address(), object_size);
3724 AllocationMemento* alloc_memento = reinterpret_cast<AllocationMemento*>(
3725 reinterpret_cast<Address>(clone) + object_size);
3726 InitializeAllocationMemento(alloc_memento, site);
3730 SLOW_DCHECK(JSObject::cast(clone)->GetElementsKind() ==
3731 source->GetElementsKind());
3732 FixedArrayBase* elements = FixedArrayBase::cast(source->elements());
3733 FixedArray* properties = FixedArray::cast(source->properties());
3734 // Update elements if necessary.
3735 if (elements->length() > 0) {
3736 FixedArrayBase* elem;
3738 AllocationResult allocation;
3739 if (elements->map() == fixed_cow_array_map()) {
3740 allocation = FixedArray::cast(elements);
3741 } else if (source->HasFastDoubleElements()) {
3742 allocation = CopyFixedDoubleArray(FixedDoubleArray::cast(elements));
3744 allocation = CopyFixedArray(FixedArray::cast(elements));
3746 if (!allocation.To(&elem)) return allocation;
3748 JSObject::cast(clone)->set_elements(elem, wb_mode);
3750 // Update properties if necessary.
3751 if (properties->length() > 0) {
3754 AllocationResult allocation = CopyFixedArray(properties);
3755 if (!allocation.To(&prop)) return allocation;
3757 JSObject::cast(clone)->set_properties(prop, wb_mode);
3759 // Return the new clone.
3764 static inline void WriteOneByteData(Vector<const char> vector, uint8_t* chars,
3766 // Only works for ascii.
3767 DCHECK(vector.length() == len);
3768 MemCopy(chars, vector.start(), len);
3771 static inline void WriteTwoByteData(Vector<const char> vector, uint16_t* chars,
3773 const uint8_t* stream = reinterpret_cast<const uint8_t*>(vector.start());
3774 unsigned stream_length = vector.length();
3775 while (stream_length != 0) {
3776 unsigned consumed = 0;
3777 uint32_t c = unibrow::Utf8::ValueOf(stream, stream_length, &consumed);
3778 DCHECK(c != unibrow::Utf8::kBadChar);
3779 DCHECK(consumed <= stream_length);
3780 stream_length -= consumed;
3782 if (c > unibrow::Utf16::kMaxNonSurrogateCharCode) {
3785 *chars++ = unibrow::Utf16::LeadSurrogate(c);
3786 *chars++ = unibrow::Utf16::TrailSurrogate(c);
3793 DCHECK(stream_length == 0);
3798 static inline void WriteOneByteData(String* s, uint8_t* chars, int len) {
3799 DCHECK(s->length() == len);
3800 String::WriteToFlat(s, chars, 0, len);
3804 static inline void WriteTwoByteData(String* s, uint16_t* chars, int len) {
3805 DCHECK(s->length() == len);
3806 String::WriteToFlat(s, chars, 0, len);
3810 template <bool is_one_byte, typename T>
3811 AllocationResult Heap::AllocateInternalizedStringImpl(T t, int chars,
3812 uint32_t hash_field) {
3814 // Compute map and object size.
3818 DCHECK_LE(0, chars);
3819 DCHECK_GE(String::kMaxLength, chars);
3821 map = ascii_internalized_string_map();
3822 size = SeqOneByteString::SizeFor(chars);
3824 map = internalized_string_map();
3825 size = SeqTwoByteString::SizeFor(chars);
3827 AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, TENURED);
3832 AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
3833 if (!allocation.To(&result)) return allocation;
3836 result->set_map_no_write_barrier(map);
3837 // Set length and hash fields of the allocated string.
3838 String* answer = String::cast(result);
3839 answer->set_length(chars);
3840 answer->set_hash_field(hash_field);
3842 DCHECK_EQ(size, answer->Size());
3845 WriteOneByteData(t, SeqOneByteString::cast(answer)->GetChars(), chars);
3847 WriteTwoByteData(t, SeqTwoByteString::cast(answer)->GetChars(), chars);
3853 // Need explicit instantiations.
3854 template AllocationResult Heap::AllocateInternalizedStringImpl<true>(String*,
3857 template AllocationResult Heap::AllocateInternalizedStringImpl<false>(String*,
3860 template AllocationResult Heap::AllocateInternalizedStringImpl<false>(
3861 Vector<const char>, int, uint32_t);
3864 AllocationResult Heap::AllocateRawOneByteString(int length,
3865 PretenureFlag pretenure) {
3866 DCHECK_LE(0, length);
3867 DCHECK_GE(String::kMaxLength, length);
3868 int size = SeqOneByteString::SizeFor(length);
3869 DCHECK(size <= SeqOneByteString::kMaxSize);
3870 AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
3874 AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
3875 if (!allocation.To(&result)) return allocation;
3878 // Partially initialize the object.
3879 result->set_map_no_write_barrier(ascii_string_map());
3880 String::cast(result)->set_length(length);
3881 String::cast(result)->set_hash_field(String::kEmptyHashField);
3882 DCHECK_EQ(size, HeapObject::cast(result)->Size());
3888 AllocationResult Heap::AllocateRawTwoByteString(int length,
3889 PretenureFlag pretenure) {
3890 DCHECK_LE(0, length);
3891 DCHECK_GE(String::kMaxLength, length);
3892 int size = SeqTwoByteString::SizeFor(length);
3893 DCHECK(size <= SeqTwoByteString::kMaxSize);
3894 AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
3898 AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
3899 if (!allocation.To(&result)) return allocation;
3902 // Partially initialize the object.
3903 result->set_map_no_write_barrier(string_map());
3904 String::cast(result)->set_length(length);
3905 String::cast(result)->set_hash_field(String::kEmptyHashField);
3906 DCHECK_EQ(size, HeapObject::cast(result)->Size());
3911 AllocationResult Heap::AllocateEmptyFixedArray() {
3912 int size = FixedArray::SizeFor(0);
3915 AllocationResult allocation =
3916 AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE);
3917 if (!allocation.To(&result)) return allocation;
3919 // Initialize the object.
3920 result->set_map_no_write_barrier(fixed_array_map());
3921 FixedArray::cast(result)->set_length(0);
3926 AllocationResult Heap::AllocateEmptyExternalArray(
3927 ExternalArrayType array_type) {
3928 return AllocateExternalArray(0, array_type, NULL, TENURED);
3932 AllocationResult Heap::CopyAndTenureFixedCOWArray(FixedArray* src) {
3933 if (!InNewSpace(src)) {
3937 int len = src->length();
3940 AllocationResult allocation = AllocateRawFixedArray(len, TENURED);
3941 if (!allocation.To(&obj)) return allocation;
3943 obj->set_map_no_write_barrier(fixed_array_map());
3944 FixedArray* result = FixedArray::cast(obj);
3945 result->set_length(len);
3948 DisallowHeapAllocation no_gc;
3949 WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
3950 for (int i = 0; i < len; i++) result->set(i, src->get(i), mode);
3952 // TODO(mvstanton): The map is set twice because of protection against calling
3953 // set() on a COW FixedArray. Issue v8:3221 was created to track this; once it
3954 // is resolved we might be able to remove this whole method.
3955 HeapObject::cast(obj)->set_map_no_write_barrier(fixed_cow_array_map());
3960 AllocationResult Heap::AllocateEmptyFixedTypedArray(
3961 ExternalArrayType array_type) {
3962 return AllocateFixedTypedArray(0, array_type, TENURED);
3966 AllocationResult Heap::CopyFixedArrayWithMap(FixedArray* src, Map* map) {
3967 int len = src->length();
3970 AllocationResult allocation = AllocateRawFixedArray(len, NOT_TENURED);
3971 if (!allocation.To(&obj)) return allocation;
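  // If the copy landed in new space, the whole body (everything after the map
  // word) can be block-copied without write barriers; otherwise the elements
  // are copied one by one below using the write-barrier mode computed for the
  // new array.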
3973 if (InNewSpace(obj)) {
3974 obj->set_map_no_write_barrier(map);
3975 CopyBlock(obj->address() + kPointerSize, src->address() + kPointerSize,
3976 FixedArray::SizeFor(len) - kPointerSize);
3979 obj->set_map_no_write_barrier(map);
3980 FixedArray* result = FixedArray::cast(obj);
3981 result->set_length(len);
3984 DisallowHeapAllocation no_gc;
3985 WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
3986 for (int i = 0; i < len; i++) result->set(i, src->get(i), mode);
3991 AllocationResult Heap::CopyFixedDoubleArrayWithMap(FixedDoubleArray* src,
3993 int len = src->length();
3996 AllocationResult allocation = AllocateRawFixedDoubleArray(len, NOT_TENURED);
3997 if (!allocation.To(&obj)) return allocation;
3999 obj->set_map_no_write_barrier(map);
4000 CopyBlock(obj->address() + FixedDoubleArray::kLengthOffset,
4001 src->address() + FixedDoubleArray::kLengthOffset,
4002 FixedDoubleArray::SizeFor(len) - FixedDoubleArray::kLengthOffset);
4007 AllocationResult Heap::CopyConstantPoolArrayWithMap(ConstantPoolArray* src,
4010 if (src->is_extended_layout()) {
4011 ConstantPoolArray::NumberOfEntries small(src,
4012 ConstantPoolArray::SMALL_SECTION);
4013 ConstantPoolArray::NumberOfEntries extended(
4014 src, ConstantPoolArray::EXTENDED_SECTION);
4015 AllocationResult allocation =
4016 AllocateExtendedConstantPoolArray(small, extended);
4017 if (!allocation.To(&obj)) return allocation;
4019 ConstantPoolArray::NumberOfEntries small(src,
4020 ConstantPoolArray::SMALL_SECTION);
4021 AllocationResult allocation = AllocateConstantPoolArray(small);
4022 if (!allocation.To(&obj)) return allocation;
4024 obj->set_map_no_write_barrier(map);
4025 CopyBlock(obj->address() + ConstantPoolArray::kFirstEntryOffset,
4026 src->address() + ConstantPoolArray::kFirstEntryOffset,
4027 src->size() - ConstantPoolArray::kFirstEntryOffset);
4032 AllocationResult Heap::AllocateRawFixedArray(int length,
4033 PretenureFlag pretenure) {
4034 if (length < 0 || length > FixedArray::kMaxLength) {
4035 v8::internal::Heap::FatalProcessOutOfMemory("invalid array length", true);
4037 int size = FixedArray::SizeFor(length);
4038 AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, pretenure);
4040 return AllocateRaw(size, space, OLD_POINTER_SPACE);
4044 AllocationResult Heap::AllocateFixedArrayWithFiller(int length,
4045 PretenureFlag pretenure,
4047 DCHECK(length >= 0);
4048 DCHECK(empty_fixed_array()->IsFixedArray());
4049 if (length == 0) return empty_fixed_array();
4051 DCHECK(!InNewSpace(filler));
4054 AllocationResult allocation = AllocateRawFixedArray(length, pretenure);
4055 if (!allocation.To(&result)) return allocation;
4058 result->set_map_no_write_barrier(fixed_array_map());
4059 FixedArray* array = FixedArray::cast(result);
4060 array->set_length(length);
4061 MemsetPointer(array->data_start(), filler, length);
4066 AllocationResult Heap::AllocateFixedArray(int length, PretenureFlag pretenure) {
4067 return AllocateFixedArrayWithFiller(length, pretenure, undefined_value());
4071 AllocationResult Heap::AllocateUninitializedFixedArray(int length) {
4072 if (length == 0) return empty_fixed_array();
4076 AllocationResult allocation = AllocateRawFixedArray(length, NOT_TENURED);
4077 if (!allocation.To(&obj)) return allocation;
4080 obj->set_map_no_write_barrier(fixed_array_map());
4081 FixedArray::cast(obj)->set_length(length);
4086 AllocationResult Heap::AllocateUninitializedFixedDoubleArray(
4087 int length, PretenureFlag pretenure) {
4088 if (length == 0) return empty_fixed_array();
4090 HeapObject* elements;
4091 AllocationResult allocation = AllocateRawFixedDoubleArray(length, pretenure);
4092 if (!allocation.To(&elements)) return allocation;
4094 elements->set_map_no_write_barrier(fixed_double_array_map());
4095 FixedDoubleArray::cast(elements)->set_length(length);
4100 AllocationResult Heap::AllocateRawFixedDoubleArray(int length,
4101 PretenureFlag pretenure) {
4102 if (length < 0 || length > FixedDoubleArray::kMaxLength) {
4103 v8::internal::Heap::FatalProcessOutOfMemory("invalid array length", true);
4105 int size = FixedDoubleArray::SizeFor(length);
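  // As for fixed typed arrays, 32-bit hosts reserve one extra word of slack so
  // the double payload can be placed on an 8-byte boundary (see
  // EnsureDoubleAligned at the end of this function).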
4106 #ifndef V8_HOST_ARCH_64_BIT
4107 size += kPointerSize;
4109 AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
4113 AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
4114 if (!allocation.To(&object)) return allocation;
4117 return EnsureDoubleAligned(this, object, size);
4121 AllocationResult Heap::AllocateConstantPoolArray(
4122 const ConstantPoolArray::NumberOfEntries& small) {
4123 CHECK(small.are_in_range(0, ConstantPoolArray::kMaxSmallEntriesPerType));
4124 int size = ConstantPoolArray::SizeFor(small);
4125 #ifndef V8_HOST_ARCH_64_BIT
4126 size += kPointerSize;
4128 AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, TENURED);
4132 AllocationResult allocation = AllocateRaw(size, space, OLD_POINTER_SPACE);
4133 if (!allocation.To(&object)) return allocation;
4135 object = EnsureDoubleAligned(this, object, size);
4136 object->set_map_no_write_barrier(constant_pool_array_map());
4138 ConstantPoolArray* constant_pool = ConstantPoolArray::cast(object);
4139 constant_pool->Init(small);
4140 constant_pool->ClearPtrEntries(isolate());
4141 return constant_pool;
4145 AllocationResult Heap::AllocateExtendedConstantPoolArray(
4146 const ConstantPoolArray::NumberOfEntries& small,
4147 const ConstantPoolArray::NumberOfEntries& extended) {
4148 CHECK(small.are_in_range(0, ConstantPoolArray::kMaxSmallEntriesPerType));
4149 CHECK(extended.are_in_range(0, kMaxInt));
4150 int size = ConstantPoolArray::SizeForExtended(small, extended);
4151 #ifndef V8_HOST_ARCH_64_BIT
4152 size += kPointerSize;
4154 AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, TENURED);
4158 AllocationResult allocation = AllocateRaw(size, space, OLD_POINTER_SPACE);
4159 if (!allocation.To(&object)) return allocation;
4161 object = EnsureDoubleAligned(this, object, size);
4162 object->set_map_no_write_barrier(constant_pool_array_map());
4164 ConstantPoolArray* constant_pool = ConstantPoolArray::cast(object);
4165 constant_pool->InitExtended(small, extended);
4166 constant_pool->ClearPtrEntries(isolate());
4167 return constant_pool;
4171 AllocationResult Heap::AllocateEmptyConstantPoolArray() {
4172 ConstantPoolArray::NumberOfEntries small(0, 0, 0, 0);
4173 int size = ConstantPoolArray::SizeFor(small);
4176 AllocationResult allocation =
4177 AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE);
4178 if (!allocation.To(&result)) return allocation;
4180 result->set_map_no_write_barrier(constant_pool_array_map());
4181 ConstantPoolArray::cast(result)->Init(small);
4186 AllocationResult Heap::AllocateSymbol() {
4187 // Statically ensure that it is safe to allocate symbols in paged spaces.
4188 STATIC_ASSERT(Symbol::kSize <= Page::kMaxRegularHeapObjectSize);
4191 AllocationResult allocation =
4192 AllocateRaw(Symbol::kSize, OLD_POINTER_SPACE, OLD_POINTER_SPACE);
4193 if (!allocation.To(&result)) return allocation;
4195 result->set_map_no_write_barrier(symbol_map());
4197 // Generate a random hash value.
4201 hash = isolate()->random_number_generator()->NextInt() & Name::kHashBitMask;
4203 } while (hash == 0 && attempts < 30);
4204 if (hash == 0) hash = 1; // never return 0
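  // Pack the hash into the symbol's hash field: the hash occupies the bits
  // above Name::kHashShift, and kIsNotArrayIndexMask marks the field as a real
  // hash rather than a cached array index.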
4206 Symbol::cast(result)
4207 ->set_hash_field(Name::kIsNotArrayIndexMask | (hash << Name::kHashShift));
4208 Symbol::cast(result)->set_name(undefined_value());
4209 Symbol::cast(result)->set_flags(Smi::FromInt(0));
4211 DCHECK(!Symbol::cast(result)->is_private());
4216 AllocationResult Heap::AllocateStruct(InstanceType type) {
4219 #define MAKE_CASE(NAME, Name, name) \
4221 map = name##_map(); \
4223 STRUCT_LIST(MAKE_CASE)
4229 int size = map->instance_size();
4230 AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, TENURED);
4233 AllocationResult allocation = Allocate(map, space);
4234 if (!allocation.To(&result)) return allocation;
4236 result->InitializeBody(size);
4241 bool Heap::IsHeapIterable() {
4242 // TODO(hpayer): This function is not correct. Allocation folding in old
4243 // space breaks the iterability.
4244 return (old_pointer_space()->swept_precisely() &&
4245 old_data_space()->swept_precisely() &&
4246 new_space_top_after_last_gc_ == new_space()->top());
4250 void Heap::MakeHeapIterable() {
4251 DCHECK(AllowHeapAllocation::IsAllowed());
4252 if (!IsHeapIterable()) {
4253 CollectAllGarbage(kMakeHeapIterableMask, "Heap::MakeHeapIterable");
4255 if (mark_compact_collector()->sweeping_in_progress()) {
4256 mark_compact_collector()->EnsureSweepingCompleted();
4258 DCHECK(IsHeapIterable());
4262 void Heap::AdvanceIdleIncrementalMarking(intptr_t step_size) {
4263 incremental_marking()->Step(step_size,
4264 IncrementalMarking::NO_GC_VIA_STACK_GUARD, true);
4266 if (incremental_marking()->IsComplete()) {
4267 bool uncommit = false;
4268 if (gc_count_at_last_idle_gc_ == gc_count_) {
4269 // No GC since the last full GC, the mutator is probably not active.
4270 isolate_->compilation_cache()->Clear();
4273 CollectAllGarbage(kReduceMemoryFootprintMask,
4274 "idle notification: finalize incremental");
4275 mark_sweeps_since_idle_round_started_++;
4276 gc_count_at_last_idle_gc_ = gc_count_;
4278 new_space_.Shrink();
4279 UncommitFromSpace();
4285 bool Heap::IdleNotification(int hint) {
4286 // If incremental marking is off, we do not perform idle notification.
4287 if (!FLAG_incremental_marking) return true;
4289 // Hints greater than this value indicate that
4290 // the embedder is requesting a lot of GC work.
4291 const int kMaxHint = 1000;
4292 const int kMinHintForIncrementalMarking = 10;
4293 // Minimal hint that allows to do full GC.
4294 const int kMinHintForFullGC = 100;
4295 intptr_t size_factor = Min(Max(hint, 20), kMaxHint) / 4;
4296 // The size factor is in range [5..250]. The numbers here are chosen from
4297   // experiments. If you change them, make sure to test with
4298 // chrome/performance_ui_tests --gtest_filter="GeneralMixMemoryTest.*
4299 intptr_t step_size = size_factor * IncrementalMarking::kAllocatedThreshold;
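  // For example, a hint of 100 stays at 100 after clamping to [20, 1000],
  // giving size_factor = 25 and
  // step_size = 25 * IncrementalMarking::kAllocatedThreshold.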
4301 isolate()->counters()->gc_idle_time_allotted_in_ms()->AddSample(hint);
4302 HistogramTimerScope idle_notification_scope(
4303 isolate_->counters()->gc_idle_notification());
4305 if (contexts_disposed_ > 0) {
4306 contexts_disposed_ = 0;
4307 int mark_sweep_time = Min(TimeMarkSweepWouldTakeInMs(), 1000);
4308 if (hint >= mark_sweep_time && !FLAG_expose_gc &&
4309 incremental_marking()->IsStopped()) {
4310 HistogramTimerScope scope(isolate_->counters()->gc_context());
4311 CollectAllGarbage(kReduceMemoryFootprintMask,
4312 "idle notification: contexts disposed");
4314 AdvanceIdleIncrementalMarking(step_size);
4317     // After context disposal there is likely a lot of garbage remaining, so reset
4318 // the idle notification counters in order to trigger more incremental GCs
4319 // on subsequent idle notifications.
4324   // By doing small chunks of GC work in each IdleNotification we perform a
4325   // round of incremental GCs, and after that we wait until the mutator
4326   // creates enough garbage to justify a new round.
4327 // An incremental GC progresses as follows:
4328 // 1. many incremental marking steps,
4329 // 2. one old space mark-sweep-compact,
4330 // Use mark-sweep-compact events to count incremental GCs in a round.
4332 if (mark_sweeps_since_idle_round_started_ >= kMaxMarkSweepsInIdleRound) {
4333 if (EnoughGarbageSinceLastIdleRound()) {
4340 int remaining_mark_sweeps =
4341 kMaxMarkSweepsInIdleRound - mark_sweeps_since_idle_round_started_;
4343 if (incremental_marking()->IsStopped()) {
4344 // If there are no more than two GCs left in this idle round and we are
4345   // allowed to do a full GC, then make those GCs full in order to compact the code space.
4347 // TODO(ulan): Once we enable code compaction for incremental marking,
4348 // we can get rid of this special case and always start incremental marking.
4349 if (remaining_mark_sweeps <= 2 && hint >= kMinHintForFullGC) {
4350 CollectAllGarbage(kReduceMemoryFootprintMask,
4351 "idle notification: finalize idle round");
4352 mark_sweeps_since_idle_round_started_++;
4353 } else if (hint > kMinHintForIncrementalMarking) {
4354 incremental_marking()->Start();
4357 if (!incremental_marking()->IsStopped() &&
4358 hint > kMinHintForIncrementalMarking) {
4359 AdvanceIdleIncrementalMarking(step_size);
4362 if (mark_sweeps_since_idle_round_started_ >= kMaxMarkSweepsInIdleRound) {
4367   // If IdleNotification is called with a large hint, we will wait for
4368   // the sweeper threads here.
4369 if (hint >= kMinHintForFullGC &&
4370 mark_compact_collector()->sweeping_in_progress()) {
4371 mark_compact_collector()->EnsureSweepingCompleted();
4380 void Heap::Print() {
4381 if (!HasBeenSetUp()) return;
4382 isolate()->PrintStack(stdout);
4383 AllSpaces spaces(this);
4384 for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
4390 void Heap::ReportCodeStatistics(const char* title) {
4391 PrintF(">>>>>> Code Stats (%s) >>>>>>\n", title);
4392 PagedSpace::ResetCodeStatistics(isolate());
4393 // We do not look for code in new space, map space, or old space. If code
4394 // somehow ends up in those spaces, we would miss it here.
4395 code_space_->CollectCodeStatistics();
4396 lo_space_->CollectCodeStatistics();
4397 PagedSpace::ReportCodeStatistics(isolate());
4401 // This function expects that NewSpace's allocated objects histogram is
4402 // populated (via a call to CollectStatistics or else as a side effect of a
4403 // just-completed scavenge collection).
4404 void Heap::ReportHeapStatistics(const char* title) {
4406 PrintF(">>>>>> =============== %s (%d) =============== >>>>>>\n", title,
4408 PrintF("old_generation_allocation_limit_ %" V8_PTR_PREFIX "d\n",
4409 old_generation_allocation_limit_);
4412 PrintF("Number of handles : %d\n", HandleScope::NumberOfHandles(isolate_));
4413 isolate_->global_handles()->PrintStats();
4416 PrintF("Heap statistics : ");
4417 isolate_->memory_allocator()->ReportStatistics();
4418 PrintF("To space : ");
4419 new_space_.ReportStatistics();
4420 PrintF("Old pointer space : ");
4421 old_pointer_space_->ReportStatistics();
4422 PrintF("Old data space : ");
4423 old_data_space_->ReportStatistics();
4424 PrintF("Code space : ");
4425 code_space_->ReportStatistics();
4426 PrintF("Map space : ");
4427 map_space_->ReportStatistics();
4428 PrintF("Cell space : ");
4429 cell_space_->ReportStatistics();
4430 PrintF("PropertyCell space : ");
4431 property_cell_space_->ReportStatistics();
4432 PrintF("Large object space : ");
4433 lo_space_->ReportStatistics();
4434 PrintF(">>>>>> ========================================= >>>>>>\n");
4439 bool Heap::Contains(HeapObject* value) { return Contains(value->address()); }
4442 bool Heap::Contains(Address addr) {
4443 if (isolate_->memory_allocator()->IsOutsideAllocatedSpace(addr)) return false;
4444 return HasBeenSetUp() &&
4445 (new_space_.ToSpaceContains(addr) ||
4446 old_pointer_space_->Contains(addr) ||
4447 old_data_space_->Contains(addr) || code_space_->Contains(addr) ||
4448 map_space_->Contains(addr) || cell_space_->Contains(addr) ||
4449 property_cell_space_->Contains(addr) ||
4450 lo_space_->SlowContains(addr));
4454 bool Heap::InSpace(HeapObject* value, AllocationSpace space) {
4455 return InSpace(value->address(), space);
4459 bool Heap::InSpace(Address addr, AllocationSpace space) {
4460 if (isolate_->memory_allocator()->IsOutsideAllocatedSpace(addr)) return false;
4461 if (!HasBeenSetUp()) return false;
4465 return new_space_.ToSpaceContains(addr);
4466 case OLD_POINTER_SPACE:
4467 return old_pointer_space_->Contains(addr);
4468 case OLD_DATA_SPACE:
4469 return old_data_space_->Contains(addr);
4471 return code_space_->Contains(addr);
4473 return map_space_->Contains(addr);
4475 return cell_space_->Contains(addr);
4476 case PROPERTY_CELL_SPACE:
4477 return property_cell_space_->Contains(addr);
4479 return lo_space_->SlowContains(addr);
4489 void Heap::Verify() {
4490 CHECK(HasBeenSetUp());
4491 HandleScope scope(isolate());
4493 store_buffer()->Verify();
4495 if (mark_compact_collector()->sweeping_in_progress()) {
4496 // We have to wait here for the sweeper threads to have an iterable heap.
4497 mark_compact_collector()->EnsureSweepingCompleted();
4500 VerifyPointersVisitor visitor;
4501 IterateRoots(&visitor, VISIT_ONLY_STRONG);
4503 VerifySmisVisitor smis_visitor;
4504 IterateSmiRoots(&smis_visitor);
4506 new_space_.Verify();
4508 old_pointer_space_->Verify(&visitor);
4509 map_space_->Verify(&visitor);
4511 VerifyPointersVisitor no_dirty_regions_visitor;
4512 old_data_space_->Verify(&no_dirty_regions_visitor);
4513 code_space_->Verify(&no_dirty_regions_visitor);
4514 cell_space_->Verify(&no_dirty_regions_visitor);
4515 property_cell_space_->Verify(&no_dirty_regions_visitor);
4517 lo_space_->Verify();
4522 void Heap::ZapFromSpace() {
4523 NewSpacePageIterator it(new_space_.FromSpaceStart(),
4524 new_space_.FromSpaceEnd());
4525 while (it.has_next()) {
4526 NewSpacePage* page = it.next();
4527 for (Address cursor = page->area_start(), limit = page->area_end();
4528 cursor < limit; cursor += kPointerSize) {
4529 Memory::Address_at(cursor) = kFromSpaceZapValue;
4535 void Heap::IterateAndMarkPointersToFromSpace(Address start, Address end,
4536 ObjectSlotCallback callback) {
4537 Address slot_address = start;
4539   // We are not collecting slots on new space objects during mutation,
4540   // thus we have to scan for pointers to evacuation candidates when we
4541   // promote objects. But we should not record any slots in non-black
4542   // objects. Grey objects' slots would be rescanned anyway.
4543   // A white object might not survive until the end of the collection, so
4544   // recording its slots would violate the invariant.
4545 bool record_slots = false;
4546 if (incremental_marking()->IsCompacting()) {
4547 MarkBit mark_bit = Marking::MarkBitFrom(HeapObject::FromAddress(start));
4548 record_slots = Marking::IsBlack(mark_bit);
4551 while (slot_address < end) {
4552 Object** slot = reinterpret_cast<Object**>(slot_address);
4553 Object* object = *slot;
4554 // If the store buffer becomes overfull we mark pages as being exempt from
4555 // the store buffer. These pages are scanned to find pointers that point
4556 // to the new space. In that case we may hit newly promoted objects and
4557 // fix the pointers before the promotion queue gets to them. Thus the 'if'.
4558 if (object->IsHeapObject()) {
4559 if (Heap::InFromSpace(object)) {
4560 callback(reinterpret_cast<HeapObject**>(slot),
4561 HeapObject::cast(object));
4562 Object* new_object = *slot;
4563 if (InNewSpace(new_object)) {
4564 SLOW_DCHECK(Heap::InToSpace(new_object));
4565 SLOW_DCHECK(new_object->IsHeapObject());
4566 store_buffer_.EnterDirectlyIntoStoreBuffer(
4567 reinterpret_cast<Address>(slot));
4569 SLOW_DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(new_object));
4570 } else if (record_slots &&
4571 MarkCompactCollector::IsOnEvacuationCandidate(object)) {
4572 mark_compact_collector()->RecordSlot(slot, slot, object);
4575 slot_address += kPointerSize;
4581 typedef bool (*CheckStoreBufferFilter)(Object** addr);
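// Filter used below when checking the map space: an address counts as a map
// pointer slot only if its offset within a Map::kSize-sized cell falls inside
// the Map pointer-fields range.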
4584 bool IsAMapPointerAddress(Object** addr) {
4585 uintptr_t a = reinterpret_cast<uintptr_t>(addr);
4586 int mod = a % Map::kSize;
4587 return mod >= Map::kPointerFieldsBeginOffset &&
4588 mod < Map::kPointerFieldsEndOffset;
4592 bool EverythingsAPointer(Object** addr) { return true; }
4595 static void CheckStoreBuffer(Heap* heap, Object** current, Object** limit,
4596 Object**** store_buffer_position,
4597 Object*** store_buffer_top,
4598 CheckStoreBufferFilter filter,
4599 Address special_garbage_start,
4600 Address special_garbage_end) {
4601 Map* free_space_map = heap->free_space_map();
4602 for (; current < limit; current++) {
4603 Object* o = *current;
4604 Address current_address = reinterpret_cast<Address>(current);
4606 if (o == free_space_map) {
4607 Address current_address = reinterpret_cast<Address>(current);
4608 FreeSpace* free_space =
4609 FreeSpace::cast(HeapObject::FromAddress(current_address));
4610 int skip = free_space->Size();
4611 DCHECK(current_address + skip <= reinterpret_cast<Address>(limit));
4613 current_address += skip - kPointerSize;
4614 current = reinterpret_cast<Object**>(current_address);
4617 // Skip the current linear allocation space between top and limit which is
4618 // unmarked with the free space map, but can contain junk.
4619 if (current_address == special_garbage_start &&
4620 special_garbage_end != special_garbage_start) {
4621 current_address = special_garbage_end - kPointerSize;
4622 current = reinterpret_cast<Object**>(current_address);
4625 if (!(*filter)(current)) continue;
4626 DCHECK(current_address < special_garbage_start ||
4627 current_address >= special_garbage_end);
4628 DCHECK(reinterpret_cast<uintptr_t>(o) != kFreeListZapValue);
4629 // We have to check that the pointer does not point into new space
4630 // without trying to cast it to a heap object since the hash field of
4631     // a string can contain values like 1 and 3 which are tagged null pointers.
4633 if (!heap->InNewSpace(o)) continue;
4634 while (**store_buffer_position < current &&
4635 *store_buffer_position < store_buffer_top) {
4636 (*store_buffer_position)++;
4638 if (**store_buffer_position != current ||
4639 *store_buffer_position == store_buffer_top) {
4640 Object** obj_start = current;
4641 while (!(*obj_start)->IsMap()) obj_start--;
4648 // Check that the store buffer contains all intergenerational pointers by
4649 // scanning a page and ensuring that all pointers to young space are in the store buffer.
4651 void Heap::OldPointerSpaceCheckStoreBuffer() {
4652 OldSpace* space = old_pointer_space();
4653 PageIterator pages(space);
4655 store_buffer()->SortUniq();
4657 while (pages.has_next()) {
4658 Page* page = pages.next();
4659 Object** current = reinterpret_cast<Object**>(page->area_start());
4661 Address end = page->area_end();
4663 Object*** store_buffer_position = store_buffer()->Start();
4664 Object*** store_buffer_top = store_buffer()->Top();
4666 Object** limit = reinterpret_cast<Object**>(end);
4667 CheckStoreBuffer(this, current, limit, &store_buffer_position,
4668 store_buffer_top, &EverythingsAPointer, space->top(),
4674 void Heap::MapSpaceCheckStoreBuffer() {
4675 MapSpace* space = map_space();
4676 PageIterator pages(space);
4678 store_buffer()->SortUniq();
4680 while (pages.has_next()) {
4681 Page* page = pages.next();
4682 Object** current = reinterpret_cast<Object**>(page->area_start());
4684 Address end = page->area_end();
4686 Object*** store_buffer_position = store_buffer()->Start();
4687 Object*** store_buffer_top = store_buffer()->Top();
4689 Object** limit = reinterpret_cast<Object**>(end);
4690 CheckStoreBuffer(this, current, limit, &store_buffer_position,
4691 store_buffer_top, &IsAMapPointerAddress, space->top(),
4697 void Heap::LargeObjectSpaceCheckStoreBuffer() {
4698 LargeObjectIterator it(lo_space());
4699 for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
4700 // We only have code, sequential strings, or fixed arrays in large
4701 // object space, and only fixed arrays can possibly contain pointers to
4702 // the young generation.
4703 if (object->IsFixedArray()) {
4704 Object*** store_buffer_position = store_buffer()->Start();
4705 Object*** store_buffer_top = store_buffer()->Top();
4706 Object** current = reinterpret_cast<Object**>(object->address());
4708 reinterpret_cast<Object**>(object->address() + object->Size());
4709 CheckStoreBuffer(this, current, limit, &store_buffer_position,
4710 store_buffer_top, &EverythingsAPointer, NULL, NULL);
4717 void Heap::IterateRoots(ObjectVisitor* v, VisitMode mode) {
4718 IterateStrongRoots(v, mode);
4719 IterateWeakRoots(v, mode);
4723 void Heap::IterateWeakRoots(ObjectVisitor* v, VisitMode mode) {
4724 v->VisitPointer(reinterpret_cast<Object**>(&roots_[kStringTableRootIndex]));
4725 v->Synchronize(VisitorSynchronization::kStringTable);
4726 if (mode != VISIT_ALL_IN_SCAVENGE && mode != VISIT_ALL_IN_SWEEP_NEWSPACE) {
4727 // Scavenge collections have special processing for this.
4728 external_string_table_.Iterate(v);
4730 v->Synchronize(VisitorSynchronization::kExternalStringsTable);
4734 void Heap::IterateSmiRoots(ObjectVisitor* v) {
4735 // Acquire execution access since we are going to read stack limit values.
4736 ExecutionAccess access(isolate());
4737 v->VisitPointers(&roots_[kSmiRootsStart], &roots_[kRootListLength]);
4738 v->Synchronize(VisitorSynchronization::kSmiRootList);
4742 void Heap::IterateStrongRoots(ObjectVisitor* v, VisitMode mode) {
4743 v->VisitPointers(&roots_[0], &roots_[kStrongRootListLength]);
4744 v->Synchronize(VisitorSynchronization::kStrongRootList);
4746 v->VisitPointer(BitCast<Object**>(&hidden_string_));
4747 v->Synchronize(VisitorSynchronization::kInternalizedString);
4749 isolate_->bootstrapper()->Iterate(v);
4750 v->Synchronize(VisitorSynchronization::kBootstrapper);
4751 isolate_->Iterate(v);
4752 v->Synchronize(VisitorSynchronization::kTop);
4753 Relocatable::Iterate(isolate_, v);
4754 v->Synchronize(VisitorSynchronization::kRelocatable);
4756 if (isolate_->deoptimizer_data() != NULL) {
4757 isolate_->deoptimizer_data()->Iterate(v);
4759 v->Synchronize(VisitorSynchronization::kDebug);
4760 isolate_->compilation_cache()->Iterate(v);
4761 v->Synchronize(VisitorSynchronization::kCompilationCache);
4763 // Iterate over local handles in handle scopes.
4764 isolate_->handle_scope_implementer()->Iterate(v);
4765 isolate_->IterateDeferredHandles(v);
4766 v->Synchronize(VisitorSynchronization::kHandleScope);
4768 // Iterate over the builtin code objects and code stubs in the
4769 // heap. Note that it is not necessary to iterate over code objects
4770 // on scavenge collections.
4771 if (mode != VISIT_ALL_IN_SCAVENGE) {
4772 isolate_->builtins()->IterateBuiltins(v);
4774 v->Synchronize(VisitorSynchronization::kBuiltins);
4776 // Iterate over global handles.
4778 case VISIT_ONLY_STRONG:
4779 isolate_->global_handles()->IterateStrongRoots(v);
4781 case VISIT_ALL_IN_SCAVENGE:
4782 isolate_->global_handles()->IterateNewSpaceStrongAndDependentRoots(v);
4784 case VISIT_ALL_IN_SWEEP_NEWSPACE:
4786 isolate_->global_handles()->IterateAllRoots(v);
4789 v->Synchronize(VisitorSynchronization::kGlobalHandles);
4791 // Iterate over eternal handles.
4792 if (mode == VISIT_ALL_IN_SCAVENGE) {
4793 isolate_->eternal_handles()->IterateNewSpaceRoots(v);
4795 isolate_->eternal_handles()->IterateAllRoots(v);
4797 v->Synchronize(VisitorSynchronization::kEternalHandles);
4799 // Iterate over pointers being held by inactive threads.
4800 isolate_->thread_manager()->Iterate(v);
4801 v->Synchronize(VisitorSynchronization::kThreadManager);
4803   // Iterate over the pointers the Serialization/Deserialization code is holding.
4805 // During garbage collection this keeps the partial snapshot cache alive.
4806 // During deserialization of the startup snapshot this creates the partial
4807 // snapshot cache and deserializes the objects it refers to. During
4808 // serialization this does nothing, since the partial snapshot cache is
4809 // empty. However the next thing we do is create the partial snapshot,
4810 // filling up the partial snapshot cache with objects it needs as we go.
4811 SerializerDeserializer::Iterate(isolate_, v);
4812 // We don't do a v->Synchronize call here, because in debug mode that will
4813 // output a flag to the snapshot. However at this point the serializer and
4814 // deserializer are deliberately a little unsynchronized (see above) so the
4815 // checking of the sync flag in the snapshot would fail.
4819 // TODO(1236194): Since the heap size is configurable on the command line
4820 // and through the API, we should gracefully handle the case that the heap
4821 // size is not big enough to fit all the initial objects.
4822 bool Heap::ConfigureHeap(int max_semi_space_size, int max_old_space_size,
4823 int max_executable_size, size_t code_range_size) {
4824 if (HasBeenSetUp()) return false;
4826 // Overwrite default configuration.
4827 if (max_semi_space_size > 0) {
4828 max_semi_space_size_ = max_semi_space_size * MB;
4830 if (max_old_space_size > 0) {
4831 max_old_generation_size_ = max_old_space_size * MB;
4833 if (max_executable_size > 0) {
4834 max_executable_size_ = max_executable_size * MB;
4837 // If max space size flags are specified overwrite the configuration.
4838 if (FLAG_max_semi_space_size > 0) {
4839 max_semi_space_size_ = FLAG_max_semi_space_size * MB;
4841 if (FLAG_max_old_space_size > 0) {
4842 max_old_generation_size_ = FLAG_max_old_space_size * MB;
4844 if (FLAG_max_executable_size > 0) {
4845 max_executable_size_ = FLAG_max_executable_size * MB;
4848 if (FLAG_stress_compaction) {
4849 // This will cause more frequent GCs when stressing.
4850 max_semi_space_size_ = Page::kPageSize;
4853 if (Snapshot::HaveASnapshotToStartFrom()) {
4854 // If we are using a snapshot we always reserve the default amount
4855 // of memory for each semispace because code in the snapshot has
4856 // write-barrier code that relies on the size and alignment of new
4857 // space. We therefore cannot use a larger max semispace size
4858 // than the default reserved semispace size.
4859 if (max_semi_space_size_ > reserved_semispace_size_) {
4860 max_semi_space_size_ = reserved_semispace_size_;
4861 if (FLAG_trace_gc) {
4862 PrintPID("Max semi-space size cannot be more than %d kbytes\n",
4863 reserved_semispace_size_ >> 10);
4867 // If we are not using snapshots we reserve space for the actual
4868 // max semispace size.
4869 reserved_semispace_size_ = max_semi_space_size_;
4872   // The max executable size must be less than or equal to the max old generation size.
4874 if (max_executable_size_ > max_old_generation_size_) {
4875 max_executable_size_ = max_old_generation_size_;
4878   // The new space size must be a power of two to support single-bit testing for containment.
4880 max_semi_space_size_ = RoundUpToPowerOf2(max_semi_space_size_);
4881 reserved_semispace_size_ = RoundUpToPowerOf2(reserved_semispace_size_);
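  // For example, a requested 3 MB semi-space (3145728 bytes) is rounded up to
  // 4 MB (4194304 bytes) here.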
4883 if (FLAG_min_semi_space_size > 0) {
4884 int initial_semispace_size = FLAG_min_semi_space_size * MB;
4885 if (initial_semispace_size > max_semi_space_size_) {
4886 initial_semispace_size_ = max_semi_space_size_;
4887 if (FLAG_trace_gc) {
4889             "Min semi-space size cannot be more than the maximum "
4890             "semi-space size of %d MB\n",
4891             max_semi_space_size_ / MB);
4894 initial_semispace_size_ = initial_semispace_size;
4898 initial_semispace_size_ = Min(initial_semispace_size_, max_semi_space_size_);
4900 // The old generation is paged and needs at least one page for each space.
4901 int paged_space_count = LAST_PAGED_SPACE - FIRST_PAGED_SPACE + 1;
4902 max_old_generation_size_ =
4903 Max(static_cast<intptr_t>(paged_space_count * Page::kPageSize),
4904 max_old_generation_size_);
4906 // We rely on being able to allocate new arrays in paged spaces.
4907 DCHECK(Page::kMaxRegularHeapObjectSize >=
4909 FixedArray::SizeFor(JSObject::kInitialMaxFastElementArray) +
4910 AllocationMemento::kSize));
4912 code_range_size_ = code_range_size * MB;
4919 bool Heap::ConfigureHeapDefault() { return ConfigureHeap(0, 0, 0, 0); }
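// Illustrative only: an embedder could instead pass explicit limits in MB,
// e.g. ConfigureHeap(8, 700, 256, 0); any argument that is zero leaves the
// corresponding default (or flag-provided) value untouched.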
4922 void Heap::RecordStats(HeapStats* stats, bool take_snapshot) {
4923 *stats->start_marker = HeapStats::kStartMarker;
4924 *stats->end_marker = HeapStats::kEndMarker;
4925 *stats->new_space_size = new_space_.SizeAsInt();
4926 *stats->new_space_capacity = static_cast<int>(new_space_.Capacity());
4927 *stats->old_pointer_space_size = old_pointer_space_->SizeOfObjects();
4928 *stats->old_pointer_space_capacity = old_pointer_space_->Capacity();
4929 *stats->old_data_space_size = old_data_space_->SizeOfObjects();
4930 *stats->old_data_space_capacity = old_data_space_->Capacity();
4931 *stats->code_space_size = code_space_->SizeOfObjects();
4932 *stats->code_space_capacity = code_space_->Capacity();
4933 *stats->map_space_size = map_space_->SizeOfObjects();
4934 *stats->map_space_capacity = map_space_->Capacity();
4935 *stats->cell_space_size = cell_space_->SizeOfObjects();
4936 *stats->cell_space_capacity = cell_space_->Capacity();
4937 *stats->property_cell_space_size = property_cell_space_->SizeOfObjects();
4938 *stats->property_cell_space_capacity = property_cell_space_->Capacity();
4939 *stats->lo_space_size = lo_space_->Size();
4940 isolate_->global_handles()->RecordStats(stats);
4941 *stats->memory_allocator_size = isolate()->memory_allocator()->Size();
4942 *stats->memory_allocator_capacity =
4943 isolate()->memory_allocator()->Size() +
4944 isolate()->memory_allocator()->Available();
4945 *stats->os_error = base::OS::GetLastError();
4947 if (take_snapshot) {
4948 HeapIterator iterator(this);
4949 for (HeapObject* obj = iterator.next(); obj != NULL;
4950 obj = iterator.next()) {
4951 InstanceType type = obj->map()->instance_type();
4952 DCHECK(0 <= type && type <= LAST_TYPE);
4953 stats->objects_per_type[type]++;
4954 stats->size_per_type[type] += obj->Size();
4960 intptr_t Heap::PromotedSpaceSizeOfObjects() {
4961 return old_pointer_space_->SizeOfObjects() +
4962 old_data_space_->SizeOfObjects() + code_space_->SizeOfObjects() +
4963 map_space_->SizeOfObjects() + cell_space_->SizeOfObjects() +
4964 property_cell_space_->SizeOfObjects() + lo_space_->SizeOfObjects();
4968 int64_t Heap::PromotedExternalMemorySize() {
4969 if (amount_of_external_allocated_memory_ <=
4970 amount_of_external_allocated_memory_at_last_global_gc_)
4972 return amount_of_external_allocated_memory_ -
4973 amount_of_external_allocated_memory_at_last_global_gc_;
4977 intptr_t Heap::OldGenerationAllocationLimit(intptr_t old_gen_size,
4978 int freed_global_handles) {
4979 const int kMaxHandles = 1000;
4980 const int kMinHandles = 100;
4981 double min_factor = 1.1;
4982 double max_factor = 4;
4983 // We set the old generation growing factor to 2 to grow the heap slower on
4984 // memory-constrained devices.
4985 if (max_old_generation_size_ <= kMaxOldSpaceSizeMediumMemoryDevice) {
4988 // If there are many freed global handles, then the next full GC will
4989 // likely collect a lot of garbage. Choose the heap growing factor
4990 // depending on freed global handles.
4991 // TODO(ulan, hpayer): Take into account mutator utilization.
4993 if (freed_global_handles <= kMinHandles) {
4994 factor = max_factor;
4995 } else if (freed_global_handles >= kMaxHandles) {
4996 factor = min_factor;
4998 // Compute factor using linear interpolation between points
4999 // (kMinHandles, max_factor) and (kMaxHandles, min_factor).
5000 factor = max_factor -
5001 (freed_global_handles - kMinHandles) * (max_factor - min_factor) /
5002 (kMaxHandles - kMinHandles);
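    // Example with the default factors (min_factor = 1.1, max_factor = 4):
    // 550 freed handles give
    // factor = 4 - (550 - 100) * (4 - 1.1) / (1000 - 100) = 2.55.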
5005 if (FLAG_stress_compaction ||
5006 mark_compact_collector()->reduce_memory_footprint_) {
5007 factor = min_factor;
5010 intptr_t limit = static_cast<intptr_t>(old_gen_size * factor);
5011 limit = Max(limit, kMinimumOldGenerationAllocationLimit);
5012 limit += new_space_.Capacity();
5013 intptr_t halfway_to_the_max = (old_gen_size + max_old_generation_size_) / 2;
5014 return Min(limit, halfway_to_the_max);
5018 void Heap::EnableInlineAllocation() {
5019 if (!inline_allocation_disabled_) return;
5020 inline_allocation_disabled_ = false;
5022 // Update inline allocation limit for new space.
5023 new_space()->UpdateInlineAllocationLimit(0);
5027 void Heap::DisableInlineAllocation() {
5028 if (inline_allocation_disabled_) return;
5029 inline_allocation_disabled_ = true;
5031 // Update inline allocation limit for new space.
5032 new_space()->UpdateInlineAllocationLimit(0);
5034 // Update inline allocation limit for old spaces.
5035 PagedSpaces spaces(this);
5036 for (PagedSpace* space = spaces.next(); space != NULL;
5037 space = spaces.next()) {
5038 space->EmptyAllocationInfo();
5043 V8_DECLARE_ONCE(initialize_gc_once);
5045 static void InitializeGCOnce() {
5046 InitializeScavengingVisitorsTables();
5047 NewSpaceScavenger::Initialize();
5048 MarkCompactCollector::Initialize();
5052 bool Heap::SetUp() {
5054 allocation_timeout_ = FLAG_gc_interval;
5057 // Initialize heap spaces and initial maps and objects. Whenever something
5058 // goes wrong, just return false. The caller should check the results and
5059 // call Heap::TearDown() to release allocated memory.
5061 // If the heap is not yet configured (e.g. through the API), configure it.
5062 // Configuration is based on the flags new-space-size (really the semispace
5063 // size) and old-space-size if set or the initial values of semispace_size_
5064 // and old_generation_size_ otherwise.
5066 if (!ConfigureHeapDefault()) return false;
5069 base::CallOnce(&initialize_gc_once, &InitializeGCOnce);
5071 MarkMapPointersAsEncoded(false);
5073 // Set up memory allocator.
5074 if (!isolate_->memory_allocator()->SetUp(MaxReserved(), MaxExecutableSize()))
5077 // Set up new space.
5078 if (!new_space_.SetUp(reserved_semispace_size_, max_semi_space_size_)) {
5081 new_space_top_after_last_gc_ = new_space()->top();
5083 // Initialize old pointer space.
5084 old_pointer_space_ = new OldSpace(this, max_old_generation_size_,
5085 OLD_POINTER_SPACE, NOT_EXECUTABLE);
5086 if (old_pointer_space_ == NULL) return false;
5087 if (!old_pointer_space_->SetUp()) return false;
5089 // Initialize old data space.
5090 old_data_space_ = new OldSpace(this, max_old_generation_size_, OLD_DATA_SPACE,
5092 if (old_data_space_ == NULL) return false;
5093 if (!old_data_space_->SetUp()) return false;
5095 if (!isolate_->code_range()->SetUp(code_range_size_)) return false;
5097 // Initialize the code space, set its maximum capacity to the old
5098 // generation size. It needs executable memory.
5100 new OldSpace(this, max_old_generation_size_, CODE_SPACE, EXECUTABLE);
5101 if (code_space_ == NULL) return false;
5102 if (!code_space_->SetUp()) return false;
5104 // Initialize map space.
5105 map_space_ = new MapSpace(this, max_old_generation_size_, MAP_SPACE);
5106 if (map_space_ == NULL) return false;
5107 if (!map_space_->SetUp()) return false;
5109 // Initialize simple cell space.
5110 cell_space_ = new CellSpace(this, max_old_generation_size_, CELL_SPACE);
5111 if (cell_space_ == NULL) return false;
5112 if (!cell_space_->SetUp()) return false;
5114 // Initialize global property cell space.
5115 property_cell_space_ = new PropertyCellSpace(this, max_old_generation_size_,
5116 PROPERTY_CELL_SPACE);
5117 if (property_cell_space_ == NULL) return false;
5118 if (!property_cell_space_->SetUp()) return false;
5120   // The large object space may contain code or data. We set the memory
5121 // to be non-executable here for safety, but this means we need to enable it
5122 // explicitly when allocating large code objects.
5123 lo_space_ = new LargeObjectSpace(this, max_old_generation_size_, LO_SPACE);
5124 if (lo_space_ == NULL) return false;
5125 if (!lo_space_->SetUp()) return false;
5127 // Set up the seed that is used to randomize the string hash function.
5128 DCHECK(hash_seed() == 0);
5129 if (FLAG_randomize_hashes) {
5130 if (FLAG_hash_seed == 0) {
5131 int rnd = isolate()->random_number_generator()->NextInt();
5132 set_hash_seed(Smi::FromInt(rnd & Name::kHashBitMask));
5134 set_hash_seed(Smi::FromInt(FLAG_hash_seed));
5138 LOG(isolate_, IntPtrTEvent("heap-capacity", Capacity()));
5139 LOG(isolate_, IntPtrTEvent("heap-available", Available()));
5141 store_buffer()->SetUp();
5143 mark_compact_collector()->SetUp();
5149 bool Heap::CreateHeapObjects() {
5150 // Create initial maps.
5151 if (!CreateInitialMaps()) return false;
5154 // Create initial objects
5155 CreateInitialObjects();
5156 CHECK_EQ(0, gc_count_);
5158 set_native_contexts_list(undefined_value());
5159 set_array_buffers_list(undefined_value());
5160 set_allocation_sites_list(undefined_value());
  weak_object_to_code_table_ = undefined_value();
  return true;
}
5166 void Heap::SetStackLimits() {
5167 DCHECK(isolate_ != NULL);
5168 DCHECK(isolate_ == isolate());
5169 // On 64 bit machines, pointers are generally out of range of Smis. We write
5170 // something that looks like an out of range Smi to the GC.
5172 // Set up the special root array entries containing the stack limits.
5173 // These are actually addresses, but the tag makes the GC ignore it.
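  // Illustrative example (assuming the usual kSmiTag == 0, kSmiTagMask == 1):
  // a jslimit of 0x7fff5fbff7a9 is stored as 0x7fff5fbff7a8 -- the low bit is
  // forced to the Smi pattern, so the GC reads the word as a Smi and never
  // tries to follow it as a pointer.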
5174 roots_[kStackLimitRootIndex] = reinterpret_cast<Object*>(
5175 (isolate_->stack_guard()->jslimit() & ~kSmiTagMask) | kSmiTag);
5176 roots_[kRealStackLimitRootIndex] = reinterpret_cast<Object*>(
5177 (isolate_->stack_guard()->real_jslimit() & ~kSmiTagMask) | kSmiTag);
5181 void Heap::TearDown() {
5183 if (FLAG_verify_heap) {
5188 UpdateMaximumCommitted();
5190 if (FLAG_print_cumulative_gc_stat) {
5192 PrintF("gc_count=%d ", gc_count_);
5193 PrintF("mark_sweep_count=%d ", ms_count_);
5194 PrintF("max_gc_pause=%.1f ", get_max_gc_pause());
5195 PrintF("total_gc_time=%.1f ", total_gc_time_ms_);
5196 PrintF("min_in_mutator=%.1f ", get_min_in_mutator());
5197 PrintF("max_alive_after_gc=%" V8_PTR_PREFIX "d ", get_max_alive_after_gc());
5198 PrintF("total_marking_time=%.1f ", tracer_.cumulative_sweeping_duration());
5199 PrintF("total_sweeping_time=%.1f ", tracer_.cumulative_sweeping_duration());
5203 if (FLAG_print_max_heap_committed) {
5205 PrintF("maximum_committed_by_heap=%" V8_PTR_PREFIX "d ",
5206 MaximumCommittedMemory());
5207 PrintF("maximum_committed_by_new_space=%" V8_PTR_PREFIX "d ",
5208 new_space_.MaximumCommittedMemory());
5209 PrintF("maximum_committed_by_old_pointer_space=%" V8_PTR_PREFIX "d ",
5210 old_data_space_->MaximumCommittedMemory());
5211 PrintF("maximum_committed_by_old_data_space=%" V8_PTR_PREFIX "d ",
5212 old_pointer_space_->MaximumCommittedMemory());
5213 PrintF("maximum_committed_by_old_data_space=%" V8_PTR_PREFIX "d ",
5214 old_pointer_space_->MaximumCommittedMemory());
5215 PrintF("maximum_committed_by_code_space=%" V8_PTR_PREFIX "d ",
5216 code_space_->MaximumCommittedMemory());
5217 PrintF("maximum_committed_by_map_space=%" V8_PTR_PREFIX "d ",
5218 map_space_->MaximumCommittedMemory());
5219 PrintF("maximum_committed_by_cell_space=%" V8_PTR_PREFIX "d ",
5220 cell_space_->MaximumCommittedMemory());
5221 PrintF("maximum_committed_by_property_space=%" V8_PTR_PREFIX "d ",
5222 property_cell_space_->MaximumCommittedMemory());
5223 PrintF("maximum_committed_by_lo_space=%" V8_PTR_PREFIX "d ",
5224 lo_space_->MaximumCommittedMemory());
5228 if (FLAG_verify_predictable) {
5229 PrintAlloctionsHash();
5232 TearDownArrayBuffers();
5234 isolate_->global_handles()->TearDown();
5236 external_string_table_.TearDown();
5238 mark_compact_collector()->TearDown();
5240 new_space_.TearDown();
5242 if (old_pointer_space_ != NULL) {
5243 old_pointer_space_->TearDown();
5244 delete old_pointer_space_;
5245 old_pointer_space_ = NULL;
5248 if (old_data_space_ != NULL) {
5249 old_data_space_->TearDown();
5250 delete old_data_space_;
5251 old_data_space_ = NULL;
5254 if (code_space_ != NULL) {
5255 code_space_->TearDown();
5260 if (map_space_ != NULL) {
5261 map_space_->TearDown();
5266 if (cell_space_ != NULL) {
5267 cell_space_->TearDown();
5272 if (property_cell_space_ != NULL) {
5273 property_cell_space_->TearDown();
5274 delete property_cell_space_;
5275 property_cell_space_ = NULL;
5278 if (lo_space_ != NULL) {
5279 lo_space_->TearDown();
5284 store_buffer()->TearDown();
5285 incremental_marking()->TearDown();
5287 isolate_->memory_allocator()->TearDown();
5291 void Heap::AddGCPrologueCallback(v8::Isolate::GCPrologueCallback callback,
5292 GCType gc_type, bool pass_isolate) {
5293 DCHECK(callback != NULL);
5294 GCPrologueCallbackPair pair(callback, gc_type, pass_isolate);
5295 DCHECK(!gc_prologue_callbacks_.Contains(pair));
5296 return gc_prologue_callbacks_.Add(pair);
5300 void Heap::RemoveGCPrologueCallback(v8::Isolate::GCPrologueCallback callback) {
5301 DCHECK(callback != NULL);
5302 for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
5303 if (gc_prologue_callbacks_[i].callback == callback) {
5304 gc_prologue_callbacks_.Remove(i);
5312 void Heap::AddGCEpilogueCallback(v8::Isolate::GCEpilogueCallback callback,
5313 GCType gc_type, bool pass_isolate) {
5314 DCHECK(callback != NULL);
5315 GCEpilogueCallbackPair pair(callback, gc_type, pass_isolate);
5316 DCHECK(!gc_epilogue_callbacks_.Contains(pair));
5317 return gc_epilogue_callbacks_.Add(pair);
5321 void Heap::RemoveGCEpilogueCallback(v8::Isolate::GCEpilogueCallback callback) {
5322 DCHECK(callback != NULL);
5323 for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
5324 if (gc_epilogue_callbacks_[i].callback == callback) {
5325 gc_epilogue_callbacks_.Remove(i);
5333 // TODO(ishell): Find a better place for this.
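// (Roughly: weak_object_to_code_table_ weakly maps a heap object to the
// DependentCode list of optimized code that embeds assumptions about it, so
// that the code can be located and deoptimized when the object changes or is
// collected. See LookupWeakObjectToCodeDependency below.)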
5334 void Heap::AddWeakObjectToCodeDependency(Handle<Object> obj,
5335 Handle<DependentCode> dep) {
5336 DCHECK(!InNewSpace(*obj));
5337 DCHECK(!InNewSpace(*dep));
5338 // This handle scope keeps the table handle local to this function, which
5339 // allows us to safely skip write barriers in table update operations.
5340 HandleScope scope(isolate());
  Handle<WeakHashTable> table(WeakHashTable::cast(weak_object_to_code_table_),
                              isolate());
5343 table = WeakHashTable::Put(table, obj, dep);
5345 if (ShouldZapGarbage() && weak_object_to_code_table_ != *table) {
5346 WeakHashTable::cast(weak_object_to_code_table_)->Zap(the_hole_value());
5348 set_weak_object_to_code_table(*table);
5349 DCHECK_EQ(*dep, table->Lookup(obj));
5353 DependentCode* Heap::LookupWeakObjectToCodeDependency(Handle<Object> obj) {
5354 Object* dep = WeakHashTable::cast(weak_object_to_code_table_)->Lookup(obj);
5355 if (dep->IsDependentCode()) return DependentCode::cast(dep);
5356 return DependentCode::cast(empty_fixed_array());
5360 void Heap::EnsureWeakObjectToCodeTable() {
5361 if (!weak_object_to_code_table()->IsHashTable()) {
5362 set_weak_object_to_code_table(
        *WeakHashTable::New(isolate(), 16, USE_DEFAULT_MINIMUM_CAPACITY,
                            TENURED));
5369 void Heap::FatalProcessOutOfMemory(const char* location, bool take_snapshot) {
5370 v8::internal::V8::FatalProcessOutOfMemory(location, take_snapshot);
5375 class PrintHandleVisitor : public ObjectVisitor {
5377 void VisitPointers(Object** start, Object** end) {
5378 for (Object** p = start; p < end; p++)
5379 PrintF(" handle %p to %p\n", reinterpret_cast<void*>(p),
5380 reinterpret_cast<void*>(*p));
5385 void Heap::PrintHandles() {
5386 PrintF("Handles:\n");
5387 PrintHandleVisitor v;
5388 isolate_->handle_scope_implementer()->Iterate(&v);
Space* AllSpaces::next() {
  switch (counter_++) {
    case NEW_SPACE:
      return heap_->new_space();
    case OLD_POINTER_SPACE:
      return heap_->old_pointer_space();
    case OLD_DATA_SPACE:
      return heap_->old_data_space();
    case CODE_SPACE:
      return heap_->code_space();
    case MAP_SPACE:
      return heap_->map_space();
    case CELL_SPACE:
      return heap_->cell_space();
    case PROPERTY_CELL_SPACE:
      return heap_->property_cell_space();
    case LO_SPACE:
      return heap_->lo_space();
    default:
      return NULL;
  }
}
PagedSpace* PagedSpaces::next() {
  switch (counter_++) {
    case OLD_POINTER_SPACE:
      return heap_->old_pointer_space();
    case OLD_DATA_SPACE:
      return heap_->old_data_space();
    case CODE_SPACE:
      return heap_->code_space();
    case MAP_SPACE:
      return heap_->map_space();
    case CELL_SPACE:
      return heap_->cell_space();
    case PROPERTY_CELL_SPACE:
      return heap_->property_cell_space();
    default:
      return NULL;
  }
}
OldSpace* OldSpaces::next() {
  switch (counter_++) {
    case OLD_POINTER_SPACE:
      return heap_->old_pointer_space();
    case OLD_DATA_SPACE:
      return heap_->old_data_space();
    case CODE_SPACE:
      return heap_->code_space();
    default:
      return NULL;
  }
}
SpaceIterator::SpaceIterator(Heap* heap)
    : heap_(heap), current_space_(FIRST_SPACE), iterator_(NULL),
      size_func_(NULL) {}


SpaceIterator::SpaceIterator(Heap* heap, HeapObjectCallback size_func)
    : heap_(heap), current_space_(FIRST_SPACE), iterator_(NULL),
      size_func_(size_func) {}
SpaceIterator::~SpaceIterator() {
  // Delete active iterator if any.
  delete iterator_;
}
5472 bool SpaceIterator::has_next() {
5473 // Iterate until no more spaces.
5474 return current_space_ != LAST_SPACE;
5478 ObjectIterator* SpaceIterator::next() {
5479 if (iterator_ != NULL) {
5482 // Move to the next space
5484 if (current_space_ > LAST_SPACE) {
5489 // Return iterator for the new current space.
5490 return CreateIterator();
5494 // Create an iterator for the space to iterate.
ObjectIterator* SpaceIterator::CreateIterator() {
  DCHECK(iterator_ == NULL);
  switch (current_space_) {
    case NEW_SPACE:
      iterator_ = new SemiSpaceIterator(heap_->new_space(), size_func_);
      break;
    case OLD_POINTER_SPACE:
      iterator_ = new HeapObjectIterator(heap_->old_pointer_space(), size_func_);
      break;
    case OLD_DATA_SPACE:
      iterator_ = new HeapObjectIterator(heap_->old_data_space(), size_func_);
      break;
    case CODE_SPACE:
      iterator_ = new HeapObjectIterator(heap_->code_space(), size_func_);
      break;
    case MAP_SPACE:
      iterator_ = new HeapObjectIterator(heap_->map_space(), size_func_);
      break;
    case CELL_SPACE:
      iterator_ = new HeapObjectIterator(heap_->cell_space(), size_func_);
      break;
    case PROPERTY_CELL_SPACE:
      iterator_ = new HeapObjectIterator(heap_->property_cell_space(), size_func_);
      break;
    case LO_SPACE:
      iterator_ = new LargeObjectIterator(heap_->lo_space(), size_func_);
      break;
  }
  // Return the newly allocated iterator.
  DCHECK(iterator_ != NULL);
  return iterator_;
}
5533 class HeapObjectsFilter {
5535 virtual ~HeapObjectsFilter() {}
5536 virtual bool SkipObject(HeapObject* object) = 0;
5540 class UnreachableObjectsFilter : public HeapObjectsFilter {
5542 explicit UnreachableObjectsFilter(Heap* heap) : heap_(heap) {
5543 MarkReachableObjects();
5546 ~UnreachableObjectsFilter() {
5547 heap_->mark_compact_collector()->ClearMarkbits();
5550 bool SkipObject(HeapObject* object) {
5551 MarkBit mark_bit = Marking::MarkBitFrom(object);
5552 return !mark_bit.Get();
5556 class MarkingVisitor : public ObjectVisitor {
5558 MarkingVisitor() : marking_stack_(10) {}
5560 void VisitPointers(Object** start, Object** end) {
5561 for (Object** p = start; p < end; p++) {
5562 if (!(*p)->IsHeapObject()) continue;
5563 HeapObject* obj = HeapObject::cast(*p);
5564 MarkBit mark_bit = Marking::MarkBitFrom(obj);
      if (!mark_bit.Get()) {
        mark_bit.Set();
5567 marking_stack_.Add(obj);
5572 void TransitiveClosure() {
5573 while (!marking_stack_.is_empty()) {
      HeapObject* obj = marking_stack_.RemoveLast();
      obj->Iterate(this);
5580 List<HeapObject*> marking_stack_;
5583 void MarkReachableObjects() {
5584 MarkingVisitor visitor;
5585 heap_->IterateRoots(&visitor, VISIT_ALL);
5586 visitor.TransitiveClosure();
5590 DisallowHeapAllocation no_allocation_;
5594 HeapIterator::HeapIterator(Heap* heap)
5595 : make_heap_iterable_helper_(heap),
5596 no_heap_allocation_(),
      heap_(heap),
      filtering_(HeapIterator::kNoFiltering),
      filter_(NULL) {
  Init();
}
5604 HeapIterator::HeapIterator(Heap* heap,
5605 HeapIterator::HeapObjectsFiltering filtering)
5606 : make_heap_iterable_helper_(heap),
5607 no_heap_allocation_(),
      heap_(heap),
      filtering_(filtering),
      filter_(NULL) {
  Init();
}
5615 HeapIterator::~HeapIterator() { Shutdown(); }
5618 void HeapIterator::Init() {
5619 // Start the iteration.
5620 space_iterator_ = new SpaceIterator(heap_);
5621 switch (filtering_) {
5622 case kFilterUnreachable:
5623 filter_ = new UnreachableObjectsFilter(heap_);
5628 object_iterator_ = space_iterator_->next();
5632 void HeapIterator::Shutdown() {
5634 // Assert that in filtering mode we have iterated through all
5635 // objects. Otherwise, heap will be left in an inconsistent state.
5636 if (filtering_ != kNoFiltering) {
5637 DCHECK(object_iterator_ == NULL);
5640 // Make sure the last iterator is deallocated.
5641 delete space_iterator_;
5642 space_iterator_ = NULL;
5643 object_iterator_ = NULL;
5649 HeapObject* HeapIterator::next() {
5650 if (filter_ == NULL) return NextObject();
5652 HeapObject* obj = NextObject();
  while (obj != NULL && filter_->SkipObject(obj)) obj = NextObject();
  return obj;
}
5658 HeapObject* HeapIterator::NextObject() {
5659 // No iterator means we are done.
5660 if (object_iterator_ == NULL) return NULL;
5662 if (HeapObject* obj = object_iterator_->next_object()) {
    // If the current iterator has more objects we are fine.
    return obj;
  }

  // Go through the spaces looking for one that has objects.
5667 while (space_iterator_->has_next()) {
5668 object_iterator_ = space_iterator_->next();
    if (HeapObject* obj = object_iterator_->next_object()) {
      return obj;
    }
  }

  // Done with the last space.
  object_iterator_ = NULL;
  return NULL;
}
void HeapIterator::reset() {
  // Restart the iterator.
  Shutdown();
  Init();
}
5689 Object* const PathTracer::kAnyGlobalObject = NULL;
5691 class PathTracer::MarkVisitor : public ObjectVisitor {
5693 explicit MarkVisitor(PathTracer* tracer) : tracer_(tracer) {}
5694 void VisitPointers(Object** start, Object** end) {
5695 // Scan all HeapObject pointers in [start, end)
5696 for (Object** p = start; !tracer_->found() && (p < end); p++) {
5697 if ((*p)->IsHeapObject()) tracer_->MarkRecursively(p, this);
5702 PathTracer* tracer_;
5706 class PathTracer::UnmarkVisitor : public ObjectVisitor {
5708 explicit UnmarkVisitor(PathTracer* tracer) : tracer_(tracer) {}
5709 void VisitPointers(Object** start, Object** end) {
5710 // Scan all HeapObject pointers in [start, end)
5711 for (Object** p = start; p < end; p++) {
5712 if ((*p)->IsHeapObject()) tracer_->UnmarkRecursively(p, this);
5717 PathTracer* tracer_;
5721 void PathTracer::VisitPointers(Object** start, Object** end) {
5722 bool done = ((what_to_find_ == FIND_FIRST) && found_target_);
5723 // Visit all HeapObject pointers in [start, end)
5724 for (Object** p = start; !done && (p < end); p++) {
    if ((*p)->IsHeapObject()) {
      TracePathFrom(p);
      done = ((what_to_find_ == FIND_FIRST) && found_target_);
    }
5733 void PathTracer::Reset() {
5734 found_target_ = false;
5735 object_stack_.Clear();
5739 void PathTracer::TracePathFrom(Object** root) {
5740 DCHECK((search_target_ == kAnyGlobalObject) ||
5741 search_target_->IsHeapObject());
5742 found_target_in_trace_ = false;
5745 MarkVisitor mark_visitor(this);
5746 MarkRecursively(root, &mark_visitor);
5748 UnmarkVisitor unmark_visitor(this);
5749 UnmarkRecursively(root, &unmark_visitor);
5755 static bool SafeIsNativeContext(HeapObject* obj) {
5756 return obj->map() == obj->GetHeap()->raw_unchecked_native_context_map();
5760 void PathTracer::MarkRecursively(Object** p, MarkVisitor* mark_visitor) {
5761 if (!(*p)->IsHeapObject()) return;
5763 HeapObject* obj = HeapObject::cast(*p);
5765 MapWord map_word = obj->map_word();
5766 if (!map_word.ToMap()->IsHeapObject()) return; // visited before
5768 if (found_target_in_trace_) return; // stop if target found
5769 object_stack_.Add(obj);
5770 if (((search_target_ == kAnyGlobalObject) && obj->IsJSGlobalObject()) ||
5771 (obj == search_target_)) {
5772 found_target_in_trace_ = true;
5773 found_target_ = true;
5777 bool is_native_context = SafeIsNativeContext(obj);
5780 Map* map = Map::cast(map_word.ToMap());
5782 MapWord marked_map_word =
5783 MapWord::FromRawValue(obj->map_word().ToRawValue() + kMarkTag);
5784 obj->set_map_word(marked_map_word);
  // Scan the object body.
  if (is_native_context && (visit_mode_ == VISIT_ONLY_STRONG)) {
    // This is specialized to scan Context's properly.
    Object** start =
        reinterpret_cast<Object**>(obj->address() + Context::kHeaderSize);
    Object** end =
        reinterpret_cast<Object**>(obj->address() + Context::kHeaderSize +
                                   Context::FIRST_WEAK_SLOT * kPointerSize);
    mark_visitor->VisitPointers(start, end);
  } else {
    obj->IterateBody(map->instance_type(), obj->SizeFromMap(map),
                     mark_visitor);
  }
5799 // Scan the map after the body because the body is a lot more interesting
5800 // when doing leak detection.
5801 MarkRecursively(reinterpret_cast<Object**>(&map), mark_visitor);
5803 if (!found_target_in_trace_) { // don't pop if found the target
5804 object_stack_.RemoveLast();
5809 void PathTracer::UnmarkRecursively(Object** p, UnmarkVisitor* unmark_visitor) {
5810 if (!(*p)->IsHeapObject()) return;
5812 HeapObject* obj = HeapObject::cast(*p);
5814 MapWord map_word = obj->map_word();
5815 if (map_word.ToMap()->IsHeapObject()) return; // unmarked already
5817 MapWord unmarked_map_word =
5818 MapWord::FromRawValue(map_word.ToRawValue() - kMarkTag);
5819 obj->set_map_word(unmarked_map_word);
5821 Map* map = Map::cast(unmarked_map_word.ToMap());
5823 UnmarkRecursively(reinterpret_cast<Object**>(&map), unmark_visitor);
5825 obj->IterateBody(map->instance_type(), obj->SizeFromMap(map), unmark_visitor);
5829 void PathTracer::ProcessResults() {
5830 if (found_target_) {
5831 OFStream os(stdout);
5832 os << "=====================================\n"
5833 << "==== Path to object ====\n"
5834 << "=====================================\n\n";
5836 DCHECK(!object_stack_.is_empty());
5837 for (int i = 0; i < object_stack_.length(); i++) {
5838 if (i > 0) os << "\n |\n |\n V\n\n";
5839 object_stack_[i]->Print(os);
5841 os << "=====================================\n";
5846 // Triggers a depth-first traversal of reachable objects from one
// given root object and finds a path to a specific heap object and
// prints it.
5849 void Heap::TracePathToObjectFrom(Object* target, Object* root) {
5850 PathTracer tracer(target, PathTracer::FIND_ALL, VISIT_ALL);
5851 tracer.VisitPointer(&root);
5855 // Triggers a depth-first traversal of reachable objects from roots
5856 // and finds a path to a specific heap object and prints it.
5857 void Heap::TracePathToObject(Object* target) {
5858 PathTracer tracer(target, PathTracer::FIND_ALL, VISIT_ALL);
5859 IterateRoots(&tracer, VISIT_ONLY_STRONG);
5863 // Triggers a depth-first traversal of reachable objects from roots
5864 // and finds a path to any global object and prints it. Useful for
5865 // determining the source for leaks of global objects.
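// A typical (illustrative) use is to call one of these helpers manually from
// a debugger session, e.g.
//   (gdb) call isolate->heap()->TracePathToGlobal()
// and then read the chain of objects that PathTracer::ProcessResults() prints
// to stdout.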
5866 void Heap::TracePathToGlobal() {
5867 PathTracer tracer(PathTracer::kAnyGlobalObject, PathTracer::FIND_ALL,
5869 IterateRoots(&tracer, VISIT_ONLY_STRONG);
5874 void Heap::UpdateCumulativeGCStatistics(double duration,
5875 double spent_in_mutator,
5876 double marking_time) {
5877 if (FLAG_print_cumulative_gc_stat) {
5878 total_gc_time_ms_ += duration;
5879 max_gc_pause_ = Max(max_gc_pause_, duration);
5880 max_alive_after_gc_ = Max(max_alive_after_gc_, SizeOfObjects());
5881 min_in_mutator_ = Min(min_in_mutator_, spent_in_mutator);
5882 } else if (FLAG_trace_gc_verbose) {
5883 total_gc_time_ms_ += duration;
5886 marking_time_ += marking_time;
5890 int KeyedLookupCache::Hash(Handle<Map> map, Handle<Name> name) {
5891 DisallowHeapAllocation no_gc;
5892 // Uses only lower 32 bits if pointers are larger.
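  // Map objects are allocated at aligned addresses, so the low bits of the
  // pointer are always the same; kMapHashShift drops them before the value is
  // mixed with the name hash, which keeps those constant bits from skewing
  // the bucket distribution.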
5893 uintptr_t addr_hash =
5894 static_cast<uint32_t>(reinterpret_cast<uintptr_t>(*map)) >> kMapHashShift;
5895 return static_cast<uint32_t>((addr_hash ^ name->Hash()) & kCapacityMask);
5899 int KeyedLookupCache::Lookup(Handle<Map> map, Handle<Name> name) {
5900 DisallowHeapAllocation no_gc;
5901 int index = (Hash(map, name) & kHashMask);
5902 for (int i = 0; i < kEntriesPerBucket; i++) {
5903 Key& key = keys_[index + i];
5904 if ((key.map == *map) && key.name->Equals(*name)) {
      return field_offsets_[index + i];
    }
  }
  return kNotFound;
}
void KeyedLookupCache::Update(Handle<Map> map, Handle<Name> name,
                              int field_offset) {
5914 DisallowHeapAllocation no_gc;
5915 if (!name->IsUniqueName()) {
5916 if (!StringTable::InternalizeStringIfExists(
5917 name->GetIsolate(), Handle<String>::cast(name)).ToHandle(&name)) {
5921 // This cache is cleared only between mark compact passes, so we expect the
5922 // cache to only contain old space names.
5923 DCHECK(!map->GetIsolate()->heap()->InNewSpace(*name));
5925 int index = (Hash(map, name) & kHashMask);
5926 // After a GC there will be free slots, so we use them in order (this may
5927 // help to get the most frequently used one in position 0).
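  // Illustrative example (assuming kEntriesPerBucket == 2): inserting into a
  // full bucket {A, B} shifts it to {new, A}, evicting B; if a free slot is
  // found first (key.map == NULL), the entry is written there instead.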
5928 for (int i = 0; i < kEntriesPerBucket; i++) {
5929 Key& key = keys_[index];
5930 Object* free_entry_indicator = NULL;
    if (key.map == free_entry_indicator) {
      key.map = *map;
      key.name = *name;
      field_offsets_[index + i] = field_offset;
      return;
    }
  }
5938 // No free entry found in this bucket, so we move them all down one and
5939 // put the new entry at position zero.
5940 for (int i = kEntriesPerBucket - 1; i > 0; i--) {
5941 Key& key = keys_[index + i];
5942 Key& key2 = keys_[index + i - 1];
    key = key2;
    field_offsets_[index + i] = field_offsets_[index + i - 1];
  }
5947 // Write the new first entry.
5948 Key& key = keys_[index];
  key.map = *map;
  key.name = *name;
  field_offsets_[index] = field_offset;
}
5955 void KeyedLookupCache::Clear() {
5956 for (int index = 0; index < kLength; index++) keys_[index].map = NULL;
5960 void DescriptorLookupCache::Clear() {
5961 for (int index = 0; index < kLength; index++) keys_[index].source = NULL;
5965 void ExternalStringTable::CleanUp() {
  int last = 0;
  for (int i = 0; i < new_space_strings_.length(); ++i) {
    if (new_space_strings_[i] == heap_->the_hole_value()) {
      continue;
    }
    DCHECK(new_space_strings_[i]->IsExternalString());
5972 if (heap_->InNewSpace(new_space_strings_[i])) {
5973 new_space_strings_[last++] = new_space_strings_[i];
    } else {
      old_space_strings_.Add(new_space_strings_[i]);
    }
  }
5978 new_space_strings_.Rewind(last);
5979 new_space_strings_.Trim();
  last = 0;
  for (int i = 0; i < old_space_strings_.length(); ++i) {
    if (old_space_strings_[i] == heap_->the_hole_value()) {
      continue;
    }
    DCHECK(old_space_strings_[i]->IsExternalString());
    DCHECK(!heap_->InNewSpace(old_space_strings_[i]));
    old_space_strings_[last++] = old_space_strings_[i];
  }
5990 old_space_strings_.Rewind(last);
5991 old_space_strings_.Trim();
5993 if (FLAG_verify_heap) {
6000 void ExternalStringTable::TearDown() {
6001 for (int i = 0; i < new_space_strings_.length(); ++i) {
6002 heap_->FinalizeExternalString(ExternalString::cast(new_space_strings_[i]));
6004 new_space_strings_.Free();
6005 for (int i = 0; i < old_space_strings_.length(); ++i) {
6006 heap_->FinalizeExternalString(ExternalString::cast(old_space_strings_[i]));
6008 old_space_strings_.Free();
6012 void Heap::QueueMemoryChunkForFree(MemoryChunk* chunk) {
6013 chunk->set_next_chunk(chunks_queued_for_free_);
6014 chunks_queued_for_free_ = chunk;
6018 void Heap::FreeQueuedChunks() {
6019 if (chunks_queued_for_free_ == NULL) return;
  MemoryChunk* next;
  MemoryChunk* chunk;
  for (chunk = chunks_queued_for_free_; chunk != NULL; chunk = next) {
6023 next = chunk->next_chunk();
6024 chunk->SetFlag(MemoryChunk::ABOUT_TO_BE_FREED);
6026 if (chunk->owner()->identity() == LO_SPACE) {
6027 // StoreBuffer::Filter relies on MemoryChunk::FromAnyPointerAddress.
6028 // If FromAnyPointerAddress encounters a slot that belongs to a large
6029 // chunk queued for deletion it will fail to find the chunk because
      // it tries to perform a search in the list of pages owned by the large
      // object space, and queued chunks were detached from that list.
6032 // To work around this we split large chunk into normal kPageSize aligned
6033 // pieces and initialize size, owner and flags field of every piece.
6034 // If FromAnyPointerAddress encounters a slot that belongs to one of
6035 // these smaller pieces it will treat it as a slot on a normal Page.
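      // Illustration: a large chunk spanning several pages gets a fake
      // MemoryChunk header stamped at every interior page-aligned address;
      // each fake header claims kPageSize, LO_SPACE ownership and the
      // ABOUT_TO_BE_FREED flag, so FromAnyPointerAddress resolves any slot
      // inside the chunk to one of these fake pages instead of searching the
      // already-detached large-object page list.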
6036 Address chunk_end = chunk->address() + chunk->size();
6037 MemoryChunk* inner =
6038 MemoryChunk::FromAddress(chunk->address() + Page::kPageSize);
6039 MemoryChunk* inner_last = MemoryChunk::FromAddress(chunk_end - 1);
6040 while (inner <= inner_last) {
6041 // Size of a large chunk is always a multiple of
6042 // OS::AllocateAlignment() so there is always
6043 // enough space for a fake MemoryChunk header.
6044 Address area_end = Min(inner->address() + Page::kPageSize, chunk_end);
6045 // Guard against overflow.
6046 if (area_end < inner->address()) area_end = chunk_end;
6047 inner->SetArea(inner->address(), area_end);
6048 inner->set_size(Page::kPageSize);
6049 inner->set_owner(lo_space());
6050 inner->SetFlag(MemoryChunk::ABOUT_TO_BE_FREED);
6051 inner = MemoryChunk::FromAddress(inner->address() + Page::kPageSize);
6055 isolate_->heap()->store_buffer()->Compact();
6056 isolate_->heap()->store_buffer()->Filter(MemoryChunk::ABOUT_TO_BE_FREED);
6057 for (chunk = chunks_queued_for_free_; chunk != NULL; chunk = next) {
6058 next = chunk->next_chunk();
6059 isolate_->memory_allocator()->Free(chunk);
6061 chunks_queued_for_free_ = NULL;
6065 void Heap::RememberUnmappedPage(Address page, bool compacted) {
6066 uintptr_t p = reinterpret_cast<uintptr_t>(page);
6067 // Tag the page pointer to make it findable in the dump file.
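  // The XOR-ed constants are hex mnemonics: 0xc1ead reads as "cleared" and
  // 0x1d1ed as "I died". Only bits below Page::kPageSize are touched, so the
  // page-aligned part of the address is preserved and the tag shows up when
  // scanning a crash dump for recently unmapped pages.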
  if (compacted) {
    p ^= 0xc1ead & (Page::kPageSize - 1);  // Cleared.
  } else {
    p ^= 0x1d1ed & (Page::kPageSize - 1);  // I died.
  }
6073 remembered_unmapped_pages_[remembered_unmapped_pages_index_] =
6074 reinterpret_cast<Address>(p);
6075 remembered_unmapped_pages_index_++;
6076 remembered_unmapped_pages_index_ %= kRememberedUnmappedPages;
6080 void Heap::ClearObjectStats(bool clear_last_time_stats) {
6081 memset(object_counts_, 0, sizeof(object_counts_));
6082 memset(object_sizes_, 0, sizeof(object_sizes_));
6083 if (clear_last_time_stats) {
6084 memset(object_counts_last_time_, 0, sizeof(object_counts_last_time_));
6085 memset(object_sizes_last_time_, 0, sizeof(object_sizes_last_time_));
6090 static base::LazyMutex checkpoint_object_stats_mutex = LAZY_MUTEX_INITIALIZER;
6093 void Heap::CheckpointObjectStats() {
6094 base::LockGuard<base::Mutex> lock_guard(
6095 checkpoint_object_stats_mutex.Pointer());
6096 Counters* counters = isolate()->counters();
6097 #define ADJUST_LAST_TIME_OBJECT_COUNT(name) \
6098 counters->count_of_##name()->Increment( \
6099 static_cast<int>(object_counts_[name])); \
6100 counters->count_of_##name()->Decrement( \
6101 static_cast<int>(object_counts_last_time_[name])); \
6102 counters->size_of_##name()->Increment( \
6103 static_cast<int>(object_sizes_[name])); \
6104 counters->size_of_##name()->Decrement( \
6105 static_cast<int>(object_sizes_last_time_[name]));
6106 INSTANCE_TYPE_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
6107 #undef ADJUST_LAST_TIME_OBJECT_COUNT
  int index;
#define ADJUST_LAST_TIME_OBJECT_COUNT(name)              \
6110 index = FIRST_CODE_KIND_SUB_TYPE + Code::name; \
6111 counters->count_of_CODE_TYPE_##name()->Increment( \
6112 static_cast<int>(object_counts_[index])); \
6113 counters->count_of_CODE_TYPE_##name()->Decrement( \
6114 static_cast<int>(object_counts_last_time_[index])); \
6115 counters->size_of_CODE_TYPE_##name()->Increment( \
6116 static_cast<int>(object_sizes_[index])); \
6117 counters->size_of_CODE_TYPE_##name()->Decrement( \
6118 static_cast<int>(object_sizes_last_time_[index]));
6119 CODE_KIND_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
6120 #undef ADJUST_LAST_TIME_OBJECT_COUNT
6121 #define ADJUST_LAST_TIME_OBJECT_COUNT(name) \
6122 index = FIRST_FIXED_ARRAY_SUB_TYPE + name; \
6123 counters->count_of_FIXED_ARRAY_##name()->Increment( \
6124 static_cast<int>(object_counts_[index])); \
6125 counters->count_of_FIXED_ARRAY_##name()->Decrement( \
6126 static_cast<int>(object_counts_last_time_[index])); \
6127 counters->size_of_FIXED_ARRAY_##name()->Increment( \
6128 static_cast<int>(object_sizes_[index])); \
6129 counters->size_of_FIXED_ARRAY_##name()->Decrement( \
6130 static_cast<int>(object_sizes_last_time_[index]));
6131 FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
6132 #undef ADJUST_LAST_TIME_OBJECT_COUNT
6133 #define ADJUST_LAST_TIME_OBJECT_COUNT(name) \
  index =                                                                     \
      FIRST_CODE_AGE_SUB_TYPE + Code::k##name##CodeAge - Code::kFirstCodeAge; \
6136 counters->count_of_CODE_AGE_##name()->Increment( \
6137 static_cast<int>(object_counts_[index])); \
6138 counters->count_of_CODE_AGE_##name()->Decrement( \
6139 static_cast<int>(object_counts_last_time_[index])); \
6140 counters->size_of_CODE_AGE_##name()->Increment( \
6141 static_cast<int>(object_sizes_[index])); \
6142 counters->size_of_CODE_AGE_##name()->Decrement( \
6143 static_cast<int>(object_sizes_last_time_[index]));
6144 CODE_AGE_LIST_COMPLETE(ADJUST_LAST_TIME_OBJECT_COUNT)
6145 #undef ADJUST_LAST_TIME_OBJECT_COUNT
6147 MemCopy(object_counts_last_time_, object_counts_, sizeof(object_counts_));
6148 MemCopy(object_sizes_last_time_, object_sizes_, sizeof(object_sizes_));
}
}  // namespace v8::internal