1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
7 #include "src/accessors.h"
9 #include "src/base/once.h"
10 #include "src/base/utils/random-number-generator.h"
11 #include "src/bootstrapper.h"
12 #include "src/codegen.h"
13 #include "src/compilation-cache.h"
14 #include "src/conversions.h"
15 #include "src/cpu-profiler.h"
16 #include "src/debug.h"
17 #include "src/deoptimizer.h"
18 #include "src/global-handles.h"
19 #include "src/heap/incremental-marking.h"
20 #include "src/heap/mark-compact.h"
21 #include "src/heap/objects-visiting-inl.h"
22 #include "src/heap/objects-visiting.h"
23 #include "src/heap/store-buffer.h"
24 #include "src/heap-profiler.h"
25 #include "src/isolate-inl.h"
26 #include "src/natives.h"
27 #include "src/runtime-profiler.h"
28 #include "src/scopeinfo.h"
29 #include "src/snapshot.h"
30 #include "src/utils.h"
31 #include "src/v8threads.h"
32 #include "src/vm-state-inl.h"
34 #if V8_TARGET_ARCH_ARM && !V8_INTERPRETED_REGEXP
35 #include "src/regexp-macro-assembler.h" // NOLINT
36 #include "src/arm/regexp-macro-assembler-arm.h" // NOLINT
38 #if V8_TARGET_ARCH_MIPS && !V8_INTERPRETED_REGEXP
39 #include "src/regexp-macro-assembler.h" // NOLINT
40 #include "src/mips/regexp-macro-assembler-mips.h" // NOLINT
42 #if V8_TARGET_ARCH_MIPS64 && !V8_INTERPRETED_REGEXP
43 #include "src/regexp-macro-assembler.h"
44 #include "src/mips64/regexp-macro-assembler-mips64.h"
52 : amount_of_external_allocated_memory_(0),
53 amount_of_external_allocated_memory_at_last_global_gc_(0),
56 // semispace_size_ should be a power of 2 and old_generation_size_ should
57 // be a multiple of Page::kPageSize.
58 reserved_semispace_size_(8 * (kPointerSize / 4) * MB),
59 max_semi_space_size_(8 * (kPointerSize / 4) * MB),
60 initial_semispace_size_(Page::kPageSize),
61 max_old_generation_size_(700ul * (kPointerSize / 4) * MB),
62 max_executable_size_(256ul * (kPointerSize / 4) * MB),
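// For example, with kPointerSize == 8 (a 64-bit build) these defaults work
// out to 16 MB reserved/max semispaces, a 1400 MB old generation and 512 MB
// of executable memory; a 32-bit build (kPointerSize == 4) gets half of each.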
63 // Variables set based on semispace_size_ and old_generation_size_ in
65 // Will be 4 * reserved_semispace_size_ to ensure that the young
66 // generation can be aligned to its size.
67 maximum_committed_(0),
68 survived_since_last_expansion_(0),
70 always_allocate_scope_depth_(0),
71 contexts_disposed_(0),
73 flush_monomorphic_ics_(false),
74 scan_on_scavenge_pages_(0),
76 old_pointer_space_(NULL),
77 old_data_space_(NULL),
81 property_cell_space_(NULL),
84 gc_post_processing_depth_(0),
85 allocations_count_(0),
86 raw_allocations_hash_(0),
87 dump_allocations_hash_countdown_(FLAG_dump_allocations_digest_at_alloc),
90 remembered_unmapped_pages_index_(0),
91 unflattened_strings_length_(0),
93 allocation_timeout_(0),
95 old_generation_allocation_limit_(kMinimumOldGenerationAllocationLimit),
96 old_gen_exhausted_(false),
97 inline_allocation_disabled_(false),
98 store_buffer_rebuilder_(store_buffer()),
100 gc_safe_size_of_old_object_(NULL),
101 total_regexp_code_generated_(0),
103 high_survival_rate_period_length_(0),
104 promoted_objects_size_(0),
106 semi_space_copied_object_size_(0),
107 semi_space_copied_rate_(0),
108 nodes_died_in_new_space_(0),
109 nodes_copied_in_new_space_(0),
111 maximum_size_scavenges_(0),
113 total_gc_time_ms_(0.0),
114 max_alive_after_gc_(0),
115 min_in_mutator_(kMaxInt),
118 mark_compact_collector_(this),
121 incremental_marking_(this),
122 number_idle_notifications_(0),
123 last_idle_notification_gc_count_(0),
124 last_idle_notification_gc_count_init_(false),
125 mark_sweeps_since_idle_round_started_(0),
126 gc_count_at_last_idle_gc_(0),
127 scavenges_since_last_idle_round_(kIdleScavengeThreshold),
128 full_codegen_bytes_generated_(0),
129 crankshaft_codegen_bytes_generated_(0),
130 gcs_since_last_deopt_(0),
132 no_weak_object_verification_scope_depth_(0),
134 allocation_sites_scratchpad_length_(0),
135 promotion_queue_(this),
137 external_string_table_(this),
138 chunks_queued_for_free_(NULL),
139 gc_callbacks_depth_(0) {
140 // Allow build-time customization of the max semispace size. Building
141 // V8 with snapshots and a non-default max semispace size is much
142 // easier if you can define it as part of the build environment.
143 #if defined(V8_MAX_SEMISPACE_SIZE)
144 max_semi_space_size_ = reserved_semispace_size_ = V8_MAX_SEMISPACE_SIZE;
147 // Ensure old_generation_size_ is a multiple of kPageSize.
148 DCHECK(MB >= Page::kPageSize);
150 memset(roots_, 0, sizeof(roots_[0]) * kRootListLength);
151 set_native_contexts_list(NULL);
152 set_array_buffers_list(Smi::FromInt(0));
153 set_allocation_sites_list(Smi::FromInt(0));
154 set_encountered_weak_collections(Smi::FromInt(0));
155 // Put a dummy entry in the remembered pages so we can find the list in the
156 // minidump even if there are no real unmapped pages.
157 RememberUnmappedPage(NULL, false);
159 ClearObjectStats(true);
163 intptr_t Heap::Capacity() {
164 if (!HasBeenSetUp()) return 0;
166 return new_space_.Capacity() + old_pointer_space_->Capacity() +
167 old_data_space_->Capacity() + code_space_->Capacity() +
168 map_space_->Capacity() + cell_space_->Capacity() +
169 property_cell_space_->Capacity();
173 intptr_t Heap::CommittedMemory() {
174 if (!HasBeenSetUp()) return 0;
176 return new_space_.CommittedMemory() + old_pointer_space_->CommittedMemory() +
177 old_data_space_->CommittedMemory() + code_space_->CommittedMemory() +
178 map_space_->CommittedMemory() + cell_space_->CommittedMemory() +
179 property_cell_space_->CommittedMemory() + lo_space_->Size();
183 size_t Heap::CommittedPhysicalMemory() {
184 if (!HasBeenSetUp()) return 0;
186 return new_space_.CommittedPhysicalMemory() +
187 old_pointer_space_->CommittedPhysicalMemory() +
188 old_data_space_->CommittedPhysicalMemory() +
189 code_space_->CommittedPhysicalMemory() +
190 map_space_->CommittedPhysicalMemory() +
191 cell_space_->CommittedPhysicalMemory() +
192 property_cell_space_->CommittedPhysicalMemory() +
193 lo_space_->CommittedPhysicalMemory();
197 intptr_t Heap::CommittedMemoryExecutable() {
198 if (!HasBeenSetUp()) return 0;
200 return isolate()->memory_allocator()->SizeExecutable();
204 void Heap::UpdateMaximumCommitted() {
205 if (!HasBeenSetUp()) return;
207 intptr_t current_committed_memory = CommittedMemory();
208 if (current_committed_memory > maximum_committed_) {
209 maximum_committed_ = current_committed_memory;
214 intptr_t Heap::Available() {
215 if (!HasBeenSetUp()) return 0;
217 return new_space_.Available() + old_pointer_space_->Available() +
218 old_data_space_->Available() + code_space_->Available() +
219 map_space_->Available() + cell_space_->Available() +
220 property_cell_space_->Available();
224 bool Heap::HasBeenSetUp() {
225 return old_pointer_space_ != NULL && old_data_space_ != NULL &&
226 code_space_ != NULL && map_space_ != NULL && cell_space_ != NULL &&
227 property_cell_space_ != NULL && lo_space_ != NULL;
231 int Heap::GcSafeSizeOfOldObject(HeapObject* object) {
232 if (IntrusiveMarking::IsMarked(object)) {
233 return IntrusiveMarking::SizeOfMarkedObject(object);
235 return object->SizeFromMap(object->map());
239 GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space,
240 const char** reason) {
241 // Is global GC requested?
242 if (space != NEW_SPACE) {
243 isolate_->counters()->gc_compactor_caused_by_request()->Increment();
244 *reason = "GC in old space requested";
245 return MARK_COMPACTOR;
248 if (FLAG_gc_global || (FLAG_stress_compaction && (gc_count_ & 1) != 0)) {
249 *reason = "GC in old space forced by flags";
250 return MARK_COMPACTOR;
253 // Is enough data promoted to justify a global GC?
254 if (OldGenerationAllocationLimitReached()) {
255 isolate_->counters()->gc_compactor_caused_by_promoted_data()->Increment();
256 *reason = "promotion limit reached";
257 return MARK_COMPACTOR;
260 // Have allocation in OLD and LO failed?
261 if (old_gen_exhausted_) {
263 ->gc_compactor_caused_by_oldspace_exhaustion()
265 *reason = "old generations exhausted";
266 return MARK_COMPACTOR;
269 // Is there enough space left in OLD to guarantee that a scavenge can succeed?
272 // Note that MemoryAllocator->MaxAvailable() undercounts the memory available
273 // for object promotion. It counts only the bytes that the memory
274 // allocator has not yet allocated from the OS and assigned to any space,
275 // and does not count available bytes already in the old space or code
276 // space. Undercounting is safe---we may get an unrequested full GC when
277 // a scavenge would have succeeded.
278 if (isolate_->memory_allocator()->MaxAvailable() <= new_space_.Size()) {
280 ->gc_compactor_caused_by_oldspace_exhaustion()
282 *reason = "scavenge might not succeed";
283 return MARK_COMPACTOR;
292 // TODO(1238405): Combine the infrastructure for --heap-stats and
293 // --log-gc to avoid the complicated preprocessor and flag testing.
294 void Heap::ReportStatisticsBeforeGC() {
295 // Heap::ReportHeapStatistics will also log NewSpace statistics when
296 // --log-gc is set. The following logic is used to avoid double logging.
299 if (FLAG_heap_stats || FLAG_log_gc) new_space_.CollectStatistics();
300 if (FLAG_heap_stats) {
301 ReportHeapStatistics("Before GC");
302 } else if (FLAG_log_gc) {
303 new_space_.ReportStatistics();
305 if (FLAG_heap_stats || FLAG_log_gc) new_space_.ClearHistograms();
308 new_space_.CollectStatistics();
309 new_space_.ReportStatistics();
310 new_space_.ClearHistograms();
316 void Heap::PrintShortHeapStatistics() {
317 if (!FLAG_trace_gc_verbose) return;
318 PrintPID("Memory allocator, used: %6" V8_PTR_PREFIX
320 ", available: %6" V8_PTR_PREFIX "d KB\n",
321 isolate_->memory_allocator()->Size() / KB,
322 isolate_->memory_allocator()->Available() / KB);
323 PrintPID("New space, used: %6" V8_PTR_PREFIX
325 ", available: %6" V8_PTR_PREFIX
327 ", committed: %6" V8_PTR_PREFIX "d KB\n",
328 new_space_.Size() / KB, new_space_.Available() / KB,
329 new_space_.CommittedMemory() / KB);
330 PrintPID("Old pointers, used: %6" V8_PTR_PREFIX
332 ", available: %6" V8_PTR_PREFIX
334 ", committed: %6" V8_PTR_PREFIX "d KB\n",
335 old_pointer_space_->SizeOfObjects() / KB,
336 old_pointer_space_->Available() / KB,
337 old_pointer_space_->CommittedMemory() / KB);
338 PrintPID("Old data space, used: %6" V8_PTR_PREFIX
340 ", available: %6" V8_PTR_PREFIX
342 ", committed: %6" V8_PTR_PREFIX "d KB\n",
343 old_data_space_->SizeOfObjects() / KB,
344 old_data_space_->Available() / KB,
345 old_data_space_->CommittedMemory() / KB);
346 PrintPID("Code space, used: %6" V8_PTR_PREFIX
348 ", available: %6" V8_PTR_PREFIX
350 ", committed: %6" V8_PTR_PREFIX "d KB\n",
351 code_space_->SizeOfObjects() / KB, code_space_->Available() / KB,
352 code_space_->CommittedMemory() / KB);
353 PrintPID("Map space, used: %6" V8_PTR_PREFIX
355 ", available: %6" V8_PTR_PREFIX
357 ", committed: %6" V8_PTR_PREFIX "d KB\n",
358 map_space_->SizeOfObjects() / KB, map_space_->Available() / KB,
359 map_space_->CommittedMemory() / KB);
360 PrintPID("Cell space, used: %6" V8_PTR_PREFIX
362 ", available: %6" V8_PTR_PREFIX
364 ", committed: %6" V8_PTR_PREFIX "d KB\n",
365 cell_space_->SizeOfObjects() / KB, cell_space_->Available() / KB,
366 cell_space_->CommittedMemory() / KB);
367 PrintPID("PropertyCell space, used: %6" V8_PTR_PREFIX
369 ", available: %6" V8_PTR_PREFIX
371 ", committed: %6" V8_PTR_PREFIX "d KB\n",
372 property_cell_space_->SizeOfObjects() / KB,
373 property_cell_space_->Available() / KB,
374 property_cell_space_->CommittedMemory() / KB);
375 PrintPID("Large object space, used: %6" V8_PTR_PREFIX
377 ", available: %6" V8_PTR_PREFIX
379 ", committed: %6" V8_PTR_PREFIX "d KB\n",
380 lo_space_->SizeOfObjects() / KB, lo_space_->Available() / KB,
381 lo_space_->CommittedMemory() / KB);
382 PrintPID("All spaces, used: %6" V8_PTR_PREFIX
384 ", available: %6" V8_PTR_PREFIX
386 ", committed: %6" V8_PTR_PREFIX "d KB\n",
387 this->SizeOfObjects() / KB, this->Available() / KB,
388 this->CommittedMemory() / KB);
389 PrintPID("External memory reported: %6" V8_PTR_PREFIX "d KB\n",
390 static_cast<intptr_t>(amount_of_external_allocated_memory_ / KB));
391 PrintPID("Total time spent in GC : %.1f ms\n", total_gc_time_ms_);
395 // TODO(1238405): Combine the infrastructure for --heap-stats and
396 // --log-gc to avoid the complicated preprocessor and flag testing.
397 void Heap::ReportStatisticsAfterGC() {
398 // As with the pre-GC statistics, we use some complicated logic to ensure that
399 // NewSpace statistics are logged exactly once when --log-gc is turned on.
401 if (FLAG_heap_stats) {
402 new_space_.CollectStatistics();
403 ReportHeapStatistics("After GC");
404 } else if (FLAG_log_gc) {
405 new_space_.ReportStatistics();
408 if (FLAG_log_gc) new_space_.ReportStatistics();
413 void Heap::GarbageCollectionPrologue() {
415 AllowHeapAllocation for_the_first_part_of_prologue;
416 ClearJSFunctionResultCaches();
418 unflattened_strings_length_ = 0;
420 if (FLAG_flush_code && FLAG_flush_code_incrementally) {
421 mark_compact_collector()->EnableCodeFlushing(true);
425 if (FLAG_verify_heap) {
431 // Reset GC statistics.
432 promoted_objects_size_ = 0;
433 semi_space_copied_object_size_ = 0;
434 nodes_died_in_new_space_ = 0;
435 nodes_copied_in_new_space_ = 0;
438 UpdateMaximumCommitted();
441 DCHECK(!AllowHeapAllocation::IsAllowed() && gc_state_ == NOT_IN_GC);
443 if (FLAG_gc_verbose) Print();
445 ReportStatisticsBeforeGC();
448 store_buffer()->GCPrologue();
450 if (isolate()->concurrent_osr_enabled()) {
451 isolate()->optimizing_compiler_thread()->AgeBufferedOsrJobs();
454 if (new_space_.IsAtMaximumCapacity()) {
455 maximum_size_scavenges_++;
457 maximum_size_scavenges_ = 0;
459 CheckNewSpaceExpansionCriteria();
463 intptr_t Heap::SizeOfObjects() {
465 AllSpaces spaces(this);
466 for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
467 total += space->SizeOfObjects();
473 void Heap::ClearAllICsByKind(Code::Kind kind) {
474 HeapObjectIterator it(code_space());
476 for (Object* object = it.Next(); object != NULL; object = it.Next()) {
477 Code* code = Code::cast(object);
478 Code::Kind current_kind = code->kind();
479 if (current_kind == Code::FUNCTION ||
480 current_kind == Code::OPTIMIZED_FUNCTION) {
481 code->ClearInlineCaches(kind);
487 void Heap::RepairFreeListsAfterBoot() {
488 PagedSpaces spaces(this);
489 for (PagedSpace* space = spaces.next(); space != NULL;
490 space = spaces.next()) {
491 space->RepairFreeListsAfterBoot();
496 void Heap::ProcessPretenuringFeedback() {
497 if (FLAG_allocation_site_pretenuring) {
498 int tenure_decisions = 0;
499 int dont_tenure_decisions = 0;
500 int allocation_mementos_found = 0;
501 int allocation_sites = 0;
502 int active_allocation_sites = 0;
504 // If the scratchpad overflowed, we have to iterate over the allocation sites list.
506 // TODO(hpayer): We iterate over the whole list of allocation sites when
507 // we grew to the maximum semi-space size to deopt maybe tenured
508 // allocation sites. We could hold the maybe tenured allocation sites
509 // in a separate data structure if this is a performance problem.
510 bool deopt_maybe_tenured = DeoptMaybeTenuredAllocationSites();
511 bool use_scratchpad =
512 allocation_sites_scratchpad_length_ < kAllocationSiteScratchpadSize &&
513 !deopt_maybe_tenured;
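// In other words: walk the fixed-size scratchpad only if it did not overflow
// (its length is still below kAllocationSiteScratchpadSize) and no
// maybe-tenured sites need to be deoptimized; otherwise fall back to walking
// the full weak list hanging off allocation_sites_list().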
516 Object* list_element = allocation_sites_list();
517 bool trigger_deoptimization = false;
518 bool maximum_size_scavenge = MaximumSizeScavenge();
519 while (use_scratchpad ? i < allocation_sites_scratchpad_length_
520 : list_element->IsAllocationSite()) {
521 AllocationSite* site =
523 ? AllocationSite::cast(allocation_sites_scratchpad()->get(i))
524 : AllocationSite::cast(list_element);
525 allocation_mementos_found += site->memento_found_count();
526 if (site->memento_found_count() > 0) {
527 active_allocation_sites++;
528 if (site->DigestPretenuringFeedback(maximum_size_scavenge)) {
529 trigger_deoptimization = true;
531 if (site->GetPretenureMode() == TENURED) {
534 dont_tenure_decisions++;
539 if (deopt_maybe_tenured && site->IsMaybeTenure()) {
540 site->set_deopt_dependent_code(true);
541 trigger_deoptimization = true;
544 if (use_scratchpad) {
547 list_element = site->weak_next();
551 if (trigger_deoptimization) {
552 isolate_->stack_guard()->RequestDeoptMarkedAllocationSites();
555 FlushAllocationSitesScratchpad();
557 if (FLAG_trace_pretenuring_statistics &&
558 (allocation_mementos_found > 0 || tenure_decisions > 0 ||
559 dont_tenure_decisions > 0)) {
561 "GC: (mode, #visited allocation sites, #active allocation sites, "
562 "#mementos, #tenure decisions, #donttenure decisions) "
563 "(%s, %d, %d, %d, %d, %d)\n",
564 use_scratchpad ? "use scratchpad" : "use list", allocation_sites,
565 active_allocation_sites, allocation_mementos_found, tenure_decisions,
566 dont_tenure_decisions);
572 void Heap::DeoptMarkedAllocationSites() {
573 // TODO(hpayer): If iterating over the allocation sites list becomes a
574 // performance issue, use a cached heap data structure instead (similar to the
575 // allocation sites scratchpad).
576 Object* list_element = allocation_sites_list();
577 while (list_element->IsAllocationSite()) {
578 AllocationSite* site = AllocationSite::cast(list_element);
579 if (site->deopt_dependent_code()) {
580 site->dependent_code()->MarkCodeForDeoptimization(
581 isolate_, DependentCode::kAllocationSiteTenuringChangedGroup);
582 site->set_deopt_dependent_code(false);
584 list_element = site->weak_next();
586 Deoptimizer::DeoptimizeMarkedCode(isolate_);
590 void Heap::GarbageCollectionEpilogue() {
591 store_buffer()->GCEpilogue();
593 // In release mode, we only zap the from space under heap verification.
594 if (Heap::ShouldZapGarbage()) {
598 // Process pretenuring feedback and update allocation sites.
599 ProcessPretenuringFeedback();
602 if (FLAG_verify_heap) {
607 AllowHeapAllocation for_the_rest_of_the_epilogue;
610 if (FLAG_print_global_handles) isolate_->global_handles()->Print();
611 if (FLAG_print_handles) PrintHandles();
612 if (FLAG_gc_verbose) Print();
613 if (FLAG_code_stats) ReportCodeStatistics("After GC");
615 if (FLAG_deopt_every_n_garbage_collections > 0) {
616 // TODO(jkummerow/ulan/jarin): This is not safe! We can't assume that
617 // the topmost optimized frame can be deoptimized safely, because it
618 // might not have a lazy bailout point right after its current PC.
619 if (++gcs_since_last_deopt_ == FLAG_deopt_every_n_garbage_collections) {
620 Deoptimizer::DeoptimizeAll(isolate());
621 gcs_since_last_deopt_ = 0;
625 UpdateMaximumCommitted();
627 isolate_->counters()->alive_after_last_gc()->Set(
628 static_cast<int>(SizeOfObjects()));
630 isolate_->counters()->string_table_capacity()->Set(
631 string_table()->Capacity());
632 isolate_->counters()->number_of_symbols()->Set(
633 string_table()->NumberOfElements());
635 if (full_codegen_bytes_generated_ + crankshaft_codegen_bytes_generated_ > 0) {
636 isolate_->counters()->codegen_fraction_crankshaft()->AddSample(
637 static_cast<int>((crankshaft_codegen_bytes_generated_ * 100.0) /
638 (crankshaft_codegen_bytes_generated_ +
639 full_codegen_bytes_generated_)));
642 if (CommittedMemory() > 0) {
643 isolate_->counters()->external_fragmentation_total()->AddSample(
644 static_cast<int>(100 - (SizeOfObjects() * 100.0) / CommittedMemory()));
646 isolate_->counters()->heap_fraction_new_space()->AddSample(static_cast<int>(
647 (new_space()->CommittedMemory() * 100.0) / CommittedMemory()));
648 isolate_->counters()->heap_fraction_old_pointer_space()->AddSample(
649 static_cast<int>((old_pointer_space()->CommittedMemory() * 100.0) /
651 isolate_->counters()->heap_fraction_old_data_space()->AddSample(
652 static_cast<int>((old_data_space()->CommittedMemory() * 100.0) /
654 isolate_->counters()->heap_fraction_code_space()->AddSample(
655 static_cast<int>((code_space()->CommittedMemory() * 100.0) /
657 isolate_->counters()->heap_fraction_map_space()->AddSample(static_cast<int>(
658 (map_space()->CommittedMemory() * 100.0) / CommittedMemory()));
659 isolate_->counters()->heap_fraction_cell_space()->AddSample(
660 static_cast<int>((cell_space()->CommittedMemory() * 100.0) /
662 isolate_->counters()->heap_fraction_property_cell_space()->AddSample(
663 static_cast<int>((property_cell_space()->CommittedMemory() * 100.0) /
665 isolate_->counters()->heap_fraction_lo_space()->AddSample(static_cast<int>(
666 (lo_space()->CommittedMemory() * 100.0) / CommittedMemory()));
668 isolate_->counters()->heap_sample_total_committed()->AddSample(
669 static_cast<int>(CommittedMemory() / KB));
670 isolate_->counters()->heap_sample_total_used()->AddSample(
671 static_cast<int>(SizeOfObjects() / KB));
672 isolate_->counters()->heap_sample_map_space_committed()->AddSample(
673 static_cast<int>(map_space()->CommittedMemory() / KB));
674 isolate_->counters()->heap_sample_cell_space_committed()->AddSample(
675 static_cast<int>(cell_space()->CommittedMemory() / KB));
677 ->heap_sample_property_cell_space_committed()
679 static_cast<int>(property_cell_space()->CommittedMemory() / KB));
680 isolate_->counters()->heap_sample_code_space_committed()->AddSample(
681 static_cast<int>(code_space()->CommittedMemory() / KB));
683 isolate_->counters()->heap_sample_maximum_committed()->AddSample(
684 static_cast<int>(MaximumCommittedMemory() / KB));
687 #define UPDATE_COUNTERS_FOR_SPACE(space) \
688 isolate_->counters()->space##_bytes_available()->Set( \
689 static_cast<int>(space()->Available())); \
690 isolate_->counters()->space##_bytes_committed()->Set( \
691 static_cast<int>(space()->CommittedMemory())); \
692 isolate_->counters()->space##_bytes_used()->Set( \
693 static_cast<int>(space()->SizeOfObjects()));
694 #define UPDATE_FRAGMENTATION_FOR_SPACE(space) \
695 if (space()->CommittedMemory() > 0) { \
696 isolate_->counters()->external_fragmentation_##space()->AddSample( \
697 static_cast<int>(100 - \
698 (space()->SizeOfObjects() * 100.0) / \
699 space()->CommittedMemory())); \
701 #define UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(space) \
702 UPDATE_COUNTERS_FOR_SPACE(space) \
703 UPDATE_FRAGMENTATION_FOR_SPACE(space)
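// As an illustration, UPDATE_COUNTERS_FOR_SPACE(new_space) below expands into
// updates of the new_space_bytes_available, new_space_bytes_committed and
// new_space_bytes_used counters; the fragmentation variant additionally
// samples external_fragmentation_<space> when committed memory is non-zero.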
705 UPDATE_COUNTERS_FOR_SPACE(new_space)
706 UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(old_pointer_space)
707 UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(old_data_space)
708 UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(code_space)
709 UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(map_space)
710 UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(cell_space)
711 UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(property_cell_space)
712 UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(lo_space)
713 #undef UPDATE_COUNTERS_FOR_SPACE
714 #undef UPDATE_FRAGMENTATION_FOR_SPACE
715 #undef UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE
718 ReportStatisticsAfterGC();
721 // Remember the last top pointer so that we can later find out
722 // whether we allocated in new space since the last GC.
723 new_space_top_after_last_gc_ = new_space()->top();
727 void Heap::CollectAllGarbage(int flags, const char* gc_reason,
728 const v8::GCCallbackFlags gc_callback_flags) {
729 // Since we are ignoring the return value, the exact choice of space does
730 // not matter, so long as we do not specify NEW_SPACE, which would not cause
// a full GC.
732 mark_compact_collector_.SetFlags(flags);
733 CollectGarbage(OLD_POINTER_SPACE, gc_reason, gc_callback_flags);
734 mark_compact_collector_.SetFlags(kNoGCFlags);
738 void Heap::CollectAllAvailableGarbage(const char* gc_reason) {
739 // Since we are ignoring the return value, the exact choice of space does
740 // not matter, so long as we do not specify NEW_SPACE, which would not cause
// a full GC.
742 // A major GC would invoke weak handle callbacks on weakly reachable
743 // handles, but it won't collect weakly reachable objects until the next
744 // major GC. Therefore, if we collect aggressively and a weak handle callback
745 // has been invoked, we rerun a major GC to release objects which become garbage.
747 // Note: as weak callbacks can execute arbitrary code, we cannot
748 // hope that eventually there will be no weak callbacks invocations.
749 // Therefore stop recollecting after several attempts.
750 if (isolate()->concurrent_recompilation_enabled()) {
751 // The optimizing compiler may be unnecessarily holding on to memory.
752 DisallowHeapAllocation no_recursive_gc;
753 isolate()->optimizing_compiler_thread()->Flush();
755 mark_compact_collector()->SetFlags(kMakeHeapIterableMask |
756 kReduceMemoryFootprintMask);
757 isolate_->compilation_cache()->Clear();
758 const int kMaxNumberOfAttempts = 7;
759 const int kMinNumberOfAttempts = 2;
760 for (int attempt = 0; attempt < kMaxNumberOfAttempts; attempt++) {
761 if (!CollectGarbage(MARK_COMPACTOR, gc_reason, NULL) &&
762 attempt + 1 >= kMinNumberOfAttempts) {
766 mark_compact_collector()->SetFlags(kNoGCFlags);
769 incremental_marking()->UncommitMarkingDeque();
773 void Heap::EnsureFillerObjectAtTop() {
774 // There may be an allocation memento behind every object in new space.
775 // If we evacuate a new space that is not full, or if we are on the last page
776 // of the new space, then there may be uninitialized memory behind the top
777 // pointer of the new space page. We store a filler object there to
778 // identify the unused space.
779 Address from_top = new_space_.top();
780 Address from_limit = new_space_.limit();
781 if (from_top < from_limit) {
782 int remaining_in_page = static_cast<int>(from_limit - from_top);
783 CreateFillerObjectAt(from_top, remaining_in_page);
788 bool Heap::CollectGarbage(GarbageCollector collector, const char* gc_reason,
789 const char* collector_reason,
790 const v8::GCCallbackFlags gc_callback_flags) {
791 // The VM is in the GC state until exiting this function.
792 VMState<GC> state(isolate_);
795 // Reset the allocation timeout to the GC interval, but make sure to
796 // allow at least a few allocations after a collection. The reason
797 // for this is that we have a lot of allocation sequences and we
798 // assume that a garbage collection will allow the subsequent
799 // allocation attempts to go through.
800 allocation_timeout_ = Max(6, FLAG_gc_interval);
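// For example, running with --gc_interval=1 still resets the timeout to
// Max(6, 1) == 6, so a handful of allocations can complete before the next
// forced collection.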
803 EnsureFillerObjectAtTop();
805 if (collector == SCAVENGER && !incremental_marking()->IsStopped()) {
806 if (FLAG_trace_incremental_marking) {
807 PrintF("[IncrementalMarking] Scavenge during marking.\n");
811 if (collector == MARK_COMPACTOR &&
812 !mark_compact_collector()->abort_incremental_marking() &&
813 !incremental_marking()->IsStopped() &&
814 !incremental_marking()->should_hurry() &&
815 FLAG_incremental_marking_steps) {
816 // Make progress in incremental marking.
817 const intptr_t kStepSizeWhenDelayedByScavenge = 1 * MB;
818 incremental_marking()->Step(kStepSizeWhenDelayedByScavenge,
819 IncrementalMarking::NO_GC_VIA_STACK_GUARD);
820 if (!incremental_marking()->IsComplete() && !FLAG_gc_global) {
821 if (FLAG_trace_incremental_marking) {
822 PrintF("[IncrementalMarking] Delaying MarkSweep.\n");
824 collector = SCAVENGER;
825 collector_reason = "incremental marking delaying mark-sweep";
829 bool next_gc_likely_to_collect_more = false;
832 tracer()->Start(collector, gc_reason, collector_reason);
833 DCHECK(AllowHeapAllocation::IsAllowed());
834 DisallowHeapAllocation no_allocation_during_gc;
835 GarbageCollectionPrologue();
838 HistogramTimerScope histogram_timer_scope(
839 (collector == SCAVENGER) ? isolate_->counters()->gc_scavenger()
840 : isolate_->counters()->gc_compactor());
841 next_gc_likely_to_collect_more =
842 PerformGarbageCollection(collector, gc_callback_flags);
845 GarbageCollectionEpilogue();
849 // Start incremental marking for the next cycle. The heap snapshot
850 // generator needs incremental marking to stay off after it aborted.
851 if (!mark_compact_collector()->abort_incremental_marking() &&
852 incremental_marking()->IsStopped() &&
853 incremental_marking()->WorthActivating() && NextGCIsLikelyToBeFull()) {
854 incremental_marking()->Start();
857 return next_gc_likely_to_collect_more;
861 int Heap::NotifyContextDisposed() {
862 if (isolate()->concurrent_recompilation_enabled()) {
863 // Flush the queued recompilation tasks.
864 isolate()->optimizing_compiler_thread()->Flush();
866 flush_monomorphic_ics_ = true;
868 return ++contexts_disposed_;
872 void Heap::MoveElements(FixedArray* array, int dst_index, int src_index,
874 if (len == 0) return;
876 DCHECK(array->map() != fixed_cow_array_map());
877 Object** dst_objects = array->data_start() + dst_index;
878 MemMove(dst_objects, array->data_start() + src_index, len * kPointerSize);
879 if (!InNewSpace(array)) {
880 for (int i = 0; i < len; i++) {
881 // TODO(hpayer): check store buffer for entries
882 if (InNewSpace(dst_objects[i])) {
883 RecordWrite(array->address(), array->OffsetOfElementAt(dst_index + i));
887 incremental_marking()->RecordWrites(array);
892 // Helper class for verifying the string table.
893 class StringTableVerifier : public ObjectVisitor {
895 void VisitPointers(Object** start, Object** end) {
896 // Visit all HeapObject pointers in [start, end).
897 for (Object** p = start; p < end; p++) {
898 if ((*p)->IsHeapObject()) {
899 // Check that the string is actually internalized.
900 CHECK((*p)->IsTheHole() || (*p)->IsUndefined() ||
901 (*p)->IsInternalizedString());
908 static void VerifyStringTable(Heap* heap) {
909 StringTableVerifier verifier;
910 heap->string_table()->IterateElements(&verifier);
912 #endif // VERIFY_HEAP
915 static bool AbortIncrementalMarkingAndCollectGarbage(
916 Heap* heap, AllocationSpace space, const char* gc_reason = NULL) {
917 heap->mark_compact_collector()->SetFlags(Heap::kAbortIncrementalMarkingMask);
918 bool result = heap->CollectGarbage(space, gc_reason);
919 heap->mark_compact_collector()->SetFlags(Heap::kNoGCFlags);
924 void Heap::ReserveSpace(int* sizes, Address* locations_out) {
925 bool gc_performed = true;
927 static const int kThreshold = 20;
928 while (gc_performed && counter++ < kThreshold) {
929 gc_performed = false;
930 DCHECK(NEW_SPACE == FIRST_PAGED_SPACE - 1);
931 for (int space = NEW_SPACE; space <= LAST_PAGED_SPACE; space++) {
932 if (sizes[space] != 0) {
933 AllocationResult allocation;
934 if (space == NEW_SPACE) {
935 allocation = new_space()->AllocateRaw(sizes[space]);
937 allocation = paged_space(space)->AllocateRaw(sizes[space]);
940 if (!allocation.To(&node)) {
941 if (space == NEW_SPACE) {
942 Heap::CollectGarbage(NEW_SPACE,
943 "failed to reserve space in the new space");
945 AbortIncrementalMarkingAndCollectGarbage(
946 this, static_cast<AllocationSpace>(space),
947 "failed to reserve space in paged space");
952 // Mark with a free list node, in case we have a GC before
954 node->set_size(this, sizes[space]);
955 locations_out[space] = node->address();
962 // Failed to reserve the space after several attempts.
963 V8::FatalProcessOutOfMemory("Heap::ReserveSpace");
968 void Heap::EnsureFromSpaceIsCommitted() {
969 if (new_space_.CommitFromSpaceIfNeeded()) return;
971 // Committing memory to from space failed.
972 // Memory is exhausted and we will die.
973 V8::FatalProcessOutOfMemory("Committing semi space failed.");
977 void Heap::ClearJSFunctionResultCaches() {
978 if (isolate_->bootstrapper()->IsActive()) return;
980 Object* context = native_contexts_list();
981 while (!context->IsUndefined()) {
982 // Get the caches for this context. GC can happen when the context
983 // is not fully initialized, so the caches can be undefined.
984 Object* caches_or_undefined =
985 Context::cast(context)->get(Context::JSFUNCTION_RESULT_CACHES_INDEX);
986 if (!caches_or_undefined->IsUndefined()) {
987 FixedArray* caches = FixedArray::cast(caches_or_undefined);
989 int length = caches->length();
990 for (int i = 0; i < length; i++) {
991 JSFunctionResultCache::cast(caches->get(i))->Clear();
994 // Get the next context:
995 context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
1000 void Heap::ClearNormalizedMapCaches() {
1001 if (isolate_->bootstrapper()->IsActive() &&
1002 !incremental_marking()->IsMarking()) {
1006 Object* context = native_contexts_list();
1007 while (!context->IsUndefined()) {
1008 // GC can happen when the context is not fully initialized,
1009 // so the cache can be undefined.
1011 Context::cast(context)->get(Context::NORMALIZED_MAP_CACHE_INDEX);
1012 if (!cache->IsUndefined()) {
1013 NormalizedMapCache::cast(cache)->Clear();
1015 context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
1020 void Heap::UpdateSurvivalStatistics(int start_new_space_size) {
1021 if (start_new_space_size == 0) return;
1023 promotion_rate_ = (static_cast<double>(promoted_objects_size_) /
1024 static_cast<double>(start_new_space_size) * 100);
1026 semi_space_copied_rate_ =
1027 (static_cast<double>(semi_space_copied_object_size_) /
1028 static_cast<double>(start_new_space_size) * 100);
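// As a worked example: if the new space held 4 MB at the start of the GC and
// 1 MB was promoted while another 1 MB was copied within the semispaces, both
// rates come out at 25 (percent) and the combined survival rate below is 50.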
1030 double survival_rate = promotion_rate_ + semi_space_copied_rate_;
1032 if (survival_rate > kYoungSurvivalRateHighThreshold) {
1033 high_survival_rate_period_length_++;
1035 high_survival_rate_period_length_ = 0;
1039 bool Heap::PerformGarbageCollection(
1040 GarbageCollector collector, const v8::GCCallbackFlags gc_callback_flags) {
1041 int freed_global_handles = 0;
1043 if (collector != SCAVENGER) {
1044 PROFILE(isolate_, CodeMovingGCEvent());
1048 if (FLAG_verify_heap) {
1049 VerifyStringTable(this);
1054 collector == MARK_COMPACTOR ? kGCTypeMarkSweepCompact : kGCTypeScavenge;
1057 GCCallbacksScope scope(this);
1058 if (scope.CheckReenter()) {
1059 AllowHeapAllocation allow_allocation;
1060 GCTracer::Scope scope(tracer(), GCTracer::Scope::EXTERNAL);
1061 VMState<EXTERNAL> state(isolate_);
1062 HandleScope handle_scope(isolate_);
1063 CallGCPrologueCallbacks(gc_type, kNoGCCallbackFlags);
1067 EnsureFromSpaceIsCommitted();
1069 int start_new_space_size = Heap::new_space()->SizeAsInt();
1071 if (IsHighSurvivalRate()) {
1072 // We speed up the incremental marker if it is running so that it
1073 // does not fall behind the rate of promotion, which would cause a
1074 // constantly growing old space.
1075 incremental_marking()->NotifyOfHighPromotionRate();
1078 if (collector == MARK_COMPACTOR) {
1079 // Perform mark-sweep with optional compaction.
1081 sweep_generation_++;
1082 // Temporarily set the limit for the case when PostGarbageCollectionProcessing
1083 // allocates and triggers GC. The real limit is set after
1084 // PostGarbageCollectionProcessing.
1085 old_generation_allocation_limit_ =
1086 OldGenerationAllocationLimit(PromotedSpaceSizeOfObjects(), 0);
1087 old_gen_exhausted_ = false;
1092 UpdateSurvivalStatistics(start_new_space_size);
1094 isolate_->counters()->objs_since_last_young()->Set(0);
1096 // Callbacks that fire after this point might trigger nested GCs and
1097 // restart incremental marking, so the assertion can't be moved down.
1098 DCHECK(collector == SCAVENGER || incremental_marking()->IsStopped());
1100 gc_post_processing_depth_++;
1102 AllowHeapAllocation allow_allocation;
1103 GCTracer::Scope scope(tracer(), GCTracer::Scope::EXTERNAL);
1104 freed_global_handles =
1105 isolate_->global_handles()->PostGarbageCollectionProcessing(collector);
1107 gc_post_processing_depth_--;
1109 isolate_->eternal_handles()->PostGarbageCollectionProcessing(this);
1111 // Update relocatables.
1112 Relocatable::PostGarbageCollectionProcessing(isolate_);
1114 if (collector == MARK_COMPACTOR) {
1115 // Register the amount of external allocated memory.
1116 amount_of_external_allocated_memory_at_last_global_gc_ =
1117 amount_of_external_allocated_memory_;
1118 old_generation_allocation_limit_ = OldGenerationAllocationLimit(
1119 PromotedSpaceSizeOfObjects(), freed_global_handles);
1123 GCCallbacksScope scope(this);
1124 if (scope.CheckReenter()) {
1125 AllowHeapAllocation allow_allocation;
1126 GCTracer::Scope scope(tracer(), GCTracer::Scope::EXTERNAL);
1127 VMState<EXTERNAL> state(isolate_);
1128 HandleScope handle_scope(isolate_);
1129 CallGCEpilogueCallbacks(gc_type, gc_callback_flags);
1134 if (FLAG_verify_heap) {
1135 VerifyStringTable(this);
1139 return freed_global_handles > 0;
1143 void Heap::CallGCPrologueCallbacks(GCType gc_type, GCCallbackFlags flags) {
1144 for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
1145 if (gc_type & gc_prologue_callbacks_[i].gc_type) {
1146 if (!gc_prologue_callbacks_[i].pass_isolate_) {
1147 v8::GCPrologueCallback callback =
1148 reinterpret_cast<v8::GCPrologueCallback>(
1149 gc_prologue_callbacks_[i].callback);
1150 callback(gc_type, flags);
1152 v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(this->isolate());
1153 gc_prologue_callbacks_[i].callback(isolate, gc_type, flags);
1160 void Heap::CallGCEpilogueCallbacks(GCType gc_type,
1161 GCCallbackFlags gc_callback_flags) {
1162 for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
1163 if (gc_type & gc_epilogue_callbacks_[i].gc_type) {
1164 if (!gc_epilogue_callbacks_[i].pass_isolate_) {
1165 v8::GCPrologueCallback callback =
1166 reinterpret_cast<v8::GCPrologueCallback>(
1167 gc_epilogue_callbacks_[i].callback);
1168 callback(gc_type, gc_callback_flags);
1170 v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(this->isolate());
1171 gc_epilogue_callbacks_[i].callback(isolate, gc_type, gc_callback_flags);
1178 void Heap::MarkCompact() {
1179 gc_state_ = MARK_COMPACT;
1180 LOG(isolate_, ResourceEvent("markcompact", "begin"));
1182 uint64_t size_of_objects_before_gc = SizeOfObjects();
1184 mark_compact_collector_.Prepare();
1188 MarkCompactPrologue();
1190 mark_compact_collector_.CollectGarbage();
1192 LOG(isolate_, ResourceEvent("markcompact", "end"));
1194 gc_state_ = NOT_IN_GC;
1196 isolate_->counters()->objs_since_last_full()->Set(0);
1198 flush_monomorphic_ics_ = false;
1200 if (FLAG_allocation_site_pretenuring) {
1201 EvaluateOldSpaceLocalPretenuring(size_of_objects_before_gc);
1206 void Heap::MarkCompactPrologue() {
1207 // At any old GC, clear the keyed lookup cache to enable collection of unused maps.
1209 isolate_->keyed_lookup_cache()->Clear();
1210 isolate_->context_slot_cache()->Clear();
1211 isolate_->descriptor_lookup_cache()->Clear();
1212 RegExpResultsCache::Clear(string_split_cache());
1213 RegExpResultsCache::Clear(regexp_multiple_cache());
1215 isolate_->compilation_cache()->MarkCompactPrologue();
1217 CompletelyClearInstanceofCache();
1219 FlushNumberStringCache();
1220 if (FLAG_cleanup_code_caches_at_gc) {
1221 polymorphic_code_cache()->set_cache(undefined_value());
1224 ClearNormalizedMapCaches();
1228 // Helper class for copying HeapObjects
1229 class ScavengeVisitor : public ObjectVisitor {
1231 explicit ScavengeVisitor(Heap* heap) : heap_(heap) {}
1233 void VisitPointer(Object** p) { ScavengePointer(p); }
1235 void VisitPointers(Object** start, Object** end) {
1236 // Copy all HeapObject pointers in [start, end)
1237 for (Object** p = start; p < end; p++) ScavengePointer(p);
1241 void ScavengePointer(Object** p) {
1242 Object* object = *p;
1243 if (!heap_->InNewSpace(object)) return;
1244 Heap::ScavengeObject(reinterpret_cast<HeapObject**>(p),
1245 reinterpret_cast<HeapObject*>(object));
1253 // Visitor class to verify pointers in code or data space do not point into
1255 class VerifyNonPointerSpacePointersVisitor : public ObjectVisitor {
1257 explicit VerifyNonPointerSpacePointersVisitor(Heap* heap) : heap_(heap) {}
1258 void VisitPointers(Object** start, Object** end) {
1259 for (Object** current = start; current < end; current++) {
1260 if ((*current)->IsHeapObject()) {
1261 CHECK(!heap_->InNewSpace(HeapObject::cast(*current)));
1271 static void VerifyNonPointerSpacePointers(Heap* heap) {
1272 // Verify that there are no pointers to new space in spaces where we
1273 // do not expect them.
1274 VerifyNonPointerSpacePointersVisitor v(heap);
1275 HeapObjectIterator code_it(heap->code_space());
1276 for (HeapObject* object = code_it.Next(); object != NULL;
1277 object = code_it.Next())
1278 object->Iterate(&v);
1280 // The old data space is normally swept conservatively, so the iterator does
1281 // not work there and we normally skip this verification.
1282 if (heap->old_data_space()->swept_precisely()) {
1283 HeapObjectIterator data_it(heap->old_data_space());
1284 for (HeapObject* object = data_it.Next(); object != NULL;
1285 object = data_it.Next())
1286 object->Iterate(&v);
1289 #endif // VERIFY_HEAP
1292 void Heap::CheckNewSpaceExpansionCriteria() {
1293 if (new_space_.Capacity() < new_space_.MaximumCapacity() &&
1294 survived_since_last_expansion_ > new_space_.Capacity()) {
1295 // Grow the size of new space if there is room to grow, enough data
1296 // has survived scavenge since the last expansion and we are not in
1297 // high promotion mode.
1298 new_space_.Grow();
1299 survived_since_last_expansion_ = 0;
1304 static bool IsUnscavengedHeapObject(Heap* heap, Object** p) {
1305 return heap->InNewSpace(*p) &&
1306 !HeapObject::cast(*p)->map_word().IsForwardingAddress();
1310 void Heap::ScavengeStoreBufferCallback(Heap* heap, MemoryChunk* page,
1311 StoreBufferEvent event) {
1312 heap->store_buffer_rebuilder_.Callback(page, event);
1316 void StoreBufferRebuilder::Callback(MemoryChunk* page, StoreBufferEvent event) {
1317 if (event == kStoreBufferStartScanningPagesEvent) {
1318 start_of_current_page_ = NULL;
1319 current_page_ = NULL;
1320 } else if (event == kStoreBufferScanningPageEvent) {
1321 if (current_page_ != NULL) {
1322 // If this page already overflowed the store buffer during this iteration.
1323 if (current_page_->scan_on_scavenge()) {
1324 // Then we should wipe out the entries that have been added for it.
1325 store_buffer_->SetTop(start_of_current_page_);
1326 } else if (store_buffer_->Top() - start_of_current_page_ >=
1327 (store_buffer_->Limit() - store_buffer_->Top()) >> 2) {
1328 // Did we find too many pointers in the previous page? The heuristic is that
1329 // no page can take more than 1/5 of the remaining slots in the store buffer.
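// (The shift by two above encodes this: the entries recorded for the current
// page, Top() - start_of_current_page_, reach at least a quarter of the slots
// still free above Top(), which is algebraically the same as reaching a fifth
// of the slots that were free when scanning of this page started.)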
1331 current_page_->set_scan_on_scavenge(true);
1332 store_buffer_->SetTop(start_of_current_page_);
1334 // In this case the page we scanned took a reasonable number of slots in
1335 // the store buffer. It has now been rehabilitated and is no longer
1336 // marked scan_on_scavenge.
1337 DCHECK(!current_page_->scan_on_scavenge());
1340 start_of_current_page_ = store_buffer_->Top();
1341 current_page_ = page;
1342 } else if (event == kStoreBufferFullEvent) {
1343 // The current page overflowed the store buffer again. Wipe out its entries
1344 // in the store buffer and mark it scan-on-scavenge again. This may happen
1345 // several times while scanning.
1346 if (current_page_ == NULL) {
1347 // Store Buffer overflowed while scanning promoted objects. These are not
1348 // in any particular page, though they are likely to be clustered by the
1349 // allocation routines.
1350 store_buffer_->EnsureSpace(StoreBuffer::kStoreBufferSize / 2);
1352 // Store Buffer overflowed while scanning a particular old space page for
1353 // pointers to new space.
1354 DCHECK(current_page_ == page);
1355 DCHECK(page != NULL);
1356 current_page_->set_scan_on_scavenge(true);
1357 DCHECK(start_of_current_page_ != store_buffer_->Top());
1358 store_buffer_->SetTop(start_of_current_page_);
1366 void PromotionQueue::Initialize() {
1367 // Assumes that a NewSpacePage exactly fits a number of promotion queue
1368 // entries (where each is a pair of intptr_t). This allows us to simplify
1369 // the test for when to switch pages.
1370 DCHECK((Page::kPageSize - MemoryChunk::kBodyOffset) % (2 * kPointerSize) == 0);
1372 limit_ = reinterpret_cast<intptr_t*>(heap_->new_space()->ToSpaceStart());
1373 front_ = rear_ =
1374     reinterpret_cast<intptr_t*>(heap_->new_space()->ToSpaceEnd());
1375 emergency_stack_ = NULL;
1379 void PromotionQueue::RelocateQueueHead() {
1380 DCHECK(emergency_stack_ == NULL);
1382 Page* p = Page::FromAllocationTop(reinterpret_cast<Address>(rear_));
1383 intptr_t* head_start = rear_;
1384 intptr_t* head_end = Min(front_, reinterpret_cast<intptr_t*>(p->area_end()));
1387 static_cast<int>(head_end - head_start) / kEntrySizeInWords;
1389 emergency_stack_ = new List<Entry>(2 * entries_count);
1391 while (head_start != head_end) {
1392 int size = static_cast<int>(*(head_start++));
1393 HeapObject* obj = reinterpret_cast<HeapObject*>(*(head_start++));
1394 emergency_stack_->Add(Entry(obj, size));
1400 class ScavengeWeakObjectRetainer : public WeakObjectRetainer {
1402 explicit ScavengeWeakObjectRetainer(Heap* heap) : heap_(heap) {}
1404 virtual Object* RetainAs(Object* object) {
1405 if (!heap_->InFromSpace(object)) {
1409 MapWord map_word = HeapObject::cast(object)->map_word();
1410 if (map_word.IsForwardingAddress()) {
1411 return map_word.ToForwardingAddress();
1421 void Heap::Scavenge() {
1422 RelocationLock relocation_lock(this);
1425 if (FLAG_verify_heap) VerifyNonPointerSpacePointers(this);
1428 gc_state_ = SCAVENGE;
1430 // Implements Cheney's copying algorithm
1431 LOG(isolate_, ResourceEvent("scavenge", "begin"));
1433 // Clear descriptor cache.
1434 isolate_->descriptor_lookup_cache()->Clear();
1436 // Used for updating survived_since_last_expansion_ at function end.
1437 intptr_t survived_watermark = PromotedSpaceSizeOfObjects();
1439 SelectScavengingVisitorsTable();
1441 incremental_marking()->PrepareForScavenge();
1443 // Flip the semispaces. After flipping, to space is empty and from space has
1444 // live objects.
1445 new_space_.Flip();
1446 new_space_.ResetAllocationInfo();
1448 // We need to sweep newly copied objects which can be either in the
1449 // to space or promoted to the old generation. For to-space
1450 // objects, we treat the bottom of the to space as a queue. Newly
1451 // copied and unswept objects lie between a 'front' mark and the
1452 // allocation pointer.
1454 // Promoted objects can go into various old-generation spaces, and
1455 // can be allocated internally in the spaces (from the free list).
1456 // We treat the top of the to space as a queue of addresses of
1457 // promoted objects. The addresses of newly promoted and unswept
1458 // objects lie between a 'front' mark and a 'rear' mark that is
1459 // updated as a side effect of promoting an object.
1461 // There is guaranteed to be enough room at the top of the to space
1462 // for the addresses of promoted objects: every object promoted
1463 // frees up its size in bytes from the top of the new space, and
1464 // objects are at least one pointer in size.
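// Roughly, to space looks like this during the scavenge:
//
//   | copied objects ... | unallocated ......... | promotion queue entries |
//   ^ ToSpaceStart        ^ allocation top         ^ grows down from ToSpaceEnd
//
// DoScavenge() walks the copied objects from a moving front mark up to the
// allocation top, while the promotion queue stores two-word entries downwards
// from the end of to space.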
1465 Address new_space_front = new_space_.ToSpaceStart();
1466 promotion_queue_.Initialize();
1469 store_buffer()->Clean();
1472 ScavengeVisitor scavenge_visitor(this);
1474 IterateRoots(&scavenge_visitor, VISIT_ALL_IN_SCAVENGE);
1476 // Copy objects reachable from the old generation.
1478 StoreBufferRebuildScope scope(this, store_buffer(),
1479 &ScavengeStoreBufferCallback);
1480 store_buffer()->IteratePointersToNewSpace(&ScavengeObject);
1483 // Copy objects reachable from simple cells by scavenging cell values directly.
1485 HeapObjectIterator cell_iterator(cell_space_);
1486 for (HeapObject* heap_object = cell_iterator.Next(); heap_object != NULL;
1487 heap_object = cell_iterator.Next()) {
1488 if (heap_object->IsCell()) {
1489 Cell* cell = Cell::cast(heap_object);
1490 Address value_address = cell->ValueAddress();
1491 scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(value_address));
1495 // Copy objects reachable from global property cells by scavenging global
1496 // property cell values directly.
1497 HeapObjectIterator js_global_property_cell_iterator(property_cell_space_);
1498 for (HeapObject* heap_object = js_global_property_cell_iterator.Next();
1499 heap_object != NULL;
1500 heap_object = js_global_property_cell_iterator.Next()) {
1501 if (heap_object->IsPropertyCell()) {
1502 PropertyCell* cell = PropertyCell::cast(heap_object);
1503 Address value_address = cell->ValueAddress();
1504 scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(value_address));
1505 Address type_address = cell->TypeAddress();
1506 scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(type_address));
1510 // Copy objects reachable from the encountered weak collections list.
1511 scavenge_visitor.VisitPointer(&encountered_weak_collections_);
1513 // Copy objects reachable from the code flushing candidates list.
1514 MarkCompactCollector* collector = mark_compact_collector();
1515 if (collector->is_code_flushing_enabled()) {
1516 collector->code_flusher()->IteratePointersToFromSpace(&scavenge_visitor);
1519 new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
1521 while (isolate()->global_handles()->IterateObjectGroups(
1522 &scavenge_visitor, &IsUnscavengedHeapObject)) {
1523 new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
1525 isolate()->global_handles()->RemoveObjectGroups();
1526 isolate()->global_handles()->RemoveImplicitRefGroups();
1528 isolate_->global_handles()->IdentifyNewSpaceWeakIndependentHandles(
1529 &IsUnscavengedHeapObject);
1530 isolate_->global_handles()->IterateNewSpaceWeakIndependentRoots(
1532 new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
1534 UpdateNewSpaceReferencesInExternalStringTable(
1535 &UpdateNewSpaceReferenceInExternalStringTableEntry);
1537 promotion_queue_.Destroy();
1539 incremental_marking()->UpdateMarkingDequeAfterScavenge();
1541 ScavengeWeakObjectRetainer weak_object_retainer(this);
1542 ProcessWeakReferences(&weak_object_retainer);
1544 DCHECK(new_space_front == new_space_.top());
1547 new_space_.set_age_mark(new_space_.top());
1549 new_space_.LowerInlineAllocationLimit(
1550 new_space_.inline_allocation_limit_step());
1552 // Update how much has survived scavenge.
1553 IncrementYoungSurvivorsCounter(static_cast<int>(
1554 (PromotedSpaceSizeOfObjects() - survived_watermark) + new_space_.Size()));
1556 LOG(isolate_, ResourceEvent("scavenge", "end"));
1558 gc_state_ = NOT_IN_GC;
1560 scavenges_since_last_idle_round_++;
1564 String* Heap::UpdateNewSpaceReferenceInExternalStringTableEntry(Heap* heap,
1566 MapWord first_word = HeapObject::cast(*p)->map_word();
1568 if (!first_word.IsForwardingAddress()) {
1569 // Unreachable external string can be finalized.
1570 heap->FinalizeExternalString(String::cast(*p));
1574 // String is still reachable.
1575 return String::cast(first_word.ToForwardingAddress());
1579 void Heap::UpdateNewSpaceReferencesInExternalStringTable(
1580 ExternalStringTableUpdaterCallback updater_func) {
1582 if (FLAG_verify_heap) {
1583 external_string_table_.Verify();
1587 if (external_string_table_.new_space_strings_.is_empty()) return;
1589 Object** start = &external_string_table_.new_space_strings_[0];
1590 Object** end = start + external_string_table_.new_space_strings_.length();
1591 Object** last = start;
1593 for (Object** p = start; p < end; ++p) {
1594 DCHECK(InFromSpace(*p));
1595 String* target = updater_func(this, p);
1597 if (target == NULL) continue;
1599 DCHECK(target->IsExternalString());
1601 if (InNewSpace(target)) {
1602   // String is still in new space. Update the table entry.
1603   *last++ = target;
1604   continue;
1605 }
1606 // String got promoted. Move it to the old string list.
1607 external_string_table_.AddOldString(target);
1611 DCHECK(last <= end);
1612 external_string_table_.ShrinkNewStrings(static_cast<int>(last - start));
1616 void Heap::UpdateReferencesInExternalStringTable(
1617 ExternalStringTableUpdaterCallback updater_func) {
1618 // Update old space string references.
1619 if (external_string_table_.old_space_strings_.length() > 0) {
1620 Object** start = &external_string_table_.old_space_strings_[0];
1621 Object** end = start + external_string_table_.old_space_strings_.length();
1622 for (Object** p = start; p < end; ++p) *p = updater_func(this, p);
1625 UpdateNewSpaceReferencesInExternalStringTable(updater_func);
1629 void Heap::ProcessWeakReferences(WeakObjectRetainer* retainer) {
1630 ProcessArrayBuffers(retainer);
1631 ProcessNativeContexts(retainer);
1632 // TODO(mvstanton): AllocationSites only need to be processed during
1633 // MARK_COMPACT, as they live in old space. Verify and address.
1634 ProcessAllocationSites(retainer);
1638 void Heap::ProcessNativeContexts(WeakObjectRetainer* retainer) {
1639 Object* head = VisitWeakList<Context>(this, native_contexts_list(), retainer);
1640 // Update the head of the list of contexts.
1641 set_native_contexts_list(head);
1645 void Heap::ProcessArrayBuffers(WeakObjectRetainer* retainer) {
1646 Object* array_buffer_obj =
1647 VisitWeakList<JSArrayBuffer>(this, array_buffers_list(), retainer);
1648 set_array_buffers_list(array_buffer_obj);
1652 void Heap::TearDownArrayBuffers() {
1653 Object* undefined = undefined_value();
1654 for (Object* o = array_buffers_list(); o != undefined;) {
1655 JSArrayBuffer* buffer = JSArrayBuffer::cast(o);
1656 Runtime::FreeArrayBuffer(isolate(), buffer);
1657 o = buffer->weak_next();
1659 set_array_buffers_list(undefined);
1663 void Heap::ProcessAllocationSites(WeakObjectRetainer* retainer) {
1664 Object* allocation_site_obj =
1665 VisitWeakList<AllocationSite>(this, allocation_sites_list(), retainer);
1666 set_allocation_sites_list(allocation_site_obj);
1670 void Heap::ResetAllAllocationSitesDependentCode(PretenureFlag flag) {
1671 DisallowHeapAllocation no_allocation_scope;
1672 Object* cur = allocation_sites_list();
1673 bool marked = false;
1674 while (cur->IsAllocationSite()) {
1675 AllocationSite* casted = AllocationSite::cast(cur);
1676 if (casted->GetPretenureMode() == flag) {
1677 casted->ResetPretenureDecision();
1678 casted->set_deopt_dependent_code(true);
1681 cur = casted->weak_next();
1683 if (marked) isolate_->stack_guard()->RequestDeoptMarkedAllocationSites();
1687 void Heap::EvaluateOldSpaceLocalPretenuring(
1688 uint64_t size_of_objects_before_gc) {
1689 uint64_t size_of_objects_after_gc = SizeOfObjects();
1690 double old_generation_survival_rate =
1691 (static_cast<double>(size_of_objects_after_gc) * 100) /
1692 static_cast<double>(size_of_objects_before_gc);
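// For example, if 100 MB of objects were live before the mark-sweep and 80 MB
// remain afterwards, the survival rate is 80; only rates below
// kOldSurvivalRateLowThreshold trigger the reset below.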
1694 if (old_generation_survival_rate < kOldSurvivalRateLowThreshold) {
1695 // Too many objects died in the old generation; pretenuring of the wrong
1696 // allocation sites may be the cause. We have to deopt all
1697 // dependent code registered in the allocation sites to re-evaluate
1698 // our pretenuring decisions.
1699 ResetAllAllocationSitesDependentCode(TENURED);
1700 if (FLAG_trace_pretenuring) {
1702 "Deopt all allocation sites dependent code due to low survival "
1703 "rate in the old generation %f\n",
1704 old_generation_survival_rate);
1710 void Heap::VisitExternalResources(v8::ExternalResourceVisitor* visitor) {
1711 DisallowHeapAllocation no_allocation;
1712 // All external strings are listed in the external string table.
1714 class ExternalStringTableVisitorAdapter : public ObjectVisitor {
1716 explicit ExternalStringTableVisitorAdapter(
1717 v8::ExternalResourceVisitor* visitor)
1718 : visitor_(visitor) {}
1719 virtual void VisitPointers(Object** start, Object** end) {
1720 for (Object** p = start; p < end; p++) {
1721 DCHECK((*p)->IsExternalString());
1722 visitor_->VisitExternalString(
1723 Utils::ToLocal(Handle<String>(String::cast(*p))));
1728 v8::ExternalResourceVisitor* visitor_;
1729 } external_string_table_visitor(visitor);
1731 external_string_table_.Iterate(&external_string_table_visitor);
1735 class NewSpaceScavenger : public StaticNewSpaceVisitor<NewSpaceScavenger> {
1737 static inline void VisitPointer(Heap* heap, Object** p) {
1738 Object* object = *p;
1739 if (!heap->InNewSpace(object)) return;
1740 Heap::ScavengeObject(reinterpret_cast<HeapObject**>(p),
1741 reinterpret_cast<HeapObject*>(object));
1746 Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
1747 Address new_space_front) {
1749 SemiSpace::AssertValidRange(new_space_front, new_space_.top());
1750 // The addresses new_space_front and new_space_.top() define a
1751 // queue of unprocessed copied objects. Process them until the queue is empty.
1753 while (new_space_front != new_space_.top()) {
1754 if (!NewSpacePage::IsAtEnd(new_space_front)) {
1755 HeapObject* object = HeapObject::FromAddress(new_space_front);
1757 NewSpaceScavenger::IterateBody(object->map(), object);
1760 NewSpacePage::FromLimit(new_space_front)->next_page()->area_start();
1764 // Promote and process all the to-be-promoted objects.
1766 StoreBufferRebuildScope scope(this, store_buffer(),
1767 &ScavengeStoreBufferCallback);
1768 while (!promotion_queue()->is_empty()) {
1771 promotion_queue()->remove(&target, &size);
1773 // A promoted object might already have been partially visited during old
1774 // space pointer iteration. Thus we search specifically for pointers into
1775 // the from semispace instead of looking for pointers to new space.
1777 DCHECK(!target->IsMap());
1778 IterateAndMarkPointersToFromSpace(
1779 target->address(), target->address() + size, &ScavengeObject);
1783 // Take another spin if there are now unswept objects in new space
1784 // (there are currently no more unswept promoted objects).
1785 } while (new_space_front != new_space_.top());
1787 return new_space_front;
1791 STATIC_ASSERT((FixedDoubleArray::kHeaderSize & kDoubleAlignmentMask) ==
1793 STATIC_ASSERT((ConstantPoolArray::kFirstEntryOffset & kDoubleAlignmentMask) ==
1795 STATIC_ASSERT((ConstantPoolArray::kExtendedFirstOffset &
1796 kDoubleAlignmentMask) == 0); // NOLINT
1799 INLINE(static HeapObject* EnsureDoubleAligned(Heap* heap, HeapObject* object,
1802 static HeapObject* EnsureDoubleAligned(Heap* heap, HeapObject* object,
1804 if ((OffsetFrom(object->address()) & kDoubleAlignmentMask) != 0) {
1805 heap->CreateFillerObjectAt(object->address(), kPointerSize);
1806 return HeapObject::FromAddress(object->address() + kPointerSize);
1808 heap->CreateFillerObjectAt(object->address() + size - kPointerSize,
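// Illustrative example of EnsureDoubleAligned: on a 32-bit target
// (kPointerSize == 4) a raw allocation of size + kPointerSize that happens to
// start at an address ending in ...4 is not 8-byte aligned, so a one-word
// filler is written at that address and the object itself starts one word
// later; if the allocation is already aligned, the spare word at the end is
// turned into a filler instead.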
1815 enum LoggingAndProfiling {
1816 LOGGING_AND_PROFILING_ENABLED,
1817 LOGGING_AND_PROFILING_DISABLED
1821 enum MarksHandling { TRANSFER_MARKS, IGNORE_MARKS };
1824 template <MarksHandling marks_handling,
1825 LoggingAndProfiling logging_and_profiling_mode>
1826 class ScavengingVisitor : public StaticVisitorBase {
1828 static void Initialize() {
1829 table_.Register(kVisitSeqOneByteString, &EvacuateSeqOneByteString);
1830 table_.Register(kVisitSeqTwoByteString, &EvacuateSeqTwoByteString);
1831 table_.Register(kVisitShortcutCandidate, &EvacuateShortcutCandidate);
1832 table_.Register(kVisitByteArray, &EvacuateByteArray);
1833 table_.Register(kVisitFixedArray, &EvacuateFixedArray);
1834 table_.Register(kVisitFixedDoubleArray, &EvacuateFixedDoubleArray);
1835 table_.Register(kVisitFixedTypedArray, &EvacuateFixedTypedArray);
1836 table_.Register(kVisitFixedFloat64Array, &EvacuateFixedFloat64Array);
1839 kVisitNativeContext,
1840 &ObjectEvacuationStrategy<POINTER_OBJECT>::template VisitSpecialized<
1845 &ObjectEvacuationStrategy<POINTER_OBJECT>::template VisitSpecialized<
1846 ConsString::kSize>);
1850 &ObjectEvacuationStrategy<POINTER_OBJECT>::template VisitSpecialized<
1851 SlicedString::kSize>);
1855 &ObjectEvacuationStrategy<POINTER_OBJECT>::template VisitSpecialized<
1859 kVisitSharedFunctionInfo,
1860 &ObjectEvacuationStrategy<POINTER_OBJECT>::template VisitSpecialized<
1861 SharedFunctionInfo::kSize>);
1863 table_.Register(kVisitJSWeakCollection,
1864 &ObjectEvacuationStrategy<POINTER_OBJECT>::Visit);
1866 table_.Register(kVisitJSArrayBuffer,
1867 &ObjectEvacuationStrategy<POINTER_OBJECT>::Visit);
1869 table_.Register(kVisitJSTypedArray,
1870 &ObjectEvacuationStrategy<POINTER_OBJECT>::Visit);
1872 table_.Register(kVisitJSDataView,
1873 &ObjectEvacuationStrategy<POINTER_OBJECT>::Visit);
1875 table_.Register(kVisitJSRegExp,
1876 &ObjectEvacuationStrategy<POINTER_OBJECT>::Visit);
1878 if (marks_handling == IGNORE_MARKS) {
1881 &ObjectEvacuationStrategy<POINTER_OBJECT>::template VisitSpecialized<
1882 JSFunction::kSize>);
1884 table_.Register(kVisitJSFunction, &EvacuateJSFunction);
1887 table_.RegisterSpecializations<ObjectEvacuationStrategy<DATA_OBJECT>,
1888 kVisitDataObject, kVisitDataObjectGeneric>();
1890 table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>,
1891 kVisitJSObject, kVisitJSObjectGeneric>();
1893 table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>,
1894 kVisitStruct, kVisitStructGeneric>();
1897 static VisitorDispatchTable<ScavengingCallback>* GetTable() {
1902 enum ObjectContents { DATA_OBJECT, POINTER_OBJECT };
1904 static void RecordCopiedObject(Heap* heap, HeapObject* obj) {
1905 bool should_record = false;
1907 should_record = FLAG_heap_stats;
1909 should_record = should_record || FLAG_log_gc;
1910 if (should_record) {
1911 if (heap->new_space()->Contains(obj)) {
1912 heap->new_space()->RecordAllocation(obj);
1914 heap->new_space()->RecordPromotion(obj);
1919 // Helper function used by SemiSpaceCopyObject and PromoteObject to copy a
1920 // source object to an allocated target object and update the forwarding
1921 // pointer in the source object.
1922 INLINE(static void MigrateObject(Heap* heap, HeapObject* source,
1923 HeapObject* target, int size)) {
1924 // If we migrate into to-space, then the to-space top pointer should be
1925 // right after the target object. Incorporate double alignment over-allocation.
1927 DCHECK(!heap->InToSpace(target) ||
1928 target->address() + size == heap->new_space()->top() ||
1929 target->address() + size + kPointerSize == heap->new_space()->top());
1931 // Make sure that we do not overwrite the promotion queue which is at
1932 // the end of to-space.
1933 DCHECK(!heap->InToSpace(target) ||
1934 heap->promotion_queue()->IsBelowPromotionQueue(
1935 heap->new_space()->top()));
1937 // Copy the content of source to target.
1938 heap->CopyBlock(target->address(), source->address(), size);
1940 // Set the forwarding address.
1941 source->set_map_word(MapWord::FromForwardingAddress(target));
1943 if (logging_and_profiling_mode == LOGGING_AND_PROFILING_ENABLED) {
1944 // Update NewSpace stats if necessary.
1945 RecordCopiedObject(heap, target);
1946 heap->OnMoveEvent(target, source, size);
1949 if (marks_handling == TRANSFER_MARKS) {
1950 if (Marking::TransferColor(source, target)) {
1951 MemoryChunk::IncrementLiveBytesFromGC(target->address(), size);
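// Note: after MigrateObject the from-space copy no longer has a usable map;
// its map slot holds a forwarding map word instead. Any slot that still points
// at the stale copy is fixed up later by ScavengeObject, which decodes the
// forwarding address and rewrites the slot to the new location rather than
// copying the object a second time.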
1956 template <int alignment>
1957 static inline bool SemiSpaceCopyObject(Map* map, HeapObject** slot,
1958 HeapObject* object, int object_size) {
1959 Heap* heap = map->GetHeap();
1961 int allocation_size = object_size;
1962 if (alignment != kObjectAlignment) {
1963 DCHECK(alignment == kDoubleAlignment);
1964 allocation_size += kPointerSize;
1967 DCHECK(heap->AllowedToBeMigrated(object, NEW_SPACE));
1968 AllocationResult allocation =
1969 heap->new_space()->AllocateRaw(allocation_size);
1971 HeapObject* target = NULL; // Initialization to please compiler.
1972 if (allocation.To(&target)) {
1973 // Order is important here: Set the promotion limit before storing a
1974 // filler for double alignment or migrating the object. Otherwise we
1975 // may end up overwriting promotion queue entries when we migrate the object.
1977 heap->promotion_queue()->SetNewLimit(heap->new_space()->top());
1979 if (alignment != kObjectAlignment) {
1980 target = EnsureDoubleAligned(heap, target, allocation_size);
1983 // Order is important: the slot might be inside the target if the target was
1984 // allocated over a dead object and the slot comes from the store buffer.
1987 MigrateObject(heap, object, target, object_size);
1989 heap->IncrementSemiSpaceCopiedObjectSize(object_size);
1996 template <ObjectContents object_contents, int alignment>
1997 static inline bool PromoteObject(Map* map, HeapObject** slot,
1998 HeapObject* object, int object_size) {
1999 Heap* heap = map->GetHeap();
2001 int allocation_size = object_size;
2002 if (alignment != kObjectAlignment) {
2003 DCHECK(alignment == kDoubleAlignment);
2004 allocation_size += kPointerSize;
2007 AllocationResult allocation;
2008 if (object_contents == DATA_OBJECT) {
2009 DCHECK(heap->AllowedToBeMigrated(object, OLD_DATA_SPACE));
2010 allocation = heap->old_data_space()->AllocateRaw(allocation_size);
2012 DCHECK(heap->AllowedToBeMigrated(object, OLD_POINTER_SPACE));
2013 allocation = heap->old_pointer_space()->AllocateRaw(allocation_size);
2016 HeapObject* target = NULL; // Initialization to please compiler.
2017 if (allocation.To(&target)) {
2018 if (alignment != kObjectAlignment) {
2019 target = EnsureDoubleAligned(heap, target, allocation_size);
2022 // Order is important: the slot might be inside the target if the target was
2023 // allocated over a dead object and the slot comes from the store buffer.
2026 MigrateObject(heap, object, target, object_size);
2028 if (object_contents == POINTER_OBJECT) {
2029 if (map->instance_type() == JS_FUNCTION_TYPE) {
2030 heap->promotion_queue()->insert(target,
2031 JSFunction::kNonWeakFieldsEndOffset);
2033 heap->promotion_queue()->insert(target, object_size);
2036 heap->IncrementPromotedObjectsSize(object_size);
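// Note: promoted POINTER_OBJECTs are queued so that DoScavenge can later scan
// their bodies for remaining from-space pointers. For a JSFunction only the
// non-weak prefix (kNonWeakFieldsEndOffset) is enqueued; its raw, untagged
// code entry is not found by that scan and is instead recorded explicitly in
// EvacuateJSFunction below when marks are being transferred.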
2043 template <ObjectContents object_contents, int alignment>
2044 static inline void EvacuateObject(Map* map, HeapObject** slot,
2045 HeapObject* object, int object_size) {
2046 SLOW_DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
2047 SLOW_DCHECK(object->Size() == object_size);
2048 Heap* heap = map->GetHeap();
2050 if (!heap->ShouldBePromoted(object->address(), object_size)) {
2051 // A semi-space copy may fail due to fragmentation. In that case, we
2052 // try to promote the object.
2053 if (SemiSpaceCopyObject<alignment>(map, slot, object, object_size)) {
2058 if (PromoteObject<object_contents, alignment>(map, slot, object,
2063 // If promotion failed, we try to copy the object to the other semi-space
2064 if (SemiSpaceCopyObject<alignment>(map, slot, object, object_size)) return;
2070 static inline void EvacuateJSFunction(Map* map, HeapObject** slot,
2071 HeapObject* object) {
2072 ObjectEvacuationStrategy<POINTER_OBJECT>::template VisitSpecialized<
2073 JSFunction::kSize>(map, slot, object);
2075 HeapObject* target = *slot;
2076 MarkBit mark_bit = Marking::MarkBitFrom(target);
2077 if (Marking::IsBlack(mark_bit)) {
2078 // This object is black and it might not be rescanned by marker.
2079 // We should explicitly record code entry slot for compaction because
2080 // promotion queue processing (IterateAndMarkPointersToFromSpace) will
2081 // miss it as it is not HeapObject-tagged.
2082 Address code_entry_slot =
2083 target->address() + JSFunction::kCodeEntryOffset;
2084 Code* code = Code::cast(Code::GetObjectFromEntryAddress(code_entry_slot));
2085 map->GetHeap()->mark_compact_collector()->RecordCodeEntrySlot(
2086 code_entry_slot, code);
2091 static inline void EvacuateFixedArray(Map* map, HeapObject** slot,
2092 HeapObject* object) {
2093 int object_size = FixedArray::BodyDescriptor::SizeOf(map, object);
2094 EvacuateObject<POINTER_OBJECT, kObjectAlignment>(map, slot, object,
2099 static inline void EvacuateFixedDoubleArray(Map* map, HeapObject** slot,
2100 HeapObject* object) {
2101 int length = reinterpret_cast<FixedDoubleArray*>(object)->length();
2102 int object_size = FixedDoubleArray::SizeFor(length);
2103 EvacuateObject<DATA_OBJECT, kDoubleAlignment>(map, slot, object,
2108 static inline void EvacuateFixedTypedArray(Map* map, HeapObject** slot,
2109 HeapObject* object) {
2110 int object_size = reinterpret_cast<FixedTypedArrayBase*>(object)->size();
2111 EvacuateObject<DATA_OBJECT, kObjectAlignment>(map, slot, object,
2116 static inline void EvacuateFixedFloat64Array(Map* map, HeapObject** slot,
2117 HeapObject* object) {
2118 int object_size = reinterpret_cast<FixedFloat64Array*>(object)->size();
2119 EvacuateObject<DATA_OBJECT, kDoubleAlignment>(map, slot, object,
2124 static inline void EvacuateByteArray(Map* map, HeapObject** slot,
2125 HeapObject* object) {
2126 int object_size = reinterpret_cast<ByteArray*>(object)->ByteArraySize();
2127 EvacuateObject<DATA_OBJECT, kObjectAlignment>(map, slot, object,
2132 static inline void EvacuateSeqOneByteString(Map* map, HeapObject** slot,
2133 HeapObject* object) {
2134 int object_size = SeqOneByteString::cast(object)
2135 ->SeqOneByteStringSize(map->instance_type());
2136 EvacuateObject<DATA_OBJECT, kObjectAlignment>(map, slot, object,
2141 static inline void EvacuateSeqTwoByteString(Map* map, HeapObject** slot,
2142 HeapObject* object) {
2143 int object_size = SeqTwoByteString::cast(object)
2144 ->SeqTwoByteStringSize(map->instance_type());
2145 EvacuateObject<DATA_OBJECT, kObjectAlignment>(map, slot, object,
2150 static inline void EvacuateShortcutCandidate(Map* map, HeapObject** slot,
2151 HeapObject* object) {
2152 DCHECK(IsShortcutCandidate(map->instance_type()));
2154 Heap* heap = map->GetHeap();
2156 if (marks_handling == IGNORE_MARKS &&
2157 ConsString::cast(object)->unchecked_second() == heap->empty_string()) {
2159 HeapObject::cast(ConsString::cast(object)->unchecked_first());
2163 if (!heap->InNewSpace(first)) {
2164 object->set_map_word(MapWord::FromForwardingAddress(first));
2168 MapWord first_word = first->map_word();
2169 if (first_word.IsForwardingAddress()) {
2170 HeapObject* target = first_word.ToForwardingAddress();
2173 object->set_map_word(MapWord::FromForwardingAddress(target));
2177 heap->DoScavengeObject(first->map(), slot, first);
2178 object->set_map_word(MapWord::FromForwardingAddress(*slot));
2182 int object_size = ConsString::kSize;
2183 EvacuateObject<POINTER_OBJECT, kObjectAlignment>(map, slot, object,
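// Note: the shortcut above flattens a cons string whose second part is the
// empty string by forwarding the cons cell directly to its first part, so the
// wrapper is never copied. For example, scavenging cons("abc", "") simply
// redirects every referencing slot to the "abc" string. When marks must be
// transferred (incremental marking) or the second part is non-empty, the cons
// string is evacuated as an ordinary pointer object instead.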
2187 template <ObjectContents object_contents>
2188 class ObjectEvacuationStrategy {
2190 template <int object_size>
2191 static inline void VisitSpecialized(Map* map, HeapObject** slot,
2192 HeapObject* object) {
2193 EvacuateObject<object_contents, kObjectAlignment>(map, slot, object,
2197 static inline void Visit(Map* map, HeapObject** slot, HeapObject* object) {
2198 int object_size = map->instance_size();
2199 EvacuateObject<object_contents, kObjectAlignment>(map, slot, object,
2204 static VisitorDispatchTable<ScavengingCallback> table_;
2208 template <MarksHandling marks_handling,
2209 LoggingAndProfiling logging_and_profiling_mode>
2210 VisitorDispatchTable<ScavengingCallback>
2211 ScavengingVisitor<marks_handling, logging_and_profiling_mode>::table_;
2214 static void InitializeScavengingVisitorsTables() {
2215 ScavengingVisitor<TRANSFER_MARKS,
2216 LOGGING_AND_PROFILING_DISABLED>::Initialize();
2217 ScavengingVisitor<IGNORE_MARKS, LOGGING_AND_PROFILING_DISABLED>::Initialize();
2218 ScavengingVisitor<TRANSFER_MARKS,
2219 LOGGING_AND_PROFILING_ENABLED>::Initialize();
2220 ScavengingVisitor<IGNORE_MARKS, LOGGING_AND_PROFILING_ENABLED>::Initialize();
2224 void Heap::SelectScavengingVisitorsTable() {
2225 bool logging_and_profiling =
2226 FLAG_verify_predictable || isolate()->logger()->is_logging() ||
2227 isolate()->cpu_profiler()->is_profiling() ||
2228 (isolate()->heap_profiler() != NULL &&
2229 isolate()->heap_profiler()->is_tracking_object_moves());
2231 if (!incremental_marking()->IsMarking()) {
2232 if (!logging_and_profiling) {
2233 scavenging_visitors_table_.CopyFrom(ScavengingVisitor<
2234 IGNORE_MARKS, LOGGING_AND_PROFILING_DISABLED>::GetTable());
2236 scavenging_visitors_table_.CopyFrom(ScavengingVisitor<
2237 IGNORE_MARKS, LOGGING_AND_PROFILING_ENABLED>::GetTable());
2240 if (!logging_and_profiling) {
2241 scavenging_visitors_table_.CopyFrom(ScavengingVisitor<
2242 TRANSFER_MARKS, LOGGING_AND_PROFILING_DISABLED>::GetTable());
2244 scavenging_visitors_table_.CopyFrom(ScavengingVisitor<
2245 TRANSFER_MARKS, LOGGING_AND_PROFILING_ENABLED>::GetTable());
2248 if (incremental_marking()->IsCompacting()) {
2249 // When compacting, forbid short-circuiting of cons strings.
2250 // The scavenging code relies on the fact that a new space object
2251 // can't be evacuated into an evacuation candidate, but
2252 // short-circuiting violates this assumption.
2253 scavenging_visitors_table_.Register(
2254 StaticVisitorBase::kVisitShortcutCandidate,
2255 scavenging_visitors_table_.GetVisitorById(
2256 StaticVisitorBase::kVisitConsString));
2262 void Heap::ScavengeObjectSlow(HeapObject** p, HeapObject* object) {
2263 SLOW_DCHECK(object->GetIsolate()->heap()->InFromSpace(object));
2264 MapWord first_word = object->map_word();
2265 SLOW_DCHECK(!first_word.IsForwardingAddress());
2266 Map* map = first_word.ToMap();
2267 map->GetHeap()->DoScavengeObject(map, p, object);
2271 AllocationResult Heap::AllocatePartialMap(InstanceType instance_type,
2272 int instance_size) {
2274 AllocationResult allocation = AllocateRaw(Map::kSize, MAP_SPACE, MAP_SPACE);
2275 if (!allocation.To(&result)) return allocation;
2277 // Map::cast cannot be used due to uninitialized map field.
2278 reinterpret_cast<Map*>(result)->set_map(raw_unchecked_meta_map());
2279 reinterpret_cast<Map*>(result)->set_instance_type(instance_type);
2280 reinterpret_cast<Map*>(result)->set_instance_size(instance_size);
2281 reinterpret_cast<Map*>(result)->set_visitor_id(
2282 StaticVisitorBase::GetVisitorId(instance_type, instance_size));
2283 reinterpret_cast<Map*>(result)->set_inobject_properties(0);
2284 reinterpret_cast<Map*>(result)->set_pre_allocated_property_fields(0);
2285 reinterpret_cast<Map*>(result)->set_unused_property_fields(0);
2286 reinterpret_cast<Map*>(result)->set_bit_field(0);
2287 reinterpret_cast<Map*>(result)->set_bit_field2(0);
2288 int bit_field3 = Map::EnumLengthBits::encode(kInvalidEnumCacheSentinel) |
2289 Map::OwnsDescriptors::encode(true);
2290 reinterpret_cast<Map*>(result)->set_bit_field3(bit_field3);
2295 AllocationResult Heap::AllocateMap(InstanceType instance_type,
2297 ElementsKind elements_kind) {
2299 AllocationResult allocation = AllocateRaw(Map::kSize, MAP_SPACE, MAP_SPACE);
2300 if (!allocation.To(&result)) return allocation;
2302 result->set_map_no_write_barrier(meta_map());
2303 Map* map = Map::cast(result);
2304 map->set_instance_type(instance_type);
2305 map->set_visitor_id(
2306 StaticVisitorBase::GetVisitorId(instance_type, instance_size));
2307 map->set_prototype(null_value(), SKIP_WRITE_BARRIER);
2308 map->set_constructor(null_value(), SKIP_WRITE_BARRIER);
2309 map->set_instance_size(instance_size);
2310 map->set_inobject_properties(0);
2311 map->set_pre_allocated_property_fields(0);
2312 map->set_code_cache(empty_fixed_array(), SKIP_WRITE_BARRIER);
2313 map->set_dependent_code(DependentCode::cast(empty_fixed_array()),
2314 SKIP_WRITE_BARRIER);
2315 map->init_back_pointer(undefined_value());
2316 map->set_unused_property_fields(0);
2317 map->set_instance_descriptors(empty_descriptor_array());
2318 map->set_bit_field(0);
2319 map->set_bit_field2(1 << Map::kIsExtensible);
2320 int bit_field3 = Map::EnumLengthBits::encode(kInvalidEnumCacheSentinel) |
2321 Map::OwnsDescriptors::encode(true);
2322 map->set_bit_field3(bit_field3);
2323 map->set_elements_kind(elements_kind);
2329 AllocationResult Heap::AllocateFillerObject(int size, bool double_align,
2330 AllocationSpace space) {
2333 AllocationResult allocation = AllocateRaw(size, space, space);
2334 if (!allocation.To(&obj)) return allocation;
2337 MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
2338 DCHECK(chunk->owner()->identity() == space);
2340 CreateFillerObjectAt(obj->address(), size);
2345 const Heap::StringTypeTable Heap::string_type_table[] = {
2346 #define STRING_TYPE_ELEMENT(type, size, name, camel_name) \
2347 { type, size, k##camel_name##MapRootIndex } \
2349 STRING_TYPE_LIST(STRING_TYPE_ELEMENT)
2350 #undef STRING_TYPE_ELEMENT
2354 const Heap::ConstantStringTable Heap::constant_string_table[] = {
2355 #define CONSTANT_STRING_ELEMENT(name, contents) \
2356 { contents, k##name##RootIndex } \
2358 INTERNALIZED_STRING_LIST(CONSTANT_STRING_ELEMENT)
2359 #undef CONSTANT_STRING_ELEMENT
2363 const Heap::StructTable Heap::struct_table[] = {
2364 #define STRUCT_TABLE_ELEMENT(NAME, Name, name) \
2365 { NAME##_TYPE, Name::kSize, k##Name##MapRootIndex } \
2367 STRUCT_LIST(STRUCT_TABLE_ELEMENT)
2368 #undef STRUCT_TABLE_ELEMENT
2372 bool Heap::CreateInitialMaps() {
2375 AllocationResult allocation = AllocatePartialMap(MAP_TYPE, Map::kSize);
2376 if (!allocation.To(&obj)) return false;
2378 // Map::cast cannot be used due to uninitialized map field.
2379 Map* new_meta_map = reinterpret_cast<Map*>(obj);
2380 set_meta_map(new_meta_map);
2381 new_meta_map->set_map(new_meta_map);
2383 { // Partial map allocation
2384 #define ALLOCATE_PARTIAL_MAP(instance_type, size, field_name) \
2387 if (!AllocatePartialMap((instance_type), (size)).To(&map)) return false; \
2388 set_##field_name##_map(map); \
2391 ALLOCATE_PARTIAL_MAP(FIXED_ARRAY_TYPE, kVariableSizeSentinel, fixed_array);
2392 ALLOCATE_PARTIAL_MAP(ODDBALL_TYPE, Oddball::kSize, undefined);
2393 ALLOCATE_PARTIAL_MAP(ODDBALL_TYPE, Oddball::kSize, null);
2394 ALLOCATE_PARTIAL_MAP(CONSTANT_POOL_ARRAY_TYPE, kVariableSizeSentinel,
2395 constant_pool_array);
2397 #undef ALLOCATE_PARTIAL_MAP
2400 // Allocate the empty array.
2402 AllocationResult allocation = AllocateEmptyFixedArray();
2403 if (!allocation.To(&obj)) return false;
2405 set_empty_fixed_array(FixedArray::cast(obj));
2408 AllocationResult allocation = Allocate(null_map(), OLD_POINTER_SPACE);
2409 if (!allocation.To(&obj)) return false;
2411 set_null_value(Oddball::cast(obj));
2412 Oddball::cast(obj)->set_kind(Oddball::kNull);
2415 AllocationResult allocation = Allocate(undefined_map(), OLD_POINTER_SPACE);
2416 if (!allocation.To(&obj)) return false;
2418 set_undefined_value(Oddball::cast(obj));
2419 Oddball::cast(obj)->set_kind(Oddball::kUndefined);
2420 DCHECK(!InNewSpace(undefined_value()));
2422 // Set preliminary exception sentinel value before actually initializing it.
2423 set_exception(null_value());
2425 // Allocate the empty descriptor array.
2427 AllocationResult allocation = AllocateEmptyFixedArray();
2428 if (!allocation.To(&obj)) return false;
2430 set_empty_descriptor_array(DescriptorArray::cast(obj));
2432 // Allocate the constant pool array.
2434 AllocationResult allocation = AllocateEmptyConstantPoolArray();
2435 if (!allocation.To(&obj)) return false;
2437 set_empty_constant_pool_array(ConstantPoolArray::cast(obj));
2439 // Fix the instance_descriptors for the existing maps.
2440 meta_map()->set_code_cache(empty_fixed_array());
2441 meta_map()->set_dependent_code(DependentCode::cast(empty_fixed_array()));
2442 meta_map()->init_back_pointer(undefined_value());
2443 meta_map()->set_instance_descriptors(empty_descriptor_array());
2445 fixed_array_map()->set_code_cache(empty_fixed_array());
2446 fixed_array_map()->set_dependent_code(
2447 DependentCode::cast(empty_fixed_array()));
2448 fixed_array_map()->init_back_pointer(undefined_value());
2449 fixed_array_map()->set_instance_descriptors(empty_descriptor_array());
2451 undefined_map()->set_code_cache(empty_fixed_array());
2452 undefined_map()->set_dependent_code(DependentCode::cast(empty_fixed_array()));
2453 undefined_map()->init_back_pointer(undefined_value());
2454 undefined_map()->set_instance_descriptors(empty_descriptor_array());
2456 null_map()->set_code_cache(empty_fixed_array());
2457 null_map()->set_dependent_code(DependentCode::cast(empty_fixed_array()));
2458 null_map()->init_back_pointer(undefined_value());
2459 null_map()->set_instance_descriptors(empty_descriptor_array());
2461 constant_pool_array_map()->set_code_cache(empty_fixed_array());
2462 constant_pool_array_map()->set_dependent_code(
2463 DependentCode::cast(empty_fixed_array()));
2464 constant_pool_array_map()->init_back_pointer(undefined_value());
2465 constant_pool_array_map()->set_instance_descriptors(empty_descriptor_array());
2467 // Fix prototype object for existing maps.
2468 meta_map()->set_prototype(null_value());
2469 meta_map()->set_constructor(null_value());
2471 fixed_array_map()->set_prototype(null_value());
2472 fixed_array_map()->set_constructor(null_value());
2474 undefined_map()->set_prototype(null_value());
2475 undefined_map()->set_constructor(null_value());
2477 null_map()->set_prototype(null_value());
2478 null_map()->set_constructor(null_value());
2480 constant_pool_array_map()->set_prototype(null_value());
2481 constant_pool_array_map()->set_constructor(null_value());
2484 #define ALLOCATE_MAP(instance_type, size, field_name) \
2487 if (!AllocateMap((instance_type), size).To(&map)) return false; \
2488 set_##field_name##_map(map); \
2491 #define ALLOCATE_VARSIZE_MAP(instance_type, field_name) \
2492 ALLOCATE_MAP(instance_type, kVariableSizeSentinel, field_name)
2494 ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, fixed_cow_array)
2495 DCHECK(fixed_array_map() != fixed_cow_array_map());
2497 ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, scope_info)
2498 ALLOCATE_MAP(HEAP_NUMBER_TYPE, HeapNumber::kSize, heap_number)
2499 ALLOCATE_MAP(MUTABLE_HEAP_NUMBER_TYPE, HeapNumber::kSize,
2500 mutable_heap_number)
2501 ALLOCATE_MAP(SYMBOL_TYPE, Symbol::kSize, symbol)
2502 ALLOCATE_MAP(FOREIGN_TYPE, Foreign::kSize, foreign)
2504 ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, the_hole);
2505 ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, boolean);
2506 ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, uninitialized);
2507 ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, arguments_marker);
2508 ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, no_interceptor_result_sentinel);
2509 ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, exception);
2510 ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, termination_exception);
2512 for (unsigned i = 0; i < ARRAY_SIZE(string_type_table); i++) {
2513 const StringTypeTable& entry = string_type_table[i];
2515 AllocationResult allocation = AllocateMap(entry.type, entry.size);
2516 if (!allocation.To(&obj)) return false;
2518 // Mark cons string maps as unstable, because their objects can change maps during GC.
2520 Map* map = Map::cast(obj);
2521 if (StringShape(entry.type).IsCons()) map->mark_unstable();
2522 roots_[entry.index] = map;
2525 ALLOCATE_VARSIZE_MAP(STRING_TYPE, undetectable_string)
2526 undetectable_string_map()->set_is_undetectable();
2528 ALLOCATE_VARSIZE_MAP(ASCII_STRING_TYPE, undetectable_ascii_string);
2529 undetectable_ascii_string_map()->set_is_undetectable();
2531 ALLOCATE_VARSIZE_MAP(FIXED_DOUBLE_ARRAY_TYPE, fixed_double_array)
2532 ALLOCATE_VARSIZE_MAP(BYTE_ARRAY_TYPE, byte_array)
2533 ALLOCATE_VARSIZE_MAP(FREE_SPACE_TYPE, free_space)
2535 #define ALLOCATE_EXTERNAL_ARRAY_MAP(Type, type, TYPE, ctype, size) \
2536 ALLOCATE_MAP(EXTERNAL_##TYPE##_ARRAY_TYPE, ExternalArray::kAlignedSize, \
2537 external_##type##_array)
2539 TYPED_ARRAYS(ALLOCATE_EXTERNAL_ARRAY_MAP)
2540 #undef ALLOCATE_EXTERNAL_ARRAY_MAP
2542 #define ALLOCATE_FIXED_TYPED_ARRAY_MAP(Type, type, TYPE, ctype, size) \
2543 ALLOCATE_VARSIZE_MAP(FIXED_##TYPE##_ARRAY_TYPE, fixed_##type##_array)
2545 TYPED_ARRAYS(ALLOCATE_FIXED_TYPED_ARRAY_MAP)
2546 #undef ALLOCATE_FIXED_TYPED_ARRAY_MAP
2548 ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, sloppy_arguments_elements)
2550 ALLOCATE_VARSIZE_MAP(CODE_TYPE, code)
2552 ALLOCATE_MAP(CELL_TYPE, Cell::kSize, cell)
2553 ALLOCATE_MAP(PROPERTY_CELL_TYPE, PropertyCell::kSize, global_property_cell)
2554 ALLOCATE_MAP(FILLER_TYPE, kPointerSize, one_pointer_filler)
2555 ALLOCATE_MAP(FILLER_TYPE, 2 * kPointerSize, two_pointer_filler)
2558 for (unsigned i = 0; i < ARRAY_SIZE(struct_table); i++) {
2559 const StructTable& entry = struct_table[i];
2561 if (!AllocateMap(entry.type, entry.size).To(&map)) return false;
2562 roots_[entry.index] = map;
2565 ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, hash_table)
2566 ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, ordered_hash_table)
2568 ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, function_context)
2569 ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, catch_context)
2570 ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, with_context)
2571 ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, block_context)
2572 ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, module_context)
2573 ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, global_context)
2575 ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, native_context)
2576 native_context_map()->set_dictionary_map(true);
2577 native_context_map()->set_visitor_id(
2578 StaticVisitorBase::kVisitNativeContext);
2580 ALLOCATE_MAP(SHARED_FUNCTION_INFO_TYPE, SharedFunctionInfo::kAlignedSize,
2581 shared_function_info)
2583 ALLOCATE_MAP(JS_MESSAGE_OBJECT_TYPE, JSMessageObject::kSize, message_object)
2584 ALLOCATE_MAP(JS_OBJECT_TYPE, JSObject::kHeaderSize + kPointerSize, external)
2585 external_map()->set_is_extensible(false);
2586 #undef ALLOCATE_VARSIZE_MAP
2592 ByteArray* byte_array;
2593 if (!AllocateByteArray(0, TENURED).To(&byte_array)) return false;
2594 set_empty_byte_array(byte_array);
2597 #define ALLOCATE_EMPTY_EXTERNAL_ARRAY(Type, type, TYPE, ctype, size) \
2599 ExternalArray* obj; \
2600 if (!AllocateEmptyExternalArray(kExternal##Type##Array).To(&obj)) \
2602 set_empty_external_##type##_array(obj); \
2605 TYPED_ARRAYS(ALLOCATE_EMPTY_EXTERNAL_ARRAY)
2606 #undef ALLOCATE_EMPTY_EXTERNAL_ARRAY
2608 #define ALLOCATE_EMPTY_FIXED_TYPED_ARRAY(Type, type, TYPE, ctype, size) \
2610 FixedTypedArrayBase* obj; \
2611 if (!AllocateEmptyFixedTypedArray(kExternal##Type##Array).To(&obj)) \
2613 set_empty_fixed_##type##_array(obj); \
2616 TYPED_ARRAYS(ALLOCATE_EMPTY_FIXED_TYPED_ARRAY)
2617 #undef ALLOCATE_EMPTY_FIXED_TYPED_ARRAY
2619 DCHECK(!InNewSpace(empty_fixed_array()));
2624 AllocationResult Heap::AllocateHeapNumber(double value, MutableMode mode,
2625 PretenureFlag pretenure) {
2626 // Statically ensure that it is safe to allocate heap numbers in paged
2628 int size = HeapNumber::kSize;
2629 STATIC_ASSERT(HeapNumber::kSize <= Page::kMaxRegularHeapObjectSize);
2631 AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
2635 AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
2636 if (!allocation.To(&result)) return allocation;
2639 Map* map = mode == MUTABLE ? mutable_heap_number_map() : heap_number_map();
2640 HeapObject::cast(result)->set_map_no_write_barrier(map);
2641 HeapNumber::cast(result)->set_value(value);
2646 #define SIMD128_HEAP_ALLOCATE_FUNCTIONS(V) \
2647 V(Float32x4, float32x4) \
2648 V(Float64x2, float64x2) \
2652 #define DECLARE_SIMD_HEAP_ALLOCATE_FUNCTION(TYPE, type) \
2653 AllocationResult Heap::Allocate##TYPE(type##_value_t value, \
2654 PretenureFlag pretenure) { \
2655 STATIC_ASSERT(TYPE::kSize <= Page::kMaxRegularHeapObjectSize); \
2657 AllocationSpace space = \
2658 SelectSpace(TYPE::kSize, OLD_DATA_SPACE, pretenure); \
2660 HeapObject* result; \
2661 { AllocationResult allocation = \
2662 AllocateRaw(TYPE::kSize, space, OLD_DATA_SPACE); \
2663 if (!allocation.To(&result)) return allocation; \
2666 result->set_map_no_write_barrier( \
2667 isolate()->native_context()->type##_function()->initial_map()); \
2668 JSObject::cast(result)->set_properties(empty_fixed_array()); \
2669 JSObject::cast(result)->set_elements(empty_fixed_array()); \
2671 HeapObject* storage; \
2672 int storage_size = \
2673 FixedTypedArrayBase::kDataOffset + k##TYPE##Size; \
2674 space = SelectSpace(storage_size, OLD_DATA_SPACE, pretenure); \
2675 { AllocationResult allocation = \
2676 AllocateRaw(storage_size, space, OLD_DATA_SPACE); \
2677 if (!allocation.To(&storage)) return allocation; \
2681 *isolate()->factory()->fixed_##type##_array_map()); \
2682 FixedTypedArrayBase* elements = FixedTypedArrayBase::cast(storage); \
2683 elements->set_length(static_cast<int>(1)); \
2684 memset(elements->DataPtr(), 0, elements->DataSize()); \
2685 Fixed##TYPE##Array::cast(storage)->set(0, value); \
2686 TYPE::cast(result)->set_value(storage); \
2691 SIMD128_HEAP_ALLOCATE_FUNCTIONS(DECLARE_SIMD_HEAP_ALLOCATE_FUNCTION)
2694 AllocationResult Heap::AllocateCell(Object* value) {
2695 int size = Cell::kSize;
2696 STATIC_ASSERT(Cell::kSize <= Page::kMaxRegularHeapObjectSize);
2700 AllocationResult allocation = AllocateRaw(size, CELL_SPACE, CELL_SPACE);
2701 if (!allocation.To(&result)) return allocation;
2703 result->set_map_no_write_barrier(cell_map());
2704 Cell::cast(result)->set_value(value);
2709 AllocationResult Heap::AllocatePropertyCell() {
2710 int size = PropertyCell::kSize;
2711 STATIC_ASSERT(PropertyCell::kSize <= Page::kMaxRegularHeapObjectSize);
2714 AllocationResult allocation =
2715 AllocateRaw(size, PROPERTY_CELL_SPACE, PROPERTY_CELL_SPACE);
2716 if (!allocation.To(&result)) return allocation;
2718 result->set_map_no_write_barrier(global_property_cell_map());
2719 PropertyCell* cell = PropertyCell::cast(result);
2720 cell->set_dependent_code(DependentCode::cast(empty_fixed_array()),
2721 SKIP_WRITE_BARRIER);
2722 cell->set_value(the_hole_value());
2723 cell->set_type(HeapType::None());
2728 void Heap::CreateApiObjects() {
2729 HandleScope scope(isolate());
2730 Factory* factory = isolate()->factory();
2731 Handle<Map> new_neander_map =
2732 factory->NewMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
2734 // Don't use Smi-only elements optimizations for objects with the neander
2735 // map. There are too many cases where element values are set directly with a
2736 // bottleneck to trap the Smi-only -> fast elements transition, and there
2737 // appears to be no benefit in optimizing this case.
2738 new_neander_map->set_elements_kind(TERMINAL_FAST_ELEMENTS_KIND);
2739 set_neander_map(*new_neander_map);
2741 Handle<JSObject> listeners = factory->NewNeanderObject();
2742 Handle<FixedArray> elements = factory->NewFixedArray(2);
2743 elements->set(0, Smi::FromInt(0));
2744 listeners->set_elements(*elements);
2745 set_message_listeners(*listeners);
2749 void Heap::CreateJSEntryStub() {
2750 JSEntryStub stub(isolate());
2751 set_js_entry_code(*stub.GetCode());
2755 void Heap::CreateJSConstructEntryStub() {
2756 JSConstructEntryStub stub(isolate());
2757 set_js_construct_entry_code(*stub.GetCode());
2761 void Heap::CreateFixedStubs() {
2762 // Here we create roots for fixed stubs. They are needed at GC
2763 // for cooking and uncooking (check out frames.cc).
2764 // This eliminates the need for doing a dictionary lookup in the
2765 // stub cache for these stubs.
2766 HandleScope scope(isolate());
2768 // Create stubs that should be there, so we don't unexpectedly have to
2769 // create them if we need them during the creation of another stub.
2770 // Stub creation mixes raw pointers and handles in an unsafe manner so
2771 // we cannot create stubs while we are creating stubs.
2772 CodeStub::GenerateStubsAheadOfTime(isolate());
2774 // MacroAssembler::Abort calls (usually enabled with --debug-code) depend on
2775 // CEntryStub, so we need to call GenerateStubsAheadOfTime before JSEntryStub is created.
2778 // gcc-4.4 has problem generating correct code of following snippet:
2779 // { JSEntryStub stub;
2780 // js_entry_code_ = *stub.GetCode();
2782 // { JSConstructEntryStub stub;
2783 // js_construct_entry_code_ = *stub.GetCode();
2785 // To workaround the problem, make separate functions without inlining.
2786 Heap::CreateJSEntryStub();
2787 Heap::CreateJSConstructEntryStub();
2791 void Heap::CreateInitialObjects() {
2792 HandleScope scope(isolate());
2793 Factory* factory = isolate()->factory();
2795 // The -0 value must be set before NewNumber works.
2796 set_minus_zero_value(*factory->NewHeapNumber(-0.0, IMMUTABLE, TENURED));
2797 DCHECK(std::signbit(minus_zero_value()->Number()) != 0);
2800 *factory->NewHeapNumber(base::OS::nan_value(), IMMUTABLE, TENURED));
2801 set_infinity_value(*factory->NewHeapNumber(V8_INFINITY, IMMUTABLE, TENURED));
2803 // The hole has not been created yet, but we want to put something
2804 // predictable in the gaps in the string table, so let's make that Smi zero.
2805 set_the_hole_value(reinterpret_cast<Oddball*>(Smi::FromInt(0)));
2807 // Allocate initial string table.
2808 set_string_table(*StringTable::New(isolate(), kInitialStringTableSize));
2810 // Finish initializing oddballs after creating the string table.
2811 Oddball::Initialize(isolate(), factory->undefined_value(), "undefined",
2812 factory->nan_value(), Oddball::kUndefined);
2814 // Initialize the null_value.
2815 Oddball::Initialize(isolate(), factory->null_value(), "null",
2816 handle(Smi::FromInt(0), isolate()), Oddball::kNull);
2818 set_true_value(*factory->NewOddball(factory->boolean_map(), "true",
2819 handle(Smi::FromInt(1), isolate()),
2822 set_false_value(*factory->NewOddball(factory->boolean_map(), "false",
2823 handle(Smi::FromInt(0), isolate()),
2826 set_the_hole_value(*factory->NewOddball(factory->the_hole_map(), "hole",
2827 handle(Smi::FromInt(-1), isolate()),
2828 Oddball::kTheHole));
2830 set_uninitialized_value(*factory->NewOddball(
2831 factory->uninitialized_map(), "uninitialized",
2832 handle(Smi::FromInt(-1), isolate()), Oddball::kUninitialized));
2834 set_arguments_marker(*factory->NewOddball(
2835 factory->arguments_marker_map(), "arguments_marker",
2836 handle(Smi::FromInt(-4), isolate()), Oddball::kArgumentMarker));
2838 set_no_interceptor_result_sentinel(*factory->NewOddball(
2839 factory->no_interceptor_result_sentinel_map(),
2840 "no_interceptor_result_sentinel", handle(Smi::FromInt(-2), isolate()),
2843 set_termination_exception(*factory->NewOddball(
2844 factory->termination_exception_map(), "termination_exception",
2845 handle(Smi::FromInt(-3), isolate()), Oddball::kOther));
2847 set_exception(*factory->NewOddball(factory->exception_map(), "exception",
2848 handle(Smi::FromInt(-5), isolate()),
2849 Oddball::kException));
2851 for (unsigned i = 0; i < ARRAY_SIZE(constant_string_table); i++) {
2852 Handle<String> str =
2853 factory->InternalizeUtf8String(constant_string_table[i].contents);
2854 roots_[constant_string_table[i].index] = *str;
2857 // Allocate the hidden string which is used to identify the hidden properties
2858 // in JSObjects. The hash code has a special value so that it will not match
2859 // the empty string when searching for the property. It cannot be part of the
2860 // loop above because it needs to be allocated manually with the special
2861 // hash code in place. The hash code for the hidden_string is zero to ensure
2862 // that it will always be at the first entry in property descriptors.
2863 hidden_string_ = *factory->NewOneByteInternalizedString(
2864 OneByteVector("", 0), String::kEmptyStringHash);
2866 // Create the code_stubs dictionary. The initial size is set to avoid
2867 // expanding the dictionary during bootstrapping.
2868 set_code_stubs(*UnseededNumberDictionary::New(isolate(), 128));
2870 // Create the non_monomorphic_cache used in stub-cache.cc. The initial size
2871 // is set to avoid expanding the dictionary during bootstrapping.
2872 set_non_monomorphic_cache(*UnseededNumberDictionary::New(isolate(), 64));
2874 set_polymorphic_code_cache(PolymorphicCodeCache::cast(
2875 *factory->NewStruct(POLYMORPHIC_CODE_CACHE_TYPE)));
2877 set_instanceof_cache_function(Smi::FromInt(0));
2878 set_instanceof_cache_map(Smi::FromInt(0));
2879 set_instanceof_cache_answer(Smi::FromInt(0));
2883 // Allocate the dictionary of intrinsic function names.
2884 Handle<NameDictionary> intrinsic_names =
2885 NameDictionary::New(isolate(), Runtime::kNumFunctions);
2886 Runtime::InitializeIntrinsicFunctionNames(isolate(), intrinsic_names);
2887 set_intrinsic_function_names(*intrinsic_names);
2889 set_number_string_cache(
2890 *factory->NewFixedArray(kInitialNumberStringCacheSize * 2, TENURED));
2892 // Allocate cache for single character one byte strings.
2893 set_single_character_string_cache(
2894 *factory->NewFixedArray(String::kMaxOneByteCharCode + 1, TENURED));
2896 // Allocate cache for string split and regexp-multiple.
2897 set_string_split_cache(*factory->NewFixedArray(
2898 RegExpResultsCache::kRegExpResultsCacheSize, TENURED));
2899 set_regexp_multiple_cache(*factory->NewFixedArray(
2900 RegExpResultsCache::kRegExpResultsCacheSize, TENURED));
2902 // Allocate cache for external strings pointing to native source code.
2903 set_natives_source_cache(
2904 *factory->NewFixedArray(Natives::GetBuiltinsCount()));
2906 set_undefined_cell(*factory->NewCell(factory->undefined_value()));
2908 // The symbol registry is initialized lazily.
2909 set_symbol_registry(undefined_value());
2911 // Allocate object to hold object observation state.
2912 set_observation_state(*factory->NewJSObjectFromMap(
2913 factory->NewMap(JS_OBJECT_TYPE, JSObject::kHeaderSize)));
2915 // Microtask queue uses the empty fixed array as a sentinel for "empty".
2916 // The number of queued microtasks is stored in Isolate::pending_microtask_count().
2917 set_microtask_queue(empty_fixed_array());
2919 set_detailed_stack_trace_symbol(*factory->NewPrivateSymbol());
2920 set_elements_transition_symbol(*factory->NewPrivateSymbol());
2921 set_frozen_symbol(*factory->NewPrivateSymbol());
2922 set_megamorphic_symbol(*factory->NewPrivateSymbol());
2923 set_nonexistent_symbol(*factory->NewPrivateSymbol());
2924 set_normal_ic_symbol(*factory->NewPrivateSymbol());
2925 set_observed_symbol(*factory->NewPrivateSymbol());
2926 set_stack_trace_symbol(*factory->NewPrivateSymbol());
2927 set_uninitialized_symbol(*factory->NewPrivateSymbol());
2929 Handle<SeededNumberDictionary> slow_element_dictionary =
2930 SeededNumberDictionary::New(isolate(), 0, TENURED);
2931 slow_element_dictionary->set_requires_slow_elements();
2932 set_empty_slow_element_dictionary(*slow_element_dictionary);
2934 set_materialized_objects(*factory->NewFixedArray(0, TENURED));
2936 // Handling of script id generation is in Factory::NewScript.
2937 set_last_script_id(Smi::FromInt(v8::UnboundScript::kNoScriptId));
2939 set_allocation_sites_scratchpad(
2940 *factory->NewFixedArray(kAllocationSiteScratchpadSize, TENURED));
2941 InitializeAllocationSitesScratchpad();
2943 // Initialize keyed lookup cache.
2944 isolate_->keyed_lookup_cache()->Clear();
2946 // Initialize context slot cache.
2947 isolate_->context_slot_cache()->Clear();
2949 // Initialize descriptor cache.
2950 isolate_->descriptor_lookup_cache()->Clear();
2952 // Initialize compilation cache.
2953 isolate_->compilation_cache()->Clear();
2957 bool Heap::RootCanBeWrittenAfterInitialization(Heap::RootListIndex root_index) {
2958 RootListIndex writable_roots[] = {
2959 kStoreBufferTopRootIndex,
2960 kStackLimitRootIndex,
2961 kNumberStringCacheRootIndex,
2962 kInstanceofCacheFunctionRootIndex,
2963 kInstanceofCacheMapRootIndex,
2964 kInstanceofCacheAnswerRootIndex,
2965 kCodeStubsRootIndex,
2966 kNonMonomorphicCacheRootIndex,
2967 kPolymorphicCodeCacheRootIndex,
2968 kLastScriptIdRootIndex,
2969 kEmptyScriptRootIndex,
2970 kRealStackLimitRootIndex,
2971 kArgumentsAdaptorDeoptPCOffsetRootIndex,
2972 kConstructStubDeoptPCOffsetRootIndex,
2973 kGetterStubDeoptPCOffsetRootIndex,
2974 kSetterStubDeoptPCOffsetRootIndex,
2975 kStringTableRootIndex,
2978 for (unsigned int i = 0; i < ARRAY_SIZE(writable_roots); i++) {
2979 if (root_index == writable_roots[i]) return true;
2985 bool Heap::RootCanBeTreatedAsConstant(RootListIndex root_index) {
2986 return !RootCanBeWrittenAfterInitialization(root_index) &&
2987 !InNewSpace(roots_array_start()[root_index]);
2991 Object* RegExpResultsCache::Lookup(Heap* heap, String* key_string,
2992 Object* key_pattern, ResultsCacheType type) {
2994 if (!key_string->IsInternalizedString()) return Smi::FromInt(0);
2995 if (type == STRING_SPLIT_SUBSTRINGS) {
2996 DCHECK(key_pattern->IsString());
2997 if (!key_pattern->IsInternalizedString()) return Smi::FromInt(0);
2998 cache = heap->string_split_cache();
3000 DCHECK(type == REGEXP_MULTIPLE_INDICES);
3001 DCHECK(key_pattern->IsFixedArray());
3002 cache = heap->regexp_multiple_cache();
3005 uint32_t hash = key_string->Hash();
3006 uint32_t index = ((hash & (kRegExpResultsCacheSize - 1)) &
3007 ~(kArrayEntriesPerCacheEntry - 1));
3008 if (cache->get(index + kStringOffset) == key_string &&
3009 cache->get(index + kPatternOffset) == key_pattern) {
3010 return cache->get(index + kArrayOffset);
3013 ((index + kArrayEntriesPerCacheEntry) & (kRegExpResultsCacheSize - 1));
3014 if (cache->get(index + kStringOffset) == key_string &&
3015 cache->get(index + kPatternOffset) == key_pattern) {
3016 return cache->get(index + kArrayOffset);
3018 return Smi::FromInt(0);
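// Illustrative example (the real constants are defined in the header): the
// results cache is direct-mapped with a single overflow probe. Assuming
// kRegExpResultsCacheSize == 0x100 and kArrayEntriesPerCacheEntry == 4, a key
// string whose hash is 0x1237 maps to index (0x1237 & 0xff) & ~3 == 0x34; if
// that entry does not match, the secondary probe at (0x34 + 4) & 0xff == 0x38
// is checked before the lookup gives up and returns Smi zero.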
3022 void RegExpResultsCache::Enter(Isolate* isolate, Handle<String> key_string,
3023 Handle<Object> key_pattern,
3024 Handle<FixedArray> value_array,
3025 ResultsCacheType type) {
3026 Factory* factory = isolate->factory();
3027 Handle<FixedArray> cache;
3028 if (!key_string->IsInternalizedString()) return;
3029 if (type == STRING_SPLIT_SUBSTRINGS) {
3030 DCHECK(key_pattern->IsString());
3031 if (!key_pattern->IsInternalizedString()) return;
3032 cache = factory->string_split_cache();
3034 DCHECK(type == REGEXP_MULTIPLE_INDICES);
3035 DCHECK(key_pattern->IsFixedArray());
3036 cache = factory->regexp_multiple_cache();
3039 uint32_t hash = key_string->Hash();
3040 uint32_t index = ((hash & (kRegExpResultsCacheSize - 1)) &
3041 ~(kArrayEntriesPerCacheEntry - 1));
3042 if (cache->get(index + kStringOffset) == Smi::FromInt(0)) {
3043 cache->set(index + kStringOffset, *key_string);
3044 cache->set(index + kPatternOffset, *key_pattern);
3045 cache->set(index + kArrayOffset, *value_array);
3048 ((index + kArrayEntriesPerCacheEntry) & (kRegExpResultsCacheSize - 1));
3049 if (cache->get(index2 + kStringOffset) == Smi::FromInt(0)) {
3050 cache->set(index2 + kStringOffset, *key_string);
3051 cache->set(index2 + kPatternOffset, *key_pattern);
3052 cache->set(index2 + kArrayOffset, *value_array);
3054 cache->set(index2 + kStringOffset, Smi::FromInt(0));
3055 cache->set(index2 + kPatternOffset, Smi::FromInt(0));
3056 cache->set(index2 + kArrayOffset, Smi::FromInt(0));
3057 cache->set(index + kStringOffset, *key_string);
3058 cache->set(index + kPatternOffset, *key_pattern);
3059 cache->set(index + kArrayOffset, *value_array);
3062 // If the array is a reasonably short list of substrings, convert it into a
3063 // list of internalized strings.
3064 if (type == STRING_SPLIT_SUBSTRINGS && value_array->length() < 100) {
3065 for (int i = 0; i < value_array->length(); i++) {
3066 Handle<String> str(String::cast(value_array->get(i)), isolate);
3067 Handle<String> internalized_str = factory->InternalizeString(str);
3068 value_array->set(i, *internalized_str);
3071 // Convert backing store to a copy-on-write array.
3072 value_array->set_map_no_write_barrier(*factory->fixed_cow_array_map());
3076 void RegExpResultsCache::Clear(FixedArray* cache) {
3077 for (int i = 0; i < kRegExpResultsCacheSize; i++) {
3078 cache->set(i, Smi::FromInt(0));
3083 int Heap::FullSizeNumberStringCacheLength() {
3084 // Compute the size of the number string cache based on the max newspace size.
3085 // The number string cache has a minimum size based on twice the initial cache
3086 // size to ensure that it is bigger after being made 'full size'.
3087 int number_string_cache_size = max_semi_space_size_ / 512;
3088 number_string_cache_size = Max(kInitialNumberStringCacheSize * 2,
3089 Min(0x4000, number_string_cache_size));
3090 // There is a string and a number per entry so the length is twice the number
3092 return number_string_cache_size * 2;
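// Illustrative example: with max_semi_space_size_ of 8 MB this computes
// 8 MB / 512 == 16384 (0x4000) entries, which the Min() leaves untouched and
// the Max() only raises for very small initial caches; the returned FixedArray
// length is then 32768 because every entry stores a number and its cached
// string side by side.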
3096 void Heap::FlushNumberStringCache() {
3097 // Flush the number to string cache.
3098 int len = number_string_cache()->length();
3099 for (int i = 0; i < len; i++) {
3100 number_string_cache()->set_undefined(i);
3105 void Heap::FlushAllocationSitesScratchpad() {
3106 for (int i = 0; i < allocation_sites_scratchpad_length_; i++) {
3107 allocation_sites_scratchpad()->set_undefined(i);
3109 allocation_sites_scratchpad_length_ = 0;
3113 void Heap::InitializeAllocationSitesScratchpad() {
3114 DCHECK(allocation_sites_scratchpad()->length() ==
3115 kAllocationSiteScratchpadSize);
3116 for (int i = 0; i < kAllocationSiteScratchpadSize; i++) {
3117 allocation_sites_scratchpad()->set_undefined(i);
3122 void Heap::AddAllocationSiteToScratchpad(AllocationSite* site,
3123 ScratchpadSlotMode mode) {
3124 if (allocation_sites_scratchpad_length_ < kAllocationSiteScratchpadSize) {
3125 // We cannot use the normal write-barrier because slots need to be
3126 // recorded with non-incremental marking as well. We have to explicitly
3127 // record the slot to take evacuation candidates into account.
3128 allocation_sites_scratchpad()->set(allocation_sites_scratchpad_length_,
3129 site, SKIP_WRITE_BARRIER);
3130 Object** slot = allocation_sites_scratchpad()->RawFieldOfElementAt(
3131 allocation_sites_scratchpad_length_);
3133 if (mode == RECORD_SCRATCHPAD_SLOT) {
3134 // We need to allow slots buffer overflow here since the evacuation
3135 // candidates are not part of the global list of old space pages and
3136 // releasing an evacuation candidate due to a slots buffer overflow
3137 // results in lost pages.
3138 mark_compact_collector()->RecordSlot(slot, slot, *slot,
3139 SlotsBuffer::IGNORE_OVERFLOW);
3141 allocation_sites_scratchpad_length_++;
3146 Map* Heap::MapForExternalArrayType(ExternalArrayType array_type) {
3147 return Map::cast(roots_[RootIndexForExternalArrayType(array_type)]);
3151 Heap::RootListIndex Heap::RootIndexForExternalArrayType(
3152 ExternalArrayType array_type) {
3153 switch (array_type) {
3154 #define ARRAY_TYPE_TO_ROOT_INDEX(Type, type, TYPE, ctype, size) \
3155 case kExternal##Type##Array: \
3156 return kExternal##Type##ArrayMapRootIndex;
3158 TYPED_ARRAYS(ARRAY_TYPE_TO_ROOT_INDEX)
3159 #undef ARRAY_TYPE_TO_ROOT_INDEX
3163 return kUndefinedValueRootIndex;
3168 Map* Heap::MapForFixedTypedArray(ExternalArrayType array_type) {
3169 return Map::cast(roots_[RootIndexForFixedTypedArray(array_type)]);
3173 Heap::RootListIndex Heap::RootIndexForFixedTypedArray(
3174 ExternalArrayType array_type) {
3175 switch (array_type) {
3176 #define ARRAY_TYPE_TO_ROOT_INDEX(Type, type, TYPE, ctype, size) \
3177 case kExternal##Type##Array: \
3178 return kFixed##Type##ArrayMapRootIndex;
3180 TYPED_ARRAYS(ARRAY_TYPE_TO_ROOT_INDEX)
3181 #undef ARRAY_TYPE_TO_ROOT_INDEX
3185 return kUndefinedValueRootIndex;
3190 Heap::RootListIndex Heap::RootIndexForEmptyExternalArray(
3191 ElementsKind elementsKind) {
3192 switch (elementsKind) {
3193 #define ELEMENT_KIND_TO_ROOT_INDEX(Type, type, TYPE, ctype, size) \
3194 case EXTERNAL_##TYPE##_ELEMENTS: \
3195 return kEmptyExternal##Type##ArrayRootIndex;
3197 TYPED_ARRAYS(ELEMENT_KIND_TO_ROOT_INDEX)
3198 #undef ELEMENT_KIND_TO_ROOT_INDEX
3202 return kUndefinedValueRootIndex;
3207 Heap::RootListIndex Heap::RootIndexForEmptyFixedTypedArray(
3208 ElementsKind elementsKind) {
3209 switch (elementsKind) {
3210 #define ELEMENT_KIND_TO_ROOT_INDEX(Type, type, TYPE, ctype, size) \
3211 case TYPE##_ELEMENTS: \
3212 return kEmptyFixed##Type##ArrayRootIndex;
3214 TYPED_ARRAYS(ELEMENT_KIND_TO_ROOT_INDEX)
3215 #undef ELEMENT_KIND_TO_ROOT_INDEX
3218 return kUndefinedValueRootIndex;
3223 ExternalArray* Heap::EmptyExternalArrayForMap(Map* map) {
3224 return ExternalArray::cast(
3225 roots_[RootIndexForEmptyExternalArray(map->elements_kind())]);
3229 FixedTypedArrayBase* Heap::EmptyFixedTypedArrayForMap(Map* map) {
3230 return FixedTypedArrayBase::cast(
3231 roots_[RootIndexForEmptyFixedTypedArray(map->elements_kind())]);
3235 AllocationResult Heap::AllocateForeign(Address address,
3236 PretenureFlag pretenure) {
3237 // Statically ensure that it is safe to allocate foreigns in paged spaces.
3238 STATIC_ASSERT(Foreign::kSize <= Page::kMaxRegularHeapObjectSize);
3239 AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
3241 AllocationResult allocation = Allocate(foreign_map(), space);
3242 if (!allocation.To(&result)) return allocation;
3243 result->set_foreign_address(address);
3248 AllocationResult Heap::AllocateByteArray(int length, PretenureFlag pretenure) {
3249 if (length < 0 || length > ByteArray::kMaxLength) {
3250 v8::internal::Heap::FatalProcessOutOfMemory("invalid array length", true);
3252 int size = ByteArray::SizeFor(length);
3253 AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
3256 AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
3257 if (!allocation.To(&result)) return allocation;
3260 result->set_map_no_write_barrier(byte_array_map());
3261 ByteArray::cast(result)->set_length(length);
3266 void Heap::CreateFillerObjectAt(Address addr, int size) {
3267 if (size == 0) return;
3268 HeapObject* filler = HeapObject::FromAddress(addr);
3269 if (size == kPointerSize) {
3270 filler->set_map_no_write_barrier(one_pointer_filler_map());
3271 } else if (size == 2 * kPointerSize) {
3272 filler->set_map_no_write_barrier(two_pointer_filler_map());
3274 filler->set_map_no_write_barrier(free_space_map());
3275 FreeSpace::cast(filler)->set_size(size);
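// Note: filler objects keep the heap iterable. A one-word hole receives the
// one_pointer_filler map and a two-word hole the two_pointer_filler map (their
// sizes are implied by the map), while any larger hole, e.g. 40 bytes, becomes
// a FreeSpace object that records its size explicitly.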
3280 bool Heap::CanMoveObjectStart(HeapObject* object) {
3281 Address address = object->address();
3282 bool is_in_old_pointer_space = InOldPointerSpace(address);
3283 bool is_in_old_data_space = InOldDataSpace(address);
3285 if (lo_space()->Contains(object)) return false;
3287 Page* page = Page::FromAddress(address);
3288 // We can move the object start if:
3289 // (1) the object is not in old pointer or old data space, or
3290 // (2) the page of the object was already swept, or
3291 // (3) the page was already concurrently swept. This case is an optimization
3292 // for concurrent sweeping: the WasSwept predicate for concurrently swept
3293 // pages is only set after all pages have been swept.
3294 return (!is_in_old_pointer_space && !is_in_old_data_space) ||
3295 page->WasSwept() || page->SweepingCompleted();
3299 void Heap::AdjustLiveBytes(Address address, int by, InvocationMode mode) {
3300 if (incremental_marking()->IsMarking() &&
3301 Marking::IsBlack(Marking::MarkBitFrom(address))) {
3302 if (mode == FROM_GC) {
3303 MemoryChunk::IncrementLiveBytesFromGC(address, by);
3305 MemoryChunk::IncrementLiveBytesFromMutator(address, by);
3311 FixedArrayBase* Heap::LeftTrimFixedArray(FixedArrayBase* object,
3312 int elements_to_trim) {
3313 const int element_size = object->IsFixedArray() ? kPointerSize : kDoubleSize;
3314 const int bytes_to_trim = elements_to_trim * element_size;
3315 Map* map = object->map();
3317 // For now this trick is only applied to objects in new and paged space.
3318 // In large object space the object's start must coincide with the start of
3319 // its chunk, and thus the trick is just not applicable.
3320 DCHECK(!lo_space()->Contains(object));
3321 DCHECK(object->map() != fixed_cow_array_map());
3323 STATIC_ASSERT(FixedArrayBase::kMapOffset == 0);
3324 STATIC_ASSERT(FixedArrayBase::kLengthOffset == kPointerSize);
3325 STATIC_ASSERT(FixedArrayBase::kHeaderSize == 2 * kPointerSize);
3327 const int len = object->length();
3328 DCHECK(elements_to_trim <= len);
3330 // Calculate location of new array start.
3331 Address new_start = object->address() + bytes_to_trim;
3333 // Technically in new space this write might be omitted (except for
3334 // debug mode which iterates through the heap), but to play safer we still do it.
3336 CreateFillerObjectAt(object->address(), bytes_to_trim);
3338 // Initialize header of the trimmed array. Since left trimming is only
3339 // performed on pages which are not concurrently swept creating a filler
3340 // object does not require synchronization.
3341 DCHECK(CanMoveObjectStart(object));
3342 Object** former_start = HeapObject::RawField(object, 0);
3343 int new_start_index = elements_to_trim * (element_size / kPointerSize);
3344 former_start[new_start_index] = map;
3345 former_start[new_start_index + 1] = Smi::FromInt(len - elements_to_trim);
3346 FixedArrayBase* new_object =
3347 FixedArrayBase::cast(HeapObject::FromAddress(new_start));
3349 // Maintain consistency of live bytes during incremental marking
3350 marking()->TransferMark(object->address(), new_start);
3351 AdjustLiveBytes(new_start, -bytes_to_trim, Heap::FROM_MUTATOR);
3353 // Notify the heap profiler of the change in object layout.
3354 OnMoveEvent(new_object, object, new_object->Size());
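// Illustrative example: left-trimming 2 elements from a FixedArray of length 5
// writes a 2 * kPointerSize filler over the old header, then stores the map
// and the new length Smi(3) at former_start[2] and former_start[3]; the
// surviving elements thus become the body of an array that starts
// bytes_to_trim later, and live-byte counts plus the profiler are updated for
// the moved start.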
3359 // Force instantiation of templatized method.
3361 void Heap::RightTrimFixedArray<Heap::FROM_GC>(FixedArrayBase*, int);
3363 void Heap::RightTrimFixedArray<Heap::FROM_MUTATOR>(FixedArrayBase*, int);
3366 template<Heap::InvocationMode mode>
3367 void Heap::RightTrimFixedArray(FixedArrayBase* object, int elements_to_trim) {
3368 const int element_size = object->IsFixedArray() ? kPointerSize : kDoubleSize;
3369 const int bytes_to_trim = elements_to_trim * element_size;
3371 // For now this trick is only applied to objects in new and paged space.
3372 DCHECK(!lo_space()->Contains(object));
3373 DCHECK(object->map() != fixed_cow_array_map());
3375 const int len = object->length();
3376 DCHECK(elements_to_trim < len);
3378 // Calculate location of new array end.
3379 Address new_end = object->address() + object->Size() - bytes_to_trim;
3381 // Technically in new space this write might be omitted (except for
3382 // debug mode which iterates through the heap), but to play safer we still do it.
3384 CreateFillerObjectAt(new_end, bytes_to_trim);
3386 // Initialize header of the trimmed array. We are storing the new length
3387 // using release store after creating a filler for the left-over space to
3388 // avoid races with the sweeper thread.
3389 object->synchronized_set_length(len - elements_to_trim);
3391 // Maintain consistency of live bytes during incremental marking
3392 AdjustLiveBytes(object->address(), -bytes_to_trim, mode);
3394 // Notify the heap profiler of the change in object layout. The array may not
3395 // be moved during GC, but its size still has to be adjusted.
3396 HeapProfiler* profiler = isolate()->heap_profiler();
3397 if (profiler->is_tracking_allocations()) {
3398 profiler->UpdateObjectSizeEvent(object->address(), object->Size());
3403 AllocationResult Heap::AllocateExternalArray(int length,
3404 ExternalArrayType array_type,
3405 void* external_pointer,
3406 PretenureFlag pretenure) {
3407 int size = ExternalArray::kAlignedSize;
3408 AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
3411 AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
3412 if (!allocation.To(&result)) return allocation;
3415 result->set_map_no_write_barrier(MapForExternalArrayType(array_type));
3416 ExternalArray::cast(result)->set_length(length);
3417 ExternalArray::cast(result)->set_external_pointer(external_pointer);
3421 static void ForFixedTypedArray(ExternalArrayType array_type, int* element_size,
3422 ElementsKind* element_kind) {
3423 switch (array_type) {
3424 #define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
3425 case kExternal##Type##Array: \
3426 *element_size = size; \
3427 *element_kind = TYPE##_ELEMENTS; \
3430 TYPED_ARRAYS(TYPED_ARRAY_CASE)
3431 #undef TYPED_ARRAY_CASE
3434 *element_size = 0; // Bogus
3435 *element_kind = UINT8_ELEMENTS; // Bogus
3441 AllocationResult Heap::AllocateFixedTypedArray(int length,
3442 ExternalArrayType array_type,
3443 PretenureFlag pretenure) {
3445 ElementsKind elements_kind;
3446 ForFixedTypedArray(array_type, &element_size, &elements_kind);
3447 int size = OBJECT_POINTER_ALIGN(length * element_size +
3448 FixedTypedArrayBase::kDataOffset);
3449 #ifndef V8_HOST_ARCH_64_BIT
3450 if (array_type == kExternalFloat64Array) {
3451 size += kPointerSize;
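// The extra word reserved here (32-bit hosts only) gives EnsureDoubleAligned
// below enough slack to shift the object so that its double payload starts on
// an 8-byte boundary.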
3454 AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
3457 AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
3458 if (!allocation.To(&object)) return allocation;
3460 if (array_type == kExternalFloat64Array) {
3461 object = EnsureDoubleAligned(this, object, size);
3464 object->set_map(MapForFixedTypedArray(array_type));
3465 FixedTypedArrayBase* elements = FixedTypedArrayBase::cast(object);
3466 elements->set_length(length);
3467 memset(elements->DataPtr(), 0, elements->DataSize());
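// Zero-filling the payload ensures that element reads performed before any
// explicit store observe 0 (or 0.0 for floating-point element kinds).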
3472 AllocationResult Heap::AllocateCode(int object_size, bool immovable) {
3473 DCHECK(IsAligned(static_cast<intptr_t>(object_size), kCodeAlignment));
3474 AllocationResult allocation =
3475 AllocateRaw(object_size, CODE_SPACE, CODE_SPACE);
3478 if (!allocation.To(&result)) return allocation;
3481 Address address = result->address();
3482 // Code objects which should stay at a fixed address are allocated either
3483 // in the first page of code space (objects on the first page of each space
3484 // are never moved) or in large object space.
3485 if (!code_space_->FirstPage()->Contains(address) &&
3486 MemoryChunk::FromAddress(address)->owner()->identity() != LO_SPACE) {
3487 // Discard the first code allocation, which was on a page where it could
3489 CreateFillerObjectAt(result->address(), object_size);
3490 allocation = lo_space_->AllocateRaw(object_size, EXECUTABLE);
3491 if (!allocation.To(&result)) return allocation;
3492 OnAllocationEvent(result, object_size);
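// Objects in large object space are never relocated, so retrying there
// satisfies the immovability requirement when the code did not land on the
// first page of code space.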
3496 result->set_map_no_write_barrier(code_map());
3497 Code* code = Code::cast(result);
3498 DCHECK(isolate_->code_range() == NULL || !isolate_->code_range()->valid() ||
3499 isolate_->code_range()->contains(code->address()));
3500 code->set_gc_metadata(Smi::FromInt(0));
3501 code->set_ic_age(global_ic_age_);
3506 AllocationResult Heap::CopyCode(Code* code) {
3507 AllocationResult allocation;
3508 HeapObject* new_constant_pool;
3509 if (FLAG_enable_ool_constant_pool &&
3510 code->constant_pool() != empty_constant_pool_array()) {
3511 // Copy the constant pool, since edits to the copied code may modify
3512 // the constant pool.
3513 allocation = CopyConstantPoolArray(code->constant_pool());
3514 if (!allocation.To(&new_constant_pool)) return allocation;
3516 new_constant_pool = empty_constant_pool_array();
3520 // Allocate an object the same size as the code object.
3521 int obj_size = code->Size();
3522 allocation = AllocateRaw(obj_size, CODE_SPACE, CODE_SPACE);
3523 if (!allocation.To(&result)) return allocation;
3525 // Copy code object.
3526 Address old_addr = code->address();
3527 Address new_addr = result->address();
3528 CopyBlock(new_addr, old_addr, obj_size);
3529 Code* new_code = Code::cast(result);
3531 // Update the constant pool.
3532 new_code->set_constant_pool(new_constant_pool);
3534 // Relocate the copy.
3535 DCHECK(isolate_->code_range() == NULL || !isolate_->code_range()->valid() ||
3536 isolate_->code_range()->contains(code->address()));
3537 new_code->Relocate(new_addr - old_addr);
3542 AllocationResult Heap::CopyCode(Code* code, Vector<byte> reloc_info) {
3543 // Allocate ByteArray and ConstantPoolArray before the Code object, so that we
3544 // do not risk leaving an uninitialized Code object (and breaking the heap).
3545 ByteArray* reloc_info_array;
3547 AllocationResult allocation =
3548 AllocateByteArray(reloc_info.length(), TENURED);
3549 if (!allocation.To(&reloc_info_array)) return allocation;
3551 HeapObject* new_constant_pool;
3552 if (FLAG_enable_ool_constant_pool &&
3553 code->constant_pool() != empty_constant_pool_array()) {
3554 // Copy the constant pool, since edits to the copied code may modify
3555 // the constant pool.
3556 AllocationResult allocation = CopyConstantPoolArray(code->constant_pool());
3557 if (!allocation.To(&new_constant_pool)) return allocation;
3559 new_constant_pool = empty_constant_pool_array();
3562 int new_body_size = RoundUp(code->instruction_size(), kObjectAlignment);
3564 int new_obj_size = Code::SizeFor(new_body_size);
3566 Address old_addr = code->address();
3568 size_t relocation_offset =
3569 static_cast<size_t>(code->instruction_end() - old_addr);
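// relocation_offset is the size of the header plus the instructions, i.e.
// everything in the original object up to where its relocation info begins.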
3572 AllocationResult allocation =
3573 AllocateRaw(new_obj_size, CODE_SPACE, CODE_SPACE);
3574 if (!allocation.To(&result)) return allocation;
3576 // Copy code object.
3577 Address new_addr = result->address();
3579 // Copy header and instructions.
3580 CopyBytes(new_addr, old_addr, relocation_offset);
3582 Code* new_code = Code::cast(result);
3583 new_code->set_relocation_info(reloc_info_array);
3585 // Update constant pool.
3586 new_code->set_constant_pool(new_constant_pool);
3588 // Copy patched rinfo.
3589 CopyBytes(new_code->relocation_start(), reloc_info.start(),
3590 static_cast<size_t>(reloc_info.length()));
3592 // Relocate the copy.
3593 DCHECK(isolate_->code_range() == NULL || !isolate_->code_range()->valid() ||
3594 isolate_->code_range()->contains(code->address()));
3595 new_code->Relocate(new_addr - old_addr);
3598 if (FLAG_verify_heap) code->ObjectVerify();
3604 void Heap::InitializeAllocationMemento(AllocationMemento* memento,
3605 AllocationSite* allocation_site) {
3606 memento->set_map_no_write_barrier(allocation_memento_map());
3607 DCHECK(allocation_site->map() == allocation_site_map());
3608 memento->set_allocation_site(allocation_site, SKIP_WRITE_BARRIER);
3609 if (FLAG_allocation_site_pretenuring) {
3610 allocation_site->IncrementMementoCreateCount();
3615 AllocationResult Heap::Allocate(Map* map, AllocationSpace space,
3616 AllocationSite* allocation_site) {
3617 DCHECK(gc_state_ == NOT_IN_GC);
3618 DCHECK(map->instance_type() != MAP_TYPE);
3619 // If allocation failures are disallowed, we may allocate in a different
3620 // space when new space is full and the object is not a large object.
3621 AllocationSpace retry_space =
3622 (space != NEW_SPACE) ? space : TargetSpaceId(map->instance_type());
3623 int size = map->instance_size();
3624 if (allocation_site != NULL) {
3625 size += AllocationMemento::kSize;
3628 AllocationResult allocation = AllocateRaw(size, space, retry_space);
3629 if (!allocation.To(&result)) return allocation;
3630 // No need for write barrier since object is white and map is in old space.
3631 result->set_map_no_write_barrier(map);
3632 if (allocation_site != NULL) {
3633 AllocationMemento* alloc_memento = reinterpret_cast<AllocationMemento*>(
3634 reinterpret_cast<Address>(result) + map->instance_size());
3635 InitializeAllocationMemento(alloc_memento, allocation_site);
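// The memento sits directly behind the object body, which is why the raw
// allocation above was enlarged by AllocationMemento::kSize.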
3641 void Heap::InitializeJSObjectFromMap(JSObject* obj, FixedArray* properties,
3643 obj->set_properties(properties);
3644 obj->initialize_elements();
3645 // TODO(1240798): Initialize the object's body using valid initial values
3646 // according to the object's initial map. For example, if the map's
3647 // instance type is JS_ARRAY_TYPE, the length field should be initialized
3648 // to a number (e.g. Smi::FromInt(0)) and the elements initialized to a
3649 // fixed array (e.g. Heap::empty_fixed_array()). Currently, the object
3650 // verification code has to cope with (temporarily) invalid objects. See
3651 // for example, JSArray::JSArrayVerify.
3653 // We cannot always fill with one_pointer_filler_map because objects
3654 // created from API functions expect their internal fields to be initialized
3655 // with undefined_value.
3656 // Pre-allocated fields need to be initialized with undefined_value as well
3657 // so that object accesses before the constructor completes (e.g. in the
3658 // debugger) will not cause a crash.
3659 if (map->constructor()->IsJSFunction() &&
3660 JSFunction::cast(map->constructor())
3661 ->IsInobjectSlackTrackingInProgress()) {
3662 // We might want to shrink the object later.
3663 DCHECK(obj->GetInternalFieldCount() == 0);
3664 filler = Heap::one_pointer_filler_map();
3666 filler = Heap::undefined_value();
3668 obj->InitializeBody(map, Heap::undefined_value(), filler);
3672 AllocationResult Heap::AllocateJSObjectFromMap(
3673 Map* map, PretenureFlag pretenure, bool allocate_properties,
3674 AllocationSite* allocation_site) {
3675 // JSFunctions should be allocated using AllocateFunction to be
3676 // properly initialized.
3677 DCHECK(map->instance_type() != JS_FUNCTION_TYPE);
3679 // Both types of global objects should be allocated using
3680 // AllocateGlobalObject to be properly initialized.
3681 DCHECK(map->instance_type() != JS_GLOBAL_OBJECT_TYPE);
3682 DCHECK(map->instance_type() != JS_BUILTINS_OBJECT_TYPE);
3684 // Allocate the backing storage for the properties.
3685 FixedArray* properties;
3686 if (allocate_properties) {
3687 int prop_size = map->InitialPropertiesLength();
3688 DCHECK(prop_size >= 0);
3690 AllocationResult allocation = AllocateFixedArray(prop_size, pretenure);
3691 if (!allocation.To(&properties)) return allocation;
3694 properties = empty_fixed_array();
3697 // Allocate the JSObject.
3698 int size = map->instance_size();
3699 AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, pretenure);
3701 AllocationResult allocation = Allocate(map, space, allocation_site);
3702 if (!allocation.To(&js_obj)) return allocation;
3704 // Initialize the JSObject.
3705 InitializeJSObjectFromMap(js_obj, properties, map);
3706 DCHECK(js_obj->HasFastElements() || js_obj->HasExternalArrayElements() ||
3707 js_obj->HasFixedTypedArrayElements());
3712 AllocationResult Heap::AllocateJSObject(JSFunction* constructor,
3713 PretenureFlag pretenure,
3714 AllocationSite* allocation_site) {
3715 DCHECK(constructor->has_initial_map());
3717 // Allocate the object based on the constructor's initial map.
3718 AllocationResult allocation = AllocateJSObjectFromMap(
3719 constructor->initial_map(), pretenure, true, allocation_site);
3721 // Make sure the result is NOT a global object if the allocation succeeded.
3723 DCHECK(!allocation.To(&obj) || !obj->IsGlobalObject());
3729 AllocationResult Heap::CopyJSObject(JSObject* source, AllocationSite* site) {
3730 // Never used to copy functions. If functions need to be copied we
3731 // have to be careful to clear the literals array.
3732 SLOW_DCHECK(!source->IsJSFunction());
3735 Map* map = source->map();
3736 int object_size = map->instance_size();
3739 DCHECK(site == NULL || AllocationSite::CanTrack(map->instance_type()));
3741 WriteBarrierMode wb_mode = UPDATE_WRITE_BARRIER;
3743 // If we're forced to always allocate, we use the general allocation
3744 // functions which may leave us with an object in old space.
3745 if (always_allocate()) {
3747 AllocationResult allocation =
3748 AllocateRaw(object_size, NEW_SPACE, OLD_POINTER_SPACE);
3749 if (!allocation.To(&clone)) return allocation;
3751 Address clone_address = clone->address();
3752 CopyBlock(clone_address, source->address(), object_size);
3753 // Update write barrier for all fields that lie beyond the header.
3754 RecordWrites(clone_address, JSObject::kHeaderSize,
3755 (object_size - JSObject::kHeaderSize) / kPointerSize);
3757 wb_mode = SKIP_WRITE_BARRIER;
3760 int adjusted_object_size =
3761 site != NULL ? object_size + AllocationMemento::kSize : object_size;
3762 AllocationResult allocation =
3763 AllocateRaw(adjusted_object_size, NEW_SPACE, NEW_SPACE);
3764 if (!allocation.To(&clone)) return allocation;
3766 SLOW_DCHECK(InNewSpace(clone));
3767 // Since we know the clone is allocated in new space, we can copy
3768 // the contents without worrying about updating the write barrier.
3769 CopyBlock(clone->address(), source->address(), object_size);
3772 AllocationMemento* alloc_memento = reinterpret_cast<AllocationMemento*>(
3773 reinterpret_cast<Address>(clone) + object_size);
3774 InitializeAllocationMemento(alloc_memento, site);
3778 SLOW_DCHECK(JSObject::cast(clone)->GetElementsKind() ==
3779 source->GetElementsKind());
3780 FixedArrayBase* elements = FixedArrayBase::cast(source->elements());
3781 FixedArray* properties = FixedArray::cast(source->properties());
3782 // Update elements if necessary.
3783 if (elements->length() > 0) {
3784 FixedArrayBase* elem;
3786 AllocationResult allocation;
3787 if (elements->map() == fixed_cow_array_map()) {
3788 allocation = FixedArray::cast(elements);
3789 } else if (source->HasFastDoubleElements()) {
3790 allocation = CopyFixedDoubleArray(FixedDoubleArray::cast(elements));
3792 allocation = CopyFixedArray(FixedArray::cast(elements));
3794 if (!allocation.To(&elem)) return allocation;
3796 JSObject::cast(clone)->set_elements(elem, wb_mode);
3798 // Update properties if necessary.
3799 if (properties->length() > 0) {
3802 AllocationResult allocation = CopyFixedArray(properties);
3803 if (!allocation.To(&prop)) return allocation;
3805 JSObject::cast(clone)->set_properties(prop, wb_mode);
3807 // Return the new clone.
3812 static inline void WriteOneByteData(Vector<const char> vector, uint8_t* chars,
3814 // Only works for ascii.
3815 DCHECK(vector.length() == len);
3816 MemCopy(chars, vector.start(), len);
3819 static inline void WriteTwoByteData(Vector<const char> vector, uint16_t* chars,
3821 const uint8_t* stream = reinterpret_cast<const uint8_t*>(vector.start());
3822 unsigned stream_length = vector.length();
3823 while (stream_length != 0) {
3824 unsigned consumed = 0;
3825 uint32_t c = unibrow::Utf8::ValueOf(stream, stream_length, &consumed);
3826 DCHECK(c != unibrow::Utf8::kBadChar);
3827 DCHECK(consumed <= stream_length);
3828 stream_length -= consumed;
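// Code points above the BMP (> kMaxNonSurrogateCharCode) do not fit in a
// single UTF-16 code unit and are written as a lead/trail surrogate pair.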
3830 if (c > unibrow::Utf16::kMaxNonSurrogateCharCode) {
3833 *chars++ = unibrow::Utf16::LeadSurrogate(c);
3834 *chars++ = unibrow::Utf16::TrailSurrogate(c);
3841 DCHECK(stream_length == 0);
3846 static inline void WriteOneByteData(String* s, uint8_t* chars, int len) {
3847 DCHECK(s->length() == len);
3848 String::WriteToFlat(s, chars, 0, len);
3852 static inline void WriteTwoByteData(String* s, uint16_t* chars, int len) {
3853 DCHECK(s->length() == len);
3854 String::WriteToFlat(s, chars, 0, len);
3858 template <bool is_one_byte, typename T>
3859 AllocationResult Heap::AllocateInternalizedStringImpl(T t, int chars,
3860 uint32_t hash_field) {
3862 // Compute map and object size.
3866 DCHECK_LE(0, chars);
3867 DCHECK_GE(String::kMaxLength, chars);
3869 map = ascii_internalized_string_map();
3870 size = SeqOneByteString::SizeFor(chars);
3872 map = internalized_string_map();
3873 size = SeqTwoByteString::SizeFor(chars);
3875 AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, TENURED);
3880 AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
3881 if (!allocation.To(&result)) return allocation;
3884 result->set_map_no_write_barrier(map);
3885 // Set length and hash fields of the allocated string.
3886 String* answer = String::cast(result);
3887 answer->set_length(chars);
3888 answer->set_hash_field(hash_field);
3890 DCHECK_EQ(size, answer->Size());
3893 WriteOneByteData(t, SeqOneByteString::cast(answer)->GetChars(), chars);
3895 WriteTwoByteData(t, SeqTwoByteString::cast(answer)->GetChars(), chars);
3901 // Need explicit instantiations.
3902 template AllocationResult Heap::AllocateInternalizedStringImpl<true>(String*,
3905 template AllocationResult Heap::AllocateInternalizedStringImpl<false>(String*,
3908 template AllocationResult Heap::AllocateInternalizedStringImpl<false>(
3909 Vector<const char>, int, uint32_t);
3912 AllocationResult Heap::AllocateRawOneByteString(int length,
3913 PretenureFlag pretenure) {
3914 DCHECK_LE(0, length);
3915 DCHECK_GE(String::kMaxLength, length);
3916 int size = SeqOneByteString::SizeFor(length);
3917 DCHECK(size <= SeqOneByteString::kMaxSize);
3918 AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
3922 AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
3923 if (!allocation.To(&result)) return allocation;
3926 // Partially initialize the object.
3927 result->set_map_no_write_barrier(ascii_string_map());
3928 String::cast(result)->set_length(length);
3929 String::cast(result)->set_hash_field(String::kEmptyHashField);
3930 DCHECK_EQ(size, HeapObject::cast(result)->Size());
3936 AllocationResult Heap::AllocateRawTwoByteString(int length,
3937 PretenureFlag pretenure) {
3938 DCHECK_LE(0, length);
3939 DCHECK_GE(String::kMaxLength, length);
3940 int size = SeqTwoByteString::SizeFor(length);
3941 DCHECK(size <= SeqTwoByteString::kMaxSize);
3942 AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
3946 AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
3947 if (!allocation.To(&result)) return allocation;
3950 // Partially initialize the object.
3951 result->set_map_no_write_barrier(string_map());
3952 String::cast(result)->set_length(length);
3953 String::cast(result)->set_hash_field(String::kEmptyHashField);
3954 DCHECK_EQ(size, HeapObject::cast(result)->Size());
3959 AllocationResult Heap::AllocateEmptyFixedArray() {
3960 int size = FixedArray::SizeFor(0);
3963 AllocationResult allocation =
3964 AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE);
3965 if (!allocation.To(&result)) return allocation;
3967 // Initialize the object.
3968 result->set_map_no_write_barrier(fixed_array_map());
3969 FixedArray::cast(result)->set_length(0);
3974 AllocationResult Heap::AllocateEmptyExternalArray(
3975 ExternalArrayType array_type) {
3976 return AllocateExternalArray(0, array_type, NULL, TENURED);
3980 AllocationResult Heap::CopyAndTenureFixedCOWArray(FixedArray* src) {
3981 if (!InNewSpace(src)) {
3985 int len = src->length();
3988 AllocationResult allocation = AllocateRawFixedArray(len, TENURED);
3989 if (!allocation.To(&obj)) return allocation;
3991 obj->set_map_no_write_barrier(fixed_array_map());
3992 FixedArray* result = FixedArray::cast(obj);
3993 result->set_length(len);
3996 DisallowHeapAllocation no_gc;
3997 WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
3998 for (int i = 0; i < len; i++) result->set(i, src->get(i), mode);
4000 // TODO(mvstanton): The map is set twice because of protection against calling
4001 // set() on a COW FixedArray. Issue v8:3221 created to track this, and
4002 // we might then be able to remove this whole method.
4003 HeapObject::cast(obj)->set_map_no_write_barrier(fixed_cow_array_map());
4008 AllocationResult Heap::AllocateEmptyFixedTypedArray(
4009 ExternalArrayType array_type) {
4010 return AllocateFixedTypedArray(0, array_type, TENURED);
4014 AllocationResult Heap::CopyFixedArrayWithMap(FixedArray* src, Map* map) {
4015 int len = src->length();
4018 AllocationResult allocation = AllocateRawFixedArray(len, NOT_TENURED);
4019 if (!allocation.To(&obj)) return allocation;
4021 if (InNewSpace(obj)) {
4022 obj->set_map_no_write_barrier(map);
4023 CopyBlock(obj->address() + kPointerSize, src->address() + kPointerSize,
4024 FixedArray::SizeFor(len) - kPointerSize);
4027 obj->set_map_no_write_barrier(map);
4028 FixedArray* result = FixedArray::cast(obj);
4029 result->set_length(len);
4032 DisallowHeapAllocation no_gc;
4033 WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
4034 for (int i = 0; i < len; i++) result->set(i, src->get(i), mode);
4039 AllocationResult Heap::CopyFixedDoubleArrayWithMap(FixedDoubleArray* src,
4041 int len = src->length();
4044 AllocationResult allocation = AllocateRawFixedDoubleArray(len, NOT_TENURED);
4045 if (!allocation.To(&obj)) return allocation;
4047 obj->set_map_no_write_barrier(map);
4048 CopyBlock(obj->address() + FixedDoubleArray::kLengthOffset,
4049 src->address() + FixedDoubleArray::kLengthOffset,
4050 FixedDoubleArray::SizeFor(len) - FixedDoubleArray::kLengthOffset);
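// Copying from kLengthOffset onwards transfers the length field and the raw
// double payload in one block; no write barrier is needed since the array
// body contains no heap pointers.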
4055 AllocationResult Heap::CopyConstantPoolArrayWithMap(ConstantPoolArray* src,
4058 if (src->is_extended_layout()) {
4059 ConstantPoolArray::NumberOfEntries small(src,
4060 ConstantPoolArray::SMALL_SECTION);
4061 ConstantPoolArray::NumberOfEntries extended(
4062 src, ConstantPoolArray::EXTENDED_SECTION);
4063 AllocationResult allocation =
4064 AllocateExtendedConstantPoolArray(small, extended);
4065 if (!allocation.To(&obj)) return allocation;
4067 ConstantPoolArray::NumberOfEntries small(src,
4068 ConstantPoolArray::SMALL_SECTION);
4069 AllocationResult allocation = AllocateConstantPoolArray(small);
4070 if (!allocation.To(&obj)) return allocation;
4072 obj->set_map_no_write_barrier(map);
4073 CopyBlock(obj->address() + ConstantPoolArray::kFirstEntryOffset,
4074 src->address() + ConstantPoolArray::kFirstEntryOffset,
4075 src->size() - ConstantPoolArray::kFirstEntryOffset);
4080 AllocationResult Heap::AllocateRawFixedArray(int length,
4081 PretenureFlag pretenure) {
4082 if (length < 0 || length > FixedArray::kMaxLength) {
4083 v8::internal::Heap::FatalProcessOutOfMemory("invalid array length", true);
4085 int size = FixedArray::SizeFor(length);
4086 AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, pretenure);
4088 return AllocateRaw(size, space, OLD_POINTER_SPACE);
4092 AllocationResult Heap::AllocateFixedArrayWithFiller(int length,
4093 PretenureFlag pretenure,
4095 DCHECK(length >= 0);
4096 DCHECK(empty_fixed_array()->IsFixedArray());
4097 if (length == 0) return empty_fixed_array();
4099 DCHECK(!InNewSpace(filler));
4102 AllocationResult allocation = AllocateRawFixedArray(length, pretenure);
4103 if (!allocation.To(&result)) return allocation;
4106 result->set_map_no_write_barrier(fixed_array_map());
4107 FixedArray* array = FixedArray::cast(result);
4108 array->set_length(length);
4109 MemsetPointer(array->data_start(), filler, length);
4114 AllocationResult Heap::AllocateFixedArray(int length, PretenureFlag pretenure) {
4115 return AllocateFixedArrayWithFiller(length, pretenure, undefined_value());
4119 AllocationResult Heap::AllocateUninitializedFixedArray(int length) {
4120 if (length == 0) return empty_fixed_array();
4124 AllocationResult allocation = AllocateRawFixedArray(length, NOT_TENURED);
4125 if (!allocation.To(&obj)) return allocation;
4128 obj->set_map_no_write_barrier(fixed_array_map());
4129 FixedArray::cast(obj)->set_length(length);
4134 AllocationResult Heap::AllocateUninitializedFixedDoubleArray(
4135 int length, PretenureFlag pretenure) {
4136 if (length == 0) return empty_fixed_array();
4138 HeapObject* elements;
4139 AllocationResult allocation = AllocateRawFixedDoubleArray(length, pretenure);
4140 if (!allocation.To(&elements)) return allocation;
4142 elements->set_map_no_write_barrier(fixed_double_array_map());
4143 FixedDoubleArray::cast(elements)->set_length(length);
4148 AllocationResult Heap::AllocateRawFixedDoubleArray(int length,
4149 PretenureFlag pretenure) {
4150 if (length < 0 || length > FixedDoubleArray::kMaxLength) {
4151 v8::internal::Heap::FatalProcessOutOfMemory("invalid array length", true);
4153 int size = FixedDoubleArray::SizeFor(length);
4154 #ifndef V8_HOST_ARCH_64_BIT
4155 size += kPointerSize;
4157 AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
4161 AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
4162 if (!allocation.To(&object)) return allocation;
4165 return EnsureDoubleAligned(this, object, size);
4169 AllocationResult Heap::AllocateConstantPoolArray(
4170 const ConstantPoolArray::NumberOfEntries& small) {
4171 CHECK(small.are_in_range(0, ConstantPoolArray::kMaxSmallEntriesPerType));
4172 int size = ConstantPoolArray::SizeFor(small);
4173 #ifndef V8_HOST_ARCH_64_BIT
4174 size += kPointerSize;
4176 AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, TENURED);
4180 AllocationResult allocation = AllocateRaw(size, space, OLD_POINTER_SPACE);
4181 if (!allocation.To(&object)) return allocation;
4183 object = EnsureDoubleAligned(this, object, size);
4184 object->set_map_no_write_barrier(constant_pool_array_map());
4186 ConstantPoolArray* constant_pool = ConstantPoolArray::cast(object);
4187 constant_pool->Init(small);
4188 constant_pool->ClearPtrEntries(isolate());
4189 return constant_pool;
4193 AllocationResult Heap::AllocateExtendedConstantPoolArray(
4194 const ConstantPoolArray::NumberOfEntries& small,
4195 const ConstantPoolArray::NumberOfEntries& extended) {
4196 CHECK(small.are_in_range(0, ConstantPoolArray::kMaxSmallEntriesPerType));
4197 CHECK(extended.are_in_range(0, kMaxInt));
4198 int size = ConstantPoolArray::SizeForExtended(small, extended);
4199 #ifndef V8_HOST_ARCH_64_BIT
4200 size += kPointerSize;
4202 AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, TENURED);
4206 AllocationResult allocation = AllocateRaw(size, space, OLD_POINTER_SPACE);
4207 if (!allocation.To(&object)) return allocation;
4209 object = EnsureDoubleAligned(this, object, size);
4210 object->set_map_no_write_barrier(constant_pool_array_map());
4212 ConstantPoolArray* constant_pool = ConstantPoolArray::cast(object);
4213 constant_pool->InitExtended(small, extended);
4214 constant_pool->ClearPtrEntries(isolate());
4215 return constant_pool;
4219 AllocationResult Heap::AllocateEmptyConstantPoolArray() {
4220 ConstantPoolArray::NumberOfEntries small(0, 0, 0, 0);
4221 int size = ConstantPoolArray::SizeFor(small);
4224 AllocationResult allocation =
4225 AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE);
4226 if (!allocation.To(&result)) return allocation;
4228 result->set_map_no_write_barrier(constant_pool_array_map());
4229 ConstantPoolArray::cast(result)->Init(small);
4234 AllocationResult Heap::AllocateSymbol() {
4235 // Statically ensure that it is safe to allocate symbols in paged spaces.
4236 STATIC_ASSERT(Symbol::kSize <= Page::kMaxRegularHeapObjectSize);
4239 AllocationResult allocation =
4240 AllocateRaw(Symbol::kSize, OLD_POINTER_SPACE, OLD_POINTER_SPACE);
4241 if (!allocation.To(&result)) return allocation;
4243 result->set_map_no_write_barrier(symbol_map());
4245 // Generate a random hash value.
4249 hash = isolate()->random_number_generator()->NextInt() & Name::kHashBitMask;
4251 } while (hash == 0 && attempts < 30);
4252 if (hash == 0) hash = 1; // never return 0
4254 Symbol::cast(result)
4255 ->set_hash_field(Name::kIsNotArrayIndexMask | (hash << Name::kHashShift));
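// The hash field marks the symbol as not being an array index and stores the
// freshly generated random hash in the regular hash position.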
4256 Symbol::cast(result)->set_name(undefined_value());
4257 Symbol::cast(result)->set_flags(Smi::FromInt(0));
4259 DCHECK(!Symbol::cast(result)->is_private());
4264 AllocationResult Heap::AllocateStruct(InstanceType type) {
4267 #define MAKE_CASE(NAME, Name, name) \
4269 map = name##_map(); \
4271 STRUCT_LIST(MAKE_CASE)
4277 int size = map->instance_size();
4278 AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, TENURED);
4281 AllocationResult allocation = Allocate(map, space);
4282 if (!allocation.To(&result)) return allocation;
4284 result->InitializeBody(size);
4289 bool Heap::IsHeapIterable() {
4290 // TODO(hpayer): This function is not correct. Allocation folding in old
4291 // space breaks the iterability.
4292 return (old_pointer_space()->swept_precisely() &&
4293 old_data_space()->swept_precisely() &&
4294 new_space_top_after_last_gc_ == new_space()->top());
4298 void Heap::MakeHeapIterable() {
4299 DCHECK(AllowHeapAllocation::IsAllowed());
4300 if (!IsHeapIterable()) {
4301 CollectAllGarbage(kMakeHeapIterableMask, "Heap::MakeHeapIterable");
4303 if (mark_compact_collector()->sweeping_in_progress()) {
4304 mark_compact_collector()->EnsureSweepingCompleted();
4306 DCHECK(IsHeapIterable());
4310 void Heap::AdvanceIdleIncrementalMarking(intptr_t step_size) {
4311 incremental_marking()->Step(step_size,
4312 IncrementalMarking::NO_GC_VIA_STACK_GUARD, true);
4314 if (incremental_marking()->IsComplete()) {
4315 bool uncommit = false;
4316 if (gc_count_at_last_idle_gc_ == gc_count_) {
4317 // No GC since the last full GC, the mutator is probably not active.
4318 isolate_->compilation_cache()->Clear();
4321 CollectAllGarbage(kReduceMemoryFootprintMask,
4322 "idle notification: finalize incremental");
4323 mark_sweeps_since_idle_round_started_++;
4324 gc_count_at_last_idle_gc_ = gc_count_;
4326 new_space_.Shrink();
4327 UncommitFromSpace();
4333 bool Heap::IdleNotification(int hint) {
4334 // If incremental marking is off, we do not perform idle notification.
4335 if (!FLAG_incremental_marking) return true;
4337 // Hints greater than this value indicate that
4338 // the embedder is requesting a lot of GC work.
4339 const int kMaxHint = 1000;
4340 const int kMinHintForIncrementalMarking = 10;
4341 // Minimal hint that allows a full GC.
4342 const int kMinHintForFullGC = 100;
4343 intptr_t size_factor = Min(Max(hint, 20), kMaxHint) / 4;
4344 // The size factor is in range [5..250]. The numbers here are chosen from
4345 // experiments. If you change them, make sure to test with
4346 // chrome/performance_ui_tests --gtest_filter="GeneralMixMemoryTest.*
4347 intptr_t step_size = size_factor * IncrementalMarking::kAllocatedThreshold;
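// For example, a hint of 100 yields size_factor = Min(Max(100, 20), 1000) / 4
// = 25, so each idle step covers 25 * IncrementalMarking::kAllocatedThreshold
// bytes of marking work.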
4349 isolate()->counters()->gc_idle_time_allotted_in_ms()->AddSample(hint);
4350 HistogramTimerScope idle_notification_scope(
4351 isolate_->counters()->gc_idle_notification());
4353 if (contexts_disposed_ > 0) {
4354 contexts_disposed_ = 0;
4355 int mark_sweep_time = Min(TimeMarkSweepWouldTakeInMs(), 1000);
4356 if (hint >= mark_sweep_time && !FLAG_expose_gc &&
4357 incremental_marking()->IsStopped()) {
4358 HistogramTimerScope scope(isolate_->counters()->gc_context());
4359 CollectAllGarbage(kReduceMemoryFootprintMask,
4360 "idle notification: contexts disposed");
4362 AdvanceIdleIncrementalMarking(step_size);
4365 // After context disposal there is likely a lot of garbage remaining, reset
4366 // the idle notification counters in order to trigger more incremental GCs
4367 // on subsequent idle notifications.
4372 // By doing small chunks of GC work in each IdleNotification,
4373 // perform a round of incremental GCs and after that wait until
4374 // the mutator creates enough garbage to justify a new round.
4375 // An incremental GC progresses as follows:
4376 // 1. many incremental marking steps,
4377 // 2. one old space mark-sweep-compact,
4378 // Use mark-sweep-compact events to count incremental GCs in a round.
4380 if (mark_sweeps_since_idle_round_started_ >= kMaxMarkSweepsInIdleRound) {
4381 if (EnoughGarbageSinceLastIdleRound()) {
4388 int remaining_mark_sweeps =
4389 kMaxMarkSweepsInIdleRound - mark_sweeps_since_idle_round_started_;
4391 if (incremental_marking()->IsStopped()) {
4392 // If there are no more than two GCs left in this idle round and we are
4393 // allowed to do a full GC, then make those GCs full in order to compact
4395 // TODO(ulan): Once we enable code compaction for incremental marking,
4396 // we can get rid of this special case and always start incremental marking.
4397 if (remaining_mark_sweeps <= 2 && hint >= kMinHintForFullGC) {
4398 CollectAllGarbage(kReduceMemoryFootprintMask,
4399 "idle notification: finalize idle round");
4400 mark_sweeps_since_idle_round_started_++;
4401 } else if (hint > kMinHintForIncrementalMarking) {
4402 incremental_marking()->Start();
4405 if (!incremental_marking()->IsStopped() &&
4406 hint > kMinHintForIncrementalMarking) {
4407 AdvanceIdleIncrementalMarking(step_size);
4410 if (mark_sweeps_since_idle_round_started_ >= kMaxMarkSweepsInIdleRound) {
4415 // If IdleNotification is called with a large hint, we wait for the
4416 // sweeper threads here.
4417 if (hint >= kMinHintForFullGC &&
4418 mark_compact_collector()->sweeping_in_progress()) {
4419 mark_compact_collector()->EnsureSweepingCompleted();
4428 void Heap::Print() {
4429 if (!HasBeenSetUp()) return;
4430 isolate()->PrintStack(stdout);
4431 AllSpaces spaces(this);
4432 for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
4438 void Heap::ReportCodeStatistics(const char* title) {
4439 PrintF(">>>>>> Code Stats (%s) >>>>>>\n", title);
4440 PagedSpace::ResetCodeStatistics(isolate());
4441 // We do not look for code in new space, map space, or old space. If code
4442 // somehow ends up in those spaces, we would miss it here.
4443 code_space_->CollectCodeStatistics();
4444 lo_space_->CollectCodeStatistics();
4445 PagedSpace::ReportCodeStatistics(isolate());
4449 // This function expects that NewSpace's allocated objects histogram is
4450 // populated (via a call to CollectStatistics or else as a side effect of a
4451 // just-completed scavenge collection).
4452 void Heap::ReportHeapStatistics(const char* title) {
4454 PrintF(">>>>>> =============== %s (%d) =============== >>>>>>\n", title,
4456 PrintF("old_generation_allocation_limit_ %" V8_PTR_PREFIX "d\n",
4457 old_generation_allocation_limit_);
4460 PrintF("Number of handles : %d\n", HandleScope::NumberOfHandles(isolate_));
4461 isolate_->global_handles()->PrintStats();
4464 PrintF("Heap statistics : ");
4465 isolate_->memory_allocator()->ReportStatistics();
4466 PrintF("To space : ");
4467 new_space_.ReportStatistics();
4468 PrintF("Old pointer space : ");
4469 old_pointer_space_->ReportStatistics();
4470 PrintF("Old data space : ");
4471 old_data_space_->ReportStatistics();
4472 PrintF("Code space : ");
4473 code_space_->ReportStatistics();
4474 PrintF("Map space : ");
4475 map_space_->ReportStatistics();
4476 PrintF("Cell space : ");
4477 cell_space_->ReportStatistics();
4478 PrintF("PropertyCell space : ");
4479 property_cell_space_->ReportStatistics();
4480 PrintF("Large object space : ");
4481 lo_space_->ReportStatistics();
4482 PrintF(">>>>>> ========================================= >>>>>>\n");
4487 bool Heap::Contains(HeapObject* value) { return Contains(value->address()); }
4490 bool Heap::Contains(Address addr) {
4491 if (isolate_->memory_allocator()->IsOutsideAllocatedSpace(addr)) return false;
4492 return HasBeenSetUp() &&
4493 (new_space_.ToSpaceContains(addr) ||
4494 old_pointer_space_->Contains(addr) ||
4495 old_data_space_->Contains(addr) || code_space_->Contains(addr) ||
4496 map_space_->Contains(addr) || cell_space_->Contains(addr) ||
4497 property_cell_space_->Contains(addr) ||
4498 lo_space_->SlowContains(addr));
4502 bool Heap::InSpace(HeapObject* value, AllocationSpace space) {
4503 return InSpace(value->address(), space);
4507 bool Heap::InSpace(Address addr, AllocationSpace space) {
4508 if (isolate_->memory_allocator()->IsOutsideAllocatedSpace(addr)) return false;
4509 if (!HasBeenSetUp()) return false;
4513 return new_space_.ToSpaceContains(addr);
4514 case OLD_POINTER_SPACE:
4515 return old_pointer_space_->Contains(addr);
4516 case OLD_DATA_SPACE:
4517 return old_data_space_->Contains(addr);
4519 return code_space_->Contains(addr);
4521 return map_space_->Contains(addr);
4523 return cell_space_->Contains(addr);
4524 case PROPERTY_CELL_SPACE:
4525 return property_cell_space_->Contains(addr);
4527 return lo_space_->SlowContains(addr);
4537 void Heap::Verify() {
4538 CHECK(HasBeenSetUp());
4539 HandleScope scope(isolate());
4541 store_buffer()->Verify();
4543 if (mark_compact_collector()->sweeping_in_progress()) {
4544 // We have to wait here for the sweeper threads to have an iterable heap.
4545 mark_compact_collector()->EnsureSweepingCompleted();
4548 VerifyPointersVisitor visitor;
4549 IterateRoots(&visitor, VISIT_ONLY_STRONG);
4551 VerifySmisVisitor smis_visitor;
4552 IterateSmiRoots(&smis_visitor);
4554 new_space_.Verify();
4556 old_pointer_space_->Verify(&visitor);
4557 map_space_->Verify(&visitor);
4559 VerifyPointersVisitor no_dirty_regions_visitor;
4560 old_data_space_->Verify(&no_dirty_regions_visitor);
4561 code_space_->Verify(&no_dirty_regions_visitor);
4562 cell_space_->Verify(&no_dirty_regions_visitor);
4563 property_cell_space_->Verify(&no_dirty_regions_visitor);
4565 lo_space_->Verify();
4570 void Heap::ZapFromSpace() {
4571 NewSpacePageIterator it(new_space_.FromSpaceStart(),
4572 new_space_.FromSpaceEnd());
4573 while (it.has_next()) {
4574 NewSpacePage* page = it.next();
4575 for (Address cursor = page->area_start(), limit = page->area_end();
4576 cursor < limit; cursor += kPointerSize) {
4577 Memory::Address_at(cursor) = kFromSpaceZapValue;
4583 void Heap::IterateAndMarkPointersToFromSpace(Address start, Address end,
4584 ObjectSlotCallback callback) {
4585 Address slot_address = start;
4587 // We are not collecting slots on new space objects during mutation,
4588 // thus we have to scan for pointers to evacuation candidates when we
4589 // promote objects. But we should not record any slots in non-black
4590 // objects. A grey object's slots would be rescanned.
4591 // A white object might not survive until the end of the collection, so
4592 // it would be a violation of the invariant to record its slots.
4593 bool record_slots = false;
4594 if (incremental_marking()->IsCompacting()) {
4595 MarkBit mark_bit = Marking::MarkBitFrom(HeapObject::FromAddress(start));
4596 record_slots = Marking::IsBlack(mark_bit);
4599 while (slot_address < end) {
4600 Object** slot = reinterpret_cast<Object**>(slot_address);
4601 Object* object = *slot;
4602 // If the store buffer becomes overfull we mark pages as being exempt from
4603 // the store buffer. These pages are scanned to find pointers that point
4604 // to the new space. In that case we may hit newly promoted objects and
4605 // fix the pointers before the promotion queue gets to them. Thus the 'if'.
4606 if (object->IsHeapObject()) {
4607 if (Heap::InFromSpace(object)) {
4608 callback(reinterpret_cast<HeapObject**>(slot),
4609 HeapObject::cast(object));
4610 Object* new_object = *slot;
4611 if (InNewSpace(new_object)) {
4612 SLOW_DCHECK(Heap::InToSpace(new_object));
4613 SLOW_DCHECK(new_object->IsHeapObject());
4614 store_buffer_.EnterDirectlyIntoStoreBuffer(
4615 reinterpret_cast<Address>(slot));
4617 SLOW_DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(new_object));
4618 } else if (record_slots &&
4619 MarkCompactCollector::IsOnEvacuationCandidate(object)) {
4620 mark_compact_collector()->RecordSlot(slot, slot, object);
4623 slot_address += kPointerSize;
4629 typedef bool (*CheckStoreBufferFilter)(Object** addr);
4632 bool IsAMapPointerAddress(Object** addr) {
4633 uintptr_t a = reinterpret_cast<uintptr_t>(addr);
4634 int mod = a % Map::kSize;
4635 return mod >= Map::kPointerFieldsBeginOffset &&
4636 mod < Map::kPointerFieldsEndOffset;
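// Heuristic: interprets the address as an offset into a run of Map-sized
// objects and reports whether it falls inside a Map's pointer-field range.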
4640 bool EverythingsAPointer(Object** addr) { return true; }
4643 static void CheckStoreBuffer(Heap* heap, Object** current, Object** limit,
4644 Object**** store_buffer_position,
4645 Object*** store_buffer_top,
4646 CheckStoreBufferFilter filter,
4647 Address special_garbage_start,
4648 Address special_garbage_end) {
4649 Map* free_space_map = heap->free_space_map();
4650 for (; current < limit; current++) {
4651 Object* o = *current;
4652 Address current_address = reinterpret_cast<Address>(current);
4654 if (o == free_space_map) {
4655 Address current_address = reinterpret_cast<Address>(current);
4656 FreeSpace* free_space =
4657 FreeSpace::cast(HeapObject::FromAddress(current_address));
4658 int skip = free_space->Size();
4659 DCHECK(current_address + skip <= reinterpret_cast<Address>(limit));
4661 current_address += skip - kPointerSize;
4662 current = reinterpret_cast<Object**>(current_address);
4665 // Skip the current linear allocation space between top and limit which is
4666 // unmarked with the free space map, but can contain junk.
4667 if (current_address == special_garbage_start &&
4668 special_garbage_end != special_garbage_start) {
4669 current_address = special_garbage_end - kPointerSize;
4670 current = reinterpret_cast<Object**>(current_address);
4673 if (!(*filter)(current)) continue;
4674 DCHECK(current_address < special_garbage_start ||
4675 current_address >= special_garbage_end);
4676 DCHECK(reinterpret_cast<uintptr_t>(o) != kFreeListZapValue);
4677 // We have to check that the pointer does not point into new space
4678 // without trying to cast it to a heap object since the hash field of
4679 // a string can contain values like 1 and 3 which are tagged null
4681 if (!heap->InNewSpace(o)) continue;
4682 while (**store_buffer_position < current &&
4683 *store_buffer_position < store_buffer_top) {
4684 (*store_buffer_position)++;
4686 if (**store_buffer_position != current ||
4687 *store_buffer_position == store_buffer_top) {
4688 Object** obj_start = current;
4689 while (!(*obj_start)->IsMap()) obj_start--;
4696 // Check that the store buffer contains all intergenerational pointers by
4697 // scanning a page and ensuring that all pointers to young space are in the
4699 void Heap::OldPointerSpaceCheckStoreBuffer() {
4700 OldSpace* space = old_pointer_space();
4701 PageIterator pages(space);
4703 store_buffer()->SortUniq();
4705 while (pages.has_next()) {
4706 Page* page = pages.next();
4707 Object** current = reinterpret_cast<Object**>(page->area_start());
4709 Address end = page->area_end();
4711 Object*** store_buffer_position = store_buffer()->Start();
4712 Object*** store_buffer_top = store_buffer()->Top();
4714 Object** limit = reinterpret_cast<Object**>(end);
4715 CheckStoreBuffer(this, current, limit, &store_buffer_position,
4716 store_buffer_top, &EverythingsAPointer, space->top(),
4722 void Heap::MapSpaceCheckStoreBuffer() {
4723 MapSpace* space = map_space();
4724 PageIterator pages(space);
4726 store_buffer()->SortUniq();
4728 while (pages.has_next()) {
4729 Page* page = pages.next();
4730 Object** current = reinterpret_cast<Object**>(page->area_start());
4732 Address end = page->area_end();
4734 Object*** store_buffer_position = store_buffer()->Start();
4735 Object*** store_buffer_top = store_buffer()->Top();
4737 Object** limit = reinterpret_cast<Object**>(end);
4738 CheckStoreBuffer(this, current, limit, &store_buffer_position,
4739 store_buffer_top, &IsAMapPointerAddress, space->top(),
4745 void Heap::LargeObjectSpaceCheckStoreBuffer() {
4746 LargeObjectIterator it(lo_space());
4747 for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
4748 // We only have code, sequential strings, or fixed arrays in large
4749 // object space, and only fixed arrays can possibly contain pointers to
4750 // the young generation.
4751 if (object->IsFixedArray()) {
4752 Object*** store_buffer_position = store_buffer()->Start();
4753 Object*** store_buffer_top = store_buffer()->Top();
4754 Object** current = reinterpret_cast<Object**>(object->address());
4756 reinterpret_cast<Object**>(object->address() + object->Size());
4757 CheckStoreBuffer(this, current, limit, &store_buffer_position,
4758 store_buffer_top, &EverythingsAPointer, NULL, NULL);
4765 void Heap::IterateRoots(ObjectVisitor* v, VisitMode mode) {
4766 IterateStrongRoots(v, mode);
4767 IterateWeakRoots(v, mode);
4771 void Heap::IterateWeakRoots(ObjectVisitor* v, VisitMode mode) {
4772 v->VisitPointer(reinterpret_cast<Object**>(&roots_[kStringTableRootIndex]));
4773 v->Synchronize(VisitorSynchronization::kStringTable);
4774 if (mode != VISIT_ALL_IN_SCAVENGE && mode != VISIT_ALL_IN_SWEEP_NEWSPACE) {
4775 // Scavenge collections have special processing for this.
4776 external_string_table_.Iterate(v);
4778 v->Synchronize(VisitorSynchronization::kExternalStringsTable);
4782 void Heap::IterateSmiRoots(ObjectVisitor* v) {
4783 // Acquire execution access since we are going to read stack limit values.
4784 ExecutionAccess access(isolate());
4785 v->VisitPointers(&roots_[kSmiRootsStart], &roots_[kRootListLength]);
4786 v->Synchronize(VisitorSynchronization::kSmiRootList);
4790 void Heap::IterateStrongRoots(ObjectVisitor* v, VisitMode mode) {
4791 v->VisitPointers(&roots_[0], &roots_[kStrongRootListLength]);
4792 v->Synchronize(VisitorSynchronization::kStrongRootList);
4794 v->VisitPointer(BitCast<Object**>(&hidden_string_));
4795 v->Synchronize(VisitorSynchronization::kInternalizedString);
4797 isolate_->bootstrapper()->Iterate(v);
4798 v->Synchronize(VisitorSynchronization::kBootstrapper);
4799 isolate_->Iterate(v);
4800 v->Synchronize(VisitorSynchronization::kTop);
4801 Relocatable::Iterate(isolate_, v);
4802 v->Synchronize(VisitorSynchronization::kRelocatable);
4804 if (isolate_->deoptimizer_data() != NULL) {
4805 isolate_->deoptimizer_data()->Iterate(v);
4807 v->Synchronize(VisitorSynchronization::kDebug);
4808 isolate_->compilation_cache()->Iterate(v);
4809 v->Synchronize(VisitorSynchronization::kCompilationCache);
4811 // Iterate over local handles in handle scopes.
4812 isolate_->handle_scope_implementer()->Iterate(v);
4813 isolate_->IterateDeferredHandles(v);
4814 v->Synchronize(VisitorSynchronization::kHandleScope);
4816 // Iterate over the builtin code objects and code stubs in the
4817 // heap. Note that it is not necessary to iterate over code objects
4818 // on scavenge collections.
4819 if (mode != VISIT_ALL_IN_SCAVENGE) {
4820 isolate_->builtins()->IterateBuiltins(v);
4822 v->Synchronize(VisitorSynchronization::kBuiltins);
4824 // Iterate over global handles.
4826 case VISIT_ONLY_STRONG:
4827 isolate_->global_handles()->IterateStrongRoots(v);
4829 case VISIT_ALL_IN_SCAVENGE:
4830 isolate_->global_handles()->IterateNewSpaceStrongAndDependentRoots(v);
4832 case VISIT_ALL_IN_SWEEP_NEWSPACE:
4834 isolate_->global_handles()->IterateAllRoots(v);
4837 v->Synchronize(VisitorSynchronization::kGlobalHandles);
4839 // Iterate over eternal handles.
4840 if (mode == VISIT_ALL_IN_SCAVENGE) {
4841 isolate_->eternal_handles()->IterateNewSpaceRoots(v);
4843 isolate_->eternal_handles()->IterateAllRoots(v);
4845 v->Synchronize(VisitorSynchronization::kEternalHandles);
4847 // Iterate over pointers being held by inactive threads.
4848 isolate_->thread_manager()->Iterate(v);
4849 v->Synchronize(VisitorSynchronization::kThreadManager);
4851 // Iterate over the pointers the Serialization/Deserialization code is
4853 // During garbage collection this keeps the partial snapshot cache alive.
4854 // During deserialization of the startup snapshot this creates the partial
4855 // snapshot cache and deserializes the objects it refers to. During
4856 // serialization this does nothing, since the partial snapshot cache is
4857 // empty. However the next thing we do is create the partial snapshot,
4858 // filling up the partial snapshot cache with objects it needs as we go.
4859 SerializerDeserializer::Iterate(isolate_, v);
4860 // We don't do a v->Synchronize call here, because in debug mode that will
4861 // output a flag to the snapshot. However at this point the serializer and
4862 // deserializer are deliberately a little unsynchronized (see above) so the
4863 // checking of the sync flag in the snapshot would fail.
4867 // TODO(1236194): Since the heap size is configurable on the command line
4868 // and through the API, we should gracefully handle the case that the heap
4869 // size is not big enough to fit all the initial objects.
4870 bool Heap::ConfigureHeap(int max_semi_space_size, int max_old_space_size,
4871 int max_executable_size, size_t code_range_size) {
4872 if (HasBeenSetUp()) return false;
4874 // Overwrite default configuration.
4875 if (max_semi_space_size > 0) {
4876 max_semi_space_size_ = max_semi_space_size * MB;
4878 if (max_old_space_size > 0) {
4879 max_old_generation_size_ = max_old_space_size * MB;
4881 if (max_executable_size > 0) {
4882 max_executable_size_ = max_executable_size * MB;
4885 // If max space size flags are specified overwrite the configuration.
4886 if (FLAG_max_semi_space_size > 0) {
4887 max_semi_space_size_ = FLAG_max_semi_space_size * MB;
4889 if (FLAG_max_old_space_size > 0) {
4890 max_old_generation_size_ = FLAG_max_old_space_size * MB;
4892 if (FLAG_max_executable_size > 0) {
4893 max_executable_size_ = FLAG_max_executable_size * MB;
4896 if (FLAG_stress_compaction) {
4897 // This will cause more frequent GCs when stressing.
4898 max_semi_space_size_ = Page::kPageSize;
4901 if (Snapshot::HaveASnapshotToStartFrom()) {
4902 // If we are using a snapshot we always reserve the default amount
4903 // of memory for each semispace because code in the snapshot has
4904 // write-barrier code that relies on the size and alignment of new
4905 // space. We therefore cannot use a larger max semispace size
4906 // than the default reserved semispace size.
4907 if (max_semi_space_size_ > reserved_semispace_size_) {
4908 max_semi_space_size_ = reserved_semispace_size_;
4909 if (FLAG_trace_gc) {
4910 PrintPID("Max semi-space size cannot be more than %d kbytes\n",
4911 reserved_semispace_size_ >> 10);
4915 // If we are not using snapshots we reserve space for the actual
4916 // max semispace size.
4917 reserved_semispace_size_ = max_semi_space_size_;
4920 // The max executable size must be less than or equal to the max old
4922 if (max_executable_size_ > max_old_generation_size_) {
4923 max_executable_size_ = max_old_generation_size_;
4926 // The new space size must be a power of two to support single-bit testing
4928 max_semi_space_size_ = RoundUpToPowerOf2(max_semi_space_size_);
4929 reserved_semispace_size_ = RoundUpToPowerOf2(reserved_semispace_size_);
4931 if (FLAG_min_semi_space_size > 0) {
4932 int initial_semispace_size = FLAG_min_semi_space_size * MB;
4933 if (initial_semispace_size > max_semi_space_size_) {
4934 initial_semispace_size_ = max_semi_space_size_;
4935 if (FLAG_trace_gc) {
4937 "Min semi-space size cannot be more than the maximum"
4938 "semi-space size of %d MB\n",
4939 max_semi_space_size_);
4942 initial_semispace_size_ = initial_semispace_size;
4946 initial_semispace_size_ = Min(initial_semispace_size_, max_semi_space_size_);
4948 // The old generation is paged and needs at least one page for each space.
4949 int paged_space_count = LAST_PAGED_SPACE - FIRST_PAGED_SPACE + 1;
4950 max_old_generation_size_ =
4951 Max(static_cast<intptr_t>(paged_space_count * Page::kPageSize),
4952 max_old_generation_size_);
4954 // We rely on being able to allocate new arrays in paged spaces.
4955 DCHECK(Page::kMaxRegularHeapObjectSize >=
4957 FixedArray::SizeFor(JSObject::kInitialMaxFastElementArray) +
4958 AllocationMemento::kSize));
4960 code_range_size_ = code_range_size * MB;
4967 bool Heap::ConfigureHeapDefault() { return ConfigureHeap(0, 0, 0, 0); }
4970 void Heap::RecordStats(HeapStats* stats, bool take_snapshot) {
4971 *stats->start_marker = HeapStats::kStartMarker;
4972 *stats->end_marker = HeapStats::kEndMarker;
4973 *stats->new_space_size = new_space_.SizeAsInt();
4974 *stats->new_space_capacity = static_cast<int>(new_space_.Capacity());
4975 *stats->old_pointer_space_size = old_pointer_space_->SizeOfObjects();
4976 *stats->old_pointer_space_capacity = old_pointer_space_->Capacity();
4977 *stats->old_data_space_size = old_data_space_->SizeOfObjects();
4978 *stats->old_data_space_capacity = old_data_space_->Capacity();
4979 *stats->code_space_size = code_space_->SizeOfObjects();
4980 *stats->code_space_capacity = code_space_->Capacity();
4981 *stats->map_space_size = map_space_->SizeOfObjects();
4982 *stats->map_space_capacity = map_space_->Capacity();
4983 *stats->cell_space_size = cell_space_->SizeOfObjects();
4984 *stats->cell_space_capacity = cell_space_->Capacity();
4985 *stats->property_cell_space_size = property_cell_space_->SizeOfObjects();
4986 *stats->property_cell_space_capacity = property_cell_space_->Capacity();
4987 *stats->lo_space_size = lo_space_->Size();
4988 isolate_->global_handles()->RecordStats(stats);
4989 *stats->memory_allocator_size = isolate()->memory_allocator()->Size();
4990 *stats->memory_allocator_capacity =
4991 isolate()->memory_allocator()->Size() +
4992 isolate()->memory_allocator()->Available();
4993 *stats->os_error = base::OS::GetLastError();
4995 if (take_snapshot) {
4996 HeapIterator iterator(this);
4997 for (HeapObject* obj = iterator.next(); obj != NULL;
4998 obj = iterator.next()) {
4999 InstanceType type = obj->map()->instance_type();
5000 DCHECK(0 <= type && type <= LAST_TYPE);
5001 stats->objects_per_type[type]++;
5002 stats->size_per_type[type] += obj->Size();
5008 intptr_t Heap::PromotedSpaceSizeOfObjects() {
5009 return old_pointer_space_->SizeOfObjects() +
5010 old_data_space_->SizeOfObjects() + code_space_->SizeOfObjects() +
5011 map_space_->SizeOfObjects() + cell_space_->SizeOfObjects() +
5012 property_cell_space_->SizeOfObjects() + lo_space_->SizeOfObjects();
5016 int64_t Heap::PromotedExternalMemorySize() {
5017 if (amount_of_external_allocated_memory_ <=
5018 amount_of_external_allocated_memory_at_last_global_gc_)
5020 return amount_of_external_allocated_memory_ -
5021 amount_of_external_allocated_memory_at_last_global_gc_;
5025 intptr_t Heap::OldGenerationAllocationLimit(intptr_t old_gen_size,
5026 int freed_global_handles) {
5027 const int kMaxHandles = 1000;
5028 const int kMinHandles = 100;
5029 double min_factor = 1.1;
5030 double max_factor = 4;
5031 // We set the old generation growing factor to 2 to grow the heap slower on
5032 // memory-constrained devices.
5033 if (max_old_generation_size_ <= kMaxOldSpaceSizeMediumMemoryDevice) {
5036 // If there are many freed global handles, then the next full GC will
5037 // likely collect a lot of garbage. Choose the heap growing factor
5038 // depending on freed global handles.
5039 // TODO(ulan, hpayer): Take into account mutator utilization.
5041 if (freed_global_handles <= kMinHandles) {
5042 factor = max_factor;
5043 } else if (freed_global_handles >= kMaxHandles) {
5044 factor = min_factor;
5046 // Compute factor using linear interpolation between points
5047 // (kMinHandles, max_factor) and (kMaxHandles, min_factor).
5048 factor = max_factor -
5049 (freed_global_handles - kMinHandles) * (max_factor - min_factor) /
5050 (kMaxHandles - kMinHandles);
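// Example: 550 freed handles give factor = 4 - (550 - 100) * (4 - 1.1) / 900
// = 2.55, i.e. roughly halfway between the two bounds.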
5053 if (FLAG_stress_compaction ||
5054 mark_compact_collector()->reduce_memory_footprint_) {
5055 factor = min_factor;
5058 intptr_t limit = static_cast<intptr_t>(old_gen_size * factor);
5059 limit = Max(limit, kMinimumOldGenerationAllocationLimit);
5060 limit += new_space_.Capacity();
5061 intptr_t halfway_to_the_max = (old_gen_size + max_old_generation_size_) / 2;
5062 return Min(limit, halfway_to_the_max);
5066 void Heap::EnableInlineAllocation() {
5067 if (!inline_allocation_disabled_) return;
5068 inline_allocation_disabled_ = false;
5070 // Update inline allocation limit for new space.
5071 new_space()->UpdateInlineAllocationLimit(0);
5075 void Heap::DisableInlineAllocation() {
5076 if (inline_allocation_disabled_) return;
5077 inline_allocation_disabled_ = true;
5079 // Update inline allocation limit for new space.
5080 new_space()->UpdateInlineAllocationLimit(0);
5082 // Update inline allocation limit for old spaces.
5083 PagedSpaces spaces(this);
5084 for (PagedSpace* space = spaces.next(); space != NULL;
5085 space = spaces.next()) {
5086 space->EmptyAllocationInfo();
5091 V8_DECLARE_ONCE(initialize_gc_once);
5093 static void InitializeGCOnce() {
5094 InitializeScavengingVisitorsTables();
5095 NewSpaceScavenger::Initialize();
5096 MarkCompactCollector::Initialize();
5100 bool Heap::SetUp() {
5102 allocation_timeout_ = FLAG_gc_interval;
5105 // Initialize heap spaces and initial maps and objects. Whenever something
5106 // goes wrong, just return false. The caller should check the results and
5107 // call Heap::TearDown() to release allocated memory.
5109 // If the heap is not yet configured (e.g. through the API), configure it.
5110 // Configuration is based on the flags new-space-size (really the semispace
5111 // size) and old-space-size if set or the initial values of semispace_size_
5112 // and old_generation_size_ otherwise.
5114 if (!ConfigureHeapDefault()) return false;
5117 base::CallOnce(&initialize_gc_once, &InitializeGCOnce);
5119 MarkMapPointersAsEncoded(false);
5121 // Set up memory allocator.
5122 if (!isolate_->memory_allocator()->SetUp(MaxReserved(), MaxExecutableSize()))
5125 // Set up new space.
5126 if (!new_space_.SetUp(reserved_semispace_size_, max_semi_space_size_)) {
5129 new_space_top_after_last_gc_ = new_space()->top();
5131 // Initialize old pointer space.
5132 old_pointer_space_ = new OldSpace(this, max_old_generation_size_,
5133 OLD_POINTER_SPACE, NOT_EXECUTABLE);
5134 if (old_pointer_space_ == NULL) return false;
5135 if (!old_pointer_space_->SetUp()) return false;
5137 // Initialize old data space.
5138 old_data_space_ = new OldSpace(this, max_old_generation_size_, OLD_DATA_SPACE,
5140 if (old_data_space_ == NULL) return false;
5141 if (!old_data_space_->SetUp()) return false;
5143 if (!isolate_->code_range()->SetUp(code_range_size_)) return false;
5145 // Initialize the code space, set its maximum capacity to the old
5146 // generation size. It needs executable memory.
5148 new OldSpace(this, max_old_generation_size_, CODE_SPACE, EXECUTABLE);
5149 if (code_space_ == NULL) return false;
5150 if (!code_space_->SetUp()) return false;
5152 // Initialize map space.
5153 map_space_ = new MapSpace(this, max_old_generation_size_, MAP_SPACE);
5154 if (map_space_ == NULL) return false;
5155 if (!map_space_->SetUp()) return false;
5157 // Initialize simple cell space.
5158 cell_space_ = new CellSpace(this, max_old_generation_size_, CELL_SPACE);
5159 if (cell_space_ == NULL) return false;
5160 if (!cell_space_->SetUp()) return false;
5162 // Initialize global property cell space.
5163 property_cell_space_ = new PropertyCellSpace(this, max_old_generation_size_,
5164 PROPERTY_CELL_SPACE);
5165 if (property_cell_space_ == NULL) return false;
5166 if (!property_cell_space_->SetUp()) return false;
5168 // The large object space may contain code or data. We set the memory
5169 // to be non-executable here for safety, but this means we need to enable it
5170 // explicitly when allocating large code objects.
5171 lo_space_ = new LargeObjectSpace(this, max_old_generation_size_, LO_SPACE);
5172 if (lo_space_ == NULL) return false;
5173 if (!lo_space_->SetUp()) return false;
5175 // Set up the seed that is used to randomize the string hash function.
5176 DCHECK(hash_seed() == 0);
5177 if (FLAG_randomize_hashes) {
5178 if (FLAG_hash_seed == 0) {
5179 int rnd = isolate()->random_number_generator()->NextInt();
5180 set_hash_seed(Smi::FromInt(rnd & Name::kHashBitMask));
5181 } else {
5182 set_hash_seed(Smi::FromInt(FLAG_hash_seed));
5186 LOG(isolate_, IntPtrTEvent("heap-capacity", Capacity()));
5187 LOG(isolate_, IntPtrTEvent("heap-available", Available()));
5189 store_buffer()->SetUp();
5191 mark_compact_collector()->SetUp();
5197 bool Heap::CreateHeapObjects() {
5198 // Create initial maps.
5199 if (!CreateInitialMaps()) return false;
5202 // Create initial objects
5203 CreateInitialObjects();
5204 CHECK_EQ(0, gc_count_);
5206 set_native_contexts_list(undefined_value());
5207 set_array_buffers_list(undefined_value());
5208 set_allocation_sites_list(undefined_value());
5209 weak_object_to_code_table_ = undefined_value();
5214 void Heap::SetStackLimits() {
5215 DCHECK(isolate_ != NULL);
5216 DCHECK(isolate_ == isolate());
5217 // On 64 bit machines, pointers are generally out of range of Smis. We write
5218 // something that looks like an out of range Smi to the GC.
5220 // Set up the special root array entries containing the stack limits.
5221 // These are actually addresses, but the tag makes the GC ignore it.
5222 roots_[kStackLimitRootIndex] = reinterpret_cast<Object*>(
5223 (isolate_->stack_guard()->jslimit() & ~kSmiTagMask) | kSmiTag);
5224 roots_[kRealStackLimitRootIndex] = reinterpret_cast<Object*>(
5225 (isolate_->stack_guard()->real_jslimit() & ~kSmiTagMask) | kSmiTag);
5229 void Heap::TearDown() {
5231 if (FLAG_verify_heap) {
5236 UpdateMaximumCommitted();
5238 if (FLAG_print_cumulative_gc_stat) {
5240 PrintF("gc_count=%d ", gc_count_);
5241 PrintF("mark_sweep_count=%d ", ms_count_);
5242 PrintF("max_gc_pause=%.1f ", get_max_gc_pause());
5243 PrintF("total_gc_time=%.1f ", total_gc_time_ms_);
5244 PrintF("min_in_mutator=%.1f ", get_min_in_mutator());
5245 PrintF("max_alive_after_gc=%" V8_PTR_PREFIX "d ", get_max_alive_after_gc());
5246 PrintF("total_marking_time=%.1f ", tracer_.cumulative_sweeping_duration());
5247 PrintF("total_sweeping_time=%.1f ", tracer_.cumulative_sweeping_duration());
5251 if (FLAG_print_max_heap_committed) {
5253 PrintF("maximum_committed_by_heap=%" V8_PTR_PREFIX "d ",
5254 MaximumCommittedMemory());
5255 PrintF("maximum_committed_by_new_space=%" V8_PTR_PREFIX "d ",
5256 new_space_.MaximumCommittedMemory());
5257 PrintF("maximum_committed_by_old_pointer_space=%" V8_PTR_PREFIX "d ",
5258 old_data_space_->MaximumCommittedMemory());
5259 PrintF("maximum_committed_by_old_data_space=%" V8_PTR_PREFIX "d ",
5260 old_pointer_space_->MaximumCommittedMemory());
5261 PrintF("maximum_committed_by_old_data_space=%" V8_PTR_PREFIX "d ",
5262 old_pointer_space_->MaximumCommittedMemory());
5263 PrintF("maximum_committed_by_code_space=%" V8_PTR_PREFIX "d ",
5264 code_space_->MaximumCommittedMemory());
5265 PrintF("maximum_committed_by_map_space=%" V8_PTR_PREFIX "d ",
5266 map_space_->MaximumCommittedMemory());
5267 PrintF("maximum_committed_by_cell_space=%" V8_PTR_PREFIX "d ",
5268 cell_space_->MaximumCommittedMemory());
5269 PrintF("maximum_committed_by_property_space=%" V8_PTR_PREFIX "d ",
5270 property_cell_space_->MaximumCommittedMemory());
5271 PrintF("maximum_committed_by_lo_space=%" V8_PTR_PREFIX "d ",
5272 lo_space_->MaximumCommittedMemory());
5276 if (FLAG_verify_predictable) {
5277 PrintAlloctionsHash();
5280 TearDownArrayBuffers();
5282 isolate_->global_handles()->TearDown();
5284 external_string_table_.TearDown();
5286 mark_compact_collector()->TearDown();
5288 new_space_.TearDown();
5290 if (old_pointer_space_ != NULL) {
5291 old_pointer_space_->TearDown();
5292 delete old_pointer_space_;
5293 old_pointer_space_ = NULL;
5296 if (old_data_space_ != NULL) {
5297 old_data_space_->TearDown();
5298 delete old_data_space_;
5299 old_data_space_ = NULL;
5302 if (code_space_ != NULL) {
5303 code_space_->TearDown();
5304 delete code_space_;
5305 code_space_ = NULL;
5308 if (map_space_ != NULL) {
5309 map_space_->TearDown();
5310 delete map_space_;
5311 map_space_ = NULL;
5314 if (cell_space_ != NULL) {
5315 cell_space_->TearDown();
5316 delete cell_space_;
5317 cell_space_ = NULL;
5320 if (property_cell_space_ != NULL) {
5321 property_cell_space_->TearDown();
5322 delete property_cell_space_;
5323 property_cell_space_ = NULL;
5326 if (lo_space_ != NULL) {
5327 lo_space_->TearDown();
5328 delete lo_space_;
5329 lo_space_ = NULL;
5332 store_buffer()->TearDown();
5333 incremental_marking()->TearDown();
5335 isolate_->memory_allocator()->TearDown();
5339 void Heap::AddGCPrologueCallback(v8::Isolate::GCPrologueCallback callback,
5340 GCType gc_type, bool pass_isolate) {
5341 DCHECK(callback != NULL);
5342 GCPrologueCallbackPair pair(callback, gc_type, pass_isolate);
5343 DCHECK(!gc_prologue_callbacks_.Contains(pair));
5344 return gc_prologue_callbacks_.Add(pair);
5348 void Heap::RemoveGCPrologueCallback(v8::Isolate::GCPrologueCallback callback) {
5349 DCHECK(callback != NULL);
5350 for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
5351 if (gc_prologue_callbacks_[i].callback == callback) {
5352 gc_prologue_callbacks_.Remove(i);
5360 void Heap::AddGCEpilogueCallback(v8::Isolate::GCEpilogueCallback callback,
5361 GCType gc_type, bool pass_isolate) {
5362 DCHECK(callback != NULL);
5363 GCEpilogueCallbackPair pair(callback, gc_type, pass_isolate);
5364 DCHECK(!gc_epilogue_callbacks_.Contains(pair));
5365 return gc_epilogue_callbacks_.Add(pair);
5369 void Heap::RemoveGCEpilogueCallback(v8::Isolate::GCEpilogueCallback callback) {
5370 DCHECK(callback != NULL);
5371 for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
5372 if (gc_epilogue_callbacks_[i].callback == callback) {
5373 gc_epilogue_callbacks_.Remove(i);
5381 // TODO(ishell): Find a better place for this.
5382 void Heap::AddWeakObjectToCodeDependency(Handle<Object> obj,
5383 Handle<DependentCode> dep) {
5384 DCHECK(!InNewSpace(*obj));
5385 DCHECK(!InNewSpace(*dep));
5386 // This handle scope keeps the table handle local to this function, which
5387 // allows us to safely skip write barriers in table update operations.
5388 HandleScope scope(isolate());
5389 Handle<WeakHashTable> table(WeakHashTable::cast(weak_object_to_code_table_),
5390 isolate());
5391 table = WeakHashTable::Put(table, obj, dep);
5393 if (ShouldZapGarbage() && weak_object_to_code_table_ != *table) {
5394 WeakHashTable::cast(weak_object_to_code_table_)->Zap(the_hole_value());
5396 set_weak_object_to_code_table(*table);
5397 DCHECK_EQ(*dep, table->Lookup(obj));
5401 DependentCode* Heap::LookupWeakObjectToCodeDependency(Handle<Object> obj) {
5402 Object* dep = WeakHashTable::cast(weak_object_to_code_table_)->Lookup(obj);
5403 if (dep->IsDependentCode()) return DependentCode::cast(dep);
5404 return DependentCode::cast(empty_fixed_array());
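// LookupWeakObjectToCodeDependency returns the canonical empty DependentCode
// (which reuses the empty fixed array) on a miss, so callers never have to
// deal with a NULL result.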
5408 void Heap::EnsureWeakObjectToCodeTable() {
5409 if (!weak_object_to_code_table()->IsHashTable()) {
5410 set_weak_object_to_code_table(
5411 *WeakHashTable::New(isolate(), 16, USE_DEFAULT_MINIMUM_CAPACITY,
5417 void Heap::FatalProcessOutOfMemory(const char* location, bool take_snapshot) {
5418 v8::internal::V8::FatalProcessOutOfMemory(location, take_snapshot);
5423 class PrintHandleVisitor : public ObjectVisitor {
5425 void VisitPointers(Object** start, Object** end) {
5426 for (Object** p = start; p < end; p++)
5427 PrintF(" handle %p to %p\n", reinterpret_cast<void*>(p),
5428 reinterpret_cast<void*>(*p));
5433 void Heap::PrintHandles() {
5434 PrintF("Handles:\n");
5435 PrintHandleVisitor v;
5436 isolate_->handle_scope_implementer()->Iterate(&v);
5442 Space* AllSpaces::next() {
5443 switch (counter_++) {
5445 return heap_->new_space();
5446 case OLD_POINTER_SPACE:
5447 return heap_->old_pointer_space();
5448 case OLD_DATA_SPACE:
5449 return heap_->old_data_space();
5451 return heap_->code_space();
5453 return heap_->map_space();
5455 return heap_->cell_space();
5456 case PROPERTY_CELL_SPACE:
5457 return heap_->property_cell_space();
5459 return heap_->lo_space();
5466 PagedSpace* PagedSpaces::next() {
5467 switch (counter_++) {
5468 case OLD_POINTER_SPACE:
5469 return heap_->old_pointer_space();
5470 case OLD_DATA_SPACE:
5471 return heap_->old_data_space();
5473 return heap_->code_space();
5475 return heap_->map_space();
5477 return heap_->cell_space();
5478 case PROPERTY_CELL_SPACE:
5479 return heap_->property_cell_space();
5486 OldSpace* OldSpaces::next() {
5487 switch (counter_++) {
5488 case OLD_POINTER_SPACE:
5489 return heap_->old_pointer_space();
5490 case OLD_DATA_SPACE:
5491 return heap_->old_data_space();
5493 return heap_->code_space();
5500 SpaceIterator::SpaceIterator(Heap* heap)
5502 current_space_(FIRST_SPACE),
5507 SpaceIterator::SpaceIterator(Heap* heap, HeapObjectCallback size_func)
5509 current_space_(FIRST_SPACE),
5511 size_func_(size_func) {}
5514 SpaceIterator::~SpaceIterator() {
5515 // Delete active iterator if any.
5520 bool SpaceIterator::has_next() {
5521 // Iterate until no more spaces.
5522 return current_space_ != LAST_SPACE;
5526 ObjectIterator* SpaceIterator::next() {
5527 if (iterator_ != NULL) {
5530 // Move to the next space
5532 if (current_space_ > LAST_SPACE) {
5537 // Return iterator for the new current space.
5538 return CreateIterator();
5542 // Create an iterator for the space to iterate.
5543 ObjectIterator* SpaceIterator::CreateIterator() {
5544 DCHECK(iterator_ == NULL);
5546 switch (current_space_) {
5548 iterator_ = new SemiSpaceIterator(heap_->new_space(), size_func_);
5550 case OLD_POINTER_SPACE:
5552 new HeapObjectIterator(heap_->old_pointer_space(), size_func_);
5554 case OLD_DATA_SPACE:
5555 iterator_ = new HeapObjectIterator(heap_->old_data_space(), size_func_);
5558 iterator_ = new HeapObjectIterator(heap_->code_space(), size_func_);
5561 iterator_ = new HeapObjectIterator(heap_->map_space(), size_func_);
5564 iterator_ = new HeapObjectIterator(heap_->cell_space(), size_func_);
5566 case PROPERTY_CELL_SPACE:
5568 new HeapObjectIterator(heap_->property_cell_space(), size_func_);
5571 iterator_ = new LargeObjectIterator(heap_->lo_space(), size_func_);
5575 // Return the newly allocated iterator.
5576 DCHECK(iterator_ != NULL);
5581 class HeapObjectsFilter {
5583 virtual ~HeapObjectsFilter() {}
5584 virtual bool SkipObject(HeapObject* object) = 0;
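// UnreachableObjectsFilter marks everything reachable from the roots up
// front (using the collector's mark bits and an explicit marking stack) and
// then reports any object whose mark bit is still clear as skippable; the
// mark bits are cleared again when the filter is destroyed.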
5588 class UnreachableObjectsFilter : public HeapObjectsFilter {
5590 explicit UnreachableObjectsFilter(Heap* heap) : heap_(heap) {
5591 MarkReachableObjects();
5594 ~UnreachableObjectsFilter() {
5595 heap_->mark_compact_collector()->ClearMarkbits();
5598 bool SkipObject(HeapObject* object) {
5599 MarkBit mark_bit = Marking::MarkBitFrom(object);
5600 return !mark_bit.Get();
5604 class MarkingVisitor : public ObjectVisitor {
5606 MarkingVisitor() : marking_stack_(10) {}
5608 void VisitPointers(Object** start, Object** end) {
5609 for (Object** p = start; p < end; p++) {
5610 if (!(*p)->IsHeapObject()) continue;
5611 HeapObject* obj = HeapObject::cast(*p);
5612 MarkBit mark_bit = Marking::MarkBitFrom(obj);
5613 if (!mark_bit.Get()) {
5615 marking_stack_.Add(obj);
5620 void TransitiveClosure() {
5621 while (!marking_stack_.is_empty()) {
5622 HeapObject* obj = marking_stack_.RemoveLast();
5628 List<HeapObject*> marking_stack_;
5631 void MarkReachableObjects() {
5632 MarkingVisitor visitor;
5633 heap_->IterateRoots(&visitor, VISIT_ALL);
5634 visitor.TransitiveClosure();
5638 DisallowHeapAllocation no_allocation_;
5642 HeapIterator::HeapIterator(Heap* heap)
5643 : make_heap_iterable_helper_(heap),
5644 no_heap_allocation_(),
5646 filtering_(HeapIterator::kNoFiltering),
5652 HeapIterator::HeapIterator(Heap* heap,
5653 HeapIterator::HeapObjectsFiltering filtering)
5654 : make_heap_iterable_helper_(heap),
5655 no_heap_allocation_(),
5657 filtering_(filtering),
5663 HeapIterator::~HeapIterator() { Shutdown(); }
5666 void HeapIterator::Init() {
5667 // Start the iteration.
5668 space_iterator_ = new SpaceIterator(heap_);
5669 switch (filtering_) {
5670 case kFilterUnreachable:
5671 filter_ = new UnreachableObjectsFilter(heap_);
5676 object_iterator_ = space_iterator_->next();
5680 void HeapIterator::Shutdown() {
5682 // Assert that in filtering mode we have iterated through all
5683 // objects. Otherwise, the heap will be left in an inconsistent state.
5684 if (filtering_ != kNoFiltering) {
5685 DCHECK(object_iterator_ == NULL);
5688 // Make sure the last iterator is deallocated.
5689 delete space_iterator_;
5690 space_iterator_ = NULL;
5691 object_iterator_ = NULL;
5697 HeapObject* HeapIterator::next() {
5698 if (filter_ == NULL) return NextObject();
5700 HeapObject* obj = NextObject();
5701 while (obj != NULL && filter_->SkipObject(obj)) obj = NextObject();
5706 HeapObject* HeapIterator::NextObject() {
5707 // No iterator means we are done.
5708 if (object_iterator_ == NULL) return NULL;
5710 if (HeapObject* obj = object_iterator_->next_object()) {
5711 // If the current iterator has more objects we are fine.
5714 // Go through the spaces looking for one that has objects.
5715 while (space_iterator_->has_next()) {
5716 object_iterator_ = space_iterator_->next();
5717 if (HeapObject* obj = object_iterator_->next_object()) {
5722 // Done with the last space.
5723 object_iterator_ = NULL;
5728 void HeapIterator::reset() {
5729 // Restart the iterator.
5737 Object* const PathTracer::kAnyGlobalObject = NULL;
5739 class PathTracer::MarkVisitor : public ObjectVisitor {
5741 explicit MarkVisitor(PathTracer* tracer) : tracer_(tracer) {}
5742 void VisitPointers(Object** start, Object** end) {
5743 // Scan all HeapObject pointers in [start, end)
5744 for (Object** p = start; !tracer_->found() && (p < end); p++) {
5745 if ((*p)->IsHeapObject()) tracer_->MarkRecursively(p, this);
5750 PathTracer* tracer_;
5754 class PathTracer::UnmarkVisitor : public ObjectVisitor {
5756 explicit UnmarkVisitor(PathTracer* tracer) : tracer_(tracer) {}
5757 void VisitPointers(Object** start, Object** end) {
5758 // Scan all HeapObject pointers in [start, end)
5759 for (Object** p = start; p < end; p++) {
5760 if ((*p)->IsHeapObject()) tracer_->UnmarkRecursively(p, this);
5765 PathTracer* tracer_;
5769 void PathTracer::VisitPointers(Object** start, Object** end) {
5770 bool done = ((what_to_find_ == FIND_FIRST) && found_target_);
5771 // Visit all HeapObject pointers in [start, end)
5772 for (Object** p = start; !done && (p < end); p++) {
5773 if ((*p)->IsHeapObject()) {
5775 done = ((what_to_find_ == FIND_FIRST) && found_target_);
5781 void PathTracer::Reset() {
5782 found_target_ = false;
5783 object_stack_.Clear();
5787 void PathTracer::TracePathFrom(Object** root) {
5788 DCHECK((search_target_ == kAnyGlobalObject) ||
5789 search_target_->IsHeapObject());
5790 found_target_in_trace_ = false;
5793 MarkVisitor mark_visitor(this);
5794 MarkRecursively(root, &mark_visitor);
5796 UnmarkVisitor unmark_visitor(this);
5797 UnmarkRecursively(root, &unmark_visitor);
5803 static bool SafeIsNativeContext(HeapObject* obj) {
5804 return obj->map() == obj->GetHeap()->raw_unchecked_native_context_map();
5808 void PathTracer::MarkRecursively(Object** p, MarkVisitor* mark_visitor) {
5809 if (!(*p)->IsHeapObject()) return;
5811 HeapObject* obj = HeapObject::cast(*p);
5813 MapWord map_word = obj->map_word();
5814 if (!map_word.ToMap()->IsHeapObject()) return; // visited before
5816 if (found_target_in_trace_) return; // stop if target found
5817 object_stack_.Add(obj);
5818 if (((search_target_ == kAnyGlobalObject) && obj->IsJSGlobalObject()) ||
5819 (obj == search_target_)) {
5820 found_target_in_trace_ = true;
5821 found_target_ = true;
5825 bool is_native_context = SafeIsNativeContext(obj);
5828 Map* map = Map::cast(map_word.ToMap());
5830 MapWord marked_map_word =
5831 MapWord::FromRawValue(obj->map_word().ToRawValue() + kMarkTag);
5832 obj->set_map_word(marked_map_word);
5834 // Scan the object body.
5835 if (is_native_context && (visit_mode_ == VISIT_ONLY_STRONG)) {
5836 // This is specialized to scan Contexts properly.
5838 reinterpret_cast<Object**>(obj->address() + Context::kHeaderSize);
5840 reinterpret_cast<Object**>(obj->address() + Context::kHeaderSize +
5841 Context::FIRST_WEAK_SLOT * kPointerSize);
5842 mark_visitor->VisitPointers(start, end);
5844 obj->IterateBody(map->instance_type(), obj->SizeFromMap(map), mark_visitor);
5847 // Scan the map after the body because the body is a lot more interesting
5848 // when doing leak detection.
5849 MarkRecursively(reinterpret_cast<Object**>(&map), mark_visitor);
5851 if (!found_target_in_trace_) { // don't pop if found the target
5852 object_stack_.RemoveLast();
5857 void PathTracer::UnmarkRecursively(Object** p, UnmarkVisitor* unmark_visitor) {
5858 if (!(*p)->IsHeapObject()) return;
5860 HeapObject* obj = HeapObject::cast(*p);
5862 MapWord map_word = obj->map_word();
5863 if (map_word.ToMap()->IsHeapObject()) return; // unmarked already
5865 MapWord unmarked_map_word =
5866 MapWord::FromRawValue(map_word.ToRawValue() - kMarkTag);
5867 obj->set_map_word(unmarked_map_word);
5869 Map* map = Map::cast(unmarked_map_word.ToMap());
5871 UnmarkRecursively(reinterpret_cast<Object**>(&map), unmark_visitor);
5873 obj->IterateBody(map->instance_type(), obj->SizeFromMap(map), unmark_visitor);
5877 void PathTracer::ProcessResults() {
5878 if (found_target_) {
5879 OFStream os(stdout);
5880 os << "=====================================\n"
5881 << "==== Path to object ====\n"
5882 << "=====================================\n\n";
5884 DCHECK(!object_stack_.is_empty());
5885 for (int i = 0; i < object_stack_.length(); i++) {
5886 if (i > 0) os << "\n |\n |\n V\n\n";
5887 object_stack_[i]->Print(os);
5889 os << "=====================================\n";
5894 // Triggers a depth-first traversal of reachable objects from one
5895 // given root object and finds a path to a specific heap object and
5896 // prints it.
5897 void Heap::TracePathToObjectFrom(Object* target, Object* root) {
5898 PathTracer tracer(target, PathTracer::FIND_ALL, VISIT_ALL);
5899 tracer.VisitPointer(&root);
5903 // Triggers a depth-first traversal of reachable objects from roots
5904 // and finds a path to a specific heap object and prints it.
5905 void Heap::TracePathToObject(Object* target) {
5906 PathTracer tracer(target, PathTracer::FIND_ALL, VISIT_ALL);
5907 IterateRoots(&tracer, VISIT_ONLY_STRONG);
5911 // Triggers a depth-first traversal of reachable objects from roots
5912 // and finds a path to any global object and prints it. Useful for
5913 // determining the source for leaks of global objects.
5914 void Heap::TracePathToGlobal() {
5915 PathTracer tracer(PathTracer::kAnyGlobalObject, PathTracer::FIND_ALL,
5917 IterateRoots(&tracer, VISIT_ONLY_STRONG);
5922 void Heap::UpdateCumulativeGCStatistics(double duration,
5923 double spent_in_mutator,
5924 double marking_time) {
5925 if (FLAG_print_cumulative_gc_stat) {
5926 total_gc_time_ms_ += duration;
5927 max_gc_pause_ = Max(max_gc_pause_, duration);
5928 max_alive_after_gc_ = Max(max_alive_after_gc_, SizeOfObjects());
5929 min_in_mutator_ = Min(min_in_mutator_, spent_in_mutator);
5930 } else if (FLAG_trace_gc_verbose) {
5931 total_gc_time_ms_ += duration;
5934 marking_time_ += marking_time;
5938 int KeyedLookupCache::Hash(Handle<Map> map, Handle<Name> name) {
5939 DisallowHeapAllocation no_gc;
5940 // Uses only lower 32 bits if pointers are larger.
5941 uintptr_t addr_hash =
5942 static_cast<uint32_t>(reinterpret_cast<uintptr_t>(*map)) >> kMapHashShift;
5943 return static_cast<uint32_t>((addr_hash ^ name->Hash()) & kCapacityMask);
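// Hash() combines the map's address (shifted right by kMapHashShift to drop
// the low-order alignment bits) with the name's hash and masks the result
// down to the cache capacity.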
5947 int KeyedLookupCache::Lookup(Handle<Map> map, Handle<Name> name) {
5948 DisallowHeapAllocation no_gc;
5949 int index = (Hash(map, name) & kHashMask);
5950 for (int i = 0; i < kEntriesPerBucket; i++) {
5951 Key& key = keys_[index + i];
5952 if ((key.map == *map) && key.name->Equals(*name)) {
5953 return field_offsets_[index + i];
5960 void KeyedLookupCache::Update(Handle<Map> map, Handle<Name> name,
5962 DisallowHeapAllocation no_gc;
5963 if (!name->IsUniqueName()) {
5964 if (!StringTable::InternalizeStringIfExists(
5965 name->GetIsolate(), Handle<String>::cast(name)).ToHandle(&name)) {
5969 // This cache is cleared only between mark compact passes, so we expect the
5970 // cache to only contain old space names.
5971 DCHECK(!map->GetIsolate()->heap()->InNewSpace(*name));
5973 int index = (Hash(map, name) & kHashMask);
5974 // After a GC there will be free slots, so we use them in order (this may
5975 // help to get the most frequently used one in position 0).
5976 for (int i = 0; i < kEntriesPerBucket; i++) {
5977 Key& key = keys_[index];
5978 Object* free_entry_indicator = NULL;
5979 if (key.map == free_entry_indicator) {
5982 field_offsets_[index + i] = field_offset;
5986 // No free entry found in this bucket, so we move them all down one and
5987 // put the new entry at position zero.
5988 for (int i = kEntriesPerBucket - 1; i > 0; i--) {
5989 Key& key = keys_[index + i];
5990 Key& key2 = keys_[index + i - 1];
5992 field_offsets_[index + i] = field_offsets_[index + i - 1];
5995 // Write the new first entry.
5996 Key& key = keys_[index];
5999 field_offsets_[index] = field_offset;
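// When a bucket is full it behaves like a small most-recently-inserted
// list: the new entry goes into slot zero and the older entries slide one
// position towards the end, with the last one dropping out of the cache.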
6003 void KeyedLookupCache::Clear() {
6004 for (int index = 0; index < kLength; index++) keys_[index].map = NULL;
6008 void DescriptorLookupCache::Clear() {
6009 for (int index = 0; index < kLength; index++) keys_[index].source = NULL;
6013 void ExternalStringTable::CleanUp() {
6015 for (int i = 0; i < new_space_strings_.length(); ++i) {
6016 if (new_space_strings_[i] == heap_->the_hole_value()) {
6019 DCHECK(new_space_strings_[i]->IsExternalString());
6020 if (heap_->InNewSpace(new_space_strings_[i])) {
6021 new_space_strings_[last++] = new_space_strings_[i];
6023 old_space_strings_.Add(new_space_strings_[i]);
6026 new_space_strings_.Rewind(last);
6027 new_space_strings_.Trim();
6030 for (int i = 0; i < old_space_strings_.length(); ++i) {
6031 if (old_space_strings_[i] == heap_->the_hole_value()) {
6034 DCHECK(old_space_strings_[i]->IsExternalString());
6035 DCHECK(!heap_->InNewSpace(old_space_strings_[i]));
6036 old_space_strings_[last++] = old_space_strings_[i];
6038 old_space_strings_.Rewind(last);
6039 old_space_strings_.Trim();
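// Both lists are compacted in place: entries that were cleared to the hole
// value are dropped, and strings that have been promoted out of new space
// migrate from new_space_strings_ to old_space_strings_.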
6041 if (FLAG_verify_heap) {
6048 void ExternalStringTable::TearDown() {
6049 for (int i = 0; i < new_space_strings_.length(); ++i) {
6050 heap_->FinalizeExternalString(ExternalString::cast(new_space_strings_[i]));
6052 new_space_strings_.Free();
6053 for (int i = 0; i < old_space_strings_.length(); ++i) {
6054 heap_->FinalizeExternalString(ExternalString::cast(old_space_strings_[i]));
6056 old_space_strings_.Free();
6060 void Heap::QueueMemoryChunkForFree(MemoryChunk* chunk) {
6061 chunk->set_next_chunk(chunks_queued_for_free_);
6062 chunks_queued_for_free_ = chunk;
6066 void Heap::FreeQueuedChunks() {
6067 if (chunks_queued_for_free_ == NULL) return;
6070 for (chunk = chunks_queued_for_free_; chunk != NULL; chunk = next) {
6071 next = chunk->next_chunk();
6072 chunk->SetFlag(MemoryChunk::ABOUT_TO_BE_FREED);
6074 if (chunk->owner()->identity() == LO_SPACE) {
6075 // StoreBuffer::Filter relies on MemoryChunk::FromAnyPointerAddress.
6076 // If FromAnyPointerAddress encounters a slot that belongs to a large
6077 // chunk queued for deletion it will fail to find the chunk because
6078 // it tries to perform a search in the list of pages owned by the large
6079 // object space, and queued chunks were detached from that list.
6080 // To work around this we split the large chunk into normal kPageSize aligned
6081 // pieces and initialize the size, owner and flags field of every piece.
6082 // If FromAnyPointerAddress encounters a slot that belongs to one of
6083 // these smaller pieces it will treat it as a slot on a normal Page.
6084 Address chunk_end = chunk->address() + chunk->size();
6085 MemoryChunk* inner =
6086 MemoryChunk::FromAddress(chunk->address() + Page::kPageSize);
6087 MemoryChunk* inner_last = MemoryChunk::FromAddress(chunk_end - 1);
6088 while (inner <= inner_last) {
6089 // Size of a large chunk is always a multiple of
6090 // OS::AllocateAlignment() so there is always
6091 // enough space for a fake MemoryChunk header.
6092 Address area_end = Min(inner->address() + Page::kPageSize, chunk_end);
6093 // Guard against overflow.
6094 if (area_end < inner->address()) area_end = chunk_end;
6095 inner->SetArea(inner->address(), area_end);
6096 inner->set_size(Page::kPageSize);
6097 inner->set_owner(lo_space());
6098 inner->SetFlag(MemoryChunk::ABOUT_TO_BE_FREED);
6099 inner = MemoryChunk::FromAddress(inner->address() + Page::kPageSize);
6103 isolate_->heap()->store_buffer()->Compact();
6104 isolate_->heap()->store_buffer()->Filter(MemoryChunk::ABOUT_TO_BE_FREED);
6105 for (chunk = chunks_queued_for_free_; chunk != NULL; chunk = next) {
6106 next = chunk->next_chunk();
6107 isolate_->memory_allocator()->Free(chunk);
6109 chunks_queued_for_free_ = NULL;
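// FreeQueuedChunks works in two passes: first every queued chunk is flagged
// ABOUT_TO_BE_FREED (with large chunks split into page-sized pieces so the
// store buffer can still resolve addresses inside them), then the store
// buffer is filtered and the chunks are handed back to the memory allocator.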
6113 void Heap::RememberUnmappedPage(Address page, bool compacted) {
6114 uintptr_t p = reinterpret_cast<uintptr_t>(page);
6115 // Tag the page pointer to make it findable in the dump file.
6117 p ^= 0xc1ead & (Page::kPageSize - 1); // Cleared.
6119 p ^= 0x1d1ed & (Page::kPageSize - 1); // I died.
6121 remembered_unmapped_pages_[remembered_unmapped_pages_index_] =
6122 reinterpret_cast<Address>(p);
6123 remembered_unmapped_pages_index_++;
6124 remembered_unmapped_pages_index_ %= kRememberedUnmappedPages;
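// remembered_unmapped_pages_ is a small fixed-size ring buffer: only the
// last kRememberedUnmappedPages unmapped pages are kept, tagged so that
// they are recognizable when inspecting a crash dump.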
6128 void Heap::ClearObjectStats(bool clear_last_time_stats) {
6129 memset(object_counts_, 0, sizeof(object_counts_));
6130 memset(object_sizes_, 0, sizeof(object_sizes_));
6131 if (clear_last_time_stats) {
6132 memset(object_counts_last_time_, 0, sizeof(object_counts_last_time_));
6133 memset(object_sizes_last_time_, 0, sizeof(object_sizes_last_time_));
6138 static base::LazyMutex checkpoint_object_stats_mutex = LAZY_MUTEX_INITIALIZER;
6141 void Heap::CheckpointObjectStats() {
6142 base::LockGuard<base::Mutex> lock_guard(
6143 checkpoint_object_stats_mutex.Pointer());
6144 Counters* counters = isolate()->counters();
6145 #define ADJUST_LAST_TIME_OBJECT_COUNT(name) \
6146 counters->count_of_##name()->Increment( \
6147 static_cast<int>(object_counts_[name])); \
6148 counters->count_of_##name()->Decrement( \
6149 static_cast<int>(object_counts_last_time_[name])); \
6150 counters->size_of_##name()->Increment( \
6151 static_cast<int>(object_sizes_[name])); \
6152 counters->size_of_##name()->Decrement( \
6153 static_cast<int>(object_sizes_last_time_[name]));
6154 INSTANCE_TYPE_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
6155 #undef ADJUST_LAST_TIME_OBJECT_COUNT
6157 #define ADJUST_LAST_TIME_OBJECT_COUNT(name) \
6158 index = FIRST_CODE_KIND_SUB_TYPE + Code::name; \
6159 counters->count_of_CODE_TYPE_##name()->Increment( \
6160 static_cast<int>(object_counts_[index])); \
6161 counters->count_of_CODE_TYPE_##name()->Decrement( \
6162 static_cast<int>(object_counts_last_time_[index])); \
6163 counters->size_of_CODE_TYPE_##name()->Increment( \
6164 static_cast<int>(object_sizes_[index])); \
6165 counters->size_of_CODE_TYPE_##name()->Decrement( \
6166 static_cast<int>(object_sizes_last_time_[index]));
6167 CODE_KIND_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
6168 #undef ADJUST_LAST_TIME_OBJECT_COUNT
6169 #define ADJUST_LAST_TIME_OBJECT_COUNT(name) \
6170 index = FIRST_FIXED_ARRAY_SUB_TYPE + name; \
6171 counters->count_of_FIXED_ARRAY_##name()->Increment( \
6172 static_cast<int>(object_counts_[index])); \
6173 counters->count_of_FIXED_ARRAY_##name()->Decrement( \
6174 static_cast<int>(object_counts_last_time_[index])); \
6175 counters->size_of_FIXED_ARRAY_##name()->Increment( \
6176 static_cast<int>(object_sizes_[index])); \
6177 counters->size_of_FIXED_ARRAY_##name()->Decrement( \
6178 static_cast<int>(object_sizes_last_time_[index]));
6179 FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
6180 #undef ADJUST_LAST_TIME_OBJECT_COUNT
6181 #define ADJUST_LAST_TIME_OBJECT_COUNT(name) \
6183 FIRST_CODE_AGE_SUB_TYPE + Code::k##name##CodeAge - Code::kFirstCodeAge; \
6184 counters->count_of_CODE_AGE_##name()->Increment( \
6185 static_cast<int>(object_counts_[index])); \
6186 counters->count_of_CODE_AGE_##name()->Decrement( \
6187 static_cast<int>(object_counts_last_time_[index])); \
6188 counters->size_of_CODE_AGE_##name()->Increment( \
6189 static_cast<int>(object_sizes_[index])); \
6190 counters->size_of_CODE_AGE_##name()->Decrement( \
6191 static_cast<int>(object_sizes_last_time_[index]));
6192 CODE_AGE_LIST_COMPLETE(ADJUST_LAST_TIME_OBJECT_COUNT)
6193 #undef ADJUST_LAST_TIME_OBJECT_COUNT
6195 MemCopy(object_counts_last_time_, object_counts_, sizeof(object_counts_));
6196 MemCopy(object_sizes_last_time_, object_sizes_, sizeof(object_sizes_));
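// Each ADJUST_LAST_TIME_OBJECT_COUNT expansion reports the delta since the
// previous checkpoint (increment by the current value, decrement by the
// last-time value); the snapshots copied above then become the new baseline.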
6200 } // namespace v8::internal