1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
7 #include "src/accessors.h"
9 #include "src/base/bits.h"
10 #include "src/base/once.h"
11 #include "src/base/utils/random-number-generator.h"
12 #include "src/bootstrapper.h"
13 #include "src/codegen.h"
14 #include "src/compilation-cache.h"
15 #include "src/conversions.h"
16 #include "src/cpu-profiler.h"
17 #include "src/debug.h"
18 #include "src/deoptimizer.h"
19 #include "src/global-handles.h"
20 #include "src/heap/gc-idle-time-handler.h"
21 #include "src/heap/incremental-marking.h"
22 #include "src/heap/mark-compact.h"
23 #include "src/heap/objects-visiting-inl.h"
24 #include "src/heap/objects-visiting.h"
25 #include "src/heap/store-buffer.h"
26 #include "src/heap-profiler.h"
27 #include "src/isolate-inl.h"
28 #include "src/natives.h"
29 #include "src/runtime-profiler.h"
30 #include "src/scopeinfo.h"
31 #include "src/snapshot.h"
32 #include "src/utils.h"
33 #include "src/v8threads.h"
34 #include "src/vm-state-inl.h"
36 #if V8_TARGET_ARCH_ARM && !V8_INTERPRETED_REGEXP
37 #include "src/regexp-macro-assembler.h" // NOLINT
38 #include "src/arm/regexp-macro-assembler-arm.h" // NOLINT
40 #if V8_TARGET_ARCH_MIPS && !V8_INTERPRETED_REGEXP
41 #include "src/regexp-macro-assembler.h" // NOLINT
42 #include "src/mips/regexp-macro-assembler-mips.h" // NOLINT
44 #if V8_TARGET_ARCH_MIPS64 && !V8_INTERPRETED_REGEXP
45 #include "src/regexp-macro-assembler.h"
46 #include "src/mips64/regexp-macro-assembler-mips64.h"
54 : amount_of_external_allocated_memory_(0),
55 amount_of_external_allocated_memory_at_last_global_gc_(0),
58 // semispace_size_ should be a power of 2 and old_generation_size_ should
59 // be a multiple of Page::kPageSize.
60 reserved_semispace_size_(8 * (kPointerSize / 4) * MB),
61 max_semi_space_size_(8 * (kPointerSize / 4) * MB),
62 initial_semispace_size_(Page::kPageSize),
63 max_old_generation_size_(700ul * (kPointerSize / 4) * MB),
64 max_executable_size_(256ul * (kPointerSize / 4) * MB),
65 // Variables set based on semispace_size_ and old_generation_size_ in ConfigureHeap.
67 // Will be 4 * reserved_semispace_size_ to ensure that young
68 // generation can be aligned to its size.
69 maximum_committed_(0),
70 survived_since_last_expansion_(0),
72 always_allocate_scope_depth_(0),
73 contexts_disposed_(0),
75 flush_monomorphic_ics_(false),
76 scan_on_scavenge_pages_(0),
78 old_pointer_space_(NULL),
79 old_data_space_(NULL),
83 property_cell_space_(NULL),
86 gc_post_processing_depth_(0),
87 allocations_count_(0),
88 raw_allocations_hash_(0),
89 dump_allocations_hash_countdown_(FLAG_dump_allocations_digest_at_alloc),
92 remembered_unmapped_pages_index_(0),
93 unflattened_strings_length_(0),
95 allocation_timeout_(0),
97 old_generation_allocation_limit_(kMinimumOldGenerationAllocationLimit),
98 old_gen_exhausted_(false),
99 inline_allocation_disabled_(false),
100 store_buffer_rebuilder_(store_buffer()),
101 hidden_string_(NULL),
102 gc_safe_size_of_old_object_(NULL),
103 total_regexp_code_generated_(0),
105 high_survival_rate_period_length_(0),
106 promoted_objects_size_(0),
108 semi_space_copied_object_size_(0),
109 semi_space_copied_rate_(0),
110 nodes_died_in_new_space_(0),
111 nodes_copied_in_new_space_(0),
113 maximum_size_scavenges_(0),
115 total_gc_time_ms_(0.0),
116 max_alive_after_gc_(0),
117 min_in_mutator_(kMaxInt),
120 mark_compact_collector_(this),
123 incremental_marking_(this),
124 gc_count_at_last_idle_gc_(0),
125 full_codegen_bytes_generated_(0),
126 crankshaft_codegen_bytes_generated_(0),
127 gcs_since_last_deopt_(0),
129 no_weak_object_verification_scope_depth_(0),
131 allocation_sites_scratchpad_length_(0),
132 promotion_queue_(this),
134 external_string_table_(this),
135 chunks_queued_for_free_(NULL),
136 gc_callbacks_depth_(0) {
137 // Allow build-time customization of the max semispace size. Building
138 // V8 with snapshots and a non-default max semispace size is much
139 // easier if you can define it as part of the build environment.
140 #if defined(V8_MAX_SEMISPACE_SIZE)
141 max_semi_space_size_ = reserved_semispace_size_ = V8_MAX_SEMISPACE_SIZE;
144 // Ensure old_generation_size_ is a multiple of kPageSize.
145 DCHECK(MB >= Page::kPageSize);
147 memset(roots_, 0, sizeof(roots_[0]) * kRootListLength);
148 set_native_contexts_list(NULL);
149 set_array_buffers_list(Smi::FromInt(0));
150 set_allocation_sites_list(Smi::FromInt(0));
151 set_encountered_weak_collections(Smi::FromInt(0));
152 // Put a dummy entry in the remembered pages so we can find the list in
153 // the minidump even if there are no real unmapped pages.
154 RememberUnmappedPage(NULL, false);
156 ClearObjectStats(true);
160 intptr_t Heap::Capacity() {
161 if (!HasBeenSetUp()) return 0;
163 return new_space_.Capacity() + old_pointer_space_->Capacity() +
164 old_data_space_->Capacity() + code_space_->Capacity() +
165 map_space_->Capacity() + cell_space_->Capacity() +
166 property_cell_space_->Capacity();
170 intptr_t Heap::CommittedMemory() {
171 if (!HasBeenSetUp()) return 0;
173 return new_space_.CommittedMemory() + old_pointer_space_->CommittedMemory() +
174 old_data_space_->CommittedMemory() + code_space_->CommittedMemory() +
175 map_space_->CommittedMemory() + cell_space_->CommittedMemory() +
176 property_cell_space_->CommittedMemory() + lo_space_->Size();
180 size_t Heap::CommittedPhysicalMemory() {
181 if (!HasBeenSetUp()) return 0;
183 return new_space_.CommittedPhysicalMemory() +
184 old_pointer_space_->CommittedPhysicalMemory() +
185 old_data_space_->CommittedPhysicalMemory() +
186 code_space_->CommittedPhysicalMemory() +
187 map_space_->CommittedPhysicalMemory() +
188 cell_space_->CommittedPhysicalMemory() +
189 property_cell_space_->CommittedPhysicalMemory() +
190 lo_space_->CommittedPhysicalMemory();
194 intptr_t Heap::CommittedMemoryExecutable() {
195 if (!HasBeenSetUp()) return 0;
197 return isolate()->memory_allocator()->SizeExecutable();
201 void Heap::UpdateMaximumCommitted() {
202 if (!HasBeenSetUp()) return;
204 intptr_t current_committed_memory = CommittedMemory();
205 if (current_committed_memory > maximum_committed_) {
206 maximum_committed_ = current_committed_memory;
211 intptr_t Heap::Available() {
212 if (!HasBeenSetUp()) return 0;
214 return new_space_.Available() + old_pointer_space_->Available() +
215 old_data_space_->Available() + code_space_->Available() +
216 map_space_->Available() + cell_space_->Available() +
217 property_cell_space_->Available();
221 bool Heap::HasBeenSetUp() {
222 return old_pointer_space_ != NULL && old_data_space_ != NULL &&
223 code_space_ != NULL && map_space_ != NULL && cell_space_ != NULL &&
224 property_cell_space_ != NULL && lo_space_ != NULL;
228 int Heap::GcSafeSizeOfOldObject(HeapObject* object) {
229 if (IntrusiveMarking::IsMarked(object)) {
230 return IntrusiveMarking::SizeOfMarkedObject(object);
232 return object->SizeFromMap(object->map());
236 GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space,
237 const char** reason) {
238 // Is global GC requested?
239 if (space != NEW_SPACE) {
240 isolate_->counters()->gc_compactor_caused_by_request()->Increment();
241 *reason = "GC in old space requested";
242 return MARK_COMPACTOR;
245 if (FLAG_gc_global || (FLAG_stress_compaction && (gc_count_ & 1) != 0)) {
246 *reason = "GC in old space forced by flags";
247 return MARK_COMPACTOR;
250 // Is enough data promoted to justify a global GC?
251 if (OldGenerationAllocationLimitReached()) {
252 isolate_->counters()->gc_compactor_caused_by_promoted_data()->Increment();
253 *reason = "promotion limit reached";
254 return MARK_COMPACTOR;
257 // Have allocation in OLD and LO failed?
258 if (old_gen_exhausted_) {
260 ->gc_compactor_caused_by_oldspace_exhaustion()
262 *reason = "old generations exhausted";
263 return MARK_COMPACTOR;
266 // Is there enough space left in OLD to guarantee that a scavenge can succeed?
269 // Note that MemoryAllocator->MaxAvailable() undercounts the memory available
270 // for object promotion. It counts only the bytes that the memory
271 // allocator has not yet allocated from the OS and assigned to any space,
272 // and does not count available bytes already in the old space or code
273 // space. Undercounting is safe---we may get an unrequested full GC when
274 // a scavenge would have succeeded.
275 if (isolate_->memory_allocator()->MaxAvailable() <= new_space_.Size()) {
277 ->gc_compactor_caused_by_oldspace_exhaustion()
279 *reason = "scavenge might not succeed";
280 return MARK_COMPACTOR;
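// Illustrative caller sketch (not from this file): Heap::CollectGarbage for a
// specific space is expected to drive this selection roughly as follows; the
// local variable names are invented for the example.
//
//   const char* collector_reason = NULL;
//   GarbageCollector collector =
//       SelectGarbageCollector(space, &collector_reason);
//   CollectGarbage(collector, gc_reason, collector_reason, callback_flags);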
289 // TODO(1238405): Combine the infrastructure for --heap-stats and
290 // --log-gc to avoid the complicated preprocessor and flag testing.
291 void Heap::ReportStatisticsBeforeGC() {
292 // Heap::ReportHeapStatistics will also log NewSpace statistics when
293 // --log-gc is set. The following logic is used to avoid double logging.
296 if (FLAG_heap_stats || FLAG_log_gc) new_space_.CollectStatistics();
297 if (FLAG_heap_stats) {
298 ReportHeapStatistics("Before GC");
299 } else if (FLAG_log_gc) {
300 new_space_.ReportStatistics();
302 if (FLAG_heap_stats || FLAG_log_gc) new_space_.ClearHistograms();
305 new_space_.CollectStatistics();
306 new_space_.ReportStatistics();
307 new_space_.ClearHistograms();
313 void Heap::PrintShortHeapStatistics() {
314 if (!FLAG_trace_gc_verbose) return;
315 PrintPID("Memory allocator, used: %6" V8_PTR_PREFIX
317 ", available: %6" V8_PTR_PREFIX "d KB\n",
318 isolate_->memory_allocator()->Size() / KB,
319 isolate_->memory_allocator()->Available() / KB);
320 PrintPID("New space, used: %6" V8_PTR_PREFIX
322 ", available: %6" V8_PTR_PREFIX
324 ", committed: %6" V8_PTR_PREFIX "d KB\n",
325 new_space_.Size() / KB, new_space_.Available() / KB,
326 new_space_.CommittedMemory() / KB);
327 PrintPID("Old pointers, used: %6" V8_PTR_PREFIX
329 ", available: %6" V8_PTR_PREFIX
331 ", committed: %6" V8_PTR_PREFIX "d KB\n",
332 old_pointer_space_->SizeOfObjects() / KB,
333 old_pointer_space_->Available() / KB,
334 old_pointer_space_->CommittedMemory() / KB);
335 PrintPID("Old data space, used: %6" V8_PTR_PREFIX
337 ", available: %6" V8_PTR_PREFIX
339 ", committed: %6" V8_PTR_PREFIX "d KB\n",
340 old_data_space_->SizeOfObjects() / KB,
341 old_data_space_->Available() / KB,
342 old_data_space_->CommittedMemory() / KB);
343 PrintPID("Code space, used: %6" V8_PTR_PREFIX
345 ", available: %6" V8_PTR_PREFIX
347 ", committed: %6" V8_PTR_PREFIX "d KB\n",
348 code_space_->SizeOfObjects() / KB, code_space_->Available() / KB,
349 code_space_->CommittedMemory() / KB);
350 PrintPID("Map space, used: %6" V8_PTR_PREFIX
352 ", available: %6" V8_PTR_PREFIX
354 ", committed: %6" V8_PTR_PREFIX "d KB\n",
355 map_space_->SizeOfObjects() / KB, map_space_->Available() / KB,
356 map_space_->CommittedMemory() / KB);
357 PrintPID("Cell space, used: %6" V8_PTR_PREFIX
359 ", available: %6" V8_PTR_PREFIX
361 ", committed: %6" V8_PTR_PREFIX "d KB\n",
362 cell_space_->SizeOfObjects() / KB, cell_space_->Available() / KB,
363 cell_space_->CommittedMemory() / KB);
364 PrintPID("PropertyCell space, used: %6" V8_PTR_PREFIX
366 ", available: %6" V8_PTR_PREFIX
368 ", committed: %6" V8_PTR_PREFIX "d KB\n",
369 property_cell_space_->SizeOfObjects() / KB,
370 property_cell_space_->Available() / KB,
371 property_cell_space_->CommittedMemory() / KB);
372 PrintPID("Large object space, used: %6" V8_PTR_PREFIX
374 ", available: %6" V8_PTR_PREFIX
376 ", committed: %6" V8_PTR_PREFIX "d KB\n",
377 lo_space_->SizeOfObjects() / KB, lo_space_->Available() / KB,
378 lo_space_->CommittedMemory() / KB);
379 PrintPID("All spaces, used: %6" V8_PTR_PREFIX
381 ", available: %6" V8_PTR_PREFIX
383 ", committed: %6" V8_PTR_PREFIX "d KB\n",
384 this->SizeOfObjects() / KB, this->Available() / KB,
385 this->CommittedMemory() / KB);
386 PrintPID("External memory reported: %6" V8_PTR_PREFIX "d KB\n",
387 static_cast<intptr_t>(amount_of_external_allocated_memory_ / KB));
388 PrintPID("Total time spent in GC : %.1f ms\n", total_gc_time_ms_);
392 // TODO(1238405): Combine the infrastructure for --heap-stats and
393 // --log-gc to avoid the complicated preprocessor and flag testing.
394 void Heap::ReportStatisticsAfterGC() {
395 // As in the pre-GC report, we use some complicated logic to ensure that
396 // NewSpace statistics are logged exactly once when --log-gc is turned on.
398 if (FLAG_heap_stats) {
399 new_space_.CollectStatistics();
400 ReportHeapStatistics("After GC");
401 } else if (FLAG_log_gc) {
402 new_space_.ReportStatistics();
405 if (FLAG_log_gc) new_space_.ReportStatistics();
410 void Heap::GarbageCollectionPrologue() {
412 AllowHeapAllocation for_the_first_part_of_prologue;
413 ClearJSFunctionResultCaches();
415 unflattened_strings_length_ = 0;
417 if (FLAG_flush_code && FLAG_flush_code_incrementally) {
418 mark_compact_collector()->EnableCodeFlushing(true);
422 if (FLAG_verify_heap) {
428 // Reset GC statistics.
429 promoted_objects_size_ = 0;
430 semi_space_copied_object_size_ = 0;
431 nodes_died_in_new_space_ = 0;
432 nodes_copied_in_new_space_ = 0;
435 UpdateMaximumCommitted();
438 DCHECK(!AllowHeapAllocation::IsAllowed() && gc_state_ == NOT_IN_GC);
440 if (FLAG_gc_verbose) Print();
442 ReportStatisticsBeforeGC();
445 store_buffer()->GCPrologue();
447 if (isolate()->concurrent_osr_enabled()) {
448 isolate()->optimizing_compiler_thread()->AgeBufferedOsrJobs();
451 if (new_space_.IsAtMaximumCapacity()) {
452 maximum_size_scavenges_++;
454 maximum_size_scavenges_ = 0;
456 CheckNewSpaceExpansionCriteria();
460 intptr_t Heap::SizeOfObjects() {
462 AllSpaces spaces(this);
463 for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
464 total += space->SizeOfObjects();
470 void Heap::ClearAllICsByKind(Code::Kind kind) {
471 HeapObjectIterator it(code_space());
473 for (Object* object = it.Next(); object != NULL; object = it.Next()) {
474 Code* code = Code::cast(object);
475 Code::Kind current_kind = code->kind();
476 if (current_kind == Code::FUNCTION ||
477 current_kind == Code::OPTIMIZED_FUNCTION) {
478 code->ClearInlineCaches(kind);
484 void Heap::RepairFreeListsAfterBoot() {
485 PagedSpaces spaces(this);
486 for (PagedSpace* space = spaces.next(); space != NULL;
487 space = spaces.next()) {
488 space->RepairFreeListsAfterBoot();
493 void Heap::ProcessPretenuringFeedback() {
494 if (FLAG_allocation_site_pretenuring) {
495 int tenure_decisions = 0;
496 int dont_tenure_decisions = 0;
497 int allocation_mementos_found = 0;
498 int allocation_sites = 0;
499 int active_allocation_sites = 0;
501 // If the scratchpad overflowed, we have to iterate over the allocation sites list.
503 // TODO(hpayer): We iterate over the whole list of allocation sites when
504 // we grew to the maximum semi-space size to deopt maybe tenured
505 // allocation sites. We could hold the maybe tenured allocation sites
506 // in a separate data structure if this is a performance problem.
507 bool deopt_maybe_tenured = DeoptMaybeTenuredAllocationSites();
508 bool use_scratchpad =
509 allocation_sites_scratchpad_length_ < kAllocationSiteScratchpadSize &&
510 !deopt_maybe_tenured;
513 Object* list_element = allocation_sites_list();
514 bool trigger_deoptimization = false;
515 bool maximum_size_scavenge = MaximumSizeScavenge();
516 while (use_scratchpad ? i < allocation_sites_scratchpad_length_
517 : list_element->IsAllocationSite()) {
518 AllocationSite* site =
520 ? AllocationSite::cast(allocation_sites_scratchpad()->get(i))
521 : AllocationSite::cast(list_element);
522 allocation_mementos_found += site->memento_found_count();
523 if (site->memento_found_count() > 0) {
524 active_allocation_sites++;
525 if (site->DigestPretenuringFeedback(maximum_size_scavenge)) {
526 trigger_deoptimization = true;
528 if (site->GetPretenureMode() == TENURED) {
531 dont_tenure_decisions++;
536 if (deopt_maybe_tenured && site->IsMaybeTenure()) {
537 site->set_deopt_dependent_code(true);
538 trigger_deoptimization = true;
541 if (use_scratchpad) {
544 list_element = site->weak_next();
548 if (trigger_deoptimization) {
549 isolate_->stack_guard()->RequestDeoptMarkedAllocationSites();
552 FlushAllocationSitesScratchpad();
554 if (FLAG_trace_pretenuring_statistics &&
555 (allocation_mementos_found > 0 || tenure_decisions > 0 ||
556 dont_tenure_decisions > 0)) {
558 "GC: (mode, #visited allocation sites, #active allocation sites, "
559 "#mementos, #tenure decisions, #donttenure decisions) "
560 "(%s, %d, %d, %d, %d, %d)\n",
561 use_scratchpad ? "use scratchpad" : "use list", allocation_sites,
562 active_allocation_sites, allocation_mementos_found, tenure_decisions,
563 dont_tenure_decisions);
569 void Heap::DeoptMarkedAllocationSites() {
570 // TODO(hpayer): If iterating over the allocation sites list becomes a
571 // performance issue, use a cached heap data structure instead (similar to the
572 // allocation sites scratchpad).
573 Object* list_element = allocation_sites_list();
574 while (list_element->IsAllocationSite()) {
575 AllocationSite* site = AllocationSite::cast(list_element);
576 if (site->deopt_dependent_code()) {
577 site->dependent_code()->MarkCodeForDeoptimization(
578 isolate_, DependentCode::kAllocationSiteTenuringChangedGroup);
579 site->set_deopt_dependent_code(false);
581 list_element = site->weak_next();
583 Deoptimizer::DeoptimizeMarkedCode(isolate_);
587 void Heap::GarbageCollectionEpilogue() {
588 store_buffer()->GCEpilogue();
590 // In release mode, we only zap the from space under heap verification.
591 if (Heap::ShouldZapGarbage()) {
595 // Process pretenuring feedback and update allocation sites.
596 ProcessPretenuringFeedback();
599 if (FLAG_verify_heap) {
604 AllowHeapAllocation for_the_rest_of_the_epilogue;
607 if (FLAG_print_global_handles) isolate_->global_handles()->Print();
608 if (FLAG_print_handles) PrintHandles();
609 if (FLAG_gc_verbose) Print();
610 if (FLAG_code_stats) ReportCodeStatistics("After GC");
612 if (FLAG_deopt_every_n_garbage_collections > 0) {
613 // TODO(jkummerow/ulan/jarin): This is not safe! We can't assume that
614 // the topmost optimized frame can be deoptimized safely, because it
615 // might not have a lazy bailout point right after its current PC.
616 if (++gcs_since_last_deopt_ == FLAG_deopt_every_n_garbage_collections) {
617 Deoptimizer::DeoptimizeAll(isolate());
618 gcs_since_last_deopt_ = 0;
622 UpdateMaximumCommitted();
624 isolate_->counters()->alive_after_last_gc()->Set(
625 static_cast<int>(SizeOfObjects()));
627 isolate_->counters()->string_table_capacity()->Set(
628 string_table()->Capacity());
629 isolate_->counters()->number_of_symbols()->Set(
630 string_table()->NumberOfElements());
632 if (full_codegen_bytes_generated_ + crankshaft_codegen_bytes_generated_ > 0) {
633 isolate_->counters()->codegen_fraction_crankshaft()->AddSample(
634 static_cast<int>((crankshaft_codegen_bytes_generated_ * 100.0) /
635 (crankshaft_codegen_bytes_generated_ +
636 full_codegen_bytes_generated_)));
639 if (CommittedMemory() > 0) {
640 isolate_->counters()->external_fragmentation_total()->AddSample(
641 static_cast<int>(100 - (SizeOfObjects() * 100.0) / CommittedMemory()));
643 isolate_->counters()->heap_fraction_new_space()->AddSample(static_cast<int>(
644 (new_space()->CommittedMemory() * 100.0) / CommittedMemory()));
645 isolate_->counters()->heap_fraction_old_pointer_space()->AddSample(
646 static_cast<int>((old_pointer_space()->CommittedMemory() * 100.0) /
648 isolate_->counters()->heap_fraction_old_data_space()->AddSample(
649 static_cast<int>((old_data_space()->CommittedMemory() * 100.0) /
651 isolate_->counters()->heap_fraction_code_space()->AddSample(
652 static_cast<int>((code_space()->CommittedMemory() * 100.0) /
654 isolate_->counters()->heap_fraction_map_space()->AddSample(static_cast<int>(
655 (map_space()->CommittedMemory() * 100.0) / CommittedMemory()));
656 isolate_->counters()->heap_fraction_cell_space()->AddSample(
657 static_cast<int>((cell_space()->CommittedMemory() * 100.0) /
659 isolate_->counters()->heap_fraction_property_cell_space()->AddSample(
660 static_cast<int>((property_cell_space()->CommittedMemory() * 100.0) /
662 isolate_->counters()->heap_fraction_lo_space()->AddSample(static_cast<int>(
663 (lo_space()->CommittedMemory() * 100.0) / CommittedMemory()));
665 isolate_->counters()->heap_sample_total_committed()->AddSample(
666 static_cast<int>(CommittedMemory() / KB));
667 isolate_->counters()->heap_sample_total_used()->AddSample(
668 static_cast<int>(SizeOfObjects() / KB));
669 isolate_->counters()->heap_sample_map_space_committed()->AddSample(
670 static_cast<int>(map_space()->CommittedMemory() / KB));
671 isolate_->counters()->heap_sample_cell_space_committed()->AddSample(
672 static_cast<int>(cell_space()->CommittedMemory() / KB));
674 ->heap_sample_property_cell_space_committed()
676 static_cast<int>(property_cell_space()->CommittedMemory() / KB));
677 isolate_->counters()->heap_sample_code_space_committed()->AddSample(
678 static_cast<int>(code_space()->CommittedMemory() / KB));
680 isolate_->counters()->heap_sample_maximum_committed()->AddSample(
681 static_cast<int>(MaximumCommittedMemory() / KB));
684 #define UPDATE_COUNTERS_FOR_SPACE(space) \
685 isolate_->counters()->space##_bytes_available()->Set( \
686 static_cast<int>(space()->Available())); \
687 isolate_->counters()->space##_bytes_committed()->Set( \
688 static_cast<int>(space()->CommittedMemory())); \
689 isolate_->counters()->space##_bytes_used()->Set( \
690 static_cast<int>(space()->SizeOfObjects()));
691 #define UPDATE_FRAGMENTATION_FOR_SPACE(space) \
692 if (space()->CommittedMemory() > 0) { \
693 isolate_->counters()->external_fragmentation_##space()->AddSample( \
694 static_cast<int>(100 - \
695 (space()->SizeOfObjects() * 100.0) / \
696 space()->CommittedMemory())); \
698 #define UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(space) \
699 UPDATE_COUNTERS_FOR_SPACE(space) \
700 UPDATE_FRAGMENTATION_FOR_SPACE(space)
702 UPDATE_COUNTERS_FOR_SPACE(new_space)
703 UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(old_pointer_space)
704 UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(old_data_space)
705 UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(code_space)
706 UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(map_space)
707 UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(cell_space)
708 UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(property_cell_space)
709 UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(lo_space)
710 #undef UPDATE_COUNTERS_FOR_SPACE
711 #undef UPDATE_FRAGMENTATION_FOR_SPACE
712 #undef UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE
715 ReportStatisticsAfterGC();
718 // Remember the last top pointer so that we can later find out
719 // whether we allocated in new space since the last GC.
720 new_space_top_after_last_gc_ = new_space()->top();
724 void Heap::CollectAllGarbage(int flags, const char* gc_reason,
725 const v8::GCCallbackFlags gc_callback_flags) {
726 // Since we are ignoring the return value, the exact choice of space does
727 // not matter, so long as we do not specify NEW_SPACE, which would not cause a full GC.
729 mark_compact_collector_.SetFlags(flags);
730 CollectGarbage(OLD_POINTER_SPACE, gc_reason, gc_callback_flags);
731 mark_compact_collector_.SetFlags(kNoGCFlags);
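// Illustrative call site (sketch, not from this file): a full GC that also
// tries to shrink the heap can be requested with one of the Heap flag masks,
// e.g.
//
//   heap->CollectAllGarbage(Heap::kReduceMemoryFootprintMask,
//                           "example: low memory notification");
//
// The reason string is only used for tracing.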
735 void Heap::CollectAllAvailableGarbage(const char* gc_reason) {
736 // Since we are ignoring the return value, the exact choice of space does
737 // not matter, so long as we do not specify NEW_SPACE, which would not cause a full GC.
739 // Major GC would invoke weak handle callbacks on weakly reachable
740 // handles, but won't collect weakly reachable objects until next
741 // major GC. Therefore if we collect aggressively and weak handle callback
742 // has been invoked, we rerun major GC to release objects which become garbage.
744 // Note: as weak callbacks can execute arbitrary code, we cannot
745 // hope that eventually there will be no weak callbacks invocations.
746 // Therefore stop recollecting after several attempts.
747 if (isolate()->concurrent_recompilation_enabled()) {
748 // The optimizing compiler may be unnecessarily holding on to memory.
749 DisallowHeapAllocation no_recursive_gc;
750 isolate()->optimizing_compiler_thread()->Flush();
752 mark_compact_collector()->SetFlags(kMakeHeapIterableMask |
753 kReduceMemoryFootprintMask);
754 isolate_->compilation_cache()->Clear();
755 const int kMaxNumberOfAttempts = 7;
756 const int kMinNumberOfAttempts = 2;
757 for (int attempt = 0; attempt < kMaxNumberOfAttempts; attempt++) {
758 if (!CollectGarbage(MARK_COMPACTOR, gc_reason, NULL) &&
759 attempt + 1 >= kMinNumberOfAttempts) {
763 mark_compact_collector()->SetFlags(kNoGCFlags);
766 incremental_marking()->UncommitMarkingDeque();
770 void Heap::EnsureFillerObjectAtTop() {
771 // There may be an allocation memento behind every object in new space.
772 // If we evacuate a new space that is not full, or if we are on the last page of
773 // the new space, then there may be uninitialized memory behind the top
774 // pointer of the new space page. We store a filler object there to
775 // identify the unused space.
776 Address from_top = new_space_.top();
777 Address from_limit = new_space_.limit();
778 if (from_top < from_limit) {
779 int remaining_in_page = static_cast<int>(from_limit - from_top);
780 CreateFillerObjectAt(from_top, remaining_in_page);
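// Illustrative scenario for the filler above (addresses invented): if
// new_space_.top() is 0x3000 and new_space_.limit() is 0x4000 on the current
// page, the 0x1000 bytes in between are covered by a filler object so that
// heap iteration and allocation-memento checks never read uninitialized
// memory behind the top pointer.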
785 bool Heap::CollectGarbage(GarbageCollector collector, const char* gc_reason,
786 const char* collector_reason,
787 const v8::GCCallbackFlags gc_callback_flags) {
788 // The VM is in the GC state until exiting this function.
789 VMState<GC> state(isolate_);
792 // Reset the allocation timeout to the GC interval, but make sure to
793 // allow at least a few allocations after a collection. The reason
794 // for this is that we have a lot of allocation sequences and we
795 // assume that a garbage collection will allow the subsequent
796 // allocation attempts to go through.
797 allocation_timeout_ = Max(6, FLAG_gc_interval);
800 EnsureFillerObjectAtTop();
802 if (collector == SCAVENGER && !incremental_marking()->IsStopped()) {
803 if (FLAG_trace_incremental_marking) {
804 PrintF("[IncrementalMarking] Scavenge during marking.\n");
808 if (collector == MARK_COMPACTOR &&
809 !mark_compact_collector()->abort_incremental_marking() &&
810 !incremental_marking()->IsStopped() &&
811 !incremental_marking()->should_hurry() &&
812 FLAG_incremental_marking_steps) {
813 // Make progress in incremental marking.
814 const intptr_t kStepSizeWhenDelayedByScavenge = 1 * MB;
815 incremental_marking()->Step(kStepSizeWhenDelayedByScavenge,
816 IncrementalMarking::NO_GC_VIA_STACK_GUARD);
817 if (!incremental_marking()->IsComplete() && !FLAG_gc_global) {
818 if (FLAG_trace_incremental_marking) {
819 PrintF("[IncrementalMarking] Delaying MarkSweep.\n");
821 collector = SCAVENGER;
822 collector_reason = "incremental marking delaying mark-sweep";
826 bool next_gc_likely_to_collect_more = false;
829 tracer()->Start(collector, gc_reason, collector_reason);
830 DCHECK(AllowHeapAllocation::IsAllowed());
831 DisallowHeapAllocation no_allocation_during_gc;
832 GarbageCollectionPrologue();
835 HistogramTimerScope histogram_timer_scope(
836 (collector == SCAVENGER) ? isolate_->counters()->gc_scavenger()
837 : isolate_->counters()->gc_compactor());
838 next_gc_likely_to_collect_more =
839 PerformGarbageCollection(collector, gc_callback_flags);
842 GarbageCollectionEpilogue();
846 // Start incremental marking for the next cycle. The heap snapshot
847 // generator needs incremental marking to stay off after it aborted.
848 if (!mark_compact_collector()->abort_incremental_marking() &&
849 WorthActivatingIncrementalMarking()) {
850 incremental_marking()->Start();
853 return next_gc_likely_to_collect_more;
857 int Heap::NotifyContextDisposed() {
858 if (isolate()->concurrent_recompilation_enabled()) {
859 // Flush the queued recompilation tasks.
860 isolate()->optimizing_compiler_thread()->Flush();
862 flush_monomorphic_ics_ = true;
864 return ++contexts_disposed_;
868 void Heap::MoveElements(FixedArray* array, int dst_index, int src_index,
870 if (len == 0) return;
872 DCHECK(array->map() != fixed_cow_array_map());
873 Object** dst_objects = array->data_start() + dst_index;
874 MemMove(dst_objects, array->data_start() + src_index, len * kPointerSize);
875 if (!InNewSpace(array)) {
876 for (int i = 0; i < len; i++) {
877 // TODO(hpayer): check store buffer for entries
878 if (InNewSpace(dst_objects[i])) {
879 RecordWrite(array->address(), array->OffsetOfElementAt(dst_index + i));
883 incremental_marking()->RecordWrites(array);
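// Illustrative use of MoveElements (sketch): dropping the first two elements
// of a 10-element FixedArray by shifting the tail down,
//
//   heap->MoveElements(array, 0 /* dst_index */, 2 /* src_index */, 8 /* len */);
//
// The write-barrier work above keeps the store buffer and the incremental
// marker consistent for the moved slots when the array is in old space.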
888 // Helper class for verifying the string table.
889 class StringTableVerifier : public ObjectVisitor {
891 void VisitPointers(Object** start, Object** end) {
892 // Visit all HeapObject pointers in [start, end).
893 for (Object** p = start; p < end; p++) {
894 if ((*p)->IsHeapObject()) {
895 // Check that the string is actually internalized.
896 CHECK((*p)->IsTheHole() || (*p)->IsUndefined() ||
897 (*p)->IsInternalizedString());
904 static void VerifyStringTable(Heap* heap) {
905 StringTableVerifier verifier;
906 heap->string_table()->IterateElements(&verifier);
908 #endif // VERIFY_HEAP
911 static bool AbortIncrementalMarkingAndCollectGarbage(
912 Heap* heap, AllocationSpace space, const char* gc_reason = NULL) {
913 heap->mark_compact_collector()->SetFlags(Heap::kAbortIncrementalMarkingMask);
914 bool result = heap->CollectGarbage(space, gc_reason);
915 heap->mark_compact_collector()->SetFlags(Heap::kNoGCFlags);
920 void Heap::ReserveSpace(int* sizes, Address* locations_out) {
921 bool gc_performed = true;
923 static const int kThreshold = 20;
924 while (gc_performed && counter++ < kThreshold) {
925 gc_performed = false;
926 DCHECK(NEW_SPACE == FIRST_PAGED_SPACE - 1);
927 for (int space = NEW_SPACE; space <= LAST_PAGED_SPACE; space++) {
928 if (sizes[space] != 0) {
929 AllocationResult allocation;
930 if (space == NEW_SPACE) {
931 allocation = new_space()->AllocateRaw(sizes[space]);
933 allocation = paged_space(space)->AllocateRaw(sizes[space]);
936 if (!allocation.To(&node)) {
937 if (space == NEW_SPACE) {
938 Heap::CollectGarbage(NEW_SPACE,
939 "failed to reserve space in the new space");
941 AbortIncrementalMarkingAndCollectGarbage(
942 this, static_cast<AllocationSpace>(space),
943 "failed to reserve space in paged space");
948 // Mark with a free list node, in case we have a GC before allocating memory from it.
950 node->set_size(this, sizes[space]);
951 locations_out[space] = node->address();
958 // Failed to reserve the space after several attempts.
959 V8::FatalProcessOutOfMemory("Heap::ReserveSpace");
964 void Heap::EnsureFromSpaceIsCommitted() {
965 if (new_space_.CommitFromSpaceIfNeeded()) return;
967 // Committing memory to from space failed.
968 // Memory is exhausted and we will die.
969 V8::FatalProcessOutOfMemory("Committing semi space failed.");
973 void Heap::ClearJSFunctionResultCaches() {
974 if (isolate_->bootstrapper()->IsActive()) return;
976 Object* context = native_contexts_list();
977 while (!context->IsUndefined()) {
978 // Get the caches for this context. GC can happen when the context
979 // is not fully initialized, so the caches can be undefined.
980 Object* caches_or_undefined =
981 Context::cast(context)->get(Context::JSFUNCTION_RESULT_CACHES_INDEX);
982 if (!caches_or_undefined->IsUndefined()) {
983 FixedArray* caches = FixedArray::cast(caches_or_undefined);
985 int length = caches->length();
986 for (int i = 0; i < length; i++) {
987 JSFunctionResultCache::cast(caches->get(i))->Clear();
990 // Get the next context:
991 context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
996 void Heap::ClearNormalizedMapCaches() {
997 if (isolate_->bootstrapper()->IsActive() &&
998 !incremental_marking()->IsMarking()) {
1002 Object* context = native_contexts_list();
1003 while (!context->IsUndefined()) {
1004 // GC can happen when the context is not fully initialized,
1005 // so the cache can be undefined.
1007 Context::cast(context)->get(Context::NORMALIZED_MAP_CACHE_INDEX);
1008 if (!cache->IsUndefined()) {
1009 NormalizedMapCache::cast(cache)->Clear();
1011 context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
1016 void Heap::UpdateSurvivalStatistics(int start_new_space_size) {
1017 if (start_new_space_size == 0) return;
1019 promotion_rate_ = (static_cast<double>(promoted_objects_size_) /
1020 static_cast<double>(start_new_space_size) * 100);
1022 semi_space_copied_rate_ =
1023 (static_cast<double>(semi_space_copied_object_size_) /
1024 static_cast<double>(start_new_space_size) * 100);
1026 double survival_rate = promotion_rate_ + semi_space_copied_rate_;
1028 if (survival_rate > kYoungSurvivalRateHighThreshold) {
1029 high_survival_rate_period_length_++;
1031 high_survival_rate_period_length_ = 0;
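// Worked example for the rates above (illustrative numbers): with a new space
// of 1 MB at scavenge start, 100 KB of promoted objects and 150 KB copied
// within the semispaces give promotion_rate_ of about 9.8% and
// semi_space_copied_rate_ of about 14.6%, i.e. a combined survival rate of
// roughly 24.4%, which would not count as a high-survival-rate period.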
1035 bool Heap::PerformGarbageCollection(
1036 GarbageCollector collector, const v8::GCCallbackFlags gc_callback_flags) {
1037 int freed_global_handles = 0;
1039 if (collector != SCAVENGER) {
1040 PROFILE(isolate_, CodeMovingGCEvent());
1044 if (FLAG_verify_heap) {
1045 VerifyStringTable(this);
1050 collector == MARK_COMPACTOR ? kGCTypeMarkSweepCompact : kGCTypeScavenge;
1053 GCCallbacksScope scope(this);
1054 if (scope.CheckReenter()) {
1055 AllowHeapAllocation allow_allocation;
1056 GCTracer::Scope scope(tracer(), GCTracer::Scope::EXTERNAL);
1057 VMState<EXTERNAL> state(isolate_);
1058 HandleScope handle_scope(isolate_);
1059 CallGCPrologueCallbacks(gc_type, kNoGCCallbackFlags);
1063 EnsureFromSpaceIsCommitted();
1065 int start_new_space_size = Heap::new_space()->SizeAsInt();
1067 if (IsHighSurvivalRate()) {
1068 // We speed up the incremental marker if it is running so that it
1069 // does not fall behind the rate of promotion, which would cause a
1070 // constantly growing old space.
1071 incremental_marking()->NotifyOfHighPromotionRate();
1074 if (collector == MARK_COMPACTOR) {
1075 // Perform mark-sweep with optional compaction.
1077 sweep_generation_++;
1078 // Temporarily set the limit for the case when PostGarbageCollectionProcessing
1079 // allocates and triggers GC. The real limit is set after
1080 // PostGarbageCollectionProcessing.
1081 old_generation_allocation_limit_ =
1082 OldGenerationAllocationLimit(PromotedSpaceSizeOfObjects(), 0);
1083 old_gen_exhausted_ = false;
1088 UpdateSurvivalStatistics(start_new_space_size);
1090 isolate_->counters()->objs_since_last_young()->Set(0);
1092 // Callbacks that fire after this point might trigger nested GCs and
1093 // restart incremental marking, so the assertion can't be moved down.
1094 DCHECK(collector == SCAVENGER || incremental_marking()->IsStopped());
1096 gc_post_processing_depth_++;
1098 AllowHeapAllocation allow_allocation;
1099 GCTracer::Scope scope(tracer(), GCTracer::Scope::EXTERNAL);
1100 freed_global_handles =
1101 isolate_->global_handles()->PostGarbageCollectionProcessing(collector);
1103 gc_post_processing_depth_--;
1105 isolate_->eternal_handles()->PostGarbageCollectionProcessing(this);
1107 // Update relocatables.
1108 Relocatable::PostGarbageCollectionProcessing(isolate_);
1110 if (collector == MARK_COMPACTOR) {
1111 // Register the amount of externally allocated memory.
1112 amount_of_external_allocated_memory_at_last_global_gc_ =
1113 amount_of_external_allocated_memory_;
1114 old_generation_allocation_limit_ = OldGenerationAllocationLimit(
1115 PromotedSpaceSizeOfObjects(), freed_global_handles);
1119 GCCallbacksScope scope(this);
1120 if (scope.CheckReenter()) {
1121 AllowHeapAllocation allow_allocation;
1122 GCTracer::Scope scope(tracer(), GCTracer::Scope::EXTERNAL);
1123 VMState<EXTERNAL> state(isolate_);
1124 HandleScope handle_scope(isolate_);
1125 CallGCEpilogueCallbacks(gc_type, gc_callback_flags);
1130 if (FLAG_verify_heap) {
1131 VerifyStringTable(this);
1135 return freed_global_handles > 0;
1139 void Heap::CallGCPrologueCallbacks(GCType gc_type, GCCallbackFlags flags) {
1140 for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
1141 if (gc_type & gc_prologue_callbacks_[i].gc_type) {
1142 if (!gc_prologue_callbacks_[i].pass_isolate_) {
1143 v8::GCPrologueCallback callback =
1144 reinterpret_cast<v8::GCPrologueCallback>(
1145 gc_prologue_callbacks_[i].callback);
1146 callback(gc_type, flags);
1148 v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(this->isolate());
1149 gc_prologue_callbacks_[i].callback(isolate, gc_type, flags);
1156 void Heap::CallGCEpilogueCallbacks(GCType gc_type,
1157 GCCallbackFlags gc_callback_flags) {
1158 for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
1159 if (gc_type & gc_epilogue_callbacks_[i].gc_type) {
1160 if (!gc_epilogue_callbacks_[i].pass_isolate_) {
1161 v8::GCPrologueCallback callback =
1162 reinterpret_cast<v8::GCPrologueCallback>(
1163 gc_epilogue_callbacks_[i].callback);
1164 callback(gc_type, gc_callback_flags);
1166 v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(this->isolate());
1167 gc_epilogue_callbacks_[i].callback(isolate, gc_type, gc_callback_flags);
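// Illustrative embedder-side registration (sketch): callbacks dispatched by
// the two loops above are typically installed through the public API, e.g.
//
//   static void OnGC(v8::Isolate* isolate, v8::GCType type,
//                    v8::GCCallbackFlags flags) { /* ... */ }
//   isolate->AddGCPrologueCallback(OnGC, v8::kGCTypeMarkSweepCompact);
//
// Callbacks registered through the older, isolate-less entry points take the
// pass_isolate_ == false branch above.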
1174 void Heap::MarkCompact() {
1175 gc_state_ = MARK_COMPACT;
1176 LOG(isolate_, ResourceEvent("markcompact", "begin"));
1178 uint64_t size_of_objects_before_gc = SizeOfObjects();
1180 mark_compact_collector_.Prepare();
1184 MarkCompactPrologue();
1186 mark_compact_collector_.CollectGarbage();
1188 LOG(isolate_, ResourceEvent("markcompact", "end"));
1190 gc_state_ = NOT_IN_GC;
1192 isolate_->counters()->objs_since_last_full()->Set(0);
1194 flush_monomorphic_ics_ = false;
1196 if (FLAG_allocation_site_pretenuring) {
1197 EvaluateOldSpaceLocalPretenuring(size_of_objects_before_gc);
1202 void Heap::MarkCompactPrologue() {
1203 // At any old GC, clear the keyed lookup cache to enable collection of unused maps.
1205 isolate_->keyed_lookup_cache()->Clear();
1206 isolate_->context_slot_cache()->Clear();
1207 isolate_->descriptor_lookup_cache()->Clear();
1208 RegExpResultsCache::Clear(string_split_cache());
1209 RegExpResultsCache::Clear(regexp_multiple_cache());
1211 isolate_->compilation_cache()->MarkCompactPrologue();
1213 CompletelyClearInstanceofCache();
1215 FlushNumberStringCache();
1216 if (FLAG_cleanup_code_caches_at_gc) {
1217 polymorphic_code_cache()->set_cache(undefined_value());
1220 ClearNormalizedMapCaches();
1224 // Helper class for copying HeapObjects
1225 class ScavengeVisitor : public ObjectVisitor {
1227 explicit ScavengeVisitor(Heap* heap) : heap_(heap) {}
1229 void VisitPointer(Object** p) { ScavengePointer(p); }
1231 void VisitPointers(Object** start, Object** end) {
1232 // Copy all HeapObject pointers in [start, end)
1233 for (Object** p = start; p < end; p++) ScavengePointer(p);
1237 void ScavengePointer(Object** p) {
1238 Object* object = *p;
1239 if (!heap_->InNewSpace(object)) return;
1240 Heap::ScavengeObject(reinterpret_cast<HeapObject**>(p),
1241 reinterpret_cast<HeapObject*>(object));
1249 // Visitor class to verify pointers in code or data space do not point into new space.
1251 class VerifyNonPointerSpacePointersVisitor : public ObjectVisitor {
1253 explicit VerifyNonPointerSpacePointersVisitor(Heap* heap) : heap_(heap) {}
1254 void VisitPointers(Object** start, Object** end) {
1255 for (Object** current = start; current < end; current++) {
1256 if ((*current)->IsHeapObject()) {
1257 CHECK(!heap_->InNewSpace(HeapObject::cast(*current)));
1267 static void VerifyNonPointerSpacePointers(Heap* heap) {
1268 // Verify that there are no pointers to new space in spaces where we
1269 // do not expect them.
1270 VerifyNonPointerSpacePointersVisitor v(heap);
1271 HeapObjectIterator code_it(heap->code_space());
1272 for (HeapObject* object = code_it.Next(); object != NULL;
1273 object = code_it.Next())
1274 object->Iterate(&v);
1276 HeapObjectIterator data_it(heap->old_data_space());
1277 for (HeapObject* object = data_it.Next(); object != NULL;
1278 object = data_it.Next())
1279 object->Iterate(&v);
1281 #endif // VERIFY_HEAP
1284 void Heap::CheckNewSpaceExpansionCriteria() {
1285 if (new_space_.TotalCapacity() < new_space_.MaximumCapacity() &&
1286 survived_since_last_expansion_ > new_space_.TotalCapacity()) {
1287 // Grow the size of new space if there is room to grow, enough data
1288 // has survived scavenge since the last expansion and we are not in
1289 // high promotion mode.
1291 survived_since_last_expansion_ = 0;
1296 static bool IsUnscavengedHeapObject(Heap* heap, Object** p) {
1297 return heap->InNewSpace(*p) &&
1298 !HeapObject::cast(*p)->map_word().IsForwardingAddress();
1302 void Heap::ScavengeStoreBufferCallback(Heap* heap, MemoryChunk* page,
1303 StoreBufferEvent event) {
1304 heap->store_buffer_rebuilder_.Callback(page, event);
1308 void StoreBufferRebuilder::Callback(MemoryChunk* page, StoreBufferEvent event) {
1309 if (event == kStoreBufferStartScanningPagesEvent) {
1310 start_of_current_page_ = NULL;
1311 current_page_ = NULL;
1312 } else if (event == kStoreBufferScanningPageEvent) {
1313 if (current_page_ != NULL) {
1314 // Check whether this page already overflowed the store buffer during this iteration.
1315 if (current_page_->scan_on_scavenge()) {
1316 // If so, wipe out the entries that have been added for it.
1317 store_buffer_->SetTop(start_of_current_page_);
1318 } else if (store_buffer_->Top() - start_of_current_page_ >=
1319 (store_buffer_->Limit() - store_buffer_->Top()) >> 2) {
1320 // Did we find too many pointers in the previous page? The heuristic is
1321 // that no page can take more than 1/5 of the remaining slots in the store buffer.
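// Worked example of the heuristic above (illustrative numbers): if 1000 slots
// were free in the store buffer when scanning of this page began and the page
// contributed 200 entries, then 800 slots remain and 200 >= (800 >> 2), so
// the page is switched to scan_on_scavenge and its 200 entries are dropped;
// 200 is exactly 1/5 of the original 1000 free slots.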
1323 current_page_->set_scan_on_scavenge(true);
1324 store_buffer_->SetTop(start_of_current_page_);
1326 // In this case the page we scanned took a reasonable number of slots in
1327 // the store buffer. It has now been rehabilitated and is no longer
1328 // marked scan_on_scavenge.
1329 DCHECK(!current_page_->scan_on_scavenge());
1332 start_of_current_page_ = store_buffer_->Top();
1333 current_page_ = page;
1334 } else if (event == kStoreBufferFullEvent) {
1335 // The current page overflowed the store buffer again. Wipe out its entries
1336 // in the store buffer and mark it scan-on-scavenge again. This may happen
1337 // several times while scanning.
1338 if (current_page_ == NULL) {
1339 // Store Buffer overflowed while scanning promoted objects. These are not
1340 // in any particular page, though they are likely to be clustered by the
1341 // allocation routines.
1342 store_buffer_->EnsureSpace(StoreBuffer::kStoreBufferSize / 2);
1344 // Store Buffer overflowed while scanning a particular old space page for
1345 // pointers to new space.
1346 DCHECK(current_page_ == page);
1347 DCHECK(page != NULL);
1348 current_page_->set_scan_on_scavenge(true);
1349 DCHECK(start_of_current_page_ != store_buffer_->Top());
1350 store_buffer_->SetTop(start_of_current_page_);
1358 void PromotionQueue::Initialize() {
1359 // Assumes that a NewSpacePage exactly fits a number of promotion queue
1360 // entries (where each is a pair of intptr_t). This allows us to simplify
1361 // the test for when to switch pages.
1362 DCHECK((Page::kPageSize - MemoryChunk::kBodyOffset) % (2 * kPointerSize) ==
1364 limit_ = reinterpret_cast<intptr_t*>(heap_->new_space()->ToSpaceStart());
1366 reinterpret_cast<intptr_t*>(heap_->new_space()->ToSpaceEnd());
1367 emergency_stack_ = NULL;
1371 void PromotionQueue::RelocateQueueHead() {
1372 DCHECK(emergency_stack_ == NULL);
1374 Page* p = Page::FromAllocationTop(reinterpret_cast<Address>(rear_));
1375 intptr_t* head_start = rear_;
1376 intptr_t* head_end = Min(front_, reinterpret_cast<intptr_t*>(p->area_end()));
1379 static_cast<int>(head_end - head_start) / kEntrySizeInWords;
1381 emergency_stack_ = new List<Entry>(2 * entries_count);
1383 while (head_start != head_end) {
1384 int size = static_cast<int>(*(head_start++));
1385 HeapObject* obj = reinterpret_cast<HeapObject*>(*(head_start++));
1386 emergency_stack_->Add(Entry(obj, size));
1392 class ScavengeWeakObjectRetainer : public WeakObjectRetainer {
1394 explicit ScavengeWeakObjectRetainer(Heap* heap) : heap_(heap) {}
1396 virtual Object* RetainAs(Object* object) {
1397 if (!heap_->InFromSpace(object)) {
1401 MapWord map_word = HeapObject::cast(object)->map_word();
1402 if (map_word.IsForwardingAddress()) {
1403 return map_word.ToForwardingAddress();
1413 void Heap::Scavenge() {
1414 RelocationLock relocation_lock(this);
1417 if (FLAG_verify_heap) VerifyNonPointerSpacePointers(this);
1420 gc_state_ = SCAVENGE;
1422 // Implements Cheney's copying algorithm
1423 LOG(isolate_, ResourceEvent("scavenge", "begin"));
1425 // Clear descriptor cache.
1426 isolate_->descriptor_lookup_cache()->Clear();
1428 // Used for updating survived_since_last_expansion_ at function end.
1429 intptr_t survived_watermark = PromotedSpaceSizeOfObjects();
1431 SelectScavengingVisitorsTable();
1433 incremental_marking()->PrepareForScavenge();
1435 // Flip the semispaces. After flipping, to space is empty, from space has live objects.
1438 new_space_.ResetAllocationInfo();
1440 // We need to sweep newly copied objects which can be either in the
1441 // to space or promoted to the old generation. For to-space
1442 // objects, we treat the bottom of the to space as a queue. Newly
1443 // copied and unswept objects lie between a 'front' mark and the
1444 // allocation pointer.
1446 // Promoted objects can go into various old-generation spaces, and
1447 // can be allocated internally in the spaces (from the free list).
1448 // We treat the top of the to space as a queue of addresses of
1449 // promoted objects. The addresses of newly promoted and unswept
1450 // objects lie between a 'front' mark and a 'rear' mark that is
1451 // updated as a side effect of promoting an object.
1453 // There is guaranteed to be enough room at the top of the to space
1454 // for the addresses of promoted objects: every object promoted
1455 // frees up its size in bytes from the top of the new space, and
1456 // objects are at least one pointer in size.
1457 Address new_space_front = new_space_.ToSpaceStart();
1458 promotion_queue_.Initialize();
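// Rough picture of to-space at this point (illustrative): copied objects
// accumulate upward from ToSpaceStart(); new_space_front marks the boundary
// between already-processed and not-yet-processed copies; the promotion queue
// occupies the unused high end of to-space and grows downward, holding
// (object, size) entry pairs for promoted objects whose slots still need to
// be scanned.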
1461 store_buffer()->Clean();
1464 ScavengeVisitor scavenge_visitor(this);
1466 IterateRoots(&scavenge_visitor, VISIT_ALL_IN_SCAVENGE);
1468 // Copy objects reachable from the old generation.
1470 StoreBufferRebuildScope scope(this, store_buffer(),
1471 &ScavengeStoreBufferCallback);
1472 store_buffer()->IteratePointersToNewSpace(&ScavengeObject);
1475 // Copy objects reachable from simple cells by scavenging cell values directly.
1477 HeapObjectIterator cell_iterator(cell_space_);
1478 for (HeapObject* heap_object = cell_iterator.Next(); heap_object != NULL;
1479 heap_object = cell_iterator.Next()) {
1480 if (heap_object->IsCell()) {
1481 Cell* cell = Cell::cast(heap_object);
1482 Address value_address = cell->ValueAddress();
1483 scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(value_address));
1487 // Copy objects reachable from global property cells by scavenging global
1488 // property cell values directly.
1489 HeapObjectIterator js_global_property_cell_iterator(property_cell_space_);
1490 for (HeapObject* heap_object = js_global_property_cell_iterator.Next();
1491 heap_object != NULL;
1492 heap_object = js_global_property_cell_iterator.Next()) {
1493 if (heap_object->IsPropertyCell()) {
1494 PropertyCell* cell = PropertyCell::cast(heap_object);
1495 Address value_address = cell->ValueAddress();
1496 scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(value_address));
1497 Address type_address = cell->TypeAddress();
1498 scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(type_address));
1502 // Copy objects reachable from the encountered weak collections list.
1503 scavenge_visitor.VisitPointer(&encountered_weak_collections_);
1505 // Copy objects reachable from the code flushing candidates list.
1506 MarkCompactCollector* collector = mark_compact_collector();
1507 if (collector->is_code_flushing_enabled()) {
1508 collector->code_flusher()->IteratePointersToFromSpace(&scavenge_visitor);
1511 new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
1513 while (isolate()->global_handles()->IterateObjectGroups(
1514 &scavenge_visitor, &IsUnscavengedHeapObject)) {
1515 new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
1517 isolate()->global_handles()->RemoveObjectGroups();
1518 isolate()->global_handles()->RemoveImplicitRefGroups();
1520 isolate_->global_handles()->IdentifyNewSpaceWeakIndependentHandles(
1521 &IsUnscavengedHeapObject);
1522 isolate_->global_handles()->IterateNewSpaceWeakIndependentRoots(
1524 new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
1526 UpdateNewSpaceReferencesInExternalStringTable(
1527 &UpdateNewSpaceReferenceInExternalStringTableEntry);
1529 promotion_queue_.Destroy();
1531 incremental_marking()->UpdateMarkingDequeAfterScavenge();
1533 ScavengeWeakObjectRetainer weak_object_retainer(this);
1534 ProcessWeakReferences(&weak_object_retainer);
1536 DCHECK(new_space_front == new_space_.top());
1539 new_space_.set_age_mark(new_space_.top());
1541 new_space_.LowerInlineAllocationLimit(
1542 new_space_.inline_allocation_limit_step());
1544 // Update how much has survived scavenge.
1545 IncrementYoungSurvivorsCounter(static_cast<int>(
1546 (PromotedSpaceSizeOfObjects() - survived_watermark) + new_space_.Size()));
1548 LOG(isolate_, ResourceEvent("scavenge", "end"));
1550 gc_state_ = NOT_IN_GC;
1552 gc_idle_time_handler_.NotifyScavenge();
1556 String* Heap::UpdateNewSpaceReferenceInExternalStringTableEntry(Heap* heap,
1558 MapWord first_word = HeapObject::cast(*p)->map_word();
1560 if (!first_word.IsForwardingAddress()) {
1561 // Unreachable external string can be finalized.
1562 heap->FinalizeExternalString(String::cast(*p));
1566 // String is still reachable.
1567 return String::cast(first_word.ToForwardingAddress());
1571 void Heap::UpdateNewSpaceReferencesInExternalStringTable(
1572 ExternalStringTableUpdaterCallback updater_func) {
1574 if (FLAG_verify_heap) {
1575 external_string_table_.Verify();
1579 if (external_string_table_.new_space_strings_.is_empty()) return;
1581 Object** start = &external_string_table_.new_space_strings_[0];
1582 Object** end = start + external_string_table_.new_space_strings_.length();
1583 Object** last = start;
1585 for (Object** p = start; p < end; ++p) {
1586 DCHECK(InFromSpace(*p));
1587 String* target = updater_func(this, p);
1589 if (target == NULL) continue;
1591 DCHECK(target->IsExternalString());
1593 if (InNewSpace(target)) {
1594 // String is still in new space. Update the table entry.
1598 // String got promoted. Move it to the old string list.
1599 external_string_table_.AddOldString(target);
1603 DCHECK(last <= end);
1604 external_string_table_.ShrinkNewStrings(static_cast<int>(last - start));
1608 void Heap::UpdateReferencesInExternalStringTable(
1609 ExternalStringTableUpdaterCallback updater_func) {
1610 // Update old space string references.
1611 if (external_string_table_.old_space_strings_.length() > 0) {
1612 Object** start = &external_string_table_.old_space_strings_[0];
1613 Object** end = start + external_string_table_.old_space_strings_.length();
1614 for (Object** p = start; p < end; ++p) *p = updater_func(this, p);
1617 UpdateNewSpaceReferencesInExternalStringTable(updater_func);
1621 void Heap::ProcessWeakReferences(WeakObjectRetainer* retainer) {
1622 ProcessArrayBuffers(retainer);
1623 ProcessNativeContexts(retainer);
1624 // TODO(mvstanton): AllocationSites only need to be processed during
1625 // MARK_COMPACT, as they live in old space. Verify and address.
1626 ProcessAllocationSites(retainer);
1630 void Heap::ProcessNativeContexts(WeakObjectRetainer* retainer) {
1631 Object* head = VisitWeakList<Context>(this, native_contexts_list(), retainer);
1632 // Update the head of the list of contexts.
1633 set_native_contexts_list(head);
1637 void Heap::ProcessArrayBuffers(WeakObjectRetainer* retainer) {
1638 Object* array_buffer_obj =
1639 VisitWeakList<JSArrayBuffer>(this, array_buffers_list(), retainer);
1640 set_array_buffers_list(array_buffer_obj);
1644 void Heap::TearDownArrayBuffers() {
1645 Object* undefined = undefined_value();
1646 for (Object* o = array_buffers_list(); o != undefined;) {
1647 JSArrayBuffer* buffer = JSArrayBuffer::cast(o);
1648 Runtime::FreeArrayBuffer(isolate(), buffer);
1649 o = buffer->weak_next();
1651 set_array_buffers_list(undefined);
1655 void Heap::ProcessAllocationSites(WeakObjectRetainer* retainer) {
1656 Object* allocation_site_obj =
1657 VisitWeakList<AllocationSite>(this, allocation_sites_list(), retainer);
1658 set_allocation_sites_list(allocation_site_obj);
1662 void Heap::ResetAllAllocationSitesDependentCode(PretenureFlag flag) {
1663 DisallowHeapAllocation no_allocation_scope;
1664 Object* cur = allocation_sites_list();
1665 bool marked = false;
1666 while (cur->IsAllocationSite()) {
1667 AllocationSite* casted = AllocationSite::cast(cur);
1668 if (casted->GetPretenureMode() == flag) {
1669 casted->ResetPretenureDecision();
1670 casted->set_deopt_dependent_code(true);
1673 cur = casted->weak_next();
1675 if (marked) isolate_->stack_guard()->RequestDeoptMarkedAllocationSites();
1679 void Heap::EvaluateOldSpaceLocalPretenuring(
1680 uint64_t size_of_objects_before_gc) {
1681 uint64_t size_of_objects_after_gc = SizeOfObjects();
1682 double old_generation_survival_rate =
1683 (static_cast<double>(size_of_objects_after_gc) * 100) /
1684 static_cast<double>(size_of_objects_before_gc);
1686 if (old_generation_survival_rate < kOldSurvivalRateLowThreshold) {
1687 // Too many objects died in the old generation; pretenuring of the wrong
1688 // allocation sites may be the cause for that. We have to deopt all
1689 // dependent code registered in the allocation sites to re-evaluate
1690 // our pretenuring decisions.
1691 ResetAllAllocationSitesDependentCode(TENURED);
1692 if (FLAG_trace_pretenuring) {
1694 "Deopt all allocation sites dependent code due to low survival "
1695 "rate in the old generation %f\n",
1696 old_generation_survival_rate);
1702 void Heap::VisitExternalResources(v8::ExternalResourceVisitor* visitor) {
1703 DisallowHeapAllocation no_allocation;
1704 // All external strings are listed in the external string table.
1706 class ExternalStringTableVisitorAdapter : public ObjectVisitor {
1708 explicit ExternalStringTableVisitorAdapter(
1709 v8::ExternalResourceVisitor* visitor)
1710 : visitor_(visitor) {}
1711 virtual void VisitPointers(Object** start, Object** end) {
1712 for (Object** p = start; p < end; p++) {
1713 DCHECK((*p)->IsExternalString());
1714 visitor_->VisitExternalString(
1715 Utils::ToLocal(Handle<String>(String::cast(*p))));
1720 v8::ExternalResourceVisitor* visitor_;
1721 } external_string_table_visitor(visitor);
1723 external_string_table_.Iterate(&external_string_table_visitor);
1727 class NewSpaceScavenger : public StaticNewSpaceVisitor<NewSpaceScavenger> {
1729 static inline void VisitPointer(Heap* heap, Object** p) {
1730 Object* object = *p;
1731 if (!heap->InNewSpace(object)) return;
1732 Heap::ScavengeObject(reinterpret_cast<HeapObject**>(p),
1733 reinterpret_cast<HeapObject*>(object));
1738 Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
1739 Address new_space_front) {
1741 SemiSpace::AssertValidRange(new_space_front, new_space_.top());
1742 // The addresses new_space_front and new_space_.top() define a
1743 // queue of unprocessed copied objects. Process them until the queue is empty.
1745 while (new_space_front != new_space_.top()) {
1746 if (!NewSpacePage::IsAtEnd(new_space_front)) {
1747 HeapObject* object = HeapObject::FromAddress(new_space_front);
1749 NewSpaceScavenger::IterateBody(object->map(), object);
1752 NewSpacePage::FromLimit(new_space_front)->next_page()->area_start();
1756 // Promote and process all the to-be-promoted objects.
1758 StoreBufferRebuildScope scope(this, store_buffer(),
1759 &ScavengeStoreBufferCallback);
1760 while (!promotion_queue()->is_empty()) {
1763 promotion_queue()->remove(&target, &size);
1765 // Promoted object might be already partially visited
1766 // during old space pointer iteration. Thus we search specifically
1767 // for pointers to the from semispace instead of looking for pointers to new space.
1769 DCHECK(!target->IsMap());
1770 IterateAndMarkPointersToFromSpace(
1771 target->address(), target->address() + size, &ScavengeObject);
1775 // Take another spin if there are now unswept objects in new space
1776 // (there are currently no more unswept promoted objects).
1777 } while (new_space_front != new_space_.top());
1779 return new_space_front;
1783 STATIC_ASSERT((FixedDoubleArray::kHeaderSize & kDoubleAlignmentMask) ==
1785 STATIC_ASSERT((ConstantPoolArray::kFirstEntryOffset & kDoubleAlignmentMask) ==
1787 STATIC_ASSERT((ConstantPoolArray::kExtendedFirstOffset &
1788 kDoubleAlignmentMask) == 0); // NOLINT
1791 INLINE(static HeapObject* EnsureDoubleAligned(Heap* heap, HeapObject* object,
1794 static HeapObject* EnsureDoubleAligned(Heap* heap, HeapObject* object,
1796 if ((OffsetFrom(object->address()) & kDoubleAlignmentMask) != 0) {
1797 heap->CreateFillerObjectAt(object->address(), kPointerSize);
1798 return HeapObject::FromAddress(object->address() + kPointerSize);
1800 heap->CreateFillerObjectAt(object->address() + size - kPointerSize,
1801 kPointerSize);
1802 return object;
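// Worked example for EnsureDoubleAligned, assuming a 32-bit heap where
// kPointerSize == 4 and kDoubleAlignmentMask == 7: callers over-allocate by
// one word, so a 24-byte double array reserves 28 bytes. If the raw
// allocation starts at an address ending in 0x4, a one-word filler is placed
// there and the object is shifted to the next 8-byte boundary; if the
// allocation is already 8-byte aligned, the spare word at the end becomes the
// filler instead. Either way the gap remains a valid heap object, so heap
// iteration is unaffected.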
1807 enum LoggingAndProfiling {
1808 LOGGING_AND_PROFILING_ENABLED,
1809 LOGGING_AND_PROFILING_DISABLED
1813 enum MarksHandling { TRANSFER_MARKS, IGNORE_MARKS };
1816 template <MarksHandling marks_handling,
1817 LoggingAndProfiling logging_and_profiling_mode>
1818 class ScavengingVisitor : public StaticVisitorBase {
1820 static void Initialize() {
1821 table_.Register(kVisitSeqOneByteString, &EvacuateSeqOneByteString);
1822 table_.Register(kVisitSeqTwoByteString, &EvacuateSeqTwoByteString);
1823 table_.Register(kVisitShortcutCandidate, &EvacuateShortcutCandidate);
1824 table_.Register(kVisitByteArray, &EvacuateByteArray);
1825 table_.Register(kVisitFixedArray, &EvacuateFixedArray);
1826 table_.Register(kVisitFixedDoubleArray, &EvacuateFixedDoubleArray);
1827 table_.Register(kVisitFixedTypedArray, &EvacuateFixedTypedArray);
1828 table_.Register(kVisitFixedFloat64Array, &EvacuateFixedFloat64Array);
1831 kVisitNativeContext,
1832 &ObjectEvacuationStrategy<POINTER_OBJECT>::template VisitSpecialized<
1837 &ObjectEvacuationStrategy<POINTER_OBJECT>::template VisitSpecialized<
1838 ConsString::kSize>);
1842 &ObjectEvacuationStrategy<POINTER_OBJECT>::template VisitSpecialized<
1843 SlicedString::kSize>);
1847 &ObjectEvacuationStrategy<POINTER_OBJECT>::template VisitSpecialized<
1851 kVisitSharedFunctionInfo,
1852 &ObjectEvacuationStrategy<POINTER_OBJECT>::template VisitSpecialized<
1853 SharedFunctionInfo::kSize>);
1855 table_.Register(kVisitJSWeakCollection,
1856 &ObjectEvacuationStrategy<POINTER_OBJECT>::Visit);
1858 table_.Register(kVisitJSArrayBuffer,
1859 &ObjectEvacuationStrategy<POINTER_OBJECT>::Visit);
1861 table_.Register(kVisitJSTypedArray,
1862 &ObjectEvacuationStrategy<POINTER_OBJECT>::Visit);
1864 table_.Register(kVisitJSDataView,
1865 &ObjectEvacuationStrategy<POINTER_OBJECT>::Visit);
1867 table_.Register(kVisitJSRegExp,
1868 &ObjectEvacuationStrategy<POINTER_OBJECT>::Visit);
1870 if (marks_handling == IGNORE_MARKS) {
1873 &ObjectEvacuationStrategy<POINTER_OBJECT>::template VisitSpecialized<
1874 JSFunction::kSize>);
1876 table_.Register(kVisitJSFunction, &EvacuateJSFunction);
1879 table_.RegisterSpecializations<ObjectEvacuationStrategy<DATA_OBJECT>,
1880 kVisitDataObject, kVisitDataObjectGeneric>();
1882 table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>,
1883 kVisitJSObject, kVisitJSObjectGeneric>();
1885 table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>,
1886 kVisitStruct, kVisitStructGeneric>();
1889 static VisitorDispatchTable<ScavengingCallback>* GetTable() {
1894 enum ObjectContents { DATA_OBJECT, POINTER_OBJECT };
1896 static void RecordCopiedObject(Heap* heap, HeapObject* obj) {
1897 bool should_record = false;
1898 #ifdef DEBUG
1899 should_record = FLAG_heap_stats;
1900 #endif
1901 should_record = should_record || FLAG_log_gc;
1902 if (should_record) {
1903 if (heap->new_space()->Contains(obj)) {
1904 heap->new_space()->RecordAllocation(obj);
1906 heap->new_space()->RecordPromotion(obj);
1911 // Helper function used by CopyObject to copy a source object to an
1912 // allocated target object and update the forwarding pointer in the source
1913 // object. Returns the target object.
1914 INLINE(static void MigrateObject(Heap* heap, HeapObject* source,
1915 HeapObject* target, int size)) {
1916 // If we migrate into to-space, then the to-space top pointer should be
1917 // right after the target object. Incorporate double alignment
1918 // over-allocation.
1919 DCHECK(!heap->InToSpace(target) ||
1920 target->address() + size == heap->new_space()->top() ||
1921 target->address() + size + kPointerSize == heap->new_space()->top());
1923 // Make sure that we do not overwrite the promotion queue which is at
1924 // the end of to-space.
1925 DCHECK(!heap->InToSpace(target) ||
1926 heap->promotion_queue()->IsBelowPromotionQueue(
1927 heap->new_space()->top()));
1929 // Copy the content of source to target.
1930 heap->CopyBlock(target->address(), source->address(), size);
1932 // Set the forwarding address.
1933 source->set_map_word(MapWord::FromForwardingAddress(target));
1935 if (logging_and_profiling_mode == LOGGING_AND_PROFILING_ENABLED) {
1936 // Update NewSpace stats if necessary.
1937 RecordCopiedObject(heap, target);
1938 heap->OnMoveEvent(target, source, size);
1941 if (marks_handling == TRANSFER_MARKS) {
1942 if (Marking::TransferColor(source, target)) {
1943 MemoryChunk::IncrementLiveBytesFromGC(target->address(), size);
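// How forwarding works here: once the payload has been copied, the source
// object's map word is overwritten with a forwarding address. Any other slot
// that still references the stale from-space copy will later reach
// ScavengeObject, observe map_word().IsForwardingAddress(), and simply be
// updated to the new location, so each live object is copied exactly once.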
1948 template <int alignment>
1949 static inline bool SemiSpaceCopyObject(Map* map, HeapObject** slot,
1950 HeapObject* object, int object_size) {
1951 Heap* heap = map->GetHeap();
1953 int allocation_size = object_size;
1954 if (alignment != kObjectAlignment) {
1955 DCHECK(alignment == kDoubleAlignment);
1956 allocation_size += kPointerSize;
1959 DCHECK(heap->AllowedToBeMigrated(object, NEW_SPACE));
1960 AllocationResult allocation =
1961 heap->new_space()->AllocateRaw(allocation_size);
1963 HeapObject* target = NULL; // Initialization to please compiler.
1964 if (allocation.To(&target)) {
1965 // Order is important here: Set the promotion limit before storing a
1966 // filler for double alignment or migrating the object. Otherwise we
1967 // may end up overwriting promotion queue entries when we migrate the
1968 // object.
1969 heap->promotion_queue()->SetNewLimit(heap->new_space()->top());
1971 if (alignment != kObjectAlignment) {
1972 target = EnsureDoubleAligned(heap, target, allocation_size);
1975 // Order is important: slot might be inside of the target if target
1976 // was allocated over a dead object and slot comes from the store
1977 // buffer.
1979 MigrateObject(heap, object, target, object_size);
1981 heap->IncrementSemiSpaceCopiedObjectSize(object_size);
1988 template <ObjectContents object_contents, int alignment>
1989 static inline bool PromoteObject(Map* map, HeapObject** slot,
1990 HeapObject* object, int object_size) {
1991 Heap* heap = map->GetHeap();
1993 int allocation_size = object_size;
1994 if (alignment != kObjectAlignment) {
1995 DCHECK(alignment == kDoubleAlignment);
1996 allocation_size += kPointerSize;
1999 AllocationResult allocation;
2000 if (object_contents == DATA_OBJECT) {
2001 DCHECK(heap->AllowedToBeMigrated(object, OLD_DATA_SPACE));
2002 allocation = heap->old_data_space()->AllocateRaw(allocation_size);
2004 DCHECK(heap->AllowedToBeMigrated(object, OLD_POINTER_SPACE));
2005 allocation = heap->old_pointer_space()->AllocateRaw(allocation_size);
2008 HeapObject* target = NULL; // Initialization to please compiler.
2009 if (allocation.To(&target)) {
2010 if (alignment != kObjectAlignment) {
2011 target = EnsureDoubleAligned(heap, target, allocation_size);
2014 // Order is important: slot might be inside of the target if target
2015 // was allocated over a dead object and slot comes from the store
2016 // buffer.
2018 MigrateObject(heap, object, target, object_size);
2020 if (object_contents == POINTER_OBJECT) {
2021 if (map->instance_type() == JS_FUNCTION_TYPE) {
2022 heap->promotion_queue()->insert(target,
2023 JSFunction::kNonWeakFieldsEndOffset);
2025 heap->promotion_queue()->insert(target, object_size);
2028 heap->IncrementPromotedObjectsSize(object_size);
2035 template <ObjectContents object_contents, int alignment>
2036 static inline void EvacuateObject(Map* map, HeapObject** slot,
2037 HeapObject* object, int object_size) {
2038 SLOW_DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
2039 SLOW_DCHECK(object->Size() == object_size);
2040 Heap* heap = map->GetHeap();
2042 if (!heap->ShouldBePromoted(object->address(), object_size)) {
2043 // A semi-space copy may fail due to fragmentation. In that case, we
2044 // try to promote the object.
2045 if (SemiSpaceCopyObject<alignment>(map, slot, object, object_size)) {
2050 if (PromoteObject<object_contents, alignment>(map, slot, object,
2055 // If promotion failed, we try to copy the object to the other semi-space
2056 if (SemiSpaceCopyObject<alignment>(map, slot, object, object_size)) return;
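// Evacuation policy in brief: objects that have not yet survived a scavenge
// (per ShouldBePromoted) are first copied within new space and only promoted
// if to-space is too fragmented; objects that should be promoted go to old
// data space or old pointer space depending on whether they may hold
// pointers, and fall back to a semi-space copy only if the old-space
// allocation fails. Promoted pointer objects are queued on the promotion
// queue so DoScavenge can scan their fields afterwards.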
2062 static inline void EvacuateJSFunction(Map* map, HeapObject** slot,
2063 HeapObject* object) {
2064 ObjectEvacuationStrategy<POINTER_OBJECT>::template VisitSpecialized<
2065 JSFunction::kSize>(map, slot, object);
2067 MapWord map_word = object->map_word();
2068 DCHECK(map_word.IsForwardingAddress());
2069 HeapObject* target = map_word.ToForwardingAddress();
2071 MarkBit mark_bit = Marking::MarkBitFrom(target);
2072 if (Marking::IsBlack(mark_bit)) {
2073 // This object is black and it might not be rescanned by marker.
2074 // We should explicitly record code entry slot for compaction because
2075 // promotion queue processing (IterateAndMarkPointersToFromSpace) will
2076 // miss it as it is not HeapObject-tagged.
2077 Address code_entry_slot =
2078 target->address() + JSFunction::kCodeEntryOffset;
2079 Code* code = Code::cast(Code::GetObjectFromEntryAddress(code_entry_slot));
2080 map->GetHeap()->mark_compact_collector()->RecordCodeEntrySlot(
2081 code_entry_slot, code);
2086 static inline void EvacuateFixedArray(Map* map, HeapObject** slot,
2087 HeapObject* object) {
2088 int object_size = FixedArray::BodyDescriptor::SizeOf(map, object);
2089 EvacuateObject<POINTER_OBJECT, kObjectAlignment>(map, slot, object,
2094 static inline void EvacuateFixedDoubleArray(Map* map, HeapObject** slot,
2095 HeapObject* object) {
2096 int length = reinterpret_cast<FixedDoubleArray*>(object)->length();
2097 int object_size = FixedDoubleArray::SizeFor(length);
2098 EvacuateObject<DATA_OBJECT, kDoubleAlignment>(map, slot, object,
2103 static inline void EvacuateFixedTypedArray(Map* map, HeapObject** slot,
2104 HeapObject* object) {
2105 int object_size = reinterpret_cast<FixedTypedArrayBase*>(object)->size();
2106 EvacuateObject<DATA_OBJECT, kObjectAlignment>(map, slot, object,
2111 static inline void EvacuateFixedFloat64Array(Map* map, HeapObject** slot,
2112 HeapObject* object) {
2113 int object_size = reinterpret_cast<FixedFloat64Array*>(object)->size();
2114 EvacuateObject<DATA_OBJECT, kDoubleAlignment>(map, slot, object,
2119 static inline void EvacuateByteArray(Map* map, HeapObject** slot,
2120 HeapObject* object) {
2121 int object_size = reinterpret_cast<ByteArray*>(object)->ByteArraySize();
2122 EvacuateObject<DATA_OBJECT, kObjectAlignment>(map, slot, object,
2127 static inline void EvacuateSeqOneByteString(Map* map, HeapObject** slot,
2128 HeapObject* object) {
2129 int object_size = SeqOneByteString::cast(object)
2130 ->SeqOneByteStringSize(map->instance_type());
2131 EvacuateObject<DATA_OBJECT, kObjectAlignment>(map, slot, object,
2136 static inline void EvacuateSeqTwoByteString(Map* map, HeapObject** slot,
2137 HeapObject* object) {
2138 int object_size = SeqTwoByteString::cast(object)
2139 ->SeqTwoByteStringSize(map->instance_type());
2140 EvacuateObject<DATA_OBJECT, kObjectAlignment>(map, slot, object,
2145 static inline void EvacuateShortcutCandidate(Map* map, HeapObject** slot,
2146 HeapObject* object) {
2147 DCHECK(IsShortcutCandidate(map->instance_type()));
2149 Heap* heap = map->GetHeap();
2151 if (marks_handling == IGNORE_MARKS &&
2152 ConsString::cast(object)->unchecked_second() == heap->empty_string()) {
2154 HeapObject::cast(ConsString::cast(object)->unchecked_first());
2158 if (!heap->InNewSpace(first)) {
2159 object->set_map_word(MapWord::FromForwardingAddress(first));
2163 MapWord first_word = first->map_word();
2164 if (first_word.IsForwardingAddress()) {
2165 HeapObject* target = first_word.ToForwardingAddress();
2168 object->set_map_word(MapWord::FromForwardingAddress(target));
2172 heap->DoScavengeObject(first->map(), slot, first);
2173 object->set_map_word(MapWord::FromForwardingAddress(*slot));
2177 int object_size = ConsString::kSize;
2178 EvacuateObject<POINTER_OBJECT, kObjectAlignment>(map, slot, object,
2179 object_size);
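// Shortcut candidates in brief: a cons string whose second half is the empty
// string is equivalent to its first half, so when marks are ignored the
// scavenger forwards the cons string straight to `first` (or to first's
// existing forwarding target) instead of evacuating the wrapper, and the
// referencing slot ends up pointing at the flattened string. While the
// incremental marker is compacting this shortcut is disabled entirely, see
// SelectScavengingVisitorsTable below.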
2182 template <ObjectContents object_contents>
2183 class ObjectEvacuationStrategy {
2185 template <int object_size>
2186 static inline void VisitSpecialized(Map* map, HeapObject** slot,
2187 HeapObject* object) {
2188 EvacuateObject<object_contents, kObjectAlignment>(map, slot, object,
2192 static inline void Visit(Map* map, HeapObject** slot, HeapObject* object) {
2193 int object_size = map->instance_size();
2194 EvacuateObject<object_contents, kObjectAlignment>(map, slot, object,
2199 static VisitorDispatchTable<ScavengingCallback> table_;
2203 template <MarksHandling marks_handling,
2204 LoggingAndProfiling logging_and_profiling_mode>
2205 VisitorDispatchTable<ScavengingCallback>
2206 ScavengingVisitor<marks_handling, logging_and_profiling_mode>::table_;
2209 static void InitializeScavengingVisitorsTables() {
2210 ScavengingVisitor<TRANSFER_MARKS,
2211 LOGGING_AND_PROFILING_DISABLED>::Initialize();
2212 ScavengingVisitor<IGNORE_MARKS, LOGGING_AND_PROFILING_DISABLED>::Initialize();
2213 ScavengingVisitor<TRANSFER_MARKS,
2214 LOGGING_AND_PROFILING_ENABLED>::Initialize();
2215 ScavengingVisitor<IGNORE_MARKS, LOGGING_AND_PROFILING_ENABLED>::Initialize();
2219 void Heap::SelectScavengingVisitorsTable() {
2220 bool logging_and_profiling =
2221 FLAG_verify_predictable || isolate()->logger()->is_logging() ||
2222 isolate()->cpu_profiler()->is_profiling() ||
2223 (isolate()->heap_profiler() != NULL &&
2224 isolate()->heap_profiler()->is_tracking_object_moves());
2226 if (!incremental_marking()->IsMarking()) {
2227 if (!logging_and_profiling) {
2228 scavenging_visitors_table_.CopyFrom(ScavengingVisitor<
2229 IGNORE_MARKS, LOGGING_AND_PROFILING_DISABLED>::GetTable());
2231 scavenging_visitors_table_.CopyFrom(ScavengingVisitor<
2232 IGNORE_MARKS, LOGGING_AND_PROFILING_ENABLED>::GetTable());
2235 if (!logging_and_profiling) {
2236 scavenging_visitors_table_.CopyFrom(ScavengingVisitor<
2237 TRANSFER_MARKS, LOGGING_AND_PROFILING_DISABLED>::GetTable());
2239 scavenging_visitors_table_.CopyFrom(ScavengingVisitor<
2240 TRANSFER_MARKS, LOGGING_AND_PROFILING_ENABLED>::GetTable());
2243 if (incremental_marking()->IsCompacting()) {
2244 // When compacting forbid short-circuiting of cons-strings.
2245 // Scavenging code relies on the fact that new space object
2246 // can't be evacuated into evacuation candidate but
2247 // short-circuiting violates this assumption.
2248 scavenging_visitors_table_.Register(
2249 StaticVisitorBase::kVisitShortcutCandidate,
2250 scavenging_visitors_table_.GetVisitorById(
2251 StaticVisitorBase::kVisitConsString));
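// The four ScavengingVisitor instantiations above form a 2x2 matrix
// (transfer vs. ignore marks, logging/profiling on vs. off). Selecting one
// fully specialized dispatch table per GC means the per-object scavenging
// path does not have to re-check these flags.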
2257 void Heap::ScavengeObjectSlow(HeapObject** p, HeapObject* object) {
2258 SLOW_DCHECK(object->GetIsolate()->heap()->InFromSpace(object));
2259 MapWord first_word = object->map_word();
2260 SLOW_DCHECK(!first_word.IsForwardingAddress());
2261 Map* map = first_word.ToMap();
2262 map->GetHeap()->DoScavengeObject(map, p, object);
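// ScavengeObjectSlow is the out-of-line part of Heap::ScavengeObject: the
// inline fast path handles objects that already carry a forwarding address,
// and only objects that still need to be copied reach this dispatch through
// the visitor table keyed on the object's map.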
2266 AllocationResult Heap::AllocatePartialMap(InstanceType instance_type,
2267 int instance_size) {
2269 AllocationResult allocation = AllocateRaw(Map::kSize, MAP_SPACE, MAP_SPACE);
2270 if (!allocation.To(&result)) return allocation;
2272 // Map::cast cannot be used due to uninitialized map field.
2273 reinterpret_cast<Map*>(result)->set_map(raw_unchecked_meta_map());
2274 reinterpret_cast<Map*>(result)->set_instance_type(instance_type);
2275 reinterpret_cast<Map*>(result)->set_instance_size(instance_size);
2276 reinterpret_cast<Map*>(result)->set_visitor_id(
2277 StaticVisitorBase::GetVisitorId(instance_type, instance_size));
2278 reinterpret_cast<Map*>(result)->set_inobject_properties(0);
2279 reinterpret_cast<Map*>(result)->set_pre_allocated_property_fields(0);
2280 reinterpret_cast<Map*>(result)->set_unused_property_fields(0);
2281 reinterpret_cast<Map*>(result)->set_bit_field(0);
2282 reinterpret_cast<Map*>(result)->set_bit_field2(0);
2283 int bit_field3 = Map::EnumLengthBits::encode(kInvalidEnumCacheSentinel) |
2284 Map::OwnsDescriptors::encode(true);
2285 reinterpret_cast<Map*>(result)->set_bit_field3(bit_field3);
2290 AllocationResult Heap::AllocateMap(InstanceType instance_type,
2292 ElementsKind elements_kind) {
2294 AllocationResult allocation = AllocateRaw(Map::kSize, MAP_SPACE, MAP_SPACE);
2295 if (!allocation.To(&result)) return allocation;
2297 result->set_map_no_write_barrier(meta_map());
2298 Map* map = Map::cast(result);
2299 map->set_instance_type(instance_type);
2300 map->set_visitor_id(
2301 StaticVisitorBase::GetVisitorId(instance_type, instance_size));
2302 map->set_prototype(null_value(), SKIP_WRITE_BARRIER);
2303 map->set_constructor(null_value(), SKIP_WRITE_BARRIER);
2304 map->set_instance_size(instance_size);
2305 map->set_inobject_properties(0);
2306 map->set_pre_allocated_property_fields(0);
2307 map->set_code_cache(empty_fixed_array(), SKIP_WRITE_BARRIER);
2308 map->set_dependent_code(DependentCode::cast(empty_fixed_array()),
2309 SKIP_WRITE_BARRIER);
2310 map->init_back_pointer(undefined_value());
2311 map->set_unused_property_fields(0);
2312 map->set_instance_descriptors(empty_descriptor_array());
2313 map->set_bit_field(0);
2314 map->set_bit_field2(1 << Map::kIsExtensible);
2315 int bit_field3 = Map::EnumLengthBits::encode(kInvalidEnumCacheSentinel) |
2316 Map::OwnsDescriptors::encode(true);
2317 map->set_bit_field3(bit_field3);
2318 map->set_elements_kind(elements_kind);
2324 AllocationResult Heap::AllocateFillerObject(int size, bool double_align,
2325 AllocationSpace space) {
2328 AllocationResult allocation = AllocateRaw(size, space, space);
2329 if (!allocation.To(&obj)) return allocation;
2332 MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
2333 DCHECK(chunk->owner()->identity() == space);
2335 CreateFillerObjectAt(obj->address(), size);
2340 const Heap::StringTypeTable Heap::string_type_table[] = {
2341 #define STRING_TYPE_ELEMENT(type, size, name, camel_name) \
2342 { type, size, k##camel_name##MapRootIndex } \
2344 STRING_TYPE_LIST(STRING_TYPE_ELEMENT)
2345 #undef STRING_TYPE_ELEMENT
2349 const Heap::ConstantStringTable Heap::constant_string_table[] = {
2350 #define CONSTANT_STRING_ELEMENT(name, contents) \
2351 { contents, k##name##RootIndex } \
2353 INTERNALIZED_STRING_LIST(CONSTANT_STRING_ELEMENT)
2354 #undef CONSTANT_STRING_ELEMENT
2358 const Heap::StructTable Heap::struct_table[] = {
2359 #define STRUCT_TABLE_ELEMENT(NAME, Name, name) \
2360 { NAME##_TYPE, Name::kSize, k##Name##MapRootIndex } \
2362 STRUCT_LIST(STRUCT_TABLE_ELEMENT)
2363 #undef STRUCT_TABLE_ELEMENT
2367 bool Heap::CreateInitialMaps() {
2370 AllocationResult allocation = AllocatePartialMap(MAP_TYPE, Map::kSize);
2371 if (!allocation.To(&obj)) return false;
2373 // Map::cast cannot be used due to uninitialized map field.
2374 Map* new_meta_map = reinterpret_cast<Map*>(obj);
2375 set_meta_map(new_meta_map);
2376 new_meta_map->set_map(new_meta_map);
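// Bootstrapping note: the meta map is the map that describes maps, so it is
// the one object whose map field points at itself. It has to be created with
// AllocatePartialMap because the objects its remaining fields refer to (the
// empty fixed array, the empty descriptor array, undefined) do not exist yet;
// those fields are patched further down once the objects are available.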
2378 { // Partial map allocation
2379 #define ALLOCATE_PARTIAL_MAP(instance_type, size, field_name) \
2382 if (!AllocatePartialMap((instance_type), (size)).To(&map)) return false; \
2383 set_##field_name##_map(map); \
2386 ALLOCATE_PARTIAL_MAP(FIXED_ARRAY_TYPE, kVariableSizeSentinel, fixed_array);
2387 ALLOCATE_PARTIAL_MAP(ODDBALL_TYPE, Oddball::kSize, undefined);
2388 ALLOCATE_PARTIAL_MAP(ODDBALL_TYPE, Oddball::kSize, null);
2389 ALLOCATE_PARTIAL_MAP(CONSTANT_POOL_ARRAY_TYPE, kVariableSizeSentinel,
2390 constant_pool_array);
2392 #undef ALLOCATE_PARTIAL_MAP
2395 // Allocate the empty array.
2397 AllocationResult allocation = AllocateEmptyFixedArray();
2398 if (!allocation.To(&obj)) return false;
2400 set_empty_fixed_array(FixedArray::cast(obj));
2403 AllocationResult allocation = Allocate(null_map(), OLD_POINTER_SPACE);
2404 if (!allocation.To(&obj)) return false;
2406 set_null_value(Oddball::cast(obj));
2407 Oddball::cast(obj)->set_kind(Oddball::kNull);
2410 AllocationResult allocation = Allocate(undefined_map(), OLD_POINTER_SPACE);
2411 if (!allocation.To(&obj)) return false;
2413 set_undefined_value(Oddball::cast(obj));
2414 Oddball::cast(obj)->set_kind(Oddball::kUndefined);
2415 DCHECK(!InNewSpace(undefined_value()));
2417 // Set preliminary exception sentinel value before actually initializing it.
2418 set_exception(null_value());
2420 // Allocate the empty descriptor array.
2422 AllocationResult allocation = AllocateEmptyFixedArray();
2423 if (!allocation.To(&obj)) return false;
2425 set_empty_descriptor_array(DescriptorArray::cast(obj));
2427 // Allocate the constant pool array.
2429 AllocationResult allocation = AllocateEmptyConstantPoolArray();
2430 if (!allocation.To(&obj)) return false;
2432 set_empty_constant_pool_array(ConstantPoolArray::cast(obj));
2434 // Fix the instance_descriptors for the existing maps.
2435 meta_map()->set_code_cache(empty_fixed_array());
2436 meta_map()->set_dependent_code(DependentCode::cast(empty_fixed_array()));
2437 meta_map()->init_back_pointer(undefined_value());
2438 meta_map()->set_instance_descriptors(empty_descriptor_array());
2440 fixed_array_map()->set_code_cache(empty_fixed_array());
2441 fixed_array_map()->set_dependent_code(
2442 DependentCode::cast(empty_fixed_array()));
2443 fixed_array_map()->init_back_pointer(undefined_value());
2444 fixed_array_map()->set_instance_descriptors(empty_descriptor_array());
2446 undefined_map()->set_code_cache(empty_fixed_array());
2447 undefined_map()->set_dependent_code(DependentCode::cast(empty_fixed_array()));
2448 undefined_map()->init_back_pointer(undefined_value());
2449 undefined_map()->set_instance_descriptors(empty_descriptor_array());
2451 null_map()->set_code_cache(empty_fixed_array());
2452 null_map()->set_dependent_code(DependentCode::cast(empty_fixed_array()));
2453 null_map()->init_back_pointer(undefined_value());
2454 null_map()->set_instance_descriptors(empty_descriptor_array());
2456 constant_pool_array_map()->set_code_cache(empty_fixed_array());
2457 constant_pool_array_map()->set_dependent_code(
2458 DependentCode::cast(empty_fixed_array()));
2459 constant_pool_array_map()->init_back_pointer(undefined_value());
2460 constant_pool_array_map()->set_instance_descriptors(empty_descriptor_array());
2462 // Fix prototype object for existing maps.
2463 meta_map()->set_prototype(null_value());
2464 meta_map()->set_constructor(null_value());
2466 fixed_array_map()->set_prototype(null_value());
2467 fixed_array_map()->set_constructor(null_value());
2469 undefined_map()->set_prototype(null_value());
2470 undefined_map()->set_constructor(null_value());
2472 null_map()->set_prototype(null_value());
2473 null_map()->set_constructor(null_value());
2475 constant_pool_array_map()->set_prototype(null_value());
2476 constant_pool_array_map()->set_constructor(null_value());
2479 #define ALLOCATE_MAP(instance_type, size, field_name) \
2482 if (!AllocateMap((instance_type), size).To(&map)) return false; \
2483 set_##field_name##_map(map); \
2486 #define ALLOCATE_VARSIZE_MAP(instance_type, field_name) \
2487 ALLOCATE_MAP(instance_type, kVariableSizeSentinel, field_name)
2489 ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, fixed_cow_array)
2490 DCHECK(fixed_array_map() != fixed_cow_array_map());
2492 ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, scope_info)
2493 ALLOCATE_MAP(HEAP_NUMBER_TYPE, HeapNumber::kSize, heap_number)
2494 ALLOCATE_MAP(MUTABLE_HEAP_NUMBER_TYPE, HeapNumber::kSize,
2495 mutable_heap_number)
2496 ALLOCATE_MAP(SYMBOL_TYPE, Symbol::kSize, symbol)
2497 ALLOCATE_MAP(FOREIGN_TYPE, Foreign::kSize, foreign)
2499 ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, the_hole);
2500 ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, boolean);
2501 ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, uninitialized);
2502 ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, arguments_marker);
2503 ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, no_interceptor_result_sentinel);
2504 ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, exception);
2505 ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, termination_exception);
2507 for (unsigned i = 0; i < arraysize(string_type_table); i++) {
2508 const StringTypeTable& entry = string_type_table[i];
2510 AllocationResult allocation = AllocateMap(entry.type, entry.size);
2511 if (!allocation.To(&obj)) return false;
2513 // Mark cons string maps as unstable, because their objects can change
2515 Map* map = Map::cast(obj);
2516 if (StringShape(entry.type).IsCons()) map->mark_unstable();
2517 roots_[entry.index] = map;
2520 ALLOCATE_VARSIZE_MAP(STRING_TYPE, undetectable_string)
2521 undetectable_string_map()->set_is_undetectable();
2523 ALLOCATE_VARSIZE_MAP(ONE_BYTE_STRING_TYPE, undetectable_one_byte_string);
2524 undetectable_one_byte_string_map()->set_is_undetectable();
2526 ALLOCATE_VARSIZE_MAP(FIXED_DOUBLE_ARRAY_TYPE, fixed_double_array)
2527 ALLOCATE_VARSIZE_MAP(BYTE_ARRAY_TYPE, byte_array)
2528 ALLOCATE_VARSIZE_MAP(FREE_SPACE_TYPE, free_space)
2530 #define ALLOCATE_EXTERNAL_ARRAY_MAP(Type, type, TYPE, ctype, size) \
2531 ALLOCATE_MAP(EXTERNAL_##TYPE##_ARRAY_TYPE, ExternalArray::kAlignedSize, \
2532 external_##type##_array)
2534 TYPED_ARRAYS(ALLOCATE_EXTERNAL_ARRAY_MAP)
2535 #undef ALLOCATE_EXTERNAL_ARRAY_MAP
2537 #define ALLOCATE_FIXED_TYPED_ARRAY_MAP(Type, type, TYPE, ctype, size) \
2538 ALLOCATE_VARSIZE_MAP(FIXED_##TYPE##_ARRAY_TYPE, fixed_##type##_array)
2540 TYPED_ARRAYS(ALLOCATE_FIXED_TYPED_ARRAY_MAP)
2541 #undef ALLOCATE_FIXED_TYPED_ARRAY_MAP
2543 ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, sloppy_arguments_elements)
2545 ALLOCATE_VARSIZE_MAP(CODE_TYPE, code)
2547 ALLOCATE_MAP(CELL_TYPE, Cell::kSize, cell)
2548 ALLOCATE_MAP(PROPERTY_CELL_TYPE, PropertyCell::kSize, global_property_cell)
2549 ALLOCATE_MAP(FILLER_TYPE, kPointerSize, one_pointer_filler)
2550 ALLOCATE_MAP(FILLER_TYPE, 2 * kPointerSize, two_pointer_filler)
2553 for (unsigned i = 0; i < arraysize(struct_table); i++) {
2554 const StructTable& entry = struct_table[i];
2556 if (!AllocateMap(entry.type, entry.size).To(&map)) return false;
2557 roots_[entry.index] = map;
2560 ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, hash_table)
2561 ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, ordered_hash_table)
2563 ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, function_context)
2564 ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, catch_context)
2565 ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, with_context)
2566 ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, block_context)
2567 ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, module_context)
2568 ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, global_context)
2570 ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, native_context)
2571 native_context_map()->set_dictionary_map(true);
2572 native_context_map()->set_visitor_id(
2573 StaticVisitorBase::kVisitNativeContext);
2575 ALLOCATE_MAP(SHARED_FUNCTION_INFO_TYPE, SharedFunctionInfo::kAlignedSize,
2576 shared_function_info)
2578 ALLOCATE_MAP(JS_MESSAGE_OBJECT_TYPE, JSMessageObject::kSize, message_object)
2579 ALLOCATE_MAP(JS_OBJECT_TYPE, JSObject::kHeaderSize + kPointerSize, external)
2580 external_map()->set_is_extensible(false);
2581 #undef ALLOCATE_VARSIZE_MAP
2587 ByteArray* byte_array;
2588 if (!AllocateByteArray(0, TENURED).To(&byte_array)) return false;
2589 set_empty_byte_array(byte_array);
2592 #define ALLOCATE_EMPTY_EXTERNAL_ARRAY(Type, type, TYPE, ctype, size) \
2594 ExternalArray* obj; \
2595 if (!AllocateEmptyExternalArray(kExternal##Type##Array).To(&obj)) \
2597 set_empty_external_##type##_array(obj); \
2600 TYPED_ARRAYS(ALLOCATE_EMPTY_EXTERNAL_ARRAY)
2601 #undef ALLOCATE_EMPTY_EXTERNAL_ARRAY
2603 #define ALLOCATE_EMPTY_FIXED_TYPED_ARRAY(Type, type, TYPE, ctype, size) \
2605 FixedTypedArrayBase* obj; \
2606 if (!AllocateEmptyFixedTypedArray(kExternal##Type##Array).To(&obj)) \
2608 set_empty_fixed_##type##_array(obj); \
2611 TYPED_ARRAYS(ALLOCATE_EMPTY_FIXED_TYPED_ARRAY)
2612 #undef ALLOCATE_EMPTY_FIXED_TYPED_ARRAY
2614 DCHECK(!InNewSpace(empty_fixed_array()));
2619 AllocationResult Heap::AllocateHeapNumber(double value, MutableMode mode,
2620 PretenureFlag pretenure) {
2621 // Statically ensure that it is safe to allocate heap numbers in paged
2622 // spaces.
2623 int size = HeapNumber::kSize;
2624 STATIC_ASSERT(HeapNumber::kSize <= Page::kMaxRegularHeapObjectSize);
2626 AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
2630 AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
2631 if (!allocation.To(&result)) return allocation;
2634 Map* map = mode == MUTABLE ? mutable_heap_number_map() : heap_number_map();
2635 HeapObject::cast(result)->set_map_no_write_barrier(map);
2636 HeapNumber::cast(result)->set_value(value);
2641 #define SIMD128_HEAP_ALLOCATE_FUNCTIONS(V) \
2642 V(Float32x4, float32x4) \
2643 V(Float64x2, float64x2) \
2647 #define DECLARE_SIMD_HEAP_ALLOCATE_FUNCTION(TYPE, type) \
2648 AllocationResult Heap::Allocate##TYPE(type##_value_t value, \
2649 PretenureFlag pretenure) { \
2650 STATIC_ASSERT(TYPE::kSize <= Page::kMaxRegularHeapObjectSize); \
2652 AllocationSpace space = \
2653 SelectSpace(TYPE::kSize, OLD_DATA_SPACE, pretenure); \
2655 HeapObject* result; \
2656 { AllocationResult allocation = \
2657 AllocateRaw(TYPE::kSize, space, OLD_DATA_SPACE); \
2658 if (!allocation.To(&result)) return allocation; \
2661 result->set_map_no_write_barrier( \
2662 isolate()->native_context()->type##_function()->initial_map()); \
2663 JSObject::cast(result)->set_properties(empty_fixed_array()); \
2664 JSObject::cast(result)->set_elements(empty_fixed_array()); \
2666 HeapObject* storage; \
2667 int storage_size = \
2668 FixedTypedArrayBase::kDataOffset + k##TYPE##Size; \
2669 space = SelectSpace(storage_size, OLD_DATA_SPACE, pretenure); \
2670 { AllocationResult allocation = \
2671 AllocateRaw(storage_size, space, OLD_DATA_SPACE); \
2672 if (!allocation.To(&storage)) return allocation; \
2676 *isolate()->factory()->fixed_##type##_array_map()); \
2677 FixedTypedArrayBase* elements = FixedTypedArrayBase::cast(storage); \
2678 elements->set_length(static_cast<int>(1)); \
2679 memset(elements->DataPtr(), 0, elements->DataSize()); \
2680 Fixed##TYPE##Array::cast(storage)->set(0, value); \
2681 TYPE::cast(result)->set_value(storage); \
2686 SIMD128_HEAP_ALLOCATE_FUNCTIONS(DECLARE_SIMD_HEAP_ALLOCATE_FUNCTION)
2689 AllocationResult Heap::AllocateCell(Object* value) {
2690 int size = Cell::kSize;
2691 STATIC_ASSERT(Cell::kSize <= Page::kMaxRegularHeapObjectSize);
2695 AllocationResult allocation = AllocateRaw(size, CELL_SPACE, CELL_SPACE);
2696 if (!allocation.To(&result)) return allocation;
2698 result->set_map_no_write_barrier(cell_map());
2699 Cell::cast(result)->set_value(value);
2704 AllocationResult Heap::AllocatePropertyCell() {
2705 int size = PropertyCell::kSize;
2706 STATIC_ASSERT(PropertyCell::kSize <= Page::kMaxRegularHeapObjectSize);
2709 AllocationResult allocation =
2710 AllocateRaw(size, PROPERTY_CELL_SPACE, PROPERTY_CELL_SPACE);
2711 if (!allocation.To(&result)) return allocation;
2713 result->set_map_no_write_barrier(global_property_cell_map());
2714 PropertyCell* cell = PropertyCell::cast(result);
2715 cell->set_dependent_code(DependentCode::cast(empty_fixed_array()),
2716 SKIP_WRITE_BARRIER);
2717 cell->set_value(the_hole_value());
2718 cell->set_type(HeapType::None());
2723 void Heap::CreateApiObjects() {
2724 HandleScope scope(isolate());
2725 Factory* factory = isolate()->factory();
2726 Handle<Map> new_neander_map =
2727 factory->NewMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
2729 // Don't use Smi-only elements optimizations for objects with the neander
2730 // map. There are too many cases where element values are set directly with a
2731 // bottleneck to trap the Smi-only -> fast elements transition, and there
2732 // appears to be no benefit in optimizing this case.
2733 new_neander_map->set_elements_kind(TERMINAL_FAST_ELEMENTS_KIND);
2734 set_neander_map(*new_neander_map);
2736 Handle<JSObject> listeners = factory->NewNeanderObject();
2737 Handle<FixedArray> elements = factory->NewFixedArray(2);
2738 elements->set(0, Smi::FromInt(0));
2739 listeners->set_elements(*elements);
2740 set_message_listeners(*listeners);
2744 void Heap::CreateJSEntryStub() {
2745 JSEntryStub stub(isolate(), StackFrame::ENTRY);
2746 set_js_entry_code(*stub.GetCode());
2750 void Heap::CreateJSConstructEntryStub() {
2751 JSEntryStub stub(isolate(), StackFrame::ENTRY_CONSTRUCT);
2752 set_js_construct_entry_code(*stub.GetCode());
2756 void Heap::CreateFixedStubs() {
2757 // Here we create roots for fixed stubs. They are needed at GC
2758 // for cooking and uncooking (check out frames.cc).
2759 // This eliminates the need for doing dictionary lookups in the
2760 // stub cache for these stubs.
2761 HandleScope scope(isolate());
2763 // Create stubs that should be there, so we don't unexpectedly have to
2764 // create them if we need them during the creation of another stub.
2765 // Stub creation mixes raw pointers and handles in an unsafe manner so
2766 // we cannot create stubs while we are creating stubs.
2767 CodeStub::GenerateStubsAheadOfTime(isolate());
2769 // MacroAssembler::Abort calls (usually enabled with --debug-code) depend on
2770 // CEntryStub, so we need to call GenerateStubsAheadOfTime before JSEntryStub
2771 // is created.
2773 // gcc-4.4 has a problem generating correct code for the following snippet:
2774 // { JSEntryStub stub;
2775 // js_entry_code_ = *stub.GetCode();
2777 // { JSConstructEntryStub stub;
2778 // js_construct_entry_code_ = *stub.GetCode();
2780 // To work around the problem, make separate functions without inlining.
2781 Heap::CreateJSEntryStub();
2782 Heap::CreateJSConstructEntryStub();
2786 void Heap::CreateInitialObjects() {
2787 HandleScope scope(isolate());
2788 Factory* factory = isolate()->factory();
2790 // The -0 value must be set before NewNumber works.
2791 set_minus_zero_value(*factory->NewHeapNumber(-0.0, IMMUTABLE, TENURED));
2792 DCHECK(std::signbit(minus_zero_value()->Number()) != 0);
2795 *factory->NewHeapNumber(base::OS::nan_value(), IMMUTABLE, TENURED));
2796 set_infinity_value(*factory->NewHeapNumber(V8_INFINITY, IMMUTABLE, TENURED));
2798 // The hole has not been created yet, but we want to put something
2799 // predictable in the gaps in the string table, so let's make that Smi zero.
2800 set_the_hole_value(reinterpret_cast<Oddball*>(Smi::FromInt(0)));
2802 // Allocate initial string table.
2803 set_string_table(*StringTable::New(isolate(), kInitialStringTableSize));
2805 // Finish initializing oddballs after creating the string table.
2806 Oddball::Initialize(isolate(), factory->undefined_value(), "undefined",
2807 factory->nan_value(), Oddball::kUndefined);
2809 // Initialize the null_value.
2810 Oddball::Initialize(isolate(), factory->null_value(), "null",
2811 handle(Smi::FromInt(0), isolate()), Oddball::kNull);
2813 set_true_value(*factory->NewOddball(factory->boolean_map(), "true",
2814 handle(Smi::FromInt(1), isolate()),
2817 set_false_value(*factory->NewOddball(factory->boolean_map(), "false",
2818 handle(Smi::FromInt(0), isolate()),
2821 set_the_hole_value(*factory->NewOddball(factory->the_hole_map(), "hole",
2822 handle(Smi::FromInt(-1), isolate()),
2823 Oddball::kTheHole));
2825 set_uninitialized_value(*factory->NewOddball(
2826 factory->uninitialized_map(), "uninitialized",
2827 handle(Smi::FromInt(-1), isolate()), Oddball::kUninitialized));
2829 set_arguments_marker(*factory->NewOddball(
2830 factory->arguments_marker_map(), "arguments_marker",
2831 handle(Smi::FromInt(-4), isolate()), Oddball::kArgumentMarker));
2833 set_no_interceptor_result_sentinel(*factory->NewOddball(
2834 factory->no_interceptor_result_sentinel_map(),
2835 "no_interceptor_result_sentinel", handle(Smi::FromInt(-2), isolate()),
2838 set_termination_exception(*factory->NewOddball(
2839 factory->termination_exception_map(), "termination_exception",
2840 handle(Smi::FromInt(-3), isolate()), Oddball::kOther));
2842 set_exception(*factory->NewOddball(factory->exception_map(), "exception",
2843 handle(Smi::FromInt(-5), isolate()),
2844 Oddball::kException));
2846 for (unsigned i = 0; i < arraysize(constant_string_table); i++) {
2847 Handle<String> str =
2848 factory->InternalizeUtf8String(constant_string_table[i].contents);
2849 roots_[constant_string_table[i].index] = *str;
2852 // Allocate the hidden string which is used to identify the hidden properties
2853 // in JSObjects. The hash code has a special value so that it will not match
2854 // the empty string when searching for the property. It cannot be part of the
2855 // loop above because it needs to be allocated manually with the special
2856 // hash code in place. The hash code for the hidden_string is zero to ensure
2857 // that it will always be at the first entry in property descriptors.
2858 hidden_string_ = *factory->NewOneByteInternalizedString(
2859 OneByteVector("", 0), String::kEmptyStringHash);
2861 // Create the code_stubs dictionary. The initial size is set to avoid
2862 // expanding the dictionary during bootstrapping.
2863 set_code_stubs(*UnseededNumberDictionary::New(isolate(), 128));
2865 // Create the non_monomorphic_cache used in stub-cache.cc. The initial size
2866 // is set to avoid expanding the dictionary during bootstrapping.
2867 set_non_monomorphic_cache(*UnseededNumberDictionary::New(isolate(), 64));
2869 set_polymorphic_code_cache(PolymorphicCodeCache::cast(
2870 *factory->NewStruct(POLYMORPHIC_CODE_CACHE_TYPE)));
2872 set_instanceof_cache_function(Smi::FromInt(0));
2873 set_instanceof_cache_map(Smi::FromInt(0));
2874 set_instanceof_cache_answer(Smi::FromInt(0));
2878 // Allocate the dictionary of intrinsic function names.
2879 Handle<NameDictionary> intrinsic_names =
2880 NameDictionary::New(isolate(), Runtime::kNumFunctions, TENURED);
2881 Runtime::InitializeIntrinsicFunctionNames(isolate(), intrinsic_names);
2882 set_intrinsic_function_names(*intrinsic_names);
2884 set_number_string_cache(
2885 *factory->NewFixedArray(kInitialNumberStringCacheSize * 2, TENURED));
2887 // Allocate cache for single character one byte strings.
2888 set_single_character_string_cache(
2889 *factory->NewFixedArray(String::kMaxOneByteCharCode + 1, TENURED));
2891 // Allocate cache for string split and regexp-multiple.
2892 set_string_split_cache(*factory->NewFixedArray(
2893 RegExpResultsCache::kRegExpResultsCacheSize, TENURED));
2894 set_regexp_multiple_cache(*factory->NewFixedArray(
2895 RegExpResultsCache::kRegExpResultsCacheSize, TENURED));
2897 // Allocate cache for external strings pointing to native source code.
2898 set_natives_source_cache(
2899 *factory->NewFixedArray(Natives::GetBuiltinsCount()));
2901 set_undefined_cell(*factory->NewCell(factory->undefined_value()));
2903 // The symbol registry is initialized lazily.
2904 set_symbol_registry(undefined_value());
2906 // Allocate object to hold object observation state.
2907 set_observation_state(*factory->NewJSObjectFromMap(
2908 factory->NewMap(JS_OBJECT_TYPE, JSObject::kHeaderSize)));
2910 // Microtask queue uses the empty fixed array as a sentinel for "empty".
2911 // Number of queued microtasks stored in Isolate::pending_microtask_count().
2912 set_microtask_queue(empty_fixed_array());
2914 set_detailed_stack_trace_symbol(*factory->NewPrivateOwnSymbol());
2915 set_elements_transition_symbol(*factory->NewPrivateOwnSymbol());
2916 set_frozen_symbol(*factory->NewPrivateOwnSymbol());
2917 set_megamorphic_symbol(*factory->NewPrivateOwnSymbol());
2918 set_premonomorphic_symbol(*factory->NewPrivateOwnSymbol());
2919 set_generic_symbol(*factory->NewPrivateOwnSymbol());
2920 set_nonexistent_symbol(*factory->NewPrivateOwnSymbol());
2921 set_normal_ic_symbol(*factory->NewPrivateOwnSymbol());
2922 set_observed_symbol(*factory->NewPrivateOwnSymbol());
2923 set_stack_trace_symbol(*factory->NewPrivateOwnSymbol());
2924 set_uninitialized_symbol(*factory->NewPrivateOwnSymbol());
2925 set_home_object_symbol(*factory->NewPrivateOwnSymbol());
2927 Handle<SeededNumberDictionary> slow_element_dictionary =
2928 SeededNumberDictionary::New(isolate(), 0, TENURED);
2929 slow_element_dictionary->set_requires_slow_elements();
2930 set_empty_slow_element_dictionary(*slow_element_dictionary);
2932 set_materialized_objects(*factory->NewFixedArray(0, TENURED));
2934 // Handling of script id generation is in Factory::NewScript.
2935 set_last_script_id(Smi::FromInt(v8::UnboundScript::kNoScriptId));
2937 set_allocation_sites_scratchpad(
2938 *factory->NewFixedArray(kAllocationSiteScratchpadSize, TENURED));
2939 InitializeAllocationSitesScratchpad();
2941 // Initialize keyed lookup cache.
2942 isolate_->keyed_lookup_cache()->Clear();
2944 // Initialize context slot cache.
2945 isolate_->context_slot_cache()->Clear();
2947 // Initialize descriptor cache.
2948 isolate_->descriptor_lookup_cache()->Clear();
2950 // Initialize compilation cache.
2951 isolate_->compilation_cache()->Clear();
2955 bool Heap::RootCanBeWrittenAfterInitialization(Heap::RootListIndex root_index) {
2956 RootListIndex writable_roots[] = {
2957 kStoreBufferTopRootIndex,
2958 kStackLimitRootIndex,
2959 kNumberStringCacheRootIndex,
2960 kInstanceofCacheFunctionRootIndex,
2961 kInstanceofCacheMapRootIndex,
2962 kInstanceofCacheAnswerRootIndex,
2963 kCodeStubsRootIndex,
2964 kNonMonomorphicCacheRootIndex,
2965 kPolymorphicCodeCacheRootIndex,
2966 kLastScriptIdRootIndex,
2967 kEmptyScriptRootIndex,
2968 kRealStackLimitRootIndex,
2969 kArgumentsAdaptorDeoptPCOffsetRootIndex,
2970 kConstructStubDeoptPCOffsetRootIndex,
2971 kGetterStubDeoptPCOffsetRootIndex,
2972 kSetterStubDeoptPCOffsetRootIndex,
2973 kStringTableRootIndex,
2976 for (unsigned int i = 0; i < arraysize(writable_roots); i++) {
2977 if (root_index == writable_roots[i]) return true;
2983 bool Heap::RootCanBeTreatedAsConstant(RootListIndex root_index) {
2984 return !RootCanBeWrittenAfterInitialization(root_index) &&
2985 !InNewSpace(roots_array_start()[root_index]);
2989 Object* RegExpResultsCache::Lookup(Heap* heap, String* key_string,
2990 Object* key_pattern, ResultsCacheType type) {
2991 FixedArray* cache;
2992 if (!key_string->IsInternalizedString()) return Smi::FromInt(0);
2993 if (type == STRING_SPLIT_SUBSTRINGS) {
2994 DCHECK(key_pattern->IsString());
2995 if (!key_pattern->IsInternalizedString()) return Smi::FromInt(0);
2996 cache = heap->string_split_cache();
2998 DCHECK(type == REGEXP_MULTIPLE_INDICES);
2999 DCHECK(key_pattern->IsFixedArray());
3000 cache = heap->regexp_multiple_cache();
3003 uint32_t hash = key_string->Hash();
3004 uint32_t index = ((hash & (kRegExpResultsCacheSize - 1)) &
3005 ~(kArrayEntriesPerCacheEntry - 1));
3006 if (cache->get(index + kStringOffset) == key_string &&
3007 cache->get(index + kPatternOffset) == key_pattern) {
3008 return cache->get(index + kArrayOffset);
3010 index =
3011 ((index + kArrayEntriesPerCacheEntry) & (kRegExpResultsCacheSize - 1));
3012 if (cache->get(index + kStringOffset) == key_string &&
3013 cache->get(index + kPatternOffset) == key_pattern) {
3014 return cache->get(index + kArrayOffset);
3016 return Smi::FromInt(0);
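// Probing scheme, with an illustrative example (the constants are assumed
// for the example only): the cache is a flat FixedArray holding groups of
// (string, pattern, result array) entries. With kRegExpResultsCacheSize ==
// 0x100 and kArrayEntriesPerCacheEntry == 4, a key hash of 0x1A7 yields
// index (0x1A7 & 0xFF) & ~3 == 0xA4; if that group does not match, the single
// alternative probe is the next group, (0xA4 + 4) & 0xFF == 0xA8, and a miss
// returns Smi zero.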
3020 void RegExpResultsCache::Enter(Isolate* isolate, Handle<String> key_string,
3021 Handle<Object> key_pattern,
3022 Handle<FixedArray> value_array,
3023 ResultsCacheType type) {
3024 Factory* factory = isolate->factory();
3025 Handle<FixedArray> cache;
3026 if (!key_string->IsInternalizedString()) return;
3027 if (type == STRING_SPLIT_SUBSTRINGS) {
3028 DCHECK(key_pattern->IsString());
3029 if (!key_pattern->IsInternalizedString()) return;
3030 cache = factory->string_split_cache();
3032 DCHECK(type == REGEXP_MULTIPLE_INDICES);
3033 DCHECK(key_pattern->IsFixedArray());
3034 cache = factory->regexp_multiple_cache();
3037 uint32_t hash = key_string->Hash();
3038 uint32_t index = ((hash & (kRegExpResultsCacheSize - 1)) &
3039 ~(kArrayEntriesPerCacheEntry - 1));
3040 if (cache->get(index + kStringOffset) == Smi::FromInt(0)) {
3041 cache->set(index + kStringOffset, *key_string);
3042 cache->set(index + kPatternOffset, *key_pattern);
3043 cache->set(index + kArrayOffset, *value_array);
3045 uint32_t index2 =
3046 ((index + kArrayEntriesPerCacheEntry) & (kRegExpResultsCacheSize - 1));
3047 if (cache->get(index2 + kStringOffset) == Smi::FromInt(0)) {
3048 cache->set(index2 + kStringOffset, *key_string);
3049 cache->set(index2 + kPatternOffset, *key_pattern);
3050 cache->set(index2 + kArrayOffset, *value_array);
3052 cache->set(index2 + kStringOffset, Smi::FromInt(0));
3053 cache->set(index2 + kPatternOffset, Smi::FromInt(0));
3054 cache->set(index2 + kArrayOffset, Smi::FromInt(0));
3055 cache->set(index + kStringOffset, *key_string);
3056 cache->set(index + kPatternOffset, *key_pattern);
3057 cache->set(index + kArrayOffset, *value_array);
3060 // If the array is a reasonably short list of substrings, convert it into a
3061 // list of internalized strings.
3062 if (type == STRING_SPLIT_SUBSTRINGS && value_array->length() < 100) {
3063 for (int i = 0; i < value_array->length(); i++) {
3064 Handle<String> str(String::cast(value_array->get(i)), isolate);
3065 Handle<String> internalized_str = factory->InternalizeString(str);
3066 value_array->set(i, *internalized_str);
3069 // Convert backing store to a copy-on-write array.
3070 value_array->set_map_no_write_barrier(*factory->fixed_cow_array_map());
3074 void RegExpResultsCache::Clear(FixedArray* cache) {
3075 for (int i = 0; i < kRegExpResultsCacheSize; i++) {
3076 cache->set(i, Smi::FromInt(0));
3081 int Heap::FullSizeNumberStringCacheLength() {
3082 // Compute the size of the number string cache based on the max newspace size.
3083 // The number string cache has a minimum size based on twice the initial cache
3084 // size to ensure that it is bigger after being made 'full size'.
3085 int number_string_cache_size = max_semi_space_size_ / 512;
3086 number_string_cache_size = Max(kInitialNumberStringCacheSize * 2,
3087 Min(0x4000, number_string_cache_size));
3088 // There is a string and a number per entry so the length is twice the number
3089 // of entries.
3090 return number_string_cache_size * 2;
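// Example, assuming an 8 MB maximum semi-space: 8 MB / 512 == 16384 entries,
// which Min(0x4000, ...) leaves unchanged and which already exceeds the
// kInitialNumberStringCacheSize * 2 floor, so the cache array is created
// with 2 * 16384 == 32768 slots, one number and one string per entry.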
3094 void Heap::FlushNumberStringCache() {
3095 // Flush the number to string cache.
3096 int len = number_string_cache()->length();
3097 for (int i = 0; i < len; i++) {
3098 number_string_cache()->set_undefined(i);
3103 void Heap::FlushAllocationSitesScratchpad() {
3104 for (int i = 0; i < allocation_sites_scratchpad_length_; i++) {
3105 allocation_sites_scratchpad()->set_undefined(i);
3107 allocation_sites_scratchpad_length_ = 0;
3111 void Heap::InitializeAllocationSitesScratchpad() {
3112 DCHECK(allocation_sites_scratchpad()->length() ==
3113 kAllocationSiteScratchpadSize);
3114 for (int i = 0; i < kAllocationSiteScratchpadSize; i++) {
3115 allocation_sites_scratchpad()->set_undefined(i);
3120 void Heap::AddAllocationSiteToScratchpad(AllocationSite* site,
3121 ScratchpadSlotMode mode) {
3122 if (allocation_sites_scratchpad_length_ < kAllocationSiteScratchpadSize) {
3123 // We cannot use the normal write-barrier because slots need to be
3124 // recorded with non-incremental marking as well. We have to explicitly
3125 // record the slot to take evacuation candidates into account.
3126 allocation_sites_scratchpad()->set(allocation_sites_scratchpad_length_,
3127 site, SKIP_WRITE_BARRIER);
3128 Object** slot = allocation_sites_scratchpad()->RawFieldOfElementAt(
3129 allocation_sites_scratchpad_length_);
3131 if (mode == RECORD_SCRATCHPAD_SLOT) {
3132 // We need to allow slots buffer overflow here since the evacuation
3133 // candidates are not part of the global list of old space pages and
3134 // releasing an evacuation candidate due to a slots buffer overflow
3135 // results in lost pages.
3136 mark_compact_collector()->RecordSlot(slot, slot, *slot,
3137 SlotsBuffer::IGNORE_OVERFLOW);
3139 allocation_sites_scratchpad_length_++;
3144 Map* Heap::MapForExternalArrayType(ExternalArrayType array_type) {
3145 return Map::cast(roots_[RootIndexForExternalArrayType(array_type)]);
3149 Heap::RootListIndex Heap::RootIndexForExternalArrayType(
3150 ExternalArrayType array_type) {
3151 switch (array_type) {
3152 #define ARRAY_TYPE_TO_ROOT_INDEX(Type, type, TYPE, ctype, size) \
3153 case kExternal##Type##Array: \
3154 return kExternal##Type##ArrayMapRootIndex;
3156 TYPED_ARRAYS(ARRAY_TYPE_TO_ROOT_INDEX)
3157 #undef ARRAY_TYPE_TO_ROOT_INDEX
3161 return kUndefinedValueRootIndex;
3166 Map* Heap::MapForFixedTypedArray(ExternalArrayType array_type) {
3167 return Map::cast(roots_[RootIndexForFixedTypedArray(array_type)]);
3171 Heap::RootListIndex Heap::RootIndexForFixedTypedArray(
3172 ExternalArrayType array_type) {
3173 switch (array_type) {
3174 #define ARRAY_TYPE_TO_ROOT_INDEX(Type, type, TYPE, ctype, size) \
3175 case kExternal##Type##Array: \
3176 return kFixed##Type##ArrayMapRootIndex;
3178 TYPED_ARRAYS(ARRAY_TYPE_TO_ROOT_INDEX)
3179 #undef ARRAY_TYPE_TO_ROOT_INDEX
3183 return kUndefinedValueRootIndex;
3188 Heap::RootListIndex Heap::RootIndexForEmptyExternalArray(
3189 ElementsKind elementsKind) {
3190 switch (elementsKind) {
3191 #define ELEMENT_KIND_TO_ROOT_INDEX(Type, type, TYPE, ctype, size) \
3192 case EXTERNAL_##TYPE##_ELEMENTS: \
3193 return kEmptyExternal##Type##ArrayRootIndex;
3195 TYPED_ARRAYS(ELEMENT_KIND_TO_ROOT_INDEX)
3196 #undef ELEMENT_KIND_TO_ROOT_INDEX
3200 return kUndefinedValueRootIndex;
3205 Heap::RootListIndex Heap::RootIndexForEmptyFixedTypedArray(
3206 ElementsKind elementsKind) {
3207 switch (elementsKind) {
3208 #define ELEMENT_KIND_TO_ROOT_INDEX(Type, type, TYPE, ctype, size) \
3209 case TYPE##_ELEMENTS: \
3210 return kEmptyFixed##Type##ArrayRootIndex;
3212 TYPED_ARRAYS(ELEMENT_KIND_TO_ROOT_INDEX)
3213 #undef ELEMENT_KIND_TO_ROOT_INDEX
3216 return kUndefinedValueRootIndex;
3221 ExternalArray* Heap::EmptyExternalArrayForMap(Map* map) {
3222 return ExternalArray::cast(
3223 roots_[RootIndexForEmptyExternalArray(map->elements_kind())]);
3227 FixedTypedArrayBase* Heap::EmptyFixedTypedArrayForMap(Map* map) {
3228 return FixedTypedArrayBase::cast(
3229 roots_[RootIndexForEmptyFixedTypedArray(map->elements_kind())]);
3233 AllocationResult Heap::AllocateForeign(Address address,
3234 PretenureFlag pretenure) {
3235 // Statically ensure that it is safe to allocate foreigns in paged spaces.
3236 STATIC_ASSERT(Foreign::kSize <= Page::kMaxRegularHeapObjectSize);
3237 AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
3239 AllocationResult allocation = Allocate(foreign_map(), space);
3240 if (!allocation.To(&result)) return allocation;
3241 result->set_foreign_address(address);
3246 AllocationResult Heap::AllocateByteArray(int length, PretenureFlag pretenure) {
3247 if (length < 0 || length > ByteArray::kMaxLength) {
3248 v8::internal::Heap::FatalProcessOutOfMemory("invalid array length", true);
3250 int size = ByteArray::SizeFor(length);
3251 AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
3254 AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
3255 if (!allocation.To(&result)) return allocation;
3258 result->set_map_no_write_barrier(byte_array_map());
3259 ByteArray::cast(result)->set_length(length);
3264 void Heap::CreateFillerObjectAt(Address addr, int size) {
3265 if (size == 0) return;
3266 HeapObject* filler = HeapObject::FromAddress(addr);
3267 if (size == kPointerSize) {
3268 filler->set_map_no_write_barrier(one_pointer_filler_map());
3269 } else if (size == 2 * kPointerSize) {
3270 filler->set_map_no_write_barrier(two_pointer_filler_map());
3272 filler->set_map_no_write_barrier(free_space_map());
3273 FreeSpace::cast(filler)->set_size(size);
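// Fillers keep the heap iterable: a one-word hole gets the one-pointer filler
// map, a two-word hole the two-pointer filler map, and any larger hole
// becomes a FreeSpace object that records its own size, so a linear heap walk
// can step over reclaimed gaps without consulting side tables.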
3278 bool Heap::CanMoveObjectStart(HeapObject* object) {
3279 Address address = object->address();
3280 bool is_in_old_pointer_space = InOldPointerSpace(address);
3281 bool is_in_old_data_space = InOldDataSpace(address);
3283 if (lo_space()->Contains(object)) return false;
3285 Page* page = Page::FromAddress(address);
3286 // We can move the object start if:
3287 // (1) the object is not in old pointer or old data space,
3288 // (2) the page of the object was already swept,
3289 // (3) the page was already concurrently swept. This case is an optimization
3290 // for concurrent sweeping. The WasSwept predicate for concurrently swept
3291 // pages is set after sweeping all pages.
3292 return (!is_in_old_pointer_space && !is_in_old_data_space) ||
3293 page->WasSwept() || page->SweepingCompleted();
3297 void Heap::AdjustLiveBytes(Address address, int by, InvocationMode mode) {
3298 if (incremental_marking()->IsMarking() &&
3299 Marking::IsBlack(Marking::MarkBitFrom(address))) {
3300 if (mode == FROM_GC) {
3301 MemoryChunk::IncrementLiveBytesFromGC(address, by);
3303 MemoryChunk::IncrementLiveBytesFromMutator(address, by);
3309 FixedArrayBase* Heap::LeftTrimFixedArray(FixedArrayBase* object,
3310 int elements_to_trim) {
3311 const int element_size = object->IsFixedArray() ? kPointerSize : kDoubleSize;
3312 const int bytes_to_trim = elements_to_trim * element_size;
3313 Map* map = object->map();
3315 // For now this trick is only applied to objects in new and paged space.
3316 // In large object space the object's start must coincide with the chunk
3317 // start, and thus the trick is just not applicable.
3318 DCHECK(!lo_space()->Contains(object));
3319 DCHECK(object->map() != fixed_cow_array_map());
3321 STATIC_ASSERT(FixedArrayBase::kMapOffset == 0);
3322 STATIC_ASSERT(FixedArrayBase::kLengthOffset == kPointerSize);
3323 STATIC_ASSERT(FixedArrayBase::kHeaderSize == 2 * kPointerSize);
3325 const int len = object->length();
3326 DCHECK(elements_to_trim <= len);
3328 // Calculate location of new array start.
3329 Address new_start = object->address() + bytes_to_trim;
3331 // Technically in new space this write might be omitted (except for
3332 // debug mode which iterates through the heap), but to play safer
3333 // we still create a filler for the trimmed prefix.
3334 CreateFillerObjectAt(object->address(), bytes_to_trim);
3336 // Initialize header of the trimmed array. Since left trimming is only
3337 // performed on pages which are not concurrently swept creating a filler
3338 // object does not require synchronization.
3339 DCHECK(CanMoveObjectStart(object));
3340 Object** former_start = HeapObject::RawField(object, 0);
3341 int new_start_index = elements_to_trim * (element_size / kPointerSize);
3342 former_start[new_start_index] = map;
3343 former_start[new_start_index + 1] = Smi::FromInt(len - elements_to_trim);
3344 FixedArrayBase* new_object =
3345 FixedArrayBase::cast(HeapObject::FromAddress(new_start));
3347 // Maintain consistency of live bytes during incremental marking
3348 marking()->TransferMark(object->address(), new_start);
3349 AdjustLiveBytes(new_start, -bytes_to_trim, Heap::FROM_MUTATOR);
3351 // Notify the heap profiler of change in object layout.
3352 OnMoveEvent(new_object, object, new_object->Size());
3354 return new_object;
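// Left-trimming in one picture: the first bytes_to_trim bytes of the array
// become a filler object, and the map plus the reduced length are rewritten
// at the new start, so the same memory now parses as
//   [filler][map | length - n | remaining elements].
// A hypothetical caller therefore has to switch to the returned pointer:
//   FixedArrayBase* trimmed = heap->LeftTrimFixedArray(array, 2);
//   // `array` must not be used afterwards; its header has been destroyed.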
3357 // Force instantiation of templatized method.
3359 void Heap::RightTrimFixedArray<Heap::FROM_GC>(FixedArrayBase*, int);
3361 void Heap::RightTrimFixedArray<Heap::FROM_MUTATOR>(FixedArrayBase*, int);
3364 template<Heap::InvocationMode mode>
3365 void Heap::RightTrimFixedArray(FixedArrayBase* object, int elements_to_trim) {
3366 const int element_size = object->IsFixedArray() ? kPointerSize : kDoubleSize;
3367 const int bytes_to_trim = elements_to_trim * element_size;
3369 // For now this trick is only applied to objects in new and paged space.
3370 DCHECK(object->map() != fixed_cow_array_map());
3372 const int len = object->length();
3373 DCHECK(elements_to_trim < len);
3375 // Calculate location of new array end.
3376 Address new_end = object->address() + object->Size() - bytes_to_trim;
3378 // Technically in new space this write might be omitted (except for
3379 // debug mode which iterates through the heap), but to play safer
3380 // we still create a filler for the trimmed tail.
3381 // We do not create a filler for objects in large object space.
3382 // TODO(hpayer): We should shrink the large object page if the size
3383 // of the object changed significantly.
3384 if (!lo_space()->Contains(object)) {
3385 CreateFillerObjectAt(new_end, bytes_to_trim);
3388 // Initialize header of the trimmed array. We are storing the new length
3389 // using release store after creating a filler for the left-over space to
3390 // avoid races with the sweeper thread.
3391 object->synchronized_set_length(len - elements_to_trim);
3393 // Maintain consistency of live bytes during incremental marking
3394 AdjustLiveBytes(object->address(), -bytes_to_trim, mode);
3396 // Notify the heap profiler of change in object layout. The array may not be
3397 // moved during GC, and size has to be adjusted nevertheless.
3398 HeapProfiler* profiler = isolate()->heap_profiler();
3399 if (profiler->is_tracking_allocations()) {
3400 profiler->UpdateObjectSizeEvent(object->address(), object->Size());
3405 AllocationResult Heap::AllocateExternalArray(int length,
3406 ExternalArrayType array_type,
3407 void* external_pointer,
3408 PretenureFlag pretenure) {
3409 int size = ExternalArray::kAlignedSize;
3410 AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
3413 AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
3414 if (!allocation.To(&result)) return allocation;
3417 result->set_map_no_write_barrier(MapForExternalArrayType(array_type));
3418 ExternalArray::cast(result)->set_length(length);
3419 ExternalArray::cast(result)->set_external_pointer(external_pointer);
3423 static void ForFixedTypedArray(ExternalArrayType array_type, int* element_size,
3424 ElementsKind* element_kind) {
3425 switch (array_type) {
3426 #define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
3427 case kExternal##Type##Array: \
3428 *element_size = size; \
3429 *element_kind = TYPE##_ELEMENTS; \
3432 TYPED_ARRAYS(TYPED_ARRAY_CASE)
3433 #undef TYPED_ARRAY_CASE
3436 *element_size = 0; // Bogus
3437 *element_kind = UINT8_ELEMENTS; // Bogus
3443 AllocationResult Heap::AllocateFixedTypedArray(int length,
3444 ExternalArrayType array_type,
3445 PretenureFlag pretenure) {
3447 ElementsKind elements_kind;
3448 ForFixedTypedArray(array_type, &element_size, &elements_kind);
3449 int size = OBJECT_POINTER_ALIGN(length * element_size +
3450 FixedTypedArrayBase::kDataOffset);
3451 #ifndef V8_HOST_ARCH_64_BIT
3452 if (array_type == kExternalFloat64Array) {
3453 size += kPointerSize;
3456 AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
3459 AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
3460 if (!allocation.To(&object)) return allocation;
3462 if (array_type == kExternalFloat64Array) {
3463 object = EnsureDoubleAligned(this, object, size);
3466 object->set_map(MapForFixedTypedArray(array_type));
3467 FixedTypedArrayBase* elements = FixedTypedArrayBase::cast(object);
3468 elements->set_length(length);
3469 memset(elements->DataPtr(), 0, elements->DataSize());
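// Note (editorial): the payload is zero-filled with memset, which matches the
// required initial contents of JS typed arrays. On 32-bit hosts an extra word
// is requested above (size += kPointerSize) so that Float64 payloads can be
// realigned to 8 bytes via EnsureDoubleAligned.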
3474 AllocationResult Heap::AllocateCode(int object_size, bool immovable) {
3475 DCHECK(IsAligned(static_cast<intptr_t>(object_size), kCodeAlignment));
3476 AllocationResult allocation =
3477 AllocateRaw(object_size, CODE_SPACE, CODE_SPACE);
3480 if (!allocation.To(&result)) return allocation;
3483 Address address = result->address();
3484 // Code objects which should stay at a fixed address are allocated either
3485 // in the first page of code space (objects on the first page of each space
3486 // are never moved) or in large object space.
3487 if (!code_space_->FirstPage()->Contains(address) &&
3488 MemoryChunk::FromAddress(address)->owner()->identity() != LO_SPACE) {
3489 // Discard the first code allocation, which was on a page where it could
3490 // be moved.
3491 CreateFillerObjectAt(result->address(), object_size);
3492 allocation = lo_space_->AllocateRaw(object_size, EXECUTABLE);
3493 if (!allocation.To(&result)) return allocation;
3494 OnAllocationEvent(result, object_size);
3498 result->set_map_no_write_barrier(code_map());
3499 Code* code = Code::cast(result);
3500 DCHECK(isolate_->code_range() == NULL || !isolate_->code_range()->valid() ||
3501 isolate_->code_range()->contains(code->address()));
3502 code->set_gc_metadata(Smi::FromInt(0));
3503 code->set_ic_age(global_ic_age_);
3508 AllocationResult Heap::CopyCode(Code* code) {
3509 AllocationResult allocation;
3510 HeapObject* new_constant_pool;
3511 if (FLAG_enable_ool_constant_pool &&
3512 code->constant_pool() != empty_constant_pool_array()) {
3513 // Copy the constant pool, since edits to the copied code may modify
3514 // the constant pool.
3515 allocation = CopyConstantPoolArray(code->constant_pool());
3516 if (!allocation.To(&new_constant_pool)) return allocation;
3518 new_constant_pool = empty_constant_pool_array();
3522 // Allocate an object the same size as the code object.
3523 int obj_size = code->Size();
3524 allocation = AllocateRaw(obj_size, CODE_SPACE, CODE_SPACE);
3525 if (!allocation.To(&result)) return allocation;
3527 // Copy code object.
3528 Address old_addr = code->address();
3529 Address new_addr = result->address();
3530 CopyBlock(new_addr, old_addr, obj_size);
3531 Code* new_code = Code::cast(result);
3533 // Update the constant pool.
3534 new_code->set_constant_pool(new_constant_pool);
3536 // Relocate the copy.
3537 DCHECK(isolate_->code_range() == NULL || !isolate_->code_range()->valid() ||
3538 isolate_->code_range()->contains(code->address()));
3539 new_code->Relocate(new_addr - old_addr);
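// Note (editorial): the copy is a raw byte-for-byte CopyBlock of the original
// Code object; Relocate(new_addr - old_addr) then fixes up position-dependent
// data (e.g. targets recorded in the relocation info) by that address delta.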
3544 AllocationResult Heap::CopyCode(Code* code, Vector<byte> reloc_info) {
3545 // Allocate the ByteArray and ConstantPoolArray before the Code object, so that
3546 // we do not risk leaving an uninitialized Code object (and breaking the heap).
3547 ByteArray* reloc_info_array;
3549 AllocationResult allocation =
3550 AllocateByteArray(reloc_info.length(), TENURED);
3551 if (!allocation.To(&reloc_info_array)) return allocation;
3553 HeapObject* new_constant_pool;
3554 if (FLAG_enable_ool_constant_pool &&
3555 code->constant_pool() != empty_constant_pool_array()) {
3556 // Copy the constant pool, since edits to the copied code may modify
3557 // the constant pool.
3558 AllocationResult allocation = CopyConstantPoolArray(code->constant_pool());
3559 if (!allocation.To(&new_constant_pool)) return allocation;
3561 new_constant_pool = empty_constant_pool_array();
3564 int new_body_size = RoundUp(code->instruction_size(), kObjectAlignment);
3566 int new_obj_size = Code::SizeFor(new_body_size);
3568 Address old_addr = code->address();
3570 size_t relocation_offset =
3571 static_cast<size_t>(code->instruction_end() - old_addr);
3574 AllocationResult allocation =
3575 AllocateRaw(new_obj_size, CODE_SPACE, CODE_SPACE);
3576 if (!allocation.To(&result)) return allocation;
3578 // Copy code object.
3579 Address new_addr = result->address();
3581 // Copy header and instructions.
3582 CopyBytes(new_addr, old_addr, relocation_offset);
3584 Code* new_code = Code::cast(result);
3585 new_code->set_relocation_info(reloc_info_array);
3587 // Update constant pool.
3588 new_code->set_constant_pool(new_constant_pool);
3590 // Copy patched rinfo.
3591 CopyBytes(new_code->relocation_start(), reloc_info.start(),
3592 static_cast<size_t>(reloc_info.length()));
3594 // Relocate the copy.
3595 DCHECK(isolate_->code_range() == NULL || !isolate_->code_range()->valid() ||
3596 isolate_->code_range()->contains(code->address()));
3597 new_code->Relocate(new_addr - old_addr);
3600 if (FLAG_verify_heap) code->ObjectVerify();
3606 void Heap::InitializeAllocationMemento(AllocationMemento* memento,
3607 AllocationSite* allocation_site) {
3608 memento->set_map_no_write_barrier(allocation_memento_map());
3609 DCHECK(allocation_site->map() == allocation_site_map());
3610 memento->set_allocation_site(allocation_site, SKIP_WRITE_BARRIER);
3611 if (FLAG_allocation_site_pretenuring) {
3612 allocation_site->IncrementMementoCreateCount();
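// Note (editorial): an AllocationMemento is a small structure placed directly
// behind a freshly allocated object (see Heap::Allocate below); it points back
// to its AllocationSite, and the create count incremented here feeds the
// allocation-site pretenuring heuristics when that flag is enabled.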
3617 AllocationResult Heap::Allocate(Map* map, AllocationSpace space,
3618 AllocationSite* allocation_site) {
3619 DCHECK(gc_state_ == NOT_IN_GC);
3620 DCHECK(map->instance_type() != MAP_TYPE);
3621 // If allocation failures are disallowed, we may allocate in a different
3622 // space when new space is full and the object is not a large object.
3623 AllocationSpace retry_space =
3624 (space != NEW_SPACE) ? space : TargetSpaceId(map->instance_type());
3625 int size = map->instance_size();
3626 if (allocation_site != NULL) {
3627 size += AllocationMemento::kSize;
3630 AllocationResult allocation = AllocateRaw(size, space, retry_space);
3631 if (!allocation.To(&result)) return allocation;
3632 // No need for write barrier since object is white and map is in old space.
3633 result->set_map_no_write_barrier(map);
3634 if (allocation_site != NULL) {
3635 AllocationMemento* alloc_memento = reinterpret_cast<AllocationMemento*>(
3636 reinterpret_cast<Address>(result) + map->instance_size());
3637 InitializeAllocationMemento(alloc_memento, allocation_site);
3643 void Heap::InitializeJSObjectFromMap(JSObject* obj, FixedArray* properties,
3645 obj->set_properties(properties);
3646 obj->initialize_elements();
3647 // TODO(1240798): Initialize the object's body using valid initial values
3648 // according to the object's initial map. For example, if the map's
3649 // instance type is JS_ARRAY_TYPE, the length field should be initialized
3650 // to a number (e.g. Smi::FromInt(0)) and the elements initialized to a
3651 // fixed array (e.g. Heap::empty_fixed_array()). Currently, the object
3652 // verification code has to cope with (temporarily) invalid objects. See,
3653 // for example, JSArray::JSArrayVerify().
3655 // We cannot always fill with one_pointer_filler_map because objects
3656 // created from API functions expect their internal fields to be initialized
3657 // with undefined_value.
3658 // Pre-allocated fields need to be initialized with undefined_value as well
3659 // so that object accesses before the constructor completes (e.g. in the
3660 // debugger) will not cause a crash.
3661 if (map->constructor()->IsJSFunction() &&
3662 JSFunction::cast(map->constructor())
3663 ->IsInobjectSlackTrackingInProgress()) {
3664 // We might want to shrink the object later.
3665 DCHECK(obj->GetInternalFieldCount() == 0);
3666 filler = Heap::one_pointer_filler_map();
3668 filler = Heap::undefined_value();
3670 obj->InitializeBody(map, Heap::undefined_value(), filler);
3674 AllocationResult Heap::AllocateJSObjectFromMap(
3675 Map* map, PretenureFlag pretenure, bool allocate_properties,
3676 AllocationSite* allocation_site) {
3677 // JSFunctions should be allocated using AllocateFunction to be
3678 // properly initialized.
3679 DCHECK(map->instance_type() != JS_FUNCTION_TYPE);
3681 // Both types of global objects should be allocated using
3682 // AllocateGlobalObject to be properly initialized.
3683 DCHECK(map->instance_type() != JS_GLOBAL_OBJECT_TYPE);
3684 DCHECK(map->instance_type() != JS_BUILTINS_OBJECT_TYPE);
3686 // Allocate the backing storage for the properties.
3687 FixedArray* properties;
3688 if (allocate_properties) {
3689 int prop_size = map->InitialPropertiesLength();
3690 DCHECK(prop_size >= 0);
3692 AllocationResult allocation = AllocateFixedArray(prop_size, pretenure);
3693 if (!allocation.To(&properties)) return allocation;
3696 properties = empty_fixed_array();
3699 // Allocate the JSObject.
3700 int size = map->instance_size();
3701 AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, pretenure);
3703 AllocationResult allocation = Allocate(map, space, allocation_site);
3704 if (!allocation.To(&js_obj)) return allocation;
3706 // Initialize the JSObject.
3707 InitializeJSObjectFromMap(js_obj, properties, map);
3708 DCHECK(js_obj->HasFastElements() || js_obj->HasExternalArrayElements() ||
3709 js_obj->HasFixedTypedArrayElements());
3714 AllocationResult Heap::AllocateJSObject(JSFunction* constructor,
3715 PretenureFlag pretenure,
3716 AllocationSite* allocation_site) {
3717 DCHECK(constructor->has_initial_map());
3719 // Allocate the object based on the constructors initial map.
3720 AllocationResult allocation = AllocateJSObjectFromMap(
3721 constructor->initial_map(), pretenure, true, allocation_site);
3723 // Make sure result is NOT a global object if valid.
3725 DCHECK(!allocation.To(&obj) || !obj->IsGlobalObject());
3731 AllocationResult Heap::CopyJSObject(JSObject* source, AllocationSite* site) {
3732 // Never used to copy functions. If functions need to be copied we
3733 // have to be careful to clear the literals array.
3734 SLOW_DCHECK(!source->IsJSFunction());
3737 Map* map = source->map();
3738 int object_size = map->instance_size();
3741 DCHECK(site == NULL || AllocationSite::CanTrack(map->instance_type()));
3743 WriteBarrierMode wb_mode = UPDATE_WRITE_BARRIER;
3745 // If we're forced to always allocate, we use the general allocation
3746 // functions which may leave us with an object in old space.
3747 if (always_allocate()) {
3749 AllocationResult allocation =
3750 AllocateRaw(object_size, NEW_SPACE, OLD_POINTER_SPACE);
3751 if (!allocation.To(&clone)) return allocation;
3753 Address clone_address = clone->address();
3754 CopyBlock(clone_address, source->address(), object_size);
3755 // Update write barrier for all fields that lie beyond the header.
3756 RecordWrites(clone_address, JSObject::kHeaderSize,
3757 (object_size - JSObject::kHeaderSize) / kPointerSize);
3759 wb_mode = SKIP_WRITE_BARRIER;
3762 int adjusted_object_size =
3763 site != NULL ? object_size + AllocationMemento::kSize : object_size;
3764 AllocationResult allocation =
3765 AllocateRaw(adjusted_object_size, NEW_SPACE, NEW_SPACE);
3766 if (!allocation.To(&clone)) return allocation;
3768 SLOW_DCHECK(InNewSpace(clone));
3769 // Since we know the clone is allocated in new space, we can copy
3770 // the contents without worrying about updating the write barrier.
3771 CopyBlock(clone->address(), source->address(), object_size);
3774 AllocationMemento* alloc_memento = reinterpret_cast<AllocationMemento*>(
3775 reinterpret_cast<Address>(clone) + object_size);
3776 InitializeAllocationMemento(alloc_memento, site);
3780 SLOW_DCHECK(JSObject::cast(clone)->GetElementsKind() ==
3781 source->GetElementsKind());
3782 FixedArrayBase* elements = FixedArrayBase::cast(source->elements());
3783 FixedArray* properties = FixedArray::cast(source->properties());
3784 // Update elements if necessary.
3785 if (elements->length() > 0) {
3786 FixedArrayBase* elem;
3788 AllocationResult allocation;
3789 if (elements->map() == fixed_cow_array_map()) {
3790 allocation = FixedArray::cast(elements);
3791 } else if (source->HasFastDoubleElements()) {
3792 allocation = CopyFixedDoubleArray(FixedDoubleArray::cast(elements));
3794 allocation = CopyFixedArray(FixedArray::cast(elements));
3796 if (!allocation.To(&elem)) return allocation;
3798 JSObject::cast(clone)->set_elements(elem, wb_mode);
3800 // Update properties if necessary.
3801 if (properties->length() > 0) {
3804 AllocationResult allocation = CopyFixedArray(properties);
3805 if (!allocation.To(&prop)) return allocation;
3807 JSObject::cast(clone)->set_properties(prop, wb_mode);
3809 // Return the new clone.
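// Note (editorial): copy-on-write element arrays (fixed_cow_array_map) are
// shared with the source rather than copied, while regular and double element
// backing stores and the out-of-object properties array are copied. The write
// barrier is skipped (wb_mode) only when the clone is known to be in new space.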
3814 static inline void WriteOneByteData(Vector<const char> vector, uint8_t* chars,
3816 // Only works for one byte strings.
3817 DCHECK(vector.length() == len);
3818 MemCopy(chars, vector.start(), len);
3821 static inline void WriteTwoByteData(Vector<const char> vector, uint16_t* chars,
3823 const uint8_t* stream = reinterpret_cast<const uint8_t*>(vector.start());
3824 unsigned stream_length = vector.length();
3825 while (stream_length != 0) {
3826 unsigned consumed = 0;
3827 uint32_t c = unibrow::Utf8::ValueOf(stream, stream_length, &consumed);
3828 DCHECK(c != unibrow::Utf8::kBadChar);
3829 DCHECK(consumed <= stream_length);
3830 stream_length -= consumed;
3832 if (c > unibrow::Utf16::kMaxNonSurrogateCharCode) {
3835 *chars++ = unibrow::Utf16::LeadSurrogate(c);
3836 *chars++ = unibrow::Utf16::TrailSurrogate(c);
3843 DCHECK(stream_length == 0);
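// Note (editorial): this overload decodes the UTF-8 input stream and writes
// UTF-16 code units, emitting a lead/trail surrogate pair for characters above
// the BMP (c > kMaxNonSurrogateCharCode) and a single code unit otherwise.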
3848 static inline void WriteOneByteData(String* s, uint8_t* chars, int len) {
3849 DCHECK(s->length() == len);
3850 String::WriteToFlat(s, chars, 0, len);
3854 static inline void WriteTwoByteData(String* s, uint16_t* chars, int len) {
3855 DCHECK(s->length() == len);
3856 String::WriteToFlat(s, chars, 0, len);
3860 template <bool is_one_byte, typename T>
3861 AllocationResult Heap::AllocateInternalizedStringImpl(T t, int chars,
3862 uint32_t hash_field) {
3864 // Compute map and object size.
3868 DCHECK_LE(0, chars);
3869 DCHECK_GE(String::kMaxLength, chars);
3871 map = one_byte_internalized_string_map();
3872 size = SeqOneByteString::SizeFor(chars);
3874 map = internalized_string_map();
3875 size = SeqTwoByteString::SizeFor(chars);
3877 AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, TENURED);
3882 AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
3883 if (!allocation.To(&result)) return allocation;
3886 result->set_map_no_write_barrier(map);
3887 // Set length and hash fields of the allocated string.
3888 String* answer = String::cast(result);
3889 answer->set_length(chars);
3890 answer->set_hash_field(hash_field);
3892 DCHECK_EQ(size, answer->Size());
3895 WriteOneByteData(t, SeqOneByteString::cast(answer)->GetChars(), chars);
3897 WriteTwoByteData(t, SeqTwoByteString::cast(answer)->GetChars(), chars);
3903 // Need explicit instantiations.
3904 template AllocationResult Heap::AllocateInternalizedStringImpl<true>(String*,
3907 template AllocationResult Heap::AllocateInternalizedStringImpl<false>(String*,
3910 template AllocationResult Heap::AllocateInternalizedStringImpl<false>(
3911 Vector<const char>, int, uint32_t);
3914 AllocationResult Heap::AllocateRawOneByteString(int length,
3915 PretenureFlag pretenure) {
3916 DCHECK_LE(0, length);
3917 DCHECK_GE(String::kMaxLength, length);
3918 int size = SeqOneByteString::SizeFor(length);
3919 DCHECK(size <= SeqOneByteString::kMaxSize);
3920 AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
3924 AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
3925 if (!allocation.To(&result)) return allocation;
3928 // Partially initialize the object.
3929 result->set_map_no_write_barrier(one_byte_string_map());
3930 String::cast(result)->set_length(length);
3931 String::cast(result)->set_hash_field(String::kEmptyHashField);
3932 DCHECK_EQ(size, HeapObject::cast(result)->Size());
3938 AllocationResult Heap::AllocateRawTwoByteString(int length,
3939 PretenureFlag pretenure) {
3940 DCHECK_LE(0, length);
3941 DCHECK_GE(String::kMaxLength, length);
3942 int size = SeqTwoByteString::SizeFor(length);
3943 DCHECK(size <= SeqTwoByteString::kMaxSize);
3944 AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
3948 AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
3949 if (!allocation.To(&result)) return allocation;
3952 // Partially initialize the object.
3953 result->set_map_no_write_barrier(string_map());
3954 String::cast(result)->set_length(length);
3955 String::cast(result)->set_hash_field(String::kEmptyHashField);
3956 DCHECK_EQ(size, HeapObject::cast(result)->Size());
3961 AllocationResult Heap::AllocateEmptyFixedArray() {
3962 int size = FixedArray::SizeFor(0);
3965 AllocationResult allocation =
3966 AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE);
3967 if (!allocation.To(&result)) return allocation;
3969 // Initialize the object.
3970 result->set_map_no_write_barrier(fixed_array_map());
3971 FixedArray::cast(result)->set_length(0);
3976 AllocationResult Heap::AllocateEmptyExternalArray(
3977 ExternalArrayType array_type) {
3978 return AllocateExternalArray(0, array_type, NULL, TENURED);
3982 AllocationResult Heap::CopyAndTenureFixedCOWArray(FixedArray* src) {
3983 if (!InNewSpace(src)) {
3987 int len = src->length();
3990 AllocationResult allocation = AllocateRawFixedArray(len, TENURED);
3991 if (!allocation.To(&obj)) return allocation;
3993 obj->set_map_no_write_barrier(fixed_array_map());
3994 FixedArray* result = FixedArray::cast(obj);
3995 result->set_length(len);
3998 DisallowHeapAllocation no_gc;
3999 WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
4000 for (int i = 0; i < len; i++) result->set(i, src->get(i), mode);
4002 // TODO(mvstanton): The map is set twice because of protection against calling
4003 // set() on a COW FixedArray. Issue v8:3221 was created to track this; once it
4004 // is resolved we might be able to remove this whole method.
4005 HeapObject::cast(obj)->set_map_no_write_barrier(fixed_cow_array_map());
4010 AllocationResult Heap::AllocateEmptyFixedTypedArray(
4011 ExternalArrayType array_type) {
4012 return AllocateFixedTypedArray(0, array_type, TENURED);
4016 AllocationResult Heap::CopyFixedArrayWithMap(FixedArray* src, Map* map) {
4017 int len = src->length();
4020 AllocationResult allocation = AllocateRawFixedArray(len, NOT_TENURED);
4021 if (!allocation.To(&obj)) return allocation;
4023 if (InNewSpace(obj)) {
4024 obj->set_map_no_write_barrier(map);
4025 CopyBlock(obj->address() + kPointerSize, src->address() + kPointerSize,
4026 FixedArray::SizeFor(len) - kPointerSize);
4029 obj->set_map_no_write_barrier(map);
4030 FixedArray* result = FixedArray::cast(obj);
4031 result->set_length(len);
4034 DisallowHeapAllocation no_gc;
4035 WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
4036 for (int i = 0; i < len; i++) result->set(i, src->get(i), mode);
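// Note (editorial): when the new array lands in new space a raw CopyBlock of
// the element words is sufficient, since stores into a new-space object never
// need a write barrier; otherwise elements are copied one by one using the
// barrier mode reported by GetWriteBarrierMode.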
4041 AllocationResult Heap::CopyFixedDoubleArrayWithMap(FixedDoubleArray* src,
4043 int len = src->length();
4046 AllocationResult allocation = AllocateRawFixedDoubleArray(len, NOT_TENURED);
4047 if (!allocation.To(&obj)) return allocation;
4049 obj->set_map_no_write_barrier(map);
4050 CopyBlock(obj->address() + FixedDoubleArray::kLengthOffset,
4051 src->address() + FixedDoubleArray::kLengthOffset,
4052 FixedDoubleArray::SizeFor(len) - FixedDoubleArray::kLengthOffset);
4057 AllocationResult Heap::CopyConstantPoolArrayWithMap(ConstantPoolArray* src,
4060 if (src->is_extended_layout()) {
4061 ConstantPoolArray::NumberOfEntries small(src,
4062 ConstantPoolArray::SMALL_SECTION);
4063 ConstantPoolArray::NumberOfEntries extended(
4064 src, ConstantPoolArray::EXTENDED_SECTION);
4065 AllocationResult allocation =
4066 AllocateExtendedConstantPoolArray(small, extended);
4067 if (!allocation.To(&obj)) return allocation;
4069 ConstantPoolArray::NumberOfEntries small(src,
4070 ConstantPoolArray::SMALL_SECTION);
4071 AllocationResult allocation = AllocateConstantPoolArray(small);
4072 if (!allocation.To(&obj)) return allocation;
4074 obj->set_map_no_write_barrier(map);
4075 CopyBlock(obj->address() + ConstantPoolArray::kFirstEntryOffset,
4076 src->address() + ConstantPoolArray::kFirstEntryOffset,
4077 src->size() - ConstantPoolArray::kFirstEntryOffset);
4082 AllocationResult Heap::AllocateRawFixedArray(int length,
4083 PretenureFlag pretenure) {
4084 if (length < 0 || length > FixedArray::kMaxLength) {
4085 v8::internal::Heap::FatalProcessOutOfMemory("invalid array length", true);
4087 int size = FixedArray::SizeFor(length);
4088 AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, pretenure);
4090 return AllocateRaw(size, space, OLD_POINTER_SPACE);
4094 AllocationResult Heap::AllocateFixedArrayWithFiller(int length,
4095 PretenureFlag pretenure,
4097 DCHECK(length >= 0);
4098 DCHECK(empty_fixed_array()->IsFixedArray());
4099 if (length == 0) return empty_fixed_array();
4101 DCHECK(!InNewSpace(filler));
4104 AllocationResult allocation = AllocateRawFixedArray(length, pretenure);
4105 if (!allocation.To(&result)) return allocation;
4108 result->set_map_no_write_barrier(fixed_array_map());
4109 FixedArray* array = FixedArray::cast(result);
4110 array->set_length(length);
4111 MemsetPointer(array->data_start(), filler, length);
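// Note (editorial): MemsetPointer stamps every element with the same filler
// value without any write barrier; the DCHECK(!InNewSpace(filler)) above is
// presumably what makes that safe, since an old-space or immortal filler can
// never create an old-to-new pointer the store buffer would need to record.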
4116 AllocationResult Heap::AllocateFixedArray(int length, PretenureFlag pretenure) {
4117 return AllocateFixedArrayWithFiller(length, pretenure, undefined_value());
4121 AllocationResult Heap::AllocateUninitializedFixedArray(int length) {
4122 if (length == 0) return empty_fixed_array();
4126 AllocationResult allocation = AllocateRawFixedArray(length, NOT_TENURED);
4127 if (!allocation.To(&obj)) return allocation;
4130 obj->set_map_no_write_barrier(fixed_array_map());
4131 FixedArray::cast(obj)->set_length(length);
4136 AllocationResult Heap::AllocateUninitializedFixedDoubleArray(
4137 int length, PretenureFlag pretenure) {
4138 if (length == 0) return empty_fixed_array();
4140 HeapObject* elements;
4141 AllocationResult allocation = AllocateRawFixedDoubleArray(length, pretenure);
4142 if (!allocation.To(&elements)) return allocation;
4144 elements->set_map_no_write_barrier(fixed_double_array_map());
4145 FixedDoubleArray::cast(elements)->set_length(length);
4150 AllocationResult Heap::AllocateRawFixedDoubleArray(int length,
4151 PretenureFlag pretenure) {
4152 if (length < 0 || length > FixedDoubleArray::kMaxLength) {
4153 v8::internal::Heap::FatalProcessOutOfMemory("invalid array length", true);
4155 int size = FixedDoubleArray::SizeFor(length);
4156 #ifndef V8_HOST_ARCH_64_BIT
4157 size += kPointerSize;
4159 AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
4163 AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
4164 if (!allocation.To(&object)) return allocation;
4167 return EnsureDoubleAligned(this, object, size);
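// Note (editorial): on 32-bit hosts object pointers are only guaranteed
// 4-byte alignment, so one extra word is requested above and
// EnsureDoubleAligned shifts the payload (leaving a one-word filler) to obtain
// the 8-byte alignment that doubles require.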
4171 AllocationResult Heap::AllocateConstantPoolArray(
4172 const ConstantPoolArray::NumberOfEntries& small) {
4173 CHECK(small.are_in_range(0, ConstantPoolArray::kMaxSmallEntriesPerType));
4174 int size = ConstantPoolArray::SizeFor(small);
4175 #ifndef V8_HOST_ARCH_64_BIT
4176 size += kPointerSize;
4178 AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, TENURED);
4182 AllocationResult allocation = AllocateRaw(size, space, OLD_POINTER_SPACE);
4183 if (!allocation.To(&object)) return allocation;
4185 object = EnsureDoubleAligned(this, object, size);
4186 object->set_map_no_write_barrier(constant_pool_array_map());
4188 ConstantPoolArray* constant_pool = ConstantPoolArray::cast(object);
4189 constant_pool->Init(small);
4190 constant_pool->ClearPtrEntries(isolate());
4191 return constant_pool;
4195 AllocationResult Heap::AllocateExtendedConstantPoolArray(
4196 const ConstantPoolArray::NumberOfEntries& small,
4197 const ConstantPoolArray::NumberOfEntries& extended) {
4198 CHECK(small.are_in_range(0, ConstantPoolArray::kMaxSmallEntriesPerType));
4199 CHECK(extended.are_in_range(0, kMaxInt));
4200 int size = ConstantPoolArray::SizeForExtended(small, extended);
4201 #ifndef V8_HOST_ARCH_64_BIT
4202 size += kPointerSize;
4204 AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, TENURED);
4208 AllocationResult allocation = AllocateRaw(size, space, OLD_POINTER_SPACE);
4209 if (!allocation.To(&object)) return allocation;
4211 object = EnsureDoubleAligned(this, object, size);
4212 object->set_map_no_write_barrier(constant_pool_array_map());
4214 ConstantPoolArray* constant_pool = ConstantPoolArray::cast(object);
4215 constant_pool->InitExtended(small, extended);
4216 constant_pool->ClearPtrEntries(isolate());
4217 return constant_pool;
4221 AllocationResult Heap::AllocateEmptyConstantPoolArray() {
4222 ConstantPoolArray::NumberOfEntries small(0, 0, 0, 0);
4223 int size = ConstantPoolArray::SizeFor(small);
4226 AllocationResult allocation =
4227 AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE);
4228 if (!allocation.To(&result)) return allocation;
4230 result->set_map_no_write_barrier(constant_pool_array_map());
4231 ConstantPoolArray::cast(result)->Init(small);
4236 AllocationResult Heap::AllocateSymbol() {
4237 // Statically ensure that it is safe to allocate symbols in paged spaces.
4238 STATIC_ASSERT(Symbol::kSize <= Page::kMaxRegularHeapObjectSize);
4241 AllocationResult allocation =
4242 AllocateRaw(Symbol::kSize, OLD_POINTER_SPACE, OLD_POINTER_SPACE);
4243 if (!allocation.To(&result)) return allocation;
4245 result->set_map_no_write_barrier(symbol_map());
4247 // Generate a random hash value.
4251 hash = isolate()->random_number_generator()->NextInt() & Name::kHashBitMask;
4253 } while (hash == 0 && attempts < 30);
4254 if (hash == 0) hash = 1; // never return 0
4256 Symbol::cast(result)
4257 ->set_hash_field(Name::kIsNotArrayIndexMask | (hash << Name::kHashShift));
4258 Symbol::cast(result)->set_name(undefined_value());
4259 Symbol::cast(result)->set_flags(Smi::FromInt(0));
4261 DCHECK(!Symbol::cast(result)->is_private());
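// Note (editorial): symbols have no string content to hash, so a hash is
// drawn from the isolate's random number generator at allocation time, retried
// while it is zero, and stored together with kIsNotArrayIndexMask so the value
// can never be mistaken for a cached array index.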
4266 AllocationResult Heap::AllocateStruct(InstanceType type) {
4269 #define MAKE_CASE(NAME, Name, name) \
4271 map = name##_map(); \
4273 STRUCT_LIST(MAKE_CASE)
4279 int size = map->instance_size();
4280 AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, TENURED);
4283 AllocationResult allocation = Allocate(map, space);
4284 if (!allocation.To(&result)) return allocation;
4286 result->InitializeBody(size);
4291 bool Heap::IsHeapIterable() {
4292 // TODO(hpayer): This function is not correct. Allocation folding in old
4293 // space breaks the iterability.
4294 return new_space_top_after_last_gc_ == new_space()->top();
4298 void Heap::MakeHeapIterable() {
4299 DCHECK(AllowHeapAllocation::IsAllowed());
4300 if (!IsHeapIterable()) {
4301 CollectAllGarbage(kMakeHeapIterableMask, "Heap::MakeHeapIterable");
4303 if (mark_compact_collector()->sweeping_in_progress()) {
4304 mark_compact_collector()->EnsureSweepingCompleted();
4306 DCHECK(IsHeapIterable());
4310 void Heap::IdleMarkCompact(const char* message) {
4311 bool uncommit = false;
4312 if (gc_count_at_last_idle_gc_ == gc_count_) {
4313 // No GC since the last full GC, the mutator is probably not active.
4314 isolate_->compilation_cache()->Clear();
4317 CollectAllGarbage(kReduceMemoryFootprintMask, message);
4318 gc_idle_time_handler_.NotifyIdleMarkCompact();
4319 gc_count_at_last_idle_gc_ = gc_count_;
4321 new_space_.Shrink();
4322 UncommitFromSpace();
4327 void Heap::AdvanceIdleIncrementalMarking(intptr_t step_size) {
4328 incremental_marking()->Step(step_size,
4329 IncrementalMarking::NO_GC_VIA_STACK_GUARD, true);
4331 if (incremental_marking()->IsComplete()) {
4332 IdleMarkCompact("idle notification: finalize incremental");
4337 bool Heap::WorthActivatingIncrementalMarking() {
4338 return incremental_marking()->IsStopped() &&
4339 incremental_marking()->WorthActivating() && NextGCIsLikelyToBeFull();
4343 bool Heap::IdleNotification(int idle_time_in_ms) {
4344 // If incremental marking is off, we do not perform idle notification.
4345 if (!FLAG_incremental_marking) return true;
4346 base::ElapsedTimer timer;
4348 isolate()->counters()->gc_idle_time_allotted_in_ms()->AddSample(
4350 HistogramTimerScope idle_notification_scope(
4351 isolate_->counters()->gc_idle_notification());
4353 GCIdleTimeHandler::HeapState heap_state;
4354 heap_state.contexts_disposed = contexts_disposed_;
4355 heap_state.size_of_objects = static_cast<size_t>(SizeOfObjects());
4356 heap_state.incremental_marking_stopped = incremental_marking()->IsStopped();
4357 // TODO(ulan): Start incremental marking only for large heaps.
4358 heap_state.can_start_incremental_marking =
4359 incremental_marking()->ShouldActivate();
4360 heap_state.sweeping_in_progress =
4361 mark_compact_collector()->sweeping_in_progress();
4362 heap_state.mark_compact_speed_in_bytes_per_ms =
4363 static_cast<size_t>(tracer()->MarkCompactSpeedInBytesPerMillisecond());
4364 heap_state.incremental_marking_speed_in_bytes_per_ms = static_cast<size_t>(
4365 tracer()->IncrementalMarkingSpeedInBytesPerMillisecond());
4366 heap_state.scavenge_speed_in_bytes_per_ms =
4367 static_cast<size_t>(tracer()->ScavengeSpeedInBytesPerMillisecond());
4368 heap_state.available_new_space_memory = new_space_.Available();
4369 heap_state.new_space_capacity = new_space_.Capacity();
4370 heap_state.new_space_allocation_throughput_in_bytes_per_ms =
4371 static_cast<size_t>(
4372 tracer()->NewSpaceAllocationThroughputInBytesPerMillisecond());
4374 GCIdleTimeAction action =
4375 gc_idle_time_handler_.Compute(idle_time_in_ms, heap_state);
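// Note (editorial): the idle-time handler turns the idle budget plus the heap
// state sampled above into a single recommended action, which the switch below
// carries out: an incremental-marking step, a full (possibly memory-reducing)
// GC, a scavenge, finalizing concurrent sweeping, or nothing at all.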
4377 bool result = false;
4378 switch (action.type) {
4382 case DO_INCREMENTAL_MARKING:
4383 if (incremental_marking()->IsStopped()) {
4384 incremental_marking()->Start();
4386 AdvanceIdleIncrementalMarking(action.parameter);
4389 HistogramTimerScope scope(isolate_->counters()->gc_context());
4390 if (contexts_disposed_) {
4391 CollectAllGarbage(kReduceMemoryFootprintMask,
4392 "idle notification: contexts disposed");
4393 gc_idle_time_handler_.NotifyIdleMarkCompact();
4394 gc_count_at_last_idle_gc_ = gc_count_;
4396 IdleMarkCompact("idle notification: finalize idle round");
4401 CollectGarbage(NEW_SPACE, "idle notification: scavenge");
4403 case DO_FINALIZE_SWEEPING:
4404 mark_compact_collector()->EnsureSweepingCompleted();
4410 int actual_time_ms = static_cast<int>(timer.Elapsed().InMilliseconds());
4411 if (actual_time_ms <= idle_time_in_ms) {
4412 isolate()->counters()->gc_idle_time_limit_undershot()->AddSample(
4413 idle_time_in_ms - actual_time_ms);
4415 isolate()->counters()->gc_idle_time_limit_overshot()->AddSample(
4416 actual_time_ms - idle_time_in_ms);
4419 if (FLAG_trace_idle_notification) {
4420 PrintF("Idle notification: requested idle time %d ms, actual time %d ms [",
4421 idle_time_in_ms, actual_time_ms);
4426 contexts_disposed_ = 0;
4433 void Heap::Print() {
4434 if (!HasBeenSetUp()) return;
4435 isolate()->PrintStack(stdout);
4436 AllSpaces spaces(this);
4437 for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
4443 void Heap::ReportCodeStatistics(const char* title) {
4444 PrintF(">>>>>> Code Stats (%s) >>>>>>\n", title);
4445 PagedSpace::ResetCodeStatistics(isolate());
4446 // We do not look for code in new space, map space, or old space. If code
4447 // somehow ends up in those spaces, we would miss it here.
4448 code_space_->CollectCodeStatistics();
4449 lo_space_->CollectCodeStatistics();
4450 PagedSpace::ReportCodeStatistics(isolate());
4454 // This function expects that NewSpace's allocated objects histogram is
4455 // populated (via a call to CollectStatistics or else as a side effect of a
4456 // just-completed scavenge collection).
4457 void Heap::ReportHeapStatistics(const char* title) {
4459 PrintF(">>>>>> =============== %s (%d) =============== >>>>>>\n", title,
4461 PrintF("old_generation_allocation_limit_ %" V8_PTR_PREFIX "d\n",
4462 old_generation_allocation_limit_);
4465 PrintF("Number of handles : %d\n", HandleScope::NumberOfHandles(isolate_));
4466 isolate_->global_handles()->PrintStats();
4469 PrintF("Heap statistics : ");
4470 isolate_->memory_allocator()->ReportStatistics();
4471 PrintF("To space : ");
4472 new_space_.ReportStatistics();
4473 PrintF("Old pointer space : ");
4474 old_pointer_space_->ReportStatistics();
4475 PrintF("Old data space : ");
4476 old_data_space_->ReportStatistics();
4477 PrintF("Code space : ");
4478 code_space_->ReportStatistics();
4479 PrintF("Map space : ");
4480 map_space_->ReportStatistics();
4481 PrintF("Cell space : ");
4482 cell_space_->ReportStatistics();
4483 PrintF("PropertyCell space : ");
4484 property_cell_space_->ReportStatistics();
4485 PrintF("Large object space : ");
4486 lo_space_->ReportStatistics();
4487 PrintF(">>>>>> ========================================= >>>>>>\n");
4492 bool Heap::Contains(HeapObject* value) { return Contains(value->address()); }
4495 bool Heap::Contains(Address addr) {
4496 if (isolate_->memory_allocator()->IsOutsideAllocatedSpace(addr)) return false;
4497 return HasBeenSetUp() &&
4498 (new_space_.ToSpaceContains(addr) ||
4499 old_pointer_space_->Contains(addr) ||
4500 old_data_space_->Contains(addr) || code_space_->Contains(addr) ||
4501 map_space_->Contains(addr) || cell_space_->Contains(addr) ||
4502 property_cell_space_->Contains(addr) ||
4503 lo_space_->SlowContains(addr));
4507 bool Heap::InSpace(HeapObject* value, AllocationSpace space) {
4508 return InSpace(value->address(), space);
4512 bool Heap::InSpace(Address addr, AllocationSpace space) {
4513 if (isolate_->memory_allocator()->IsOutsideAllocatedSpace(addr)) return false;
4514 if (!HasBeenSetUp()) return false;
4518 return new_space_.ToSpaceContains(addr);
4519 case OLD_POINTER_SPACE:
4520 return old_pointer_space_->Contains(addr);
4521 case OLD_DATA_SPACE:
4522 return old_data_space_->Contains(addr);
4524 return code_space_->Contains(addr);
4526 return map_space_->Contains(addr);
4528 return cell_space_->Contains(addr);
4529 case PROPERTY_CELL_SPACE:
4530 return property_cell_space_->Contains(addr);
4532 return lo_space_->SlowContains(addr);
4542 void Heap::Verify() {
4543 CHECK(HasBeenSetUp());
4544 HandleScope scope(isolate());
4546 store_buffer()->Verify();
4548 if (mark_compact_collector()->sweeping_in_progress()) {
4549 // We have to wait here for the sweeper threads to have an iterable heap.
4550 mark_compact_collector()->EnsureSweepingCompleted();
4553 VerifyPointersVisitor visitor;
4554 IterateRoots(&visitor, VISIT_ONLY_STRONG);
4556 VerifySmisVisitor smis_visitor;
4557 IterateSmiRoots(&smis_visitor);
4559 new_space_.Verify();
4561 old_pointer_space_->Verify(&visitor);
4562 map_space_->Verify(&visitor);
4564 VerifyPointersVisitor no_dirty_regions_visitor;
4565 old_data_space_->Verify(&no_dirty_regions_visitor);
4566 code_space_->Verify(&no_dirty_regions_visitor);
4567 cell_space_->Verify(&no_dirty_regions_visitor);
4568 property_cell_space_->Verify(&no_dirty_regions_visitor);
4570 lo_space_->Verify();
4575 void Heap::ZapFromSpace() {
4576 NewSpacePageIterator it(new_space_.FromSpaceStart(),
4577 new_space_.FromSpaceEnd());
4578 while (it.has_next()) {
4579 NewSpacePage* page = it.next();
4580 for (Address cursor = page->area_start(), limit = page->area_end();
4581 cursor < limit; cursor += kPointerSize) {
4582 Memory::Address_at(cursor) = kFromSpaceZapValue;
4588 void Heap::IterateAndMarkPointersToFromSpace(Address start, Address end,
4589 ObjectSlotCallback callback) {
4590 Address slot_address = start;
4592 // We are not collecting slots on new space objects during mutation,
4593 // thus we have to scan for pointers to evacuation candidates when we
4594 // promote objects. But we should not record any slots in non-black
4595 // objects. A grey object's slots would be rescanned anyway, and a
4596 // white object might not survive until the end of the collection, so
4597 // recording its slots would violate the invariant.
4598 bool record_slots = false;
4599 if (incremental_marking()->IsCompacting()) {
4600 MarkBit mark_bit = Marking::MarkBitFrom(HeapObject::FromAddress(start));
4601 record_slots = Marking::IsBlack(mark_bit);
4604 while (slot_address < end) {
4605 Object** slot = reinterpret_cast<Object**>(slot_address);
4606 Object* object = *slot;
4607 // If the store buffer becomes overfull we mark pages as being exempt from
4608 // the store buffer. These pages are scanned to find pointers that point
4609 // to the new space. In that case we may hit newly promoted objects and
4610 // fix the pointers before the promotion queue gets to them. Thus the 'if'.
4611 if (object->IsHeapObject()) {
4612 if (Heap::InFromSpace(object)) {
4613 callback(reinterpret_cast<HeapObject**>(slot),
4614 HeapObject::cast(object));
4615 Object* new_object = *slot;
4616 if (InNewSpace(new_object)) {
4617 SLOW_DCHECK(Heap::InToSpace(new_object));
4618 SLOW_DCHECK(new_object->IsHeapObject());
4619 store_buffer_.EnterDirectlyIntoStoreBuffer(
4620 reinterpret_cast<Address>(slot));
4622 SLOW_DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(new_object));
4623 } else if (record_slots &&
4624 MarkCompactCollector::IsOnEvacuationCandidate(object)) {
4625 mark_compact_collector()->RecordSlot(slot, slot, object);
4628 slot_address += kPointerSize;
4634 typedef bool (*CheckStoreBufferFilter)(Object** addr);
4637 bool IsAMapPointerAddress(Object** addr) {
4638 uintptr_t a = reinterpret_cast<uintptr_t>(addr);
4639 int mod = a % Map::kSize;
4640 return mod >= Map::kPointerFieldsBeginOffset &&
4641 mod < Map::kPointerFieldsEndOffset;
4645 bool EverythingsAPointer(Object** addr) { return true; }
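// Note (editorial): these two predicates serve as filters for CheckStoreBuffer
// below. IsAMapPointerAddress only accepts slot addresses that fall inside the
// pointer-field range of a Map (used when scanning map space), while
// EverythingsAPointer treats every word as a potential pointer.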
4648 static void CheckStoreBuffer(Heap* heap, Object** current, Object** limit,
4649 Object**** store_buffer_position,
4650 Object*** store_buffer_top,
4651 CheckStoreBufferFilter filter,
4652 Address special_garbage_start,
4653 Address special_garbage_end) {
4654 Map* free_space_map = heap->free_space_map();
4655 for (; current < limit; current++) {
4656 Object* o = *current;
4657 Address current_address = reinterpret_cast<Address>(current);
4659 if (o == free_space_map) {
4660 Address current_address = reinterpret_cast<Address>(current);
4661 FreeSpace* free_space =
4662 FreeSpace::cast(HeapObject::FromAddress(current_address));
4663 int skip = free_space->Size();
4664 DCHECK(current_address + skip <= reinterpret_cast<Address>(limit));
4666 current_address += skip - kPointerSize;
4667 current = reinterpret_cast<Object**>(current_address);
4670 // Skip the current linear allocation space between top and limit which is
4671 // unmarked with the free space map, but can contain junk.
4672 if (current_address == special_garbage_start &&
4673 special_garbage_end != special_garbage_start) {
4674 current_address = special_garbage_end - kPointerSize;
4675 current = reinterpret_cast<Object**>(current_address);
4678 if (!(*filter)(current)) continue;
4679 DCHECK(current_address < special_garbage_start ||
4680 current_address >= special_garbage_end);
4681 DCHECK(reinterpret_cast<uintptr_t>(o) != kFreeListZapValue);
4682 // We have to check that the pointer does not point into new space
4683 // without trying to cast it to a heap object since the hash field of
4684 // a string can contain values like 1 and 3 which are tagged null
4685 // pointers.
4686 if (!heap->InNewSpace(o)) continue;
4687 while (**store_buffer_position < current &&
4688 *store_buffer_position < store_buffer_top) {
4689 (*store_buffer_position)++;
4691 if (**store_buffer_position != current ||
4692 *store_buffer_position == store_buffer_top) {
4693 Object** obj_start = current;
4694 while (!(*obj_start)->IsMap()) obj_start--;
4701 // Check that the store buffer contains all intergenerational pointers by
4702 // scanning a page and ensuring that all pointers to young space are in the
4703 // store buffer.
4704 void Heap::OldPointerSpaceCheckStoreBuffer() {
4705 OldSpace* space = old_pointer_space();
4706 PageIterator pages(space);
4708 store_buffer()->SortUniq();
4710 while (pages.has_next()) {
4711 Page* page = pages.next();
4712 Object** current = reinterpret_cast<Object**>(page->area_start());
4714 Address end = page->area_end();
4716 Object*** store_buffer_position = store_buffer()->Start();
4717 Object*** store_buffer_top = store_buffer()->Top();
4719 Object** limit = reinterpret_cast<Object**>(end);
4720 CheckStoreBuffer(this, current, limit, &store_buffer_position,
4721 store_buffer_top, &EverythingsAPointer, space->top(),
4727 void Heap::MapSpaceCheckStoreBuffer() {
4728 MapSpace* space = map_space();
4729 PageIterator pages(space);
4731 store_buffer()->SortUniq();
4733 while (pages.has_next()) {
4734 Page* page = pages.next();
4735 Object** current = reinterpret_cast<Object**>(page->area_start());
4737 Address end = page->area_end();
4739 Object*** store_buffer_position = store_buffer()->Start();
4740 Object*** store_buffer_top = store_buffer()->Top();
4742 Object** limit = reinterpret_cast<Object**>(end);
4743 CheckStoreBuffer(this, current, limit, &store_buffer_position,
4744 store_buffer_top, &IsAMapPointerAddress, space->top(),
4750 void Heap::LargeObjectSpaceCheckStoreBuffer() {
4751 LargeObjectIterator it(lo_space());
4752 for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
4753 // We only have code, sequential strings, or fixed arrays in large
4754 // object space, and only fixed arrays can possibly contain pointers to
4755 // the young generation.
4756 if (object->IsFixedArray()) {
4757 Object*** store_buffer_position = store_buffer()->Start();
4758 Object*** store_buffer_top = store_buffer()->Top();
4759 Object** current = reinterpret_cast<Object**>(object->address());
4761 reinterpret_cast<Object**>(object->address() + object->Size());
4762 CheckStoreBuffer(this, current, limit, &store_buffer_position,
4763 store_buffer_top, &EverythingsAPointer, NULL, NULL);
4770 void Heap::IterateRoots(ObjectVisitor* v, VisitMode mode) {
4771 IterateStrongRoots(v, mode);
4772 IterateWeakRoots(v, mode);
4776 void Heap::IterateWeakRoots(ObjectVisitor* v, VisitMode mode) {
4777 v->VisitPointer(reinterpret_cast<Object**>(&roots_[kStringTableRootIndex]));
4778 v->Synchronize(VisitorSynchronization::kStringTable);
4779 if (mode != VISIT_ALL_IN_SCAVENGE && mode != VISIT_ALL_IN_SWEEP_NEWSPACE) {
4780 // Scavenge collections have special processing for this.
4781 external_string_table_.Iterate(v);
4783 v->Synchronize(VisitorSynchronization::kExternalStringsTable);
4787 void Heap::IterateSmiRoots(ObjectVisitor* v) {
4788 // Acquire execution access since we are going to read stack limit values.
4789 ExecutionAccess access(isolate());
4790 v->VisitPointers(&roots_[kSmiRootsStart], &roots_[kRootListLength]);
4791 v->Synchronize(VisitorSynchronization::kSmiRootList);
4795 void Heap::IterateStrongRoots(ObjectVisitor* v, VisitMode mode) {
4796 v->VisitPointers(&roots_[0], &roots_[kStrongRootListLength]);
4797 v->Synchronize(VisitorSynchronization::kStrongRootList);
4799 v->VisitPointer(bit_cast<Object**>(&hidden_string_));
4800 v->Synchronize(VisitorSynchronization::kInternalizedString);
4802 isolate_->bootstrapper()->Iterate(v);
4803 v->Synchronize(VisitorSynchronization::kBootstrapper);
4804 isolate_->Iterate(v);
4805 v->Synchronize(VisitorSynchronization::kTop);
4806 Relocatable::Iterate(isolate_, v);
4807 v->Synchronize(VisitorSynchronization::kRelocatable);
4809 if (isolate_->deoptimizer_data() != NULL) {
4810 isolate_->deoptimizer_data()->Iterate(v);
4812 v->Synchronize(VisitorSynchronization::kDebug);
4813 isolate_->compilation_cache()->Iterate(v);
4814 v->Synchronize(VisitorSynchronization::kCompilationCache);
4816 // Iterate over local handles in handle scopes.
4817 isolate_->handle_scope_implementer()->Iterate(v);
4818 isolate_->IterateDeferredHandles(v);
4819 v->Synchronize(VisitorSynchronization::kHandleScope);
4821 // Iterate over the builtin code objects and code stubs in the
4822 // heap. Note that it is not necessary to iterate over code objects
4823 // on scavenge collections.
4824 if (mode != VISIT_ALL_IN_SCAVENGE) {
4825 isolate_->builtins()->IterateBuiltins(v);
4827 v->Synchronize(VisitorSynchronization::kBuiltins);
4829 // Iterate over global handles.
4831 case VISIT_ONLY_STRONG:
4832 isolate_->global_handles()->IterateStrongRoots(v);
4834 case VISIT_ALL_IN_SCAVENGE:
4835 isolate_->global_handles()->IterateNewSpaceStrongAndDependentRoots(v);
4837 case VISIT_ALL_IN_SWEEP_NEWSPACE:
4839 isolate_->global_handles()->IterateAllRoots(v);
4842 v->Synchronize(VisitorSynchronization::kGlobalHandles);
4844 // Iterate over eternal handles.
4845 if (mode == VISIT_ALL_IN_SCAVENGE) {
4846 isolate_->eternal_handles()->IterateNewSpaceRoots(v);
4848 isolate_->eternal_handles()->IterateAllRoots(v);
4850 v->Synchronize(VisitorSynchronization::kEternalHandles);
4852 // Iterate over pointers being held by inactive threads.
4853 isolate_->thread_manager()->Iterate(v);
4854 v->Synchronize(VisitorSynchronization::kThreadManager);
4856 // Iterate over the pointers the Serialization/Deserialization code is
4857 // holding.
4858 // During garbage collection this keeps the partial snapshot cache alive.
4859 // During deserialization of the startup snapshot this creates the partial
4860 // snapshot cache and deserializes the objects it refers to. During
4861 // serialization this does nothing, since the partial snapshot cache is
4862 // empty. However the next thing we do is create the partial snapshot,
4863 // filling up the partial snapshot cache with objects it needs as we go.
4864 SerializerDeserializer::Iterate(isolate_, v);
4865 // We don't do a v->Synchronize call here, because in debug mode that will
4866 // output a flag to the snapshot. However at this point the serializer and
4867 // deserializer are deliberately a little unsynchronized (see above) so the
4868 // checking of the sync flag in the snapshot would fail.
4872 // TODO(1236194): Since the heap size is configurable on the command line
4873 // and through the API, we should gracefully handle the case that the heap
4874 // size is not big enough to fit all the initial objects.
4875 bool Heap::ConfigureHeap(int max_semi_space_size, int max_old_space_size,
4876 int max_executable_size, size_t code_range_size) {
4877 if (HasBeenSetUp()) return false;
4879 // Overwrite default configuration.
4880 if (max_semi_space_size > 0) {
4881 max_semi_space_size_ = max_semi_space_size * MB;
4883 if (max_old_space_size > 0) {
4884 max_old_generation_size_ = max_old_space_size * MB;
4886 if (max_executable_size > 0) {
4887 max_executable_size_ = max_executable_size * MB;
4890 // If max space size flags are specified overwrite the configuration.
4891 if (FLAG_max_semi_space_size > 0) {
4892 max_semi_space_size_ = FLAG_max_semi_space_size * MB;
4894 if (FLAG_max_old_space_size > 0) {
4895 max_old_generation_size_ = FLAG_max_old_space_size * MB;
4897 if (FLAG_max_executable_size > 0) {
4898 max_executable_size_ = FLAG_max_executable_size * MB;
4901 if (FLAG_stress_compaction) {
4902 // This will cause more frequent GCs when stressing.
4903 max_semi_space_size_ = Page::kPageSize;
4906 if (Snapshot::HaveASnapshotToStartFrom()) {
4907 // If we are using a snapshot we always reserve the default amount
4908 // of memory for each semispace because code in the snapshot has
4909 // write-barrier code that relies on the size and alignment of new
4910 // space. We therefore cannot use a larger max semispace size
4911 // than the default reserved semispace size.
4912 if (max_semi_space_size_ > reserved_semispace_size_) {
4913 max_semi_space_size_ = reserved_semispace_size_;
4914 if (FLAG_trace_gc) {
4915 PrintPID("Max semi-space size cannot be more than %d kbytes\n",
4916 reserved_semispace_size_ >> 10);
4920 // If we are not using snapshots we reserve space for the actual
4921 // max semispace size.
4922 reserved_semispace_size_ = max_semi_space_size_;
4925 // The max executable size must be less than or equal to the max old
4926 // generation size.
4927 if (max_executable_size_ > max_old_generation_size_) {
4928 max_executable_size_ = max_old_generation_size_;
4931 // The new space size must be a power of two to support single-bit testing
4932 // for containment.
4933 max_semi_space_size_ =
4934 base::bits::RoundUpToPowerOfTwo32(max_semi_space_size_);
4935 reserved_semispace_size_ =
4936 base::bits::RoundUpToPowerOfTwo32(reserved_semispace_size_);
4938 if (FLAG_min_semi_space_size > 0) {
4939 int initial_semispace_size = FLAG_min_semi_space_size * MB;
4940 if (initial_semispace_size > max_semi_space_size_) {
4941 initial_semispace_size_ = max_semi_space_size_;
4942 if (FLAG_trace_gc) {
4944 "Min semi-space size cannot be more than the maximum"
4945 "semi-space size of %d MB\n",
4946 max_semi_space_size_);
4949 initial_semispace_size_ = initial_semispace_size;
4953 initial_semispace_size_ = Min(initial_semispace_size_, max_semi_space_size_);
4955 // The old generation is paged and needs at least one page for each space.
4956 int paged_space_count = LAST_PAGED_SPACE - FIRST_PAGED_SPACE + 1;
4957 max_old_generation_size_ =
4958 Max(static_cast<intptr_t>(paged_space_count * Page::kPageSize),
4959 max_old_generation_size_);
4961 // We rely on being able to allocate new arrays in paged spaces.
4962 DCHECK(Page::kMaxRegularHeapObjectSize >=
4964 FixedArray::SizeFor(JSObject::kInitialMaxFastElementArray) +
4965 AllocationMemento::kSize));
4967 code_range_size_ = code_range_size * MB;
4974 bool Heap::ConfigureHeapDefault() { return ConfigureHeap(0, 0, 0, 0); }
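// Note (editorial): the zero arguments keep the built-in defaults;
// ConfigureHeap above still applies the FLAG_max_semi_space_size,
// FLAG_max_old_space_size and FLAG_max_executable_size overrides, then clamps
// the result for snapshot use, stress compaction, and the power-of-two
// semispace requirement.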
4977 void Heap::RecordStats(HeapStats* stats, bool take_snapshot) {
4978 *stats->start_marker = HeapStats::kStartMarker;
4979 *stats->end_marker = HeapStats::kEndMarker;
4980 *stats->new_space_size = new_space_.SizeAsInt();
4981 *stats->new_space_capacity = static_cast<int>(new_space_.Capacity());
4982 *stats->old_pointer_space_size = old_pointer_space_->SizeOfObjects();
4983 *stats->old_pointer_space_capacity = old_pointer_space_->Capacity();
4984 *stats->old_data_space_size = old_data_space_->SizeOfObjects();
4985 *stats->old_data_space_capacity = old_data_space_->Capacity();
4986 *stats->code_space_size = code_space_->SizeOfObjects();
4987 *stats->code_space_capacity = code_space_->Capacity();
4988 *stats->map_space_size = map_space_->SizeOfObjects();
4989 *stats->map_space_capacity = map_space_->Capacity();
4990 *stats->cell_space_size = cell_space_->SizeOfObjects();
4991 *stats->cell_space_capacity = cell_space_->Capacity();
4992 *stats->property_cell_space_size = property_cell_space_->SizeOfObjects();
4993 *stats->property_cell_space_capacity = property_cell_space_->Capacity();
4994 *stats->lo_space_size = lo_space_->Size();
4995 isolate_->global_handles()->RecordStats(stats);
4996 *stats->memory_allocator_size = isolate()->memory_allocator()->Size();
4997 *stats->memory_allocator_capacity =
4998 isolate()->memory_allocator()->Size() +
4999 isolate()->memory_allocator()->Available();
5000 *stats->os_error = base::OS::GetLastError();
5001 isolate()->memory_allocator()->Available();
5002 if (take_snapshot) {
5003 HeapIterator iterator(this);
5004 for (HeapObject* obj = iterator.next(); obj != NULL;
5005 obj = iterator.next()) {
5006 InstanceType type = obj->map()->instance_type();
5007 DCHECK(0 <= type && type <= LAST_TYPE);
5008 stats->objects_per_type[type]++;
5009 stats->size_per_type[type] += obj->Size();
5015 intptr_t Heap::PromotedSpaceSizeOfObjects() {
5016 return old_pointer_space_->SizeOfObjects() +
5017 old_data_space_->SizeOfObjects() + code_space_->SizeOfObjects() +
5018 map_space_->SizeOfObjects() + cell_space_->SizeOfObjects() +
5019 property_cell_space_->SizeOfObjects() + lo_space_->SizeOfObjects();
5023 int64_t Heap::PromotedExternalMemorySize() {
5024 if (amount_of_external_allocated_memory_ <=
5025 amount_of_external_allocated_memory_at_last_global_gc_)
5027 return amount_of_external_allocated_memory_ -
5028 amount_of_external_allocated_memory_at_last_global_gc_;
5032 intptr_t Heap::OldGenerationAllocationLimit(intptr_t old_gen_size,
5033 int freed_global_handles) {
5034 const int kMaxHandles = 1000;
5035 const int kMinHandles = 100;
5036 double min_factor = 1.1;
5037 double max_factor = 4;
5038 // We set the old generation growing factor to 2 to grow the heap slower on
5039 // memory-constrained devices.
5040 if (max_old_generation_size_ <= kMaxOldSpaceSizeMediumMemoryDevice) {
5043 // If there are many freed global handles, then the next full GC will
5044 // likely collect a lot of garbage. Choose the heap growing factor
5045 // depending on freed global handles.
5046 // TODO(ulan, hpayer): Take into account mutator utilization.
5048 if (freed_global_handles <= kMinHandles) {
5049 factor = max_factor;
5050 } else if (freed_global_handles >= kMaxHandles) {
5051 factor = min_factor;
5053 // Compute factor using linear interpolation between points
5054 // (kMinHandles, max_factor) and (kMaxHandles, min_factor).
5055 factor = max_factor -
5056 (freed_global_handles - kMinHandles) * (max_factor - min_factor) /
5057 (kMaxHandles - kMinHandles);
5060 if (FLAG_stress_compaction ||
5061 mark_compact_collector()->reduce_memory_footprint_) {
5062 factor = min_factor;
5065 intptr_t limit = static_cast<intptr_t>(old_gen_size * factor);
5066 limit = Max(limit, kMinimumOldGenerationAllocationLimit);
5067 limit += new_space_.Capacity();
5068 intptr_t halfway_to_the_max = (old_gen_size + max_old_generation_size_) / 2;
5069 return Min(limit, halfway_to_the_max);
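// Worked example (editorial, using only the constants above): with an old
// generation of 100 MB and 550 freed global handles, linear interpolation
// gives factor = 4 - (550 - 100) * (4 - 1.1) / (1000 - 100) = 2.55, so the raw
// limit is 255 MB; new-space capacity is then added and the result is capped
// at halfway between the current size and max_old_generation_size_.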
5073 void Heap::EnableInlineAllocation() {
5074 if (!inline_allocation_disabled_) return;
5075 inline_allocation_disabled_ = false;
5077 // Update inline allocation limit for new space.
5078 new_space()->UpdateInlineAllocationLimit(0);
5082 void Heap::DisableInlineAllocation() {
5083 if (inline_allocation_disabled_) return;
5084 inline_allocation_disabled_ = true;
5086 // Update inline allocation limit for new space.
5087 new_space()->UpdateInlineAllocationLimit(0);
5089 // Update inline allocation limit for old spaces.
5090 PagedSpaces spaces(this);
5091 for (PagedSpace* space = spaces.next(); space != NULL;
5092 space = spaces.next()) {
5093 space->EmptyAllocationInfo();
5098 V8_DECLARE_ONCE(initialize_gc_once);
5100 static void InitializeGCOnce() {
5101 InitializeScavengingVisitorsTables();
5102 NewSpaceScavenger::Initialize();
5103 MarkCompactCollector::Initialize();
5107 bool Heap::SetUp() {
5109 allocation_timeout_ = FLAG_gc_interval;
5112 // Initialize heap spaces and initial maps and objects. Whenever something
5113 // goes wrong, just return false. The caller should check the results and
5114 // call Heap::TearDown() to release allocated memory.
5116 // If the heap is not yet configured (e.g. through the API), configure it.
5117 // Configuration is based on the flags new-space-size (really the semispace
5118 // size) and old-space-size if set or the initial values of semispace_size_
5119 // and old_generation_size_ otherwise.
5121 if (!ConfigureHeapDefault()) return false;
5124 base::CallOnce(&initialize_gc_once, &InitializeGCOnce);
5126 MarkMapPointersAsEncoded(false);
5128 // Set up memory allocator.
5129 if (!isolate_->memory_allocator()->SetUp(MaxReserved(), MaxExecutableSize()))
5132 // Set up new space.
5133 if (!new_space_.SetUp(reserved_semispace_size_, max_semi_space_size_)) {
5136 new_space_top_after_last_gc_ = new_space()->top();
5138 // Initialize old pointer space.
5139 old_pointer_space_ = new OldSpace(this, max_old_generation_size_,
5140 OLD_POINTER_SPACE, NOT_EXECUTABLE);
5141 if (old_pointer_space_ == NULL) return false;
5142 if (!old_pointer_space_->SetUp()) return false;
5144 // Initialize old data space.
5145 old_data_space_ = new OldSpace(this, max_old_generation_size_, OLD_DATA_SPACE,
5147 if (old_data_space_ == NULL) return false;
5148 if (!old_data_space_->SetUp()) return false;
5150 if (!isolate_->code_range()->SetUp(code_range_size_)) return false;
5152 // Initialize the code space, set its maximum capacity to the old
5153 // generation size. It needs executable memory.
5155 new OldSpace(this, max_old_generation_size_, CODE_SPACE, EXECUTABLE);
5156 if (code_space_ == NULL) return false;
5157 if (!code_space_->SetUp()) return false;
5159 // Initialize map space.
5160 map_space_ = new MapSpace(this, max_old_generation_size_, MAP_SPACE);
5161 if (map_space_ == NULL) return false;
5162 if (!map_space_->SetUp()) return false;
5164 // Initialize simple cell space.
5165 cell_space_ = new CellSpace(this, max_old_generation_size_, CELL_SPACE);
5166 if (cell_space_ == NULL) return false;
5167 if (!cell_space_->SetUp()) return false;
5169 // Initialize global property cell space.
5170 property_cell_space_ = new PropertyCellSpace(this, max_old_generation_size_,
5171 PROPERTY_CELL_SPACE);
5172 if (property_cell_space_ == NULL) return false;
5173 if (!property_cell_space_->SetUp()) return false;
5175 // The large object code space may contain code or data. We set the memory
5176 // to be non-executable here for safety, but this means we need to enable it
5177 // explicitly when allocating large code objects.
5178 lo_space_ = new LargeObjectSpace(this, max_old_generation_size_, LO_SPACE);
5179 if (lo_space_ == NULL) return false;
5180 if (!lo_space_->SetUp()) return false;
5182 // Set up the seed that is used to randomize the string hash function.
5183 DCHECK(hash_seed() == 0);
5184 if (FLAG_randomize_hashes) {
5185 if (FLAG_hash_seed == 0) {
5186 int rnd = isolate()->random_number_generator()->NextInt();
5187 set_hash_seed(Smi::FromInt(rnd & Name::kHashBitMask));
5189 set_hash_seed(Smi::FromInt(FLAG_hash_seed));
5193 LOG(isolate_, IntPtrTEvent("heap-capacity", Capacity()));
5194 LOG(isolate_, IntPtrTEvent("heap-available", Available()));
5196 store_buffer()->SetUp();
5198 mark_compact_collector()->SetUp();
5204 bool Heap::CreateHeapObjects() {
5205 // Create initial maps.
5206 if (!CreateInitialMaps()) return false;
5209 // Create initial objects.
5210 CreateInitialObjects();
5211 CHECK_EQ(0, gc_count_);
5213 set_native_contexts_list(undefined_value());
5214 set_array_buffers_list(undefined_value());
5215 set_allocation_sites_list(undefined_value());
5216 weak_object_to_code_table_ = undefined_value();
5221 void Heap::SetStackLimits() {
5222 DCHECK(isolate_ != NULL);
5223 DCHECK(isolate_ == isolate());
5224 // On 64-bit machines, pointers are generally out of range of Smis, so we
5225 // write something that looks like an out-of-range Smi to the GC.
5227 // Set up the special root array entries containing the stack limits.
5228 // These are actually addresses, but the tag makes the GC ignore them.
5229 roots_[kStackLimitRootIndex] = reinterpret_cast<Object*>(
5230 (isolate_->stack_guard()->jslimit() & ~kSmiTagMask) | kSmiTag);
5231 roots_[kRealStackLimitRootIndex] = reinterpret_cast<Object*>(
5232 (isolate_->stack_guard()->real_jslimit() & ~kSmiTagMask) | kSmiTag);
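// (Illustrative sketch, assuming the usual kSmiTag == 0 with a one-bit tag
// mask.) The stack-limit roots above hold raw addresses disguised as Smis:
// the low tag bits are cleared and the Smi tag is ORed in, so the GC treats
// the slots as immediates and never tries to follow them.
static uintptr_t ExampleDisguiseAddressAsSmi(uintptr_t address) {
  const uintptr_t kExampleSmiTagMask = 1;  // hypothetical one-bit tag mask
  const uintptr_t kExampleSmiTag = 0;      // hypothetical Smi tag value
  return (address & ~kExampleSmiTagMask) | kExampleSmiTag;
}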
5236 void Heap::TearDown() {
5238 if (FLAG_verify_heap) {
5243 UpdateMaximumCommitted();
5245 if (FLAG_print_cumulative_gc_stat) {
5247 PrintF("gc_count=%d ", gc_count_);
5248 PrintF("mark_sweep_count=%d ", ms_count_);
5249 PrintF("max_gc_pause=%.1f ", get_max_gc_pause());
5250 PrintF("total_gc_time=%.1f ", total_gc_time_ms_);
5251 PrintF("min_in_mutator=%.1f ", get_min_in_mutator());
5252 PrintF("max_alive_after_gc=%" V8_PTR_PREFIX "d ", get_max_alive_after_gc());
5253 PrintF("total_marking_time=%.1f ", tracer_.cumulative_sweeping_duration());
5254 PrintF("total_sweeping_time=%.1f ", tracer_.cumulative_sweeping_duration());
5258 if (FLAG_print_max_heap_committed) {
5260 PrintF("maximum_committed_by_heap=%" V8_PTR_PREFIX "d ",
5261 MaximumCommittedMemory());
5262 PrintF("maximum_committed_by_new_space=%" V8_PTR_PREFIX "d ",
5263 new_space_.MaximumCommittedMemory());
5264 PrintF("maximum_committed_by_old_pointer_space=%" V8_PTR_PREFIX "d ",
5265 old_data_space_->MaximumCommittedMemory());
5266 PrintF("maximum_committed_by_old_data_space=%" V8_PTR_PREFIX "d ",
5267 old_pointer_space_->MaximumCommittedMemory());
5268 PrintF("maximum_committed_by_old_data_space=%" V8_PTR_PREFIX "d ",
5269 old_pointer_space_->MaximumCommittedMemory());
5270 PrintF("maximum_committed_by_code_space=%" V8_PTR_PREFIX "d ",
5271 code_space_->MaximumCommittedMemory());
5272 PrintF("maximum_committed_by_map_space=%" V8_PTR_PREFIX "d ",
5273 map_space_->MaximumCommittedMemory());
5274 PrintF("maximum_committed_by_cell_space=%" V8_PTR_PREFIX "d ",
5275 cell_space_->MaximumCommittedMemory());
5276 PrintF("maximum_committed_by_property_space=%" V8_PTR_PREFIX "d ",
5277 property_cell_space_->MaximumCommittedMemory());
5278 PrintF("maximum_committed_by_lo_space=%" V8_PTR_PREFIX "d ",
5279 lo_space_->MaximumCommittedMemory());
5283 if (FLAG_verify_predictable) {
5284 PrintAlloctionsHash();
5287 TearDownArrayBuffers();
5289 isolate_->global_handles()->TearDown();
5291 external_string_table_.TearDown();
5293 mark_compact_collector()->TearDown();
5295 new_space_.TearDown();
5297 if (old_pointer_space_ != NULL) {
5298 old_pointer_space_->TearDown();
5299 delete old_pointer_space_;
5300 old_pointer_space_ = NULL;
5303 if (old_data_space_ != NULL) {
5304 old_data_space_->TearDown();
5305 delete old_data_space_;
5306 old_data_space_ = NULL;
5309 if (code_space_ != NULL) {
5310 code_space_->TearDown();
5315 if (map_space_ != NULL) {
5316 map_space_->TearDown();
5321 if (cell_space_ != NULL) {
5322 cell_space_->TearDown();
5327 if (property_cell_space_ != NULL) {
5328 property_cell_space_->TearDown();
5329 delete property_cell_space_;
5330 property_cell_space_ = NULL;
5333 if (lo_space_ != NULL) {
5334 lo_space_->TearDown();
5339 store_buffer()->TearDown();
5340 incremental_marking()->TearDown();
5342 isolate_->memory_allocator()->TearDown();
5346 void Heap::AddGCPrologueCallback(v8::Isolate::GCPrologueCallback callback,
5347 GCType gc_type, bool pass_isolate) {
5348 DCHECK(callback != NULL);
5349 GCPrologueCallbackPair pair(callback, gc_type, pass_isolate);
5350 DCHECK(!gc_prologue_callbacks_.Contains(pair));
5351 return gc_prologue_callbacks_.Add(pair);
5355 void Heap::RemoveGCPrologueCallback(v8::Isolate::GCPrologueCallback callback) {
5356 DCHECK(callback != NULL);
5357 for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
5358 if (gc_prologue_callbacks_[i].callback == callback) {
5359 gc_prologue_callbacks_.Remove(i);
5367 void Heap::AddGCEpilogueCallback(v8::Isolate::GCEpilogueCallback callback,
5368 GCType gc_type, bool pass_isolate) {
5369 DCHECK(callback != NULL);
5370 GCEpilogueCallbackPair pair(callback, gc_type, pass_isolate);
5371 DCHECK(!gc_epilogue_callbacks_.Contains(pair));
5372 return gc_epilogue_callbacks_.Add(pair);
5376 void Heap::RemoveGCEpilogueCallback(v8::Isolate::GCEpilogueCallback callback) {
5377 DCHECK(callback != NULL);
5378 for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
5379 if (gc_epilogue_callbacks_[i].callback == callback) {
5380 gc_epilogue_callbacks_.Remove(i);
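// (Illustrative sketch of the embedder-facing side; the callback below is
// hypothetical.) These lists back the public v8::Isolate GC callback API,
// and pass_isolate records which overload registered the callback. A minimal
// embedder callback could look like this:
static void ExampleGCEpilogueCallback(v8::Isolate* isolate, v8::GCType type,
                                      v8::GCCallbackFlags flags) {
  // React to the end of a GC cycle here, e.g. record heap statistics.
}
// Registration happens in embedder code, roughly:
//   isolate->AddGCEpilogueCallback(ExampleGCEpilogueCallback, v8::kGCTypeAll);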
5388 // TODO(ishell): Find a better place for this.
5389 void Heap::AddWeakObjectToCodeDependency(Handle<Object> obj,
5390 Handle<DependentCode> dep) {
5391 DCHECK(!InNewSpace(*obj));
5392 DCHECK(!InNewSpace(*dep));
5393 // This handle scope keeps the table handle local to this function, which
5394 // allows us to safely skip write barriers in table update operations.
5395 HandleScope scope(isolate());
5396 Handle<WeakHashTable> table(WeakHashTable::cast(weak_object_to_code_table_),
5398 table = WeakHashTable::Put(table, obj, dep);
5400 if (ShouldZapGarbage() && weak_object_to_code_table_ != *table) {
5401 WeakHashTable::cast(weak_object_to_code_table_)->Zap(the_hole_value());
5403 set_weak_object_to_code_table(*table);
5404 DCHECK_EQ(*dep, table->Lookup(obj));
5408 DependentCode* Heap::LookupWeakObjectToCodeDependency(Handle<Object> obj) {
5409 Object* dep = WeakHashTable::cast(weak_object_to_code_table_)->Lookup(obj);
5410 if (dep->IsDependentCode()) return DependentCode::cast(dep);
5411 return DependentCode::cast(empty_fixed_array());
5415 void Heap::EnsureWeakObjectToCodeTable() {
5416 if (!weak_object_to_code_table()->IsHashTable()) {
5417 set_weak_object_to_code_table(
5418 *WeakHashTable::New(isolate(), 16, USE_DEFAULT_MINIMUM_CAPACITY,
5424 void Heap::FatalProcessOutOfMemory(const char* location, bool take_snapshot) {
5425 v8::internal::V8::FatalProcessOutOfMemory(location, take_snapshot);
5430 class PrintHandleVisitor : public ObjectVisitor {
5432 void VisitPointers(Object** start, Object** end) {
5433 for (Object** p = start; p < end; p++)
5434 PrintF(" handle %p to %p\n", reinterpret_cast<void*>(p),
5435 reinterpret_cast<void*>(*p));
5440 void Heap::PrintHandles() {
5441 PrintF("Handles:\n");
5442 PrintHandleVisitor v;
5443 isolate_->handle_scope_implementer()->Iterate(&v);
5449 Space* AllSpaces::next() {
5450 switch (counter_++) {
5452 return heap_->new_space();
5453 case OLD_POINTER_SPACE:
5454 return heap_->old_pointer_space();
5455 case OLD_DATA_SPACE:
5456 return heap_->old_data_space();
5458 return heap_->code_space();
5460 return heap_->map_space();
5462 return heap_->cell_space();
5463 case PROPERTY_CELL_SPACE:
5464 return heap_->property_cell_space();
5466 return heap_->lo_space();
5473 PagedSpace* PagedSpaces::next() {
5474 switch (counter_++) {
5475 case OLD_POINTER_SPACE:
5476 return heap_->old_pointer_space();
5477 case OLD_DATA_SPACE:
5478 return heap_->old_data_space();
5480 return heap_->code_space();
5482 return heap_->map_space();
5484 return heap_->cell_space();
5485 case PROPERTY_CELL_SPACE:
5486 return heap_->property_cell_space();
5493 OldSpace* OldSpaces::next() {
5494 switch (counter_++) {
5495 case OLD_POINTER_SPACE:
5496 return heap_->old_pointer_space();
5497 case OLD_DATA_SPACE:
5498 return heap_->old_data_space();
5500 return heap_->code_space();
5507 SpaceIterator::SpaceIterator(Heap* heap)
5509 current_space_(FIRST_SPACE),
5514 SpaceIterator::SpaceIterator(Heap* heap, HeapObjectCallback size_func)
5516 current_space_(FIRST_SPACE),
5518 size_func_(size_func) {}
5521 SpaceIterator::~SpaceIterator() {
5522 // Delete active iterator if any.
5527 bool SpaceIterator::has_next() {
5528 // Iterate until no more spaces.
5529 return current_space_ != LAST_SPACE;
5533 ObjectIterator* SpaceIterator::next() {
5534 if (iterator_ != NULL) {
5537 // Move to the next space
5539 if (current_space_ > LAST_SPACE) {
5544 // Return iterator for the new current space.
5545 return CreateIterator();
5549 // Create an iterator for the space to iterate.
5550 ObjectIterator* SpaceIterator::CreateIterator() {
5551 DCHECK(iterator_ == NULL);
5553 switch (current_space_) {
5555 iterator_ = new SemiSpaceIterator(heap_->new_space(), size_func_);
5557 case OLD_POINTER_SPACE:
5559 new HeapObjectIterator(heap_->old_pointer_space(), size_func_);
5561 case OLD_DATA_SPACE:
5562 iterator_ = new HeapObjectIterator(heap_->old_data_space(), size_func_);
5565 iterator_ = new HeapObjectIterator(heap_->code_space(), size_func_);
5568 iterator_ = new HeapObjectIterator(heap_->map_space(), size_func_);
5571 iterator_ = new HeapObjectIterator(heap_->cell_space(), size_func_);
5573 case PROPERTY_CELL_SPACE:
5575 new HeapObjectIterator(heap_->property_cell_space(), size_func_);
5578 iterator_ = new LargeObjectIterator(heap_->lo_space(), size_func_);
5582 // Return the newly allocated iterator.
5583 DCHECK(iterator_ != NULL);
5588 class HeapObjectsFilter {
5590 virtual ~HeapObjectsFilter() {}
5591 virtual bool SkipObject(HeapObject* object) = 0;
5595 class UnreachableObjectsFilter : public HeapObjectsFilter {
5597 explicit UnreachableObjectsFilter(Heap* heap) : heap_(heap) {
5598 MarkReachableObjects();
5601 ~UnreachableObjectsFilter() {
5602 heap_->mark_compact_collector()->ClearMarkbits();
5605 bool SkipObject(HeapObject* object) {
5606 MarkBit mark_bit = Marking::MarkBitFrom(object);
5607 return !mark_bit.Get();
5611 class MarkingVisitor : public ObjectVisitor {
5613 MarkingVisitor() : marking_stack_(10) {}
5615 void VisitPointers(Object** start, Object** end) {
5616 for (Object** p = start; p < end; p++) {
5617 if (!(*p)->IsHeapObject()) continue;
5618 HeapObject* obj = HeapObject::cast(*p);
5619 MarkBit mark_bit = Marking::MarkBitFrom(obj);
5620 if (!mark_bit.Get()) {
5622 marking_stack_.Add(obj);
5627 void TransitiveClosure() {
5628 while (!marking_stack_.is_empty()) {
5629 HeapObject* obj = marking_stack_.RemoveLast();
5635 List<HeapObject*> marking_stack_;
5638 void MarkReachableObjects() {
5639 MarkingVisitor visitor;
5640 heap_->IterateRoots(&visitor, VISIT_ALL);
5641 visitor.TransitiveClosure();
5645 DisallowHeapAllocation no_allocation_;
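// (Illustrative sketch with a hypothetical node type.) The filter computes
// reachability with a plain worklist: roots are marked and pushed, and each
// popped object marks and pushes its unmarked children until the stack
// drains, exactly as MarkingVisitor and TransitiveClosure do above.
struct ExampleNode {
  bool marked;
  ExampleNode* children[2];
};
static void ExampleTransitiveClosure(ExampleNode* root) {
  List<ExampleNode*> stack(10);
  root->marked = true;
  stack.Add(root);
  while (!stack.is_empty()) {
    ExampleNode* node = stack.RemoveLast();
    for (int i = 0; i < 2; i++) {
      ExampleNode* child = node->children[i];
      if (child != NULL && !child->marked) {
        child->marked = true;
        stack.Add(child);
      }
    }
  }
}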
5649 HeapIterator::HeapIterator(Heap* heap)
5650 : make_heap_iterable_helper_(heap),
5651 no_heap_allocation_(),
5653 filtering_(HeapIterator::kNoFiltering),
5659 HeapIterator::HeapIterator(Heap* heap,
5660 HeapIterator::HeapObjectsFiltering filtering)
5661 : make_heap_iterable_helper_(heap),
5662 no_heap_allocation_(),
5664 filtering_(filtering),
5670 HeapIterator::~HeapIterator() { Shutdown(); }
5673 void HeapIterator::Init() {
5674 // Start the iteration.
5675 space_iterator_ = new SpaceIterator(heap_);
5676 switch (filtering_) {
5677 case kFilterUnreachable:
5678 filter_ = new UnreachableObjectsFilter(heap_);
5683 object_iterator_ = space_iterator_->next();
5687 void HeapIterator::Shutdown() {
5689 // Assert that in filtering mode we have iterated through all
5690 // objects. Otherwise, the heap will be left in an inconsistent state.
5691 if (filtering_ != kNoFiltering) {
5692 DCHECK(object_iterator_ == NULL);
5695 // Make sure the last iterator is deallocated.
5696 delete space_iterator_;
5697 space_iterator_ = NULL;
5698 object_iterator_ = NULL;
5704 HeapObject* HeapIterator::next() {
5705 if (filter_ == NULL) return NextObject();
5707 HeapObject* obj = NextObject();
5708 while (obj != NULL && filter_->SkipObject(obj)) obj = NextObject();
5713 HeapObject* HeapIterator::NextObject() {
5714 // No iterator means we are done.
5715 if (object_iterator_ == NULL) return NULL;
5717 if (HeapObject* obj = object_iterator_->next_object()) {
5718 // If the current iterator has more objects we are fine.
5721 // Go through the spaces looking for one that has objects.
5722 while (space_iterator_->has_next()) {
5723 object_iterator_ = space_iterator_->next();
5724 if (HeapObject* obj = object_iterator_->next_object()) {
5729 // Done with the last space.
5730 object_iterator_ = NULL;
5735 void HeapIterator::reset() {
5736 // Restart the iterator.
5744 Object* const PathTracer::kAnyGlobalObject = NULL;
5746 class PathTracer::MarkVisitor : public ObjectVisitor {
5748 explicit MarkVisitor(PathTracer* tracer) : tracer_(tracer) {}
5749 void VisitPointers(Object** start, Object** end) {
5750 // Scan all HeapObject pointers in [start, end)
5751 for (Object** p = start; !tracer_->found() && (p < end); p++) {
5752 if ((*p)->IsHeapObject()) tracer_->MarkRecursively(p, this);
5757 PathTracer* tracer_;
5761 class PathTracer::UnmarkVisitor : public ObjectVisitor {
5763 explicit UnmarkVisitor(PathTracer* tracer) : tracer_(tracer) {}
5764 void VisitPointers(Object** start, Object** end) {
5765 // Scan all HeapObject pointers in [start, end)
5766 for (Object** p = start; p < end; p++) {
5767 if ((*p)->IsHeapObject()) tracer_->UnmarkRecursively(p, this);
5772 PathTracer* tracer_;
5776 void PathTracer::VisitPointers(Object** start, Object** end) {
5777 bool done = ((what_to_find_ == FIND_FIRST) && found_target_);
5778 // Visit all HeapObject pointers in [start, end)
5779 for (Object** p = start; !done && (p < end); p++) {
5780 if ((*p)->IsHeapObject()) {
5782 done = ((what_to_find_ == FIND_FIRST) && found_target_);
5788 void PathTracer::Reset() {
5789 found_target_ = false;
5790 object_stack_.Clear();
5794 void PathTracer::TracePathFrom(Object** root) {
5795 DCHECK((search_target_ == kAnyGlobalObject) ||
5796 search_target_->IsHeapObject());
5797 found_target_in_trace_ = false;
5800 MarkVisitor mark_visitor(this);
5801 MarkRecursively(root, &mark_visitor);
5803 UnmarkVisitor unmark_visitor(this);
5804 UnmarkRecursively(root, &unmark_visitor);
5810 static bool SafeIsNativeContext(HeapObject* obj) {
5811 return obj->map() == obj->GetHeap()->raw_unchecked_native_context_map();
5815 void PathTracer::MarkRecursively(Object** p, MarkVisitor* mark_visitor) {
5816 if (!(*p)->IsHeapObject()) return;
5818 HeapObject* obj = HeapObject::cast(*p);
5820 MapWord map_word = obj->map_word();
5821 if (!map_word.ToMap()->IsHeapObject()) return; // visited before
5823 if (found_target_in_trace_) return; // stop if target found
5824 object_stack_.Add(obj);
5825 if (((search_target_ == kAnyGlobalObject) && obj->IsJSGlobalObject()) ||
5826 (obj == search_target_)) {
5827 found_target_in_trace_ = true;
5828 found_target_ = true;
5832 bool is_native_context = SafeIsNativeContext(obj);
5835 Map* map = Map::cast(map_word.ToMap());
5837 MapWord marked_map_word =
5838 MapWord::FromRawValue(obj->map_word().ToRawValue() + kMarkTag);
5839 obj->set_map_word(marked_map_word);
5841 // Scan the object body.
5842 if (is_native_context && (visit_mode_ == VISIT_ONLY_STRONG)) {
5843 // This is specialized to scan Contexts properly.
5845 reinterpret_cast<Object**>(obj->address() + Context::kHeaderSize);
5847 reinterpret_cast<Object**>(obj->address() + Context::kHeaderSize +
5848 Context::FIRST_WEAK_SLOT * kPointerSize);
5849 mark_visitor->VisitPointers(start, end);
5851 obj->IterateBody(map->instance_type(), obj->SizeFromMap(map), mark_visitor);
5854 // Scan the map after the body because the body is a lot more interesting
5855 // when doing leak detection.
5856 MarkRecursively(reinterpret_cast<Object**>(&map), mark_visitor);
5858 if (!found_target_in_trace_) { // don't pop if found the target
5859 object_stack_.RemoveLast();
5864 void PathTracer::UnmarkRecursively(Object** p, UnmarkVisitor* unmark_visitor) {
5865 if (!(*p)->IsHeapObject()) return;
5867 HeapObject* obj = HeapObject::cast(*p);
5869 MapWord map_word = obj->map_word();
5870 if (map_word.ToMap()->IsHeapObject()) return; // unmarked already
5872 MapWord unmarked_map_word =
5873 MapWord::FromRawValue(map_word.ToRawValue() - kMarkTag);
5874 obj->set_map_word(unmarked_map_word);
5876 Map* map = Map::cast(unmarked_map_word.ToMap());
5878 UnmarkRecursively(reinterpret_cast<Object**>(&map), unmark_visitor);
5880 obj->IterateBody(map->instance_type(), obj->SizeFromMap(map), unmark_visitor);
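// (Illustrative sketch; the tag value is hypothetical.) The tracer marks a
// visited object by adding kMarkTag to its raw map word, which makes the
// stored map pointer stop looking like a heap object; unmarking subtracts the
// tag again, as UnmarkRecursively does above.
static uintptr_t ExampleMarkMapWord(uintptr_t raw_map_word) {
  const uintptr_t kExampleMarkTag = 2;  // hypothetical tag added while marked
  return raw_map_word + kExampleMarkTag;
}
static uintptr_t ExampleUnmarkMapWord(uintptr_t marked_map_word) {
  const uintptr_t kExampleMarkTag = 2;
  return marked_map_word - kExampleMarkTag;
}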
5884 void PathTracer::ProcessResults() {
5885 if (found_target_) {
5886 OFStream os(stdout);
5887 os << "=====================================\n"
5888 << "==== Path to object ====\n"
5889 << "=====================================\n\n";
5891 DCHECK(!object_stack_.is_empty());
5892 for (int i = 0; i < object_stack_.length(); i++) {
5893 if (i > 0) os << "\n |\n |\n V\n\n";
5894 object_stack_[i]->Print(os);
5896 os << "=====================================\n";
5901 // Triggers a depth-first traversal of reachable objects from one
5902 // given root object and finds a path to a specific heap object and prints it.
5904 void Heap::TracePathToObjectFrom(Object* target, Object* root) {
5905 PathTracer tracer(target, PathTracer::FIND_ALL, VISIT_ALL);
5906 tracer.VisitPointer(&root);
5910 // Triggers a depth-first traversal of reachable objects from roots
5911 // and finds a path to a specific heap object and prints it.
5912 void Heap::TracePathToObject(Object* target) {
5913 PathTracer tracer(target, PathTracer::FIND_ALL, VISIT_ALL);
5914 IterateRoots(&tracer, VISIT_ONLY_STRONG);
5918 // Triggers a depth-first traversal of reachable objects from roots
5919 // and finds a path to any global object and prints it. Useful for
5920 // determining the source for leaks of global objects.
5921 void Heap::TracePathToGlobal() {
5922 PathTracer tracer(PathTracer::kAnyGlobalObject, PathTracer::FIND_ALL,
5924 IterateRoots(&tracer, VISIT_ONLY_STRONG);
5929 void Heap::UpdateCumulativeGCStatistics(double duration,
5930 double spent_in_mutator,
5931 double marking_time) {
5932 if (FLAG_print_cumulative_gc_stat) {
5933 total_gc_time_ms_ += duration;
5934 max_gc_pause_ = Max(max_gc_pause_, duration);
5935 max_alive_after_gc_ = Max(max_alive_after_gc_, SizeOfObjects());
5936 min_in_mutator_ = Min(min_in_mutator_, spent_in_mutator);
5937 } else if (FLAG_trace_gc_verbose) {
5938 total_gc_time_ms_ += duration;
5941 marking_time_ += marking_time;
5945 int KeyedLookupCache::Hash(Handle<Map> map, Handle<Name> name) {
5946 DisallowHeapAllocation no_gc;
5947 // Uses only lower 32 bits if pointers are larger.
5948 uintptr_t addr_hash =
5949 static_cast<uint32_t>(reinterpret_cast<uintptr_t>(*map)) >> kMapHashShift;
5950 return static_cast<uint32_t>((addr_hash ^ name->Hash()) & kCapacityMask);
5954 int KeyedLookupCache::Lookup(Handle<Map> map, Handle<Name> name) {
5955 DisallowHeapAllocation no_gc;
5956 int index = (Hash(map, name) & kHashMask);
5957 for (int i = 0; i < kEntriesPerBucket; i++) {
5958 Key& key = keys_[index + i];
5959 if ((key.map == *map) && key.name->Equals(*name)) {
5960 return field_offsets_[index + i];
5967 void KeyedLookupCache::Update(Handle<Map> map, Handle<Name> name,
5969 DisallowHeapAllocation no_gc;
5970 if (!name->IsUniqueName()) {
5971 if (!StringTable::InternalizeStringIfExists(
5972 name->GetIsolate(), Handle<String>::cast(name)).ToHandle(&name)) {
5976 // This cache is cleared only between mark compact passes, so we expect the
5977 // cache to only contain old space names.
5978 DCHECK(!map->GetIsolate()->heap()->InNewSpace(*name));
5980 int index = (Hash(map, name) & kHashMask);
5981 // After a GC there will be free slots, so we use them in order (this may
5982 // help to get the most frequently used one in position 0).
5983 for (int i = 0; i < kEntriesPerBucket; i++) {
5984 Key& key = keys_[index];
5985 Object* free_entry_indicator = NULL;
5986 if (key.map == free_entry_indicator) {
5989 field_offsets_[index + i] = field_offset;
5993 // No free entry found in this bucket, so we move them all down one and
5994 // put the new entry at position zero.
5995 for (int i = kEntriesPerBucket - 1; i > 0; i--) {
5996 Key& key = keys_[index + i];
5997 Key& key2 = keys_[index + i - 1];
5999 field_offsets_[index + i] = field_offsets_[index + i - 1];
6002 // Write the new first entry.
6003 Key& key = keys_[index];
6006 field_offsets_[index] = field_offset;
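// (Illustrative sketch on a plain array.) When no free slot is found, Update
// above shifts every entry in the bucket one position toward the back and
// writes the new entry at the front, so the most recently used entry tends to
// sit at position zero. The same move with a hypothetical bucket:
static void ExampleInsertAtBucketFront(int* bucket, int bucket_size,
                                       int new_value) {
  for (int i = bucket_size - 1; i > 0; i--) bucket[i] = bucket[i - 1];
  bucket[0] = new_value;
}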
6010 void KeyedLookupCache::Clear() {
6011 for (int index = 0; index < kLength; index++) keys_[index].map = NULL;
6015 void DescriptorLookupCache::Clear() {
6016 for (int index = 0; index < kLength; index++) keys_[index].source = NULL;
6020 void ExternalStringTable::CleanUp() {
6022 for (int i = 0; i < new_space_strings_.length(); ++i) {
6023 if (new_space_strings_[i] == heap_->the_hole_value()) {
6026 DCHECK(new_space_strings_[i]->IsExternalString());
6027 if (heap_->InNewSpace(new_space_strings_[i])) {
6028 new_space_strings_[last++] = new_space_strings_[i];
6030 old_space_strings_.Add(new_space_strings_[i]);
6033 new_space_strings_.Rewind(last);
6034 new_space_strings_.Trim();
6037 for (int i = 0; i < old_space_strings_.length(); ++i) {
6038 if (old_space_strings_[i] == heap_->the_hole_value()) {
6041 DCHECK(old_space_strings_[i]->IsExternalString());
6042 DCHECK(!heap_->InNewSpace(old_space_strings_[i]));
6043 old_space_strings_[last++] = old_space_strings_[i];
6045 old_space_strings_.Rewind(last);
6046 old_space_strings_.Trim();
6048 if (FLAG_verify_heap) {
6055 void ExternalStringTable::TearDown() {
6056 for (int i = 0; i < new_space_strings_.length(); ++i) {
6057 heap_->FinalizeExternalString(ExternalString::cast(new_space_strings_[i]));
6059 new_space_strings_.Free();
6060 for (int i = 0; i < old_space_strings_.length(); ++i) {
6061 heap_->FinalizeExternalString(ExternalString::cast(old_space_strings_[i]));
6063 old_space_strings_.Free();
6067 void Heap::QueueMemoryChunkForFree(MemoryChunk* chunk) {
6068 chunk->set_next_chunk(chunks_queued_for_free_);
6069 chunks_queued_for_free_ = chunk;
6073 void Heap::FreeQueuedChunks() {
6074 if (chunks_queued_for_free_ == NULL) return;
6077 for (chunk = chunks_queued_for_free_; chunk != NULL; chunk = next) {
6078 next = chunk->next_chunk();
6079 chunk->SetFlag(MemoryChunk::ABOUT_TO_BE_FREED);
6081 if (chunk->owner()->identity() == LO_SPACE) {
6082 // StoreBuffer::Filter relies on MemoryChunk::FromAnyPointerAddress.
6083 // If FromAnyPointerAddress encounters a slot that belongs to a large
6084 // chunk queued for deletion, it will fail to find the chunk because it
6085 // tries to search the list of pages owned by the large object space,
6086 // and queued chunks were detached from that list.
6087 // To work around this we split the large chunk into normal kPageSize
6088 // aligned pieces and initialize the size, owner and flags fields of
6089 // every piece. If FromAnyPointerAddress encounters a slot that belongs
6090 // to one of these smaller pieces, it will treat it as a slot on a normal Page.
6091 Address chunk_end = chunk->address() + chunk->size();
6092 MemoryChunk* inner =
6093 MemoryChunk::FromAddress(chunk->address() + Page::kPageSize);
6094 MemoryChunk* inner_last = MemoryChunk::FromAddress(chunk_end - 1);
6095 while (inner <= inner_last) {
6096 // The size of a large chunk is always a multiple of
6097 // OS::AllocateAlignment(), so there is always
6098 // enough space for a fake MemoryChunk header.
6099 Address area_end = Min(inner->address() + Page::kPageSize, chunk_end);
6100 // Guard against overflow.
6101 if (area_end < inner->address()) area_end = chunk_end;
6102 inner->SetArea(inner->address(), area_end);
6103 inner->set_size(Page::kPageSize);
6104 inner->set_owner(lo_space());
6105 inner->SetFlag(MemoryChunk::ABOUT_TO_BE_FREED);
6106 inner = MemoryChunk::FromAddress(inner->address() + Page::kPageSize);
6110 isolate_->heap()->store_buffer()->Compact();
6111 isolate_->heap()->store_buffer()->Filter(MemoryChunk::ABOUT_TO_BE_FREED);
6112 for (chunk = chunks_queued_for_free_; chunk != NULL; chunk = next) {
6113 next = chunk->next_chunk();
6114 isolate_->memory_allocator()->Free(chunk);
6116 chunks_queued_for_free_ = NULL;
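// (Illustrative sketch with a hypothetical page size.) FreeQueuedChunks
// overlays fake page-sized headers on a queued large chunk so address-based
// lookups still resolve while the chunk awaits release. The loop below walks
// the same page-sized pieces over a plain address range; the first page is
// skipped because it keeps the real chunk header.
static void ExampleWalkPageSizedPieces(uintptr_t chunk_start,
                                       size_t chunk_size) {
  const uintptr_t kExamplePageSize = 1u << 20;  // hypothetical page size
  uintptr_t chunk_end = chunk_start + chunk_size;
  for (uintptr_t piece = chunk_start + kExamplePageSize; piece < chunk_end;
       piece += kExamplePageSize) {
    uintptr_t area_end = piece + kExamplePageSize;
    if (area_end > chunk_end) area_end = chunk_end;  // last piece may be short
    // A real implementation would initialize a fake header covering
    // [piece, area_end) here.
    (void)area_end;
  }
}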
6120 void Heap::RememberUnmappedPage(Address page, bool compacted) {
6121 uintptr_t p = reinterpret_cast<uintptr_t>(page);
6122 // Tag the page pointer to make it findable in the dump file.
6124 p ^= 0xc1ead & (Page::kPageSize - 1); // Cleared.
6126 p ^= 0x1d1ed & (Page::kPageSize - 1); // I died.
6128 remembered_unmapped_pages_[remembered_unmapped_pages_index_] =
6129 reinterpret_cast<Address>(p);
6130 remembered_unmapped_pages_index_++;
6131 remembered_unmapped_pages_index_ %= kRememberedUnmappedPages;
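// (Illustrative sketch with a hypothetical capacity.) The unmapped pages are
// remembered in a small ring buffer: the index wraps around, so only the most
// recent entries survive, which is all the dump file needs.
static void ExampleRememberInRingBuffer(uintptr_t* buffer, int* index,
                                        uintptr_t value) {
  const int kExampleCapacity = 128;  // hypothetical ring size
  buffer[*index] = value;
  *index = (*index + 1) % kExampleCapacity;
}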
6135 void Heap::ClearObjectStats(bool clear_last_time_stats) {
6136 memset(object_counts_, 0, sizeof(object_counts_));
6137 memset(object_sizes_, 0, sizeof(object_sizes_));
6138 if (clear_last_time_stats) {
6139 memset(object_counts_last_time_, 0, sizeof(object_counts_last_time_));
6140 memset(object_sizes_last_time_, 0, sizeof(object_sizes_last_time_));
6145 static base::LazyMutex checkpoint_object_stats_mutex = LAZY_MUTEX_INITIALIZER;
6148 void Heap::CheckpointObjectStats() {
6149 base::LockGuard<base::Mutex> lock_guard(
6150 checkpoint_object_stats_mutex.Pointer());
6151 Counters* counters = isolate()->counters();
6152 #define ADJUST_LAST_TIME_OBJECT_COUNT(name) \
6153 counters->count_of_##name()->Increment( \
6154 static_cast<int>(object_counts_[name])); \
6155 counters->count_of_##name()->Decrement( \
6156 static_cast<int>(object_counts_last_time_[name])); \
6157 counters->size_of_##name()->Increment( \
6158 static_cast<int>(object_sizes_[name])); \
6159 counters->size_of_##name()->Decrement( \
6160 static_cast<int>(object_sizes_last_time_[name]));
6161 INSTANCE_TYPE_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
6162 #undef ADJUST_LAST_TIME_OBJECT_COUNT
6164 #define ADJUST_LAST_TIME_OBJECT_COUNT(name) \
6165 index = FIRST_CODE_KIND_SUB_TYPE + Code::name; \
6166 counters->count_of_CODE_TYPE_##name()->Increment( \
6167 static_cast<int>(object_counts_[index])); \
6168 counters->count_of_CODE_TYPE_##name()->Decrement( \
6169 static_cast<int>(object_counts_last_time_[index])); \
6170 counters->size_of_CODE_TYPE_##name()->Increment( \
6171 static_cast<int>(object_sizes_[index])); \
6172 counters->size_of_CODE_TYPE_##name()->Decrement( \
6173 static_cast<int>(object_sizes_last_time_[index]));
6174 CODE_KIND_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
6175 #undef ADJUST_LAST_TIME_OBJECT_COUNT
6176 #define ADJUST_LAST_TIME_OBJECT_COUNT(name) \
6177 index = FIRST_FIXED_ARRAY_SUB_TYPE + name; \
6178 counters->count_of_FIXED_ARRAY_##name()->Increment( \
6179 static_cast<int>(object_counts_[index])); \
6180 counters->count_of_FIXED_ARRAY_##name()->Decrement( \
6181 static_cast<int>(object_counts_last_time_[index])); \
6182 counters->size_of_FIXED_ARRAY_##name()->Increment( \
6183 static_cast<int>(object_sizes_[index])); \
6184 counters->size_of_FIXED_ARRAY_##name()->Decrement( \
6185 static_cast<int>(object_sizes_last_time_[index]));
6186 FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
6187 #undef ADJUST_LAST_TIME_OBJECT_COUNT
6188 #define ADJUST_LAST_TIME_OBJECT_COUNT(name) \
6190 FIRST_CODE_AGE_SUB_TYPE + Code::k##name##CodeAge - Code::kFirstCodeAge; \
6191 counters->count_of_CODE_AGE_##name()->Increment( \
6192 static_cast<int>(object_counts_[index])); \
6193 counters->count_of_CODE_AGE_##name()->Decrement( \
6194 static_cast<int>(object_counts_last_time_[index])); \
6195 counters->size_of_CODE_AGE_##name()->Increment( \
6196 static_cast<int>(object_sizes_[index])); \
6197 counters->size_of_CODE_AGE_##name()->Decrement( \
6198 static_cast<int>(object_sizes_last_time_[index]));
6199 CODE_AGE_LIST_COMPLETE(ADJUST_LAST_TIME_OBJECT_COUNT)
6200 #undef ADJUST_LAST_TIME_OBJECT_COUNT
6202 MemCopy(object_counts_last_time_, object_counts_, sizeof(object_counts_));
6203 MemCopy(object_sizes_last_time_, object_sizes_, sizeof(object_sizes_));
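// (Illustrative sketch for a single counter.) CheckpointObjectStats publishes
// per-type deltas: each counter is incremented by the current value and
// decremented by the value recorded at the previous checkpoint, after which
// the current snapshot becomes the new baseline.
static int ExampleCheckpointDelta(int current, int* last_time) {
  int delta = current - *last_time;  // what the counter effectively receives
  *last_time = current;              // new baseline for the next checkpoint
  return delta;
}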
6207 } // namespace v8::internal