1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
7 #include "src/accessors.h"
9 #include "src/base/bits.h"
10 #include "src/base/once.h"
11 #include "src/base/utils/random-number-generator.h"
12 #include "src/bootstrapper.h"
13 #include "src/codegen.h"
14 #include "src/compilation-cache.h"
15 #include "src/conversions.h"
16 #include "src/cpu-profiler.h"
17 #include "src/debug.h"
18 #include "src/deoptimizer.h"
19 #include "src/global-handles.h"
20 #include "src/heap/gc-idle-time-handler.h"
21 #include "src/heap/incremental-marking.h"
22 #include "src/heap/mark-compact.h"
23 #include "src/heap/objects-visiting-inl.h"
24 #include "src/heap/objects-visiting.h"
25 #include "src/heap/store-buffer.h"
26 #include "src/heap-profiler.h"
27 #include "src/isolate-inl.h"
28 #include "src/natives.h"
29 #include "src/runtime-profiler.h"
30 #include "src/scopeinfo.h"
31 #include "src/snapshot.h"
32 #include "src/utils.h"
33 #include "src/v8threads.h"
34 #include "src/vm-state-inl.h"
36 #if V8_TARGET_ARCH_ARM && !V8_INTERPRETED_REGEXP
37 #include "src/regexp-macro-assembler.h" // NOLINT
38 #include "src/arm/regexp-macro-assembler-arm.h" // NOLINT
40 #if V8_TARGET_ARCH_MIPS && !V8_INTERPRETED_REGEXP
41 #include "src/regexp-macro-assembler.h" // NOLINT
42 #include "src/mips/regexp-macro-assembler-mips.h" // NOLINT
44 #if V8_TARGET_ARCH_MIPS64 && !V8_INTERPRETED_REGEXP
45 #include "src/regexp-macro-assembler.h"
46 #include "src/mips64/regexp-macro-assembler-mips64.h"
54 : amount_of_external_allocated_memory_(0),
55 amount_of_external_allocated_memory_at_last_global_gc_(0),
58 // semispace_size_ should be a power of 2 and old_generation_size_ should
59 // be a multiple of Page::kPageSize.
60 reserved_semispace_size_(8 * (kPointerSize / 4) * MB),
61 max_semi_space_size_(8 * (kPointerSize / 4) * MB),
62 initial_semispace_size_(Page::kPageSize),
63 max_old_generation_size_(700ul * (kPointerSize / 4) * MB),
64 max_executable_size_(256ul * (kPointerSize / 4) * MB),
65 // Variables set based on semispace_size_ and old_generation_size_ in
67 // Will be 4 * reserved_semispace_size_ to ensure that young
68 // generation can be aligned to its size.
69 maximum_committed_(0),
70 survived_since_last_expansion_(0),
72 always_allocate_scope_depth_(0),
73 contexts_disposed_(0),
75 flush_monomorphic_ics_(false),
76 scan_on_scavenge_pages_(0),
78 old_pointer_space_(NULL),
79 old_data_space_(NULL),
83 property_cell_space_(NULL),
86 gc_post_processing_depth_(0),
87 allocations_count_(0),
88 raw_allocations_hash_(0),
89 dump_allocations_hash_countdown_(FLAG_dump_allocations_digest_at_alloc),
92 remembered_unmapped_pages_index_(0),
93 unflattened_strings_length_(0),
95 allocation_timeout_(0),
97 old_generation_allocation_limit_(kMinimumOldGenerationAllocationLimit),
98 old_gen_exhausted_(false),
99 inline_allocation_disabled_(false),
100 store_buffer_rebuilder_(store_buffer()),
101 hidden_string_(NULL),
102 gc_safe_size_of_old_object_(NULL),
103 total_regexp_code_generated_(0),
105 high_survival_rate_period_length_(0),
106 promoted_objects_size_(0),
108 semi_space_copied_object_size_(0),
109 semi_space_copied_rate_(0),
110 nodes_died_in_new_space_(0),
111 nodes_copied_in_new_space_(0),
113 maximum_size_scavenges_(0),
115 total_gc_time_ms_(0.0),
116 max_alive_after_gc_(0),
117 min_in_mutator_(kMaxInt),
120 mark_compact_collector_(this),
123 incremental_marking_(this),
124 gc_count_at_last_idle_gc_(0),
125 full_codegen_bytes_generated_(0),
126 crankshaft_codegen_bytes_generated_(0),
127 gcs_since_last_deopt_(0),
129 no_weak_object_verification_scope_depth_(0),
131 allocation_sites_scratchpad_length_(0),
132 promotion_queue_(this),
134 external_string_table_(this),
135 chunks_queued_for_free_(NULL),
136 gc_callbacks_depth_(0) {
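  // Note on the default sizes above: the (kPointerSize / 4) factor scales the
  // 32-bit defaults up on 64-bit targets. With kPointerSize == 4 this yields
  // 8 MB semispaces, a 700 MB old-generation limit and a 256 MB executable
  // limit; with kPointerSize == 8 it yields 16 MB, 1400 MB and 512 MB
  // respectively.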
137 // Allow build-time customization of the max semispace size. Building
138 // V8 with snapshots and a non-default max semispace size is much
139 // easier if you can define it as part of the build environment.
140 #if defined(V8_MAX_SEMISPACE_SIZE)
141 max_semi_space_size_ = reserved_semispace_size_ = V8_MAX_SEMISPACE_SIZE;
144 // Ensure old_generation_size_ is a multiple of kPageSize.
145 DCHECK(MB >= Page::kPageSize);
147 memset(roots_, 0, sizeof(roots_[0]) * kRootListLength);
148 set_native_contexts_list(NULL);
149 set_array_buffers_list(Smi::FromInt(0));
150 set_allocation_sites_list(Smi::FromInt(0));
151 set_encountered_weak_collections(Smi::FromInt(0));
152   // Put a dummy entry in the remembered pages so we can find the list in the
153   // minidump even if there are no real unmapped pages.
154 RememberUnmappedPage(NULL, false);
156 ClearObjectStats(true);
160 intptr_t Heap::Capacity() {
161 if (!HasBeenSetUp()) return 0;
163 return new_space_.Capacity() + old_pointer_space_->Capacity() +
164 old_data_space_->Capacity() + code_space_->Capacity() +
165 map_space_->Capacity() + cell_space_->Capacity() +
166 property_cell_space_->Capacity();
170 intptr_t Heap::CommittedMemory() {
171 if (!HasBeenSetUp()) return 0;
173 return new_space_.CommittedMemory() + old_pointer_space_->CommittedMemory() +
174 old_data_space_->CommittedMemory() + code_space_->CommittedMemory() +
175 map_space_->CommittedMemory() + cell_space_->CommittedMemory() +
176 property_cell_space_->CommittedMemory() + lo_space_->Size();
180 size_t Heap::CommittedPhysicalMemory() {
181 if (!HasBeenSetUp()) return 0;
183 return new_space_.CommittedPhysicalMemory() +
184 old_pointer_space_->CommittedPhysicalMemory() +
185 old_data_space_->CommittedPhysicalMemory() +
186 code_space_->CommittedPhysicalMemory() +
187 map_space_->CommittedPhysicalMemory() +
188 cell_space_->CommittedPhysicalMemory() +
189 property_cell_space_->CommittedPhysicalMemory() +
190 lo_space_->CommittedPhysicalMemory();
194 intptr_t Heap::CommittedMemoryExecutable() {
195 if (!HasBeenSetUp()) return 0;
197 return isolate()->memory_allocator()->SizeExecutable();
201 void Heap::UpdateMaximumCommitted() {
202 if (!HasBeenSetUp()) return;
204 intptr_t current_committed_memory = CommittedMemory();
205 if (current_committed_memory > maximum_committed_) {
206 maximum_committed_ = current_committed_memory;
211 intptr_t Heap::Available() {
212 if (!HasBeenSetUp()) return 0;
214 return new_space_.Available() + old_pointer_space_->Available() +
215 old_data_space_->Available() + code_space_->Available() +
216 map_space_->Available() + cell_space_->Available() +
217 property_cell_space_->Available();
221 bool Heap::HasBeenSetUp() {
222 return old_pointer_space_ != NULL && old_data_space_ != NULL &&
223 code_space_ != NULL && map_space_ != NULL && cell_space_ != NULL &&
224 property_cell_space_ != NULL && lo_space_ != NULL;
228 int Heap::GcSafeSizeOfOldObject(HeapObject* object) {
229 if (IntrusiveMarking::IsMarked(object)) {
230 return IntrusiveMarking::SizeOfMarkedObject(object);
232 return object->SizeFromMap(object->map());
236 GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space,
237 const char** reason) {
238 // Is global GC requested?
239 if (space != NEW_SPACE) {
240 isolate_->counters()->gc_compactor_caused_by_request()->Increment();
241 *reason = "GC in old space requested";
242 return MARK_COMPACTOR;
245 if (FLAG_gc_global || (FLAG_stress_compaction && (gc_count_ & 1) != 0)) {
246 *reason = "GC in old space forced by flags";
247 return MARK_COMPACTOR;
250 // Is enough data promoted to justify a global GC?
251 if (OldGenerationAllocationLimitReached()) {
252 isolate_->counters()->gc_compactor_caused_by_promoted_data()->Increment();
253 *reason = "promotion limit reached";
254 return MARK_COMPACTOR;
257 // Have allocation in OLD and LO failed?
258 if (old_gen_exhausted_) {
260 ->gc_compactor_caused_by_oldspace_exhaustion()
262 *reason = "old generations exhausted";
263 return MARK_COMPACTOR;
266   // Is there enough space left in OLD to guarantee that a scavenge can succeed?
269 // Note that MemoryAllocator->MaxAvailable() undercounts the memory available
270 // for object promotion. It counts only the bytes that the memory
271 // allocator has not yet allocated from the OS and assigned to any space,
272 // and does not count available bytes already in the old space or code
273 // space. Undercounting is safe---we may get an unrequested full GC when
274 // a scavenge would have succeeded.
275 if (isolate_->memory_allocator()->MaxAvailable() <= new_space_.Size()) {
277 ->gc_compactor_caused_by_oldspace_exhaustion()
279 *reason = "scavenge might not succeed";
280 return MARK_COMPACTOR;
289 // TODO(1238405): Combine the infrastructure for --heap-stats and
290 // --log-gc to avoid the complicated preprocessor and flag testing.
291 void Heap::ReportStatisticsBeforeGC() {
292   // Heap::ReportHeapStatistics will also log NewSpace statistics when
293   // --log-gc is set. The following logic is used to avoid double logging.
296 if (FLAG_heap_stats || FLAG_log_gc) new_space_.CollectStatistics();
297 if (FLAG_heap_stats) {
298 ReportHeapStatistics("Before GC");
299 } else if (FLAG_log_gc) {
300 new_space_.ReportStatistics();
302 if (FLAG_heap_stats || FLAG_log_gc) new_space_.ClearHistograms();
305 new_space_.CollectStatistics();
306 new_space_.ReportStatistics();
307 new_space_.ClearHistograms();
313 void Heap::PrintShortHeapStatistics() {
314 if (!FLAG_trace_gc_verbose) return;
315 PrintPID("Memory allocator, used: %6" V8_PTR_PREFIX
317 ", available: %6" V8_PTR_PREFIX "d KB\n",
318 isolate_->memory_allocator()->Size() / KB,
319 isolate_->memory_allocator()->Available() / KB);
320 PrintPID("New space, used: %6" V8_PTR_PREFIX
322 ", available: %6" V8_PTR_PREFIX
324 ", committed: %6" V8_PTR_PREFIX "d KB\n",
325 new_space_.Size() / KB, new_space_.Available() / KB,
326 new_space_.CommittedMemory() / KB);
327 PrintPID("Old pointers, used: %6" V8_PTR_PREFIX
329 ", available: %6" V8_PTR_PREFIX
331 ", committed: %6" V8_PTR_PREFIX "d KB\n",
332 old_pointer_space_->SizeOfObjects() / KB,
333 old_pointer_space_->Available() / KB,
334 old_pointer_space_->CommittedMemory() / KB);
335 PrintPID("Old data space, used: %6" V8_PTR_PREFIX
337 ", available: %6" V8_PTR_PREFIX
339 ", committed: %6" V8_PTR_PREFIX "d KB\n",
340 old_data_space_->SizeOfObjects() / KB,
341 old_data_space_->Available() / KB,
342 old_data_space_->CommittedMemory() / KB);
343 PrintPID("Code space, used: %6" V8_PTR_PREFIX
345 ", available: %6" V8_PTR_PREFIX
347 ", committed: %6" V8_PTR_PREFIX "d KB\n",
348 code_space_->SizeOfObjects() / KB, code_space_->Available() / KB,
349 code_space_->CommittedMemory() / KB);
350 PrintPID("Map space, used: %6" V8_PTR_PREFIX
352 ", available: %6" V8_PTR_PREFIX
354 ", committed: %6" V8_PTR_PREFIX "d KB\n",
355 map_space_->SizeOfObjects() / KB, map_space_->Available() / KB,
356 map_space_->CommittedMemory() / KB);
357 PrintPID("Cell space, used: %6" V8_PTR_PREFIX
359 ", available: %6" V8_PTR_PREFIX
361 ", committed: %6" V8_PTR_PREFIX "d KB\n",
362 cell_space_->SizeOfObjects() / KB, cell_space_->Available() / KB,
363 cell_space_->CommittedMemory() / KB);
364 PrintPID("PropertyCell space, used: %6" V8_PTR_PREFIX
366 ", available: %6" V8_PTR_PREFIX
368 ", committed: %6" V8_PTR_PREFIX "d KB\n",
369 property_cell_space_->SizeOfObjects() / KB,
370 property_cell_space_->Available() / KB,
371 property_cell_space_->CommittedMemory() / KB);
372 PrintPID("Large object space, used: %6" V8_PTR_PREFIX
374 ", available: %6" V8_PTR_PREFIX
376 ", committed: %6" V8_PTR_PREFIX "d KB\n",
377 lo_space_->SizeOfObjects() / KB, lo_space_->Available() / KB,
378 lo_space_->CommittedMemory() / KB);
379 PrintPID("All spaces, used: %6" V8_PTR_PREFIX
381 ", available: %6" V8_PTR_PREFIX
383 ", committed: %6" V8_PTR_PREFIX "d KB\n",
384 this->SizeOfObjects() / KB, this->Available() / KB,
385 this->CommittedMemory() / KB);
386 PrintPID("External memory reported: %6" V8_PTR_PREFIX "d KB\n",
387 static_cast<intptr_t>(amount_of_external_allocated_memory_ / KB));
388 PrintPID("Total time spent in GC : %.1f ms\n", total_gc_time_ms_);
392 // TODO(1238405): Combine the infrastructure for --heap-stats and
393 // --log-gc to avoid the complicated preprocessor and flag testing.
394 void Heap::ReportStatisticsAfterGC() {
395   // As with the before-GC reporting, we use some complicated logic to ensure
396   // that NewSpace statistics are logged exactly once when --log-gc is turned on.
398 if (FLAG_heap_stats) {
399 new_space_.CollectStatistics();
400 ReportHeapStatistics("After GC");
401 } else if (FLAG_log_gc) {
402 new_space_.ReportStatistics();
405 if (FLAG_log_gc) new_space_.ReportStatistics();
410 void Heap::GarbageCollectionPrologue() {
412 AllowHeapAllocation for_the_first_part_of_prologue;
413 ClearJSFunctionResultCaches();
415 unflattened_strings_length_ = 0;
417 if (FLAG_flush_code && FLAG_flush_code_incrementally) {
418 mark_compact_collector()->EnableCodeFlushing(true);
422 if (FLAG_verify_heap) {
428 // Reset GC statistics.
429 promoted_objects_size_ = 0;
430 semi_space_copied_object_size_ = 0;
431 nodes_died_in_new_space_ = 0;
432 nodes_copied_in_new_space_ = 0;
435 UpdateMaximumCommitted();
438 DCHECK(!AllowHeapAllocation::IsAllowed() && gc_state_ == NOT_IN_GC);
440 if (FLAG_gc_verbose) Print();
442 ReportStatisticsBeforeGC();
445 store_buffer()->GCPrologue();
447 if (isolate()->concurrent_osr_enabled()) {
448 isolate()->optimizing_compiler_thread()->AgeBufferedOsrJobs();
451 if (new_space_.IsAtMaximumCapacity()) {
452 maximum_size_scavenges_++;
454 maximum_size_scavenges_ = 0;
456 CheckNewSpaceExpansionCriteria();
460 intptr_t Heap::SizeOfObjects() {
462 AllSpaces spaces(this);
463 for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
464 total += space->SizeOfObjects();
470 void Heap::ClearAllICsByKind(Code::Kind kind) {
471 HeapObjectIterator it(code_space());
473 for (Object* object = it.Next(); object != NULL; object = it.Next()) {
474 Code* code = Code::cast(object);
475 Code::Kind current_kind = code->kind();
476 if (current_kind == Code::FUNCTION ||
477 current_kind == Code::OPTIMIZED_FUNCTION) {
478 code->ClearInlineCaches(kind);
484 void Heap::RepairFreeListsAfterBoot() {
485 PagedSpaces spaces(this);
486 for (PagedSpace* space = spaces.next(); space != NULL;
487 space = spaces.next()) {
488 space->RepairFreeListsAfterBoot();
493 void Heap::ProcessPretenuringFeedback() {
494 if (FLAG_allocation_site_pretenuring) {
495 int tenure_decisions = 0;
496 int dont_tenure_decisions = 0;
497 int allocation_mementos_found = 0;
498 int allocation_sites = 0;
499 int active_allocation_sites = 0;
501     // If the scratchpad overflowed, we have to iterate over the allocation sites list.
503     // TODO(hpayer): We iterate over the whole list of allocation sites once
504     // we have grown to the maximum semi-space size, in order to deopt
505     // maybe-tenured allocation sites. We could hold the maybe-tenured
506     // allocation sites in a separate data structure if this is a performance problem.
507 bool deopt_maybe_tenured = DeoptMaybeTenuredAllocationSites();
508 bool use_scratchpad =
509 allocation_sites_scratchpad_length_ < kAllocationSiteScratchpadSize &&
510 !deopt_maybe_tenured;
513 Object* list_element = allocation_sites_list();
514 bool trigger_deoptimization = false;
515 bool maximum_size_scavenge = MaximumSizeScavenge();
516 while (use_scratchpad ? i < allocation_sites_scratchpad_length_
517 : list_element->IsAllocationSite()) {
518 AllocationSite* site =
520 ? AllocationSite::cast(allocation_sites_scratchpad()->get(i))
521 : AllocationSite::cast(list_element);
522 allocation_mementos_found += site->memento_found_count();
523 if (site->memento_found_count() > 0) {
524 active_allocation_sites++;
525 if (site->DigestPretenuringFeedback(maximum_size_scavenge)) {
526 trigger_deoptimization = true;
528 if (site->GetPretenureMode() == TENURED) {
531 dont_tenure_decisions++;
536 if (deopt_maybe_tenured && site->IsMaybeTenure()) {
537 site->set_deopt_dependent_code(true);
538 trigger_deoptimization = true;
541 if (use_scratchpad) {
544 list_element = site->weak_next();
548 if (trigger_deoptimization) {
549 isolate_->stack_guard()->RequestDeoptMarkedAllocationSites();
552 FlushAllocationSitesScratchpad();
554 if (FLAG_trace_pretenuring_statistics &&
555 (allocation_mementos_found > 0 || tenure_decisions > 0 ||
556 dont_tenure_decisions > 0)) {
558 "GC: (mode, #visited allocation sites, #active allocation sites, "
559 "#mementos, #tenure decisions, #donttenure decisions) "
560 "(%s, %d, %d, %d, %d, %d)\n",
561 use_scratchpad ? "use scratchpad" : "use list", allocation_sites,
562 active_allocation_sites, allocation_mementos_found, tenure_decisions,
563 dont_tenure_decisions);
569 void Heap::DeoptMarkedAllocationSites() {
570 // TODO(hpayer): If iterating over the allocation sites list becomes a
571 // performance issue, use a cache heap data structure instead (similar to the
572 // allocation sites scratchpad).
573 Object* list_element = allocation_sites_list();
574 while (list_element->IsAllocationSite()) {
575 AllocationSite* site = AllocationSite::cast(list_element);
576 if (site->deopt_dependent_code()) {
577 site->dependent_code()->MarkCodeForDeoptimization(
578 isolate_, DependentCode::kAllocationSiteTenuringChangedGroup);
579 site->set_deopt_dependent_code(false);
581 list_element = site->weak_next();
583 Deoptimizer::DeoptimizeMarkedCode(isolate_);
587 void Heap::GarbageCollectionEpilogue() {
588 store_buffer()->GCEpilogue();
590 // In release mode, we only zap the from space under heap verification.
591 if (Heap::ShouldZapGarbage()) {
595 // Process pretenuring feedback and update allocation sites.
596 ProcessPretenuringFeedback();
599 if (FLAG_verify_heap) {
604 AllowHeapAllocation for_the_rest_of_the_epilogue;
607 if (FLAG_print_global_handles) isolate_->global_handles()->Print();
608 if (FLAG_print_handles) PrintHandles();
609 if (FLAG_gc_verbose) Print();
610 if (FLAG_code_stats) ReportCodeStatistics("After GC");
612 if (FLAG_deopt_every_n_garbage_collections > 0) {
613 // TODO(jkummerow/ulan/jarin): This is not safe! We can't assume that
614 // the topmost optimized frame can be deoptimized safely, because it
615 // might not have a lazy bailout point right after its current PC.
616 if (++gcs_since_last_deopt_ == FLAG_deopt_every_n_garbage_collections) {
617 Deoptimizer::DeoptimizeAll(isolate());
618 gcs_since_last_deopt_ = 0;
622 UpdateMaximumCommitted();
624 isolate_->counters()->alive_after_last_gc()->Set(
625 static_cast<int>(SizeOfObjects()));
627 isolate_->counters()->string_table_capacity()->Set(
628 string_table()->Capacity());
629 isolate_->counters()->number_of_symbols()->Set(
630 string_table()->NumberOfElements());
632 if (full_codegen_bytes_generated_ + crankshaft_codegen_bytes_generated_ > 0) {
633 isolate_->counters()->codegen_fraction_crankshaft()->AddSample(
634 static_cast<int>((crankshaft_codegen_bytes_generated_ * 100.0) /
635 (crankshaft_codegen_bytes_generated_ +
636 full_codegen_bytes_generated_)));
639 if (CommittedMemory() > 0) {
640 isolate_->counters()->external_fragmentation_total()->AddSample(
641 static_cast<int>(100 - (SizeOfObjects() * 100.0) / CommittedMemory()));
643 isolate_->counters()->heap_fraction_new_space()->AddSample(static_cast<int>(
644 (new_space()->CommittedMemory() * 100.0) / CommittedMemory()));
645 isolate_->counters()->heap_fraction_old_pointer_space()->AddSample(
646 static_cast<int>((old_pointer_space()->CommittedMemory() * 100.0) /
648 isolate_->counters()->heap_fraction_old_data_space()->AddSample(
649 static_cast<int>((old_data_space()->CommittedMemory() * 100.0) /
651 isolate_->counters()->heap_fraction_code_space()->AddSample(
652 static_cast<int>((code_space()->CommittedMemory() * 100.0) /
654 isolate_->counters()->heap_fraction_map_space()->AddSample(static_cast<int>(
655 (map_space()->CommittedMemory() * 100.0) / CommittedMemory()));
656 isolate_->counters()->heap_fraction_cell_space()->AddSample(
657 static_cast<int>((cell_space()->CommittedMemory() * 100.0) /
659 isolate_->counters()->heap_fraction_property_cell_space()->AddSample(
660 static_cast<int>((property_cell_space()->CommittedMemory() * 100.0) /
662 isolate_->counters()->heap_fraction_lo_space()->AddSample(static_cast<int>(
663 (lo_space()->CommittedMemory() * 100.0) / CommittedMemory()));
665 isolate_->counters()->heap_sample_total_committed()->AddSample(
666 static_cast<int>(CommittedMemory() / KB));
667 isolate_->counters()->heap_sample_total_used()->AddSample(
668 static_cast<int>(SizeOfObjects() / KB));
669 isolate_->counters()->heap_sample_map_space_committed()->AddSample(
670 static_cast<int>(map_space()->CommittedMemory() / KB));
671 isolate_->counters()->heap_sample_cell_space_committed()->AddSample(
672 static_cast<int>(cell_space()->CommittedMemory() / KB));
674 ->heap_sample_property_cell_space_committed()
676 static_cast<int>(property_cell_space()->CommittedMemory() / KB));
677 isolate_->counters()->heap_sample_code_space_committed()->AddSample(
678 static_cast<int>(code_space()->CommittedMemory() / KB));
680 isolate_->counters()->heap_sample_maximum_committed()->AddSample(
681 static_cast<int>(MaximumCommittedMemory() / KB));
684 #define UPDATE_COUNTERS_FOR_SPACE(space) \
685 isolate_->counters()->space##_bytes_available()->Set( \
686 static_cast<int>(space()->Available())); \
687 isolate_->counters()->space##_bytes_committed()->Set( \
688 static_cast<int>(space()->CommittedMemory())); \
689 isolate_->counters()->space##_bytes_used()->Set( \
690 static_cast<int>(space()->SizeOfObjects()));
691 #define UPDATE_FRAGMENTATION_FOR_SPACE(space) \
692 if (space()->CommittedMemory() > 0) { \
693 isolate_->counters()->external_fragmentation_##space()->AddSample( \
694 static_cast<int>(100 - \
695 (space()->SizeOfObjects() * 100.0) / \
696 space()->CommittedMemory())); \
698 #define UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(space) \
699 UPDATE_COUNTERS_FOR_SPACE(space) \
700 UPDATE_FRAGMENTATION_FOR_SPACE(space)
702 UPDATE_COUNTERS_FOR_SPACE(new_space)
703 UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(old_pointer_space)
704 UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(old_data_space)
705 UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(code_space)
706 UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(map_space)
707 UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(cell_space)
708 UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(property_cell_space)
709 UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(lo_space)
710 #undef UPDATE_COUNTERS_FOR_SPACE
711 #undef UPDATE_FRAGMENTATION_FOR_SPACE
712 #undef UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE
715 ReportStatisticsAfterGC();
718 // Remember the last top pointer so that we can later find out
719 // whether we allocated in new space since the last GC.
720 new_space_top_after_last_gc_ = new_space()->top();
724 void Heap::CollectAllGarbage(int flags, const char* gc_reason,
725 const v8::GCCallbackFlags gc_callback_flags) {
726   // Since we are ignoring the return value, the exact choice of space does
727   // not matter, so long as we do not specify NEW_SPACE, which would not cause a full GC.
729 mark_compact_collector_.SetFlags(flags);
730 CollectGarbage(OLD_POINTER_SPACE, gc_reason, gc_callback_flags);
731 mark_compact_collector_.SetFlags(kNoGCFlags);
735 void Heap::CollectAllAvailableGarbage(const char* gc_reason) {
736   // Since we are ignoring the return value, the exact choice of space does
737   // not matter, so long as we do not specify NEW_SPACE, which would not cause a full GC.
739   // A major GC would invoke weak handle callbacks on weakly reachable
740   // handles, but won't collect weakly reachable objects until the next
741   // major GC. Therefore, if we collect aggressively and a weak handle callback
742   // has been invoked, we rerun the major GC to release objects which become garbage.
744 // Note: as weak callbacks can execute arbitrary code, we cannot
745 // hope that eventually there will be no weak callbacks invocations.
746 // Therefore stop recollecting after several attempts.
747 if (isolate()->concurrent_recompilation_enabled()) {
748 // The optimizing compiler may be unnecessarily holding on to memory.
749 DisallowHeapAllocation no_recursive_gc;
750 isolate()->optimizing_compiler_thread()->Flush();
752 mark_compact_collector()->SetFlags(kMakeHeapIterableMask |
753 kReduceMemoryFootprintMask);
754 isolate_->compilation_cache()->Clear();
755 const int kMaxNumberOfAttempts = 7;
756 const int kMinNumberOfAttempts = 2;
757 for (int attempt = 0; attempt < kMaxNumberOfAttempts; attempt++) {
758 if (!CollectGarbage(MARK_COMPACTOR, gc_reason, NULL) &&
759 attempt + 1 >= kMinNumberOfAttempts) {
763 mark_compact_collector()->SetFlags(kNoGCFlags);
766 incremental_marking()->UncommitMarkingDeque();
770 void Heap::EnsureFillerObjectAtTop() {
771   // There may be an allocation memento behind every object in new space.
772   // If we evacuate a new space that is not full, or if we are on the last page
773   // of the new space, then there may be uninitialized memory behind the top
774   // pointer of the new space page. We store a filler object there to
775   // identify the unused space.
776 Address from_top = new_space_.top();
777 Address from_limit = new_space_.limit();
778 if (from_top < from_limit) {
779 int remaining_in_page = static_cast<int>(from_limit - from_top);
780 CreateFillerObjectAt(from_top, remaining_in_page);
785 bool Heap::CollectGarbage(GarbageCollector collector, const char* gc_reason,
786 const char* collector_reason,
787 const v8::GCCallbackFlags gc_callback_flags) {
788 // The VM is in the GC state until exiting this function.
789 VMState<GC> state(isolate_);
792 // Reset the allocation timeout to the GC interval, but make sure to
793 // allow at least a few allocations after a collection. The reason
794 // for this is that we have a lot of allocation sequences and we
795 // assume that a garbage collection will allow the subsequent
796 // allocation attempts to go through.
797 allocation_timeout_ = Max(6, FLAG_gc_interval);
800 EnsureFillerObjectAtTop();
802 if (collector == SCAVENGER && !incremental_marking()->IsStopped()) {
803 if (FLAG_trace_incremental_marking) {
804 PrintF("[IncrementalMarking] Scavenge during marking.\n");
808 if (collector == MARK_COMPACTOR &&
809 !mark_compact_collector()->abort_incremental_marking() &&
810 !incremental_marking()->IsStopped() &&
811 !incremental_marking()->should_hurry() &&
812 FLAG_incremental_marking_steps) {
813 // Make progress in incremental marking.
814 const intptr_t kStepSizeWhenDelayedByScavenge = 1 * MB;
815 incremental_marking()->Step(kStepSizeWhenDelayedByScavenge,
816 IncrementalMarking::NO_GC_VIA_STACK_GUARD);
817 if (!incremental_marking()->IsComplete() && !FLAG_gc_global) {
818 if (FLAG_trace_incremental_marking) {
819 PrintF("[IncrementalMarking] Delaying MarkSweep.\n");
821 collector = SCAVENGER;
822 collector_reason = "incremental marking delaying mark-sweep";
826 bool next_gc_likely_to_collect_more = false;
829 tracer()->Start(collector, gc_reason, collector_reason);
830 DCHECK(AllowHeapAllocation::IsAllowed());
831 DisallowHeapAllocation no_allocation_during_gc;
832 GarbageCollectionPrologue();
835 HistogramTimerScope histogram_timer_scope(
836 (collector == SCAVENGER) ? isolate_->counters()->gc_scavenger()
837 : isolate_->counters()->gc_compactor());
838 next_gc_likely_to_collect_more =
839 PerformGarbageCollection(collector, gc_callback_flags);
842 GarbageCollectionEpilogue();
846 // Start incremental marking for the next cycle. The heap snapshot
847 // generator needs incremental marking to stay off after it aborted.
848 if (!mark_compact_collector()->abort_incremental_marking() &&
849 WorthActivatingIncrementalMarking()) {
850 incremental_marking()->Start();
853 return next_gc_likely_to_collect_more;
857 int Heap::NotifyContextDisposed() {
858 if (isolate()->concurrent_recompilation_enabled()) {
859 // Flush the queued recompilation tasks.
860 isolate()->optimizing_compiler_thread()->Flush();
862 flush_monomorphic_ics_ = true;
864 return ++contexts_disposed_;
868 void Heap::MoveElements(FixedArray* array, int dst_index, int src_index,
870 if (len == 0) return;
872 DCHECK(array->map() != fixed_cow_array_map());
873 Object** dst_objects = array->data_start() + dst_index;
874 MemMove(dst_objects, array->data_start() + src_index, len * kPointerSize);
875 if (!InNewSpace(array)) {
876 for (int i = 0; i < len; i++) {
877 // TODO(hpayer): check store buffer for entries
878 if (InNewSpace(dst_objects[i])) {
879 RecordWrite(array->address(), array->OffsetOfElementAt(dst_index + i));
883 incremental_marking()->RecordWrites(array);
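  // The loop above re-records (via the store buffer write barrier) every moved
  // slot that now points into new space when the array itself lives outside
  // new space, and RecordWrites tells the incremental marker to revisit the
  // array; together they restore the write barriers that the raw MemMove
  // bypassed.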
888 // Helper class for verifying the string table.
889 class StringTableVerifier : public ObjectVisitor {
891 void VisitPointers(Object** start, Object** end) {
892 // Visit all HeapObject pointers in [start, end).
893 for (Object** p = start; p < end; p++) {
894 if ((*p)->IsHeapObject()) {
895 // Check that the string is actually internalized.
896 CHECK((*p)->IsTheHole() || (*p)->IsUndefined() ||
897 (*p)->IsInternalizedString());
904 static void VerifyStringTable(Heap* heap) {
905 StringTableVerifier verifier;
906 heap->string_table()->IterateElements(&verifier);
908 #endif // VERIFY_HEAP
911 static bool AbortIncrementalMarkingAndCollectGarbage(
912 Heap* heap, AllocationSpace space, const char* gc_reason = NULL) {
913 heap->mark_compact_collector()->SetFlags(Heap::kAbortIncrementalMarkingMask);
914 bool result = heap->CollectGarbage(space, gc_reason);
915 heap->mark_compact_collector()->SetFlags(Heap::kNoGCFlags);
920 void Heap::ReserveSpace(int* sizes, Address* locations_out) {
921 bool gc_performed = true;
923 static const int kThreshold = 20;
924 while (gc_performed && counter++ < kThreshold) {
925 gc_performed = false;
926 DCHECK(NEW_SPACE == FIRST_PAGED_SPACE - 1);
927 for (int space = NEW_SPACE; space <= LAST_PAGED_SPACE; space++) {
928 if (sizes[space] != 0) {
929 AllocationResult allocation;
930 if (space == NEW_SPACE) {
931 allocation = new_space()->AllocateRaw(sizes[space]);
933 allocation = paged_space(space)->AllocateRaw(sizes[space]);
936 if (!allocation.To(&node)) {
937 if (space == NEW_SPACE) {
938 Heap::CollectGarbage(NEW_SPACE,
939 "failed to reserve space in the new space");
941 AbortIncrementalMarkingAndCollectGarbage(
942 this, static_cast<AllocationSpace>(space),
943 "failed to reserve space in paged space");
948           // Mark with a free list node, in case we have a GC before the reserved space is used.
950 node->set_size(this, sizes[space]);
951 locations_out[space] = node->address();
958 // Failed to reserve the space after several attempts.
959 V8::FatalProcessOutOfMemory("Heap::ReserveSpace");
964 void Heap::EnsureFromSpaceIsCommitted() {
965 if (new_space_.CommitFromSpaceIfNeeded()) return;
967 // Committing memory to from space failed.
968 // Memory is exhausted and we will die.
969 V8::FatalProcessOutOfMemory("Committing semi space failed.");
973 void Heap::ClearJSFunctionResultCaches() {
974 if (isolate_->bootstrapper()->IsActive()) return;
976 Object* context = native_contexts_list();
977 while (!context->IsUndefined()) {
978 // Get the caches for this context. GC can happen when the context
979 // is not fully initialized, so the caches can be undefined.
980 Object* caches_or_undefined =
981 Context::cast(context)->get(Context::JSFUNCTION_RESULT_CACHES_INDEX);
982 if (!caches_or_undefined->IsUndefined()) {
983 FixedArray* caches = FixedArray::cast(caches_or_undefined);
985 int length = caches->length();
986 for (int i = 0; i < length; i++) {
987 JSFunctionResultCache::cast(caches->get(i))->Clear();
990 // Get the next context:
991 context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
996 void Heap::ClearNormalizedMapCaches() {
997 if (isolate_->bootstrapper()->IsActive() &&
998 !incremental_marking()->IsMarking()) {
1002 Object* context = native_contexts_list();
1003 while (!context->IsUndefined()) {
1004 // GC can happen when the context is not fully initialized,
1005 // so the cache can be undefined.
1007 Context::cast(context)->get(Context::NORMALIZED_MAP_CACHE_INDEX);
1008 if (!cache->IsUndefined()) {
1009 NormalizedMapCache::cast(cache)->Clear();
1011 context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
1016 void Heap::UpdateSurvivalStatistics(int start_new_space_size) {
1017 if (start_new_space_size == 0) return;
1019 promotion_rate_ = (static_cast<double>(promoted_objects_size_) /
1020 static_cast<double>(start_new_space_size) * 100);
1022 semi_space_copied_rate_ =
1023 (static_cast<double>(semi_space_copied_object_size_) /
1024 static_cast<double>(start_new_space_size) * 100);
1026 double survival_rate = promotion_rate_ + semi_space_copied_rate_;
1028 if (survival_rate > kYoungSurvivalRateHighThreshold) {
1029 high_survival_rate_period_length_++;
1031 high_survival_rate_period_length_ = 0;
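  // A run of scavenges whose survival rate stays above the threshold is
  // tracked in high_survival_rate_period_length_; IsHighSurvivalRate(),
  // checked in PerformGarbageCollection below, presumably consults this
  // counter when deciding to speed up incremental marking so that promotion
  // does not outpace marking.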
1035 bool Heap::PerformGarbageCollection(
1036 GarbageCollector collector, const v8::GCCallbackFlags gc_callback_flags) {
1037 int freed_global_handles = 0;
1039 if (collector != SCAVENGER) {
1040 PROFILE(isolate_, CodeMovingGCEvent());
1044 if (FLAG_verify_heap) {
1045 VerifyStringTable(this);
1050 collector == MARK_COMPACTOR ? kGCTypeMarkSweepCompact : kGCTypeScavenge;
1053 GCCallbacksScope scope(this);
1054 if (scope.CheckReenter()) {
1055 AllowHeapAllocation allow_allocation;
1056 GCTracer::Scope scope(tracer(), GCTracer::Scope::EXTERNAL);
1057 VMState<EXTERNAL> state(isolate_);
1058 HandleScope handle_scope(isolate_);
1059 CallGCPrologueCallbacks(gc_type, kNoGCCallbackFlags);
1063 EnsureFromSpaceIsCommitted();
1065 int start_new_space_size = Heap::new_space()->SizeAsInt();
1067 if (IsHighSurvivalRate()) {
1068 // We speed up the incremental marker if it is running so that it
1069 // does not fall behind the rate of promotion, which would cause a
1070 // constantly growing old space.
1071 incremental_marking()->NotifyOfHighPromotionRate();
1074 if (collector == MARK_COMPACTOR) {
1075 // Perform mark-sweep with optional compaction.
1077 sweep_generation_++;
1078     // Temporarily set the limit for the case when PostGarbageCollectionProcessing
1079     // allocates and triggers GC. The real limit is set after
1080     // PostGarbageCollectionProcessing.
1081 old_generation_allocation_limit_ =
1082 OldGenerationAllocationLimit(PromotedSpaceSizeOfObjects(), 0);
1083 old_gen_exhausted_ = false;
1088 UpdateSurvivalStatistics(start_new_space_size);
1090 isolate_->counters()->objs_since_last_young()->Set(0);
1092   // Callbacks that fire after this point might trigger nested GCs and
1093   // restart incremental marking, so the assertion can't be moved down.
1094 DCHECK(collector == SCAVENGER || incremental_marking()->IsStopped());
1096 gc_post_processing_depth_++;
1098 AllowHeapAllocation allow_allocation;
1099 GCTracer::Scope scope(tracer(), GCTracer::Scope::EXTERNAL);
1100 freed_global_handles =
1101 isolate_->global_handles()->PostGarbageCollectionProcessing(collector);
1103 gc_post_processing_depth_--;
1105 isolate_->eternal_handles()->PostGarbageCollectionProcessing(this);
1107 // Update relocatables.
1108 Relocatable::PostGarbageCollectionProcessing(isolate_);
1110 if (collector == MARK_COMPACTOR) {
1111 // Register the amount of external allocated memory.
1112 amount_of_external_allocated_memory_at_last_global_gc_ =
1113 amount_of_external_allocated_memory_;
1114 old_generation_allocation_limit_ = OldGenerationAllocationLimit(
1115 PromotedSpaceSizeOfObjects(), freed_global_handles);
1119 GCCallbacksScope scope(this);
1120 if (scope.CheckReenter()) {
1121 AllowHeapAllocation allow_allocation;
1122 GCTracer::Scope scope(tracer(), GCTracer::Scope::EXTERNAL);
1123 VMState<EXTERNAL> state(isolate_);
1124 HandleScope handle_scope(isolate_);
1125 CallGCEpilogueCallbacks(gc_type, gc_callback_flags);
1130 if (FLAG_verify_heap) {
1131 VerifyStringTable(this);
1135 return freed_global_handles > 0;
1139 void Heap::CallGCPrologueCallbacks(GCType gc_type, GCCallbackFlags flags) {
1140 for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
1141 if (gc_type & gc_prologue_callbacks_[i].gc_type) {
1142 if (!gc_prologue_callbacks_[i].pass_isolate_) {
1143 v8::GCPrologueCallback callback =
1144 reinterpret_cast<v8::GCPrologueCallback>(
1145 gc_prologue_callbacks_[i].callback);
1146 callback(gc_type, flags);
1148 v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(this->isolate());
1149 gc_prologue_callbacks_[i].callback(isolate, gc_type, flags);
1156 void Heap::CallGCEpilogueCallbacks(GCType gc_type,
1157 GCCallbackFlags gc_callback_flags) {
1158 for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
1159 if (gc_type & gc_epilogue_callbacks_[i].gc_type) {
1160 if (!gc_epilogue_callbacks_[i].pass_isolate_) {
1161 v8::GCPrologueCallback callback =
1162 reinterpret_cast<v8::GCPrologueCallback>(
1163 gc_epilogue_callbacks_[i].callback);
1164 callback(gc_type, gc_callback_flags);
1166 v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(this->isolate());
1167 gc_epilogue_callbacks_[i].callback(isolate, gc_type, gc_callback_flags);
1174 void Heap::MarkCompact() {
1175 gc_state_ = MARK_COMPACT;
1176 LOG(isolate_, ResourceEvent("markcompact", "begin"));
1178 uint64_t size_of_objects_before_gc = SizeOfObjects();
1180 mark_compact_collector_.Prepare();
1184 MarkCompactPrologue();
1186 mark_compact_collector_.CollectGarbage();
1188 LOG(isolate_, ResourceEvent("markcompact", "end"));
1190 gc_state_ = NOT_IN_GC;
1192 isolate_->counters()->objs_since_last_full()->Set(0);
1194 flush_monomorphic_ics_ = false;
1196 if (FLAG_allocation_site_pretenuring) {
1197 EvaluateOldSpaceLocalPretenuring(size_of_objects_before_gc);
1202 void Heap::MarkCompactPrologue() {
1203   // At any old GC, clear the keyed lookup cache to enable collection of unused maps.
1205 isolate_->keyed_lookup_cache()->Clear();
1206 isolate_->context_slot_cache()->Clear();
1207 isolate_->descriptor_lookup_cache()->Clear();
1208 RegExpResultsCache::Clear(string_split_cache());
1209 RegExpResultsCache::Clear(regexp_multiple_cache());
1211 isolate_->compilation_cache()->MarkCompactPrologue();
1213 CompletelyClearInstanceofCache();
1215 FlushNumberStringCache();
1216 if (FLAG_cleanup_code_caches_at_gc) {
1217 polymorphic_code_cache()->set_cache(undefined_value());
1220 ClearNormalizedMapCaches();
1224 // Helper class for copying HeapObjects
1225 class ScavengeVisitor : public ObjectVisitor {
1227 explicit ScavengeVisitor(Heap* heap) : heap_(heap) {}
1229 void VisitPointer(Object** p) { ScavengePointer(p); }
1231 void VisitPointers(Object** start, Object** end) {
1232 // Copy all HeapObject pointers in [start, end)
1233 for (Object** p = start; p < end; p++) ScavengePointer(p);
1237 void ScavengePointer(Object** p) {
1238 Object* object = *p;
1239 if (!heap_->InNewSpace(object)) return;
1240 Heap::ScavengeObject(reinterpret_cast<HeapObject**>(p),
1241 reinterpret_cast<HeapObject*>(object));
1249 // Visitor class to verify that pointers in code or data space do not point into new space.
1251 class VerifyNonPointerSpacePointersVisitor : public ObjectVisitor {
1253 explicit VerifyNonPointerSpacePointersVisitor(Heap* heap) : heap_(heap) {}
1254 void VisitPointers(Object** start, Object** end) {
1255 for (Object** current = start; current < end; current++) {
1256 if ((*current)->IsHeapObject()) {
1257 CHECK(!heap_->InNewSpace(HeapObject::cast(*current)));
1267 static void VerifyNonPointerSpacePointers(Heap* heap) {
1268 // Verify that there are no pointers to new space in spaces where we
1269 // do not expect them.
1270 VerifyNonPointerSpacePointersVisitor v(heap);
1271 HeapObjectIterator code_it(heap->code_space());
1272 for (HeapObject* object = code_it.Next(); object != NULL;
1273 object = code_it.Next())
1274 object->Iterate(&v);
1276 HeapObjectIterator data_it(heap->old_data_space());
1277 for (HeapObject* object = data_it.Next(); object != NULL;
1278 object = data_it.Next())
1279 object->Iterate(&v);
1281 #endif // VERIFY_HEAP
1284 void Heap::CheckNewSpaceExpansionCriteria() {
1285 if (new_space_.TotalCapacity() < new_space_.MaximumCapacity() &&
1286 survived_since_last_expansion_ > new_space_.TotalCapacity()) {
1287 // Grow the size of new space if there is room to grow, enough data
1288 // has survived scavenge since the last expansion and we are not in
1289 // high promotion mode.
1291 survived_since_last_expansion_ = 0;
1296 static bool IsUnscavengedHeapObject(Heap* heap, Object** p) {
1297 return heap->InNewSpace(*p) &&
1298 !HeapObject::cast(*p)->map_word().IsForwardingAddress();
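// During a scavenge, every object that has been copied out of from-space has
// its map word overwritten with a forwarding address. An object that is
// still in new space without a forwarding address has therefore not been
// reached from the roots yet, i.e. it is "unscavenged".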
1302 void Heap::ScavengeStoreBufferCallback(Heap* heap, MemoryChunk* page,
1303 StoreBufferEvent event) {
1304 heap->store_buffer_rebuilder_.Callback(page, event);
1308 void StoreBufferRebuilder::Callback(MemoryChunk* page, StoreBufferEvent event) {
1309 if (event == kStoreBufferStartScanningPagesEvent) {
1310 start_of_current_page_ = NULL;
1311 current_page_ = NULL;
1312 } else if (event == kStoreBufferScanningPageEvent) {
1313 if (current_page_ != NULL) {
1314       // Check whether this page already overflowed the store buffer during this iteration.
1315 if (current_page_->scan_on_scavenge()) {
1316         // If so, wipe out the entries that have been added for it.
1317 store_buffer_->SetTop(start_of_current_page_);
1318 } else if (store_buffer_->Top() - start_of_current_page_ >=
1319 (store_buffer_->Limit() - store_buffer_->Top()) >> 2) {
1320         // Did we find too many pointers in the previous page? The heuristic is
1321         // that no page can take more than 1/5 of the remaining slots in the store buffer.
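        // In the check above, (Top - start_of_current_page_) is the number of
        // slots this page consumed and (Limit - Top) >> 2 is a quarter of the
        // slots still free; requiring consumed >= free/4 is the same as
        // requiring that the page consumed at least one fifth of the slots
        // that were free when its scan started, which is where the 1/5 figure
        // comes from.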
1323 current_page_->set_scan_on_scavenge(true);
1324 store_buffer_->SetTop(start_of_current_page_);
1326 // In this case the page we scanned took a reasonable number of slots in
1327 // the store buffer. It has now been rehabilitated and is no longer
1328 // marked scan_on_scavenge.
1329 DCHECK(!current_page_->scan_on_scavenge());
1332 start_of_current_page_ = store_buffer_->Top();
1333 current_page_ = page;
1334 } else if (event == kStoreBufferFullEvent) {
1335 // The current page overflowed the store buffer again. Wipe out its entries
1336 // in the store buffer and mark it scan-on-scavenge again. This may happen
1337 // several times while scanning.
1338 if (current_page_ == NULL) {
1339 // Store Buffer overflowed while scanning promoted objects. These are not
1340 // in any particular page, though they are likely to be clustered by the
1341 // allocation routines.
1342 store_buffer_->EnsureSpace(StoreBuffer::kStoreBufferSize / 2);
1344 // Store Buffer overflowed while scanning a particular old space page for
1345 // pointers to new space.
1346 DCHECK(current_page_ == page);
1347 DCHECK(page != NULL);
1348 current_page_->set_scan_on_scavenge(true);
1349 DCHECK(start_of_current_page_ != store_buffer_->Top());
1350 store_buffer_->SetTop(start_of_current_page_);
1358 void PromotionQueue::Initialize() {
1359   // Assumes that a NewSpacePage exactly fits a whole number of promotion queue
1360   // entries (where each is a pair of intptr_t). This allows us to simplify
1361   // the test for when to switch pages.
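  // Each queue entry is a pair of intptr_t-sized words (an object size and an
  // object pointer), which is what the DCHECK below verifies: the usable body
  // of a page must divide evenly into such pairs.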
1362 DCHECK((Page::kPageSize - MemoryChunk::kBodyOffset) % (2 * kPointerSize) ==
1364 limit_ = reinterpret_cast<intptr_t*>(heap_->new_space()->ToSpaceStart());
1366 reinterpret_cast<intptr_t*>(heap_->new_space()->ToSpaceEnd());
1367 emergency_stack_ = NULL;
1371 void PromotionQueue::RelocateQueueHead() {
1372 DCHECK(emergency_stack_ == NULL);
1374 Page* p = Page::FromAllocationTop(reinterpret_cast<Address>(rear_));
1375 intptr_t* head_start = rear_;
1376 intptr_t* head_end = Min(front_, reinterpret_cast<intptr_t*>(p->area_end()));
1379 static_cast<int>(head_end - head_start) / kEntrySizeInWords;
1381 emergency_stack_ = new List<Entry>(2 * entries_count);
1383 while (head_start != head_end) {
1384 int size = static_cast<int>(*(head_start++));
1385 HeapObject* obj = reinterpret_cast<HeapObject*>(*(head_start++));
1386 emergency_stack_->Add(Entry(obj, size));
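  // The promotion queue itself lives in the unused words at the top of
  // to-space (see the comment in Scavenge()). RelocateQueueHead copies the
  // entries on the current page out into a heap-allocated emergency stack,
  // presumably so that to-space allocation can proceed through that page
  // without clobbering pending queue entries.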
1392 class ScavengeWeakObjectRetainer : public WeakObjectRetainer {
1394 explicit ScavengeWeakObjectRetainer(Heap* heap) : heap_(heap) {}
1396 virtual Object* RetainAs(Object* object) {
1397 if (!heap_->InFromSpace(object)) {
1401 MapWord map_word = HeapObject::cast(object)->map_word();
1402 if (map_word.IsForwardingAddress()) {
1403 return map_word.ToForwardingAddress();
1413 void Heap::Scavenge() {
1414 RelocationLock relocation_lock(this);
1417 if (FLAG_verify_heap) VerifyNonPointerSpacePointers(this);
1420 gc_state_ = SCAVENGE;
1422 // Implements Cheney's copying algorithm
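  // In outline: live objects reachable from the roots are copied from
  // from-space into to-space (or promoted), and the region between a scan
  // pointer (new_space_front below) and the to-space allocation pointer acts
  // as a worklist; DoScavenge advances the scan pointer over freshly copied
  // objects, copying anything they reference, until the two pointers meet.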
1423 LOG(isolate_, ResourceEvent("scavenge", "begin"));
1425 // Clear descriptor cache.
1426 isolate_->descriptor_lookup_cache()->Clear();
1428 // Used for updating survived_since_last_expansion_ at function end.
1429 intptr_t survived_watermark = PromotedSpaceSizeOfObjects();
1431 SelectScavengingVisitorsTable();
1433 incremental_marking()->PrepareForScavenge();
1435   // Flip the semispaces. After flipping, to space is empty and from space holds the live objects.
1438 new_space_.ResetAllocationInfo();
1440 // We need to sweep newly copied objects which can be either in the
1441 // to space or promoted to the old generation. For to-space
1442 // objects, we treat the bottom of the to space as a queue. Newly
1443 // copied and unswept objects lie between a 'front' mark and the
1444 // allocation pointer.
1446 // Promoted objects can go into various old-generation spaces, and
1447 // can be allocated internally in the spaces (from the free list).
1448 // We treat the top of the to space as a queue of addresses of
1449 // promoted objects. The addresses of newly promoted and unswept
1450 // objects lie between a 'front' mark and a 'rear' mark that is
1451 // updated as a side effect of promoting an object.
1453 // There is guaranteed to be enough room at the top of the to space
1454 // for the addresses of promoted objects: every object promoted
1455 // frees up its size in bytes from the top of the new space, and
1456 // objects are at least one pointer in size.
1457 Address new_space_front = new_space_.ToSpaceStart();
1458 promotion_queue_.Initialize();
1461 store_buffer()->Clean();
1464 ScavengeVisitor scavenge_visitor(this);
1466 IterateRoots(&scavenge_visitor, VISIT_ALL_IN_SCAVENGE);
1468 // Copy objects reachable from the old generation.
1470 StoreBufferRebuildScope scope(this, store_buffer(),
1471 &ScavengeStoreBufferCallback);
1472 store_buffer()->IteratePointersToNewSpace(&ScavengeObject);
1475   // Copy objects reachable from simple cells by scavenging cell values directly.
1477 HeapObjectIterator cell_iterator(cell_space_);
1478 for (HeapObject* heap_object = cell_iterator.Next(); heap_object != NULL;
1479 heap_object = cell_iterator.Next()) {
1480 if (heap_object->IsCell()) {
1481 Cell* cell = Cell::cast(heap_object);
1482 Address value_address = cell->ValueAddress();
1483 scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(value_address));
1487 // Copy objects reachable from global property cells by scavenging global
1488 // property cell values directly.
1489 HeapObjectIterator js_global_property_cell_iterator(property_cell_space_);
1490 for (HeapObject* heap_object = js_global_property_cell_iterator.Next();
1491 heap_object != NULL;
1492 heap_object = js_global_property_cell_iterator.Next()) {
1493 if (heap_object->IsPropertyCell()) {
1494 PropertyCell* cell = PropertyCell::cast(heap_object);
1495 Address value_address = cell->ValueAddress();
1496 scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(value_address));
1497 Address type_address = cell->TypeAddress();
1498 scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(type_address));
1502 // Copy objects reachable from the encountered weak collections list.
1503 scavenge_visitor.VisitPointer(&encountered_weak_collections_);
1505 // Copy objects reachable from the code flushing candidates list.
1506 MarkCompactCollector* collector = mark_compact_collector();
1507 if (collector->is_code_flushing_enabled()) {
1508 collector->code_flusher()->IteratePointersToFromSpace(&scavenge_visitor);
1511 new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
1513 while (isolate()->global_handles()->IterateObjectGroups(
1514 &scavenge_visitor, &IsUnscavengedHeapObject)) {
1515 new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
1517 isolate()->global_handles()->RemoveObjectGroups();
1518 isolate()->global_handles()->RemoveImplicitRefGroups();
1520 isolate_->global_handles()->IdentifyNewSpaceWeakIndependentHandles(
1521 &IsUnscavengedHeapObject);
1522 isolate_->global_handles()->IterateNewSpaceWeakIndependentRoots(
1524 new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
1526 UpdateNewSpaceReferencesInExternalStringTable(
1527 &UpdateNewSpaceReferenceInExternalStringTableEntry);
1529 promotion_queue_.Destroy();
1531 incremental_marking()->UpdateMarkingDequeAfterScavenge();
1533 ScavengeWeakObjectRetainer weak_object_retainer(this);
1534 ProcessWeakReferences(&weak_object_retainer);
1536 DCHECK(new_space_front == new_space_.top());
1539 new_space_.set_age_mark(new_space_.top());
1541 new_space_.LowerInlineAllocationLimit(
1542 new_space_.inline_allocation_limit_step());
1544 // Update how much has survived scavenge.
1545 IncrementYoungSurvivorsCounter(static_cast<int>(
1546 (PromotedSpaceSizeOfObjects() - survived_watermark) + new_space_.Size()));
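  // The survived byte count is the growth of the promoted (old generation)
  // space during this scavenge plus whatever is still live in new space after
  // the flip; it presumably feeds survived_since_last_expansion_, which
  // CheckNewSpaceExpansionCriteria uses to decide when to grow new space.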
1548 LOG(isolate_, ResourceEvent("scavenge", "end"));
1550 gc_state_ = NOT_IN_GC;
1554 String* Heap::UpdateNewSpaceReferenceInExternalStringTableEntry(Heap* heap,
1556 MapWord first_word = HeapObject::cast(*p)->map_word();
1558 if (!first_word.IsForwardingAddress()) {
1559 // Unreachable external string can be finalized.
1560 heap->FinalizeExternalString(String::cast(*p));
1564 // String is still reachable.
1565 return String::cast(first_word.ToForwardingAddress());
1569 void Heap::UpdateNewSpaceReferencesInExternalStringTable(
1570 ExternalStringTableUpdaterCallback updater_func) {
1572 if (FLAG_verify_heap) {
1573 external_string_table_.Verify();
1577 if (external_string_table_.new_space_strings_.is_empty()) return;
1579 Object** start = &external_string_table_.new_space_strings_[0];
1580 Object** end = start + external_string_table_.new_space_strings_.length();
1581 Object** last = start;
1583 for (Object** p = start; p < end; ++p) {
1584 DCHECK(InFromSpace(*p));
1585 String* target = updater_func(this, p);
1587 if (target == NULL) continue;
1589 DCHECK(target->IsExternalString());
1591 if (InNewSpace(target)) {
1592 // String is still in new space. Update the table entry.
1596 // String got promoted. Move it to the old string list.
1597 external_string_table_.AddOldString(target);
1601 DCHECK(last <= end);
1602 external_string_table_.ShrinkNewStrings(static_cast<int>(last - start));
1606 void Heap::UpdateReferencesInExternalStringTable(
1607 ExternalStringTableUpdaterCallback updater_func) {
1608 // Update old space string references.
1609 if (external_string_table_.old_space_strings_.length() > 0) {
1610 Object** start = &external_string_table_.old_space_strings_[0];
1611 Object** end = start + external_string_table_.old_space_strings_.length();
1612 for (Object** p = start; p < end; ++p) *p = updater_func(this, p);
1615 UpdateNewSpaceReferencesInExternalStringTable(updater_func);
1619 void Heap::ProcessWeakReferences(WeakObjectRetainer* retainer) {
1620 ProcessArrayBuffers(retainer);
1621 ProcessNativeContexts(retainer);
1622 // TODO(mvstanton): AllocationSites only need to be processed during
1623 // MARK_COMPACT, as they live in old space. Verify and address.
1624 ProcessAllocationSites(retainer);
1628 void Heap::ProcessNativeContexts(WeakObjectRetainer* retainer) {
1629 Object* head = VisitWeakList<Context>(this, native_contexts_list(), retainer);
1630 // Update the head of the list of contexts.
1631 set_native_contexts_list(head);
1635 void Heap::ProcessArrayBuffers(WeakObjectRetainer* retainer) {
1636 Object* array_buffer_obj =
1637 VisitWeakList<JSArrayBuffer>(this, array_buffers_list(), retainer);
1638 set_array_buffers_list(array_buffer_obj);
1642 void Heap::TearDownArrayBuffers() {
1643 Object* undefined = undefined_value();
1644 for (Object* o = array_buffers_list(); o != undefined;) {
1645 JSArrayBuffer* buffer = JSArrayBuffer::cast(o);
1646 Runtime::FreeArrayBuffer(isolate(), buffer);
1647 o = buffer->weak_next();
1649 set_array_buffers_list(undefined);
1653 void Heap::ProcessAllocationSites(WeakObjectRetainer* retainer) {
1654 Object* allocation_site_obj =
1655 VisitWeakList<AllocationSite>(this, allocation_sites_list(), retainer);
1656 set_allocation_sites_list(allocation_site_obj);
1660 void Heap::ResetAllAllocationSitesDependentCode(PretenureFlag flag) {
1661 DisallowHeapAllocation no_allocation_scope;
1662 Object* cur = allocation_sites_list();
1663 bool marked = false;
1664 while (cur->IsAllocationSite()) {
1665 AllocationSite* casted = AllocationSite::cast(cur);
1666 if (casted->GetPretenureMode() == flag) {
1667 casted->ResetPretenureDecision();
1668 casted->set_deopt_dependent_code(true);
1671 cur = casted->weak_next();
1673 if (marked) isolate_->stack_guard()->RequestDeoptMarkedAllocationSites();
1677 void Heap::EvaluateOldSpaceLocalPretenuring(
1678 uint64_t size_of_objects_before_gc) {
1679 uint64_t size_of_objects_after_gc = SizeOfObjects();
1680 double old_generation_survival_rate =
1681 (static_cast<double>(size_of_objects_after_gc) * 100) /
1682 static_cast<double>(size_of_objects_before_gc);
1684 if (old_generation_survival_rate < kOldSurvivalRateLowThreshold) {
1685     // Too many objects died in the old generation; mispredicted pretenuring
1686     // of allocation sites may be the cause. We have to deopt all
1687     // dependent code registered in the allocation sites to re-evaluate
1688     // our pretenuring decisions.
1689 ResetAllAllocationSitesDependentCode(TENURED);
1690 if (FLAG_trace_pretenuring) {
1692 "Deopt all allocation sites dependent code due to low survival "
1693 "rate in the old generation %f\n",
1694 old_generation_survival_rate);
1700 void Heap::VisitExternalResources(v8::ExternalResourceVisitor* visitor) {
1701 DisallowHeapAllocation no_allocation;
1702 // All external strings are listed in the external string table.
1704 class ExternalStringTableVisitorAdapter : public ObjectVisitor {
1706 explicit ExternalStringTableVisitorAdapter(
1707 v8::ExternalResourceVisitor* visitor)
1708 : visitor_(visitor) {}
1709 virtual void VisitPointers(Object** start, Object** end) {
1710 for (Object** p = start; p < end; p++) {
1711 DCHECK((*p)->IsExternalString());
1712 visitor_->VisitExternalString(
1713 Utils::ToLocal(Handle<String>(String::cast(*p))));
1718 v8::ExternalResourceVisitor* visitor_;
1719 } external_string_table_visitor(visitor);
1721 external_string_table_.Iterate(&external_string_table_visitor);
1725 class NewSpaceScavenger : public StaticNewSpaceVisitor<NewSpaceScavenger> {
1727 static inline void VisitPointer(Heap* heap, Object** p) {
1728 Object* object = *p;
1729 if (!heap->InNewSpace(object)) return;
1730 Heap::ScavengeObject(reinterpret_cast<HeapObject**>(p),
1731 reinterpret_cast<HeapObject*>(object));
1736 Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
1737 Address new_space_front) {
1739 SemiSpace::AssertValidRange(new_space_front, new_space_.top());
1740     // The addresses new_space_front and new_space_.top() define a
1741     // queue of unprocessed copied objects. Process them until the queue is empty.
1743 while (new_space_front != new_space_.top()) {
1744 if (!NewSpacePage::IsAtEnd(new_space_front)) {
1745 HeapObject* object = HeapObject::FromAddress(new_space_front);
1747 NewSpaceScavenger::IterateBody(object->map(), object);
1750 NewSpacePage::FromLimit(new_space_front)->next_page()->area_start();
1754 // Promote and process all the to-be-promoted objects.
1756 StoreBufferRebuildScope scope(this, store_buffer(),
1757 &ScavengeStoreBufferCallback);
1758 while (!promotion_queue()->is_empty()) {
1761 promotion_queue()->remove(&target, &size);
1763       // A promoted object might already have been partially visited
1764       // during old space pointer iteration. Thus we search specifically
1765       // for pointers into the from-semispace instead of looking for pointers to new space.
1767 DCHECK(!target->IsMap());
1768 IterateAndMarkPointersToFromSpace(
1769 target->address(), target->address() + size, &ScavengeObject);
1773 // Take another spin if there are now unswept objects in new space
1774 // (there are currently no more unswept promoted objects).
1775 } while (new_space_front != new_space_.top());
1777 return new_space_front;
1781 STATIC_ASSERT((FixedDoubleArray::kHeaderSize & kDoubleAlignmentMask) ==
1783 STATIC_ASSERT((ConstantPoolArray::kFirstEntryOffset & kDoubleAlignmentMask) ==
1785 STATIC_ASSERT((ConstantPoolArray::kExtendedFirstOffset &
1786 kDoubleAlignmentMask) == 0); // NOLINT
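// These asserts guarantee that the double payload of each of these object
// types starts at a double-aligned offset from the object start, so
// double-aligning the object start (see EnsureDoubleAligned below) is enough
// to align the payload itself.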
1789 INLINE(static HeapObject* EnsureDoubleAligned(Heap* heap, HeapObject* object,
1790 int size));
1792 static HeapObject* EnsureDoubleAligned(Heap* heap, HeapObject* object,
1793 int size) {
1794 if ((OffsetFrom(object->address()) & kDoubleAlignmentMask) != 0) {
1795 heap->CreateFillerObjectAt(object->address(), kPointerSize);
1796 return HeapObject::FromAddress(object->address() + kPointerSize);
1797 } else {
1798 heap->CreateFillerObjectAt(object->address() + size - kPointerSize,
1799 kPointerSize);
1800 return object;
1801 }
1805 enum LoggingAndProfiling {
1806 LOGGING_AND_PROFILING_ENABLED,
1807 LOGGING_AND_PROFILING_DISABLED
1811 enum MarksHandling { TRANSFER_MARKS, IGNORE_MARKS };
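// ScavengingVisitor is a table-driven copier for new-space objects. It is
// specialized on whether mark bits must be transferred to the copy (needed
// while incremental marking is active) and on whether logging/profiling hooks
// should fire; the four resulting tables are built in
// InitializeScavengingVisitorsTables() and selected per GC by
// Heap::SelectScavengingVisitorsTable().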
1814 template <MarksHandling marks_handling,
1815 LoggingAndProfiling logging_and_profiling_mode>
1816 class ScavengingVisitor : public StaticVisitorBase {
1817 public:
1818 static void Initialize() {
1819 table_.Register(kVisitSeqOneByteString, &EvacuateSeqOneByteString);
1820 table_.Register(kVisitSeqTwoByteString, &EvacuateSeqTwoByteString);
1821 table_.Register(kVisitShortcutCandidate, &EvacuateShortcutCandidate);
1822 table_.Register(kVisitByteArray, &EvacuateByteArray);
1823 table_.Register(kVisitFixedArray, &EvacuateFixedArray);
1824 table_.Register(kVisitFixedDoubleArray, &EvacuateFixedDoubleArray);
1825 table_.Register(kVisitFixedTypedArray, &EvacuateFixedTypedArray);
1826 table_.Register(kVisitFixedFloat64Array, &EvacuateFixedFloat64Array);
1828 table_.Register(
1829 kVisitNativeContext,
1830 &ObjectEvacuationStrategy<POINTER_OBJECT>::template VisitSpecialized<
1831 Context::kSize>);
1833 table_.Register(
1834 kVisitConsString,
1835 &ObjectEvacuationStrategy<POINTER_OBJECT>::template VisitSpecialized<
1836 ConsString::kSize>);
1838 table_.Register(
1839 kVisitSlicedString,
1840 &ObjectEvacuationStrategy<POINTER_OBJECT>::template VisitSpecialized<
1841 SlicedString::kSize>);
1843 table_.Register(
1844 kVisitSymbol,
1845 &ObjectEvacuationStrategy<POINTER_OBJECT>::template VisitSpecialized<
1846 Symbol::kSize>);
1848 table_.Register(
1849 kVisitSharedFunctionInfo,
1850 &ObjectEvacuationStrategy<POINTER_OBJECT>::template VisitSpecialized<
1851 SharedFunctionInfo::kSize>);
1853 table_.Register(kVisitJSWeakCollection,
1854 &ObjectEvacuationStrategy<POINTER_OBJECT>::Visit);
1856 table_.Register(kVisitJSArrayBuffer,
1857 &ObjectEvacuationStrategy<POINTER_OBJECT>::Visit);
1859 table_.Register(kVisitJSTypedArray,
1860 &ObjectEvacuationStrategy<POINTER_OBJECT>::Visit);
1862 table_.Register(kVisitJSDataView,
1863 &ObjectEvacuationStrategy<POINTER_OBJECT>::Visit);
1865 table_.Register(kVisitJSRegExp,
1866 &ObjectEvacuationStrategy<POINTER_OBJECT>::Visit);
1868 if (marks_handling == IGNORE_MARKS) {
1869 table_.Register(
1870 kVisitJSFunction,
1871 &ObjectEvacuationStrategy<POINTER_OBJECT>::template VisitSpecialized<
1872 JSFunction::kSize>);
1873 } else {
1874 table_.Register(kVisitJSFunction, &EvacuateJSFunction);
1875 }
1877 table_.RegisterSpecializations<ObjectEvacuationStrategy<DATA_OBJECT>,
1878 kVisitDataObject, kVisitDataObjectGeneric>();
1880 table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>,
1881 kVisitJSObject, kVisitJSObjectGeneric>();
1883 table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>,
1884 kVisitStruct, kVisitStructGeneric>();
1885 }
1887 static VisitorDispatchTable<ScavengingCallback>* GetTable() {
1888 return &table_;
1889 }
1891 private:
1892 enum ObjectContents { DATA_OBJECT, POINTER_OBJECT };
1894 static void RecordCopiedObject(Heap* heap, HeapObject* obj) {
1895 bool should_record = false;
1896 #ifdef DEBUG
1897 should_record = FLAG_heap_stats;
1898 #endif
1899 should_record = should_record || FLAG_log_gc;
1900 if (should_record) {
1901 if (heap->new_space()->Contains(obj)) {
1902 heap->new_space()->RecordAllocation(obj);
1903 } else {
1904 heap->new_space()->RecordPromotion(obj);
1905 }
1906 }
1907 }
1909 // Helper function used by CopyObject to copy a source object to an
1910 // allocated target object and update the forwarding pointer in the source
1911 // object. Returns the target object.
1912 INLINE(static void MigrateObject(Heap* heap, HeapObject* source,
1913 HeapObject* target, int size)) {
1914 // If we migrate into to-space, then the to-space top pointer should be
1915 // right after the target object. Incorporate double alignment
1916 // over-allocation.
1917 DCHECK(!heap->InToSpace(target) ||
1918 target->address() + size == heap->new_space()->top() ||
1919 target->address() + size + kPointerSize == heap->new_space()->top());
1921 // Make sure that we do not overwrite the promotion queue which is at
1922 // the end of to-space.
1923 DCHECK(!heap->InToSpace(target) ||
1924 heap->promotion_queue()->IsBelowPromotionQueue(
1925 heap->new_space()->top()));
1927 // Copy the content of source to target.
1928 heap->CopyBlock(target->address(), source->address(), size);
1930 // Set the forwarding address.
1931 source->set_map_word(MapWord::FromForwardingAddress(target));
1933 if (logging_and_profiling_mode == LOGGING_AND_PROFILING_ENABLED) {
1934 // Update NewSpace stats if necessary.
1935 RecordCopiedObject(heap, target);
1936 heap->OnMoveEvent(target, source, size);
1939 if (marks_handling == TRANSFER_MARKS) {
1940 if (Marking::TransferColor(source, target)) {
1941 MemoryChunk::IncrementLiveBytesFromGC(target->address(), size);
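// SemiSpaceCopyObject tries to keep the object in new space by copying it
// into to-space. It returns false if the to-space allocation fails (e.g. due
// to fragmentation), in which case the caller falls back to promotion.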
1946 template <int alignment>
1947 static inline bool SemiSpaceCopyObject(Map* map, HeapObject** slot,
1948 HeapObject* object, int object_size) {
1949 Heap* heap = map->GetHeap();
1951 int allocation_size = object_size;
1952 if (alignment != kObjectAlignment) {
1953 DCHECK(alignment == kDoubleAlignment);
1954 allocation_size += kPointerSize;
1957 DCHECK(heap->AllowedToBeMigrated(object, NEW_SPACE));
1958 AllocationResult allocation =
1959 heap->new_space()->AllocateRaw(allocation_size);
1961 HeapObject* target = NULL; // Initialization to please compiler.
1962 if (allocation.To(&target)) {
1963 // Order is important here: Set the promotion limit before storing a
1964 // filler for double alignment or migrating the object. Otherwise we
1965 // may end up overwriting promotion queue entries when we migrate the
1966 // object.
1967 heap->promotion_queue()->SetNewLimit(heap->new_space()->top());
1969 if (alignment != kObjectAlignment) {
1970 target = EnsureDoubleAligned(heap, target, allocation_size);
1971 }
1973 // Order is important: slot might be inside of the target if target
1974 // was allocated over a dead object and slot comes from the store
1975 // buffer.
1976 *slot = target;
1977 MigrateObject(heap, object, target, object_size);
1979 heap->IncrementSemiSpaceCopiedObjectSize(object_size);
1980 return true;
1981 }
1982 return false;
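// PromoteObject moves the object into old space: old data space for objects
// without pointers, old pointer space otherwise. Promoted pointer objects are
// pushed onto the promotion queue so their fields get scanned for remaining
// new-space references; for JSFunctions only the non-weak prefix is scanned.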
1986 template <ObjectContents object_contents, int alignment>
1987 static inline bool PromoteObject(Map* map, HeapObject** slot,
1988 HeapObject* object, int object_size) {
1989 Heap* heap = map->GetHeap();
1991 int allocation_size = object_size;
1992 if (alignment != kObjectAlignment) {
1993 DCHECK(alignment == kDoubleAlignment);
1994 allocation_size += kPointerSize;
1997 AllocationResult allocation;
1998 if (object_contents == DATA_OBJECT) {
1999 DCHECK(heap->AllowedToBeMigrated(object, OLD_DATA_SPACE));
2000 allocation = heap->old_data_space()->AllocateRaw(allocation_size);
2002 DCHECK(heap->AllowedToBeMigrated(object, OLD_POINTER_SPACE));
2003 allocation = heap->old_pointer_space()->AllocateRaw(allocation_size);
2006 HeapObject* target = NULL; // Initialization to please compiler.
2007 if (allocation.To(&target)) {
2008 if (alignment != kObjectAlignment) {
2009 target = EnsureDoubleAligned(heap, target, allocation_size);
2012 // Order is important: slot might be inside of the target if target
2013 // was allocated over a dead object and slot comes from the store
2014 // buffer.
2015 *slot = target;
2016 MigrateObject(heap, object, target, object_size);
2018 if (object_contents == POINTER_OBJECT) {
2019 if (map->instance_type() == JS_FUNCTION_TYPE) {
2020 heap->promotion_queue()->insert(target,
2021 JSFunction::kNonWeakFieldsEndOffset);
2022 } else {
2023 heap->promotion_queue()->insert(target, object_size);
2024 }
2025 }
2026 heap->IncrementPromotedObjectsSize(object_size);
2027 return true;
2028 }
2029 return false;
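// EvacuateObject chooses between the two strategies above: objects that have
// not yet survived enough scavenges (ShouldBePromoted() is false) are copied
// within new space, everything else is promoted. Either path may fail when
// the target space is full, in which case the other one is tried.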
2033 template <ObjectContents object_contents, int alignment>
2034 static inline void EvacuateObject(Map* map, HeapObject** slot,
2035 HeapObject* object, int object_size) {
2036 SLOW_DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
2037 SLOW_DCHECK(object->Size() == object_size);
2038 Heap* heap = map->GetHeap();
2040 if (!heap->ShouldBePromoted(object->address(), object_size)) {
2041 // A semi-space copy may fail due to fragmentation. In that case, we
2042 // try to promote the object.
2043 if (SemiSpaceCopyObject<alignment>(map, slot, object, object_size)) {
2044 return;
2045 }
2046 }
2048 if (PromoteObject<object_contents, alignment>(map, slot, object,
2049 object_size)) {
2050 return;
2051 }
2053 // If promotion failed, we try to copy the object to the other semi-space
2054 if (SemiSpaceCopyObject<alignment>(map, slot, object, object_size)) return;
2056 UNREACHABLE();
2060 static inline void EvacuateJSFunction(Map* map, HeapObject** slot,
2061 HeapObject* object) {
2062 ObjectEvacuationStrategy<POINTER_OBJECT>::template VisitSpecialized<
2063 JSFunction::kSize>(map, slot, object);
2065 MapWord map_word = object->map_word();
2066 DCHECK(map_word.IsForwardingAddress());
2067 HeapObject* target = map_word.ToForwardingAddress();
2069 MarkBit mark_bit = Marking::MarkBitFrom(target);
2070 if (Marking::IsBlack(mark_bit)) {
2071 // This object is black and it might not be rescanned by marker.
2072 // We should explicitly record code entry slot for compaction because
2073 // promotion queue processing (IterateAndMarkPointersToFromSpace) will
2074 // miss it as it is not HeapObject-tagged.
2075 Address code_entry_slot =
2076 target->address() + JSFunction::kCodeEntryOffset;
2077 Code* code = Code::cast(Code::GetObjectFromEntryAddress(code_entry_slot));
2078 map->GetHeap()->mark_compact_collector()->RecordCodeEntrySlot(
2079 code_entry_slot, code);
2084 static inline void EvacuateFixedArray(Map* map, HeapObject** slot,
2085 HeapObject* object) {
2086 int object_size = FixedArray::BodyDescriptor::SizeOf(map, object);
2087 EvacuateObject<POINTER_OBJECT, kObjectAlignment>(map, slot, object,
2088 object_size);
2092 static inline void EvacuateFixedDoubleArray(Map* map, HeapObject** slot,
2093 HeapObject* object) {
2094 int length = reinterpret_cast<FixedDoubleArray*>(object)->length();
2095 int object_size = FixedDoubleArray::SizeFor(length);
2096 EvacuateObject<DATA_OBJECT, kDoubleAlignment>(map, slot, object,
2097 object_size);
2101 static inline void EvacuateFixedTypedArray(Map* map, HeapObject** slot,
2102 HeapObject* object) {
2103 int object_size = reinterpret_cast<FixedTypedArrayBase*>(object)->size();
2104 EvacuateObject<DATA_OBJECT, kObjectAlignment>(map, slot, object,
2105 object_size);
2109 static inline void EvacuateFixedFloat64Array(Map* map, HeapObject** slot,
2110 HeapObject* object) {
2111 int object_size = reinterpret_cast<FixedFloat64Array*>(object)->size();
2112 EvacuateObject<DATA_OBJECT, kDoubleAlignment>(map, slot, object,
2113 object_size);
2117 static inline void EvacuateByteArray(Map* map, HeapObject** slot,
2118 HeapObject* object) {
2119 int object_size = reinterpret_cast<ByteArray*>(object)->ByteArraySize();
2120 EvacuateObject<DATA_OBJECT, kObjectAlignment>(map, slot, object,
2121 object_size);
2125 static inline void EvacuateSeqOneByteString(Map* map, HeapObject** slot,
2126 HeapObject* object) {
2127 int object_size = SeqOneByteString::cast(object)
2128 ->SeqOneByteStringSize(map->instance_type());
2129 EvacuateObject<DATA_OBJECT, kObjectAlignment>(map, slot, object,
2130 object_size);
2134 static inline void EvacuateSeqTwoByteString(Map* map, HeapObject** slot,
2135 HeapObject* object) {
2136 int object_size = SeqTwoByteString::cast(object)
2137 ->SeqTwoByteStringSize(map->instance_type());
2138 EvacuateObject<DATA_OBJECT, kObjectAlignment>(map, slot, object,
2139 object_size);
2143 static inline void EvacuateShortcutCandidate(Map* map, HeapObject** slot,
2144 HeapObject* object) {
2145 DCHECK(IsShortcutCandidate(map->instance_type()));
2147 Heap* heap = map->GetHeap();
2149 if (marks_handling == IGNORE_MARKS &&
2150 ConsString::cast(object)->unchecked_second() == heap->empty_string()) {
2151 HeapObject* first =
2152 HeapObject::cast(ConsString::cast(object)->unchecked_first());
2154 *slot = first;
2156 if (!heap->InNewSpace(first)) {
2157 object->set_map_word(MapWord::FromForwardingAddress(first));
2158 return;
2159 }
2161 MapWord first_word = first->map_word();
2162 if (first_word.IsForwardingAddress()) {
2163 HeapObject* target = first_word.ToForwardingAddress();
2165 *slot = target;
2166 object->set_map_word(MapWord::FromForwardingAddress(target));
2167 return;
2168 }
2170 heap->DoScavengeObject(first->map(), slot, first);
2171 object->set_map_word(MapWord::FromForwardingAddress(*slot));
2172 return;
2173 }
2175 int object_size = ConsString::kSize;
2176 EvacuateObject<POINTER_OBJECT, kObjectAlignment>(map, slot, object,
2177 object_size);
2180 template <ObjectContents object_contents>
2181 class ObjectEvacuationStrategy {
2183 template <int object_size>
2184 static inline void VisitSpecialized(Map* map, HeapObject** slot,
2185 HeapObject* object) {
2186 EvacuateObject<object_contents, kObjectAlignment>(map, slot, object,
2187 object_size);
2190 static inline void Visit(Map* map, HeapObject** slot, HeapObject* object) {
2191 int object_size = map->instance_size();
2192 EvacuateObject<object_contents, kObjectAlignment>(map, slot, object,
2193 object_size);
2197 static VisitorDispatchTable<ScavengingCallback> table_;
2201 template <MarksHandling marks_handling,
2202 LoggingAndProfiling logging_and_profiling_mode>
2203 VisitorDispatchTable<ScavengingCallback>
2204 ScavengingVisitor<marks_handling, logging_and_profiling_mode>::table_;
2207 static void InitializeScavengingVisitorsTables() {
2208 ScavengingVisitor<TRANSFER_MARKS,
2209 LOGGING_AND_PROFILING_DISABLED>::Initialize();
2210 ScavengingVisitor<IGNORE_MARKS, LOGGING_AND_PROFILING_DISABLED>::Initialize();
2211 ScavengingVisitor<TRANSFER_MARKS,
2212 LOGGING_AND_PROFILING_ENABLED>::Initialize();
2213 ScavengingVisitor<IGNORE_MARKS, LOGGING_AND_PROFILING_ENABLED>::Initialize();
2217 void Heap::SelectScavengingVisitorsTable() {
2218 bool logging_and_profiling =
2219 FLAG_verify_predictable || isolate()->logger()->is_logging() ||
2220 isolate()->cpu_profiler()->is_profiling() ||
2221 (isolate()->heap_profiler() != NULL &&
2222 isolate()->heap_profiler()->is_tracking_object_moves());
2224 if (!incremental_marking()->IsMarking()) {
2225 if (!logging_and_profiling) {
2226 scavenging_visitors_table_.CopyFrom(ScavengingVisitor<
2227 IGNORE_MARKS, LOGGING_AND_PROFILING_DISABLED>::GetTable());
2228 } else {
2229 scavenging_visitors_table_.CopyFrom(ScavengingVisitor<
2230 IGNORE_MARKS, LOGGING_AND_PROFILING_ENABLED>::GetTable());
2231 }
2232 } else {
2233 if (!logging_and_profiling) {
2234 scavenging_visitors_table_.CopyFrom(ScavengingVisitor<
2235 TRANSFER_MARKS, LOGGING_AND_PROFILING_DISABLED>::GetTable());
2236 } else {
2237 scavenging_visitors_table_.CopyFrom(ScavengingVisitor<
2238 TRANSFER_MARKS, LOGGING_AND_PROFILING_ENABLED>::GetTable());
2239 }
2241 if (incremental_marking()->IsCompacting()) {
2242 // When compacting, forbid short-circuiting of cons strings.
2243 // Scavenging code relies on the fact that new space objects
2244 // can't be evacuated into an evacuation candidate, but
2245 // short-circuiting violates this assumption.
2246 scavenging_visitors_table_.Register(
2247 StaticVisitorBase::kVisitShortcutCandidate,
2248 scavenging_visitors_table_.GetVisitorById(
2249 StaticVisitorBase::kVisitConsString));
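// ScavengeObjectSlow is the out-of-line part of Heap::ScavengeObject: it is
// reached for from-space objects whose map word is not yet a forwarding
// address, and dispatches through the selected visitor table to copy or
// promote the object.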
2255 void Heap::ScavengeObjectSlow(HeapObject** p, HeapObject* object) {
2256 SLOW_DCHECK(object->GetIsolate()->heap()->InFromSpace(object));
2257 MapWord first_word = object->map_word();
2258 SLOW_DCHECK(!first_word.IsForwardingAddress());
2259 Map* map = first_word.ToMap();
2260 map->GetHeap()->DoScavengeObject(map, p, object);
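// AllocatePartialMap creates a map during bootstrapping, before the maps and
// arrays it would normally reference exist, which is why reinterpret_cast is
// used instead of Map::cast. Only the fields the GC and the allocator need
// are initialized here; CreateInitialMaps() patches in the descriptors, code
// cache, dependent code and prototype afterwards.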
2264 AllocationResult Heap::AllocatePartialMap(InstanceType instance_type,
2265 int instance_size) {
2266 Object* result = NULL;
2267 AllocationResult allocation = AllocateRaw(Map::kSize, MAP_SPACE, MAP_SPACE);
2268 if (!allocation.To(&result)) return allocation;
2270 // Map::cast cannot be used due to uninitialized map field.
2271 reinterpret_cast<Map*>(result)->set_map(raw_unchecked_meta_map());
2272 reinterpret_cast<Map*>(result)->set_instance_type(instance_type);
2273 reinterpret_cast<Map*>(result)->set_instance_size(instance_size);
2274 reinterpret_cast<Map*>(result)->set_visitor_id(
2275 StaticVisitorBase::GetVisitorId(instance_type, instance_size));
2276 reinterpret_cast<Map*>(result)->set_inobject_properties(0);
2277 reinterpret_cast<Map*>(result)->set_pre_allocated_property_fields(0);
2278 reinterpret_cast<Map*>(result)->set_unused_property_fields(0);
2279 reinterpret_cast<Map*>(result)->set_bit_field(0);
2280 reinterpret_cast<Map*>(result)->set_bit_field2(0);
2281 int bit_field3 = Map::EnumLengthBits::encode(kInvalidEnumCacheSentinel) |
2282 Map::OwnsDescriptors::encode(true);
2283 reinterpret_cast<Map*>(result)->set_bit_field3(bit_field3);
2284 return result;
2288 AllocationResult Heap::AllocateMap(InstanceType instance_type,
2289 int instance_size,
2290 ElementsKind elements_kind) {
2291 HeapObject* result;
2292 AllocationResult allocation = AllocateRaw(Map::kSize, MAP_SPACE, MAP_SPACE);
2293 if (!allocation.To(&result)) return allocation;
2295 result->set_map_no_write_barrier(meta_map());
2296 Map* map = Map::cast(result);
2297 map->set_instance_type(instance_type);
2298 map->set_visitor_id(
2299 StaticVisitorBase::GetVisitorId(instance_type, instance_size));
2300 map->set_prototype(null_value(), SKIP_WRITE_BARRIER);
2301 map->set_constructor(null_value(), SKIP_WRITE_BARRIER);
2302 map->set_instance_size(instance_size);
2303 map->set_inobject_properties(0);
2304 map->set_pre_allocated_property_fields(0);
2305 map->set_code_cache(empty_fixed_array(), SKIP_WRITE_BARRIER);
2306 map->set_dependent_code(DependentCode::cast(empty_fixed_array()),
2307 SKIP_WRITE_BARRIER);
2308 map->init_back_pointer(undefined_value());
2309 map->set_unused_property_fields(0);
2310 map->set_instance_descriptors(empty_descriptor_array());
2311 map->set_bit_field(0);
2312 map->set_bit_field2(1 << Map::kIsExtensible);
2313 int bit_field3 = Map::EnumLengthBits::encode(kInvalidEnumCacheSentinel) |
2314 Map::OwnsDescriptors::encode(true);
2315 map->set_bit_field3(bit_field3);
2316 map->set_elements_kind(elements_kind);
2318 return map;
2322 AllocationResult Heap::AllocateFillerObject(int size, bool double_align,
2323 AllocationSpace space) {
2324 HeapObject* obj;
2326 AllocationResult allocation = AllocateRaw(size, space, space);
2327 if (!allocation.To(&obj)) return allocation;
2329 #ifdef DEBUG
2330 MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
2331 DCHECK(chunk->owner()->identity() == space);
2332 #endif
2333 CreateFillerObjectAt(obj->address(), size);
2334 return obj;
2338 const Heap::StringTypeTable Heap::string_type_table[] = {
2339 #define STRING_TYPE_ELEMENT(type, size, name, camel_name) \
2340 { type, size, k##camel_name##MapRootIndex } \
2342 STRING_TYPE_LIST(STRING_TYPE_ELEMENT)
2343 #undef STRING_TYPE_ELEMENT
2347 const Heap::ConstantStringTable Heap::constant_string_table[] = {
2348 #define CONSTANT_STRING_ELEMENT(name, contents) \
2349 { contents, k##name##RootIndex } \
2351 INTERNALIZED_STRING_LIST(CONSTANT_STRING_ELEMENT)
2352 #undef CONSTANT_STRING_ELEMENT
2356 const Heap::StructTable Heap::struct_table[] = {
2357 #define STRUCT_TABLE_ELEMENT(NAME, Name, name) \
2358 { NAME##_TYPE, Name::kSize, k##Name##MapRootIndex } \
2360 STRUCT_LIST(STRUCT_TABLE_ELEMENT)
2361 #undef STRUCT_TABLE_ELEMENT
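// CreateInitialMaps bootstraps the map space: the meta map (a map whose map
// is itself) comes first, then partial maps for the objects needed to finish
// maps (fixed arrays, oddballs, constant pool arrays), then the empty arrays,
// after which the partial maps are completed and all remaining maps are
// allocated with the full AllocateMap().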
2365 bool Heap::CreateInitialMaps() {
2366 HeapObject* obj;
2368 AllocationResult allocation = AllocatePartialMap(MAP_TYPE, Map::kSize);
2369 if (!allocation.To(&obj)) return false;
2371 // Map::cast cannot be used due to uninitialized map field.
2372 Map* new_meta_map = reinterpret_cast<Map*>(obj);
2373 set_meta_map(new_meta_map);
2374 new_meta_map->set_map(new_meta_map);
2376 { // Partial map allocation
2377 #define ALLOCATE_PARTIAL_MAP(instance_type, size, field_name) \
2378 { \
2379 Map* map; \
2380 if (!AllocatePartialMap((instance_type), (size)).To(&map)) return false; \
2381 set_##field_name##_map(map); \
2382 }
2384 ALLOCATE_PARTIAL_MAP(FIXED_ARRAY_TYPE, kVariableSizeSentinel, fixed_array);
2385 ALLOCATE_PARTIAL_MAP(ODDBALL_TYPE, Oddball::kSize, undefined);
2386 ALLOCATE_PARTIAL_MAP(ODDBALL_TYPE, Oddball::kSize, null);
2387 ALLOCATE_PARTIAL_MAP(CONSTANT_POOL_ARRAY_TYPE, kVariableSizeSentinel,
2388 constant_pool_array);
2390 #undef ALLOCATE_PARTIAL_MAP
2393 // Allocate the empty array.
2395 AllocationResult allocation = AllocateEmptyFixedArray();
2396 if (!allocation.To(&obj)) return false;
2398 set_empty_fixed_array(FixedArray::cast(obj));
2401 AllocationResult allocation = Allocate(null_map(), OLD_POINTER_SPACE);
2402 if (!allocation.To(&obj)) return false;
2404 set_null_value(Oddball::cast(obj));
2405 Oddball::cast(obj)->set_kind(Oddball::kNull);
2408 AllocationResult allocation = Allocate(undefined_map(), OLD_POINTER_SPACE);
2409 if (!allocation.To(&obj)) return false;
2411 set_undefined_value(Oddball::cast(obj));
2412 Oddball::cast(obj)->set_kind(Oddball::kUndefined);
2413 DCHECK(!InNewSpace(undefined_value()));
2415 // Set preliminary exception sentinel value before actually initializing it.
2416 set_exception(null_value());
2418 // Allocate the empty descriptor array.
2420 AllocationResult allocation = AllocateEmptyFixedArray();
2421 if (!allocation.To(&obj)) return false;
2423 set_empty_descriptor_array(DescriptorArray::cast(obj));
2425 // Allocate the constant pool array.
2427 AllocationResult allocation = AllocateEmptyConstantPoolArray();
2428 if (!allocation.To(&obj)) return false;
2430 set_empty_constant_pool_array(ConstantPoolArray::cast(obj));
2432 // Fix the instance_descriptors for the existing maps.
2433 meta_map()->set_code_cache(empty_fixed_array());
2434 meta_map()->set_dependent_code(DependentCode::cast(empty_fixed_array()));
2435 meta_map()->init_back_pointer(undefined_value());
2436 meta_map()->set_instance_descriptors(empty_descriptor_array());
2438 fixed_array_map()->set_code_cache(empty_fixed_array());
2439 fixed_array_map()->set_dependent_code(
2440 DependentCode::cast(empty_fixed_array()));
2441 fixed_array_map()->init_back_pointer(undefined_value());
2442 fixed_array_map()->set_instance_descriptors(empty_descriptor_array());
2444 undefined_map()->set_code_cache(empty_fixed_array());
2445 undefined_map()->set_dependent_code(DependentCode::cast(empty_fixed_array()));
2446 undefined_map()->init_back_pointer(undefined_value());
2447 undefined_map()->set_instance_descriptors(empty_descriptor_array());
2449 null_map()->set_code_cache(empty_fixed_array());
2450 null_map()->set_dependent_code(DependentCode::cast(empty_fixed_array()));
2451 null_map()->init_back_pointer(undefined_value());
2452 null_map()->set_instance_descriptors(empty_descriptor_array());
2454 constant_pool_array_map()->set_code_cache(empty_fixed_array());
2455 constant_pool_array_map()->set_dependent_code(
2456 DependentCode::cast(empty_fixed_array()));
2457 constant_pool_array_map()->init_back_pointer(undefined_value());
2458 constant_pool_array_map()->set_instance_descriptors(empty_descriptor_array());
2460 // Fix prototype object for existing maps.
2461 meta_map()->set_prototype(null_value());
2462 meta_map()->set_constructor(null_value());
2464 fixed_array_map()->set_prototype(null_value());
2465 fixed_array_map()->set_constructor(null_value());
2467 undefined_map()->set_prototype(null_value());
2468 undefined_map()->set_constructor(null_value());
2470 null_map()->set_prototype(null_value());
2471 null_map()->set_constructor(null_value());
2473 constant_pool_array_map()->set_prototype(null_value());
2474 constant_pool_array_map()->set_constructor(null_value());
2477 #define ALLOCATE_MAP(instance_type, size, field_name) \
2478 { \
2479 Map* map; \
2480 if (!AllocateMap((instance_type), size).To(&map)) return false; \
2481 set_##field_name##_map(map); \
2482 }
2484 #define ALLOCATE_VARSIZE_MAP(instance_type, field_name) \
2485 ALLOCATE_MAP(instance_type, kVariableSizeSentinel, field_name)
2487 ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, fixed_cow_array)
2488 DCHECK(fixed_array_map() != fixed_cow_array_map());
2490 ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, scope_info)
2491 ALLOCATE_MAP(HEAP_NUMBER_TYPE, HeapNumber::kSize, heap_number)
2492 ALLOCATE_MAP(MUTABLE_HEAP_NUMBER_TYPE, HeapNumber::kSize,
2493 mutable_heap_number)
2494 ALLOCATE_MAP(SYMBOL_TYPE, Symbol::kSize, symbol)
2495 ALLOCATE_MAP(FOREIGN_TYPE, Foreign::kSize, foreign)
2497 ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, the_hole);
2498 ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, boolean);
2499 ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, uninitialized);
2500 ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, arguments_marker);
2501 ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, no_interceptor_result_sentinel);
2502 ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, exception);
2503 ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, termination_exception);
2505 for (unsigned i = 0; i < arraysize(string_type_table); i++) {
2506 const StringTypeTable& entry = string_type_table[i];
2508 AllocationResult allocation = AllocateMap(entry.type, entry.size);
2509 if (!allocation.To(&obj)) return false;
2511 // Mark cons string maps as unstable, because their objects can change
2512 // maps during GC.
2513 Map* map = Map::cast(obj);
2514 if (StringShape(entry.type).IsCons()) map->mark_unstable();
2515 roots_[entry.index] = map;
2518 ALLOCATE_VARSIZE_MAP(STRING_TYPE, undetectable_string)
2519 undetectable_string_map()->set_is_undetectable();
2521 ALLOCATE_VARSIZE_MAP(ONE_BYTE_STRING_TYPE, undetectable_one_byte_string);
2522 undetectable_one_byte_string_map()->set_is_undetectable();
2524 ALLOCATE_VARSIZE_MAP(FIXED_DOUBLE_ARRAY_TYPE, fixed_double_array)
2525 ALLOCATE_VARSIZE_MAP(BYTE_ARRAY_TYPE, byte_array)
2526 ALLOCATE_VARSIZE_MAP(FREE_SPACE_TYPE, free_space)
2528 #define ALLOCATE_EXTERNAL_ARRAY_MAP(Type, type, TYPE, ctype, size) \
2529 ALLOCATE_MAP(EXTERNAL_##TYPE##_ARRAY_TYPE, ExternalArray::kAlignedSize, \
2530 external_##type##_array)
2532 TYPED_ARRAYS(ALLOCATE_EXTERNAL_ARRAY_MAP)
2533 #undef ALLOCATE_EXTERNAL_ARRAY_MAP
2535 #define ALLOCATE_FIXED_TYPED_ARRAY_MAP(Type, type, TYPE, ctype, size) \
2536 ALLOCATE_VARSIZE_MAP(FIXED_##TYPE##_ARRAY_TYPE, fixed_##type##_array)
2538 TYPED_ARRAYS(ALLOCATE_FIXED_TYPED_ARRAY_MAP)
2539 #undef ALLOCATE_FIXED_TYPED_ARRAY_MAP
2541 ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, sloppy_arguments_elements)
2543 ALLOCATE_VARSIZE_MAP(CODE_TYPE, code)
2545 ALLOCATE_MAP(CELL_TYPE, Cell::kSize, cell)
2546 ALLOCATE_MAP(PROPERTY_CELL_TYPE, PropertyCell::kSize, global_property_cell)
2547 ALLOCATE_MAP(FILLER_TYPE, kPointerSize, one_pointer_filler)
2548 ALLOCATE_MAP(FILLER_TYPE, 2 * kPointerSize, two_pointer_filler)
2551 for (unsigned i = 0; i < arraysize(struct_table); i++) {
2552 const StructTable& entry = struct_table[i];
2553 Map* map;
2554 if (!AllocateMap(entry.type, entry.size).To(&map)) return false;
2555 roots_[entry.index] = map;
2558 ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, hash_table)
2559 ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, ordered_hash_table)
2561 ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, function_context)
2562 ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, catch_context)
2563 ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, with_context)
2564 ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, block_context)
2565 ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, module_context)
2566 ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, global_context)
2568 ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, native_context)
2569 native_context_map()->set_dictionary_map(true);
2570 native_context_map()->set_visitor_id(
2571 StaticVisitorBase::kVisitNativeContext);
2573 ALLOCATE_MAP(SHARED_FUNCTION_INFO_TYPE, SharedFunctionInfo::kAlignedSize,
2574 shared_function_info)
2576 ALLOCATE_MAP(JS_MESSAGE_OBJECT_TYPE, JSMessageObject::kSize, message_object)
2577 ALLOCATE_MAP(JS_OBJECT_TYPE, JSObject::kHeaderSize + kPointerSize, external)
2578 external_map()->set_is_extensible(false);
2579 #undef ALLOCATE_VARSIZE_MAP
2580 #undef ALLOCATE_MAP
2585 ByteArray* byte_array;
2586 if (!AllocateByteArray(0, TENURED).To(&byte_array)) return false;
2587 set_empty_byte_array(byte_array);
2590 #define ALLOCATE_EMPTY_EXTERNAL_ARRAY(Type, type, TYPE, ctype, size) \
2592 ExternalArray* obj; \
2593 if (!AllocateEmptyExternalArray(kExternal##Type##Array).To(&obj)) \
2594 return false; \
2595 set_empty_external_##type##_array(obj); \
2596 }
2598 TYPED_ARRAYS(ALLOCATE_EMPTY_EXTERNAL_ARRAY)
2599 #undef ALLOCATE_EMPTY_EXTERNAL_ARRAY
2601 #define ALLOCATE_EMPTY_FIXED_TYPED_ARRAY(Type, type, TYPE, ctype, size) \
2603 FixedTypedArrayBase* obj; \
2604 if (!AllocateEmptyFixedTypedArray(kExternal##Type##Array).To(&obj)) \
2605 return false; \
2606 set_empty_fixed_##type##_array(obj); \
2607 }
2609 TYPED_ARRAYS(ALLOCATE_EMPTY_FIXED_TYPED_ARRAY)
2610 #undef ALLOCATE_EMPTY_FIXED_TYPED_ARRAY
2612 DCHECK(!InNewSpace(empty_fixed_array()));
2613 return true;
2617 AllocationResult Heap::AllocateHeapNumber(double value, MutableMode mode,
2618 PretenureFlag pretenure) {
2619 // Statically ensure that it is safe to allocate heap numbers in paged
2620 // spaces.
2621 int size = HeapNumber::kSize;
2622 STATIC_ASSERT(HeapNumber::kSize <= Page::kMaxRegularHeapObjectSize);
2624 AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
2626 HeapObject* result;
2628 AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
2629 if (!allocation.To(&result)) return allocation;
2632 Map* map = mode == MUTABLE ? mutable_heap_number_map() : heap_number_map();
2633 HeapObject::cast(result)->set_map_no_write_barrier(map);
2634 HeapNumber::cast(result)->set_value(value);
2639 #define SIMD128_HEAP_ALLOCATE_FUNCTIONS(V) \
2640 V(Float32x4, float32x4) \
2641 V(Float64x2, float64x2) \
2645 #define DECLARE_SIMD_HEAP_ALLOCATE_FUNCTION(TYPE, type) \
2646 AllocationResult Heap::Allocate##TYPE(type##_value_t value, \
2647 PretenureFlag pretenure) { \
2648 STATIC_ASSERT(TYPE::kSize <= Page::kMaxRegularHeapObjectSize); \
2650 AllocationSpace space = \
2651 SelectSpace(TYPE::kSize, OLD_DATA_SPACE, pretenure); \
2653 HeapObject* result; \
2654 { AllocationResult allocation = \
2655 AllocateRaw(TYPE::kSize, space, OLD_DATA_SPACE); \
2656 if (!allocation.To(&result)) return allocation; \
2659 result->set_map_no_write_barrier( \
2660 isolate()->native_context()->type##_function()->initial_map()); \
2661 JSObject::cast(result)->set_properties(empty_fixed_array()); \
2662 JSObject::cast(result)->set_elements(empty_fixed_array()); \
2664 HeapObject* storage; \
2665 int storage_size = \
2666 FixedTypedArrayBase::kDataOffset + k##TYPE##Size; \
2667 space = SelectSpace(storage_size, OLD_DATA_SPACE, pretenure); \
2668 { AllocationResult allocation = \
2669 AllocateRaw(storage_size, space, OLD_DATA_SPACE); \
2670 if (!allocation.To(&storage)) return allocation; \
2674 *isolate()->factory()->fixed_##type##_array_map()); \
2675 FixedTypedArrayBase* elements = FixedTypedArrayBase::cast(storage); \
2676 elements->set_length(static_cast<int>(1)); \
2677 memset(elements->DataPtr(), 0, elements->DataSize()); \
2678 Fixed##TYPE##Array::cast(storage)->set(0, value); \
2679 TYPE::cast(result)->set_value(storage); \
2684 SIMD128_HEAP_ALLOCATE_FUNCTIONS(DECLARE_SIMD_HEAP_ALLOCATE_FUNCTION)
2687 AllocationResult Heap::AllocateCell(Object* value) {
2688 int size = Cell::kSize;
2689 STATIC_ASSERT(Cell::kSize <= Page::kMaxRegularHeapObjectSize);
2691 HeapObject* result;
2693 AllocationResult allocation = AllocateRaw(size, CELL_SPACE, CELL_SPACE);
2694 if (!allocation.To(&result)) return allocation;
2696 result->set_map_no_write_barrier(cell_map());
2697 Cell::cast(result)->set_value(value);
2702 AllocationResult Heap::AllocatePropertyCell() {
2703 int size = PropertyCell::kSize;
2704 STATIC_ASSERT(PropertyCell::kSize <= Page::kMaxRegularHeapObjectSize);
2706 HeapObject* result;
2707 AllocationResult allocation =
2708 AllocateRaw(size, PROPERTY_CELL_SPACE, PROPERTY_CELL_SPACE);
2709 if (!allocation.To(&result)) return allocation;
2711 result->set_map_no_write_barrier(global_property_cell_map());
2712 PropertyCell* cell = PropertyCell::cast(result);
2713 cell->set_dependent_code(DependentCode::cast(empty_fixed_array()),
2714 SKIP_WRITE_BARRIER);
2715 cell->set_value(the_hole_value());
2716 cell->set_type(HeapType::None());
2721 void Heap::CreateApiObjects() {
2722 HandleScope scope(isolate());
2723 Factory* factory = isolate()->factory();
2724 Handle<Map> new_neander_map =
2725 factory->NewMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
2727 // Don't use Smi-only elements optimizations for objects with the neander
2728 // map. There are too many cases where element values are set directly with a
2729 // bottleneck to trap the Smi-only -> fast elements transition, and there
2730 // appears to be no benefit in optimizing this case.
2731 new_neander_map->set_elements_kind(TERMINAL_FAST_ELEMENTS_KIND);
2732 set_neander_map(*new_neander_map);
2734 Handle<JSObject> listeners = factory->NewNeanderObject();
2735 Handle<FixedArray> elements = factory->NewFixedArray(2);
2736 elements->set(0, Smi::FromInt(0));
2737 listeners->set_elements(*elements);
2738 set_message_listeners(*listeners);
2742 void Heap::CreateJSEntryStub() {
2743 JSEntryStub stub(isolate(), StackFrame::ENTRY);
2744 set_js_entry_code(*stub.GetCode());
2748 void Heap::CreateJSConstructEntryStub() {
2749 JSEntryStub stub(isolate(), StackFrame::ENTRY_CONSTRUCT);
2750 set_js_construct_entry_code(*stub.GetCode());
2754 void Heap::CreateFixedStubs() {
2755 // Here we create roots for fixed stubs. They are needed at GC
2756 // for cooking and uncooking (check out frames.cc).
2757 // This eliminates the need for doing dictionary lookup in the
2758 // stub cache for these stubs.
2759 HandleScope scope(isolate());
2761 // Create stubs that should be there, so we don't unexpectedly have to
2762 // create them if we need them during the creation of another stub.
2763 // Stub creation mixes raw pointers and handles in an unsafe manner so
2764 // we cannot create stubs while we are creating stubs.
2765 CodeStub::GenerateStubsAheadOfTime(isolate());
2767 // MacroAssembler::Abort calls (usually enabled with --debug-code) depend on
2768 // CEntryStub, so we need to call GenerateStubsAheadOfTime before JSEntryStub
2769 // is created.
2771 // gcc-4.4 has problem generating correct code of following snippet:
2772 // { JSEntryStub stub;
2773 // js_entry_code_ = *stub.GetCode();
2775 // { JSConstructEntryStub stub;
2776 // js_construct_entry_code_ = *stub.GetCode();
2778 // To workaround the problem, make separate functions without inlining.
2779 Heap::CreateJSEntryStub();
2780 Heap::CreateJSConstructEntryStub();
2784 void Heap::CreateInitialObjects() {
2785 HandleScope scope(isolate());
2786 Factory* factory = isolate()->factory();
2788 // The -0 value must be set before NewNumber works.
2789 set_minus_zero_value(*factory->NewHeapNumber(-0.0, IMMUTABLE, TENURED));
2790 DCHECK(std::signbit(minus_zero_value()->Number()) != 0);
2792 set_nan_value(
2793 *factory->NewHeapNumber(base::OS::nan_value(), IMMUTABLE, TENURED));
2794 set_infinity_value(*factory->NewHeapNumber(V8_INFINITY, IMMUTABLE, TENURED));
2796 // The hole has not been created yet, but we want to put something
2797 // predictable in the gaps in the string table, so lets make that Smi zero.
2798 set_the_hole_value(reinterpret_cast<Oddball*>(Smi::FromInt(0)));
2800 // Allocate initial string table.
2801 set_string_table(*StringTable::New(isolate(), kInitialStringTableSize));
2803 // Finish initializing oddballs after creating the string table.
2804 Oddball::Initialize(isolate(), factory->undefined_value(), "undefined",
2805 factory->nan_value(), Oddball::kUndefined);
2807 // Initialize the null_value.
2808 Oddball::Initialize(isolate(), factory->null_value(), "null",
2809 handle(Smi::FromInt(0), isolate()), Oddball::kNull);
2811 set_true_value(*factory->NewOddball(factory->boolean_map(), "true",
2812 handle(Smi::FromInt(1), isolate()),
2813 Oddball::kTrue));
2815 set_false_value(*factory->NewOddball(factory->boolean_map(), "false",
2816 handle(Smi::FromInt(0), isolate()),
2817 Oddball::kFalse));
2819 set_the_hole_value(*factory->NewOddball(factory->the_hole_map(), "hole",
2820 handle(Smi::FromInt(-1), isolate()),
2821 Oddball::kTheHole));
2823 set_uninitialized_value(*factory->NewOddball(
2824 factory->uninitialized_map(), "uninitialized",
2825 handle(Smi::FromInt(-1), isolate()), Oddball::kUninitialized));
2827 set_arguments_marker(*factory->NewOddball(
2828 factory->arguments_marker_map(), "arguments_marker",
2829 handle(Smi::FromInt(-4), isolate()), Oddball::kArgumentMarker));
2831 set_no_interceptor_result_sentinel(*factory->NewOddball(
2832 factory->no_interceptor_result_sentinel_map(),
2833 "no_interceptor_result_sentinel", handle(Smi::FromInt(-2), isolate()),
2834 Oddball::kOther));
2836 set_termination_exception(*factory->NewOddball(
2837 factory->termination_exception_map(), "termination_exception",
2838 handle(Smi::FromInt(-3), isolate()), Oddball::kOther));
2840 set_exception(*factory->NewOddball(factory->exception_map(), "exception",
2841 handle(Smi::FromInt(-5), isolate()),
2842 Oddball::kException));
2844 for (unsigned i = 0; i < arraysize(constant_string_table); i++) {
2845 Handle<String> str =
2846 factory->InternalizeUtf8String(constant_string_table[i].contents);
2847 roots_[constant_string_table[i].index] = *str;
2850 // Allocate the hidden string which is used to identify the hidden properties
2851 // in JSObjects. The hash code has a special value so that it will not match
2852 // the empty string when searching for the property. It cannot be part of the
2853 // loop above because it needs to be allocated manually with the special
2854 // hash code in place. The hash code for the hidden_string is zero to ensure
2855 // that it will always be at the first entry in property descriptors.
2856 hidden_string_ = *factory->NewOneByteInternalizedString(
2857 OneByteVector("", 0), String::kEmptyStringHash);
2859 // Create the code_stubs dictionary. The initial size is set to avoid
2860 // expanding the dictionary during bootstrapping.
2861 set_code_stubs(*UnseededNumberDictionary::New(isolate(), 128));
2863 // Create the non_monomorphic_cache used in stub-cache.cc. The initial size
2864 // is set to avoid expanding the dictionary during bootstrapping.
2865 set_non_monomorphic_cache(*UnseededNumberDictionary::New(isolate(), 64));
2867 set_polymorphic_code_cache(PolymorphicCodeCache::cast(
2868 *factory->NewStruct(POLYMORPHIC_CODE_CACHE_TYPE)));
2870 set_instanceof_cache_function(Smi::FromInt(0));
2871 set_instanceof_cache_map(Smi::FromInt(0));
2872 set_instanceof_cache_answer(Smi::FromInt(0));
2876 // Allocate the dictionary of intrinsic function names.
2877 Handle<NameDictionary> intrinsic_names =
2878 NameDictionary::New(isolate(), Runtime::kNumFunctions, TENURED);
2879 Runtime::InitializeIntrinsicFunctionNames(isolate(), intrinsic_names);
2880 set_intrinsic_function_names(*intrinsic_names);
2882 set_number_string_cache(
2883 *factory->NewFixedArray(kInitialNumberStringCacheSize * 2, TENURED));
2885 // Allocate cache for single character one byte strings.
2886 set_single_character_string_cache(
2887 *factory->NewFixedArray(String::kMaxOneByteCharCode + 1, TENURED));
2889 // Allocate cache for string split and regexp-multiple.
2890 set_string_split_cache(*factory->NewFixedArray(
2891 RegExpResultsCache::kRegExpResultsCacheSize, TENURED));
2892 set_regexp_multiple_cache(*factory->NewFixedArray(
2893 RegExpResultsCache::kRegExpResultsCacheSize, TENURED));
2895 // Allocate cache for external strings pointing to native source code.
2896 set_natives_source_cache(
2897 *factory->NewFixedArray(Natives::GetBuiltinsCount()));
2899 set_undefined_cell(*factory->NewCell(factory->undefined_value()));
2901 // The symbol registry is initialized lazily.
2902 set_symbol_registry(undefined_value());
2904 // Allocate object to hold object observation state.
2905 set_observation_state(*factory->NewJSObjectFromMap(
2906 factory->NewMap(JS_OBJECT_TYPE, JSObject::kHeaderSize)));
2908 // Microtask queue uses the empty fixed array as a sentinel for "empty".
2909 // Number of queued microtasks stored in Isolate::pending_microtask_count().
2910 set_microtask_queue(empty_fixed_array());
2912 set_detailed_stack_trace_symbol(*factory->NewPrivateOwnSymbol());
2913 set_elements_transition_symbol(*factory->NewPrivateOwnSymbol());
2914 set_frozen_symbol(*factory->NewPrivateOwnSymbol());
2915 set_megamorphic_symbol(*factory->NewPrivateOwnSymbol());
2916 set_premonomorphic_symbol(*factory->NewPrivateOwnSymbol());
2917 set_generic_symbol(*factory->NewPrivateOwnSymbol());
2918 set_nonexistent_symbol(*factory->NewPrivateOwnSymbol());
2919 set_normal_ic_symbol(*factory->NewPrivateOwnSymbol());
2920 set_observed_symbol(*factory->NewPrivateOwnSymbol());
2921 set_stack_trace_symbol(*factory->NewPrivateOwnSymbol());
2922 set_uninitialized_symbol(*factory->NewPrivateOwnSymbol());
2923 set_home_object_symbol(*factory->NewPrivateOwnSymbol());
2925 Handle<SeededNumberDictionary> slow_element_dictionary =
2926 SeededNumberDictionary::New(isolate(), 0, TENURED);
2927 slow_element_dictionary->set_requires_slow_elements();
2928 set_empty_slow_element_dictionary(*slow_element_dictionary);
2930 set_materialized_objects(*factory->NewFixedArray(0, TENURED));
2932 // Handling of script id generation is in Factory::NewScript.
2933 set_last_script_id(Smi::FromInt(v8::UnboundScript::kNoScriptId));
2935 set_allocation_sites_scratchpad(
2936 *factory->NewFixedArray(kAllocationSiteScratchpadSize, TENURED));
2937 InitializeAllocationSitesScratchpad();
2939 // Initialize keyed lookup cache.
2940 isolate_->keyed_lookup_cache()->Clear();
2942 // Initialize context slot cache.
2943 isolate_->context_slot_cache()->Clear();
2945 // Initialize descriptor cache.
2946 isolate_->descriptor_lookup_cache()->Clear();
2948 // Initialize compilation cache.
2949 isolate_->compilation_cache()->Clear();
2953 bool Heap::RootCanBeWrittenAfterInitialization(Heap::RootListIndex root_index) {
2954 RootListIndex writable_roots[] = {
2955 kStoreBufferTopRootIndex,
2956 kStackLimitRootIndex,
2957 kNumberStringCacheRootIndex,
2958 kInstanceofCacheFunctionRootIndex,
2959 kInstanceofCacheMapRootIndex,
2960 kInstanceofCacheAnswerRootIndex,
2961 kCodeStubsRootIndex,
2962 kNonMonomorphicCacheRootIndex,
2963 kPolymorphicCodeCacheRootIndex,
2964 kLastScriptIdRootIndex,
2965 kEmptyScriptRootIndex,
2966 kRealStackLimitRootIndex,
2967 kArgumentsAdaptorDeoptPCOffsetRootIndex,
2968 kConstructStubDeoptPCOffsetRootIndex,
2969 kGetterStubDeoptPCOffsetRootIndex,
2970 kSetterStubDeoptPCOffsetRootIndex,
2971 kStringTableRootIndex,
2972 };
2974 for (unsigned int i = 0; i < arraysize(writable_roots); i++) {
2975 if (root_index == writable_roots[i]) return true;
2976 }
2977 return false;
2981 bool Heap::RootCanBeTreatedAsConstant(RootListIndex root_index) {
2982 return !RootCanBeWrittenAfterInitialization(root_index) &&
2983 !InNewSpace(roots_array_start()[root_index]);
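// RegExpResultsCache is a flat FixedArray used as a small hash cache keyed by
// an internalized subject string plus a pattern (a string for String.split
// results, a FixedArray for regexp-multiple results). Lookup probes the
// primary bucket and one overflow bucket; Enter evicts the overflow bucket
// when both are occupied.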
2987 Object* RegExpResultsCache::Lookup(Heap* heap, String* key_string,
2988 Object* key_pattern, ResultsCacheType type) {
2989 FixedArray* cache;
2990 if (!key_string->IsInternalizedString()) return Smi::FromInt(0);
2991 if (type == STRING_SPLIT_SUBSTRINGS) {
2992 DCHECK(key_pattern->IsString());
2993 if (!key_pattern->IsInternalizedString()) return Smi::FromInt(0);
2994 cache = heap->string_split_cache();
2995 } else {
2996 DCHECK(type == REGEXP_MULTIPLE_INDICES);
2997 DCHECK(key_pattern->IsFixedArray());
2998 cache = heap->regexp_multiple_cache();
3001 uint32_t hash = key_string->Hash();
3002 uint32_t index = ((hash & (kRegExpResultsCacheSize - 1)) &
3003 ~(kArrayEntriesPerCacheEntry - 1));
3004 if (cache->get(index + kStringOffset) == key_string &&
3005 cache->get(index + kPatternOffset) == key_pattern) {
3006 return cache->get(index + kArrayOffset);
3007 }
3008 index =
3009 ((index + kArrayEntriesPerCacheEntry) & (kRegExpResultsCacheSize - 1));
3010 if (cache->get(index + kStringOffset) == key_string &&
3011 cache->get(index + kPatternOffset) == key_pattern) {
3012 return cache->get(index + kArrayOffset);
3014 return Smi::FromInt(0);
3018 void RegExpResultsCache::Enter(Isolate* isolate, Handle<String> key_string,
3019 Handle<Object> key_pattern,
3020 Handle<FixedArray> value_array,
3021 ResultsCacheType type) {
3022 Factory* factory = isolate->factory();
3023 Handle<FixedArray> cache;
3024 if (!key_string->IsInternalizedString()) return;
3025 if (type == STRING_SPLIT_SUBSTRINGS) {
3026 DCHECK(key_pattern->IsString());
3027 if (!key_pattern->IsInternalizedString()) return;
3028 cache = factory->string_split_cache();
3029 } else {
3030 DCHECK(type == REGEXP_MULTIPLE_INDICES);
3031 DCHECK(key_pattern->IsFixedArray());
3032 cache = factory->regexp_multiple_cache();
3035 uint32_t hash = key_string->Hash();
3036 uint32_t index = ((hash & (kRegExpResultsCacheSize - 1)) &
3037 ~(kArrayEntriesPerCacheEntry - 1));
3038 if (cache->get(index + kStringOffset) == Smi::FromInt(0)) {
3039 cache->set(index + kStringOffset, *key_string);
3040 cache->set(index + kPatternOffset, *key_pattern);
3041 cache->set(index + kArrayOffset, *value_array);
3042 } else {
3043 uint32_t index2 =
3044 ((index + kArrayEntriesPerCacheEntry) & (kRegExpResultsCacheSize - 1));
3045 if (cache->get(index2 + kStringOffset) == Smi::FromInt(0)) {
3046 cache->set(index2 + kStringOffset, *key_string);
3047 cache->set(index2 + kPatternOffset, *key_pattern);
3048 cache->set(index2 + kArrayOffset, *value_array);
3049 } else {
3050 cache->set(index2 + kStringOffset, Smi::FromInt(0));
3051 cache->set(index2 + kPatternOffset, Smi::FromInt(0));
3052 cache->set(index2 + kArrayOffset, Smi::FromInt(0));
3053 cache->set(index + kStringOffset, *key_string);
3054 cache->set(index + kPatternOffset, *key_pattern);
3055 cache->set(index + kArrayOffset, *value_array);
3058 // If the array is a reasonably short list of substrings, convert it into a
3059 // list of internalized strings.
3060 if (type == STRING_SPLIT_SUBSTRINGS && value_array->length() < 100) {
3061 for (int i = 0; i < value_array->length(); i++) {
3062 Handle<String> str(String::cast(value_array->get(i)), isolate);
3063 Handle<String> internalized_str = factory->InternalizeString(str);
3064 value_array->set(i, *internalized_str);
3067 // Convert backing store to a copy-on-write array.
3068 value_array->set_map_no_write_barrier(*factory->fixed_cow_array_map());
3072 void RegExpResultsCache::Clear(FixedArray* cache) {
3073 for (int i = 0; i < kRegExpResultsCacheSize; i++) {
3074 cache->set(i, Smi::FromInt(0));
3079 int Heap::FullSizeNumberStringCacheLength() {
3080 // Compute the size of the number string cache based on the max newspace size.
3081 // The number string cache has a minimum size based on twice the initial cache
3082 // size to ensure that it is bigger after being made 'full size'.
3083 int number_string_cache_size = max_semi_space_size_ / 512;
3084 number_string_cache_size = Max(kInitialNumberStringCacheSize * 2,
3085 Min(0x4000, number_string_cache_size));
3086 // There is a string and a number per entry so the length is twice the number
3087 // of entries.
3088 return number_string_cache_size * 2;
3092 void Heap::FlushNumberStringCache() {
3093 // Flush the number to string cache.
3094 int len = number_string_cache()->length();
3095 for (int i = 0; i < len; i++) {
3096 number_string_cache()->set_undefined(i);
3101 void Heap::FlushAllocationSitesScratchpad() {
3102 for (int i = 0; i < allocation_sites_scratchpad_length_; i++) {
3103 allocation_sites_scratchpad()->set_undefined(i);
3105 allocation_sites_scratchpad_length_ = 0;
3109 void Heap::InitializeAllocationSitesScratchpad() {
3110 DCHECK(allocation_sites_scratchpad()->length() ==
3111 kAllocationSiteScratchpadSize);
3112 for (int i = 0; i < kAllocationSiteScratchpadSize; i++) {
3113 allocation_sites_scratchpad()->set_undefined(i);
3118 void Heap::AddAllocationSiteToScratchpad(AllocationSite* site,
3119 ScratchpadSlotMode mode) {
3120 if (allocation_sites_scratchpad_length_ < kAllocationSiteScratchpadSize) {
3121 // We cannot use the normal write-barrier because slots need to be
3122 // recorded with non-incremental marking as well. We have to explicitly
3123 // record the slot to take evacuation candidates into account.
3124 allocation_sites_scratchpad()->set(allocation_sites_scratchpad_length_,
3125 site, SKIP_WRITE_BARRIER);
3126 Object** slot = allocation_sites_scratchpad()->RawFieldOfElementAt(
3127 allocation_sites_scratchpad_length_);
3129 if (mode == RECORD_SCRATCHPAD_SLOT) {
3130 // We need to allow slots buffer overflow here since the evacuation
3131 // candidates are not part of the global list of old space pages and
3132 // releasing an evacuation candidate due to a slots buffer overflow
3133 // results in lost pages.
3134 mark_compact_collector()->RecordSlot(slot, slot, *slot,
3135 SlotsBuffer::IGNORE_OVERFLOW);
3137 allocation_sites_scratchpad_length_++;
3142 Map* Heap::MapForExternalArrayType(ExternalArrayType array_type) {
3143 return Map::cast(roots_[RootIndexForExternalArrayType(array_type)]);
3147 Heap::RootListIndex Heap::RootIndexForExternalArrayType(
3148 ExternalArrayType array_type) {
3149 switch (array_type) {
3150 #define ARRAY_TYPE_TO_ROOT_INDEX(Type, type, TYPE, ctype, size) \
3151 case kExternal##Type##Array: \
3152 return kExternal##Type##ArrayMapRootIndex;
3154 TYPED_ARRAYS(ARRAY_TYPE_TO_ROOT_INDEX)
3155 #undef ARRAY_TYPE_TO_ROOT_INDEX
3159 return kUndefinedValueRootIndex;
3164 Map* Heap::MapForFixedTypedArray(ExternalArrayType array_type) {
3165 return Map::cast(roots_[RootIndexForFixedTypedArray(array_type)]);
3169 Heap::RootListIndex Heap::RootIndexForFixedTypedArray(
3170 ExternalArrayType array_type) {
3171 switch (array_type) {
3172 #define ARRAY_TYPE_TO_ROOT_INDEX(Type, type, TYPE, ctype, size) \
3173 case kExternal##Type##Array: \
3174 return kFixed##Type##ArrayMapRootIndex;
3176 TYPED_ARRAYS(ARRAY_TYPE_TO_ROOT_INDEX)
3177 #undef ARRAY_TYPE_TO_ROOT_INDEX
3181 return kUndefinedValueRootIndex;
3186 Heap::RootListIndex Heap::RootIndexForEmptyExternalArray(
3187 ElementsKind elementsKind) {
3188 switch (elementsKind) {
3189 #define ELEMENT_KIND_TO_ROOT_INDEX(Type, type, TYPE, ctype, size) \
3190 case EXTERNAL_##TYPE##_ELEMENTS: \
3191 return kEmptyExternal##Type##ArrayRootIndex;
3193 TYPED_ARRAYS(ELEMENT_KIND_TO_ROOT_INDEX)
3194 #undef ELEMENT_KIND_TO_ROOT_INDEX
3198 return kUndefinedValueRootIndex;
3203 Heap::RootListIndex Heap::RootIndexForEmptyFixedTypedArray(
3204 ElementsKind elementsKind) {
3205 switch (elementsKind) {
3206 #define ELEMENT_KIND_TO_ROOT_INDEX(Type, type, TYPE, ctype, size) \
3207 case TYPE##_ELEMENTS: \
3208 return kEmptyFixed##Type##ArrayRootIndex;
3210 TYPED_ARRAYS(ELEMENT_KIND_TO_ROOT_INDEX)
3211 #undef ELEMENT_KIND_TO_ROOT_INDEX
3214 return kUndefinedValueRootIndex;
3219 ExternalArray* Heap::EmptyExternalArrayForMap(Map* map) {
3220 return ExternalArray::cast(
3221 roots_[RootIndexForEmptyExternalArray(map->elements_kind())]);
3225 FixedTypedArrayBase* Heap::EmptyFixedTypedArrayForMap(Map* map) {
3226 return FixedTypedArrayBase::cast(
3227 roots_[RootIndexForEmptyFixedTypedArray(map->elements_kind())]);
3231 AllocationResult Heap::AllocateForeign(Address address,
3232 PretenureFlag pretenure) {
3233 // Statically ensure that it is safe to allocate foreigns in paged spaces.
3234 STATIC_ASSERT(Foreign::kSize <= Page::kMaxRegularHeapObjectSize);
3235 AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
3237 AllocationResult allocation = Allocate(foreign_map(), space);
3238 if (!allocation.To(&result)) return allocation;
3239 result->set_foreign_address(address);
3244 AllocationResult Heap::AllocateByteArray(int length, PretenureFlag pretenure) {
3245 if (length < 0 || length > ByteArray::kMaxLength) {
3246 v8::internal::Heap::FatalProcessOutOfMemory("invalid array length", true);
3248 int size = ByteArray::SizeFor(length);
3249 AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
3252 AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
3253 if (!allocation.To(&result)) return allocation;
3256 result->set_map_no_write_barrier(byte_array_map());
3257 ByteArray::cast(result)->set_length(length);
3262 void Heap::CreateFillerObjectAt(Address addr, int size) {
3263 if (size == 0) return;
3264 HeapObject* filler = HeapObject::FromAddress(addr);
3265 if (size == kPointerSize) {
3266 filler->set_map_no_write_barrier(one_pointer_filler_map());
3267 } else if (size == 2 * kPointerSize) {
3268 filler->set_map_no_write_barrier(two_pointer_filler_map());
3270 filler->set_map_no_write_barrier(free_space_map());
3271 FreeSpace::cast(filler)->set_size(size);
3276 bool Heap::CanMoveObjectStart(HeapObject* object) {
3277 Address address = object->address();
3278 bool is_in_old_pointer_space = InOldPointerSpace(address);
3279 bool is_in_old_data_space = InOldDataSpace(address);
3281 if (lo_space()->Contains(object)) return false;
3283 Page* page = Page::FromAddress(address);
3284 // We can move the object start if:
3285 // (1) the object is not in old pointer or old data space,
3286 // (2) the page of the object was already swept,
3287 // (3) the page was already concurrently swept. This case is an optimization
3288 // for concurrent sweeping. The WasSwept predicate for concurrently swept
3289 // pages is set after sweeping all pages.
3290 return (!is_in_old_pointer_space && !is_in_old_data_space) ||
3291 page->WasSwept() || page->SweepingCompleted();
3295 void Heap::AdjustLiveBytes(Address address, int by, InvocationMode mode) {
3296 if (incremental_marking()->IsMarking() &&
3297 Marking::IsBlack(Marking::MarkBitFrom(address))) {
3298 if (mode == FROM_GC) {
3299 MemoryChunk::IncrementLiveBytesFromGC(address, by);
3301 MemoryChunk::IncrementLiveBytesFromMutator(address, by);
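// LeftTrimFixedArray drops the first elements_to_trim elements of an array in
// place: a filler object is written over the trimmed prefix, the map and the
// new length are re-written at the new start address, and the object returned
// to the caller begins at that new address. Live-byte counts and the
// incremental-marking state are updated to match.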
3307 FixedArrayBase* Heap::LeftTrimFixedArray(FixedArrayBase* object,
3308 int elements_to_trim) {
3309 const int element_size = object->IsFixedArray() ? kPointerSize : kDoubleSize;
3310 const int bytes_to_trim = elements_to_trim * element_size;
3311 Map* map = object->map();
3313 // For now this trick is only applied to objects in new and paged space.
3314 // In large object space the object's start must coincide with chunk
3315 // and thus the trick is just not applicable.
3316 DCHECK(!lo_space()->Contains(object));
3317 DCHECK(object->map() != fixed_cow_array_map());
3319 STATIC_ASSERT(FixedArrayBase::kMapOffset == 0);
3320 STATIC_ASSERT(FixedArrayBase::kLengthOffset == kPointerSize);
3321 STATIC_ASSERT(FixedArrayBase::kHeaderSize == 2 * kPointerSize);
3323 const int len = object->length();
3324 DCHECK(elements_to_trim <= len);
3326 // Calculate location of new array start.
3327 Address new_start = object->address() + bytes_to_trim;
3329 // Technically in new space this write might be omitted (except for
3330 // debug mode which iterates through the heap), but to play safer
3331 // we still do it.
3332 CreateFillerObjectAt(object->address(), bytes_to_trim);
3334 // Initialize header of the trimmed array. Since left trimming is only
3335 // performed on pages which are not concurrently swept creating a filler
3336 // object does not require synchronization.
3337 DCHECK(CanMoveObjectStart(object));
3338 Object** former_start = HeapObject::RawField(object, 0);
3339 int new_start_index = elements_to_trim * (element_size / kPointerSize);
3340 former_start[new_start_index] = map;
3341 former_start[new_start_index + 1] = Smi::FromInt(len - elements_to_trim);
3342 FixedArrayBase* new_object =
3343 FixedArrayBase::cast(HeapObject::FromAddress(new_start));
3345 // Maintain consistency of live bytes during incremental marking
3346 marking()->TransferMark(object->address(), new_start);
3347 AdjustLiveBytes(new_start, -bytes_to_trim, Heap::FROM_MUTATOR);
3349 // Notify the heap profiler of change in object layout.
3350 OnMoveEvent(new_object, object, new_object->Size());
3352 return new_object;
3355 // Force instantiation of templatized method.
3356 template
3357 void Heap::RightTrimFixedArray<Heap::FROM_GC>(FixedArrayBase*, int);
3358 template
3359 void Heap::RightTrimFixedArray<Heap::FROM_MUTATOR>(FixedArrayBase*, int);
3362 template<Heap::InvocationMode mode>
3363 void Heap::RightTrimFixedArray(FixedArrayBase* object, int elements_to_trim) {
3364 const int element_size = object->IsFixedArray() ? kPointerSize : kDoubleSize;
3365 const int bytes_to_trim = elements_to_trim * element_size;
3367 // For now this trick is only applied to objects in new and paged space.
3368 DCHECK(object->map() != fixed_cow_array_map());
3370 const int len = object->length();
3371 DCHECK(elements_to_trim < len);
3373 // Calculate location of new array end.
3374 Address new_end = object->address() + object->Size() - bytes_to_trim;
3376 // Technically in new space this write might be omitted (except for
3377 // debug mode which iterates through the heap), but to play safer
3378 // we still do it.
3379 // We do not create a filler for objects in large object space.
3380 // TODO(hpayer): We should shrink the large object page if the size
3381 // of the object changed significantly.
3382 if (!lo_space()->Contains(object)) {
3383 CreateFillerObjectAt(new_end, bytes_to_trim);
3386 // Initialize header of the trimmed array. We are storing the new length
3387 // using release store after creating a filler for the left-over space to
3388 // avoid races with the sweeper thread.
3389 object->synchronized_set_length(len - elements_to_trim);
3391 // Maintain consistency of live bytes during incremental marking
3392 AdjustLiveBytes(object->address(), -bytes_to_trim, mode);
3394 // Notify the heap profiler of change in object layout. The array may not be
3395 // moved during GC, and size has to be adjusted nevertheless.
3396 HeapProfiler* profiler = isolate()->heap_profiler();
3397 if (profiler->is_tracking_allocations()) {
3398 profiler->UpdateObjectSizeEvent(object->address(), object->Size());
3403 AllocationResult Heap::AllocateExternalArray(int length,
3404 ExternalArrayType array_type,
3405 void* external_pointer,
3406 PretenureFlag pretenure) {
3407 int size = ExternalArray::kAlignedSize;
3408 AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
3411 AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
3412 if (!allocation.To(&result)) return allocation;
3415 result->set_map_no_write_barrier(MapForExternalArrayType(array_type));
3416 ExternalArray::cast(result)->set_length(length);
3417 ExternalArray::cast(result)->set_external_pointer(external_pointer);
3421 static void ForFixedTypedArray(ExternalArrayType array_type, int* element_size,
3422 ElementsKind* element_kind) {
3423 switch (array_type) {
3424 #define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
3425 case kExternal##Type##Array: \
3426 *element_size = size; \
3427 *element_kind = TYPE##_ELEMENTS; \
3430 TYPED_ARRAYS(TYPED_ARRAY_CASE)
3431 #undef TYPED_ARRAY_CASE
3434 *element_size = 0; // Bogus
3435 *element_kind = UINT8_ELEMENTS; // Bogus
3441 AllocationResult Heap::AllocateFixedTypedArray(int length,
3442 ExternalArrayType array_type,
3443 PretenureFlag pretenure) {
3445 ElementsKind elements_kind;
3446 ForFixedTypedArray(array_type, &element_size, &elements_kind);
3447 int size = OBJECT_POINTER_ALIGN(length * element_size +
3448 FixedTypedArrayBase::kDataOffset);
3449 #ifndef V8_HOST_ARCH_64_BIT
3450 if (array_type == kExternalFloat64Array) {
3451 size += kPointerSize;
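// Note (illustrative): on 32-bit hosts kPointerSize is 4 but Float64 elements
// need 8-byte alignment, so one extra word of slack is reserved here. If the
// raw allocation below is only 4-byte aligned, EnsureDoubleAligned can shift
// the object up by one word and turn the skipped word into a filler.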
3454 AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
3457 AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
3458 if (!allocation.To(&object)) return allocation;
3460 if (array_type == kExternalFloat64Array) {
3461 object = EnsureDoubleAligned(this, object, size);
3464 object->set_map(MapForFixedTypedArray(array_type));
3465 FixedTypedArrayBase* elements = FixedTypedArrayBase::cast(object);
3466 elements->set_length(length);
3467 memset(elements->DataPtr(), 0, elements->DataSize());
3472 AllocationResult Heap::AllocateCode(int object_size, bool immovable) {
3473 DCHECK(IsAligned(static_cast<intptr_t>(object_size), kCodeAlignment));
3474 AllocationResult allocation =
3475 AllocateRaw(object_size, CODE_SPACE, CODE_SPACE);
3478 if (!allocation.To(&result)) return allocation;
3481 Address address = result->address();
3482 // Code objects which should stay at a fixed address are allocated either
3483 // in the first page of code space (objects on the first page of each space
3484 // are never moved) or in large object space.
3485 if (!code_space_->FirstPage()->Contains(address) &&
3486 MemoryChunk::FromAddress(address)->owner()->identity() != LO_SPACE) {
3487 // Discard the first code allocation, which was on a page where it could not be moved.
3489 CreateFillerObjectAt(result->address(), object_size);
3490 allocation = lo_space_->AllocateRaw(object_size, EXECUTABLE);
3491 if (!allocation.To(&result)) return allocation;
3492 OnAllocationEvent(result, object_size);
3496 result->set_map_no_write_barrier(code_map());
3497 Code* code = Code::cast(result);
3498 DCHECK(isolate_->code_range() == NULL || !isolate_->code_range()->valid() ||
3499 isolate_->code_range()->contains(code->address()));
3500 code->set_gc_metadata(Smi::FromInt(0));
3501 code->set_ic_age(global_ic_age_);
3506 AllocationResult Heap::CopyCode(Code* code) {
3507 AllocationResult allocation;
3508 HeapObject* new_constant_pool;
3509 if (FLAG_enable_ool_constant_pool &&
3510 code->constant_pool() != empty_constant_pool_array()) {
3511 // Copy the constant pool, since edits to the copied code may modify
3512 // the constant pool.
3513 allocation = CopyConstantPoolArray(code->constant_pool());
3514 if (!allocation.To(&new_constant_pool)) return allocation;
3516 new_constant_pool = empty_constant_pool_array();
3520 // Allocate an object the same size as the code object.
3521 int obj_size = code->Size();
3522 allocation = AllocateRaw(obj_size, CODE_SPACE, CODE_SPACE);
3523 if (!allocation.To(&result)) return allocation;
3525 // Copy code object.
3526 Address old_addr = code->address();
3527 Address new_addr = result->address();
3528 CopyBlock(new_addr, old_addr, obj_size);
3529 Code* new_code = Code::cast(result);
3531 // Update the constant pool.
3532 new_code->set_constant_pool(new_constant_pool);
3534 // Relocate the copy.
3535 DCHECK(isolate_->code_range() == NULL || !isolate_->code_range()->valid() ||
3536 isolate_->code_range()->contains(code->address()));
3537 new_code->Relocate(new_addr - old_addr);
3542 AllocationResult Heap::CopyCode(Code* code, Vector<byte> reloc_info) {
3543 // Allocate ByteArray and ConstantPoolArray before the Code object, so that we
3544 // do not risk leaving an uninitialized Code object (and breaking the heap).
3545 ByteArray* reloc_info_array;
3547 AllocationResult allocation =
3548 AllocateByteArray(reloc_info.length(), TENURED);
3549 if (!allocation.To(&reloc_info_array)) return allocation;
3551 HeapObject* new_constant_pool;
3552 if (FLAG_enable_ool_constant_pool &&
3553 code->constant_pool() != empty_constant_pool_array()) {
3554 // Copy the constant pool, since edits to the copied code may modify
3555 // the constant pool.
3556 AllocationResult allocation = CopyConstantPoolArray(code->constant_pool());
3557 if (!allocation.To(&new_constant_pool)) return allocation;
3559 new_constant_pool = empty_constant_pool_array();
3562 int new_body_size = RoundUp(code->instruction_size(), kObjectAlignment);
3564 int new_obj_size = Code::SizeFor(new_body_size);
3566 Address old_addr = code->address();
3568 size_t relocation_offset =
3569 static_cast<size_t>(code->instruction_end() - old_addr);
3572 AllocationResult allocation =
3573 AllocateRaw(new_obj_size, CODE_SPACE, CODE_SPACE);
3574 if (!allocation.To(&result)) return allocation;
3576 // Copy code object.
3577 Address new_addr = result->address();
3579 // Copy header and instructions.
3580 CopyBytes(new_addr, old_addr, relocation_offset);
3582 Code* new_code = Code::cast(result);
3583 new_code->set_relocation_info(reloc_info_array);
3585 // Update constant pool.
3586 new_code->set_constant_pool(new_constant_pool);
3588 // Copy patched rinfo.
3589 CopyBytes(new_code->relocation_start(), reloc_info.start(),
3590 static_cast<size_t>(reloc_info.length()));
3592 // Relocate the copy.
3593 DCHECK(isolate_->code_range() == NULL || !isolate_->code_range()->valid() ||
3594 isolate_->code_range()->contains(code->address()));
3595 new_code->Relocate(new_addr - old_addr);
3598 if (FLAG_verify_heap) code->ObjectVerify();
3604 void Heap::InitializeAllocationMemento(AllocationMemento* memento,
3605 AllocationSite* allocation_site) {
3606 memento->set_map_no_write_barrier(allocation_memento_map());
3607 DCHECK(allocation_site->map() == allocation_site_map());
3608 memento->set_allocation_site(allocation_site, SKIP_WRITE_BARRIER);
3609 if (FLAG_allocation_site_pretenuring) {
3610 allocation_site->IncrementMementoCreateCount();
3615 AllocationResult Heap::Allocate(Map* map, AllocationSpace space,
3616 AllocationSite* allocation_site) {
3617 DCHECK(gc_state_ == NOT_IN_GC);
3618 DCHECK(map->instance_type() != MAP_TYPE);
3619 // If allocation failures are disallowed, we may allocate in a different
3620 // space when new space is full and the object is not a large object.
3621 AllocationSpace retry_space =
3622 (space != NEW_SPACE) ? space : TargetSpaceId(map->instance_type());
3623 int size = map->instance_size();
3624 if (allocation_site != NULL) {
3625 size += AllocationMemento::kSize;
3628 AllocationResult allocation = AllocateRaw(size, space, retry_space);
3629 if (!allocation.To(&result)) return allocation;
3630 // No need for write barrier since object is white and map is in old space.
3631 result->set_map_no_write_barrier(map);
3632 if (allocation_site != NULL) {
3633 AllocationMemento* alloc_memento = reinterpret_cast<AllocationMemento*>(
3634 reinterpret_cast<Address>(result) + map->instance_size());
3635 InitializeAllocationMemento(alloc_memento, allocation_site);
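// Layout sketch (illustrative): because `size` was bumped by
// AllocationMemento::kSize above, the raw allocation is laid out as
// [object: map->instance_size() bytes][AllocationMemento: kSize bytes],
// and alloc_memento points at the second part.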
3641 void Heap::InitializeJSObjectFromMap(JSObject* obj, FixedArray* properties,
3643 obj->set_properties(properties);
3644 obj->initialize_elements();
3645 // TODO(1240798): Initialize the object's body using valid initial values
3646 // according to the object's initial map. For example, if the map's
3647 // instance type is JS_ARRAY_TYPE, the length field should be initialized
3648 // to a number (e.g. Smi::FromInt(0)) and the elements initialized to a
3649 // fixed array (e.g. Heap::empty_fixed_array()). Currently, the object
3650 // verification code has to cope with (temporarily) invalid objects. See
3651 // for example, JSArray::JSArrayVerify.
3653 // We cannot always fill with one_pointer_filler_map because objects
3654 // created from API functions expect their internal fields to be initialized
3655 // with undefined_value.
3656 // Pre-allocated fields need to be initialized with undefined_value as well
3657 // so that object accesses before the constructor completes (e.g. in the
3658 // debugger) will not cause a crash.
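// Sketch of the intent (assuming InitializeBody fills pre-allocated fields
// with its second argument and the remaining slack with its third): while
// in-object slack tracking is in progress the slack is tagged with
// one_pointer_filler_map so the object can later be shrunk; otherwise every
// field is simply initialized with undefined_value.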
3659 if (map->constructor()->IsJSFunction() &&
3660 JSFunction::cast(map->constructor())
3661 ->IsInobjectSlackTrackingInProgress()) {
3662 // We might want to shrink the object later.
3663 DCHECK(obj->GetInternalFieldCount() == 0);
3664 filler = Heap::one_pointer_filler_map();
3666 filler = Heap::undefined_value();
3668 obj->InitializeBody(map, Heap::undefined_value(), filler);
3672 AllocationResult Heap::AllocateJSObjectFromMap(
3673 Map* map, PretenureFlag pretenure, bool allocate_properties,
3674 AllocationSite* allocation_site) {
3675 // JSFunctions should be allocated using AllocateFunction to be
3676 // properly initialized.
3677 DCHECK(map->instance_type() != JS_FUNCTION_TYPE);
3679 // Both types of global objects should be allocated using
3680 // AllocateGlobalObject to be properly initialized.
3681 DCHECK(map->instance_type() != JS_GLOBAL_OBJECT_TYPE);
3682 DCHECK(map->instance_type() != JS_BUILTINS_OBJECT_TYPE);
3684 // Allocate the backing storage for the properties.
3685 FixedArray* properties;
3686 if (allocate_properties) {
3687 int prop_size = map->InitialPropertiesLength();
3688 DCHECK(prop_size >= 0);
3690 AllocationResult allocation = AllocateFixedArray(prop_size, pretenure);
3691 if (!allocation.To(&properties)) return allocation;
3694 properties = empty_fixed_array();
3697 // Allocate the JSObject.
3698 int size = map->instance_size();
3699 AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, pretenure);
3701 AllocationResult allocation = Allocate(map, space, allocation_site);
3702 if (!allocation.To(&js_obj)) return allocation;
3704 // Initialize the JSObject.
3705 InitializeJSObjectFromMap(js_obj, properties, map);
3706 DCHECK(js_obj->HasFastElements() || js_obj->HasExternalArrayElements() ||
3707 js_obj->HasFixedTypedArrayElements());
3712 AllocationResult Heap::AllocateJSObject(JSFunction* constructor,
3713 PretenureFlag pretenure,
3714 AllocationSite* allocation_site) {
3715 DCHECK(constructor->has_initial_map());
3717 // Allocate the object based on the constructor's initial map.
3718 AllocationResult allocation = AllocateJSObjectFromMap(
3719 constructor->initial_map(), pretenure, true, allocation_site);
3721 // Make sure the result is NOT a global object if the allocation succeeded.
3723 DCHECK(!allocation.To(&obj) || !obj->IsGlobalObject());
3729 AllocationResult Heap::CopyJSObject(JSObject* source, AllocationSite* site) {
3730 // Never used to copy functions. If functions need to be copied we
3731 // have to be careful to clear the literals array.
3732 SLOW_DCHECK(!source->IsJSFunction());
3735 Map* map = source->map();
3736 int object_size = map->instance_size();
3739 DCHECK(site == NULL || AllocationSite::CanTrack(map->instance_type()));
3741 WriteBarrierMode wb_mode = UPDATE_WRITE_BARRIER;
3743 // If we're forced to always allocate, we use the general allocation
3744 // functions which may leave us with an object in old space.
3745 if (always_allocate()) {
3747 AllocationResult allocation =
3748 AllocateRaw(object_size, NEW_SPACE, OLD_POINTER_SPACE);
3749 if (!allocation.To(&clone)) return allocation;
3751 Address clone_address = clone->address();
3752 CopyBlock(clone_address, source->address(), object_size);
3753 // Update write barrier for all fields that lie beyond the header.
3754 RecordWrites(clone_address, JSObject::kHeaderSize,
3755 (object_size - JSObject::kHeaderSize) / kPointerSize);
3757 wb_mode = SKIP_WRITE_BARRIER;
3760 int adjusted_object_size =
3761 site != NULL ? object_size + AllocationMemento::kSize : object_size;
3762 AllocationResult allocation =
3763 AllocateRaw(adjusted_object_size, NEW_SPACE, NEW_SPACE);
3764 if (!allocation.To(&clone)) return allocation;
3766 SLOW_DCHECK(InNewSpace(clone));
3767 // Since we know the clone is allocated in new space, we can copy
3768 // the contents without worrying about updating the write barrier.
3769 CopyBlock(clone->address(), source->address(), object_size);
3772 AllocationMemento* alloc_memento = reinterpret_cast<AllocationMemento*>(
3773 reinterpret_cast<Address>(clone) + object_size);
3774 InitializeAllocationMemento(alloc_memento, site);
3778 SLOW_DCHECK(JSObject::cast(clone)->GetElementsKind() ==
3779 source->GetElementsKind());
3780 FixedArrayBase* elements = FixedArrayBase::cast(source->elements());
3781 FixedArray* properties = FixedArray::cast(source->properties());
3782 // Update elements if necessary.
3783 if (elements->length() > 0) {
3784 FixedArrayBase* elem;
3786 AllocationResult allocation;
3787 if (elements->map() == fixed_cow_array_map()) {
3788 allocation = FixedArray::cast(elements);
3789 } else if (source->HasFastDoubleElements()) {
3790 allocation = CopyFixedDoubleArray(FixedDoubleArray::cast(elements));
3792 allocation = CopyFixedArray(FixedArray::cast(elements));
3794 if (!allocation.To(&elem)) return allocation;
3796 JSObject::cast(clone)->set_elements(elem, wb_mode);
3798 // Update properties if necessary.
3799 if (properties->length() > 0) {
3802 AllocationResult allocation = CopyFixedArray(properties);
3803 if (!allocation.To(&prop)) return allocation;
3805 JSObject::cast(clone)->set_properties(prop, wb_mode);
3807 // Return the new clone.
3812 static inline void WriteOneByteData(Vector<const char> vector, uint8_t* chars,
3814 // Only works for one byte strings.
3815 DCHECK(vector.length() == len);
3816 MemCopy(chars, vector.start(), len);
3819 static inline void WriteTwoByteData(Vector<const char> vector, uint16_t* chars,
3821 const uint8_t* stream = reinterpret_cast<const uint8_t*>(vector.start());
3822 unsigned stream_length = vector.length();
3823 while (stream_length != 0) {
3824 unsigned consumed = 0;
3825 uint32_t c = unibrow::Utf8::ValueOf(stream, stream_length, &consumed);
3826 DCHECK(c != unibrow::Utf8::kBadChar);
3827 DCHECK(consumed <= stream_length);
3828 stream_length -= consumed;
3830 if (c > unibrow::Utf16::kMaxNonSurrogateCharCode) {
3833 *chars++ = unibrow::Utf16::LeadSurrogate(c);
3834 *chars++ = unibrow::Utf16::TrailSurrogate(c);
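// Example (illustrative): the code point U+1F600 lies above
// kMaxNonSurrogateCharCode (0xFFFF) and is therefore written as the surrogate
// pair 0xD83D (lead) followed by 0xDE00 (trail).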
3841 DCHECK(stream_length == 0);
3846 static inline void WriteOneByteData(String* s, uint8_t* chars, int len) {
3847 DCHECK(s->length() == len);
3848 String::WriteToFlat(s, chars, 0, len);
3852 static inline void WriteTwoByteData(String* s, uint16_t* chars, int len) {
3853 DCHECK(s->length() == len);
3854 String::WriteToFlat(s, chars, 0, len);
3858 template <bool is_one_byte, typename T>
3859 AllocationResult Heap::AllocateInternalizedStringImpl(T t, int chars,
3860 uint32_t hash_field) {
3862 // Compute map and object size.
3866 DCHECK_LE(0, chars);
3867 DCHECK_GE(String::kMaxLength, chars);
3869 map = one_byte_internalized_string_map();
3870 size = SeqOneByteString::SizeFor(chars);
3872 map = internalized_string_map();
3873 size = SeqTwoByteString::SizeFor(chars);
3875 AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, TENURED);
3880 AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
3881 if (!allocation.To(&result)) return allocation;
3884 result->set_map_no_write_barrier(map);
3885 // Set length and hash fields of the allocated string.
3886 String* answer = String::cast(result);
3887 answer->set_length(chars);
3888 answer->set_hash_field(hash_field);
3890 DCHECK_EQ(size, answer->Size());
3893 WriteOneByteData(t, SeqOneByteString::cast(answer)->GetChars(), chars);
3895 WriteTwoByteData(t, SeqTwoByteString::cast(answer)->GetChars(), chars);
3901 // Need explicit instantiations.
3902 template AllocationResult Heap::AllocateInternalizedStringImpl<true>(String*,
3905 template AllocationResult Heap::AllocateInternalizedStringImpl<false>(String*,
3908 template AllocationResult Heap::AllocateInternalizedStringImpl<false>(
3909 Vector<const char>, int, uint32_t);
3912 AllocationResult Heap::AllocateRawOneByteString(int length,
3913 PretenureFlag pretenure) {
3914 DCHECK_LE(0, length);
3915 DCHECK_GE(String::kMaxLength, length);
3916 int size = SeqOneByteString::SizeFor(length);
3917 DCHECK(size <= SeqOneByteString::kMaxSize);
3918 AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
3922 AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
3923 if (!allocation.To(&result)) return allocation;
3926 // Partially initialize the object.
3927 result->set_map_no_write_barrier(one_byte_string_map());
3928 String::cast(result)->set_length(length);
3929 String::cast(result)->set_hash_field(String::kEmptyHashField);
3930 DCHECK_EQ(size, HeapObject::cast(result)->Size());
3936 AllocationResult Heap::AllocateRawTwoByteString(int length,
3937 PretenureFlag pretenure) {
3938 DCHECK_LE(0, length);
3939 DCHECK_GE(String::kMaxLength, length);
3940 int size = SeqTwoByteString::SizeFor(length);
3941 DCHECK(size <= SeqTwoByteString::kMaxSize);
3942 AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
3946 AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
3947 if (!allocation.To(&result)) return allocation;
3950 // Partially initialize the object.
3951 result->set_map_no_write_barrier(string_map());
3952 String::cast(result)->set_length(length);
3953 String::cast(result)->set_hash_field(String::kEmptyHashField);
3954 DCHECK_EQ(size, HeapObject::cast(result)->Size());
3959 AllocationResult Heap::AllocateEmptyFixedArray() {
3960 int size = FixedArray::SizeFor(0);
3963 AllocationResult allocation =
3964 AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE);
3965 if (!allocation.To(&result)) return allocation;
3967 // Initialize the object.
3968 result->set_map_no_write_barrier(fixed_array_map());
3969 FixedArray::cast(result)->set_length(0);
3974 AllocationResult Heap::AllocateEmptyExternalArray(
3975 ExternalArrayType array_type) {
3976 return AllocateExternalArray(0, array_type, NULL, TENURED);
3980 AllocationResult Heap::CopyAndTenureFixedCOWArray(FixedArray* src) {
3981 if (!InNewSpace(src)) {
3985 int len = src->length();
3988 AllocationResult allocation = AllocateRawFixedArray(len, TENURED);
3989 if (!allocation.To(&obj)) return allocation;
3991 obj->set_map_no_write_barrier(fixed_array_map());
3992 FixedArray* result = FixedArray::cast(obj);
3993 result->set_length(len);
3996 DisallowHeapAllocation no_gc;
3997 WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
3998 for (int i = 0; i < len; i++) result->set(i, src->get(i), mode);
4000 // TODO(mvstanton): The map is set twice because of protection against calling
4001 // set() on a COW FixedArray. Issue v8:3221 created to track this, and
4002 // we might then be able to remove this whole method.
4003 HeapObject::cast(obj)->set_map_no_write_barrier(fixed_cow_array_map());
4008 AllocationResult Heap::AllocateEmptyFixedTypedArray(
4009 ExternalArrayType array_type) {
4010 return AllocateFixedTypedArray(0, array_type, TENURED);
4014 AllocationResult Heap::CopyFixedArrayWithMap(FixedArray* src, Map* map) {
4015 int len = src->length();
4018 AllocationResult allocation = AllocateRawFixedArray(len, NOT_TENURED);
4019 if (!allocation.To(&obj)) return allocation;
4021 if (InNewSpace(obj)) {
4022 obj->set_map_no_write_barrier(map);
4023 CopyBlock(obj->address() + kPointerSize, src->address() + kPointerSize,
4024 FixedArray::SizeFor(len) - kPointerSize);
4027 obj->set_map_no_write_barrier(map);
4028 FixedArray* result = FixedArray::cast(obj);
4029 result->set_length(len);
4032 DisallowHeapAllocation no_gc;
4033 WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
4034 for (int i = 0; i < len; i++) result->set(i, src->get(i), mode);
4039 AllocationResult Heap::CopyFixedDoubleArrayWithMap(FixedDoubleArray* src,
4041 int len = src->length();
4044 AllocationResult allocation = AllocateRawFixedDoubleArray(len, NOT_TENURED);
4045 if (!allocation.To(&obj)) return allocation;
4047 obj->set_map_no_write_barrier(map);
4048 CopyBlock(obj->address() + FixedDoubleArray::kLengthOffset,
4049 src->address() + FixedDoubleArray::kLengthOffset,
4050 FixedDoubleArray::SizeFor(len) - FixedDoubleArray::kLengthOffset);
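// Note (illustrative): the map word was written separately above, so this
// single block copy covers the length field plus all double elements starting
// at kLengthOffset. No per-element write barrier is needed because the
// payload holds raw doubles rather than tagged pointers.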
4055 AllocationResult Heap::CopyConstantPoolArrayWithMap(ConstantPoolArray* src,
4058 if (src->is_extended_layout()) {
4059 ConstantPoolArray::NumberOfEntries small(src,
4060 ConstantPoolArray::SMALL_SECTION);
4061 ConstantPoolArray::NumberOfEntries extended(
4062 src, ConstantPoolArray::EXTENDED_SECTION);
4063 AllocationResult allocation =
4064 AllocateExtendedConstantPoolArray(small, extended);
4065 if (!allocation.To(&obj)) return allocation;
4067 ConstantPoolArray::NumberOfEntries small(src,
4068 ConstantPoolArray::SMALL_SECTION);
4069 AllocationResult allocation = AllocateConstantPoolArray(small);
4070 if (!allocation.To(&obj)) return allocation;
4072 obj->set_map_no_write_barrier(map);
4073 CopyBlock(obj->address() + ConstantPoolArray::kFirstEntryOffset,
4074 src->address() + ConstantPoolArray::kFirstEntryOffset,
4075 src->size() - ConstantPoolArray::kFirstEntryOffset);
4080 AllocationResult Heap::AllocateRawFixedArray(int length,
4081 PretenureFlag pretenure) {
4082 if (length < 0 || length > FixedArray::kMaxLength) {
4083 v8::internal::Heap::FatalProcessOutOfMemory("invalid array length", true);
4085 int size = FixedArray::SizeFor(length);
4086 AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, pretenure);
4088 return AllocateRaw(size, space, OLD_POINTER_SPACE);
4092 AllocationResult Heap::AllocateFixedArrayWithFiller(int length,
4093 PretenureFlag pretenure,
4095 DCHECK(length >= 0);
4096 DCHECK(empty_fixed_array()->IsFixedArray());
4097 if (length == 0) return empty_fixed_array();
4099 DCHECK(!InNewSpace(filler));
4102 AllocationResult allocation = AllocateRawFixedArray(length, pretenure);
4103 if (!allocation.To(&result)) return allocation;
4106 result->set_map_no_write_barrier(fixed_array_map());
4107 FixedArray* array = FixedArray::cast(result);
4108 array->set_length(length);
4109 MemsetPointer(array->data_start(), filler, length);
4114 AllocationResult Heap::AllocateFixedArray(int length, PretenureFlag pretenure) {
4115 return AllocateFixedArrayWithFiller(length, pretenure, undefined_value());
4119 AllocationResult Heap::AllocateUninitializedFixedArray(int length) {
4120 if (length == 0) return empty_fixed_array();
4124 AllocationResult allocation = AllocateRawFixedArray(length, NOT_TENURED);
4125 if (!allocation.To(&obj)) return allocation;
4128 obj->set_map_no_write_barrier(fixed_array_map());
4129 FixedArray::cast(obj)->set_length(length);
4134 AllocationResult Heap::AllocateUninitializedFixedDoubleArray(
4135 int length, PretenureFlag pretenure) {
4136 if (length == 0) return empty_fixed_array();
4138 HeapObject* elements;
4139 AllocationResult allocation = AllocateRawFixedDoubleArray(length, pretenure);
4140 if (!allocation.To(&elements)) return allocation;
4142 elements->set_map_no_write_barrier(fixed_double_array_map());
4143 FixedDoubleArray::cast(elements)->set_length(length);
4148 AllocationResult Heap::AllocateRawFixedDoubleArray(int length,
4149 PretenureFlag pretenure) {
4150 if (length < 0 || length > FixedDoubleArray::kMaxLength) {
4151 v8::internal::Heap::FatalProcessOutOfMemory("invalid array length", true);
4153 int size = FixedDoubleArray::SizeFor(length);
4154 #ifndef V8_HOST_ARCH_64_BIT
4155 size += kPointerSize;
4157 AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
4161 AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
4162 if (!allocation.To(&object)) return allocation;
4165 return EnsureDoubleAligned(this, object, size);
4169 AllocationResult Heap::AllocateConstantPoolArray(
4170 const ConstantPoolArray::NumberOfEntries& small) {
4171 CHECK(small.are_in_range(0, ConstantPoolArray::kMaxSmallEntriesPerType));
4172 int size = ConstantPoolArray::SizeFor(small);
4173 #ifndef V8_HOST_ARCH_64_BIT
4174 size += kPointerSize;
4176 AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, TENURED);
4180 AllocationResult allocation = AllocateRaw(size, space, OLD_POINTER_SPACE);
4181 if (!allocation.To(&object)) return allocation;
4183 object = EnsureDoubleAligned(this, object, size);
4184 object->set_map_no_write_barrier(constant_pool_array_map());
4186 ConstantPoolArray* constant_pool = ConstantPoolArray::cast(object);
4187 constant_pool->Init(small);
4188 constant_pool->ClearPtrEntries(isolate());
4189 return constant_pool;
4193 AllocationResult Heap::AllocateExtendedConstantPoolArray(
4194 const ConstantPoolArray::NumberOfEntries& small,
4195 const ConstantPoolArray::NumberOfEntries& extended) {
4196 CHECK(small.are_in_range(0, ConstantPoolArray::kMaxSmallEntriesPerType));
4197 CHECK(extended.are_in_range(0, kMaxInt));
4198 int size = ConstantPoolArray::SizeForExtended(small, extended);
4199 #ifndef V8_HOST_ARCH_64_BIT
4200 size += kPointerSize;
4202 AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, TENURED);
4206 AllocationResult allocation = AllocateRaw(size, space, OLD_POINTER_SPACE);
4207 if (!allocation.To(&object)) return allocation;
4209 object = EnsureDoubleAligned(this, object, size);
4210 object->set_map_no_write_barrier(constant_pool_array_map());
4212 ConstantPoolArray* constant_pool = ConstantPoolArray::cast(object);
4213 constant_pool->InitExtended(small, extended);
4214 constant_pool->ClearPtrEntries(isolate());
4215 return constant_pool;
4219 AllocationResult Heap::AllocateEmptyConstantPoolArray() {
4220 ConstantPoolArray::NumberOfEntries small(0, 0, 0, 0);
4221 int size = ConstantPoolArray::SizeFor(small);
4224 AllocationResult allocation =
4225 AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE);
4226 if (!allocation.To(&result)) return allocation;
4228 result->set_map_no_write_barrier(constant_pool_array_map());
4229 ConstantPoolArray::cast(result)->Init(small);
4234 AllocationResult Heap::AllocateSymbol() {
4235 // Statically ensure that it is safe to allocate symbols in paged spaces.
4236 STATIC_ASSERT(Symbol::kSize <= Page::kMaxRegularHeapObjectSize);
4239 AllocationResult allocation =
4240 AllocateRaw(Symbol::kSize, OLD_POINTER_SPACE, OLD_POINTER_SPACE);
4241 if (!allocation.To(&result)) return allocation;
4243 result->set_map_no_write_barrier(symbol_map());
4245 // Generate a random hash value.
4249 hash = isolate()->random_number_generator()->NextInt() & Name::kHashBitMask;
4251 } while (hash == 0 && attempts < 30);
4252 if (hash == 0) hash = 1; // never return 0
4254 Symbol::cast(result)
4255 ->set_hash_field(Name::kIsNotArrayIndexMask | (hash << Name::kHashShift));
4256 Symbol::cast(result)->set_name(undefined_value());
4257 Symbol::cast(result)->set_flags(Smi::FromInt(0));
4259 DCHECK(!Symbol::cast(result)->is_private());
4264 AllocationResult Heap::AllocateStruct(InstanceType type) {
4267 #define MAKE_CASE(NAME, Name, name) \
4269 map = name##_map(); \
4271 STRUCT_LIST(MAKE_CASE)
4277 int size = map->instance_size();
4278 AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, TENURED);
4281 AllocationResult allocation = Allocate(map, space);
4282 if (!allocation.To(&result)) return allocation;
4284 result->InitializeBody(size);
4289 bool Heap::IsHeapIterable() {
4290 // TODO(hpayer): This function is not correct. Allocation folding in old
4291 // space breaks the iterability.
4292 return new_space_top_after_last_gc_ == new_space()->top();
4296 void Heap::MakeHeapIterable() {
4297 DCHECK(AllowHeapAllocation::IsAllowed());
4298 if (!IsHeapIterable()) {
4299 CollectAllGarbage(kMakeHeapIterableMask, "Heap::MakeHeapIterable");
4301 if (mark_compact_collector()->sweeping_in_progress()) {
4302 mark_compact_collector()->EnsureSweepingCompleted();
4304 DCHECK(IsHeapIterable());
4308 void Heap::AdvanceIdleIncrementalMarking(intptr_t step_size) {
4309 incremental_marking()->Step(step_size,
4310 IncrementalMarking::NO_GC_VIA_STACK_GUARD, true);
4312 if (incremental_marking()->IsComplete()) {
4313 bool uncommit = false;
4314 if (gc_count_at_last_idle_gc_ == gc_count_) {
4315 // No GC since the last full GC, the mutator is probably not active.
4316 isolate_->compilation_cache()->Clear();
4319 CollectAllGarbage(kReduceMemoryFootprintMask,
4320 "idle notification: finalize incremental");
4321 gc_idle_time_handler_.NotifyIdleMarkCompact();
4322 gc_count_at_last_idle_gc_ = gc_count_;
4324 new_space_.Shrink();
4325 UncommitFromSpace();
4331 bool Heap::WorthActivatingIncrementalMarking() {
4332 return incremental_marking()->IsStopped() &&
4333 incremental_marking()->WorthActivating() && NextGCIsLikelyToBeFull();
4337 bool Heap::IdleNotification(int idle_time_in_ms) {
4338 // If incremental marking is off, we do not perform idle notification.
4339 if (!FLAG_incremental_marking) return true;
4340 base::ElapsedTimer timer;
4342 isolate()->counters()->gc_idle_time_allotted_in_ms()->AddSample(
4344 HistogramTimerScope idle_notification_scope(
4345 isolate_->counters()->gc_idle_notification());
4347 GCIdleTimeHandler::HeapState heap_state;
4348 heap_state.contexts_disposed = contexts_disposed_;
4349 heap_state.size_of_objects = static_cast<size_t>(SizeOfObjects());
4350 heap_state.incremental_marking_stopped = incremental_marking()->IsStopped();
4351 // TODO(ulan): Start incremental marking only for large heaps.
4352 heap_state.can_start_incremental_marking =
4353 incremental_marking()->ShouldActivate();
4354 heap_state.sweeping_in_progress =
4355 mark_compact_collector()->sweeping_in_progress();
4356 heap_state.mark_compact_speed_in_bytes_per_ms =
4357 static_cast<size_t>(tracer()->MarkCompactSpeedInBytesPerMillisecond());
4358 heap_state.incremental_marking_speed_in_bytes_per_ms = static_cast<size_t>(
4359 tracer()->IncrementalMarkingSpeedInBytesPerMillisecond());
4360 heap_state.scavenge_speed_in_bytes_per_ms =
4361 static_cast<size_t>(tracer()->ScavengeSpeedInBytesPerMillisecond());
4362 heap_state.available_new_space_memory = new_space_.Available();
4363 heap_state.new_space_capacity = new_space_.Capacity();
4364 heap_state.new_space_allocation_throughput_in_bytes_per_ms =
4365 static_cast<size_t>(
4366 tracer()->NewSpaceAllocationThroughputInBytesPerMillisecond());
4368 GCIdleTimeAction action =
4369 gc_idle_time_handler_.Compute(idle_time_in_ms, heap_state);
4371 bool result = false;
4372 switch (action.type) {
4376 case DO_INCREMENTAL_MARKING:
4377 if (incremental_marking()->IsStopped()) {
4378 incremental_marking()->Start();
4380 AdvanceIdleIncrementalMarking(action.parameter);
4383 HistogramTimerScope scope(isolate_->counters()->gc_context());
4384 const char* message = contexts_disposed_
4385 ? "idle notification: contexts disposed"
4386 : "idle notification: finalize idle round";
4387 CollectAllGarbage(kReduceMemoryFootprintMask, message);
4388 gc_idle_time_handler_.NotifyIdleMarkCompact();
4392 CollectGarbage(NEW_SPACE, "idle notification: scavenge");
4394 case DO_FINALIZE_SWEEPING:
4395 mark_compact_collector()->EnsureSweepingCompleted();
4401 int actual_time_ms = static_cast<int>(timer.Elapsed().InMilliseconds());
4402 if (actual_time_ms <= idle_time_in_ms) {
4403 isolate()->counters()->gc_idle_time_limit_undershot()->AddSample(
4404 idle_time_in_ms - actual_time_ms);
4406 isolate()->counters()->gc_idle_time_limit_overshot()->AddSample(
4407 actual_time_ms - idle_time_in_ms);
4410 if (FLAG_trace_idle_notification) {
4411 PrintF("Idle notification: requested idle time %d ms, actual time %d ms [",
4412 idle_time_in_ms, actual_time_ms);
4417 contexts_disposed_ = 0;
4424 void Heap::Print() {
4425 if (!HasBeenSetUp()) return;
4426 isolate()->PrintStack(stdout);
4427 AllSpaces spaces(this);
4428 for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
4434 void Heap::ReportCodeStatistics(const char* title) {
4435 PrintF(">>>>>> Code Stats (%s) >>>>>>\n", title);
4436 PagedSpace::ResetCodeStatistics(isolate());
4437 // We do not look for code in new space, map space, or old space. If code
4438 // somehow ends up in those spaces, we would miss it here.
4439 code_space_->CollectCodeStatistics();
4440 lo_space_->CollectCodeStatistics();
4441 PagedSpace::ReportCodeStatistics(isolate());
4445 // This function expects that NewSpace's allocated objects histogram is
4446 // populated (via a call to CollectStatistics or else as a side effect of a
4447 // just-completed scavenge collection).
4448 void Heap::ReportHeapStatistics(const char* title) {
4450 PrintF(">>>>>> =============== %s (%d) =============== >>>>>>\n", title,
4452 PrintF("old_generation_allocation_limit_ %" V8_PTR_PREFIX "d\n",
4453 old_generation_allocation_limit_);
4456 PrintF("Number of handles : %d\n", HandleScope::NumberOfHandles(isolate_));
4457 isolate_->global_handles()->PrintStats();
4460 PrintF("Heap statistics : ");
4461 isolate_->memory_allocator()->ReportStatistics();
4462 PrintF("To space : ");
4463 new_space_.ReportStatistics();
4464 PrintF("Old pointer space : ");
4465 old_pointer_space_->ReportStatistics();
4466 PrintF("Old data space : ");
4467 old_data_space_->ReportStatistics();
4468 PrintF("Code space : ");
4469 code_space_->ReportStatistics();
4470 PrintF("Map space : ");
4471 map_space_->ReportStatistics();
4472 PrintF("Cell space : ");
4473 cell_space_->ReportStatistics();
4474 PrintF("PropertyCell space : ");
4475 property_cell_space_->ReportStatistics();
4476 PrintF("Large object space : ");
4477 lo_space_->ReportStatistics();
4478 PrintF(">>>>>> ========================================= >>>>>>\n");
4483 bool Heap::Contains(HeapObject* value) { return Contains(value->address()); }
4486 bool Heap::Contains(Address addr) {
4487 if (isolate_->memory_allocator()->IsOutsideAllocatedSpace(addr)) return false;
4488 return HasBeenSetUp() &&
4489 (new_space_.ToSpaceContains(addr) ||
4490 old_pointer_space_->Contains(addr) ||
4491 old_data_space_->Contains(addr) || code_space_->Contains(addr) ||
4492 map_space_->Contains(addr) || cell_space_->Contains(addr) ||
4493 property_cell_space_->Contains(addr) ||
4494 lo_space_->SlowContains(addr));
4498 bool Heap::InSpace(HeapObject* value, AllocationSpace space) {
4499 return InSpace(value->address(), space);
4503 bool Heap::InSpace(Address addr, AllocationSpace space) {
4504 if (isolate_->memory_allocator()->IsOutsideAllocatedSpace(addr)) return false;
4505 if (!HasBeenSetUp()) return false;
4509 return new_space_.ToSpaceContains(addr);
4510 case OLD_POINTER_SPACE:
4511 return old_pointer_space_->Contains(addr);
4512 case OLD_DATA_SPACE:
4513 return old_data_space_->Contains(addr);
4515 return code_space_->Contains(addr);
4517 return map_space_->Contains(addr);
4519 return cell_space_->Contains(addr);
4520 case PROPERTY_CELL_SPACE:
4521 return property_cell_space_->Contains(addr);
4523 return lo_space_->SlowContains(addr);
4533 void Heap::Verify() {
4534 CHECK(HasBeenSetUp());
4535 HandleScope scope(isolate());
4537 store_buffer()->Verify();
4539 if (mark_compact_collector()->sweeping_in_progress()) {
4540 // We have to wait here for the sweeper threads to have an iterable heap.
4541 mark_compact_collector()->EnsureSweepingCompleted();
4544 VerifyPointersVisitor visitor;
4545 IterateRoots(&visitor, VISIT_ONLY_STRONG);
4547 VerifySmisVisitor smis_visitor;
4548 IterateSmiRoots(&smis_visitor);
4550 new_space_.Verify();
4552 old_pointer_space_->Verify(&visitor);
4553 map_space_->Verify(&visitor);
4555 VerifyPointersVisitor no_dirty_regions_visitor;
4556 old_data_space_->Verify(&no_dirty_regions_visitor);
4557 code_space_->Verify(&no_dirty_regions_visitor);
4558 cell_space_->Verify(&no_dirty_regions_visitor);
4559 property_cell_space_->Verify(&no_dirty_regions_visitor);
4561 lo_space_->Verify();
4566 void Heap::ZapFromSpace() {
4567 NewSpacePageIterator it(new_space_.FromSpaceStart(),
4568 new_space_.FromSpaceEnd());
4569 while (it.has_next()) {
4570 NewSpacePage* page = it.next();
4571 for (Address cursor = page->area_start(), limit = page->area_end();
4572 cursor < limit; cursor += kPointerSize) {
4573 Memory::Address_at(cursor) = kFromSpaceZapValue;
4579 void Heap::IterateAndMarkPointersToFromSpace(Address start, Address end,
4580 ObjectSlotCallback callback) {
4581 Address slot_address = start;
4583 // We are not collecting slots on new space objects during mutation,
4584 // thus we have to scan for pointers to evacuation candidates when we
4585 // promote objects. But we should not record any slots in non-black
4586 // objects. A grey object's slots would be rescanned.
4587 // A white object might not survive until the end of the collection, so
4588 // it would be a violation of the invariant to record its slots.
4589 bool record_slots = false;
4590 if (incremental_marking()->IsCompacting()) {
4591 MarkBit mark_bit = Marking::MarkBitFrom(HeapObject::FromAddress(start));
4592 record_slots = Marking::IsBlack(mark_bit);
4595 while (slot_address < end) {
4596 Object** slot = reinterpret_cast<Object**>(slot_address);
4597 Object* object = *slot;
4598 // If the store buffer becomes overfull we mark pages as being exempt from
4599 // the store buffer. These pages are scanned to find pointers that point
4600 // to the new space. In that case we may hit newly promoted objects and
4601 // fix the pointers before the promotion queue gets to them. Thus the 'if'.
4602 if (object->IsHeapObject()) {
4603 if (Heap::InFromSpace(object)) {
4604 callback(reinterpret_cast<HeapObject**>(slot),
4605 HeapObject::cast(object));
4606 Object* new_object = *slot;
4607 if (InNewSpace(new_object)) {
4608 SLOW_DCHECK(Heap::InToSpace(new_object));
4609 SLOW_DCHECK(new_object->IsHeapObject());
4610 store_buffer_.EnterDirectlyIntoStoreBuffer(
4611 reinterpret_cast<Address>(slot));
4613 SLOW_DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(new_object));
4614 } else if (record_slots &&
4615 MarkCompactCollector::IsOnEvacuationCandidate(object)) {
4616 mark_compact_collector()->RecordSlot(slot, slot, object);
4619 slot_address += kPointerSize;
4625 typedef bool (*CheckStoreBufferFilter)(Object** addr);
4628 bool IsAMapPointerAddress(Object** addr) {
4629 uintptr_t a = reinterpret_cast<uintptr_t>(addr);
4630 int mod = a % Map::kSize;
4631 return mod >= Map::kPointerFieldsBeginOffset &&
4632 mod < Map::kPointerFieldsEndOffset;
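// Illustrative note: this filter assumes that Map objects in map space are
// laid out back to back at Map::kSize intervals, so addr % Map::kSize gives
// the offset of the slot within its Map; only offsets in
// [kPointerFieldsBeginOffset, kPointerFieldsEndOffset) can hold heap pointers
// that the store buffer needs to cover.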
4636 bool EverythingsAPointer(Object** addr) { return true; }
4639 static void CheckStoreBuffer(Heap* heap, Object** current, Object** limit,
4640 Object**** store_buffer_position,
4641 Object*** store_buffer_top,
4642 CheckStoreBufferFilter filter,
4643 Address special_garbage_start,
4644 Address special_garbage_end) {
4645 Map* free_space_map = heap->free_space_map();
4646 for (; current < limit; current++) {
4647 Object* o = *current;
4648 Address current_address = reinterpret_cast<Address>(current);
4650 if (o == free_space_map) {
4651 Address current_address = reinterpret_cast<Address>(current);
4652 FreeSpace* free_space =
4653 FreeSpace::cast(HeapObject::FromAddress(current_address));
4654 int skip = free_space->Size();
4655 DCHECK(current_address + skip <= reinterpret_cast<Address>(limit));
4657 current_address += skip - kPointerSize;
4658 current = reinterpret_cast<Object**>(current_address);
4661 // Skip the current linear allocation space between top and limit, which
4662 // is not marked with the free space map but can contain junk.
4663 if (current_address == special_garbage_start &&
4664 special_garbage_end != special_garbage_start) {
4665 current_address = special_garbage_end - kPointerSize;
4666 current = reinterpret_cast<Object**>(current_address);
4669 if (!(*filter)(current)) continue;
4670 DCHECK(current_address < special_garbage_start ||
4671 current_address >= special_garbage_end);
4672 DCHECK(reinterpret_cast<uintptr_t>(o) != kFreeListZapValue);
4673 // We have to check that the pointer does not point into new space
4674 // without trying to cast it to a heap object since the hash field of
4675 // a string can contain values like 1 and 3 which are tagged null pointers.
4677 if (!heap->InNewSpace(o)) continue;
4678 while (**store_buffer_position < current &&
4679 *store_buffer_position < store_buffer_top) {
4680 (*store_buffer_position)++;
4682 if (**store_buffer_position != current ||
4683 *store_buffer_position == store_buffer_top) {
4684 Object** obj_start = current;
4685 while (!(*obj_start)->IsMap()) obj_start--;
4692 // Check that the store buffer contains all intergenerational pointers by
4693 // scanning a page and ensuring that all pointers to young space are in the store buffer.
4695 void Heap::OldPointerSpaceCheckStoreBuffer() {
4696 OldSpace* space = old_pointer_space();
4697 PageIterator pages(space);
4699 store_buffer()->SortUniq();
4701 while (pages.has_next()) {
4702 Page* page = pages.next();
4703 Object** current = reinterpret_cast<Object**>(page->area_start());
4705 Address end = page->area_end();
4707 Object*** store_buffer_position = store_buffer()->Start();
4708 Object*** store_buffer_top = store_buffer()->Top();
4710 Object** limit = reinterpret_cast<Object**>(end);
4711 CheckStoreBuffer(this, current, limit, &store_buffer_position,
4712 store_buffer_top, &EverythingsAPointer, space->top(),
4718 void Heap::MapSpaceCheckStoreBuffer() {
4719 MapSpace* space = map_space();
4720 PageIterator pages(space);
4722 store_buffer()->SortUniq();
4724 while (pages.has_next()) {
4725 Page* page = pages.next();
4726 Object** current = reinterpret_cast<Object**>(page->area_start());
4728 Address end = page->area_end();
4730 Object*** store_buffer_position = store_buffer()->Start();
4731 Object*** store_buffer_top = store_buffer()->Top();
4733 Object** limit = reinterpret_cast<Object**>(end);
4734 CheckStoreBuffer(this, current, limit, &store_buffer_position,
4735 store_buffer_top, &IsAMapPointerAddress, space->top(),
4741 void Heap::LargeObjectSpaceCheckStoreBuffer() {
4742 LargeObjectIterator it(lo_space());
4743 for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
4744 // We only have code, sequential strings, or fixed arrays in large
4745 // object space, and only fixed arrays can possibly contain pointers to
4746 // the young generation.
4747 if (object->IsFixedArray()) {
4748 Object*** store_buffer_position = store_buffer()->Start();
4749 Object*** store_buffer_top = store_buffer()->Top();
4750 Object** current = reinterpret_cast<Object**>(object->address());
4752 reinterpret_cast<Object**>(object->address() + object->Size());
4753 CheckStoreBuffer(this, current, limit, &store_buffer_position,
4754 store_buffer_top, &EverythingsAPointer, NULL, NULL);
4761 void Heap::IterateRoots(ObjectVisitor* v, VisitMode mode) {
4762 IterateStrongRoots(v, mode);
4763 IterateWeakRoots(v, mode);
4767 void Heap::IterateWeakRoots(ObjectVisitor* v, VisitMode mode) {
4768 v->VisitPointer(reinterpret_cast<Object**>(&roots_[kStringTableRootIndex]));
4769 v->Synchronize(VisitorSynchronization::kStringTable);
4770 if (mode != VISIT_ALL_IN_SCAVENGE && mode != VISIT_ALL_IN_SWEEP_NEWSPACE) {
4771 // Scavenge collections have special processing for this.
4772 external_string_table_.Iterate(v);
4774 v->Synchronize(VisitorSynchronization::kExternalStringsTable);
4778 void Heap::IterateSmiRoots(ObjectVisitor* v) {
4779 // Acquire execution access since we are going to read stack limit values.
4780 ExecutionAccess access(isolate());
4781 v->VisitPointers(&roots_[kSmiRootsStart], &roots_[kRootListLength]);
4782 v->Synchronize(VisitorSynchronization::kSmiRootList);
4786 void Heap::IterateStrongRoots(ObjectVisitor* v, VisitMode mode) {
4787 v->VisitPointers(&roots_[0], &roots_[kStrongRootListLength]);
4788 v->Synchronize(VisitorSynchronization::kStrongRootList);
4790 v->VisitPointer(bit_cast<Object**>(&hidden_string_));
4791 v->Synchronize(VisitorSynchronization::kInternalizedString);
4793 isolate_->bootstrapper()->Iterate(v);
4794 v->Synchronize(VisitorSynchronization::kBootstrapper);
4795 isolate_->Iterate(v);
4796 v->Synchronize(VisitorSynchronization::kTop);
4797 Relocatable::Iterate(isolate_, v);
4798 v->Synchronize(VisitorSynchronization::kRelocatable);
4800 if (isolate_->deoptimizer_data() != NULL) {
4801 isolate_->deoptimizer_data()->Iterate(v);
4803 v->Synchronize(VisitorSynchronization::kDebug);
4804 isolate_->compilation_cache()->Iterate(v);
4805 v->Synchronize(VisitorSynchronization::kCompilationCache);
4807 // Iterate over local handles in handle scopes.
4808 isolate_->handle_scope_implementer()->Iterate(v);
4809 isolate_->IterateDeferredHandles(v);
4810 v->Synchronize(VisitorSynchronization::kHandleScope);
4812 // Iterate over the builtin code objects and code stubs in the
4813 // heap. Note that it is not necessary to iterate over code objects
4814 // on scavenge collections.
4815 if (mode != VISIT_ALL_IN_SCAVENGE) {
4816 isolate_->builtins()->IterateBuiltins(v);
4818 v->Synchronize(VisitorSynchronization::kBuiltins);
4820 // Iterate over global handles.
4822 case VISIT_ONLY_STRONG:
4823 isolate_->global_handles()->IterateStrongRoots(v);
4825 case VISIT_ALL_IN_SCAVENGE:
4826 isolate_->global_handles()->IterateNewSpaceStrongAndDependentRoots(v);
4828 case VISIT_ALL_IN_SWEEP_NEWSPACE:
4830 isolate_->global_handles()->IterateAllRoots(v);
4833 v->Synchronize(VisitorSynchronization::kGlobalHandles);
4835 // Iterate over eternal handles.
4836 if (mode == VISIT_ALL_IN_SCAVENGE) {
4837 isolate_->eternal_handles()->IterateNewSpaceRoots(v);
4839 isolate_->eternal_handles()->IterateAllRoots(v);
4841 v->Synchronize(VisitorSynchronization::kEternalHandles);
4843 // Iterate over pointers being held by inactive threads.
4844 isolate_->thread_manager()->Iterate(v);
4845 v->Synchronize(VisitorSynchronization::kThreadManager);
4847 // Iterate over the pointers the Serialization/Deserialization code is holding.
4849 // During garbage collection this keeps the partial snapshot cache alive.
4850 // During deserialization of the startup snapshot this creates the partial
4851 // snapshot cache and deserializes the objects it refers to. During
4852 // serialization this does nothing, since the partial snapshot cache is
4853 // empty. However the next thing we do is create the partial snapshot,
4854 // filling up the partial snapshot cache with objects it needs as we go.
4855 SerializerDeserializer::Iterate(isolate_, v);
4856 // We don't do a v->Synchronize call here, because in debug mode that will
4857 // output a flag to the snapshot. However at this point the serializer and
4858 // deserializer are deliberately a little unsynchronized (see above) so the
4859 // checking of the sync flag in the snapshot would fail.
4863 // TODO(1236194): Since the heap size is configurable on the command line
4864 // and through the API, we should gracefully handle the case that the heap
4865 // size is not big enough to fit all the initial objects.
4866 bool Heap::ConfigureHeap(int max_semi_space_size, int max_old_space_size,
4867 int max_executable_size, size_t code_range_size) {
4868 if (HasBeenSetUp()) return false;
4870 // Overwrite default configuration.
4871 if (max_semi_space_size > 0) {
4872 max_semi_space_size_ = max_semi_space_size * MB;
4874 if (max_old_space_size > 0) {
4875 max_old_generation_size_ = max_old_space_size * MB;
4877 if (max_executable_size > 0) {
4878 max_executable_size_ = max_executable_size * MB;
4881 // If max space size flags are specified overwrite the configuration.
4882 if (FLAG_max_semi_space_size > 0) {
4883 max_semi_space_size_ = FLAG_max_semi_space_size * MB;
4885 if (FLAG_max_old_space_size > 0) {
4886 max_old_generation_size_ = FLAG_max_old_space_size * MB;
4888 if (FLAG_max_executable_size > 0) {
4889 max_executable_size_ = FLAG_max_executable_size * MB;
4892 if (FLAG_stress_compaction) {
4893 // This will cause more frequent GCs when stressing.
4894 max_semi_space_size_ = Page::kPageSize;
4897 if (Snapshot::HaveASnapshotToStartFrom()) {
4898 // If we are using a snapshot we always reserve the default amount
4899 // of memory for each semispace because code in the snapshot has
4900 // write-barrier code that relies on the size and alignment of new
4901 // space. We therefore cannot use a larger max semispace size
4902 // than the default reserved semispace size.
4903 if (max_semi_space_size_ > reserved_semispace_size_) {
4904 max_semi_space_size_ = reserved_semispace_size_;
4905 if (FLAG_trace_gc) {
4906 PrintPID("Max semi-space size cannot be more than %d kbytes\n",
4907 reserved_semispace_size_ >> 10);
4911 // If we are not using snapshots we reserve space for the actual
4912 // max semispace size.
4913 reserved_semispace_size_ = max_semi_space_size_;
4916 // The max executable size must be less than or equal to the max old generation size.
4918 if (max_executable_size_ > max_old_generation_size_) {
4919 max_executable_size_ = max_old_generation_size_;
4922 // The new space size must be a power of two to support single-bit testing for containment.
4924 max_semi_space_size_ =
4925 base::bits::RoundUpToPowerOfTwo32(max_semi_space_size_);
4926 reserved_semispace_size_ =
4927 base::bits::RoundUpToPowerOfTwo32(reserved_semispace_size_);
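// Worked example (illustrative): a requested max semi-space size of 3 MB
// (3145728 bytes) is rounded up to 4 MB here, so that new-space containment
// can be tested with a single address mask.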
4929 if (FLAG_min_semi_space_size > 0) {
4930 int initial_semispace_size = FLAG_min_semi_space_size * MB;
4931 if (initial_semispace_size > max_semi_space_size_) {
4932 initial_semispace_size_ = max_semi_space_size_;
4933 if (FLAG_trace_gc) {
4935 "Min semi-space size cannot be more than the maximum"
4936 "semi-space size of %d MB\n",
4937 max_semi_space_size_);
4940 initial_semispace_size_ = initial_semispace_size;
4944 initial_semispace_size_ = Min(initial_semispace_size_, max_semi_space_size_);
4946 // The old generation is paged and needs at least one page for each space.
4947 int paged_space_count = LAST_PAGED_SPACE - FIRST_PAGED_SPACE + 1;
4948 max_old_generation_size_ =
4949 Max(static_cast<intptr_t>(paged_space_count * Page::kPageSize),
4950 max_old_generation_size_);
4952 // We rely on being able to allocate new arrays in paged spaces.
4953 DCHECK(Page::kMaxRegularHeapObjectSize >=
4955 FixedArray::SizeFor(JSObject::kInitialMaxFastElementArray) +
4956 AllocationMemento::kSize));
4958 code_range_size_ = code_range_size * MB;
4965 bool Heap::ConfigureHeapDefault() { return ConfigureHeap(0, 0, 0, 0); }
4968 void Heap::RecordStats(HeapStats* stats, bool take_snapshot) {
4969 *stats->start_marker = HeapStats::kStartMarker;
4970 *stats->end_marker = HeapStats::kEndMarker;
4971 *stats->new_space_size = new_space_.SizeAsInt();
4972 *stats->new_space_capacity = static_cast<int>(new_space_.Capacity());
4973 *stats->old_pointer_space_size = old_pointer_space_->SizeOfObjects();
4974 *stats->old_pointer_space_capacity = old_pointer_space_->Capacity();
4975 *stats->old_data_space_size = old_data_space_->SizeOfObjects();
4976 *stats->old_data_space_capacity = old_data_space_->Capacity();
4977 *stats->code_space_size = code_space_->SizeOfObjects();
4978 *stats->code_space_capacity = code_space_->Capacity();
4979 *stats->map_space_size = map_space_->SizeOfObjects();
4980 *stats->map_space_capacity = map_space_->Capacity();
4981 *stats->cell_space_size = cell_space_->SizeOfObjects();
4982 *stats->cell_space_capacity = cell_space_->Capacity();
4983 *stats->property_cell_space_size = property_cell_space_->SizeOfObjects();
4984 *stats->property_cell_space_capacity = property_cell_space_->Capacity();
4985 *stats->lo_space_size = lo_space_->Size();
4986 isolate_->global_handles()->RecordStats(stats);
4987 *stats->memory_allocator_size = isolate()->memory_allocator()->Size();
4988 *stats->memory_allocator_capacity =
4989 isolate()->memory_allocator()->Size() +
4990 isolate()->memory_allocator()->Available();
4991 *stats->os_error = base::OS::GetLastError();
4993 if (take_snapshot) {
4994 HeapIterator iterator(this);
4995 for (HeapObject* obj = iterator.next(); obj != NULL;
4996 obj = iterator.next()) {
4997 InstanceType type = obj->map()->instance_type();
4998 DCHECK(0 <= type && type <= LAST_TYPE);
4999 stats->objects_per_type[type]++;
5000 stats->size_per_type[type] += obj->Size();
5006 intptr_t Heap::PromotedSpaceSizeOfObjects() {
5007 return old_pointer_space_->SizeOfObjects() +
5008 old_data_space_->SizeOfObjects() + code_space_->SizeOfObjects() +
5009 map_space_->SizeOfObjects() + cell_space_->SizeOfObjects() +
5010 property_cell_space_->SizeOfObjects() + lo_space_->SizeOfObjects();
5014 int64_t Heap::PromotedExternalMemorySize() {
5015 if (amount_of_external_allocated_memory_ <=
5016 amount_of_external_allocated_memory_at_last_global_gc_)
5018 return amount_of_external_allocated_memory_ -
5019 amount_of_external_allocated_memory_at_last_global_gc_;
5023 intptr_t Heap::OldGenerationAllocationLimit(intptr_t old_gen_size,
5024 int freed_global_handles) {
5025 const int kMaxHandles = 1000;
5026 const int kMinHandles = 100;
5027 double min_factor = 1.1;
5028 double max_factor = 4;
5029 // We set the old generation growing factor to 2 to grow the heap slower on
5030 // memory-constrained devices.
5031 if (max_old_generation_size_ <= kMaxOldSpaceSizeMediumMemoryDevice) {
5034 // If there are many freed global handles, then the next full GC will
5035 // likely collect a lot of garbage. Choose the heap growing factor
5036 // depending on freed global handles.
5037 // TODO(ulan, hpayer): Take into account mutator utilization.
5039 if (freed_global_handles <= kMinHandles) {
5040 factor = max_factor;
5041 } else if (freed_global_handles >= kMaxHandles) {
5042 factor = min_factor;
5044 // Compute factor using linear interpolation between points
5045 // (kMinHandles, max_factor) and (kMaxHandles, min_factor).
5046 factor = max_factor -
5047 (freed_global_handles - kMinHandles) * (max_factor - min_factor) /
5048 (kMaxHandles - kMinHandles);
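// Worked example (illustrative, with the default max_factor of 4): for
// freed_global_handles == 550 the interpolation gives
// factor = 4 - (550 - 100) * (4 - 1.1) / (1000 - 100) = 4 - 1.45 = 2.55,
// i.e. exactly halfway between min_factor and max_factor.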
5051 if (FLAG_stress_compaction ||
5052 mark_compact_collector()->reduce_memory_footprint_) {
5053 factor = min_factor;
5056 intptr_t limit = static_cast<intptr_t>(old_gen_size * factor);
5057 limit = Max(limit, kMinimumOldGenerationAllocationLimit);
5058 limit += new_space_.Capacity();
5059 intptr_t halfway_to_the_max = (old_gen_size + max_old_generation_size_) / 2;
5060 return Min(limit, halfway_to_the_max);
5064 void Heap::EnableInlineAllocation() {
5065 if (!inline_allocation_disabled_) return;
5066 inline_allocation_disabled_ = false;
5068 // Update inline allocation limit for new space.
5069 new_space()->UpdateInlineAllocationLimit(0);
5073 void Heap::DisableInlineAllocation() {
5074 if (inline_allocation_disabled_) return;
5075 inline_allocation_disabled_ = true;
5077 // Update inline allocation limit for new space.
5078 new_space()->UpdateInlineAllocationLimit(0);
5080 // Update inline allocation limit for old spaces.
5081 PagedSpaces spaces(this);
5082 for (PagedSpace* space = spaces.next(); space != NULL;
5083 space = spaces.next()) {
5084 space->EmptyAllocationInfo();
5089 V8_DECLARE_ONCE(initialize_gc_once);
5091 static void InitializeGCOnce() {
5092 InitializeScavengingVisitorsTables();
5093 NewSpaceScavenger::Initialize();
5094 MarkCompactCollector::Initialize();
5098 bool Heap::SetUp() {
5100 allocation_timeout_ = FLAG_gc_interval;
5103 // Initialize heap spaces and initial maps and objects. Whenever something
5104 // goes wrong, just return false. The caller should check the results and
5105 // call Heap::TearDown() to release allocated memory.
5107 // If the heap is not yet configured (e.g. through the API), configure it.
5108 // Configuration is based on the flags new-space-size (really the semispace
5109 // size) and old-space-size if set or the initial values of semispace_size_
5110 // and old_generation_size_ otherwise.
5112 if (!ConfigureHeapDefault()) return false;
5115 base::CallOnce(&initialize_gc_once, &InitializeGCOnce);
5117 MarkMapPointersAsEncoded(false);
5119 // Set up memory allocator.
5120 if (!isolate_->memory_allocator()->SetUp(MaxReserved(), MaxExecutableSize()))
5123 // Set up new space.
5124 if (!new_space_.SetUp(reserved_semispace_size_, max_semi_space_size_)) {
5127 new_space_top_after_last_gc_ = new_space()->top();
5129 // Initialize old pointer space.
5130 old_pointer_space_ = new OldSpace(this, max_old_generation_size_,
5131 OLD_POINTER_SPACE, NOT_EXECUTABLE);
5132 if (old_pointer_space_ == NULL) return false;
5133 if (!old_pointer_space_->SetUp()) return false;
5135 // Initialize old data space.
5136 old_data_space_ = new OldSpace(this, max_old_generation_size_, OLD_DATA_SPACE,
5138 if (old_data_space_ == NULL) return false;
5139 if (!old_data_space_->SetUp()) return false;
5141 if (!isolate_->code_range()->SetUp(code_range_size_)) return false;
5143 // Initialize the code space, set its maximum capacity to the old
5144 // generation size. It needs executable memory.
5146 new OldSpace(this, max_old_generation_size_, CODE_SPACE, EXECUTABLE);
5147 if (code_space_ == NULL) return false;
5148 if (!code_space_->SetUp()) return false;
5150 // Initialize map space.
5151 map_space_ = new MapSpace(this, max_old_generation_size_, MAP_SPACE);
5152 if (map_space_ == NULL) return false;
5153 if (!map_space_->SetUp()) return false;
5155 // Initialize simple cell space.
5156 cell_space_ = new CellSpace(this, max_old_generation_size_, CELL_SPACE);
5157 if (cell_space_ == NULL) return false;
5158 if (!cell_space_->SetUp()) return false;
5160 // Initialize global property cell space.
5161 property_cell_space_ = new PropertyCellSpace(this, max_old_generation_size_,
5162 PROPERTY_CELL_SPACE);
5163 if (property_cell_space_ == NULL) return false;
5164 if (!property_cell_space_->SetUp()) return false;
// The large object space may contain code or data. We set the memory
5167 // to be non-executable here for safety, but this means we need to enable it
5168 // explicitly when allocating large code objects.
5169 lo_space_ = new LargeObjectSpace(this, max_old_generation_size_, LO_SPACE);
5170 if (lo_space_ == NULL) return false;
5171 if (!lo_space_->SetUp()) return false;
5173 // Set up the seed that is used to randomize the string hash function.
5174 DCHECK(hash_seed() == 0);
if (FLAG_randomize_hashes) {
  if (FLAG_hash_seed == 0) {
    int rnd = isolate()->random_number_generator()->NextInt();
    set_hash_seed(Smi::FromInt(rnd & Name::kHashBitMask));
  } else {
    set_hash_seed(Smi::FromInt(FLAG_hash_seed));
  }
}
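// Illustrative example (hypothetical values): if NextInt() returns a
// negative value such as -12345, masking it with Name::kHashBitMask clears
// the sign and high bits, so the stored seed always fits in the hash field
// of a Name and in a Smi. With FLAG_hash_seed set to, say, 42, the else
// branch above simply stores 42 as the seed.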
5184 LOG(isolate_, IntPtrTEvent("heap-capacity", Capacity()));
5185 LOG(isolate_, IntPtrTEvent("heap-available", Available()));
5187 store_buffer()->SetUp();
5189 mark_compact_collector()->SetUp();
5195 bool Heap::CreateHeapObjects() {
5196 // Create initial maps.
5197 if (!CreateInitialMaps()) return false;
5200 // Create initial objects
5201 CreateInitialObjects();
5202 CHECK_EQ(0, gc_count_);
5204 set_native_contexts_list(undefined_value());
5205 set_array_buffers_list(undefined_value());
5206 set_allocation_sites_list(undefined_value());
5207 weak_object_to_code_table_ = undefined_value();
5212 void Heap::SetStackLimits() {
5213 DCHECK(isolate_ != NULL);
5214 DCHECK(isolate_ == isolate());
5215 // On 64 bit machines, pointers are generally out of range of Smis. We write
5216 // something that looks like an out of range Smi to the GC.
5218 // Set up the special root array entries containing the stack limits.
// These are actually addresses, but the tag makes the GC ignore them.
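// Illustrative example (hypothetical address): with kSmiTag == 0 and
// kSmiTagMask == 1, a jslimit of 0x7fff5fbff001 is stored below as
// (0x7fff5fbff001 & ~1) | 0 == 0x7fff5fbff000. The cleared low tag bit
// makes the value look like a Smi, so the GC skips it instead of following
// the stored stack-limit address as if it were a heap pointer.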
5220 roots_[kStackLimitRootIndex] = reinterpret_cast<Object*>(
5221 (isolate_->stack_guard()->jslimit() & ~kSmiTagMask) | kSmiTag);
5222 roots_[kRealStackLimitRootIndex] = reinterpret_cast<Object*>(
5223 (isolate_->stack_guard()->real_jslimit() & ~kSmiTagMask) | kSmiTag);
5227 void Heap::TearDown() {
5229 if (FLAG_verify_heap) {
5234 UpdateMaximumCommitted();
5236 if (FLAG_print_cumulative_gc_stat) {
5238 PrintF("gc_count=%d ", gc_count_);
5239 PrintF("mark_sweep_count=%d ", ms_count_);
5240 PrintF("max_gc_pause=%.1f ", get_max_gc_pause());
5241 PrintF("total_gc_time=%.1f ", total_gc_time_ms_);
5242 PrintF("min_in_mutator=%.1f ", get_min_in_mutator());
5243 PrintF("max_alive_after_gc=%" V8_PTR_PREFIX "d ", get_max_alive_after_gc());
5244 PrintF("total_marking_time=%.1f ", tracer_.cumulative_sweeping_duration());
5245 PrintF("total_sweeping_time=%.1f ", tracer_.cumulative_sweeping_duration());
5249 if (FLAG_print_max_heap_committed) {
5251 PrintF("maximum_committed_by_heap=%" V8_PTR_PREFIX "d ",
5252 MaximumCommittedMemory());
5253 PrintF("maximum_committed_by_new_space=%" V8_PTR_PREFIX "d ",
5254 new_space_.MaximumCommittedMemory());
5255 PrintF("maximum_committed_by_old_pointer_space=%" V8_PTR_PREFIX "d ",
5256 old_data_space_->MaximumCommittedMemory());
5257 PrintF("maximum_committed_by_old_data_space=%" V8_PTR_PREFIX "d ",
5258 old_pointer_space_->MaximumCommittedMemory());
5259 PrintF("maximum_committed_by_old_data_space=%" V8_PTR_PREFIX "d ",
5260 old_pointer_space_->MaximumCommittedMemory());
5261 PrintF("maximum_committed_by_code_space=%" V8_PTR_PREFIX "d ",
5262 code_space_->MaximumCommittedMemory());
5263 PrintF("maximum_committed_by_map_space=%" V8_PTR_PREFIX "d ",
5264 map_space_->MaximumCommittedMemory());
5265 PrintF("maximum_committed_by_cell_space=%" V8_PTR_PREFIX "d ",
5266 cell_space_->MaximumCommittedMemory());
5267 PrintF("maximum_committed_by_property_space=%" V8_PTR_PREFIX "d ",
5268 property_cell_space_->MaximumCommittedMemory());
5269 PrintF("maximum_committed_by_lo_space=%" V8_PTR_PREFIX "d ",
5270 lo_space_->MaximumCommittedMemory());
5274 if (FLAG_verify_predictable) {
5275 PrintAlloctionsHash();
5278 TearDownArrayBuffers();
5280 isolate_->global_handles()->TearDown();
5282 external_string_table_.TearDown();
5284 mark_compact_collector()->TearDown();
5286 new_space_.TearDown();
5288 if (old_pointer_space_ != NULL) {
5289 old_pointer_space_->TearDown();
5290 delete old_pointer_space_;
5291 old_pointer_space_ = NULL;
5294 if (old_data_space_ != NULL) {
5295 old_data_space_->TearDown();
5296 delete old_data_space_;
5297 old_data_space_ = NULL;
5300 if (code_space_ != NULL) {
5301 code_space_->TearDown();
5306 if (map_space_ != NULL) {
5307 map_space_->TearDown();
5312 if (cell_space_ != NULL) {
5313 cell_space_->TearDown();
5318 if (property_cell_space_ != NULL) {
5319 property_cell_space_->TearDown();
5320 delete property_cell_space_;
5321 property_cell_space_ = NULL;
5324 if (lo_space_ != NULL) {
5325 lo_space_->TearDown();
5330 store_buffer()->TearDown();
5331 incremental_marking()->TearDown();
5333 isolate_->memory_allocator()->TearDown();
5337 void Heap::AddGCPrologueCallback(v8::Isolate::GCPrologueCallback callback,
5338 GCType gc_type, bool pass_isolate) {
5339 DCHECK(callback != NULL);
5340 GCPrologueCallbackPair pair(callback, gc_type, pass_isolate);
5341 DCHECK(!gc_prologue_callbacks_.Contains(pair));
5342 return gc_prologue_callbacks_.Add(pair);
5346 void Heap::RemoveGCPrologueCallback(v8::Isolate::GCPrologueCallback callback) {
5347 DCHECK(callback != NULL);
5348 for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
5349 if (gc_prologue_callbacks_[i].callback == callback) {
5350 gc_prologue_callbacks_.Remove(i);
5358 void Heap::AddGCEpilogueCallback(v8::Isolate::GCEpilogueCallback callback,
5359 GCType gc_type, bool pass_isolate) {
5360 DCHECK(callback != NULL);
5361 GCEpilogueCallbackPair pair(callback, gc_type, pass_isolate);
5362 DCHECK(!gc_epilogue_callbacks_.Contains(pair));
5363 return gc_epilogue_callbacks_.Add(pair);
5367 void Heap::RemoveGCEpilogueCallback(v8::Isolate::GCEpilogueCallback callback) {
5368 DCHECK(callback != NULL);
5369 for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
5370 if (gc_epilogue_callbacks_[i].callback == callback) {
5371 gc_epilogue_callbacks_.Remove(i);
5379 // TODO(ishell): Find a better place for this.
5380 void Heap::AddWeakObjectToCodeDependency(Handle<Object> obj,
5381 Handle<DependentCode> dep) {
5382 DCHECK(!InNewSpace(*obj));
5383 DCHECK(!InNewSpace(*dep));
5384 // This handle scope keeps the table handle local to this function, which
5385 // allows us to safely skip write barriers in table update operations.
5386 HandleScope scope(isolate());
Handle<WeakHashTable> table(WeakHashTable::cast(weak_object_to_code_table_),
                            isolate());
5389 table = WeakHashTable::Put(table, obj, dep);
5391 if (ShouldZapGarbage() && weak_object_to_code_table_ != *table) {
5392 WeakHashTable::cast(weak_object_to_code_table_)->Zap(the_hole_value());
5394 set_weak_object_to_code_table(*table);
5395 DCHECK_EQ(*dep, table->Lookup(obj));
5399 DependentCode* Heap::LookupWeakObjectToCodeDependency(Handle<Object> obj) {
5400 Object* dep = WeakHashTable::cast(weak_object_to_code_table_)->Lookup(obj);
5401 if (dep->IsDependentCode()) return DependentCode::cast(dep);
5402 return DependentCode::cast(empty_fixed_array());
5406 void Heap::EnsureWeakObjectToCodeTable() {
5407 if (!weak_object_to_code_table()->IsHashTable()) {
5408 set_weak_object_to_code_table(
5409 *WeakHashTable::New(isolate(), 16, USE_DEFAULT_MINIMUM_CAPACITY,
5415 void Heap::FatalProcessOutOfMemory(const char* location, bool take_snapshot) {
5416 v8::internal::V8::FatalProcessOutOfMemory(location, take_snapshot);
5421 class PrintHandleVisitor : public ObjectVisitor {
5423 void VisitPointers(Object** start, Object** end) {
5424 for (Object** p = start; p < end; p++)
5425 PrintF(" handle %p to %p\n", reinterpret_cast<void*>(p),
5426 reinterpret_cast<void*>(*p));
5431 void Heap::PrintHandles() {
5432 PrintF("Handles:\n");
5433 PrintHandleVisitor v;
5434 isolate_->handle_scope_implementer()->Iterate(&v);
5440 Space* AllSpaces::next() {
5441 switch (counter_++) {
5443 return heap_->new_space();
5444 case OLD_POINTER_SPACE:
5445 return heap_->old_pointer_space();
5446 case OLD_DATA_SPACE:
5447 return heap_->old_data_space();
5449 return heap_->code_space();
5451 return heap_->map_space();
5453 return heap_->cell_space();
5454 case PROPERTY_CELL_SPACE:
5455 return heap_->property_cell_space();
5457 return heap_->lo_space();
5464 PagedSpace* PagedSpaces::next() {
5465 switch (counter_++) {
5466 case OLD_POINTER_SPACE:
5467 return heap_->old_pointer_space();
5468 case OLD_DATA_SPACE:
5469 return heap_->old_data_space();
5471 return heap_->code_space();
5473 return heap_->map_space();
5475 return heap_->cell_space();
5476 case PROPERTY_CELL_SPACE:
5477 return heap_->property_cell_space();
5484 OldSpace* OldSpaces::next() {
5485 switch (counter_++) {
5486 case OLD_POINTER_SPACE:
5487 return heap_->old_pointer_space();
5488 case OLD_DATA_SPACE:
5489 return heap_->old_data_space();
5491 return heap_->code_space();
5498 SpaceIterator::SpaceIterator(Heap* heap)
5500 current_space_(FIRST_SPACE),
5505 SpaceIterator::SpaceIterator(Heap* heap, HeapObjectCallback size_func)
5507 current_space_(FIRST_SPACE),
5509 size_func_(size_func) {}
5512 SpaceIterator::~SpaceIterator() {
5513 // Delete active iterator if any.
5518 bool SpaceIterator::has_next() {
5519 // Iterate until no more spaces.
5520 return current_space_ != LAST_SPACE;
5524 ObjectIterator* SpaceIterator::next() {
5525 if (iterator_ != NULL) {
5528 // Move to the next space
5530 if (current_space_ > LAST_SPACE) {
5535 // Return iterator for the new current space.
5536 return CreateIterator();
5540 // Create an iterator for the space to iterate.
5541 ObjectIterator* SpaceIterator::CreateIterator() {
5542 DCHECK(iterator_ == NULL);
5544 switch (current_space_) {
5546 iterator_ = new SemiSpaceIterator(heap_->new_space(), size_func_);
5548 case OLD_POINTER_SPACE:
5550 new HeapObjectIterator(heap_->old_pointer_space(), size_func_);
5552 case OLD_DATA_SPACE:
5553 iterator_ = new HeapObjectIterator(heap_->old_data_space(), size_func_);
5556 iterator_ = new HeapObjectIterator(heap_->code_space(), size_func_);
5559 iterator_ = new HeapObjectIterator(heap_->map_space(), size_func_);
5562 iterator_ = new HeapObjectIterator(heap_->cell_space(), size_func_);
5564 case PROPERTY_CELL_SPACE:
5566 new HeapObjectIterator(heap_->property_cell_space(), size_func_);
5569 iterator_ = new LargeObjectIterator(heap_->lo_space(), size_func_);
// Return the newly allocated iterator.
5574 DCHECK(iterator_ != NULL);
5579 class HeapObjectsFilter {
5581 virtual ~HeapObjectsFilter() {}
5582 virtual bool SkipObject(HeapObject* object) = 0;
5586 class UnreachableObjectsFilter : public HeapObjectsFilter {
5588 explicit UnreachableObjectsFilter(Heap* heap) : heap_(heap) {
5589 MarkReachableObjects();
5592 ~UnreachableObjectsFilter() {
5593 heap_->mark_compact_collector()->ClearMarkbits();
5596 bool SkipObject(HeapObject* object) {
5597 MarkBit mark_bit = Marking::MarkBitFrom(object);
5598 return !mark_bit.Get();
5602 class MarkingVisitor : public ObjectVisitor {
5604 MarkingVisitor() : marking_stack_(10) {}
5606 void VisitPointers(Object** start, Object** end) {
5607 for (Object** p = start; p < end; p++) {
5608 if (!(*p)->IsHeapObject()) continue;
5609 HeapObject* obj = HeapObject::cast(*p);
5610 MarkBit mark_bit = Marking::MarkBitFrom(obj);
5611 if (!mark_bit.Get()) {
5613 marking_stack_.Add(obj);
5618 void TransitiveClosure() {
5619 while (!marking_stack_.is_empty()) {
5620 HeapObject* obj = marking_stack_.RemoveLast();
5626 List<HeapObject*> marking_stack_;
5629 void MarkReachableObjects() {
5630 MarkingVisitor visitor;
5631 heap_->IterateRoots(&visitor, VISIT_ALL);
5632 visitor.TransitiveClosure();
5636 DisallowHeapAllocation no_allocation_;
5640 HeapIterator::HeapIterator(Heap* heap)
5641 : make_heap_iterable_helper_(heap),
5642 no_heap_allocation_(),
5644 filtering_(HeapIterator::kNoFiltering),
5650 HeapIterator::HeapIterator(Heap* heap,
5651 HeapIterator::HeapObjectsFiltering filtering)
5652 : make_heap_iterable_helper_(heap),
5653 no_heap_allocation_(),
5655 filtering_(filtering),
5661 HeapIterator::~HeapIterator() { Shutdown(); }
5664 void HeapIterator::Init() {
5665 // Start the iteration.
5666 space_iterator_ = new SpaceIterator(heap_);
5667 switch (filtering_) {
5668 case kFilterUnreachable:
5669 filter_ = new UnreachableObjectsFilter(heap_);
5674 object_iterator_ = space_iterator_->next();
5678 void HeapIterator::Shutdown() {
5680 // Assert that in filtering mode we have iterated through all
// objects. Otherwise, the heap will be left in an inconsistent state.
5682 if (filtering_ != kNoFiltering) {
5683 DCHECK(object_iterator_ == NULL);
5686 // Make sure the last iterator is deallocated.
5687 delete space_iterator_;
5688 space_iterator_ = NULL;
5689 object_iterator_ = NULL;
5695 HeapObject* HeapIterator::next() {
5696 if (filter_ == NULL) return NextObject();
5698 HeapObject* obj = NextObject();
5699 while (obj != NULL && filter_->SkipObject(obj)) obj = NextObject();
5704 HeapObject* HeapIterator::NextObject() {
5705 // No iterator means we are done.
5706 if (object_iterator_ == NULL) return NULL;
5708 if (HeapObject* obj = object_iterator_->next_object()) {
5709 // If the current iterator has more objects we are fine.
// Go through the spaces looking for one that has objects.
5713 while (space_iterator_->has_next()) {
5714 object_iterator_ = space_iterator_->next();
5715 if (HeapObject* obj = object_iterator_->next_object()) {
5720 // Done with the last space.
5721 object_iterator_ = NULL;
5726 void HeapIterator::reset() {
5727 // Restart the iterator.
5735 Object* const PathTracer::kAnyGlobalObject = NULL;
5737 class PathTracer::MarkVisitor : public ObjectVisitor {
5739 explicit MarkVisitor(PathTracer* tracer) : tracer_(tracer) {}
5740 void VisitPointers(Object** start, Object** end) {
5741 // Scan all HeapObject pointers in [start, end)
5742 for (Object** p = start; !tracer_->found() && (p < end); p++) {
5743 if ((*p)->IsHeapObject()) tracer_->MarkRecursively(p, this);
5748 PathTracer* tracer_;
5752 class PathTracer::UnmarkVisitor : public ObjectVisitor {
5754 explicit UnmarkVisitor(PathTracer* tracer) : tracer_(tracer) {}
5755 void VisitPointers(Object** start, Object** end) {
5756 // Scan all HeapObject pointers in [start, end)
5757 for (Object** p = start; p < end; p++) {
5758 if ((*p)->IsHeapObject()) tracer_->UnmarkRecursively(p, this);
5763 PathTracer* tracer_;
5767 void PathTracer::VisitPointers(Object** start, Object** end) {
5768 bool done = ((what_to_find_ == FIND_FIRST) && found_target_);
5769 // Visit all HeapObject pointers in [start, end)
5770 for (Object** p = start; !done && (p < end); p++) {
5771 if ((*p)->IsHeapObject()) {
5773 done = ((what_to_find_ == FIND_FIRST) && found_target_);
5779 void PathTracer::Reset() {
5780 found_target_ = false;
5781 object_stack_.Clear();
5785 void PathTracer::TracePathFrom(Object** root) {
5786 DCHECK((search_target_ == kAnyGlobalObject) ||
5787 search_target_->IsHeapObject());
5788 found_target_in_trace_ = false;
5791 MarkVisitor mark_visitor(this);
5792 MarkRecursively(root, &mark_visitor);
5794 UnmarkVisitor unmark_visitor(this);
5795 UnmarkRecursively(root, &unmark_visitor);
5801 static bool SafeIsNativeContext(HeapObject* obj) {
5802 return obj->map() == obj->GetHeap()->raw_unchecked_native_context_map();
5806 void PathTracer::MarkRecursively(Object** p, MarkVisitor* mark_visitor) {
5807 if (!(*p)->IsHeapObject()) return;
5809 HeapObject* obj = HeapObject::cast(*p);
5811 MapWord map_word = obj->map_word();
5812 if (!map_word.ToMap()->IsHeapObject()) return; // visited before
5814 if (found_target_in_trace_) return; // stop if target found
5815 object_stack_.Add(obj);
5816 if (((search_target_ == kAnyGlobalObject) && obj->IsJSGlobalObject()) ||
5817 (obj == search_target_)) {
5818 found_target_in_trace_ = true;
5819 found_target_ = true;
5823 bool is_native_context = SafeIsNativeContext(obj);
5826 Map* map = Map::cast(map_word.ToMap());
5828 MapWord marked_map_word =
5829 MapWord::FromRawValue(obj->map_word().ToRawValue() + kMarkTag);
5830 obj->set_map_word(marked_map_word);
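// Illustrative note (mechanism as understood from the surrounding code):
// adding kMarkTag changes the low tag bits of the raw map word, so the
// IsHeapObject() check on map_word.ToMap() above fails for objects that
// have been visited before; UnmarkRecursively subtracts the same tag to
// restore the original map pointer.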
5832 // Scan the object body.
5833 if (is_native_context && (visit_mode_ == VISIT_ONLY_STRONG)) {
5834 // This is specialized to scan Context's properly.
Object** start =
    reinterpret_cast<Object**>(obj->address() + Context::kHeaderSize);
Object** end =
    reinterpret_cast<Object**>(obj->address() + Context::kHeaderSize +
                               Context::FIRST_WEAK_SLOT * kPointerSize);
5840 mark_visitor->VisitPointers(start, end);
5842 obj->IterateBody(map->instance_type(), obj->SizeFromMap(map), mark_visitor);
5845 // Scan the map after the body because the body is a lot more interesting
5846 // when doing leak detection.
5847 MarkRecursively(reinterpret_cast<Object**>(&map), mark_visitor);
5849 if (!found_target_in_trace_) { // don't pop if found the target
5850 object_stack_.RemoveLast();
5855 void PathTracer::UnmarkRecursively(Object** p, UnmarkVisitor* unmark_visitor) {
5856 if (!(*p)->IsHeapObject()) return;
5858 HeapObject* obj = HeapObject::cast(*p);
5860 MapWord map_word = obj->map_word();
5861 if (map_word.ToMap()->IsHeapObject()) return; // unmarked already
5863 MapWord unmarked_map_word =
5864 MapWord::FromRawValue(map_word.ToRawValue() - kMarkTag);
5865 obj->set_map_word(unmarked_map_word);
5867 Map* map = Map::cast(unmarked_map_word.ToMap());
5869 UnmarkRecursively(reinterpret_cast<Object**>(&map), unmark_visitor);
5871 obj->IterateBody(map->instance_type(), obj->SizeFromMap(map), unmark_visitor);
5875 void PathTracer::ProcessResults() {
5876 if (found_target_) {
5877 OFStream os(stdout);
5878 os << "=====================================\n"
5879 << "==== Path to object ====\n"
5880 << "=====================================\n\n";
5882 DCHECK(!object_stack_.is_empty());
5883 for (int i = 0; i < object_stack_.length(); i++) {
5884 if (i > 0) os << "\n |\n |\n V\n\n";
5885 object_stack_[i]->Print(os);
5887 os << "=====================================\n";
5892 // Triggers a depth-first traversal of reachable objects from one
// given root object and finds a path to a specific heap object and prints it.
5895 void Heap::TracePathToObjectFrom(Object* target, Object* root) {
5896 PathTracer tracer(target, PathTracer::FIND_ALL, VISIT_ALL);
5897 tracer.VisitPointer(&root);
5901 // Triggers a depth-first traversal of reachable objects from roots
5902 // and finds a path to a specific heap object and prints it.
5903 void Heap::TracePathToObject(Object* target) {
5904 PathTracer tracer(target, PathTracer::FIND_ALL, VISIT_ALL);
5905 IterateRoots(&tracer, VISIT_ONLY_STRONG);
5909 // Triggers a depth-first traversal of reachable objects from roots
5910 // and finds a path to any global object and prints it. Useful for
5911 // determining the source for leaks of global objects.
5912 void Heap::TracePathToGlobal() {
5913 PathTracer tracer(PathTracer::kAnyGlobalObject, PathTracer::FIND_ALL,
5915 IterateRoots(&tracer, VISIT_ONLY_STRONG);
5920 void Heap::UpdateCumulativeGCStatistics(double duration,
5921 double spent_in_mutator,
5922 double marking_time) {
5923 if (FLAG_print_cumulative_gc_stat) {
5924 total_gc_time_ms_ += duration;
5925 max_gc_pause_ = Max(max_gc_pause_, duration);
5926 max_alive_after_gc_ = Max(max_alive_after_gc_, SizeOfObjects());
5927 min_in_mutator_ = Min(min_in_mutator_, spent_in_mutator);
5928 } else if (FLAG_trace_gc_verbose) {
5929 total_gc_time_ms_ += duration;
5932 marking_time_ += marking_time;
5936 int KeyedLookupCache::Hash(Handle<Map> map, Handle<Name> name) {
5937 DisallowHeapAllocation no_gc;
5938 // Uses only lower 32 bits if pointers are larger.
5939 uintptr_t addr_hash =
5940 static_cast<uint32_t>(reinterpret_cast<uintptr_t>(*map)) >> kMapHashShift;
5941 return static_cast<uint32_t>((addr_hash ^ name->Hash()) & kCapacityMask);
5945 int KeyedLookupCache::Lookup(Handle<Map> map, Handle<Name> name) {
5946 DisallowHeapAllocation no_gc;
5947 int index = (Hash(map, name) & kHashMask);
5948 for (int i = 0; i < kEntriesPerBucket; i++) {
5949 Key& key = keys_[index + i];
5950 if ((key.map == *map) && key.name->Equals(*name)) {
5951 return field_offsets_[index + i];
5958 void KeyedLookupCache::Update(Handle<Map> map, Handle<Name> name,
5960 DisallowHeapAllocation no_gc;
5961 if (!name->IsUniqueName()) {
5962 if (!StringTable::InternalizeStringIfExists(
5963 name->GetIsolate(), Handle<String>::cast(name)).ToHandle(&name)) {
5967 // This cache is cleared only between mark compact passes, so we expect the
5968 // cache to only contain old space names.
5969 DCHECK(!map->GetIsolate()->heap()->InNewSpace(*name));
5971 int index = (Hash(map, name) & kHashMask);
5972 // After a GC there will be free slots, so we use them in order (this may
5973 // help to get the most frequently used one in position 0).
5974 for (int i = 0; i < kEntriesPerBucket; i++) {
Key& key = keys_[index + i];
Object* free_entry_indicator = NULL;
if (key.map == free_entry_indicator) {
  key.map = *map;
  key.name = *name;
  field_offsets_[index + i] = field_offset;
  return;
}
5984 // No free entry found in this bucket, so we move them all down one and
5985 // put the new entry at position zero.
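// Illustrative sketch of the update below (two entries per bucket and the
// concrete keys are assumed): if the bucket holds (mapA, "x") in position 0
// and (mapB, "y") in position 1 and no slot is free, updating with
// (mapC, "z") shifts (mapA, "x") into position 1 (evicting (mapB, "y")) and
// writes (mapC, "z") into position 0, keeping the most recently used entry
// first.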
5986 for (int i = kEntriesPerBucket - 1; i > 0; i--) {
5987 Key& key = keys_[index + i];
Key& key2 = keys_[index + i - 1];
key = key2;
field_offsets_[index + i] = field_offsets_[index + i - 1];
5993 // Write the new first entry.
Key& key = keys_[index];
key.map = *map;
key.name = *name;
field_offsets_[index] = field_offset;
6001 void KeyedLookupCache::Clear() {
6002 for (int index = 0; index < kLength; index++) keys_[index].map = NULL;
6006 void DescriptorLookupCache::Clear() {
6007 for (int index = 0; index < kLength; index++) keys_[index].source = NULL;
6011 void ExternalStringTable::CleanUp() {
6013 for (int i = 0; i < new_space_strings_.length(); ++i) {
6014 if (new_space_strings_[i] == heap_->the_hole_value()) {
6017 DCHECK(new_space_strings_[i]->IsExternalString());
6018 if (heap_->InNewSpace(new_space_strings_[i])) {
6019 new_space_strings_[last++] = new_space_strings_[i];
6021 old_space_strings_.Add(new_space_strings_[i]);
6024 new_space_strings_.Rewind(last);
6025 new_space_strings_.Trim();
6028 for (int i = 0; i < old_space_strings_.length(); ++i) {
6029 if (old_space_strings_[i] == heap_->the_hole_value()) {
6032 DCHECK(old_space_strings_[i]->IsExternalString());
6033 DCHECK(!heap_->InNewSpace(old_space_strings_[i]));
6034 old_space_strings_[last++] = old_space_strings_[i];
6036 old_space_strings_.Rewind(last);
6037 old_space_strings_.Trim();
6039 if (FLAG_verify_heap) {
6046 void ExternalStringTable::TearDown() {
6047 for (int i = 0; i < new_space_strings_.length(); ++i) {
6048 heap_->FinalizeExternalString(ExternalString::cast(new_space_strings_[i]));
6050 new_space_strings_.Free();
6051 for (int i = 0; i < old_space_strings_.length(); ++i) {
6052 heap_->FinalizeExternalString(ExternalString::cast(old_space_strings_[i]));
6054 old_space_strings_.Free();
6058 void Heap::QueueMemoryChunkForFree(MemoryChunk* chunk) {
6059 chunk->set_next_chunk(chunks_queued_for_free_);
6060 chunks_queued_for_free_ = chunk;
6064 void Heap::FreeQueuedChunks() {
6065 if (chunks_queued_for_free_ == NULL) return;
6068 for (chunk = chunks_queued_for_free_; chunk != NULL; chunk = next) {
6069 next = chunk->next_chunk();
6070 chunk->SetFlag(MemoryChunk::ABOUT_TO_BE_FREED);
6072 if (chunk->owner()->identity() == LO_SPACE) {
// StoreBuffer::Filter relies on MemoryChunk::FromAnyPointerAddress.
// If FromAnyPointerAddress encounters a slot that belongs to a large
// chunk queued for deletion it will fail to find the chunk, because
// it tries to search the list of pages owned by the large object space,
// and queued chunks were detached from that list.
// To work around this we split the large chunk into normal kPageSize
// aligned pieces and initialize the size, owner and flags field of every
// piece. If FromAnyPointerAddress encounters a slot that belongs to one
// of these smaller pieces it will treat it as a slot on a normal Page.
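// Illustrative example (sizes are assumed): for a large chunk of
// 3 * Page::kPageSize starting at an aligned address A, fake MemoryChunk
// headers are written at A + kPageSize and A + 2 * kPageSize, each
// reporting size kPageSize, owner lo_space() and the ABOUT_TO_BE_FREED
// flag. A slot address in one of the trailing pages then rounds down to
// the nearest fake header and is treated as a slot on a normal Page,
// instead of triggering a search of the already detached large-object
// page list.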
6082 Address chunk_end = chunk->address() + chunk->size();
6083 MemoryChunk* inner =
6084 MemoryChunk::FromAddress(chunk->address() + Page::kPageSize);
6085 MemoryChunk* inner_last = MemoryChunk::FromAddress(chunk_end - 1);
6086 while (inner <= inner_last) {
6087 // Size of a large chunk is always a multiple of
6088 // OS::AllocateAlignment() so there is always
6089 // enough space for a fake MemoryChunk header.
6090 Address area_end = Min(inner->address() + Page::kPageSize, chunk_end);
6091 // Guard against overflow.
6092 if (area_end < inner->address()) area_end = chunk_end;
6093 inner->SetArea(inner->address(), area_end);
6094 inner->set_size(Page::kPageSize);
6095 inner->set_owner(lo_space());
6096 inner->SetFlag(MemoryChunk::ABOUT_TO_BE_FREED);
6097 inner = MemoryChunk::FromAddress(inner->address() + Page::kPageSize);
6101 isolate_->heap()->store_buffer()->Compact();
6102 isolate_->heap()->store_buffer()->Filter(MemoryChunk::ABOUT_TO_BE_FREED);
6103 for (chunk = chunks_queued_for_free_; chunk != NULL; chunk = next) {
6104 next = chunk->next_chunk();
6105 isolate_->memory_allocator()->Free(chunk);
6107 chunks_queued_for_free_ = NULL;
6111 void Heap::RememberUnmappedPage(Address page, bool compacted) {
6112 uintptr_t p = reinterpret_cast<uintptr_t>(page);
6113 // Tag the page pointer to make it findable in the dump file.
if (compacted) {
  p ^= 0xc1ead & (Page::kPageSize - 1);  // Cleared.
} else {
  p ^= 0x1d1ed & (Page::kPageSize - 1);  // I died.
}
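// Illustrative example (hypothetical address, assuming a 1 MB page size):
// for a page at 0x3a200000, XOR-ing with 0xc1ead (which is smaller than the
// page size, so only the low, offset bits change) produces 0x3a2c1ead,
// which is what gets stored below. The page-aligned high bits stay intact,
// so the page can still be identified in a crash dump, while the "c1ead" /
// "1d1ed" pattern in the low bits records whether the page was cleared by
// compaction or simply died.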
6119 remembered_unmapped_pages_[remembered_unmapped_pages_index_] =
6120 reinterpret_cast<Address>(p);
6121 remembered_unmapped_pages_index_++;
6122 remembered_unmapped_pages_index_ %= kRememberedUnmappedPages;
6126 void Heap::ClearObjectStats(bool clear_last_time_stats) {
6127 memset(object_counts_, 0, sizeof(object_counts_));
6128 memset(object_sizes_, 0, sizeof(object_sizes_));
6129 if (clear_last_time_stats) {
6130 memset(object_counts_last_time_, 0, sizeof(object_counts_last_time_));
6131 memset(object_sizes_last_time_, 0, sizeof(object_sizes_last_time_));
6136 static base::LazyMutex checkpoint_object_stats_mutex = LAZY_MUTEX_INITIALIZER;
6139 void Heap::CheckpointObjectStats() {
6140 base::LockGuard<base::Mutex> lock_guard(
6141 checkpoint_object_stats_mutex.Pointer());
6142 Counters* counters = isolate()->counters();
6143 #define ADJUST_LAST_TIME_OBJECT_COUNT(name) \
6144 counters->count_of_##name()->Increment( \
6145 static_cast<int>(object_counts_[name])); \
6146 counters->count_of_##name()->Decrement( \
6147 static_cast<int>(object_counts_last_time_[name])); \
6148 counters->size_of_##name()->Increment( \
6149 static_cast<int>(object_sizes_[name])); \
6150 counters->size_of_##name()->Decrement( \
6151 static_cast<int>(object_sizes_last_time_[name]));
6152 INSTANCE_TYPE_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
6153 #undef ADJUST_LAST_TIME_OBJECT_COUNT
6155 #define ADJUST_LAST_TIME_OBJECT_COUNT(name) \
6156 index = FIRST_CODE_KIND_SUB_TYPE + Code::name; \
6157 counters->count_of_CODE_TYPE_##name()->Increment( \
6158 static_cast<int>(object_counts_[index])); \
6159 counters->count_of_CODE_TYPE_##name()->Decrement( \
6160 static_cast<int>(object_counts_last_time_[index])); \
6161 counters->size_of_CODE_TYPE_##name()->Increment( \
6162 static_cast<int>(object_sizes_[index])); \
6163 counters->size_of_CODE_TYPE_##name()->Decrement( \
6164 static_cast<int>(object_sizes_last_time_[index]));
6165 CODE_KIND_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
6166 #undef ADJUST_LAST_TIME_OBJECT_COUNT
6167 #define ADJUST_LAST_TIME_OBJECT_COUNT(name) \
6168 index = FIRST_FIXED_ARRAY_SUB_TYPE + name; \
6169 counters->count_of_FIXED_ARRAY_##name()->Increment( \
6170 static_cast<int>(object_counts_[index])); \
6171 counters->count_of_FIXED_ARRAY_##name()->Decrement( \
6172 static_cast<int>(object_counts_last_time_[index])); \
6173 counters->size_of_FIXED_ARRAY_##name()->Increment( \
6174 static_cast<int>(object_sizes_[index])); \
6175 counters->size_of_FIXED_ARRAY_##name()->Decrement( \
6176 static_cast<int>(object_sizes_last_time_[index]));
6177 FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
6178 #undef ADJUST_LAST_TIME_OBJECT_COUNT
6179 #define ADJUST_LAST_TIME_OBJECT_COUNT(name) \
6181 FIRST_CODE_AGE_SUB_TYPE + Code::k##name##CodeAge - Code::kFirstCodeAge; \
6182 counters->count_of_CODE_AGE_##name()->Increment( \
6183 static_cast<int>(object_counts_[index])); \
6184 counters->count_of_CODE_AGE_##name()->Decrement( \
6185 static_cast<int>(object_counts_last_time_[index])); \
6186 counters->size_of_CODE_AGE_##name()->Increment( \
6187 static_cast<int>(object_sizes_[index])); \
6188 counters->size_of_CODE_AGE_##name()->Decrement( \
6189 static_cast<int>(object_sizes_last_time_[index]));
6190 CODE_AGE_LIST_COMPLETE(ADJUST_LAST_TIME_OBJECT_COUNT)
6191 #undef ADJUST_LAST_TIME_OBJECT_COUNT
6193 MemCopy(object_counts_last_time_, object_counts_, sizeof(object_counts_));
6194 MemCopy(object_sizes_last_time_, object_sizes_, sizeof(object_sizes_));
6198 } // namespace v8::internal