1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
7 #include "src/accessors.h"
9 #include "src/base/bits.h"
10 #include "src/base/once.h"
11 #include "src/base/utils/random-number-generator.h"
12 #include "src/bootstrapper.h"
13 #include "src/codegen.h"
14 #include "src/compilation-cache.h"
15 #include "src/conversions.h"
16 #include "src/cpu-profiler.h"
17 #include "src/debug.h"
18 #include "src/deoptimizer.h"
19 #include "src/global-handles.h"
20 #include "src/heap/gc-idle-time-handler.h"
21 #include "src/heap/incremental-marking.h"
22 #include "src/heap/mark-compact.h"
23 #include "src/heap/objects-visiting-inl.h"
24 #include "src/heap/objects-visiting.h"
25 #include "src/heap/store-buffer.h"
26 #include "src/heap-profiler.h"
27 #include "src/isolate-inl.h"
28 #include "src/natives.h"
29 #include "src/runtime-profiler.h"
30 #include "src/scopeinfo.h"
31 #include "src/snapshot.h"
32 #include "src/utils.h"
33 #include "src/v8threads.h"
34 #include "src/vm-state-inl.h"
36 #if V8_TARGET_ARCH_ARM && !V8_INTERPRETED_REGEXP
37 #include "src/regexp-macro-assembler.h" // NOLINT
38 #include "src/arm/regexp-macro-assembler-arm.h" // NOLINT
40 #if V8_TARGET_ARCH_MIPS && !V8_INTERPRETED_REGEXP
41 #include "src/regexp-macro-assembler.h" // NOLINT
42 #include "src/mips/regexp-macro-assembler-mips.h" // NOLINT
44 #if V8_TARGET_ARCH_MIPS64 && !V8_INTERPRETED_REGEXP
45 #include "src/regexp-macro-assembler.h"
46 #include "src/mips64/regexp-macro-assembler-mips64.h"
54 : amount_of_external_allocated_memory_(0),
55 amount_of_external_allocated_memory_at_last_global_gc_(0),
58 // semispace_size_ should be a power of 2 and old_generation_size_ should
59 // be a multiple of Page::kPageSize.
60 reserved_semispace_size_(8 * (kPointerSize / 4) * MB),
61 max_semi_space_size_(8 * (kPointerSize / 4) * MB),
62 initial_semispace_size_(Page::kPageSize),
63 max_old_generation_size_(700ul * (kPointerSize / 4) * MB),
64 max_executable_size_(256ul * (kPointerSize / 4) * MB),
      // Variables set based on semispace_size_ and old_generation_size_ in
      // ConfigureHeap.
67 // Will be 4 * reserved_semispace_size_ to ensure that young
68 // generation can be aligned to its size.
69 maximum_committed_(0),
70 survived_since_last_expansion_(0),
72 always_allocate_scope_depth_(0),
73 contexts_disposed_(0),
75 flush_monomorphic_ics_(false),
76 scan_on_scavenge_pages_(0),
78 old_pointer_space_(NULL),
79 old_data_space_(NULL),
83 property_cell_space_(NULL),
86 gc_post_processing_depth_(0),
87 allocations_count_(0),
88 raw_allocations_hash_(0),
89 dump_allocations_hash_countdown_(FLAG_dump_allocations_digest_at_alloc),
92 remembered_unmapped_pages_index_(0),
93 unflattened_strings_length_(0),
95 allocation_timeout_(0),
97 old_generation_allocation_limit_(kMinimumOldGenerationAllocationLimit),
98 old_gen_exhausted_(false),
99 inline_allocation_disabled_(false),
100 store_buffer_rebuilder_(store_buffer()),
101 hidden_string_(NULL),
102 gc_safe_size_of_old_object_(NULL),
103 total_regexp_code_generated_(0),
105 high_survival_rate_period_length_(0),
106 promoted_objects_size_(0),
108 semi_space_copied_object_size_(0),
109 semi_space_copied_rate_(0),
110 nodes_died_in_new_space_(0),
111 nodes_copied_in_new_space_(0),
113 maximum_size_scavenges_(0),
115 total_gc_time_ms_(0.0),
116 max_alive_after_gc_(0),
117 min_in_mutator_(kMaxInt),
120 mark_compact_collector_(this),
123 incremental_marking_(this),
124 gc_count_at_last_idle_gc_(0),
125 full_codegen_bytes_generated_(0),
126 crankshaft_codegen_bytes_generated_(0),
127 gcs_since_last_deopt_(0),
129 no_weak_object_verification_scope_depth_(0),
131 allocation_sites_scratchpad_length_(0),
132 promotion_queue_(this),
134 external_string_table_(this),
135 chunks_queued_for_free_(NULL),
136 gc_callbacks_depth_(0) {
137 // Allow build-time customization of the max semispace size. Building
138 // V8 with snapshots and a non-default max semispace size is much
139 // easier if you can define it as part of the build environment.
140 #if defined(V8_MAX_SEMISPACE_SIZE)
  max_semi_space_size_ = reserved_semispace_size_ = V8_MAX_SEMISPACE_SIZE;
#endif
144 // Ensure old_generation_size_ is a multiple of kPageSize.
145 DCHECK(MB >= Page::kPageSize);
147 memset(roots_, 0, sizeof(roots_[0]) * kRootListLength);
148 set_native_contexts_list(NULL);
149 set_array_buffers_list(Smi::FromInt(0));
150 set_allocation_sites_list(Smi::FromInt(0));
151 set_encountered_weak_collections(Smi::FromInt(0));
  // Put a dummy entry in the remembered pages so we can find the list in
  // the minidump even if there are no real unmapped pages.
154 RememberUnmappedPage(NULL, false);
156 ClearObjectStats(true);
160 intptr_t Heap::Capacity() {
161 if (!HasBeenSetUp()) return 0;
163 return new_space_.Capacity() + old_pointer_space_->Capacity() +
164 old_data_space_->Capacity() + code_space_->Capacity() +
165 map_space_->Capacity() + cell_space_->Capacity() +
166 property_cell_space_->Capacity();
170 intptr_t Heap::CommittedMemory() {
171 if (!HasBeenSetUp()) return 0;
173 return new_space_.CommittedMemory() + old_pointer_space_->CommittedMemory() +
174 old_data_space_->CommittedMemory() + code_space_->CommittedMemory() +
175 map_space_->CommittedMemory() + cell_space_->CommittedMemory() +
176 property_cell_space_->CommittedMemory() + lo_space_->Size();
180 size_t Heap::CommittedPhysicalMemory() {
181 if (!HasBeenSetUp()) return 0;
183 return new_space_.CommittedPhysicalMemory() +
184 old_pointer_space_->CommittedPhysicalMemory() +
185 old_data_space_->CommittedPhysicalMemory() +
186 code_space_->CommittedPhysicalMemory() +
187 map_space_->CommittedPhysicalMemory() +
188 cell_space_->CommittedPhysicalMemory() +
189 property_cell_space_->CommittedPhysicalMemory() +
190 lo_space_->CommittedPhysicalMemory();
194 intptr_t Heap::CommittedMemoryExecutable() {
195 if (!HasBeenSetUp()) return 0;
197 return isolate()->memory_allocator()->SizeExecutable();
201 void Heap::UpdateMaximumCommitted() {
202 if (!HasBeenSetUp()) return;
204 intptr_t current_committed_memory = CommittedMemory();
205 if (current_committed_memory > maximum_committed_) {
206 maximum_committed_ = current_committed_memory;
211 intptr_t Heap::Available() {
212 if (!HasBeenSetUp()) return 0;
214 return new_space_.Available() + old_pointer_space_->Available() +
215 old_data_space_->Available() + code_space_->Available() +
216 map_space_->Available() + cell_space_->Available() +
217 property_cell_space_->Available();
221 bool Heap::HasBeenSetUp() {
222 return old_pointer_space_ != NULL && old_data_space_ != NULL &&
223 code_space_ != NULL && map_space_ != NULL && cell_space_ != NULL &&
224 property_cell_space_ != NULL && lo_space_ != NULL;
228 int Heap::GcSafeSizeOfOldObject(HeapObject* object) {
229 if (IntrusiveMarking::IsMarked(object)) {
230 return IntrusiveMarking::SizeOfMarkedObject(object);
232 return object->SizeFromMap(object->map());
236 GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space,
237 const char** reason) {
238 // Is global GC requested?
239 if (space != NEW_SPACE) {
240 isolate_->counters()->gc_compactor_caused_by_request()->Increment();
241 *reason = "GC in old space requested";
242 return MARK_COMPACTOR;
245 if (FLAG_gc_global || (FLAG_stress_compaction && (gc_count_ & 1) != 0)) {
246 *reason = "GC in old space forced by flags";
247 return MARK_COMPACTOR;
250 // Is enough data promoted to justify a global GC?
251 if (OldGenerationAllocationLimitReached()) {
252 isolate_->counters()->gc_compactor_caused_by_promoted_data()->Increment();
253 *reason = "promotion limit reached";
254 return MARK_COMPACTOR;
257 // Have allocation in OLD and LO failed?
  if (old_gen_exhausted_) {
    isolate_->counters()
        ->gc_compactor_caused_by_oldspace_exhaustion()
        ->Increment();
    *reason = "old generations exhausted";
    return MARK_COMPACTOR;
  }
  // Is there enough space left in OLD to guarantee that a scavenge can
  // succeed?
269 // Note that MemoryAllocator->MaxAvailable() undercounts the memory available
270 // for object promotion. It counts only the bytes that the memory
271 // allocator has not yet allocated from the OS and assigned to any space,
272 // and does not count available bytes already in the old space or code
273 // space. Undercounting is safe---we may get an unrequested full GC when
274 // a scavenge would have succeeded.
  if (isolate_->memory_allocator()->MaxAvailable() <= new_space_.Size()) {
    isolate_->counters()
        ->gc_compactor_caused_by_oldspace_exhaustion()
        ->Increment();
    *reason = "scavenge might not succeed";
    return MARK_COMPACTOR;
  }

  // Default
  *reason = NULL;
  return SCAVENGER;
}
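
// Illustrative note (not part of the original file): because
// MemoryAllocator::MaxAvailable() only undercounts, the last check above can
// only turn a would-be SCAVENGER into a MARK_COMPACTOR, never the reverse,
// which is why the undercounting is safe. A caller consumes the result
// roughly as follows (local names here are hypothetical):
//
//   const char* collector_reason = NULL;
//   GarbageCollector collector =
//       SelectGarbageCollector(space, &collector_reason);
//   CollectGarbage(collector, gc_reason, collector_reason);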
289 // TODO(1238405): Combine the infrastructure for --heap-stats and
290 // --log-gc to avoid the complicated preprocessor and flag testing.
291 void Heap::ReportStatisticsBeforeGC() {
292 // Heap::ReportHeapStatistics will also log NewSpace statistics when
  // compiled with --log-gc set. The following logic is used to avoid
  // double logging.
#ifdef DEBUG
  if (FLAG_heap_stats || FLAG_log_gc) new_space_.CollectStatistics();
  if (FLAG_heap_stats) {
    ReportHeapStatistics("Before GC");
  } else if (FLAG_log_gc) {
    new_space_.ReportStatistics();
  }
  if (FLAG_heap_stats || FLAG_log_gc) new_space_.ClearHistograms();
#else
  if (FLAG_log_gc) {
    new_space_.CollectStatistics();
    new_space_.ReportStatistics();
    new_space_.ClearHistograms();
  }
#endif  // DEBUG
}
313 void Heap::PrintShortHeapStatistics() {
314 if (!FLAG_trace_gc_verbose) return;
315 PrintPID("Memory allocator, used: %6" V8_PTR_PREFIX
317 ", available: %6" V8_PTR_PREFIX "d KB\n",
318 isolate_->memory_allocator()->Size() / KB,
319 isolate_->memory_allocator()->Available() / KB);
320 PrintPID("New space, used: %6" V8_PTR_PREFIX
322 ", available: %6" V8_PTR_PREFIX
324 ", committed: %6" V8_PTR_PREFIX "d KB\n",
325 new_space_.Size() / KB, new_space_.Available() / KB,
326 new_space_.CommittedMemory() / KB);
327 PrintPID("Old pointers, used: %6" V8_PTR_PREFIX
329 ", available: %6" V8_PTR_PREFIX
331 ", committed: %6" V8_PTR_PREFIX "d KB\n",
332 old_pointer_space_->SizeOfObjects() / KB,
333 old_pointer_space_->Available() / KB,
334 old_pointer_space_->CommittedMemory() / KB);
335 PrintPID("Old data space, used: %6" V8_PTR_PREFIX
337 ", available: %6" V8_PTR_PREFIX
339 ", committed: %6" V8_PTR_PREFIX "d KB\n",
340 old_data_space_->SizeOfObjects() / KB,
341 old_data_space_->Available() / KB,
342 old_data_space_->CommittedMemory() / KB);
343 PrintPID("Code space, used: %6" V8_PTR_PREFIX
345 ", available: %6" V8_PTR_PREFIX
347 ", committed: %6" V8_PTR_PREFIX "d KB\n",
348 code_space_->SizeOfObjects() / KB, code_space_->Available() / KB,
349 code_space_->CommittedMemory() / KB);
350 PrintPID("Map space, used: %6" V8_PTR_PREFIX
352 ", available: %6" V8_PTR_PREFIX
354 ", committed: %6" V8_PTR_PREFIX "d KB\n",
355 map_space_->SizeOfObjects() / KB, map_space_->Available() / KB,
356 map_space_->CommittedMemory() / KB);
357 PrintPID("Cell space, used: %6" V8_PTR_PREFIX
359 ", available: %6" V8_PTR_PREFIX
361 ", committed: %6" V8_PTR_PREFIX "d KB\n",
362 cell_space_->SizeOfObjects() / KB, cell_space_->Available() / KB,
363 cell_space_->CommittedMemory() / KB);
364 PrintPID("PropertyCell space, used: %6" V8_PTR_PREFIX
366 ", available: %6" V8_PTR_PREFIX
368 ", committed: %6" V8_PTR_PREFIX "d KB\n",
369 property_cell_space_->SizeOfObjects() / KB,
370 property_cell_space_->Available() / KB,
371 property_cell_space_->CommittedMemory() / KB);
372 PrintPID("Large object space, used: %6" V8_PTR_PREFIX
374 ", available: %6" V8_PTR_PREFIX
376 ", committed: %6" V8_PTR_PREFIX "d KB\n",
377 lo_space_->SizeOfObjects() / KB, lo_space_->Available() / KB,
378 lo_space_->CommittedMemory() / KB);
379 PrintPID("All spaces, used: %6" V8_PTR_PREFIX
381 ", available: %6" V8_PTR_PREFIX
383 ", committed: %6" V8_PTR_PREFIX "d KB\n",
384 this->SizeOfObjects() / KB, this->Available() / KB,
385 this->CommittedMemory() / KB);
386 PrintPID("External memory reported: %6" V8_PTR_PREFIX "d KB\n",
387 static_cast<intptr_t>(amount_of_external_allocated_memory_ / KB));
388 PrintPID("Total time spent in GC : %.1f ms\n", total_gc_time_ms_);
392 // TODO(1238405): Combine the infrastructure for --heap-stats and
393 // --log-gc to avoid the complicated preprocessor and flag testing.
394 void Heap::ReportStatisticsAfterGC() {
  // As with the before-GC reporting, we use some complicated logic to ensure
  // that NewSpace statistics are logged exactly once when --log-gc is turned
  // on.
#ifdef DEBUG
  if (FLAG_heap_stats) {
    new_space_.CollectStatistics();
    ReportHeapStatistics("After GC");
  } else if (FLAG_log_gc) {
    new_space_.ReportStatistics();
  }
#else
  if (FLAG_log_gc) new_space_.ReportStatistics();
#endif  // DEBUG
}
410 void Heap::GarbageCollectionPrologue() {
412 AllowHeapAllocation for_the_first_part_of_prologue;
413 ClearJSFunctionResultCaches();
415 unflattened_strings_length_ = 0;
417 if (FLAG_flush_code && FLAG_flush_code_incrementally) {
418 mark_compact_collector()->EnableCodeFlushing(true);
422 if (FLAG_verify_heap) {
428 // Reset GC statistics.
429 promoted_objects_size_ = 0;
430 semi_space_copied_object_size_ = 0;
431 nodes_died_in_new_space_ = 0;
432 nodes_copied_in_new_space_ = 0;
435 UpdateMaximumCommitted();
438 DCHECK(!AllowHeapAllocation::IsAllowed() && gc_state_ == NOT_IN_GC);
440 if (FLAG_gc_verbose) Print();
442 ReportStatisticsBeforeGC();
445 store_buffer()->GCPrologue();
447 if (isolate()->concurrent_osr_enabled()) {
448 isolate()->optimizing_compiler_thread()->AgeBufferedOsrJobs();
451 if (new_space_.IsAtMaximumCapacity()) {
    maximum_size_scavenges_++;
  } else {
    maximum_size_scavenges_ = 0;
  }
456 CheckNewSpaceExpansionCriteria();
intptr_t Heap::SizeOfObjects() {
  intptr_t total = 0;
  AllSpaces spaces(this);
  for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
    total += space->SizeOfObjects();
  }
  return total;
}
470 void Heap::ClearAllICsByKind(Code::Kind kind) {
471 HeapObjectIterator it(code_space());
473 for (Object* object = it.Next(); object != NULL; object = it.Next()) {
474 Code* code = Code::cast(object);
475 Code::Kind current_kind = code->kind();
476 if (current_kind == Code::FUNCTION ||
477 current_kind == Code::OPTIMIZED_FUNCTION) {
478 code->ClearInlineCaches(kind);
484 void Heap::RepairFreeListsAfterBoot() {
485 PagedSpaces spaces(this);
486 for (PagedSpace* space = spaces.next(); space != NULL;
487 space = spaces.next()) {
488 space->RepairFreeListsAfterBoot();
493 void Heap::ProcessPretenuringFeedback() {
494 if (FLAG_allocation_site_pretenuring) {
495 int tenure_decisions = 0;
496 int dont_tenure_decisions = 0;
497 int allocation_mementos_found = 0;
498 int allocation_sites = 0;
499 int active_allocation_sites = 0;
    // If the scratchpad overflowed, we have to iterate over the allocation
    // sites list.
    // TODO(hpayer): We iterate over the whole list of allocation sites when
    // we grew to the maximum semi-space size to deopt maybe tenured
    // allocation sites. We could hold the maybe tenured allocation sites
    // in a separate data structure if this is a performance problem.
507 bool deopt_maybe_tenured = DeoptMaybeTenuredAllocationSites();
508 bool use_scratchpad =
509 allocation_sites_scratchpad_length_ < kAllocationSiteScratchpadSize &&
510 !deopt_maybe_tenured;
513 Object* list_element = allocation_sites_list();
514 bool trigger_deoptimization = false;
515 bool maximum_size_scavenge = MaximumSizeScavenge();
516 while (use_scratchpad ? i < allocation_sites_scratchpad_length_
517 : list_element->IsAllocationSite()) {
      AllocationSite* site =
          use_scratchpad
              ? AllocationSite::cast(allocation_sites_scratchpad()->get(i))
              : AllocationSite::cast(list_element);
522 allocation_mementos_found += site->memento_found_count();
523 if (site->memento_found_count() > 0) {
524 active_allocation_sites++;
525 if (site->DigestPretenuringFeedback(maximum_size_scavenge)) {
526 trigger_deoptimization = true;
        if (site->GetPretenureMode() == TENURED) {
          tenure_decisions++;
        } else {
          dont_tenure_decisions++;
        }
        allocation_sites++;
      }
536 if (deopt_maybe_tenured && site->IsMaybeTenure()) {
537 site->set_deopt_dependent_code(true);
538 trigger_deoptimization = true;
      if (use_scratchpad) {
        i++;
      } else {
        list_element = site->weak_next();
      }
    }
548 if (trigger_deoptimization) {
549 isolate_->stack_guard()->RequestDeoptMarkedAllocationSites();
552 FlushAllocationSitesScratchpad();
554 if (FLAG_trace_pretenuring_statistics &&
555 (allocation_mementos_found > 0 || tenure_decisions > 0 ||
         dont_tenure_decisions > 0)) {
      PrintF(
          "GC: (mode, #visited allocation sites, #active allocation sites, "
559 "#mementos, #tenure decisions, #donttenure decisions) "
560 "(%s, %d, %d, %d, %d, %d)\n",
561 use_scratchpad ? "use scratchpad" : "use list", allocation_sites,
562 active_allocation_sites, allocation_mementos_found, tenure_decisions,
563 dont_tenure_decisions);
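
    // Worked example (illustrative, not from the original source): if one
    // site recorded 120 mementos in this cycle and DigestPretenuringFeedback()
    // switches it to TENURED, then allocation_mementos_found grows by 120,
    // active_allocation_sites, allocation_sites and tenure_decisions are each
    // incremented, and trigger_deoptimization is set so code depending on the
    // old decision is deoptimized via RequestDeoptMarkedAllocationSites().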
569 void Heap::DeoptMarkedAllocationSites() {
570 // TODO(hpayer): If iterating over the allocation sites list becomes a
571 // performance issue, use a cache heap data structure instead (similar to the
572 // allocation sites scratchpad).
573 Object* list_element = allocation_sites_list();
574 while (list_element->IsAllocationSite()) {
575 AllocationSite* site = AllocationSite::cast(list_element);
576 if (site->deopt_dependent_code()) {
577 site->dependent_code()->MarkCodeForDeoptimization(
578 isolate_, DependentCode::kAllocationSiteTenuringChangedGroup);
579 site->set_deopt_dependent_code(false);
581 list_element = site->weak_next();
583 Deoptimizer::DeoptimizeMarkedCode(isolate_);
587 void Heap::GarbageCollectionEpilogue() {
588 store_buffer()->GCEpilogue();
590 // In release mode, we only zap the from space under heap verification.
  if (Heap::ShouldZapGarbage()) {
    ZapFromSpace();
  }
595 // Process pretenuring feedback and update allocation sites.
596 ProcessPretenuringFeedback();
599 if (FLAG_verify_heap) {
604 AllowHeapAllocation for_the_rest_of_the_epilogue;
607 if (FLAG_print_global_handles) isolate_->global_handles()->Print();
608 if (FLAG_print_handles) PrintHandles();
609 if (FLAG_gc_verbose) Print();
610 if (FLAG_code_stats) ReportCodeStatistics("After GC");
612 if (FLAG_deopt_every_n_garbage_collections > 0) {
613 // TODO(jkummerow/ulan/jarin): This is not safe! We can't assume that
614 // the topmost optimized frame can be deoptimized safely, because it
615 // might not have a lazy bailout point right after its current PC.
616 if (++gcs_since_last_deopt_ == FLAG_deopt_every_n_garbage_collections) {
617 Deoptimizer::DeoptimizeAll(isolate());
618 gcs_since_last_deopt_ = 0;
622 UpdateMaximumCommitted();
624 isolate_->counters()->alive_after_last_gc()->Set(
625 static_cast<int>(SizeOfObjects()));
627 isolate_->counters()->string_table_capacity()->Set(
628 string_table()->Capacity());
629 isolate_->counters()->number_of_symbols()->Set(
630 string_table()->NumberOfElements());
632 if (full_codegen_bytes_generated_ + crankshaft_codegen_bytes_generated_ > 0) {
633 isolate_->counters()->codegen_fraction_crankshaft()->AddSample(
634 static_cast<int>((crankshaft_codegen_bytes_generated_ * 100.0) /
635 (crankshaft_codegen_bytes_generated_ +
636 full_codegen_bytes_generated_)));
639 if (CommittedMemory() > 0) {
640 isolate_->counters()->external_fragmentation_total()->AddSample(
641 static_cast<int>(100 - (SizeOfObjects() * 100.0) / CommittedMemory()));
643 isolate_->counters()->heap_fraction_new_space()->AddSample(static_cast<int>(
644 (new_space()->CommittedMemory() * 100.0) / CommittedMemory()));
    isolate_->counters()->heap_fraction_old_pointer_space()->AddSample(
        static_cast<int>((old_pointer_space()->CommittedMemory() * 100.0) /
                         CommittedMemory()));
    isolate_->counters()->heap_fraction_old_data_space()->AddSample(
        static_cast<int>((old_data_space()->CommittedMemory() * 100.0) /
                         CommittedMemory()));
    isolate_->counters()->heap_fraction_code_space()->AddSample(
        static_cast<int>((code_space()->CommittedMemory() * 100.0) /
                         CommittedMemory()));
    isolate_->counters()->heap_fraction_map_space()->AddSample(static_cast<int>(
        (map_space()->CommittedMemory() * 100.0) / CommittedMemory()));
    isolate_->counters()->heap_fraction_cell_space()->AddSample(
        static_cast<int>((cell_space()->CommittedMemory() * 100.0) /
                         CommittedMemory()));
    isolate_->counters()->heap_fraction_property_cell_space()->AddSample(
        static_cast<int>((property_cell_space()->CommittedMemory() * 100.0) /
                         CommittedMemory()));
662 isolate_->counters()->heap_fraction_lo_space()->AddSample(static_cast<int>(
663 (lo_space()->CommittedMemory() * 100.0) / CommittedMemory()));
665 isolate_->counters()->heap_sample_total_committed()->AddSample(
666 static_cast<int>(CommittedMemory() / KB));
667 isolate_->counters()->heap_sample_total_used()->AddSample(
668 static_cast<int>(SizeOfObjects() / KB));
669 isolate_->counters()->heap_sample_map_space_committed()->AddSample(
670 static_cast<int>(map_space()->CommittedMemory() / KB));
671 isolate_->counters()->heap_sample_cell_space_committed()->AddSample(
672 static_cast<int>(cell_space()->CommittedMemory() / KB));
    isolate_->counters()
        ->heap_sample_property_cell_space_committed()
        ->AddSample(
            static_cast<int>(property_cell_space()->CommittedMemory() / KB));
677 isolate_->counters()->heap_sample_code_space_committed()->AddSample(
678 static_cast<int>(code_space()->CommittedMemory() / KB));
680 isolate_->counters()->heap_sample_maximum_committed()->AddSample(
681 static_cast<int>(MaximumCommittedMemory() / KB));
684 #define UPDATE_COUNTERS_FOR_SPACE(space) \
685 isolate_->counters()->space##_bytes_available()->Set( \
686 static_cast<int>(space()->Available())); \
687 isolate_->counters()->space##_bytes_committed()->Set( \
688 static_cast<int>(space()->CommittedMemory())); \
689 isolate_->counters()->space##_bytes_used()->Set( \
690 static_cast<int>(space()->SizeOfObjects()));
691 #define UPDATE_FRAGMENTATION_FOR_SPACE(space) \
692 if (space()->CommittedMemory() > 0) { \
693 isolate_->counters()->external_fragmentation_##space()->AddSample( \
694 static_cast<int>(100 - \
695 (space()->SizeOfObjects() * 100.0) / \
                             space()->CommittedMemory()));            \
  }
698 #define UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(space) \
699 UPDATE_COUNTERS_FOR_SPACE(space) \
700 UPDATE_FRAGMENTATION_FOR_SPACE(space)
702 UPDATE_COUNTERS_FOR_SPACE(new_space)
703 UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(old_pointer_space)
704 UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(old_data_space)
705 UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(code_space)
706 UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(map_space)
707 UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(cell_space)
708 UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(property_cell_space)
709 UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(lo_space)
710 #undef UPDATE_COUNTERS_FOR_SPACE
711 #undef UPDATE_FRAGMENTATION_FOR_SPACE
712 #undef UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE
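
  // For reference (derived from the macro definitions above; this expansion
  // is not spelled out in the original file), UPDATE_COUNTERS_FOR_SPACE(
  // new_space) expands to:
  //
  //   isolate_->counters()->new_space_bytes_available()->Set(
  //       static_cast<int>(new_space()->Available()));
  //   isolate_->counters()->new_space_bytes_committed()->Set(
  //       static_cast<int>(new_space()->CommittedMemory()));
  //   isolate_->counters()->new_space_bytes_used()->Set(
  //       static_cast<int>(new_space()->SizeOfObjects()));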
715 ReportStatisticsAfterGC();
718 // Remember the last top pointer so that we can later find out
719 // whether we allocated in new space since the last GC.
720 new_space_top_after_last_gc_ = new_space()->top();
724 void Heap::CollectAllGarbage(int flags, const char* gc_reason,
725 const v8::GCCallbackFlags gc_callback_flags) {
726 // Since we are ignoring the return value, the exact choice of space does
  // not matter, so long as we do not specify NEW_SPACE, which would not
  // cause a full GC.
729 mark_compact_collector_.SetFlags(flags);
730 CollectGarbage(OLD_POINTER_SPACE, gc_reason, gc_callback_flags);
731 mark_compact_collector_.SetFlags(kNoGCFlags);
735 void Heap::CollectAllAvailableGarbage(const char* gc_reason) {
736 // Since we are ignoring the return value, the exact choice of space does
  // not matter, so long as we do not specify NEW_SPACE, which would not
  // cause a full GC.
739 // Major GC would invoke weak handle callbacks on weakly reachable
740 // handles, but won't collect weakly reachable objects until next
741 // major GC. Therefore if we collect aggressively and weak handle callback
  // has been invoked, we rerun major GC to release objects which become
  // garbage.
744 // Note: as weak callbacks can execute arbitrary code, we cannot
745 // hope that eventually there will be no weak callbacks invocations.
746 // Therefore stop recollecting after several attempts.
747 if (isolate()->concurrent_recompilation_enabled()) {
748 // The optimizing compiler may be unnecessarily holding on to memory.
749 DisallowHeapAllocation no_recursive_gc;
750 isolate()->optimizing_compiler_thread()->Flush();
752 mark_compact_collector()->SetFlags(kMakeHeapIterableMask |
753 kReduceMemoryFootprintMask);
754 isolate_->compilation_cache()->Clear();
755 const int kMaxNumberOfAttempts = 7;
756 const int kMinNumberOfAttempts = 2;
757 for (int attempt = 0; attempt < kMaxNumberOfAttempts; attempt++) {
758 if (!CollectGarbage(MARK_COMPACTOR, gc_reason, NULL) &&
        attempt + 1 >= kMinNumberOfAttempts) {
      break;
    }
  }
763 mark_compact_collector()->SetFlags(kNoGCFlags);
766 incremental_marking()->UncommitMarkingDeque();
770 void Heap::EnsureFillerObjectAtTop() {
771 // There may be an allocation memento behind every object in new space.
  // If we evacuate a new space that is not full, or if we are on the last
  // page of
773 // the new space, then there may be uninitialized memory behind the top
774 // pointer of the new space page. We store a filler object there to
775 // identify the unused space.
776 Address from_top = new_space_.top();
777 Address from_limit = new_space_.limit();
778 if (from_top < from_limit) {
779 int remaining_in_page = static_cast<int>(from_limit - from_top);
780 CreateFillerObjectAt(from_top, remaining_in_page);
785 bool Heap::CollectGarbage(GarbageCollector collector, const char* gc_reason,
786 const char* collector_reason,
787 const v8::GCCallbackFlags gc_callback_flags) {
788 // The VM is in the GC state until exiting this function.
789 VMState<GC> state(isolate_);
792 // Reset the allocation timeout to the GC interval, but make sure to
793 // allow at least a few allocations after a collection. The reason
794 // for this is that we have a lot of allocation sequences and we
795 // assume that a garbage collection will allow the subsequent
796 // allocation attempts to go through.
797 allocation_timeout_ = Max(6, FLAG_gc_interval);
800 EnsureFillerObjectAtTop();
802 if (collector == SCAVENGER && !incremental_marking()->IsStopped()) {
803 if (FLAG_trace_incremental_marking) {
804 PrintF("[IncrementalMarking] Scavenge during marking.\n");
808 if (collector == MARK_COMPACTOR &&
809 !mark_compact_collector()->abort_incremental_marking() &&
810 !incremental_marking()->IsStopped() &&
811 !incremental_marking()->should_hurry() &&
812 FLAG_incremental_marking_steps) {
813 // Make progress in incremental marking.
814 const intptr_t kStepSizeWhenDelayedByScavenge = 1 * MB;
815 incremental_marking()->Step(kStepSizeWhenDelayedByScavenge,
816 IncrementalMarking::NO_GC_VIA_STACK_GUARD);
817 if (!incremental_marking()->IsComplete() && !FLAG_gc_global) {
818 if (FLAG_trace_incremental_marking) {
819 PrintF("[IncrementalMarking] Delaying MarkSweep.\n");
821 collector = SCAVENGER;
822 collector_reason = "incremental marking delaying mark-sweep";
826 bool next_gc_likely_to_collect_more = false;
829 tracer()->Start(collector, gc_reason, collector_reason);
830 DCHECK(AllowHeapAllocation::IsAllowed());
831 DisallowHeapAllocation no_allocation_during_gc;
832 GarbageCollectionPrologue();
835 HistogramTimerScope histogram_timer_scope(
836 (collector == SCAVENGER) ? isolate_->counters()->gc_scavenger()
837 : isolate_->counters()->gc_compactor());
838 next_gc_likely_to_collect_more =
839 PerformGarbageCollection(collector, gc_callback_flags);
842 GarbageCollectionEpilogue();
846 // Start incremental marking for the next cycle. The heap snapshot
847 // generator needs incremental marking to stay off after it aborted.
848 if (!mark_compact_collector()->abort_incremental_marking() &&
849 WorthActivatingIncrementalMarking()) {
850 incremental_marking()->Start();
853 return next_gc_likely_to_collect_more;
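
// Illustrative sketch (not part of the original file): the flag-based entry
// point CollectAllGarbage() above wraps this function roughly as
//
//   mark_compact_collector_.SetFlags(flags);
//   CollectGarbage(OLD_POINTER_SPACE, gc_reason, gc_callback_flags);
//   mark_compact_collector_.SetFlags(kNoGCFlags);
//
// where the space argument only selects the collector via
// SelectGarbageCollector(); any space other than NEW_SPACE requests a
// MARK_COMPACTOR.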
857 int Heap::NotifyContextDisposed() {
858 if (isolate()->concurrent_recompilation_enabled()) {
859 // Flush the queued recompilation tasks.
860 isolate()->optimizing_compiler_thread()->Flush();
862 flush_monomorphic_ics_ = true;
864 return ++contexts_disposed_;
868 void Heap::MoveElements(FixedArray* array, int dst_index, int src_index,
870 if (len == 0) return;
872 DCHECK(array->map() != fixed_cow_array_map());
873 Object** dst_objects = array->data_start() + dst_index;
874 MemMove(dst_objects, array->data_start() + src_index, len * kPointerSize);
875 if (!InNewSpace(array)) {
876 for (int i = 0; i < len; i++) {
877 // TODO(hpayer): check store buffer for entries
878 if (InNewSpace(dst_objects[i])) {
879 RecordWrite(array->address(), array->OffsetOfElementAt(dst_index + i));
883 incremental_marking()->RecordWrites(array);
888 // Helper class for verifying the string table.
889 class StringTableVerifier : public ObjectVisitor {
891 void VisitPointers(Object** start, Object** end) {
892 // Visit all HeapObject pointers in [start, end).
893 for (Object** p = start; p < end; p++) {
894 if ((*p)->IsHeapObject()) {
895 // Check that the string is actually internalized.
896 CHECK((*p)->IsTheHole() || (*p)->IsUndefined() ||
897 (*p)->IsInternalizedString());
904 static void VerifyStringTable(Heap* heap) {
905 StringTableVerifier verifier;
906 heap->string_table()->IterateElements(&verifier);
908 #endif // VERIFY_HEAP
911 static bool AbortIncrementalMarkingAndCollectGarbage(
912 Heap* heap, AllocationSpace space, const char* gc_reason = NULL) {
913 heap->mark_compact_collector()->SetFlags(Heap::kAbortIncrementalMarkingMask);
914 bool result = heap->CollectGarbage(space, gc_reason);
915 heap->mark_compact_collector()->SetFlags(Heap::kNoGCFlags);
920 void Heap::ReserveSpace(int* sizes, Address* locations_out) {
  bool gc_performed = true;
  int counter = 0;
  static const int kThreshold = 20;
924 while (gc_performed && counter++ < kThreshold) {
925 gc_performed = false;
926 DCHECK(NEW_SPACE == FIRST_PAGED_SPACE - 1);
927 for (int space = NEW_SPACE; space <= LAST_PAGED_SPACE; space++) {
928 if (sizes[space] != 0) {
929 AllocationResult allocation;
930 if (space == NEW_SPACE) {
931 allocation = new_space()->AllocateRaw(sizes[space]);
          allocation = paged_space(space)->AllocateRaw(sizes[space]);
        }
        FreeListNode* node;
        if (!allocation.To(&node)) {
937 if (space == NEW_SPACE) {
938 Heap::CollectGarbage(NEW_SPACE,
939 "failed to reserve space in the new space");
941 AbortIncrementalMarkingAndCollectGarbage(
942 this, static_cast<AllocationSpace>(space),
943 "failed to reserve space in paged space");
          // Mark with a free list node, in case we have a GC before
          // reserving the space.
950 node->set_size(this, sizes[space]);
951 locations_out[space] = node->address();
  if (gc_performed) {
    // Failed to reserve the space after several attempts.
    V8::FatalProcessOutOfMemory("Heap::ReserveSpace");
  }
}
964 void Heap::EnsureFromSpaceIsCommitted() {
965 if (new_space_.CommitFromSpaceIfNeeded()) return;
967 // Committing memory to from space failed.
968 // Memory is exhausted and we will die.
969 V8::FatalProcessOutOfMemory("Committing semi space failed.");
973 void Heap::ClearJSFunctionResultCaches() {
974 if (isolate_->bootstrapper()->IsActive()) return;
976 Object* context = native_contexts_list();
977 while (!context->IsUndefined()) {
978 // Get the caches for this context. GC can happen when the context
979 // is not fully initialized, so the caches can be undefined.
980 Object* caches_or_undefined =
981 Context::cast(context)->get(Context::JSFUNCTION_RESULT_CACHES_INDEX);
982 if (!caches_or_undefined->IsUndefined()) {
983 FixedArray* caches = FixedArray::cast(caches_or_undefined);
985 int length = caches->length();
986 for (int i = 0; i < length; i++) {
987 JSFunctionResultCache::cast(caches->get(i))->Clear();
990 // Get the next context:
991 context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
996 void Heap::ClearNormalizedMapCaches() {
997 if (isolate_->bootstrapper()->IsActive() &&
      !incremental_marking()->IsMarking()) {
    return;
  }
1002 Object* context = native_contexts_list();
1003 while (!context->IsUndefined()) {
1004 // GC can happen when the context is not fully initialized,
1005 // so the cache can be undefined.
    Object* cache =
        Context::cast(context)->get(Context::NORMALIZED_MAP_CACHE_INDEX);
1008 if (!cache->IsUndefined()) {
1009 NormalizedMapCache::cast(cache)->Clear();
1011 context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
1016 void Heap::UpdateSurvivalStatistics(int start_new_space_size) {
1017 if (start_new_space_size == 0) return;
1019 promotion_rate_ = (static_cast<double>(promoted_objects_size_) /
1020 static_cast<double>(start_new_space_size) * 100);
1022 semi_space_copied_rate_ =
1023 (static_cast<double>(semi_space_copied_object_size_) /
1024 static_cast<double>(start_new_space_size) * 100);
1026 double survival_rate = promotion_rate_ + semi_space_copied_rate_;
1028 if (survival_rate > kYoungSurvivalRateHighThreshold) {
1029 high_survival_rate_period_length_++;
1031 high_survival_rate_period_length_ = 0;
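
  // Worked example (illustrative): with start_new_space_size = 1000 KB,
  // promoted_objects_size_ = 200 KB and semi_space_copied_object_size_ =
  // 300 KB, promotion_rate_ becomes 20.0 and semi_space_copied_rate_ becomes
  // 30.0, giving a combined survival rate of 50.0; whether that extends the
  // high-survival-rate period depends on kYoungSurvivalRateHighThreshold.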
1035 bool Heap::PerformGarbageCollection(
1036 GarbageCollector collector, const v8::GCCallbackFlags gc_callback_flags) {
1037 int freed_global_handles = 0;
1039 if (collector != SCAVENGER) {
1040 PROFILE(isolate_, CodeMovingGCEvent());
1044 if (FLAG_verify_heap) {
1045 VerifyStringTable(this);
  GCType gc_type =
      collector == MARK_COMPACTOR ? kGCTypeMarkSweepCompact : kGCTypeScavenge;
1053 GCCallbacksScope scope(this);
1054 if (scope.CheckReenter()) {
1055 AllowHeapAllocation allow_allocation;
1056 GCTracer::Scope scope(tracer(), GCTracer::Scope::EXTERNAL);
1057 VMState<EXTERNAL> state(isolate_);
1058 HandleScope handle_scope(isolate_);
1059 CallGCPrologueCallbacks(gc_type, kNoGCCallbackFlags);
1063 EnsureFromSpaceIsCommitted();
1065 int start_new_space_size = Heap::new_space()->SizeAsInt();
1067 if (IsHighSurvivalRate()) {
1068 // We speed up the incremental marker if it is running so that it
1069 // does not fall behind the rate of promotion, which would cause a
1070 // constantly growing old space.
1071 incremental_marking()->NotifyOfHighPromotionRate();
1074 if (collector == MARK_COMPACTOR) {
    // Perform mark-sweep with optional compaction.
    MarkCompact();
1077 sweep_generation_++;
    // Temporarily set the limit for the case when
    // PostGarbageCollectionProcessing allocates and triggers GC. The real
    // limit is set after PostGarbageCollectionProcessing.
1081 old_generation_allocation_limit_ =
1082 OldGenerationAllocationLimit(PromotedSpaceSizeOfObjects(), 0);
    old_gen_exhausted_ = false;
  } else {
    Scavenge();
  }
1088 UpdateSurvivalStatistics(start_new_space_size);
1090 isolate_->counters()->objs_since_last_young()->Set(0);
1092 // Callbacks that fire after this point might trigger nested GCs and
  // restart incremental marking, so the assertion can't be moved down.
1094 DCHECK(collector == SCAVENGER || incremental_marking()->IsStopped());
1096 gc_post_processing_depth_++;
1098 AllowHeapAllocation allow_allocation;
1099 GCTracer::Scope scope(tracer(), GCTracer::Scope::EXTERNAL);
1100 freed_global_handles =
1101 isolate_->global_handles()->PostGarbageCollectionProcessing(collector);
1103 gc_post_processing_depth_--;
1105 isolate_->eternal_handles()->PostGarbageCollectionProcessing(this);
1107 // Update relocatables.
1108 Relocatable::PostGarbageCollectionProcessing(isolate_);
1110 if (collector == MARK_COMPACTOR) {
1111 // Register the amount of external allocated memory.
1112 amount_of_external_allocated_memory_at_last_global_gc_ =
1113 amount_of_external_allocated_memory_;
1114 old_generation_allocation_limit_ = OldGenerationAllocationLimit(
1115 PromotedSpaceSizeOfObjects(), freed_global_handles);
1119 GCCallbacksScope scope(this);
1120 if (scope.CheckReenter()) {
1121 AllowHeapAllocation allow_allocation;
1122 GCTracer::Scope scope(tracer(), GCTracer::Scope::EXTERNAL);
1123 VMState<EXTERNAL> state(isolate_);
1124 HandleScope handle_scope(isolate_);
1125 CallGCEpilogueCallbacks(gc_type, gc_callback_flags);
1130 if (FLAG_verify_heap) {
1131 VerifyStringTable(this);
1135 return freed_global_handles > 0;
1139 void Heap::CallGCPrologueCallbacks(GCType gc_type, GCCallbackFlags flags) {
1140 for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
1141 if (gc_type & gc_prologue_callbacks_[i].gc_type) {
1142 if (!gc_prologue_callbacks_[i].pass_isolate_) {
1143 v8::GCPrologueCallback callback =
1144 reinterpret_cast<v8::GCPrologueCallback>(
1145 gc_prologue_callbacks_[i].callback);
1146 callback(gc_type, flags);
1148 v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(this->isolate());
1149 gc_prologue_callbacks_[i].callback(isolate, gc_type, flags);
1156 void Heap::CallGCEpilogueCallbacks(GCType gc_type,
1157 GCCallbackFlags gc_callback_flags) {
1158 for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
1159 if (gc_type & gc_epilogue_callbacks_[i].gc_type) {
1160 if (!gc_epilogue_callbacks_[i].pass_isolate_) {
1161 v8::GCPrologueCallback callback =
1162 reinterpret_cast<v8::GCPrologueCallback>(
1163 gc_epilogue_callbacks_[i].callback);
1164 callback(gc_type, gc_callback_flags);
1166 v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(this->isolate());
1167 gc_epilogue_callbacks_[i].callback(isolate, gc_type, gc_callback_flags);
1174 void Heap::MarkCompact() {
1175 gc_state_ = MARK_COMPACT;
1176 LOG(isolate_, ResourceEvent("markcompact", "begin"));
1178 uint64_t size_of_objects_before_gc = SizeOfObjects();
1180 mark_compact_collector_.Prepare();
1184 MarkCompactPrologue();
1186 mark_compact_collector_.CollectGarbage();
1188 LOG(isolate_, ResourceEvent("markcompact", "end"));
1190 gc_state_ = NOT_IN_GC;
1192 isolate_->counters()->objs_since_last_full()->Set(0);
1194 flush_monomorphic_ics_ = false;
1196 if (FLAG_allocation_site_pretenuring) {
1197 EvaluateOldSpaceLocalPretenuring(size_of_objects_before_gc);
1202 void Heap::MarkCompactPrologue() {
  // At any old GC clear the keyed lookup cache to enable collection of unused
  // maps.
1205 isolate_->keyed_lookup_cache()->Clear();
1206 isolate_->context_slot_cache()->Clear();
1207 isolate_->descriptor_lookup_cache()->Clear();
1208 RegExpResultsCache::Clear(string_split_cache());
1209 RegExpResultsCache::Clear(regexp_multiple_cache());
1211 isolate_->compilation_cache()->MarkCompactPrologue();
1213 CompletelyClearInstanceofCache();
1215 FlushNumberStringCache();
1216 if (FLAG_cleanup_code_caches_at_gc) {
1217 polymorphic_code_cache()->set_cache(undefined_value());
1220 ClearNormalizedMapCaches();
1224 // Helper class for copying HeapObjects
1225 class ScavengeVisitor : public ObjectVisitor {
1227 explicit ScavengeVisitor(Heap* heap) : heap_(heap) {}
1229 void VisitPointer(Object** p) { ScavengePointer(p); }
1231 void VisitPointers(Object** start, Object** end) {
1232 // Copy all HeapObject pointers in [start, end)
1233 for (Object** p = start; p < end; p++) ScavengePointer(p);
1237 void ScavengePointer(Object** p) {
1238 Object* object = *p;
1239 if (!heap_->InNewSpace(object)) return;
1240 Heap::ScavengeObject(reinterpret_cast<HeapObject**>(p),
1241 reinterpret_cast<HeapObject*>(object));
// Visitor class to verify pointers in code or data space do not point into
// new space.
1251 class VerifyNonPointerSpacePointersVisitor : public ObjectVisitor {
1253 explicit VerifyNonPointerSpacePointersVisitor(Heap* heap) : heap_(heap) {}
1254 void VisitPointers(Object** start, Object** end) {
1255 for (Object** current = start; current < end; current++) {
1256 if ((*current)->IsHeapObject()) {
1257 CHECK(!heap_->InNewSpace(HeapObject::cast(*current)));
1267 static void VerifyNonPointerSpacePointers(Heap* heap) {
1268 // Verify that there are no pointers to new space in spaces where we
1269 // do not expect them.
1270 VerifyNonPointerSpacePointersVisitor v(heap);
1271 HeapObjectIterator code_it(heap->code_space());
1272 for (HeapObject* object = code_it.Next(); object != NULL;
1273 object = code_it.Next())
1274 object->Iterate(&v);
1276 HeapObjectIterator data_it(heap->old_data_space());
1277 for (HeapObject* object = data_it.Next(); object != NULL;
1278 object = data_it.Next())
1279 object->Iterate(&v);
1281 #endif // VERIFY_HEAP
1284 void Heap::CheckNewSpaceExpansionCriteria() {
1285 if (new_space_.TotalCapacity() < new_space_.MaximumCapacity() &&
1286 survived_since_last_expansion_ > new_space_.TotalCapacity()) {
1287 // Grow the size of new space if there is room to grow, enough data
1288 // has survived scavenge since the last expansion and we are not in
    // high promotion mode.
    new_space_.Grow();
    survived_since_last_expansion_ = 0;
  }
}
1296 static bool IsUnscavengedHeapObject(Heap* heap, Object** p) {
1297 return heap->InNewSpace(*p) &&
1298 !HeapObject::cast(*p)->map_word().IsForwardingAddress();
1302 void Heap::ScavengeStoreBufferCallback(Heap* heap, MemoryChunk* page,
1303 StoreBufferEvent event) {
1304 heap->store_buffer_rebuilder_.Callback(page, event);
1308 void StoreBufferRebuilder::Callback(MemoryChunk* page, StoreBufferEvent event) {
1309 if (event == kStoreBufferStartScanningPagesEvent) {
1310 start_of_current_page_ = NULL;
1311 current_page_ = NULL;
1312 } else if (event == kStoreBufferScanningPageEvent) {
1313 if (current_page_ != NULL) {
      // If this page already overflowed the store buffer during this
      // iteration, then we should wipe out the entries that have been added
      // for it.
      if (current_page_->scan_on_scavenge()) {
1317 store_buffer_->SetTop(start_of_current_page_);
1318 } else if (store_buffer_->Top() - start_of_current_page_ >=
1319 (store_buffer_->Limit() - store_buffer_->Top()) >> 2) {
        // Did we find too many pointers in the previous page? The heuristic
        // is that no page can take more than 1/5 of the remaining slots in
        // the store buffer.
1323 current_page_->set_scan_on_scavenge(true);
1324 store_buffer_->SetTop(start_of_current_page_);
1326 // In this case the page we scanned took a reasonable number of slots in
1327 // the store buffer. It has now been rehabilitated and is no longer
1328 // marked scan_on_scavenge.
1329 DCHECK(!current_page_->scan_on_scavenge());
1332 start_of_current_page_ = store_buffer_->Top();
1333 current_page_ = page;
1334 } else if (event == kStoreBufferFullEvent) {
1335 // The current page overflowed the store buffer again. Wipe out its entries
1336 // in the store buffer and mark it scan-on-scavenge again. This may happen
1337 // several times while scanning.
1338 if (current_page_ == NULL) {
1339 // Store Buffer overflowed while scanning promoted objects. These are not
1340 // in any particular page, though they are likely to be clustered by the
1341 // allocation routines.
1342 store_buffer_->EnsureSpace(StoreBuffer::kStoreBufferSize / 2);
1344 // Store Buffer overflowed while scanning a particular old space page for
1345 // pointers to new space.
1346 DCHECK(current_page_ == page);
1347 DCHECK(page != NULL);
1348 current_page_->set_scan_on_scavenge(true);
1349 DCHECK(start_of_current_page_ != store_buffer_->Top());
1350 store_buffer_->SetTop(start_of_current_page_);
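
  // Numeric illustration of the 1/5 heuristic above (not from the original
  // source): if the page just scanned added 100 entries and 360 slots remain
  // free, then (Limit() - Top()) >> 2 is 90 and 100 >= 90, so the page has
  // consumed more than a fifth of the 460 slots that were free when its scan
  // began (about 22%); it is switched to scan_on_scavenge and its entries are
  // wiped from the store buffer.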
1358 void PromotionQueue::Initialize() {
1359 // Assumes that a NewSpacePage exactly fits a number of promotion queue
1360 // entries (where each is a pair of intptr_t). This allows us to simplify
  // the test for when to switch pages.
  DCHECK((Page::kPageSize - MemoryChunk::kBodyOffset) % (2 * kPointerSize) ==
         0);
  limit_ = reinterpret_cast<intptr_t*>(heap_->new_space()->ToSpaceStart());
  front_ = rear_ =
      reinterpret_cast<intptr_t*>(heap_->new_space()->ToSpaceEnd());
1367 emergency_stack_ = NULL;
1371 void PromotionQueue::RelocateQueueHead() {
1372 DCHECK(emergency_stack_ == NULL);
1374 Page* p = Page::FromAllocationTop(reinterpret_cast<Address>(rear_));
1375 intptr_t* head_start = rear_;
1376 intptr_t* head_end = Min(front_, reinterpret_cast<intptr_t*>(p->area_end()));
  int entries_count =
      static_cast<int>(head_end - head_start) / kEntrySizeInWords;
1381 emergency_stack_ = new List<Entry>(2 * entries_count);
1383 while (head_start != head_end) {
1384 int size = static_cast<int>(*(head_start++));
1385 HeapObject* obj = reinterpret_cast<HeapObject*>(*(head_start++));
1386 emergency_stack_->Add(Entry(obj, size));
1392 class ScavengeWeakObjectRetainer : public WeakObjectRetainer {
1394 explicit ScavengeWeakObjectRetainer(Heap* heap) : heap_(heap) {}
1396 virtual Object* RetainAs(Object* object) {
    if (!heap_->InFromSpace(object)) {
      return object;
    }
1401 MapWord map_word = HeapObject::cast(object)->map_word();
1402 if (map_word.IsForwardingAddress()) {
1403 return map_word.ToForwardingAddress();
1413 void Heap::Scavenge() {
1414 RelocationLock relocation_lock(this);
1417 if (FLAG_verify_heap) VerifyNonPointerSpacePointers(this);
1420 gc_state_ = SCAVENGE;
1422 // Implements Cheney's copying algorithm
1423 LOG(isolate_, ResourceEvent("scavenge", "begin"));
1425 // Clear descriptor cache.
1426 isolate_->descriptor_lookup_cache()->Clear();
1428 // Used for updating survived_since_last_expansion_ at function end.
1429 intptr_t survived_watermark = PromotedSpaceSizeOfObjects();
1431 SelectScavengingVisitorsTable();
1433 incremental_marking()->PrepareForScavenge();
  // Flip the semispaces. After flipping, to space is empty, from space has
  // live objects.
  new_space_.Flip();
  new_space_.ResetAllocationInfo();
1440 // We need to sweep newly copied objects which can be either in the
1441 // to space or promoted to the old generation. For to-space
1442 // objects, we treat the bottom of the to space as a queue. Newly
1443 // copied and unswept objects lie between a 'front' mark and the
1444 // allocation pointer.
1446 // Promoted objects can go into various old-generation spaces, and
1447 // can be allocated internally in the spaces (from the free list).
1448 // We treat the top of the to space as a queue of addresses of
1449 // promoted objects. The addresses of newly promoted and unswept
1450 // objects lie between a 'front' mark and a 'rear' mark that is
1451 // updated as a side effect of promoting an object.
1453 // There is guaranteed to be enough room at the top of the to space
1454 // for the addresses of promoted objects: every object promoted
1455 // frees up its size in bytes from the top of the new space, and
1456 // objects are at least one pointer in size.
1457 Address new_space_front = new_space_.ToSpaceStart();
1458 promotion_queue_.Initialize();
1461 store_buffer()->Clean();
1464 ScavengeVisitor scavenge_visitor(this);
1466 IterateRoots(&scavenge_visitor, VISIT_ALL_IN_SCAVENGE);
1468 // Copy objects reachable from the old generation.
1470 StoreBufferRebuildScope scope(this, store_buffer(),
1471 &ScavengeStoreBufferCallback);
1472 store_buffer()->IteratePointersToNewSpace(&ScavengeObject);
  // Copy objects reachable from simple cells by scavenging cell values
  // directly.
1477 HeapObjectIterator cell_iterator(cell_space_);
1478 for (HeapObject* heap_object = cell_iterator.Next(); heap_object != NULL;
1479 heap_object = cell_iterator.Next()) {
1480 if (heap_object->IsCell()) {
1481 Cell* cell = Cell::cast(heap_object);
1482 Address value_address = cell->ValueAddress();
1483 scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(value_address));
1487 // Copy objects reachable from global property cells by scavenging global
1488 // property cell values directly.
1489 HeapObjectIterator js_global_property_cell_iterator(property_cell_space_);
1490 for (HeapObject* heap_object = js_global_property_cell_iterator.Next();
1491 heap_object != NULL;
1492 heap_object = js_global_property_cell_iterator.Next()) {
1493 if (heap_object->IsPropertyCell()) {
1494 PropertyCell* cell = PropertyCell::cast(heap_object);
1495 Address value_address = cell->ValueAddress();
1496 scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(value_address));
1497 Address type_address = cell->TypeAddress();
1498 scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(type_address));
1502 // Copy objects reachable from the encountered weak collections list.
1503 scavenge_visitor.VisitPointer(&encountered_weak_collections_);
1505 // Copy objects reachable from the code flushing candidates list.
1506 MarkCompactCollector* collector = mark_compact_collector();
1507 if (collector->is_code_flushing_enabled()) {
1508 collector->code_flusher()->IteratePointersToFromSpace(&scavenge_visitor);
1511 new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
1513 while (isolate()->global_handles()->IterateObjectGroups(
1514 &scavenge_visitor, &IsUnscavengedHeapObject)) {
1515 new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
1517 isolate()->global_handles()->RemoveObjectGroups();
1518 isolate()->global_handles()->RemoveImplicitRefGroups();
1520 isolate_->global_handles()->IdentifyNewSpaceWeakIndependentHandles(
1521 &IsUnscavengedHeapObject);
  isolate_->global_handles()->IterateNewSpaceWeakIndependentRoots(
      &scavenge_visitor);
  new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
1526 UpdateNewSpaceReferencesInExternalStringTable(
1527 &UpdateNewSpaceReferenceInExternalStringTableEntry);
1529 promotion_queue_.Destroy();
1531 incremental_marking()->UpdateMarkingDequeAfterScavenge();
1533 ScavengeWeakObjectRetainer weak_object_retainer(this);
1534 ProcessWeakReferences(&weak_object_retainer);
1536 DCHECK(new_space_front == new_space_.top());
1539 new_space_.set_age_mark(new_space_.top());
1541 new_space_.LowerInlineAllocationLimit(
1542 new_space_.inline_allocation_limit_step());
1544 // Update how much has survived scavenge.
1545 IncrementYoungSurvivorsCounter(static_cast<int>(
1546 (PromotedSpaceSizeOfObjects() - survived_watermark) + new_space_.Size()));
1548 LOG(isolate_, ResourceEvent("scavenge", "end"));
1550 gc_state_ = NOT_IN_GC;
String* Heap::UpdateNewSpaceReferenceInExternalStringTableEntry(Heap* heap,
                                                                Object** p) {
1556 MapWord first_word = HeapObject::cast(*p)->map_word();
1558 if (!first_word.IsForwardingAddress()) {
1559 // Unreachable external string can be finalized.
    heap->FinalizeExternalString(String::cast(*p));
    return NULL;
  }
1564 // String is still reachable.
1565 return String::cast(first_word.ToForwardingAddress());
1569 void Heap::UpdateNewSpaceReferencesInExternalStringTable(
1570 ExternalStringTableUpdaterCallback updater_func) {
1572 if (FLAG_verify_heap) {
1573 external_string_table_.Verify();
1577 if (external_string_table_.new_space_strings_.is_empty()) return;
1579 Object** start = &external_string_table_.new_space_strings_[0];
1580 Object** end = start + external_string_table_.new_space_strings_.length();
1581 Object** last = start;
1583 for (Object** p = start; p < end; ++p) {
1584 DCHECK(InFromSpace(*p));
1585 String* target = updater_func(this, p);
1587 if (target == NULL) continue;
1589 DCHECK(target->IsExternalString());
1591 if (InNewSpace(target)) {
      // String is still in new space. Update the table entry.
      *last = target;
      ++last;
    } else {
      // String got promoted. Move it to the old string list.
1597 external_string_table_.AddOldString(target);
1601 DCHECK(last <= end);
1602 external_string_table_.ShrinkNewStrings(static_cast<int>(last - start));
1606 void Heap::UpdateReferencesInExternalStringTable(
1607 ExternalStringTableUpdaterCallback updater_func) {
1608 // Update old space string references.
1609 if (external_string_table_.old_space_strings_.length() > 0) {
1610 Object** start = &external_string_table_.old_space_strings_[0];
1611 Object** end = start + external_string_table_.old_space_strings_.length();
1612 for (Object** p = start; p < end; ++p) *p = updater_func(this, p);
1615 UpdateNewSpaceReferencesInExternalStringTable(updater_func);
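
// Sketch of the updater contract used above (illustrative, not from the
// original file): an ExternalStringTableUpdaterCallback receives a slot and
// returns NULL when the string was not scavenged (so the entry is dropped and
// the string finalized), or its new location otherwise, exactly as
// UpdateNewSpaceReferenceInExternalStringTableEntry() does:
//
//   String* Updater(Heap* heap, Object** p) {
//     MapWord word = HeapObject::cast(*p)->map_word();
//     if (!word.IsForwardingAddress()) return NULL;       // dead
//     return String::cast(word.ToForwardingAddress());    // moved
//   }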
1619 void Heap::ProcessWeakReferences(WeakObjectRetainer* retainer) {
1620 ProcessArrayBuffers(retainer);
1621 ProcessNativeContexts(retainer);
1622 // TODO(mvstanton): AllocationSites only need to be processed during
1623 // MARK_COMPACT, as they live in old space. Verify and address.
1624 ProcessAllocationSites(retainer);
1628 void Heap::ProcessNativeContexts(WeakObjectRetainer* retainer) {
1629 Object* head = VisitWeakList<Context>(this, native_contexts_list(), retainer);
1630 // Update the head of the list of contexts.
1631 set_native_contexts_list(head);
1635 void Heap::ProcessArrayBuffers(WeakObjectRetainer* retainer) {
1636 Object* array_buffer_obj =
1637 VisitWeakList<JSArrayBuffer>(this, array_buffers_list(), retainer);
1638 set_array_buffers_list(array_buffer_obj);
1642 void Heap::TearDownArrayBuffers() {
1643 Object* undefined = undefined_value();
1644 for (Object* o = array_buffers_list(); o != undefined;) {
1645 JSArrayBuffer* buffer = JSArrayBuffer::cast(o);
1646 Runtime::FreeArrayBuffer(isolate(), buffer);
1647 o = buffer->weak_next();
1649 set_array_buffers_list(undefined);
1653 void Heap::ProcessAllocationSites(WeakObjectRetainer* retainer) {
1654 Object* allocation_site_obj =
1655 VisitWeakList<AllocationSite>(this, allocation_sites_list(), retainer);
1656 set_allocation_sites_list(allocation_site_obj);
1660 void Heap::ResetAllAllocationSitesDependentCode(PretenureFlag flag) {
1661 DisallowHeapAllocation no_allocation_scope;
1662 Object* cur = allocation_sites_list();
1663 bool marked = false;
1664 while (cur->IsAllocationSite()) {
1665 AllocationSite* casted = AllocationSite::cast(cur);
1666 if (casted->GetPretenureMode() == flag) {
1667 casted->ResetPretenureDecision();
      casted->set_deopt_dependent_code(true);
      marked = true;
    }
    cur = casted->weak_next();
1673 if (marked) isolate_->stack_guard()->RequestDeoptMarkedAllocationSites();
1677 void Heap::EvaluateOldSpaceLocalPretenuring(
1678 uint64_t size_of_objects_before_gc) {
1679 uint64_t size_of_objects_after_gc = SizeOfObjects();
1680 double old_generation_survival_rate =
1681 (static_cast<double>(size_of_objects_after_gc) * 100) /
1682 static_cast<double>(size_of_objects_before_gc);
1684 if (old_generation_survival_rate < kOldSurvivalRateLowThreshold) {
1685 // Too many objects died in the old generation, pretenuring of wrong
1686 // allocation sites may be the cause for that. We have to deopt all
1687 // dependent code registered in the allocation sites to re-evaluate
1688 // our pretenuring decisions.
1689 ResetAllAllocationSitesDependentCode(TENURED);
1690 if (FLAG_trace_pretenuring) {
1692 "Deopt all allocation sites dependent code due to low survival "
1693 "rate in the old generation %f\n",
1694 old_generation_survival_rate);
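
  // Worked example (illustrative): if the old generation held 100 MB of
  // objects before the mark-compact and 30 MB afterwards, the survival rate
  // is 30.0; if that is below kOldSurvivalRateLowThreshold, every allocation
  // site currently decided as TENURED has its pretenuring decision reset and
  // its dependent code marked for deoptimization.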
1700 void Heap::VisitExternalResources(v8::ExternalResourceVisitor* visitor) {
1701 DisallowHeapAllocation no_allocation;
1702 // All external strings are listed in the external string table.
1704 class ExternalStringTableVisitorAdapter : public ObjectVisitor {
1706 explicit ExternalStringTableVisitorAdapter(
1707 v8::ExternalResourceVisitor* visitor)
1708 : visitor_(visitor) {}
1709 virtual void VisitPointers(Object** start, Object** end) {
1710 for (Object** p = start; p < end; p++) {
1711 DCHECK((*p)->IsExternalString());
1712 visitor_->VisitExternalString(
1713 Utils::ToLocal(Handle<String>(String::cast(*p))));
1718 v8::ExternalResourceVisitor* visitor_;
1719 } external_string_table_visitor(visitor);
1721 external_string_table_.Iterate(&external_string_table_visitor);
1725 class NewSpaceScavenger : public StaticNewSpaceVisitor<NewSpaceScavenger> {
1727 static inline void VisitPointer(Heap* heap, Object** p) {
1728 Object* object = *p;
1729 if (!heap->InNewSpace(object)) return;
1730 Heap::ScavengeObject(reinterpret_cast<HeapObject**>(p),
1731 reinterpret_cast<HeapObject*>(object));
1736 Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
1737 Address new_space_front) {
  do {
    SemiSpace::AssertValidRange(new_space_front, new_space_.top());
1740 // The addresses new_space_front and new_space_.top() define a
    // queue of unprocessed copied objects. Process them until the
    // queue is empty.
1743 while (new_space_front != new_space_.top()) {
1744 if (!NewSpacePage::IsAtEnd(new_space_front)) {
1745 HeapObject* object = HeapObject::FromAddress(new_space_front);
        new_space_front +=
            NewSpaceScavenger::IterateBody(object->map(), object);
      } else {
        new_space_front =
            NewSpacePage::FromLimit(new_space_front)->next_page()->area_start();
      }
    }
1754 // Promote and process all the to-be-promoted objects.
1756 StoreBufferRebuildScope scope(this, store_buffer(),
1757 &ScavengeStoreBufferCallback);
1758 while (!promotion_queue()->is_empty()) {
        HeapObject* target;
        int size;
        promotion_queue()->remove(&target, &size);
        // The promoted object might already have been partially visited
        // during old space pointer iteration. Thus we search specifically
        // for pointers into the from semispace instead of looking for
        // pointers to new space.
1767 DCHECK(!target->IsMap());
1768 IterateAndMarkPointersToFromSpace(
1769 target->address(), target->address() + size, &ScavengeObject);
1773 // Take another spin if there are now unswept objects in new space
1774 // (there are currently no more unswept promoted objects).
1775 } while (new_space_front != new_space_.top());
1777 return new_space_front;
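
// Minimal sketch (not part of the original source) of the Cheney-style queue
// maintained by DoScavenge() above: new_space_front acts as the scan pointer
// and new_space_.top() as the allocation (free) pointer, preserving the
// invariant that every object below the scan pointer has had its outgoing
// pointers scavenged.
//
//   while (scan != free) {            // new_space_front != new_space_.top()
//     HeapObject* obj = object_at(scan);
//     scan += visit_and_copy_children(obj);   // copying may advance 'free'
//   }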
STATIC_ASSERT((FixedDoubleArray::kHeaderSize & kDoubleAlignmentMask) ==
              0);  // NOLINT
STATIC_ASSERT((ConstantPoolArray::kFirstEntryOffset & kDoubleAlignmentMask) ==
              0);  // NOLINT
STATIC_ASSERT((ConstantPoolArray::kExtendedFirstOffset &
               kDoubleAlignmentMask) == 0);  // NOLINT
1789 INLINE(static HeapObject* EnsureDoubleAligned(Heap* heap, HeapObject* object,
1792 static HeapObject* EnsureDoubleAligned(Heap* heap, HeapObject* object,
1794 if ((OffsetFrom(object->address()) & kDoubleAlignmentMask) != 0) {
1795 heap->CreateFillerObjectAt(object->address(), kPointerSize);
1796 return HeapObject::FromAddress(object->address() + kPointerSize);
1798 heap->CreateFillerObjectAt(object->address() + size - kPointerSize, kPointerSize);
1805 enum LoggingAndProfiling {
1806 LOGGING_AND_PROFILING_ENABLED,
1807 LOGGING_AND_PROFILING_DISABLED
1811 enum MarksHandling { TRANSFER_MARKS, IGNORE_MARKS };
1814 template <MarksHandling marks_handling,
1815 LoggingAndProfiling logging_and_profiling_mode>
1816 class ScavengingVisitor : public StaticVisitorBase {
1818 static void Initialize() {
1819 table_.Register(kVisitSeqOneByteString, &EvacuateSeqOneByteString);
1820 table_.Register(kVisitSeqTwoByteString, &EvacuateSeqTwoByteString);
1821 table_.Register(kVisitShortcutCandidate, &EvacuateShortcutCandidate);
1822 table_.Register(kVisitByteArray, &EvacuateByteArray);
1823 table_.Register(kVisitFixedArray, &EvacuateFixedArray);
1824 table_.Register(kVisitFixedDoubleArray, &EvacuateFixedDoubleArray);
1825 table_.Register(kVisitFixedTypedArray, &EvacuateFixedTypedArray);
1826 table_.Register(kVisitFixedFloat64Array, &EvacuateFixedFloat64Array);
1829 kVisitNativeContext,
1830 &ObjectEvacuationStrategy<POINTER_OBJECT>::template VisitSpecialized<
1835 &ObjectEvacuationStrategy<POINTER_OBJECT>::template VisitSpecialized<
1836 ConsString::kSize>);
1840 &ObjectEvacuationStrategy<POINTER_OBJECT>::template VisitSpecialized<
1841 SlicedString::kSize>);
1845 &ObjectEvacuationStrategy<POINTER_OBJECT>::template VisitSpecialized<
1849 kVisitSharedFunctionInfo,
1850 &ObjectEvacuationStrategy<POINTER_OBJECT>::template VisitSpecialized<
1851 SharedFunctionInfo::kSize>);
1853 table_.Register(kVisitJSWeakCollection,
1854 &ObjectEvacuationStrategy<POINTER_OBJECT>::Visit);
1856 table_.Register(kVisitJSArrayBuffer,
1857 &ObjectEvacuationStrategy<POINTER_OBJECT>::Visit);
1859 table_.Register(kVisitJSTypedArray,
1860 &ObjectEvacuationStrategy<POINTER_OBJECT>::Visit);
1862 table_.Register(kVisitJSDataView,
1863 &ObjectEvacuationStrategy<POINTER_OBJECT>::Visit);
1865 table_.Register(kVisitJSRegExp,
1866 &ObjectEvacuationStrategy<POINTER_OBJECT>::Visit);
1868 if (marks_handling == IGNORE_MARKS) {
1871 &ObjectEvacuationStrategy<POINTER_OBJECT>::template VisitSpecialized<
1872 JSFunction::kSize>);
1874 table_.Register(kVisitJSFunction, &EvacuateJSFunction);
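// EvacuateJSFunction is only registered when marks are transferred, i.e.
// while incremental marking is active; it records the code entry slot of
// already-black functions for the compactor (see EvacuateJSFunction below).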
1877 table_.RegisterSpecializations<ObjectEvacuationStrategy<DATA_OBJECT>,
1878 kVisitDataObject, kVisitDataObjectGeneric>();
1880 table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>,
1881 kVisitJSObject, kVisitJSObjectGeneric>();
1883 table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>,
1884 kVisitStruct, kVisitStructGeneric>();
1887 static VisitorDispatchTable<ScavengingCallback>* GetTable() {
1892 enum ObjectContents { DATA_OBJECT, POINTER_OBJECT };
1894 static void RecordCopiedObject(Heap* heap, HeapObject* obj) {
1895 bool should_record = false;
1897 should_record = FLAG_heap_stats;
1899 should_record = should_record || FLAG_log_gc;
1900 if (should_record) {
1901 if (heap->new_space()->Contains(obj)) {
1902 heap->new_space()->RecordAllocation(obj);
1904 heap->new_space()->RecordPromotion(obj);
1909 // Helper function used by the evacuation routines to copy a source object
1910 // to an allocated target object and update the forwarding pointer in the
1911 // source object. The target must already have been allocated by the caller.
1912 INLINE(static void MigrateObject(Heap* heap, HeapObject* source,
1913 HeapObject* target, int size)) {
1914 // If we migrate into to-space, then the to-space top pointer should be
1915 // right after the target object. Incorporate double alignment over-allocation.
1917 DCHECK(!heap->InToSpace(target) ||
1918 target->address() + size == heap->new_space()->top() ||
1919 target->address() + size + kPointerSize == heap->new_space()->top());
1921 // Make sure that we do not overwrite the promotion queue which is at
1922 // the end of to-space.
1923 DCHECK(!heap->InToSpace(target) ||
1924 heap->promotion_queue()->IsBelowPromotionQueue(
1925 heap->new_space()->top()));
1927 // Copy the content of source to target.
1928 heap->CopyBlock(target->address(), source->address(), size);
1930 // Set the forwarding address.
1931 source->set_map_word(MapWord::FromForwardingAddress(target));
1933 if (logging_and_profiling_mode == LOGGING_AND_PROFILING_ENABLED) {
1934 // Update NewSpace stats if necessary.
1935 RecordCopiedObject(heap, target);
1936 heap->OnMoveEvent(target, source, size);
1939 if (marks_handling == TRANSFER_MARKS) {
1940 if (Marking::TransferColor(source, target)) {
1941 MemoryChunk::IncrementLiveBytesFromGC(target->address(), size);
1946 template <int alignment>
1947 static inline bool SemiSpaceCopyObject(Map* map, HeapObject** slot,
1948 HeapObject* object, int object_size) {
1949 Heap* heap = map->GetHeap();
1951 int allocation_size = object_size;
1952 if (alignment != kObjectAlignment) {
1953 DCHECK(alignment == kDoubleAlignment);
1954 allocation_size += kPointerSize;
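// Over-allocate by one word so EnsureDoubleAligned can shift the object
// (or append a filler) to an 8-byte boundary.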
1957 DCHECK(heap->AllowedToBeMigrated(object, NEW_SPACE));
1958 AllocationResult allocation =
1959 heap->new_space()->AllocateRaw(allocation_size);
1961 HeapObject* target = NULL; // Initialization to please compiler.
1962 if (allocation.To(&target)) {
1963 // Order is important here: Set the promotion limit before storing a
1964 // filler for double alignment or migrating the object. Otherwise we
1965 // may end up overwriting promotion queue entries when we migrate the object.
1967 heap->promotion_queue()->SetNewLimit(heap->new_space()->top());
1969 if (alignment != kObjectAlignment) {
1970 target = EnsureDoubleAligned(heap, target, allocation_size);
1973 // Order is important: slot might be inside of the target if target
1974 // was allocated over a dead object and slot comes from the store buffer.
1977 MigrateObject(heap, object, target, object_size);
1979 heap->IncrementSemiSpaceCopiedObjectSize(object_size);
1986 template <ObjectContents object_contents, int alignment>
1987 static inline bool PromoteObject(Map* map, HeapObject** slot,
1988 HeapObject* object, int object_size) {
1989 Heap* heap = map->GetHeap();
1991 int allocation_size = object_size;
1992 if (alignment != kObjectAlignment) {
1993 DCHECK(alignment == kDoubleAlignment);
1994 allocation_size += kPointerSize;
1997 AllocationResult allocation;
1998 if (object_contents == DATA_OBJECT) {
1999 DCHECK(heap->AllowedToBeMigrated(object, OLD_DATA_SPACE));
2000 allocation = heap->old_data_space()->AllocateRaw(allocation_size);
2002 DCHECK(heap->AllowedToBeMigrated(object, OLD_POINTER_SPACE));
2003 allocation = heap->old_pointer_space()->AllocateRaw(allocation_size);
2006 HeapObject* target = NULL; // Initialization to please compiler.
2007 if (allocation.To(&target)) {
2008 if (alignment != kObjectAlignment) {
2009 target = EnsureDoubleAligned(heap, target, allocation_size);
2012 // Order is important: slot might be inside of the target if target
2013 // was allocated over a dead object and slot comes from the store buffer.
2016 MigrateObject(heap, object, target, object_size);
2018 if (object_contents == POINTER_OBJECT) {
2019 if (map->instance_type() == JS_FUNCTION_TYPE) {
2020 heap->promotion_queue()->insert(target,
2021 JSFunction::kNonWeakFieldsEndOffset);
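// Only the non-weak prefix of a JSFunction is enqueued, so the promotion
// queue scan does not treat the code entry (a raw address) or the weak
// next-function link that follow it as ordinary tagged pointers.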
2023 heap->promotion_queue()->insert(target, object_size);
2026 heap->IncrementPromotedObjectsSize(object_size);
2033 template <ObjectContents object_contents, int alignment>
2034 static inline void EvacuateObject(Map* map, HeapObject** slot,
2035 HeapObject* object, int object_size) {
2036 SLOW_DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
2037 SLOW_DCHECK(object->Size() == object_size);
2038 Heap* heap = map->GetHeap();
2040 if (!heap->ShouldBePromoted(object->address(), object_size)) {
2041 // A semi-space copy may fail due to fragmentation. In that case, we
2042 // try to promote the object.
2043 if (SemiSpaceCopyObject<alignment>(map, slot, object, object_size)) {
2048 if (PromoteObject<object_contents, alignment>(map, slot, object,
2053 // If promotion failed, we try to copy the object to the other semi-space
2054 if (SemiSpaceCopyObject<alignment>(map, slot, object, object_size)) return;
2060 static inline void EvacuateJSFunction(Map* map, HeapObject** slot,
2061 HeapObject* object) {
2062 ObjectEvacuationStrategy<POINTER_OBJECT>::template VisitSpecialized<
2063 JSFunction::kSize>(map, slot, object);
2065 MapWord map_word = object->map_word();
2066 DCHECK(map_word.IsForwardingAddress());
2067 HeapObject* target = map_word.ToForwardingAddress();
2069 MarkBit mark_bit = Marking::MarkBitFrom(target);
2070 if (Marking::IsBlack(mark_bit)) {
2071 // This object is black and it might not be rescanned by marker.
2072 // We should explicitly record code entry slot for compaction because
2073 // promotion queue processing (IterateAndMarkPointersToFromSpace) will
2074 // miss it as it is not HeapObject-tagged.
2075 Address code_entry_slot =
2076 target->address() + JSFunction::kCodeEntryOffset;
2077 Code* code = Code::cast(Code::GetObjectFromEntryAddress(code_entry_slot));
2078 map->GetHeap()->mark_compact_collector()->RecordCodeEntrySlot(
2079 code_entry_slot, code);
2084 static inline void EvacuateFixedArray(Map* map, HeapObject** slot,
2085 HeapObject* object) {
2086 int object_size = FixedArray::BodyDescriptor::SizeOf(map, object);
2087 EvacuateObject<POINTER_OBJECT, kObjectAlignment>(map, slot, object,
2092 static inline void EvacuateFixedDoubleArray(Map* map, HeapObject** slot,
2093 HeapObject* object) {
2094 int length = reinterpret_cast<FixedDoubleArray*>(object)->length();
2095 int object_size = FixedDoubleArray::SizeFor(length);
2096 EvacuateObject<DATA_OBJECT, kDoubleAlignment>(map, slot, object,
2101 static inline void EvacuateFixedTypedArray(Map* map, HeapObject** slot,
2102 HeapObject* object) {
2103 int object_size = reinterpret_cast<FixedTypedArrayBase*>(object)->size();
2104 EvacuateObject<DATA_OBJECT, kObjectAlignment>(map, slot, object,
2109 static inline void EvacuateFixedFloat64Array(Map* map, HeapObject** slot,
2110 HeapObject* object) {
2111 int object_size = reinterpret_cast<FixedFloat64Array*>(object)->size();
2112 EvacuateObject<DATA_OBJECT, kDoubleAlignment>(map, slot, object,
2117 static inline void EvacuateByteArray(Map* map, HeapObject** slot,
2118 HeapObject* object) {
2119 int object_size = reinterpret_cast<ByteArray*>(object)->ByteArraySize();
2120 EvacuateObject<DATA_OBJECT, kObjectAlignment>(map, slot, object,
2125 static inline void EvacuateSeqOneByteString(Map* map, HeapObject** slot,
2126 HeapObject* object) {
2127 int object_size = SeqOneByteString::cast(object)
2128 ->SeqOneByteStringSize(map->instance_type());
2129 EvacuateObject<DATA_OBJECT, kObjectAlignment>(map, slot, object,
2134 static inline void EvacuateSeqTwoByteString(Map* map, HeapObject** slot,
2135 HeapObject* object) {
2136 int object_size = SeqTwoByteString::cast(object)
2137 ->SeqTwoByteStringSize(map->instance_type());
2138 EvacuateObject<DATA_OBJECT, kObjectAlignment>(map, slot, object,
2143 static inline void EvacuateShortcutCandidate(Map* map, HeapObject** slot,
2144 HeapObject* object) {
2145 DCHECK(IsShortcutCandidate(map->instance_type()));
2147 Heap* heap = map->GetHeap();
2149 if (marks_handling == IGNORE_MARKS &&
2150 ConsString::cast(object)->unchecked_second() == heap->empty_string()) {
2152 HeapObject::cast(ConsString::cast(object)->unchecked_first());
2156 if (!heap->InNewSpace(first)) {
2157 object->set_map_word(MapWord::FromForwardingAddress(first));
2161 MapWord first_word = first->map_word();
2162 if (first_word.IsForwardingAddress()) {
2163 HeapObject* target = first_word.ToForwardingAddress();
2166 object->set_map_word(MapWord::FromForwardingAddress(target));
2170 heap->DoScavengeObject(first->map(), slot, first);
2171 object->set_map_word(MapWord::FromForwardingAddress(*slot));
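// The cons wrapper is forwarded to wherever 'first' ended up, so any other
// references to the cons string are short-circuited as well.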
2175 int object_size = ConsString::kSize;
2176 EvacuateObject<POINTER_OBJECT, kObjectAlignment>(map, slot, object,
2180 template <ObjectContents object_contents>
2181 class ObjectEvacuationStrategy {
2183 template <int object_size>
2184 static inline void VisitSpecialized(Map* map, HeapObject** slot,
2185 HeapObject* object) {
2186 EvacuateObject<object_contents, kObjectAlignment>(map, slot, object,
2190 static inline void Visit(Map* map, HeapObject** slot, HeapObject* object) {
2191 int object_size = map->instance_size();
2192 EvacuateObject<object_contents, kObjectAlignment>(map, slot, object,
2197 static VisitorDispatchTable<ScavengingCallback> table_;
2201 template <MarksHandling marks_handling,
2202 LoggingAndProfiling logging_and_profiling_mode>
2203 VisitorDispatchTable<ScavengingCallback>
2204 ScavengingVisitor<marks_handling, logging_and_profiling_mode>::table_;
2207 static void InitializeScavengingVisitorsTables() {
2208 ScavengingVisitor<TRANSFER_MARKS,
2209 LOGGING_AND_PROFILING_DISABLED>::Initialize();
2210 ScavengingVisitor<IGNORE_MARKS, LOGGING_AND_PROFILING_DISABLED>::Initialize();
2211 ScavengingVisitor<TRANSFER_MARKS,
2212 LOGGING_AND_PROFILING_ENABLED>::Initialize();
2213 ScavengingVisitor<IGNORE_MARKS, LOGGING_AND_PROFILING_ENABLED>::Initialize();
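// All four <marks handling, logging/profiling> combinations are initialized
// eagerly so that SelectScavengingVisitorsTable can simply copy the matching
// table.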
2217 void Heap::SelectScavengingVisitorsTable() {
2218 bool logging_and_profiling =
2219 FLAG_verify_predictable || isolate()->logger()->is_logging() ||
2220 isolate()->cpu_profiler()->is_profiling() ||
2221 (isolate()->heap_profiler() != NULL &&
2222 isolate()->heap_profiler()->is_tracking_object_moves());
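// The logging/profiling visitor variants report object moves (OnMoveEvent);
// they are needed whenever the logger, CPU profiler, or heap profiler is
// tracking moves, or FLAG_verify_predictable is set.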
2224 if (!incremental_marking()->IsMarking()) {
2225 if (!logging_and_profiling) {
2226 scavenging_visitors_table_.CopyFrom(ScavengingVisitor<
2227 IGNORE_MARKS, LOGGING_AND_PROFILING_DISABLED>::GetTable());
2229 scavenging_visitors_table_.CopyFrom(ScavengingVisitor<
2230 IGNORE_MARKS, LOGGING_AND_PROFILING_ENABLED>::GetTable());
2233 if (!logging_and_profiling) {
2234 scavenging_visitors_table_.CopyFrom(ScavengingVisitor<
2235 TRANSFER_MARKS, LOGGING_AND_PROFILING_DISABLED>::GetTable());
2237 scavenging_visitors_table_.CopyFrom(ScavengingVisitor<
2238 TRANSFER_MARKS, LOGGING_AND_PROFILING_ENABLED>::GetTable());
2241 if (incremental_marking()->IsCompacting()) {
2242 // When compacting, forbid short-circuiting of cons strings:
2243 // scavenging code relies on the fact that new space objects
2244 // can't be evacuated into an evacuation candidate, but
2245 // short-circuiting violates this assumption.
2246 scavenging_visitors_table_.Register(
2247 StaticVisitorBase::kVisitShortcutCandidate,
2248 scavenging_visitors_table_.GetVisitorById(
2249 StaticVisitorBase::kVisitConsString));
2255 void Heap::ScavengeObjectSlow(HeapObject** p, HeapObject* object) {
2256 SLOW_DCHECK(object->GetIsolate()->heap()->InFromSpace(object));
2257 MapWord first_word = object->map_word();
2258 SLOW_DCHECK(!first_word.IsForwardingAddress());
2259 Map* map = first_word.ToMap();
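// The map word still holds the map (no forwarding address yet), so the map's
// visitor id dispatches to the correct evacuation callback.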
2260 map->GetHeap()->DoScavengeObject(map, p, object);
2264 AllocationResult Heap::AllocatePartialMap(InstanceType instance_type,
2265 int instance_size) {
2267 AllocationResult allocation = AllocateRaw(Map::kSize, MAP_SPACE, MAP_SPACE);
2268 if (!allocation.To(&result)) return allocation;
2270 // Map::cast cannot be used due to uninitialized map field.
2271 reinterpret_cast<Map*>(result)->set_map(raw_unchecked_meta_map());
2272 reinterpret_cast<Map*>(result)->set_instance_type(instance_type);
2273 reinterpret_cast<Map*>(result)->set_instance_size(instance_size);
2274 reinterpret_cast<Map*>(result)->set_visitor_id(
2275 StaticVisitorBase::GetVisitorId(instance_type, instance_size));
2276 reinterpret_cast<Map*>(result)->set_inobject_properties(0);
2277 reinterpret_cast<Map*>(result)->set_pre_allocated_property_fields(0);
2278 reinterpret_cast<Map*>(result)->set_unused_property_fields(0);
2279 reinterpret_cast<Map*>(result)->set_bit_field(0);
2280 reinterpret_cast<Map*>(result)->set_bit_field2(0);
2281 int bit_field3 = Map::EnumLengthBits::encode(kInvalidEnumCacheSentinel) |
2282 Map::OwnsDescriptors::encode(true);
2283 reinterpret_cast<Map*>(result)->set_bit_field3(bit_field3);
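// Partial maps leave prototype, constructor and instance descriptors unset;
// CreateInitialMaps patches those fields once the corresponding objects exist.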
2288 AllocationResult Heap::AllocateMap(InstanceType instance_type,
2290 ElementsKind elements_kind) {
2292 AllocationResult allocation = AllocateRaw(Map::kSize, MAP_SPACE, MAP_SPACE);
2293 if (!allocation.To(&result)) return allocation;
2295 result->set_map_no_write_barrier(meta_map());
2296 Map* map = Map::cast(result);
2297 map->set_instance_type(instance_type);
2298 map->set_visitor_id(
2299 StaticVisitorBase::GetVisitorId(instance_type, instance_size));
2300 map->set_prototype(null_value(), SKIP_WRITE_BARRIER);
2301 map->set_constructor(null_value(), SKIP_WRITE_BARRIER);
2302 map->set_instance_size(instance_size);
2303 map->set_inobject_properties(0);
2304 map->set_pre_allocated_property_fields(0);
2305 map->set_code_cache(empty_fixed_array(), SKIP_WRITE_BARRIER);
2306 map->set_dependent_code(DependentCode::cast(empty_fixed_array()),
2307 SKIP_WRITE_BARRIER);
2308 map->init_back_pointer(undefined_value());
2309 map->set_unused_property_fields(0);
2310 map->set_instance_descriptors(empty_descriptor_array());
2311 map->set_bit_field(0);
2312 map->set_bit_field2(1 << Map::kIsExtensible);
2313 int bit_field3 = Map::EnumLengthBits::encode(kInvalidEnumCacheSentinel) |
2314 Map::OwnsDescriptors::encode(true);
2315 map->set_bit_field3(bit_field3);
2316 map->set_elements_kind(elements_kind);
2322 AllocationResult Heap::AllocateFillerObject(int size, bool double_align,
2323 AllocationSpace space) {
2326 AllocationResult allocation = AllocateRaw(size, space, space);
2327 if (!allocation.To(&obj)) return allocation;
2330 MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
2331 DCHECK(chunk->owner()->identity() == space);
2333 CreateFillerObjectAt(obj->address(), size);
2338 const Heap::StringTypeTable Heap::string_type_table[] = {
2339 #define STRING_TYPE_ELEMENT(type, size, name, camel_name) \
2340 { type, size, k##camel_name##MapRootIndex } \
2342 STRING_TYPE_LIST(STRING_TYPE_ELEMENT)
2343 #undef STRING_TYPE_ELEMENT
2347 const Heap::ConstantStringTable Heap::constant_string_table[] = {
2348 #define CONSTANT_STRING_ELEMENT(name, contents) \
2349 { contents, k##name##RootIndex } \
2351 INTERNALIZED_STRING_LIST(CONSTANT_STRING_ELEMENT)
2352 #undef CONSTANT_STRING_ELEMENT
2356 const Heap::StructTable Heap::struct_table[] = {
2357 #define STRUCT_TABLE_ELEMENT(NAME, Name, name) \
2358 { NAME##_TYPE, Name::kSize, k##Name##MapRootIndex } \
2360 STRUCT_LIST(STRUCT_TABLE_ELEMENT)
2361 #undef STRUCT_TABLE_ELEMENT
2365 bool Heap::CreateInitialMaps() {
2368 AllocationResult allocation = AllocatePartialMap(MAP_TYPE, Map::kSize);
2369 if (!allocation.To(&obj)) return false;
2371 // Map::cast cannot be used due to uninitialized map field.
2372 Map* new_meta_map = reinterpret_cast<Map*>(obj);
2373 set_meta_map(new_meta_map);
2374 new_meta_map->set_map(new_meta_map);
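// The meta map is its own map, terminating the map chain.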
2376 { // Partial map allocation
2377 #define ALLOCATE_PARTIAL_MAP(instance_type, size, field_name) \
2380 if (!AllocatePartialMap((instance_type), (size)).To(&map)) return false; \
2381 set_##field_name##_map(map); \
2384 ALLOCATE_PARTIAL_MAP(FIXED_ARRAY_TYPE, kVariableSizeSentinel, fixed_array);
2385 ALLOCATE_PARTIAL_MAP(ODDBALL_TYPE, Oddball::kSize, undefined);
2386 ALLOCATE_PARTIAL_MAP(ODDBALL_TYPE, Oddball::kSize, null);
2387 ALLOCATE_PARTIAL_MAP(CONSTANT_POOL_ARRAY_TYPE, kVariableSizeSentinel,
2388 constant_pool_array);
2390 #undef ALLOCATE_PARTIAL_MAP
2393 // Allocate the empty fixed array.
2395 AllocationResult allocation = AllocateEmptyFixedArray();
2396 if (!allocation.To(&obj)) return false;
2398 set_empty_fixed_array(FixedArray::cast(obj));
2401 AllocationResult allocation = Allocate(null_map(), OLD_POINTER_SPACE);
2402 if (!allocation.To(&obj)) return false;
2404 set_null_value(Oddball::cast(obj));
2405 Oddball::cast(obj)->set_kind(Oddball::kNull);
2408 AllocationResult allocation = Allocate(undefined_map(), OLD_POINTER_SPACE);
2409 if (!allocation.To(&obj)) return false;
2411 set_undefined_value(Oddball::cast(obj));
2412 Oddball::cast(obj)->set_kind(Oddball::kUndefined);
2413 DCHECK(!InNewSpace(undefined_value()));
2415 // Set preliminary exception sentinel value before actually initializing it.
2416 set_exception(null_value());
2418 // Allocate the empty descriptor array.
2420 AllocationResult allocation = AllocateEmptyFixedArray();
2421 if (!allocation.To(&obj)) return false;
2423 set_empty_descriptor_array(DescriptorArray::cast(obj));
2425 // Allocate the constant pool array.
2427 AllocationResult allocation = AllocateEmptyConstantPoolArray();
2428 if (!allocation.To(&obj)) return false;
2430 set_empty_constant_pool_array(ConstantPoolArray::cast(obj));
2432 // Fix the instance_descriptors for the existing maps.
2433 meta_map()->set_code_cache(empty_fixed_array());
2434 meta_map()->set_dependent_code(DependentCode::cast(empty_fixed_array()));
2435 meta_map()->init_back_pointer(undefined_value());
2436 meta_map()->set_instance_descriptors(empty_descriptor_array());
2438 fixed_array_map()->set_code_cache(empty_fixed_array());
2439 fixed_array_map()->set_dependent_code(
2440 DependentCode::cast(empty_fixed_array()));
2441 fixed_array_map()->init_back_pointer(undefined_value());
2442 fixed_array_map()->set_instance_descriptors(empty_descriptor_array());
2444 undefined_map()->set_code_cache(empty_fixed_array());
2445 undefined_map()->set_dependent_code(DependentCode::cast(empty_fixed_array()));
2446 undefined_map()->init_back_pointer(undefined_value());
2447 undefined_map()->set_instance_descriptors(empty_descriptor_array());
2449 null_map()->set_code_cache(empty_fixed_array());
2450 null_map()->set_dependent_code(DependentCode::cast(empty_fixed_array()));
2451 null_map()->init_back_pointer(undefined_value());
2452 null_map()->set_instance_descriptors(empty_descriptor_array());
2454 constant_pool_array_map()->set_code_cache(empty_fixed_array());
2455 constant_pool_array_map()->set_dependent_code(
2456 DependentCode::cast(empty_fixed_array()));
2457 constant_pool_array_map()->init_back_pointer(undefined_value());
2458 constant_pool_array_map()->set_instance_descriptors(empty_descriptor_array());
2460 // Fix prototype object for existing maps.
2461 meta_map()->set_prototype(null_value());
2462 meta_map()->set_constructor(null_value());
2464 fixed_array_map()->set_prototype(null_value());
2465 fixed_array_map()->set_constructor(null_value());
2467 undefined_map()->set_prototype(null_value());
2468 undefined_map()->set_constructor(null_value());
2470 null_map()->set_prototype(null_value());
2471 null_map()->set_constructor(null_value());
2473 constant_pool_array_map()->set_prototype(null_value());
2474 constant_pool_array_map()->set_constructor(null_value());
2477 #define ALLOCATE_MAP(instance_type, size, field_name) \
2480 if (!AllocateMap((instance_type), size).To(&map)) return false; \
2481 set_##field_name##_map(map); \
2484 #define ALLOCATE_VARSIZE_MAP(instance_type, field_name) \
2485 ALLOCATE_MAP(instance_type, kVariableSizeSentinel, field_name)
2487 ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, fixed_cow_array)
2488 DCHECK(fixed_array_map() != fixed_cow_array_map());
2490 ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, scope_info)
2491 ALLOCATE_MAP(HEAP_NUMBER_TYPE, HeapNumber::kSize, heap_number)
2492 ALLOCATE_MAP(MUTABLE_HEAP_NUMBER_TYPE, HeapNumber::kSize,
2493 mutable_heap_number)
2494 ALLOCATE_MAP(SYMBOL_TYPE, Symbol::kSize, symbol)
2495 ALLOCATE_MAP(FOREIGN_TYPE, Foreign::kSize, foreign)
2497 ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, the_hole);
2498 ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, boolean);
2499 ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, uninitialized);
2500 ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, arguments_marker);
2501 ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, no_interceptor_result_sentinel);
2502 ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, exception);
2503 ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, termination_exception);
2505 for (unsigned i = 0; i < arraysize(string_type_table); i++) {
2506 const StringTypeTable& entry = string_type_table[i];
2508 AllocationResult allocation = AllocateMap(entry.type, entry.size);
2509 if (!allocation.To(&obj)) return false;
2511 // Mark cons string maps as unstable, because their objects can change maps during GC.
2513 Map* map = Map::cast(obj);
2514 if (StringShape(entry.type).IsCons()) map->mark_unstable();
2515 roots_[entry.index] = map;
2518 ALLOCATE_VARSIZE_MAP(STRING_TYPE, undetectable_string)
2519 undetectable_string_map()->set_is_undetectable();
2521 ALLOCATE_VARSIZE_MAP(ONE_BYTE_STRING_TYPE, undetectable_one_byte_string);
2522 undetectable_one_byte_string_map()->set_is_undetectable();
2524 ALLOCATE_VARSIZE_MAP(FIXED_DOUBLE_ARRAY_TYPE, fixed_double_array)
2525 ALLOCATE_VARSIZE_MAP(BYTE_ARRAY_TYPE, byte_array)
2526 ALLOCATE_VARSIZE_MAP(FREE_SPACE_TYPE, free_space)
2528 #define ALLOCATE_EXTERNAL_ARRAY_MAP(Type, type, TYPE, ctype, size) \
2529 ALLOCATE_MAP(EXTERNAL_##TYPE##_ARRAY_TYPE, ExternalArray::kAlignedSize, \
2530 external_##type##_array)
2532 TYPED_ARRAYS(ALLOCATE_EXTERNAL_ARRAY_MAP)
2533 #undef ALLOCATE_EXTERNAL_ARRAY_MAP
2535 #define ALLOCATE_FIXED_TYPED_ARRAY_MAP(Type, type, TYPE, ctype, size) \
2536 ALLOCATE_VARSIZE_MAP(FIXED_##TYPE##_ARRAY_TYPE, fixed_##type##_array)
2538 TYPED_ARRAYS(ALLOCATE_FIXED_TYPED_ARRAY_MAP)
2539 #undef ALLOCATE_FIXED_TYPED_ARRAY_MAP
2541 ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, sloppy_arguments_elements)
2543 ALLOCATE_VARSIZE_MAP(CODE_TYPE, code)
2545 ALLOCATE_MAP(CELL_TYPE, Cell::kSize, cell)
2546 ALLOCATE_MAP(PROPERTY_CELL_TYPE, PropertyCell::kSize, global_property_cell)
2547 ALLOCATE_MAP(FILLER_TYPE, kPointerSize, one_pointer_filler)
2548 ALLOCATE_MAP(FILLER_TYPE, 2 * kPointerSize, two_pointer_filler)
2551 for (unsigned i = 0; i < arraysize(struct_table); i++) {
2552 const StructTable& entry = struct_table[i];
2554 if (!AllocateMap(entry.type, entry.size).To(&map)) return false;
2555 roots_[entry.index] = map;
2558 ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, hash_table)
2559 ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, ordered_hash_table)
2561 ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, function_context)
2562 ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, catch_context)
2563 ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, with_context)
2564 ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, block_context)
2565 ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, module_context)
2566 ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, global_context)
2568 ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, native_context)
2569 native_context_map()->set_dictionary_map(true);
2570 native_context_map()->set_visitor_id(
2571 StaticVisitorBase::kVisitNativeContext);
2573 ALLOCATE_MAP(SHARED_FUNCTION_INFO_TYPE, SharedFunctionInfo::kAlignedSize,
2574 shared_function_info)
2576 ALLOCATE_MAP(JS_MESSAGE_OBJECT_TYPE, JSMessageObject::kSize, message_object)
2577 ALLOCATE_MAP(JS_OBJECT_TYPE, JSObject::kHeaderSize + kPointerSize, external)
2578 external_map()->set_is_extensible(false);
2579 #undef ALLOCATE_VARSIZE_MAP
2585 ByteArray* byte_array;
2586 if (!AllocateByteArray(0, TENURED).To(&byte_array)) return false;
2587 set_empty_byte_array(byte_array);
2590 #define ALLOCATE_EMPTY_EXTERNAL_ARRAY(Type, type, TYPE, ctype, size) \
2592 ExternalArray* obj; \
2593 if (!AllocateEmptyExternalArray(kExternal##Type##Array).To(&obj)) \
2595 set_empty_external_##type##_array(obj); \
2598 TYPED_ARRAYS(ALLOCATE_EMPTY_EXTERNAL_ARRAY)
2599 #undef ALLOCATE_EMPTY_EXTERNAL_ARRAY
2601 #define ALLOCATE_EMPTY_FIXED_TYPED_ARRAY(Type, type, TYPE, ctype, size) \
2603 FixedTypedArrayBase* obj; \
2604 if (!AllocateEmptyFixedTypedArray(kExternal##Type##Array).To(&obj)) \
2606 set_empty_fixed_##type##_array(obj); \
2609 TYPED_ARRAYS(ALLOCATE_EMPTY_FIXED_TYPED_ARRAY)
2610 #undef ALLOCATE_EMPTY_FIXED_TYPED_ARRAY
2612 DCHECK(!InNewSpace(empty_fixed_array()));
2617 AllocationResult Heap::AllocateHeapNumber(double value, MutableMode mode,
2618 PretenureFlag pretenure) {
2619 // Statically ensure that it is safe to allocate heap numbers in paged spaces.
2621 int size = HeapNumber::kSize;
2622 STATIC_ASSERT(HeapNumber::kSize <= Page::kMaxRegularHeapObjectSize);
2624 AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
2628 AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
2629 if (!allocation.To(&result)) return allocation;
2632 Map* map = mode == MUTABLE ? mutable_heap_number_map() : heap_number_map();
2633 HeapObject::cast(result)->set_map_no_write_barrier(map);
2634 HeapNumber::cast(result)->set_value(value);
2639 AllocationResult Heap::AllocateCell(Object* value) {
2640 int size = Cell::kSize;
2641 STATIC_ASSERT(Cell::kSize <= Page::kMaxRegularHeapObjectSize);
2645 AllocationResult allocation = AllocateRaw(size, CELL_SPACE, CELL_SPACE);
2646 if (!allocation.To(&result)) return allocation;
2648 result->set_map_no_write_barrier(cell_map());
2649 Cell::cast(result)->set_value(value);
2654 AllocationResult Heap::AllocatePropertyCell() {
2655 int size = PropertyCell::kSize;
2656 STATIC_ASSERT(PropertyCell::kSize <= Page::kMaxRegularHeapObjectSize);
2659 AllocationResult allocation =
2660 AllocateRaw(size, PROPERTY_CELL_SPACE, PROPERTY_CELL_SPACE);
2661 if (!allocation.To(&result)) return allocation;
2663 result->set_map_no_write_barrier(global_property_cell_map());
2664 PropertyCell* cell = PropertyCell::cast(result);
2665 cell->set_dependent_code(DependentCode::cast(empty_fixed_array()),
2666 SKIP_WRITE_BARRIER);
2667 cell->set_value(the_hole_value());
2668 cell->set_type(HeapType::None());
2673 void Heap::CreateApiObjects() {
2674 HandleScope scope(isolate());
2675 Factory* factory = isolate()->factory();
2676 Handle<Map> new_neander_map =
2677 factory->NewMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
2679 // Don't use Smi-only elements optimizations for objects with the neander
2680 // map. There are too many cases where element values are set directly with a
2681 // bottleneck to trap the Smi-only -> fast elements transition, and there
2682 // appears to be no benefit in optimizing this case.
2683 new_neander_map->set_elements_kind(TERMINAL_FAST_ELEMENTS_KIND);
2684 set_neander_map(*new_neander_map);
2686 Handle<JSObject> listeners = factory->NewNeanderObject();
2687 Handle<FixedArray> elements = factory->NewFixedArray(2);
2688 elements->set(0, Smi::FromInt(0));
2689 listeners->set_elements(*elements);
2690 set_message_listeners(*listeners);
2694 void Heap::CreateJSEntryStub() {
2695 JSEntryStub stub(isolate(), StackFrame::ENTRY);
2696 set_js_entry_code(*stub.GetCode());
2700 void Heap::CreateJSConstructEntryStub() {
2701 JSEntryStub stub(isolate(), StackFrame::ENTRY_CONSTRUCT);
2702 set_js_construct_entry_code(*stub.GetCode());
2706 void Heap::CreateFixedStubs() {
2707 // Here we create roots for fixed stubs. They are needed at GC
2708 // for cooking and uncooking (check out frames.cc).
2709 // This eliminates the need for doing a dictionary lookup in the
2710 // stub cache for these stubs.
2711 HandleScope scope(isolate());
2713 // Create stubs that should be there, so we don't unexpectedly have to
2714 // create them if we need them during the creation of another stub.
2715 // Stub creation mixes raw pointers and handles in an unsafe manner so
2716 // we cannot create stubs while we are creating stubs.
2717 CodeStub::GenerateStubsAheadOfTime(isolate());
2719 // MacroAssembler::Abort calls (usually enabled with --debug-code) depend on
2720 // CEntryStub, so we need to call GenerateStubsAheadOfTime before JSEntryStub is created.
2723 // gcc-4.4 has a problem generating correct code for the following snippet:
2724 // { JSEntryStub stub;
2725 // js_entry_code_ = *stub.GetCode();
2727 // { JSConstructEntryStub stub;
2728 // js_construct_entry_code_ = *stub.GetCode();
2730 // To work around the problem, make separate functions without inlining.
2731 Heap::CreateJSEntryStub();
2732 Heap::CreateJSConstructEntryStub();
2736 void Heap::CreateInitialObjects() {
2737 HandleScope scope(isolate());
2738 Factory* factory = isolate()->factory();
2740 // The -0 value must be set before NewNumber works.
2741 set_minus_zero_value(*factory->NewHeapNumber(-0.0, IMMUTABLE, TENURED));
2742 DCHECK(std::signbit(minus_zero_value()->Number()) != 0);
2745 *factory->NewHeapNumber(base::OS::nan_value(), IMMUTABLE, TENURED));
2746 set_infinity_value(*factory->NewHeapNumber(V8_INFINITY, IMMUTABLE, TENURED));
2748 // The hole has not been created yet, but we want to put something
2749 // predictable in the gaps in the string table, so let's make that Smi zero.
2750 set_the_hole_value(reinterpret_cast<Oddball*>(Smi::FromInt(0)));
2752 // Allocate initial string table.
2753 set_string_table(*StringTable::New(isolate(), kInitialStringTableSize));
2755 // Finish initializing oddballs after creating the string table.
2756 Oddball::Initialize(isolate(), factory->undefined_value(), "undefined",
2757 factory->nan_value(), Oddball::kUndefined);
2759 // Initialize the null_value.
2760 Oddball::Initialize(isolate(), factory->null_value(), "null",
2761 handle(Smi::FromInt(0), isolate()), Oddball::kNull);
2763 set_true_value(*factory->NewOddball(factory->boolean_map(), "true",
2764 handle(Smi::FromInt(1), isolate()),
2767 set_false_value(*factory->NewOddball(factory->boolean_map(), "false",
2768 handle(Smi::FromInt(0), isolate()),
2771 set_the_hole_value(*factory->NewOddball(factory->the_hole_map(), "hole",
2772 handle(Smi::FromInt(-1), isolate()),
2773 Oddball::kTheHole));
2775 set_uninitialized_value(*factory->NewOddball(
2776 factory->uninitialized_map(), "uninitialized",
2777 handle(Smi::FromInt(-1), isolate()), Oddball::kUninitialized));
2779 set_arguments_marker(*factory->NewOddball(
2780 factory->arguments_marker_map(), "arguments_marker",
2781 handle(Smi::FromInt(-4), isolate()), Oddball::kArgumentMarker));
2783 set_no_interceptor_result_sentinel(*factory->NewOddball(
2784 factory->no_interceptor_result_sentinel_map(),
2785 "no_interceptor_result_sentinel", handle(Smi::FromInt(-2), isolate()),
2788 set_termination_exception(*factory->NewOddball(
2789 factory->termination_exception_map(), "termination_exception",
2790 handle(Smi::FromInt(-3), isolate()), Oddball::kOther));
2792 set_exception(*factory->NewOddball(factory->exception_map(), "exception",
2793 handle(Smi::FromInt(-5), isolate()),
2794 Oddball::kException));
2796 for (unsigned i = 0; i < arraysize(constant_string_table); i++) {
2797 Handle<String> str =
2798 factory->InternalizeUtf8String(constant_string_table[i].contents);
2799 roots_[constant_string_table[i].index] = *str;
2802 // Allocate the hidden string which is used to identify the hidden properties
2803 // in JSObjects. The hash code has a special value so that it will not match
2804 // the empty string when searching for the property. It cannot be part of the
2805 // loop above because it needs to be allocated manually with the special
2806 // hash code in place. The hash code for the hidden_string is zero to ensure
2807 // that it will always be at the first entry in property descriptors.
2808 hidden_string_ = *factory->NewOneByteInternalizedString(
2809 OneByteVector("", 0), String::kEmptyStringHash);
2811 // Create the code_stubs dictionary. The initial size is set to avoid
2812 // expanding the dictionary during bootstrapping.
2813 set_code_stubs(*UnseededNumberDictionary::New(isolate(), 128));
2815 // Create the non_monomorphic_cache used in stub-cache.cc. The initial size
2816 // is set to avoid expanding the dictionary during bootstrapping.
2817 set_non_monomorphic_cache(*UnseededNumberDictionary::New(isolate(), 64));
2819 set_polymorphic_code_cache(PolymorphicCodeCache::cast(
2820 *factory->NewStruct(POLYMORPHIC_CODE_CACHE_TYPE)));
2822 set_instanceof_cache_function(Smi::FromInt(0));
2823 set_instanceof_cache_map(Smi::FromInt(0));
2824 set_instanceof_cache_answer(Smi::FromInt(0));
2828 // Allocate the dictionary of intrinsic function names.
2829 Handle<NameDictionary> intrinsic_names =
2830 NameDictionary::New(isolate(), Runtime::kNumFunctions, TENURED);
2831 Runtime::InitializeIntrinsicFunctionNames(isolate(), intrinsic_names);
2832 set_intrinsic_function_names(*intrinsic_names);
2834 set_number_string_cache(
2835 *factory->NewFixedArray(kInitialNumberStringCacheSize * 2, TENURED));
2837 // Allocate cache for single character one byte strings.
2838 set_single_character_string_cache(
2839 *factory->NewFixedArray(String::kMaxOneByteCharCode + 1, TENURED));
2841 // Allocate cache for string split and regexp-multiple.
2842 set_string_split_cache(*factory->NewFixedArray(
2843 RegExpResultsCache::kRegExpResultsCacheSize, TENURED));
2844 set_regexp_multiple_cache(*factory->NewFixedArray(
2845 RegExpResultsCache::kRegExpResultsCacheSize, TENURED));
2847 // Allocate cache for external strings pointing to native source code.
2848 set_natives_source_cache(
2849 *factory->NewFixedArray(Natives::GetBuiltinsCount()));
2851 set_undefined_cell(*factory->NewCell(factory->undefined_value()));
2853 // The symbol registry is initialized lazily.
2854 set_symbol_registry(undefined_value());
2856 // Allocate object to hold object observation state.
2857 set_observation_state(*factory->NewJSObjectFromMap(
2858 factory->NewMap(JS_OBJECT_TYPE, JSObject::kHeaderSize)));
2860 // Microtask queue uses the empty fixed array as a sentinel for "empty".
2861 // The number of queued microtasks is stored in Isolate::pending_microtask_count().
2862 set_microtask_queue(empty_fixed_array());
2864 set_detailed_stack_trace_symbol(*factory->NewPrivateOwnSymbol());
2865 set_elements_transition_symbol(*factory->NewPrivateOwnSymbol());
2866 set_frozen_symbol(*factory->NewPrivateOwnSymbol());
2867 set_megamorphic_symbol(*factory->NewPrivateOwnSymbol());
2868 set_premonomorphic_symbol(*factory->NewPrivateOwnSymbol());
2869 set_generic_symbol(*factory->NewPrivateOwnSymbol());
2870 set_nonexistent_symbol(*factory->NewPrivateOwnSymbol());
2871 set_normal_ic_symbol(*factory->NewPrivateOwnSymbol());
2872 set_observed_symbol(*factory->NewPrivateOwnSymbol());
2873 set_stack_trace_symbol(*factory->NewPrivateOwnSymbol());
2874 set_uninitialized_symbol(*factory->NewPrivateOwnSymbol());
2875 set_home_object_symbol(*factory->NewPrivateOwnSymbol());
2877 Handle<SeededNumberDictionary> slow_element_dictionary =
2878 SeededNumberDictionary::New(isolate(), 0, TENURED);
2879 slow_element_dictionary->set_requires_slow_elements();
2880 set_empty_slow_element_dictionary(*slow_element_dictionary);
2882 set_materialized_objects(*factory->NewFixedArray(0, TENURED));
2884 // Handling of script id generation is in Factory::NewScript.
2885 set_last_script_id(Smi::FromInt(v8::UnboundScript::kNoScriptId));
2887 set_allocation_sites_scratchpad(
2888 *factory->NewFixedArray(kAllocationSiteScratchpadSize, TENURED));
2889 InitializeAllocationSitesScratchpad();
2891 // Initialize keyed lookup cache.
2892 isolate_->keyed_lookup_cache()->Clear();
2894 // Initialize context slot cache.
2895 isolate_->context_slot_cache()->Clear();
2897 // Initialize descriptor cache.
2898 isolate_->descriptor_lookup_cache()->Clear();
2900 // Initialize compilation cache.
2901 isolate_->compilation_cache()->Clear();
2905 bool Heap::RootCanBeWrittenAfterInitialization(Heap::RootListIndex root_index) {
2906 RootListIndex writable_roots[] = {
2907 kStoreBufferTopRootIndex,
2908 kStackLimitRootIndex,
2909 kNumberStringCacheRootIndex,
2910 kInstanceofCacheFunctionRootIndex,
2911 kInstanceofCacheMapRootIndex,
2912 kInstanceofCacheAnswerRootIndex,
2913 kCodeStubsRootIndex,
2914 kNonMonomorphicCacheRootIndex,
2915 kPolymorphicCodeCacheRootIndex,
2916 kLastScriptIdRootIndex,
2917 kEmptyScriptRootIndex,
2918 kRealStackLimitRootIndex,
2919 kArgumentsAdaptorDeoptPCOffsetRootIndex,
2920 kConstructStubDeoptPCOffsetRootIndex,
2921 kGetterStubDeoptPCOffsetRootIndex,
2922 kSetterStubDeoptPCOffsetRootIndex,
2923 kStringTableRootIndex,
2926 for (unsigned int i = 0; i < arraysize(writable_roots); i++) {
2927 if (root_index == writable_roots[i]) return true;
2933 bool Heap::RootCanBeTreatedAsConstant(RootListIndex root_index) {
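// A root can be treated as a compile-time constant only if it is never
// rewritten after initialization and cannot move, i.e. it does not live in
// new space.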
2934 return !RootCanBeWrittenAfterInitialization(root_index) &&
2935 !InNewSpace(roots_array_start()[root_index]);
2939 Object* RegExpResultsCache::Lookup(Heap* heap, String* key_string,
2940 Object* key_pattern, ResultsCacheType type) {
2942 if (!key_string->IsInternalizedString()) return Smi::FromInt(0);
2943 if (type == STRING_SPLIT_SUBSTRINGS) {
2944 DCHECK(key_pattern->IsString());
2945 if (!key_pattern->IsInternalizedString()) return Smi::FromInt(0);
2946 cache = heap->string_split_cache();
2948 DCHECK(type == REGEXP_MULTIPLE_INDICES);
2949 DCHECK(key_pattern->IsFixedArray());
2950 cache = heap->regexp_multiple_cache();
2953 uint32_t hash = key_string->Hash();
2954 uint32_t index = ((hash & (kRegExpResultsCacheSize - 1)) &
2955 ~(kArrayEntriesPerCacheEntry - 1));
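// The cache is probed at two positions: this primary entry derived from the
// hash, and a secondary entry kArrayEntriesPerCacheEntry further on (modulo
// the cache size).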
2956 if (cache->get(index + kStringOffset) == key_string &&
2957 cache->get(index + kPatternOffset) == key_pattern) {
2958 return cache->get(index + kArrayOffset);
2961 ((index + kArrayEntriesPerCacheEntry) & (kRegExpResultsCacheSize - 1));
2962 if (cache->get(index + kStringOffset) == key_string &&
2963 cache->get(index + kPatternOffset) == key_pattern) {
2964 return cache->get(index + kArrayOffset);
2966 return Smi::FromInt(0);
2970 void RegExpResultsCache::Enter(Isolate* isolate, Handle<String> key_string,
2971 Handle<Object> key_pattern,
2972 Handle<FixedArray> value_array,
2973 ResultsCacheType type) {
2974 Factory* factory = isolate->factory();
2975 Handle<FixedArray> cache;
2976 if (!key_string->IsInternalizedString()) return;
2977 if (type == STRING_SPLIT_SUBSTRINGS) {
2978 DCHECK(key_pattern->IsString());
2979 if (!key_pattern->IsInternalizedString()) return;
2980 cache = factory->string_split_cache();
2982 DCHECK(type == REGEXP_MULTIPLE_INDICES);
2983 DCHECK(key_pattern->IsFixedArray());
2984 cache = factory->regexp_multiple_cache();
2987 uint32_t hash = key_string->Hash();
2988 uint32_t index = ((hash & (kRegExpResultsCacheSize - 1)) &
2989 ~(kArrayEntriesPerCacheEntry - 1));
2990 if (cache->get(index + kStringOffset) == Smi::FromInt(0)) {
2991 cache->set(index + kStringOffset, *key_string);
2992 cache->set(index + kPatternOffset, *key_pattern);
2993 cache->set(index + kArrayOffset, *value_array);
2996 ((index + kArrayEntriesPerCacheEntry) & (kRegExpResultsCacheSize - 1));
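// Placement policy: use the primary entry if it is free, otherwise the
// secondary entry; if both are occupied, clear the secondary entry and
// overwrite the primary one.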
2997 if (cache->get(index2 + kStringOffset) == Smi::FromInt(0)) {
2998 cache->set(index2 + kStringOffset, *key_string);
2999 cache->set(index2 + kPatternOffset, *key_pattern);
3000 cache->set(index2 + kArrayOffset, *value_array);
3002 cache->set(index2 + kStringOffset, Smi::FromInt(0));
3003 cache->set(index2 + kPatternOffset, Smi::FromInt(0));
3004 cache->set(index2 + kArrayOffset, Smi::FromInt(0));
3005 cache->set(index + kStringOffset, *key_string);
3006 cache->set(index + kPatternOffset, *key_pattern);
3007 cache->set(index + kArrayOffset, *value_array);
3010 // If the array is a reasonably short list of substrings, convert it into a
3011 // list of internalized strings.
3012 if (type == STRING_SPLIT_SUBSTRINGS && value_array->length() < 100) {
3013 for (int i = 0; i < value_array->length(); i++) {
3014 Handle<String> str(String::cast(value_array->get(i)), isolate);
3015 Handle<String> internalized_str = factory->InternalizeString(str);
3016 value_array->set(i, *internalized_str);
3019 // Convert backing store to a copy-on-write array.
3020 value_array->set_map_no_write_barrier(*factory->fixed_cow_array_map());
3024 void RegExpResultsCache::Clear(FixedArray* cache) {
3025 for (int i = 0; i < kRegExpResultsCacheSize; i++) {
3026 cache->set(i, Smi::FromInt(0));
3031 int Heap::FullSizeNumberStringCacheLength() {
3032 // Compute the size of the number string cache based on the max newspace size.
3033 // The number string cache has a minimum size based on twice the initial cache
3034 // size to ensure that it is bigger after being made 'full size'.
3035 int number_string_cache_size = max_semi_space_size_ / 512;
3036 number_string_cache_size = Max(kInitialNumberStringCacheSize * 2,
3037 Min(0x4000, number_string_cache_size));
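// A rough example: with an 8 MB max semispace, 8 MB / 512 gives 16384
// entries, which the Min(0x4000, ...) clamp leaves unchanged; smaller
// semispaces are clamped from below by twice the initial cache size.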
3038 // There is a string and a number per entry so the length is twice the number of entries.
3040 return number_string_cache_size * 2;
3044 void Heap::FlushNumberStringCache() {
3045 // Flush the number to string cache.
3046 int len = number_string_cache()->length();
3047 for (int i = 0; i < len; i++) {
3048 number_string_cache()->set_undefined(i);
3053 void Heap::FlushAllocationSitesScratchpad() {
3054 for (int i = 0; i < allocation_sites_scratchpad_length_; i++) {
3055 allocation_sites_scratchpad()->set_undefined(i);
3057 allocation_sites_scratchpad_length_ = 0;
3061 void Heap::InitializeAllocationSitesScratchpad() {
3062 DCHECK(allocation_sites_scratchpad()->length() ==
3063 kAllocationSiteScratchpadSize);
3064 for (int i = 0; i < kAllocationSiteScratchpadSize; i++) {
3065 allocation_sites_scratchpad()->set_undefined(i);
3070 void Heap::AddAllocationSiteToScratchpad(AllocationSite* site,
3071 ScratchpadSlotMode mode) {
3072 if (allocation_sites_scratchpad_length_ < kAllocationSiteScratchpadSize) {
3073 // We cannot use the normal write-barrier because slots need to be
3074 // recorded with non-incremental marking as well. We have to explicitly
3075 // record the slot to take evacuation candidates into account.
3076 allocation_sites_scratchpad()->set(allocation_sites_scratchpad_length_,
3077 site, SKIP_WRITE_BARRIER);
3078 Object** slot = allocation_sites_scratchpad()->RawFieldOfElementAt(
3079 allocation_sites_scratchpad_length_);
3081 if (mode == RECORD_SCRATCHPAD_SLOT) {
3082 // We need to allow slots buffer overflow here since the evacuation
3083 // candidates are not part of the global list of old space pages and
3084 // releasing an evacuation candidate due to a slots buffer overflow
3085 // results in lost pages.
3086 mark_compact_collector()->RecordSlot(slot, slot, *slot,
3087 SlotsBuffer::IGNORE_OVERFLOW);
3089 allocation_sites_scratchpad_length_++;
3094 Map* Heap::MapForExternalArrayType(ExternalArrayType array_type) {
3095 return Map::cast(roots_[RootIndexForExternalArrayType(array_type)]);
3099 Heap::RootListIndex Heap::RootIndexForExternalArrayType(
3100 ExternalArrayType array_type) {
3101 switch (array_type) {
3102 #define ARRAY_TYPE_TO_ROOT_INDEX(Type, type, TYPE, ctype, size) \
3103 case kExternal##Type##Array: \
3104 return kExternal##Type##ArrayMapRootIndex;
3106 TYPED_ARRAYS(ARRAY_TYPE_TO_ROOT_INDEX)
3107 #undef ARRAY_TYPE_TO_ROOT_INDEX
3111 return kUndefinedValueRootIndex;
3116 Map* Heap::MapForFixedTypedArray(ExternalArrayType array_type) {
3117 return Map::cast(roots_[RootIndexForFixedTypedArray(array_type)]);
3121 Heap::RootListIndex Heap::RootIndexForFixedTypedArray(
3122 ExternalArrayType array_type) {
3123 switch (array_type) {
3124 #define ARRAY_TYPE_TO_ROOT_INDEX(Type, type, TYPE, ctype, size) \
3125 case kExternal##Type##Array: \
3126 return kFixed##Type##ArrayMapRootIndex;
3128 TYPED_ARRAYS(ARRAY_TYPE_TO_ROOT_INDEX)
3129 #undef ARRAY_TYPE_TO_ROOT_INDEX
3133 return kUndefinedValueRootIndex;
3138 Heap::RootListIndex Heap::RootIndexForEmptyExternalArray(
3139 ElementsKind elementsKind) {
3140 switch (elementsKind) {
3141 #define ELEMENT_KIND_TO_ROOT_INDEX(Type, type, TYPE, ctype, size) \
3142 case EXTERNAL_##TYPE##_ELEMENTS: \
3143 return kEmptyExternal##Type##ArrayRootIndex;
3145 TYPED_ARRAYS(ELEMENT_KIND_TO_ROOT_INDEX)
3146 #undef ELEMENT_KIND_TO_ROOT_INDEX
3150 return kUndefinedValueRootIndex;
3155 Heap::RootListIndex Heap::RootIndexForEmptyFixedTypedArray(
3156 ElementsKind elementsKind) {
3157 switch (elementsKind) {
3158 #define ELEMENT_KIND_TO_ROOT_INDEX(Type, type, TYPE, ctype, size) \
3159 case TYPE##_ELEMENTS: \
3160 return kEmptyFixed##Type##ArrayRootIndex;
3162 TYPED_ARRAYS(ELEMENT_KIND_TO_ROOT_INDEX)
3163 #undef ELEMENT_KIND_TO_ROOT_INDEX
3166 return kUndefinedValueRootIndex;
3171 ExternalArray* Heap::EmptyExternalArrayForMap(Map* map) {
3172 return ExternalArray::cast(
3173 roots_[RootIndexForEmptyExternalArray(map->elements_kind())]);
3177 FixedTypedArrayBase* Heap::EmptyFixedTypedArrayForMap(Map* map) {
3178 return FixedTypedArrayBase::cast(
3179 roots_[RootIndexForEmptyFixedTypedArray(map->elements_kind())]);
3183 AllocationResult Heap::AllocateForeign(Address address,
3184 PretenureFlag pretenure) {
3185 // Statically ensure that it is safe to allocate foreigns in paged spaces.
3186 STATIC_ASSERT(Foreign::kSize <= Page::kMaxRegularHeapObjectSize);
3187 AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
3189 AllocationResult allocation = Allocate(foreign_map(), space);
3190 if (!allocation.To(&result)) return allocation;
3191 result->set_foreign_address(address);
3196 AllocationResult Heap::AllocateByteArray(int length, PretenureFlag pretenure) {
3197 if (length < 0 || length > ByteArray::kMaxLength) {
3198 v8::internal::Heap::FatalProcessOutOfMemory("invalid array length", true);
3200 int size = ByteArray::SizeFor(length);
3201 AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
3204 AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
3205 if (!allocation.To(&result)) return allocation;
3208 result->set_map_no_write_barrier(byte_array_map());
3209 ByteArray::cast(result)->set_length(length);
3214 void Heap::CreateFillerObjectAt(Address addr, int size) {
3215 if (size == 0) return;
3216 HeapObject* filler = HeapObject::FromAddress(addr);
3217 if (size == kPointerSize) {
3218 filler->set_map_no_write_barrier(one_pointer_filler_map());
3219 } else if (size == 2 * kPointerSize) {
3220 filler->set_map_no_write_barrier(two_pointer_filler_map());
3222 filler->set_map_no_write_barrier(free_space_map());
3223 FreeSpace::cast(filler)->set_size(size);
3228 bool Heap::CanMoveObjectStart(HeapObject* object) {
3229 Address address = object->address();
3230 bool is_in_old_pointer_space = InOldPointerSpace(address);
3231 bool is_in_old_data_space = InOldDataSpace(address);
3233 if (lo_space()->Contains(object)) return false;
3235 Page* page = Page::FromAddress(address);
3236 // We can move the object start if:
3237 // (1) the object is not in old pointer or old data space,
3238 // (2) the page of the object was already swept,
3239 // (3) the page was already concurrently swept. This case is an optimization
3240 // for concurrent sweeping. The WasSwept predicate for concurrently swept
3241 // pages is set after sweeping all pages.
3242 return (!is_in_old_pointer_space && !is_in_old_data_space) ||
3243 page->WasSwept() || page->SweepingCompleted();
3247 void Heap::AdjustLiveBytes(Address address, int by, InvocationMode mode) {
3248 if (incremental_marking()->IsMarking() &&
3249 Marking::IsBlack(Marking::MarkBitFrom(address))) {
3250 if (mode == FROM_GC) {
3251 MemoryChunk::IncrementLiveBytesFromGC(address, by);
3253 MemoryChunk::IncrementLiveBytesFromMutator(address, by);
3259 FixedArrayBase* Heap::LeftTrimFixedArray(FixedArrayBase* object,
3260 int elements_to_trim) {
3261 const int element_size = object->IsFixedArray() ? kPointerSize : kDoubleSize;
3262 const int bytes_to_trim = elements_to_trim * element_size;
3263 Map* map = object->map();
3265 // For now this trick is only applied to objects in new and paged space.
3266 // In large object space the object's start must coincide with the chunk's
3267 // area start, and thus the trick is just not applicable.
3268 DCHECK(!lo_space()->Contains(object));
3269 DCHECK(object->map() != fixed_cow_array_map());
3271 STATIC_ASSERT(FixedArrayBase::kMapOffset == 0);
3272 STATIC_ASSERT(FixedArrayBase::kLengthOffset == kPointerSize);
3273 STATIC_ASSERT(FixedArrayBase::kHeaderSize == 2 * kPointerSize);
3275 const int len = object->length();
3276 DCHECK(elements_to_trim <= len);
3278 // Calculate location of new array start.
3279 Address new_start = object->address() + bytes_to_trim;
3281 // Technically in new space this write might be omitted (except for
3282 // debug mode, which iterates through the heap), but to play it safe we still do it.
3284 CreateFillerObjectAt(object->address(), bytes_to_trim);
3286 // Initialize header of the trimmed array. Since left trimming is only
3287 // performed on pages which are not concurrently swept, creating a filler
3288 // object does not require synchronization.
3289 DCHECK(CanMoveObjectStart(object));
3290 Object** former_start = HeapObject::RawField(object, 0);
3291 int new_start_index = elements_to_trim * (element_size / kPointerSize);
3292 former_start[new_start_index] = map;
3293 former_start[new_start_index + 1] = Smi::FromInt(len - elements_to_trim);
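// The map and the shortened length are written where the trimmed array now
// begins, giving new_start a valid FixedArrayBase header.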
3294 FixedArrayBase* new_object =
3295 FixedArrayBase::cast(HeapObject::FromAddress(new_start));
3297 // Maintain consistency of live bytes during incremental marking
3298 marking()->TransferMark(object->address(), new_start);
3299 AdjustLiveBytes(new_start, -bytes_to_trim, Heap::FROM_MUTATOR);
3301 // Notify the heap profiler of change in object layout.
3302 OnMoveEvent(new_object, object, new_object->Size());
3307 // Force instantiation of templatized method.
3309 void Heap::RightTrimFixedArray<Heap::FROM_GC>(FixedArrayBase*, int);
3311 void Heap::RightTrimFixedArray<Heap::FROM_MUTATOR>(FixedArrayBase*, int);
3314 template<Heap::InvocationMode mode>
3315 void Heap::RightTrimFixedArray(FixedArrayBase* object, int elements_to_trim) {
3316 const int element_size = object->IsFixedArray() ? kPointerSize : kDoubleSize;
3317 const int bytes_to_trim = elements_to_trim * element_size;
3319 // For now this trick is only applied to objects in new and paged space.
3320 DCHECK(object->map() != fixed_cow_array_map());
3322 const int len = object->length();
3323 DCHECK(elements_to_trim < len);
3325 // Calculate location of new array end.
3326 Address new_end = object->address() + object->Size() - bytes_to_trim;
3328 // Technically in new space this write might be omitted (except for
3329 // debug mode, which iterates through the heap), but to play it safe we still do it.
3331 // We do not create a filler for objects in large object space.
3332 // TODO(hpayer): We should shrink the large object page if the size
3333 // of the object changed significantly.
3334 if (!lo_space()->Contains(object)) {
3335 CreateFillerObjectAt(new_end, bytes_to_trim);
3338 // Initialize header of the trimmed array. We are storing the new length
3339 // using release store after creating a filler for the left-over space to
3340 // avoid races with the sweeper thread.
3341 object->synchronized_set_length(len - elements_to_trim);
3343 // Maintain consistency of live bytes during incremental marking
3344 AdjustLiveBytes(object->address(), -bytes_to_trim, mode);
3346 // Notify the heap profiler of change in object layout. The array may not be
3347 // moved during GC, and size has to be adjusted nevertheless.
3348 HeapProfiler* profiler = isolate()->heap_profiler();
3349 if (profiler->is_tracking_allocations()) {
3350 profiler->UpdateObjectSizeEvent(object->address(), object->Size());
3355 AllocationResult Heap::AllocateExternalArray(int length,
3356 ExternalArrayType array_type,
3357 void* external_pointer,
3358 PretenureFlag pretenure) {
3359 int size = ExternalArray::kAlignedSize;
3360 AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
3363 AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
3364 if (!allocation.To(&result)) return allocation;
3367 result->set_map_no_write_barrier(MapForExternalArrayType(array_type));
3368 ExternalArray::cast(result)->set_length(length);
3369 ExternalArray::cast(result)->set_external_pointer(external_pointer);
3373 static void ForFixedTypedArray(ExternalArrayType array_type, int* element_size,
3374 ElementsKind* element_kind) {
3375 switch (array_type) {
3376 #define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
3377 case kExternal##Type##Array: \
3378 *element_size = size; \
3379 *element_kind = TYPE##_ELEMENTS; \
3382 TYPED_ARRAYS(TYPED_ARRAY_CASE)
3383 #undef TYPED_ARRAY_CASE
3386 *element_size = 0; // Bogus
3387 *element_kind = UINT8_ELEMENTS; // Bogus
3393 AllocationResult Heap::AllocateFixedTypedArray(int length,
3394 ExternalArrayType array_type,
3395 PretenureFlag pretenure) {
3397 ElementsKind elements_kind;
3398 ForFixedTypedArray(array_type, &element_size, &elements_kind);
3399 int size = OBJECT_POINTER_ALIGN(length * element_size +
3400 FixedTypedArrayBase::kDataOffset);
3401 #ifndef V8_HOST_ARCH_64_BIT
3402 if (array_type == kExternalFloat64Array) {
3403 size += kPointerSize;
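// On 32-bit hosts, over-allocate one word so the Float64 backing store can be
// shifted to an 8-byte boundary by EnsureDoubleAligned below.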
3406 AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
3409 AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
3410 if (!allocation.To(&object)) return allocation;
3412 if (array_type == kExternalFloat64Array) {
3413 object = EnsureDoubleAligned(this, object, size);
3416 object->set_map(MapForFixedTypedArray(array_type));
3417 FixedTypedArrayBase* elements = FixedTypedArrayBase::cast(object);
3418 elements->set_length(length);
3419 memset(elements->DataPtr(), 0, elements->DataSize());
3424 AllocationResult Heap::AllocateCode(int object_size, bool immovable) {
3425 DCHECK(IsAligned(static_cast<intptr_t>(object_size), kCodeAlignment));
3426 AllocationResult allocation =
3427 AllocateRaw(object_size, CODE_SPACE, CODE_SPACE);
3430 if (!allocation.To(&result)) return allocation;
3433 Address address = result->address();
3434 // Code objects which should stay at a fixed address are allocated either
3435 // in the first page of code space (objects on the first page of each space
3436 // are never moved) or in large object space.
3437 if (!code_space_->FirstPage()->Contains(address) &&
3438 MemoryChunk::FromAddress(address)->owner()->identity() != LO_SPACE) {
3439 // Discard the first code allocation, which was on a page where it could
3440 // be moved.
3441 CreateFillerObjectAt(result->address(), object_size);
3442 allocation = lo_space_->AllocateRaw(object_size, EXECUTABLE);
3443 if (!allocation.To(&result)) return allocation;
3444 OnAllocationEvent(result, object_size);
3445 }
3446 }
3448 result->set_map_no_write_barrier(code_map());
3449 Code* code = Code::cast(result);
3450 DCHECK(isolate_->code_range() == NULL || !isolate_->code_range()->valid() ||
3451 isolate_->code_range()->contains(code->address()));
3452 code->set_gc_metadata(Smi::FromInt(0));
3453 code->set_ic_age(global_ic_age_);
3454 return code;
3455 }
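// Sketch (not part of the original file) of the immovable-code policy above:
// if the first allocation landed on a regular (movable) code page, it is
// turned into a filler and the request is retried in large object space,
// whose pages are never compacted. A hypothetical caller:
//
//   AllocationResult r = heap->AllocateCode(object_size, true /* immovable */);
//   HeapObject* code_obj;
//   if (!r.To(&code_obj)) { /* trigger a GC and retry elsewhere */ }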
3458 AllocationResult Heap::CopyCode(Code* code) {
3459 AllocationResult allocation;
3460 HeapObject* new_constant_pool;
3461 if (FLAG_enable_ool_constant_pool &&
3462 code->constant_pool() != empty_constant_pool_array()) {
3463 // Copy the constant pool, since edits to the copied code may modify
3464 // the constant pool.
3465 allocation = CopyConstantPoolArray(code->constant_pool());
3466 if (!allocation.To(&new_constant_pool)) return allocation;
3467 } else {
3468 new_constant_pool = empty_constant_pool_array();
3469 }
3471 HeapObject* result;
3472 // Allocate an object the same size as the code object.
3473 int obj_size = code->Size();
3474 allocation = AllocateRaw(obj_size, CODE_SPACE, CODE_SPACE);
3475 if (!allocation.To(&result)) return allocation;
3477 // Copy code object.
3478 Address old_addr = code->address();
3479 Address new_addr = result->address();
3480 CopyBlock(new_addr, old_addr, obj_size);
3481 Code* new_code = Code::cast(result);
3483 // Update the constant pool.
3484 new_code->set_constant_pool(new_constant_pool);
3486 // Relocate the copy.
3487 DCHECK(isolate_->code_range() == NULL || !isolate_->code_range()->valid() ||
3488 isolate_->code_range()->contains(code->address()));
3489 new_code->Relocate(new_addr - old_addr);
3490 return new_code;
3491 }
3494 AllocationResult Heap::CopyCode(Code* code, Vector<byte> reloc_info) {
3495 // Allocate ByteArray and ConstantPoolArray before the Code object, so that we
3496 // do not risk leaving uninitialized Code object (and breaking the heap).
3497 ByteArray* reloc_info_array;
3499 AllocationResult allocation =
3500 AllocateByteArray(reloc_info.length(), TENURED);
3501 if (!allocation.To(&reloc_info_array)) return allocation;
3503 HeapObject* new_constant_pool;
3504 if (FLAG_enable_ool_constant_pool &&
3505 code->constant_pool() != empty_constant_pool_array()) {
3506 // Copy the constant pool, since edits to the copied code may modify
3507 // the constant pool.
3508 AllocationResult allocation = CopyConstantPoolArray(code->constant_pool());
3509 if (!allocation.To(&new_constant_pool)) return allocation;
3510 } else {
3511 new_constant_pool = empty_constant_pool_array();
3512 }
3514 int new_body_size = RoundUp(code->instruction_size(), kObjectAlignment);
3516 int new_obj_size = Code::SizeFor(new_body_size);
3518 Address old_addr = code->address();
3520 size_t relocation_offset =
3521 static_cast<size_t>(code->instruction_end() - old_addr);
3523 HeapObject* result;
3524 AllocationResult allocation =
3525 AllocateRaw(new_obj_size, CODE_SPACE, CODE_SPACE);
3526 if (!allocation.To(&result)) return allocation;
3528 // Copy code object.
3529 Address new_addr = result->address();
3531 // Copy header and instructions.
3532 CopyBytes(new_addr, old_addr, relocation_offset);
3534 Code* new_code = Code::cast(result);
3535 new_code->set_relocation_info(reloc_info_array);
3537 // Update constant pool.
3538 new_code->set_constant_pool(new_constant_pool);
3540 // Copy patched rinfo.
3541 CopyBytes(new_code->relocation_start(), reloc_info.start(),
3542 static_cast<size_t>(reloc_info.length()));
3544 // Relocate the copy.
3545 DCHECK(isolate_->code_range() == NULL || !isolate_->code_range()->valid() ||
3546 isolate_->code_range()->contains(code->address()));
3547 new_code->Relocate(new_addr - old_addr);
3549 #ifdef VERIFY_HEAP
3550 if (FLAG_verify_heap) code->ObjectVerify();
3551 #endif
3552 return new_code;
3553 }
3556 void Heap::InitializeAllocationMemento(AllocationMemento* memento,
3557 AllocationSite* allocation_site) {
3558 memento->set_map_no_write_barrier(allocation_memento_map());
3559 DCHECK(allocation_site->map() == allocation_site_map());
3560 memento->set_allocation_site(allocation_site, SKIP_WRITE_BARRIER);
3561 if (FLAG_allocation_site_pretenuring) {
3562 allocation_site->IncrementMementoCreateCount();
3563 }
3564 }
3567 AllocationResult Heap::Allocate(Map* map, AllocationSpace space,
3568 AllocationSite* allocation_site) {
3569 DCHECK(gc_state_ == NOT_IN_GC);
3570 DCHECK(map->instance_type() != MAP_TYPE);
3571 // If allocation failures are disallowed, we may allocate in a different
3572 // space when new space is full and the object is not a large object.
3573 AllocationSpace retry_space =
3574 (space != NEW_SPACE) ? space : TargetSpaceId(map->instance_type());
3575 int size = map->instance_size();
3576 if (allocation_site != NULL) {
3577 size += AllocationMemento::kSize;
3578 }
3579 HeapObject* result;
3580 AllocationResult allocation = AllocateRaw(size, space, retry_space);
3581 if (!allocation.To(&result)) return allocation;
3582 // No need for write barrier since object is white and map is in old space.
3583 result->set_map_no_write_barrier(map);
3584 if (allocation_site != NULL) {
3585 AllocationMemento* alloc_memento = reinterpret_cast<AllocationMemento*>(
3586 reinterpret_cast<Address>(result) + map->instance_size());
3587 InitializeAllocationMemento(alloc_memento, allocation_site);
3588 }
3589 return result;
3590 }
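// Layout sketch (illustrative, not from the original source) for an
// allocation made with a non-NULL allocation_site:
//
//   result --> [ object body: map->instance_size() bytes           ]
//              [ AllocationMemento: AllocationMemento::kSize bytes ]
//
// InitializeAllocationMemento() links the trailing memento back to the site,
// which is what later lets the pretenuring heuristics count mementos.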
3593 void Heap::InitializeJSObjectFromMap(JSObject* obj, FixedArray* properties,
3594 Map* map) {
3595 obj->set_properties(properties);
3596 obj->initialize_elements();
3597 // TODO(1240798): Initialize the object's body using valid initial values
3598 // according to the object's initial map. For example, if the map's
3599 // instance type is JS_ARRAY_TYPE, the length field should be initialized
3600 // to a number (e.g. Smi::FromInt(0)) and the elements initialized to a
3601 // fixed array (e.g. Heap::empty_fixed_array()). Currently, the object
3602 // verification code has to cope with (temporarily) invalid objects. See
3603 // for example, JSArray::JSArrayVerify).
3604 Object* filler;
3605 // We cannot always fill with one_pointer_filler_map because objects
3606 // created from API functions expect their internal fields to be initialized
3607 // with undefined_value.
3608 // Pre-allocated fields need to be initialized with undefined_value as well
3609 // so that object accesses before the constructor completes (e.g. in the
3610 // debugger) will not cause a crash.
3611 if (map->constructor()->IsJSFunction() &&
3612 JSFunction::cast(map->constructor())
3613 ->IsInobjectSlackTrackingInProgress()) {
3614 // We might want to shrink the object later.
3615 DCHECK(obj->GetInternalFieldCount() == 0);
3616 filler = Heap::one_pointer_filler_map();
3617 } else {
3618 filler = Heap::undefined_value();
3619 }
3620 obj->InitializeBody(map, Heap::undefined_value(), filler);
3621 }
3624 AllocationResult Heap::AllocateJSObjectFromMap(
3625 Map* map, PretenureFlag pretenure, bool allocate_properties,
3626 AllocationSite* allocation_site) {
3627 // JSFunctions should be allocated using AllocateFunction to be
3628 // properly initialized.
3629 DCHECK(map->instance_type() != JS_FUNCTION_TYPE);
3631 // Both types of global objects should be allocated using
3632 // AllocateGlobalObject to be properly initialized.
3633 DCHECK(map->instance_type() != JS_GLOBAL_OBJECT_TYPE);
3634 DCHECK(map->instance_type() != JS_BUILTINS_OBJECT_TYPE);
3636 // Allocate the backing storage for the properties.
3637 FixedArray* properties;
3638 if (allocate_properties) {
3639 int prop_size = map->InitialPropertiesLength();
3640 DCHECK(prop_size >= 0);
3642 AllocationResult allocation = AllocateFixedArray(prop_size, pretenure);
3643 if (!allocation.To(&properties)) return allocation;
3645 } else {
3646 properties = empty_fixed_array();
3647 }
3649 // Allocate the JSObject.
3650 int size = map->instance_size();
3651 AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, pretenure);
3652 JSObject* js_obj;
3653 AllocationResult allocation = Allocate(map, space, allocation_site);
3654 if (!allocation.To(&js_obj)) return allocation;
3656 // Initialize the JSObject.
3657 InitializeJSObjectFromMap(js_obj, properties, map);
3658 DCHECK(js_obj->HasFastElements() || js_obj->HasExternalArrayElements() ||
3659 js_obj->HasFixedTypedArrayElements());
3660 return js_obj;
3661 }
3664 AllocationResult Heap::AllocateJSObject(JSFunction* constructor,
3665 PretenureFlag pretenure,
3666 AllocationSite* allocation_site) {
3667 DCHECK(constructor->has_initial_map());
3669 // Allocate the object based on the constructor's initial map.
3670 AllocationResult allocation = AllocateJSObjectFromMap(
3671 constructor->initial_map(), pretenure, true, allocation_site);
3673 // Make sure result is NOT a global object if valid.
3674 HeapObject* obj;
3675 DCHECK(!allocation.To(&obj) || !obj->IsGlobalObject());
3676 return allocation;
3677 }
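// Usage sketch (assumed, not in the original file): allocating a plain object
// from a constructor. The JSFunction below is hypothetical, and the default
// arguments are assumed to match the declaration in heap.h.
//
//   JSFunction* ctor = ...;                    // must have an initial map
//   AllocationResult r = heap->AllocateJSObject(ctor);
//   JSObject* obj;
//   if (r.To(&obj)) { /* obj starts with empty properties and elements */ }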
3681 AllocationResult Heap::CopyJSObject(JSObject* source, AllocationSite* site) {
3682 // Never used to copy functions. If functions need to be copied we
3683 // have to be careful to clear the literals array.
3684 SLOW_DCHECK(!source->IsJSFunction());
3686 // Make the clone.
3687 Map* map = source->map();
3688 int object_size = map->instance_size();
3689 HeapObject* clone;
3691 DCHECK(site == NULL || AllocationSite::CanTrack(map->instance_type()));
3693 WriteBarrierMode wb_mode = UPDATE_WRITE_BARRIER;
3695 // If we're forced to always allocate, we use the general allocation
3696 // functions which may leave us with an object in old space.
3697 if (always_allocate()) {
3699 AllocationResult allocation =
3700 AllocateRaw(object_size, NEW_SPACE, OLD_POINTER_SPACE);
3701 if (!allocation.To(&clone)) return allocation;
3703 Address clone_address = clone->address();
3704 CopyBlock(clone_address, source->address(), object_size);
3705 // Update write barrier for all fields that lie beyond the header.
3706 RecordWrites(clone_address, JSObject::kHeaderSize,
3707 (object_size - JSObject::kHeaderSize) / kPointerSize);
3708 } else {
3709 wb_mode = SKIP_WRITE_BARRIER;
3712 int adjusted_object_size =
3713 site != NULL ? object_size + AllocationMemento::kSize : object_size;
3714 AllocationResult allocation =
3715 AllocateRaw(adjusted_object_size, NEW_SPACE, NEW_SPACE);
3716 if (!allocation.To(&clone)) return allocation;
3718 SLOW_DCHECK(InNewSpace(clone));
3719 // Since we know the clone is allocated in new space, we can copy
3720 // the contents without worrying about updating the write barrier.
3721 CopyBlock(clone->address(), source->address(), object_size);
3723 if (site != NULL) {
3724 AllocationMemento* alloc_memento = reinterpret_cast<AllocationMemento*>(
3725 reinterpret_cast<Address>(clone) + object_size);
3726 InitializeAllocationMemento(alloc_memento, site);
3727 }
3728 }
3730 SLOW_DCHECK(JSObject::cast(clone)->GetElementsKind() ==
3731 source->GetElementsKind());
3732 FixedArrayBase* elements = FixedArrayBase::cast(source->elements());
3733 FixedArray* properties = FixedArray::cast(source->properties());
3734 // Update elements if necessary.
3735 if (elements->length() > 0) {
3736 FixedArrayBase* elem;
3738 AllocationResult allocation;
3739 if (elements->map() == fixed_cow_array_map()) {
3740 allocation = FixedArray::cast(elements);
3741 } else if (source->HasFastDoubleElements()) {
3742 allocation = CopyFixedDoubleArray(FixedDoubleArray::cast(elements));
3743 } else {
3744 allocation = CopyFixedArray(FixedArray::cast(elements));
3745 }
3746 if (!allocation.To(&elem)) return allocation;
3748 JSObject::cast(clone)->set_elements(elem, wb_mode);
3749 }
3750 // Update properties if necessary.
3751 if (properties->length() > 0) {
3752 FixedArray* prop;
3754 AllocationResult allocation = CopyFixedArray(properties);
3755 if (!allocation.To(&prop)) return allocation;
3757 JSObject::cast(clone)->set_properties(prop, wb_mode);
3758 }
3759 // Return the new clone.
3760 return clone;
3761 }
3764 static inline void WriteOneByteData(Vector<const char> vector, uint8_t* chars,
3765 int len) {
3766 // Only works for one byte strings.
3767 DCHECK(vector.length() == len);
3768 MemCopy(chars, vector.start(), len);
3769 }
3771 static inline void WriteTwoByteData(Vector<const char> vector, uint16_t* chars,
3772 int len) {
3773 const uint8_t* stream = reinterpret_cast<const uint8_t*>(vector.start());
3774 unsigned stream_length = vector.length();
3775 while (stream_length != 0) {
3776 unsigned consumed = 0;
3777 uint32_t c = unibrow::Utf8::ValueOf(stream, stream_length, &consumed);
3778 DCHECK(c != unibrow::Utf8::kBadChar);
3779 DCHECK(consumed <= stream_length);
3780 stream_length -= consumed;
3781 stream += consumed;
3782 if (c > unibrow::Utf16::kMaxNonSurrogateCharCode) {
3783 len -= 2;
3784 if (len < 0) break;
3785 *chars++ = unibrow::Utf16::LeadSurrogate(c);
3786 *chars++ = unibrow::Utf16::TrailSurrogate(c);
3787 } else {
3788 len -= 1;
3789 if (len < 0) break;
3790 *chars++ = c;
3791 }
3792 }
3793 DCHECK(stream_length == 0);
3794 DCHECK(len == 0);
3795 }
3798 static inline void WriteOneByteData(String* s, uint8_t* chars, int len) {
3799 DCHECK(s->length() == len);
3800 String::WriteToFlat(s, chars, 0, len);
3801 }
3804 static inline void WriteTwoByteData(String* s, uint16_t* chars, int len) {
3805 DCHECK(s->length() == len);
3806 String::WriteToFlat(s, chars, 0, len);
3807 }
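// Worked example (not part of the original source) for the two-byte write
// path above: the UTF-8 bytes F0 9F 98 80 decode to U+1F600, which is larger
// than unibrow::Utf16::kMaxNonSurrogateCharCode and is therefore emitted as
// the surrogate pair 0xD83D 0xDE00, consuming two uint16_t slots.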
3810 template <bool is_one_byte, typename T>
3811 AllocationResult Heap::AllocateInternalizedStringImpl(T t, int chars,
3812 uint32_t hash_field) {
3814 // Compute map and object size.
3815 int size;
3816 Map* map;
3818 DCHECK_LE(0, chars);
3819 DCHECK_GE(String::kMaxLength, chars);
3820 if (is_one_byte) {
3821 map = one_byte_internalized_string_map();
3822 size = SeqOneByteString::SizeFor(chars);
3823 } else {
3824 map = internalized_string_map();
3825 size = SeqTwoByteString::SizeFor(chars);
3826 }
3827 AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, TENURED);
3829 // Allocate string.
3830 HeapObject* result;
3832 AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
3833 if (!allocation.To(&result)) return allocation;
3836 result->set_map_no_write_barrier(map);
3837 // Set length and hash fields of the allocated string.
3838 String* answer = String::cast(result);
3839 answer->set_length(chars);
3840 answer->set_hash_field(hash_field);
3842 DCHECK_EQ(size, answer->Size());
3844 if (is_one_byte) {
3845 WriteOneByteData(t, SeqOneByteString::cast(answer)->GetChars(), chars);
3846 } else {
3847 WriteTwoByteData(t, SeqTwoByteString::cast(answer)->GetChars(), chars);
3848 }
3849 return answer;
3850 }
3853 // Need explicit instantiations.
3854 template AllocationResult Heap::AllocateInternalizedStringImpl<true>(String*,
3855 int, uint32_t);
3857 template AllocationResult Heap::AllocateInternalizedStringImpl<false>(String*,
3858 int, uint32_t);
3860 template AllocationResult Heap::AllocateInternalizedStringImpl<false>(
3861 Vector<const char>, int, uint32_t);
3864 AllocationResult Heap::AllocateRawOneByteString(int length,
3865 PretenureFlag pretenure) {
3866 DCHECK_LE(0, length);
3867 DCHECK_GE(String::kMaxLength, length);
3868 int size = SeqOneByteString::SizeFor(length);
3869 DCHECK(size <= SeqOneByteString::kMaxSize);
3870 AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
3872 HeapObject* result;
3874 AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
3875 if (!allocation.To(&result)) return allocation;
3878 // Partially initialize the object.
3879 result->set_map_no_write_barrier(one_byte_string_map());
3880 String::cast(result)->set_length(length);
3881 String::cast(result)->set_hash_field(String::kEmptyHashField);
3882 DCHECK_EQ(size, HeapObject::cast(result)->Size());
3884 return result;
3885 }
3888 AllocationResult Heap::AllocateRawTwoByteString(int length,
3889 PretenureFlag pretenure) {
3890 DCHECK_LE(0, length);
3891 DCHECK_GE(String::kMaxLength, length);
3892 int size = SeqTwoByteString::SizeFor(length);
3893 DCHECK(size <= SeqTwoByteString::kMaxSize);
3894 AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
3896 HeapObject* result;
3898 AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
3899 if (!allocation.To(&result)) return allocation;
3902 // Partially initialize the object.
3903 result->set_map_no_write_barrier(string_map());
3904 String::cast(result)->set_length(length);
3905 String::cast(result)->set_hash_field(String::kEmptyHashField);
3906 DCHECK_EQ(size, HeapObject::cast(result)->Size());
3908 return result;
3909 }
3911 AllocationResult Heap::AllocateEmptyFixedArray() {
3912 int size = FixedArray::SizeFor(0);
3913 HeapObject* result;
3915 AllocationResult allocation =
3916 AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE);
3917 if (!allocation.To(&result)) return allocation;
3919 // Initialize the object.
3920 result->set_map_no_write_barrier(fixed_array_map());
3921 FixedArray::cast(result)->set_length(0);
3922 return result;
3923 }
3926 AllocationResult Heap::AllocateEmptyExternalArray(
3927 ExternalArrayType array_type) {
3928 return AllocateExternalArray(0, array_type, NULL, TENURED);
3932 AllocationResult Heap::CopyAndTenureFixedCOWArray(FixedArray* src) {
3933 if (!InNewSpace(src)) {
3934 return src;
3935 }
3937 int len = src->length();
3938 HeapObject* obj;
3940 AllocationResult allocation = AllocateRawFixedArray(len, TENURED);
3941 if (!allocation.To(&obj)) return allocation;
3943 obj->set_map_no_write_barrier(fixed_array_map());
3944 FixedArray* result = FixedArray::cast(obj);
3945 result->set_length(len);
3948 DisallowHeapAllocation no_gc;
3949 WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
3950 for (int i = 0; i < len; i++) result->set(i, src->get(i), mode);
3952 // TODO(mvstanton): The map is set twice because of protection against calling
3953 // set() on a COW FixedArray. Issue v8:3221 created to track this, and
3954 // we might then be able to remove this whole method.
3955 HeapObject::cast(obj)->set_map_no_write_barrier(fixed_cow_array_map());
3956 return result;
3957 }
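// Sketch (assumed, not in the original file) of why this helper exists: a
// copy-on-write backing store in new space cannot simply be retagged as
// tenured, so a deep copy is made in old space and only then re-marked with
// fixed_cow_array_map.
//
//   FixedArray* cow = ...;                        // hypothetical COW elements
//   AllocationResult r = heap->CopyAndTenureFixedCOWArray(cow);
//   // On success the result lives in old space and is again copy-on-write.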
3960 AllocationResult Heap::AllocateEmptyFixedTypedArray(
3961 ExternalArrayType array_type) {
3962 return AllocateFixedTypedArray(0, array_type, TENURED);
3966 AllocationResult Heap::CopyFixedArrayWithMap(FixedArray* src, Map* map) {
3967 int len = src->length();
3968 HeapObject* obj;
3970 AllocationResult allocation = AllocateRawFixedArray(len, NOT_TENURED);
3971 if (!allocation.To(&obj)) return allocation;
3973 if (InNewSpace(obj)) {
3974 obj->set_map_no_write_barrier(map);
3975 CopyBlock(obj->address() + kPointerSize, src->address() + kPointerSize,
3976 FixedArray::SizeFor(len) - kPointerSize);
3977 return obj;
3978 }
3979 obj->set_map_no_write_barrier(map);
3980 FixedArray* result = FixedArray::cast(obj);
3981 result->set_length(len);
3984 DisallowHeapAllocation no_gc;
3985 WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
3986 for (int i = 0; i < len; i++) result->set(i, src->get(i), mode);
3987 return result;
3988 }
3991 AllocationResult Heap::CopyFixedDoubleArrayWithMap(FixedDoubleArray* src,
3992 Map* map) {
3993 int len = src->length();
3994 HeapObject* obj;
3996 AllocationResult allocation = AllocateRawFixedDoubleArray(len, NOT_TENURED);
3997 if (!allocation.To(&obj)) return allocation;
3999 obj->set_map_no_write_barrier(map);
4000 CopyBlock(obj->address() + FixedDoubleArray::kLengthOffset,
4001 src->address() + FixedDoubleArray::kLengthOffset,
4002 FixedDoubleArray::SizeFor(len) - FixedDoubleArray::kLengthOffset);
4003 return obj;
4004 }
4007 AllocationResult Heap::CopyConstantPoolArrayWithMap(ConstantPoolArray* src,
4008 Map* map) {
4009 HeapObject* obj;
4010 if (src->is_extended_layout()) {
4011 ConstantPoolArray::NumberOfEntries small(src,
4012 ConstantPoolArray::SMALL_SECTION);
4013 ConstantPoolArray::NumberOfEntries extended(
4014 src, ConstantPoolArray::EXTENDED_SECTION);
4015 AllocationResult allocation =
4016 AllocateExtendedConstantPoolArray(small, extended);
4017 if (!allocation.To(&obj)) return allocation;
4018 } else {
4019 ConstantPoolArray::NumberOfEntries small(src,
4020 ConstantPoolArray::SMALL_SECTION);
4021 AllocationResult allocation = AllocateConstantPoolArray(small);
4022 if (!allocation.To(&obj)) return allocation;
4023 }
4024 obj->set_map_no_write_barrier(map);
4025 CopyBlock(obj->address() + ConstantPoolArray::kFirstEntryOffset,
4026 src->address() + ConstantPoolArray::kFirstEntryOffset,
4027 src->size() - ConstantPoolArray::kFirstEntryOffset);
4028 return obj;
4029 }
4032 AllocationResult Heap::AllocateRawFixedArray(int length,
4033 PretenureFlag pretenure) {
4034 if (length < 0 || length > FixedArray::kMaxLength) {
4035 v8::internal::Heap::FatalProcessOutOfMemory("invalid array length", true);
4036 }
4037 int size = FixedArray::SizeFor(length);
4038 AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, pretenure);
4040 return AllocateRaw(size, space, OLD_POINTER_SPACE);
4041 }
4044 AllocationResult Heap::AllocateFixedArrayWithFiller(int length,
4045 PretenureFlag pretenure,
4046 Object* filler) {
4047 DCHECK(length >= 0);
4048 DCHECK(empty_fixed_array()->IsFixedArray());
4049 if (length == 0) return empty_fixed_array();
4051 DCHECK(!InNewSpace(filler));
4052 HeapObject* result;
4054 AllocationResult allocation = AllocateRawFixedArray(length, pretenure);
4055 if (!allocation.To(&result)) return allocation;
4058 result->set_map_no_write_barrier(fixed_array_map());
4059 FixedArray* array = FixedArray::cast(result);
4060 array->set_length(length);
4061 MemsetPointer(array->data_start(), filler, length);
4062 return array;
4063 }
4066 AllocationResult Heap::AllocateFixedArray(int length, PretenureFlag pretenure) {
4067 return AllocateFixedArrayWithFiller(length, pretenure, undefined_value());
4071 AllocationResult Heap::AllocateUninitializedFixedArray(int length) {
4072 if (length == 0) return empty_fixed_array();
4074 HeapObject* obj;
4076 AllocationResult allocation = AllocateRawFixedArray(length, NOT_TENURED);
4077 if (!allocation.To(&obj)) return allocation;
4080 obj->set_map_no_write_barrier(fixed_array_map());
4081 FixedArray::cast(obj)->set_length(length);
4082 return obj;
4083 }
4086 AllocationResult Heap::AllocateUninitializedFixedDoubleArray(
4087 int length, PretenureFlag pretenure) {
4088 if (length == 0) return empty_fixed_array();
4090 HeapObject* elements;
4091 AllocationResult allocation = AllocateRawFixedDoubleArray(length, pretenure);
4092 if (!allocation.To(&elements)) return allocation;
4094 elements->set_map_no_write_barrier(fixed_double_array_map());
4095 FixedDoubleArray::cast(elements)->set_length(length);
4096 return elements;
4097 }
4100 AllocationResult Heap::AllocateRawFixedDoubleArray(int length,
4101 PretenureFlag pretenure) {
4102 if (length < 0 || length > FixedDoubleArray::kMaxLength) {
4103 v8::internal::Heap::FatalProcessOutOfMemory("invalid array length", true);
4104 }
4105 int size = FixedDoubleArray::SizeFor(length);
4106 #ifndef V8_HOST_ARCH_64_BIT
4107 size += kPointerSize;
4108 #endif
4109 AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
4111 HeapObject* object;
4113 AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
4114 if (!allocation.To(&object)) return allocation;
4117 return EnsureDoubleAligned(this, object, size);
4118 }
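// Alignment note with a small example (illustrative, not from the original
// source): FixedDoubleArray::SizeFor(len) is only pointer-aligned, so on
// 32-bit hosts one extra kPointerSize is requested and EnsureDoubleAligned()
// either skips the first word (leaving a one-word filler in front) or places
// the filler behind the object, so the double payload always starts on an
// 8-byte boundary.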
4121 AllocationResult Heap::AllocateConstantPoolArray(
4122 const ConstantPoolArray::NumberOfEntries& small) {
4123 CHECK(small.are_in_range(0, ConstantPoolArray::kMaxSmallEntriesPerType));
4124 int size = ConstantPoolArray::SizeFor(small);
4125 #ifndef V8_HOST_ARCH_64_BIT
4126 size += kPointerSize;
4127 #endif
4128 AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, TENURED);
4130 HeapObject* object;
4132 AllocationResult allocation = AllocateRaw(size, space, OLD_POINTER_SPACE);
4133 if (!allocation.To(&object)) return allocation;
4135 object = EnsureDoubleAligned(this, object, size);
4136 object->set_map_no_write_barrier(constant_pool_array_map());
4138 ConstantPoolArray* constant_pool = ConstantPoolArray::cast(object);
4139 constant_pool->Init(small);
4140 constant_pool->ClearPtrEntries(isolate());
4141 return constant_pool;
4142 }
4145 AllocationResult Heap::AllocateExtendedConstantPoolArray(
4146 const ConstantPoolArray::NumberOfEntries& small,
4147 const ConstantPoolArray::NumberOfEntries& extended) {
4148 CHECK(small.are_in_range(0, ConstantPoolArray::kMaxSmallEntriesPerType));
4149 CHECK(extended.are_in_range(0, kMaxInt));
4150 int size = ConstantPoolArray::SizeForExtended(small, extended);
4151 #ifndef V8_HOST_ARCH_64_BIT
4152 size += kPointerSize;
4153 #endif
4154 AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, TENURED);
4156 HeapObject* object;
4158 AllocationResult allocation = AllocateRaw(size, space, OLD_POINTER_SPACE);
4159 if (!allocation.To(&object)) return allocation;
4161 object = EnsureDoubleAligned(this, object, size);
4162 object->set_map_no_write_barrier(constant_pool_array_map());
4164 ConstantPoolArray* constant_pool = ConstantPoolArray::cast(object);
4165 constant_pool->InitExtended(small, extended);
4166 constant_pool->ClearPtrEntries(isolate());
4167 return constant_pool;
4168 }
4171 AllocationResult Heap::AllocateEmptyConstantPoolArray() {
4172 ConstantPoolArray::NumberOfEntries small(0, 0, 0, 0);
4173 int size = ConstantPoolArray::SizeFor(small);
4174 HeapObject* result;
4176 AllocationResult allocation =
4177 AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE);
4178 if (!allocation.To(&result)) return allocation;
4180 result->set_map_no_write_barrier(constant_pool_array_map());
4181 ConstantPoolArray::cast(result)->Init(small);
4182 return result;
4183 }
4186 AllocationResult Heap::AllocateSymbol() {
4187 // Statically ensure that it is safe to allocate symbols in paged spaces.
4188 STATIC_ASSERT(Symbol::kSize <= Page::kMaxRegularHeapObjectSize);
4190 HeapObject* result = NULL;
4191 AllocationResult allocation =
4192 AllocateRaw(Symbol::kSize, OLD_POINTER_SPACE, OLD_POINTER_SPACE);
4193 if (!allocation.To(&result)) return allocation;
4195 result->set_map_no_write_barrier(symbol_map());
4197 // Generate a random hash value.
4198 int hash;
4199 int attempts = 0;
4200 do {
4201 hash = isolate()->random_number_generator()->NextInt() & Name::kHashBitMask;
4202 attempts++;
4203 } while (hash == 0 && attempts < 30);
4204 if (hash == 0) hash = 1; // never return 0
4206 Symbol::cast(result)
4207 ->set_hash_field(Name::kIsNotArrayIndexMask | (hash << Name::kHashShift));
4208 Symbol::cast(result)->set_name(undefined_value());
4209 Symbol::cast(result)->set_flags(Smi::FromInt(0));
4211 DCHECK(!Symbol::cast(result)->is_private());
4212 return result;
4213 }
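// Sketch (not part of the original source) of the retry loop above with a
// hypothetical sample sequence: NextInt() & Name::kHashBitMask yielding
// 0, 0, 0x1a2b stops after three attempts with hash = 0x1a2b; thirty zero
// samples in a row would exit the loop and the hash would be forced to 1,
// so a Symbol never ends up with a zero hash.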
4216 AllocationResult Heap::AllocateStruct(InstanceType type) {
4217 Map* map;
4218 switch (type) {
4219 #define MAKE_CASE(NAME, Name, name) \
4220 case NAME##_TYPE: \
4221 map = name##_map(); \
4222 break;
4223 STRUCT_LIST(MAKE_CASE)
4224 #undef MAKE_CASE
4225 default:
4226 UNREACHABLE();
4227 return exception();
4228 }
4229 int size = map->instance_size();
4230 AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, TENURED);
4231 Struct* result;
4233 AllocationResult allocation = Allocate(map, space);
4234 if (!allocation.To(&result)) return allocation;
4236 result->InitializeBody(size);
4237 return result;
4238 }
4241 bool Heap::IsHeapIterable() {
4242 // TODO(hpayer): This function is not correct. Allocation folding in old
4243 // space breaks the iterability.
4244 return new_space_top_after_last_gc_ == new_space()->top();
4248 void Heap::MakeHeapIterable() {
4249 DCHECK(AllowHeapAllocation::IsAllowed());
4250 if (!IsHeapIterable()) {
4251 CollectAllGarbage(kMakeHeapIterableMask, "Heap::MakeHeapIterable");
4252 }
4253 if (mark_compact_collector()->sweeping_in_progress()) {
4254 mark_compact_collector()->EnsureSweepingCompleted();
4255 }
4256 DCHECK(IsHeapIterable());
4257 }
4260 void Heap::AdvanceIdleIncrementalMarking(intptr_t step_size) {
4261 incremental_marking()->Step(step_size,
4262 IncrementalMarking::NO_GC_VIA_STACK_GUARD, true);
4264 if (incremental_marking()->IsComplete()) {
4265 bool uncommit = false;
4266 if (gc_count_at_last_idle_gc_ == gc_count_) {
4267 // No GC since the last full GC, the mutator is probably not active.
4268 isolate_->compilation_cache()->Clear();
4269 uncommit = true;
4270 }
4271 CollectAllGarbage(kReduceMemoryFootprintMask,
4272 "idle notification: finalize incremental");
4273 gc_idle_time_handler_.NotifyIdleMarkCompact();
4274 gc_count_at_last_idle_gc_ = gc_count_;
4275 if (uncommit) {
4276 new_space_.Shrink();
4277 UncommitFromSpace();
4278 }
4279 }
4280 }
4283 bool Heap::WorthActivatingIncrementalMarking() {
4284 return incremental_marking()->IsStopped() &&
4285 incremental_marking()->WorthActivating() && NextGCIsLikelyToBeFull();
4289 bool Heap::IdleNotification(int idle_time_in_ms) {
4290 // If incremental marking is off, we do not perform idle notification.
4291 if (!FLAG_incremental_marking) return true;
4292 base::ElapsedTimer timer;
4293 timer.Start();
4294 isolate()->counters()->gc_idle_time_allotted_in_ms()->AddSample(
4295 idle_time_in_ms);
4296 HistogramTimerScope idle_notification_scope(
4297 isolate_->counters()->gc_idle_notification());
4299 GCIdleTimeHandler::HeapState heap_state;
4300 heap_state.contexts_disposed = contexts_disposed_;
4301 heap_state.size_of_objects = static_cast<size_t>(SizeOfObjects());
4302 heap_state.incremental_marking_stopped = incremental_marking()->IsStopped();
4303 // TODO(ulan): Start incremental marking only for large heaps.
4304 heap_state.can_start_incremental_marking =
4305 incremental_marking()->ShouldActivate();
4306 heap_state.sweeping_in_progress =
4307 mark_compact_collector()->sweeping_in_progress();
4308 heap_state.mark_compact_speed_in_bytes_per_ms =
4309 static_cast<size_t>(tracer()->MarkCompactSpeedInBytesPerMillisecond());
4310 heap_state.incremental_marking_speed_in_bytes_per_ms = static_cast<size_t>(
4311 tracer()->IncrementalMarkingSpeedInBytesPerMillisecond());
4312 heap_state.scavenge_speed_in_bytes_per_ms =
4313 static_cast<size_t>(tracer()->ScavengeSpeedInBytesPerMillisecond());
4314 heap_state.available_new_space_memory = new_space_.Available();
4315 heap_state.new_space_capacity = new_space_.Capacity();
4316 heap_state.new_space_allocation_throughput_in_bytes_per_ms =
4317 static_cast<size_t>(
4318 tracer()->NewSpaceAllocationThroughputInBytesPerMillisecond());
4320 GCIdleTimeAction action =
4321 gc_idle_time_handler_.Compute(idle_time_in_ms, heap_state);
4323 bool result = false;
4324 switch (action.type) {
4325 case DONE:
4326 result = true;
4327 break;
4328 case DO_INCREMENTAL_MARKING:
4329 if (incremental_marking()->IsStopped()) {
4330 incremental_marking()->Start();
4331 }
4332 AdvanceIdleIncrementalMarking(action.parameter);
4333 break;
4334 case DO_FULL_GC: {
4335 HistogramTimerScope scope(isolate_->counters()->gc_context());
4336 const char* message = contexts_disposed_
4337 ? "idle notification: contexts disposed"
4338 : "idle notification: finalize idle round";
4339 CollectAllGarbage(kReduceMemoryFootprintMask, message);
4340 gc_idle_time_handler_.NotifyIdleMarkCompact();
4341 break;
4342 }
4343 case DO_SCAVENGE:
4344 CollectGarbage(NEW_SPACE, "idle notification: scavenge");
4345 break;
4346 case DO_FINALIZE_SWEEPING:
4347 mark_compact_collector()->EnsureSweepingCompleted();
4348 break;
4349 case DO_NOTHING:
4350 break;
4351 }
4353 int actual_time_ms = static_cast<int>(timer.Elapsed().InMilliseconds());
4354 if (actual_time_ms <= idle_time_in_ms) {
4355 isolate()->counters()->gc_idle_time_limit_undershot()->AddSample(
4356 idle_time_in_ms - actual_time_ms);
4357 } else {
4358 isolate()->counters()->gc_idle_time_limit_overshot()->AddSample(
4359 actual_time_ms - idle_time_in_ms);
4360 }
4362 if (FLAG_trace_idle_notification) {
4363 PrintF("Idle notification: requested idle time %d ms, actual time %d ms [",
4364 idle_time_in_ms, actual_time_ms);
4365 action.Print();
4366 PrintF("]\n");
4367 }
4369 contexts_disposed_ = 0;
4370 return result;
4371 }
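// Summary sketch (assumed, not in the original file) of the idle-time state
// machine driven above: GCIdleTimeHandler::Compute() maps the idle budget and
// the sampled heap state onto exactly one action per notification, e.g.
//
//   DO_INCREMENTAL_MARKING -> step the marker by action.parameter bytes
//   DO_FULL_GC             -> finalize the round with a full collection
//   DO_SCAVENGE            -> collect the young generation only
//   DO_FINALIZE_SWEEPING   -> join the concurrent sweeper
//
// and the undershot/overshot histograms record how well the work fitted into
// the idle_time_in_ms budget.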
4376 void Heap::Print() {
4377 if (!HasBeenSetUp()) return;
4378 isolate()->PrintStack(stdout);
4379 AllSpaces spaces(this);
4380 for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
4386 void Heap::ReportCodeStatistics(const char* title) {
4387 PrintF(">>>>>> Code Stats (%s) >>>>>>\n", title);
4388 PagedSpace::ResetCodeStatistics(isolate());
4389 // We do not look for code in new space, map space, or old space. If code
4390 // somehow ends up in those spaces, we would miss it here.
4391 code_space_->CollectCodeStatistics();
4392 lo_space_->CollectCodeStatistics();
4393 PagedSpace::ReportCodeStatistics(isolate());
4397 // This function expects that NewSpace's allocated objects histogram is
4398 // populated (via a call to CollectStatistics or else as a side effect of a
4399 // just-completed scavenge collection).
4400 void Heap::ReportHeapStatistics(const char* title) {
4402 PrintF(">>>>>> =============== %s (%d) =============== >>>>>>\n", title,
4403 gc_count_);
4404 PrintF("old_generation_allocation_limit_ %" V8_PTR_PREFIX "d\n",
4405 old_generation_allocation_limit_);
4408 PrintF("Number of handles : %d\n", HandleScope::NumberOfHandles(isolate_));
4409 isolate_->global_handles()->PrintStats();
4412 PrintF("Heap statistics : ");
4413 isolate_->memory_allocator()->ReportStatistics();
4414 PrintF("To space : ");
4415 new_space_.ReportStatistics();
4416 PrintF("Old pointer space : ");
4417 old_pointer_space_->ReportStatistics();
4418 PrintF("Old data space : ");
4419 old_data_space_->ReportStatistics();
4420 PrintF("Code space : ");
4421 code_space_->ReportStatistics();
4422 PrintF("Map space : ");
4423 map_space_->ReportStatistics();
4424 PrintF("Cell space : ");
4425 cell_space_->ReportStatistics();
4426 PrintF("PropertyCell space : ");
4427 property_cell_space_->ReportStatistics();
4428 PrintF("Large object space : ");
4429 lo_space_->ReportStatistics();
4430 PrintF(">>>>>> ========================================= >>>>>>\n");
4435 bool Heap::Contains(HeapObject* value) { return Contains(value->address()); }
4438 bool Heap::Contains(Address addr) {
4439 if (isolate_->memory_allocator()->IsOutsideAllocatedSpace(addr)) return false;
4440 return HasBeenSetUp() &&
4441 (new_space_.ToSpaceContains(addr) ||
4442 old_pointer_space_->Contains(addr) ||
4443 old_data_space_->Contains(addr) || code_space_->Contains(addr) ||
4444 map_space_->Contains(addr) || cell_space_->Contains(addr) ||
4445 property_cell_space_->Contains(addr) ||
4446 lo_space_->SlowContains(addr));
4450 bool Heap::InSpace(HeapObject* value, AllocationSpace space) {
4451 return InSpace(value->address(), space);
4455 bool Heap::InSpace(Address addr, AllocationSpace space) {
4456 if (isolate_->memory_allocator()->IsOutsideAllocatedSpace(addr)) return false;
4457 if (!HasBeenSetUp()) return false;
4459 switch (space) {
4460 case NEW_SPACE:
4461 return new_space_.ToSpaceContains(addr);
4462 case OLD_POINTER_SPACE:
4463 return old_pointer_space_->Contains(addr);
4464 case OLD_DATA_SPACE:
4465 return old_data_space_->Contains(addr);
4466 case CODE_SPACE:
4467 return code_space_->Contains(addr);
4468 case MAP_SPACE:
4469 return map_space_->Contains(addr);
4470 case CELL_SPACE:
4471 return cell_space_->Contains(addr);
4472 case PROPERTY_CELL_SPACE:
4473 return property_cell_space_->Contains(addr);
4474 case LO_SPACE:
4475 return lo_space_->SlowContains(addr);
4476 }
4477 UNREACHABLE();
4478 return false;
4479 }
4485 void Heap::Verify() {
4486 CHECK(HasBeenSetUp());
4487 HandleScope scope(isolate());
4489 store_buffer()->Verify();
4491 if (mark_compact_collector()->sweeping_in_progress()) {
4492 // We have to wait here for the sweeper threads to have an iterable heap.
4493 mark_compact_collector()->EnsureSweepingCompleted();
4496 VerifyPointersVisitor visitor;
4497 IterateRoots(&visitor, VISIT_ONLY_STRONG);
4499 VerifySmisVisitor smis_visitor;
4500 IterateSmiRoots(&smis_visitor);
4502 new_space_.Verify();
4504 old_pointer_space_->Verify(&visitor);
4505 map_space_->Verify(&visitor);
4507 VerifyPointersVisitor no_dirty_regions_visitor;
4508 old_data_space_->Verify(&no_dirty_regions_visitor);
4509 code_space_->Verify(&no_dirty_regions_visitor);
4510 cell_space_->Verify(&no_dirty_regions_visitor);
4511 property_cell_space_->Verify(&no_dirty_regions_visitor);
4513 lo_space_->Verify();
4518 void Heap::ZapFromSpace() {
4519 NewSpacePageIterator it(new_space_.FromSpaceStart(),
4520 new_space_.FromSpaceEnd());
4521 while (it.has_next()) {
4522 NewSpacePage* page = it.next();
4523 for (Address cursor = page->area_start(), limit = page->area_end();
4524 cursor < limit; cursor += kPointerSize) {
4525 Memory::Address_at(cursor) = kFromSpaceZapValue;
4526 }
4527 }
4528 }
4531 void Heap::IterateAndMarkPointersToFromSpace(Address start, Address end,
4532 ObjectSlotCallback callback) {
4533 Address slot_address = start;
4535 // We are not collecting slots on new space objects during mutation
4536 // thus we have to scan for pointers to evacuation candidates when we
4537 // promote objects. But we should not record any slots in non-black
4538 // objects. Grey object's slots would be rescanned.
4539 // A white object might not survive until the end of the collection, so
4540 // it would be a violation of the invariant to record its slots.
4541 bool record_slots = false;
4542 if (incremental_marking()->IsCompacting()) {
4543 MarkBit mark_bit = Marking::MarkBitFrom(HeapObject::FromAddress(start));
4544 record_slots = Marking::IsBlack(mark_bit);
4545 }
4547 while (slot_address < end) {
4548 Object** slot = reinterpret_cast<Object**>(slot_address);
4549 Object* object = *slot;
4550 // If the store buffer becomes overfull we mark pages as being exempt from
4551 // the store buffer. These pages are scanned to find pointers that point
4552 // to the new space. In that case we may hit newly promoted objects and
4553 // fix the pointers before the promotion queue gets to them. Thus the 'if'.
4554 if (object->IsHeapObject()) {
4555 if (Heap::InFromSpace(object)) {
4556 callback(reinterpret_cast<HeapObject**>(slot),
4557 HeapObject::cast(object));
4558 Object* new_object = *slot;
4559 if (InNewSpace(new_object)) {
4560 SLOW_DCHECK(Heap::InToSpace(new_object));
4561 SLOW_DCHECK(new_object->IsHeapObject());
4562 store_buffer_.EnterDirectlyIntoStoreBuffer(
4563 reinterpret_cast<Address>(slot));
4564 }
4565 SLOW_DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(new_object));
4566 } else if (record_slots &&
4567 MarkCompactCollector::IsOnEvacuationCandidate(object)) {
4568 mark_compact_collector()->RecordSlot(slot, slot, object);
4569 }
4570 }
4571 slot_address += kPointerSize;
4572 }
4573 }
4577 typedef bool (*CheckStoreBufferFilter)(Object** addr);
4580 bool IsAMapPointerAddress(Object** addr) {
4581 uintptr_t a = reinterpret_cast<uintptr_t>(addr);
4582 int mod = a % Map::kSize;
4583 return mod >= Map::kPointerFieldsBeginOffset &&
4584 mod < Map::kPointerFieldsEndOffset;
4588 bool EverythingsAPointer(Object** addr) { return true; }
4591 static void CheckStoreBuffer(Heap* heap, Object** current, Object** limit,
4592 Object**** store_buffer_position,
4593 Object*** store_buffer_top,
4594 CheckStoreBufferFilter filter,
4595 Address special_garbage_start,
4596 Address special_garbage_end) {
4597 Map* free_space_map = heap->free_space_map();
4598 for (; current < limit; current++) {
4599 Object* o = *current;
4600 Address current_address = reinterpret_cast<Address>(current);
4602 if (o == free_space_map) {
4603 Address current_address = reinterpret_cast<Address>(current);
4604 FreeSpace* free_space =
4605 FreeSpace::cast(HeapObject::FromAddress(current_address));
4606 int skip = free_space->Size();
4607 DCHECK(current_address + skip <= reinterpret_cast<Address>(limit));
4609 current_address += skip - kPointerSize;
4610 current = reinterpret_cast<Object**>(current_address);
4613 // Skip the current linear allocation space between top and limit which is
4614 // unmarked with the free space map, but can contain junk.
4615 if (current_address == special_garbage_start &&
4616 special_garbage_end != special_garbage_start) {
4617 current_address = special_garbage_end - kPointerSize;
4618 current = reinterpret_cast<Object**>(current_address);
4621 if (!(*filter)(current)) continue;
4622 DCHECK(current_address < special_garbage_start ||
4623 current_address >= special_garbage_end);
4624 DCHECK(reinterpret_cast<uintptr_t>(o) != kFreeListZapValue);
4625 // We have to check that the pointer does not point into new space
4626 // without trying to cast it to a heap object since the hash field of
4627 // a string can contain values like 1 and 3 which are tagged null
4628 // pointers.
4629 if (!heap->InNewSpace(o)) continue;
4630 while (**store_buffer_position < current &&
4631 *store_buffer_position < store_buffer_top) {
4632 (*store_buffer_position)++;
4634 if (**store_buffer_position != current ||
4635 *store_buffer_position == store_buffer_top) {
4636 Object** obj_start = current;
4637 while (!(*obj_start)->IsMap()) obj_start--;
4644 // Check that the store buffer contains all intergenerational pointers by
4645 // scanning a page and ensuring that all pointers to young space are in the
4647 void Heap::OldPointerSpaceCheckStoreBuffer() {
4648 OldSpace* space = old_pointer_space();
4649 PageIterator pages(space);
4651 store_buffer()->SortUniq();
4653 while (pages.has_next()) {
4654 Page* page = pages.next();
4655 Object** current = reinterpret_cast<Object**>(page->area_start());
4657 Address end = page->area_end();
4659 Object*** store_buffer_position = store_buffer()->Start();
4660 Object*** store_buffer_top = store_buffer()->Top();
4662 Object** limit = reinterpret_cast<Object**>(end);
4663 CheckStoreBuffer(this, current, limit, &store_buffer_position,
4664 store_buffer_top, &EverythingsAPointer, space->top(),
4665 space->limit());
4666 }
4667 }
4670 void Heap::MapSpaceCheckStoreBuffer() {
4671 MapSpace* space = map_space();
4672 PageIterator pages(space);
4674 store_buffer()->SortUniq();
4676 while (pages.has_next()) {
4677 Page* page = pages.next();
4678 Object** current = reinterpret_cast<Object**>(page->area_start());
4680 Address end = page->area_end();
4682 Object*** store_buffer_position = store_buffer()->Start();
4683 Object*** store_buffer_top = store_buffer()->Top();
4685 Object** limit = reinterpret_cast<Object**>(end);
4686 CheckStoreBuffer(this, current, limit, &store_buffer_position,
4687 store_buffer_top, &IsAMapPointerAddress, space->top(),
4688 space->limit());
4689 }
4690 }
4693 void Heap::LargeObjectSpaceCheckStoreBuffer() {
4694 LargeObjectIterator it(lo_space());
4695 for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
4696 // We only have code, sequential strings, or fixed arrays in large
4697 // object space, and only fixed arrays can possibly contain pointers to
4698 // the young generation.
4699 if (object->IsFixedArray()) {
4700 Object*** store_buffer_position = store_buffer()->Start();
4701 Object*** store_buffer_top = store_buffer()->Top();
4702 Object** current = reinterpret_cast<Object**>(object->address());
4703 Object** limit =
4704 reinterpret_cast<Object**>(object->address() + object->Size());
4705 CheckStoreBuffer(this, current, limit, &store_buffer_position,
4706 store_buffer_top, &EverythingsAPointer, NULL, NULL);
4707 }
4708 }
4709 }
4713 void Heap::IterateRoots(ObjectVisitor* v, VisitMode mode) {
4714 IterateStrongRoots(v, mode);
4715 IterateWeakRoots(v, mode);
4719 void Heap::IterateWeakRoots(ObjectVisitor* v, VisitMode mode) {
4720 v->VisitPointer(reinterpret_cast<Object**>(&roots_[kStringTableRootIndex]));
4721 v->Synchronize(VisitorSynchronization::kStringTable);
4722 if (mode != VISIT_ALL_IN_SCAVENGE && mode != VISIT_ALL_IN_SWEEP_NEWSPACE) {
4723 // Scavenge collections have special processing for this.
4724 external_string_table_.Iterate(v);
4725 }
4726 v->Synchronize(VisitorSynchronization::kExternalStringsTable);
4727 }
4730 void Heap::IterateSmiRoots(ObjectVisitor* v) {
4731 // Acquire execution access since we are going to read stack limit values.
4732 ExecutionAccess access(isolate());
4733 v->VisitPointers(&roots_[kSmiRootsStart], &roots_[kRootListLength]);
4734 v->Synchronize(VisitorSynchronization::kSmiRootList);
4738 void Heap::IterateStrongRoots(ObjectVisitor* v, VisitMode mode) {
4739 v->VisitPointers(&roots_[0], &roots_[kStrongRootListLength]);
4740 v->Synchronize(VisitorSynchronization::kStrongRootList);
4742 v->VisitPointer(bit_cast<Object**>(&hidden_string_));
4743 v->Synchronize(VisitorSynchronization::kInternalizedString);
4745 isolate_->bootstrapper()->Iterate(v);
4746 v->Synchronize(VisitorSynchronization::kBootstrapper);
4747 isolate_->Iterate(v);
4748 v->Synchronize(VisitorSynchronization::kTop);
4749 Relocatable::Iterate(isolate_, v);
4750 v->Synchronize(VisitorSynchronization::kRelocatable);
4752 if (isolate_->deoptimizer_data() != NULL) {
4753 isolate_->deoptimizer_data()->Iterate(v);
4755 v->Synchronize(VisitorSynchronization::kDebug);
4756 isolate_->compilation_cache()->Iterate(v);
4757 v->Synchronize(VisitorSynchronization::kCompilationCache);
4759 // Iterate over local handles in handle scopes.
4760 isolate_->handle_scope_implementer()->Iterate(v);
4761 isolate_->IterateDeferredHandles(v);
4762 v->Synchronize(VisitorSynchronization::kHandleScope);
4764 // Iterate over the builtin code objects and code stubs in the
4765 // heap. Note that it is not necessary to iterate over code objects
4766 // on scavenge collections.
4767 if (mode != VISIT_ALL_IN_SCAVENGE) {
4768 isolate_->builtins()->IterateBuiltins(v);
4770 v->Synchronize(VisitorSynchronization::kBuiltins);
4772 // Iterate over global handles.
4773 switch (mode) {
4774 case VISIT_ONLY_STRONG:
4775 isolate_->global_handles()->IterateStrongRoots(v);
4776 break;
4777 case VISIT_ALL_IN_SCAVENGE:
4778 isolate_->global_handles()->IterateNewSpaceStrongAndDependentRoots(v);
4779 break;
4780 case VISIT_ALL_IN_SWEEP_NEWSPACE:
4781 case VISIT_ALL:
4782 isolate_->global_handles()->IterateAllRoots(v);
4783 break;
4784 }
4785 v->Synchronize(VisitorSynchronization::kGlobalHandles);
4787 // Iterate over eternal handles.
4788 if (mode == VISIT_ALL_IN_SCAVENGE) {
4789 isolate_->eternal_handles()->IterateNewSpaceRoots(v);
4790 } else {
4791 isolate_->eternal_handles()->IterateAllRoots(v);
4792 }
4793 v->Synchronize(VisitorSynchronization::kEternalHandles);
4795 // Iterate over pointers being held by inactive threads.
4796 isolate_->thread_manager()->Iterate(v);
4797 v->Synchronize(VisitorSynchronization::kThreadManager);
4799 // Iterate over the pointers the Serialization/Deserialization code is
4801 // During garbage collection this keeps the partial snapshot cache alive.
4802 // During deserialization of the startup snapshot this creates the partial
4803 // snapshot cache and deserializes the objects it refers to. During
4804 // serialization this does nothing, since the partial snapshot cache is
4805 // empty. However the next thing we do is create the partial snapshot,
4806 // filling up the partial snapshot cache with objects it needs as we go.
4807 SerializerDeserializer::Iterate(isolate_, v);
4808 // We don't do a v->Synchronize call here, because in debug mode that will
4809 // output a flag to the snapshot. However at this point the serializer and
4810 // deserializer are deliberately a little unsynchronized (see above) so the
4811 // checking of the sync flag in the snapshot would fail.
4815 // TODO(1236194): Since the heap size is configurable on the command line
4816 // and through the API, we should gracefully handle the case that the heap
4817 // size is not big enough to fit all the initial objects.
4818 bool Heap::ConfigureHeap(int max_semi_space_size, int max_old_space_size,
4819 int max_executable_size, size_t code_range_size) {
4820 if (HasBeenSetUp()) return false;
4822 // Overwrite default configuration.
4823 if (max_semi_space_size > 0) {
4824 max_semi_space_size_ = max_semi_space_size * MB;
4826 if (max_old_space_size > 0) {
4827 max_old_generation_size_ = max_old_space_size * MB;
4829 if (max_executable_size > 0) {
4830 max_executable_size_ = max_executable_size * MB;
4833 // If max space size flags are specified overwrite the configuration.
4834 if (FLAG_max_semi_space_size > 0) {
4835 max_semi_space_size_ = FLAG_max_semi_space_size * MB;
4837 if (FLAG_max_old_space_size > 0) {
4838 max_old_generation_size_ = FLAG_max_old_space_size * MB;
4840 if (FLAG_max_executable_size > 0) {
4841 max_executable_size_ = FLAG_max_executable_size * MB;
4844 if (FLAG_stress_compaction) {
4845 // This will cause more frequent GCs when stressing.
4846 max_semi_space_size_ = Page::kPageSize;
4849 if (Snapshot::HaveASnapshotToStartFrom()) {
4850 // If we are using a snapshot we always reserve the default amount
4851 // of memory for each semispace because code in the snapshot has
4852 // write-barrier code that relies on the size and alignment of new
4853 // space. We therefore cannot use a larger max semispace size
4854 // than the default reserved semispace size.
4855 if (max_semi_space_size_ > reserved_semispace_size_) {
4856 max_semi_space_size_ = reserved_semispace_size_;
4857 if (FLAG_trace_gc) {
4858 PrintPID("Max semi-space size cannot be more than %d kbytes\n",
4859 reserved_semispace_size_ >> 10);
4860 }
4861 }
4862 } else {
4863 // If we are not using snapshots we reserve space for the actual
4864 // max semispace size.
4865 reserved_semispace_size_ = max_semi_space_size_;
4866 }
4868 // The max executable size must be less than or equal to the max old
4869 // generation size.
4870 if (max_executable_size_ > max_old_generation_size_) {
4871 max_executable_size_ = max_old_generation_size_;
4872 }
4874 // The new space size must be a power of two to support single-bit testing
4875 // for containment.
4876 max_semi_space_size_ =
4877 base::bits::RoundUpToPowerOfTwo32(max_semi_space_size_);
4878 reserved_semispace_size_ =
4879 base::bits::RoundUpToPowerOfTwo32(reserved_semispace_size_);
4881 if (FLAG_min_semi_space_size > 0) {
4882 int initial_semispace_size = FLAG_min_semi_space_size * MB;
4883 if (initial_semispace_size > max_semi_space_size_) {
4884 initial_semispace_size_ = max_semi_space_size_;
4885 if (FLAG_trace_gc) {
4886 PrintPID(
4887 "Min semi-space size cannot be more than the maximum "
4888 "semi-space size of %d MB\n",
4889 max_semi_space_size_);
4890 }
4891 } else {
4892 initial_semispace_size_ = initial_semispace_size;
4893 }
4894 }
4896 initial_semispace_size_ = Min(initial_semispace_size_, max_semi_space_size_);
4898 // The old generation is paged and needs at least one page for each space.
4899 int paged_space_count = LAST_PAGED_SPACE - FIRST_PAGED_SPACE + 1;
4900 max_old_generation_size_ =
4901 Max(static_cast<intptr_t>(paged_space_count * Page::kPageSize),
4902 max_old_generation_size_);
4904 // We rely on being able to allocate new arrays in paged spaces.
4905 DCHECK(Page::kMaxRegularHeapObjectSize >=
4906 (JSArray::kSize +
4907 FixedArray::SizeFor(JSObject::kInitialMaxFastElementArray) +
4908 AllocationMemento::kSize));
4910 code_range_size_ = code_range_size * MB;
4912 configured_ = true;
4913 return true;
4914 }
4917 bool Heap::ConfigureHeapDefault() { return ConfigureHeap(0, 0, 0, 0); }
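// Usage sketch (illustrative, not in the original source): an embedder can
// pass explicit limits in MB, while ConfigureHeapDefault() passes zeros so
// only flag-based and built-in defaults apply. The values below are made up.
//
//   heap->ConfigureHeap(16 /* semi-space MB */, 512 /* old gen MB */,
//                       256 /* executable MB */, 0 /* code range */);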
4920 void Heap::RecordStats(HeapStats* stats, bool take_snapshot) {
4921 *stats->start_marker = HeapStats::kStartMarker;
4922 *stats->end_marker = HeapStats::kEndMarker;
4923 *stats->new_space_size = new_space_.SizeAsInt();
4924 *stats->new_space_capacity = static_cast<int>(new_space_.Capacity());
4925 *stats->old_pointer_space_size = old_pointer_space_->SizeOfObjects();
4926 *stats->old_pointer_space_capacity = old_pointer_space_->Capacity();
4927 *stats->old_data_space_size = old_data_space_->SizeOfObjects();
4928 *stats->old_data_space_capacity = old_data_space_->Capacity();
4929 *stats->code_space_size = code_space_->SizeOfObjects();
4930 *stats->code_space_capacity = code_space_->Capacity();
4931 *stats->map_space_size = map_space_->SizeOfObjects();
4932 *stats->map_space_capacity = map_space_->Capacity();
4933 *stats->cell_space_size = cell_space_->SizeOfObjects();
4934 *stats->cell_space_capacity = cell_space_->Capacity();
4935 *stats->property_cell_space_size = property_cell_space_->SizeOfObjects();
4936 *stats->property_cell_space_capacity = property_cell_space_->Capacity();
4937 *stats->lo_space_size = lo_space_->Size();
4938 isolate_->global_handles()->RecordStats(stats);
4939 *stats->memory_allocator_size = isolate()->memory_allocator()->Size();
4940 *stats->memory_allocator_capacity =
4941 isolate()->memory_allocator()->Size() +
4942 isolate()->memory_allocator()->Available();
4943 *stats->os_error = base::OS::GetLastError();
4945 if (take_snapshot) {
4946 HeapIterator iterator(this);
4947 for (HeapObject* obj = iterator.next(); obj != NULL;
4948 obj = iterator.next()) {
4949 InstanceType type = obj->map()->instance_type();
4950 DCHECK(0 <= type && type <= LAST_TYPE);
4951 stats->objects_per_type[type]++;
4952 stats->size_per_type[type] += obj->Size();
4958 intptr_t Heap::PromotedSpaceSizeOfObjects() {
4959 return old_pointer_space_->SizeOfObjects() +
4960 old_data_space_->SizeOfObjects() + code_space_->SizeOfObjects() +
4961 map_space_->SizeOfObjects() + cell_space_->SizeOfObjects() +
4962 property_cell_space_->SizeOfObjects() + lo_space_->SizeOfObjects();
4966 int64_t Heap::PromotedExternalMemorySize() {
4967 if (amount_of_external_allocated_memory_ <=
4968 amount_of_external_allocated_memory_at_last_global_gc_)
4970 return amount_of_external_allocated_memory_ -
4971 amount_of_external_allocated_memory_at_last_global_gc_;
4975 intptr_t Heap::OldGenerationAllocationLimit(intptr_t old_gen_size,
4976 int freed_global_handles) {
4977 const int kMaxHandles = 1000;
4978 const int kMinHandles = 100;
4979 double min_factor = 1.1;
4980 double max_factor = 4;
4981 // We set the old generation growing factor to 2 to grow the heap slower on
4982 // memory-constrained devices.
4983 if (max_old_generation_size_ <= kMaxOldSpaceSizeMediumMemoryDevice) {
4984 max_factor = 2;
4985 }
4986 // If there are many freed global handles, then the next full GC will
4987 // likely collect a lot of garbage. Choose the heap growing factor
4988 // depending on freed global handles.
4989 // TODO(ulan, hpayer): Take into account mutator utilization.
4990 double factor;
4991 if (freed_global_handles <= kMinHandles) {
4992 factor = max_factor;
4993 } else if (freed_global_handles >= kMaxHandles) {
4994 factor = min_factor;
4995 } else {
4996 // Compute factor using linear interpolation between points
4997 // (kMinHandles, max_factor) and (kMaxHandles, min_factor).
4998 factor = max_factor -
4999 (freed_global_handles - kMinHandles) * (max_factor - min_factor) /
5000 (kMaxHandles - kMinHandles);
5001 }
5003 if (FLAG_stress_compaction ||
5004 mark_compact_collector()->reduce_memory_footprint_) {
5005 factor = min_factor;
5006 }
5008 intptr_t limit = static_cast<intptr_t>(old_gen_size * factor);
5009 limit = Max(limit, kMinimumOldGenerationAllocationLimit);
5010 limit += new_space_.Capacity();
5011 intptr_t halfway_to_the_max = (old_gen_size + max_old_generation_size_) / 2;
5012 return Min(limit, halfway_to_the_max);
5013 }
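// Worked example (not from the original source) of the growing-factor math
// above, assuming a large-memory configuration, no memory-reducing flags,
// old_gen_size = 100 MB and 550 freed global handles:
//
//   factor = 4 - (550 - 100) * (4 - 1.1) / (1000 - 100)  ~= 2.55
//   limit  = 100 MB * 2.55 + new_space_.Capacity()
//
// and the result is clamped below by kMinimumOldGenerationAllocationLimit and
// above by the midpoint between old_gen_size and max_old_generation_size_.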
5016 void Heap::EnableInlineAllocation() {
5017 if (!inline_allocation_disabled_) return;
5018 inline_allocation_disabled_ = false;
5020 // Update inline allocation limit for new space.
5021 new_space()->UpdateInlineAllocationLimit(0);
5025 void Heap::DisableInlineAllocation() {
5026 if (inline_allocation_disabled_) return;
5027 inline_allocation_disabled_ = true;
5029 // Update inline allocation limit for new space.
5030 new_space()->UpdateInlineAllocationLimit(0);
5032 // Update inline allocation limit for old spaces.
5033 PagedSpaces spaces(this);
5034 for (PagedSpace* space = spaces.next(); space != NULL;
5035 space = spaces.next()) {
5036 space->EmptyAllocationInfo();
5041 V8_DECLARE_ONCE(initialize_gc_once);
5043 static void InitializeGCOnce() {
5044 InitializeScavengingVisitorsTables();
5045 NewSpaceScavenger::Initialize();
5046 MarkCompactCollector::Initialize();
5050 bool Heap::SetUp() {
5052 allocation_timeout_ = FLAG_gc_interval;
5055 // Initialize heap spaces and initial maps and objects. Whenever something
5056 // goes wrong, just return false. The caller should check the results and
5057 // call Heap::TearDown() to release allocated memory.
5059 // If the heap is not yet configured (e.g. through the API), configure it.
5060 // Configuration is based on the flags new-space-size (really the semispace
5061 // size) and old-space-size if set or the initial values of semispace_size_
5062 // and old_generation_size_ otherwise.
5064 if (!ConfigureHeapDefault()) return false;
5067 base::CallOnce(&initialize_gc_once, &InitializeGCOnce);
5069 MarkMapPointersAsEncoded(false);
5071 // Set up memory allocator.
5072 if (!isolate_->memory_allocator()->SetUp(MaxReserved(), MaxExecutableSize()))
5073 return false;
5075 // Set up new space.
5076 if (!new_space_.SetUp(reserved_semispace_size_, max_semi_space_size_)) {
5077 return false;
5078 }
5079 new_space_top_after_last_gc_ = new_space()->top();
5081 // Initialize old pointer space.
5082 old_pointer_space_ = new OldSpace(this, max_old_generation_size_,
5083 OLD_POINTER_SPACE, NOT_EXECUTABLE);
5084 if (old_pointer_space_ == NULL) return false;
5085 if (!old_pointer_space_->SetUp()) return false;
5087 // Initialize old data space.
5088 old_data_space_ = new OldSpace(this, max_old_generation_size_, OLD_DATA_SPACE,
5089 NOT_EXECUTABLE);
5090 if (old_data_space_ == NULL) return false;
5091 if (!old_data_space_->SetUp()) return false;
5093 if (!isolate_->code_range()->SetUp(code_range_size_)) return false;
5095 // Initialize the code space, set its maximum capacity to the old
5096 // generation size. It needs executable memory.
5097 code_space_ =
5098 new OldSpace(this, max_old_generation_size_, CODE_SPACE, EXECUTABLE);
5099 if (code_space_ == NULL) return false;
5100 if (!code_space_->SetUp()) return false;
5102 // Initialize map space.
5103 map_space_ = new MapSpace(this, max_old_generation_size_, MAP_SPACE);
5104 if (map_space_ == NULL) return false;
5105 if (!map_space_->SetUp()) return false;
5107 // Initialize simple cell space.
5108 cell_space_ = new CellSpace(this, max_old_generation_size_, CELL_SPACE);
5109 if (cell_space_ == NULL) return false;
5110 if (!cell_space_->SetUp()) return false;
5112 // Initialize global property cell space.
5113 property_cell_space_ = new PropertyCellSpace(this, max_old_generation_size_,
5114 PROPERTY_CELL_SPACE);
5115 if (property_cell_space_ == NULL) return false;
5116 if (!property_cell_space_->SetUp()) return false;
5118 // The large object code space may contain code or data. We set the memory
5119 // to be non-executable here for safety, but this means we need to enable it
5120 // explicitly when allocating large code objects.
5121 lo_space_ = new LargeObjectSpace(this, max_old_generation_size_, LO_SPACE);
5122 if (lo_space_ == NULL) return false;
5123 if (!lo_space_->SetUp()) return false;
5125 // Set up the seed that is used to randomize the string hash function.
5126 DCHECK(hash_seed() == 0);
5127 if (FLAG_randomize_hashes) {
5128 if (FLAG_hash_seed == 0) {
5129 int rnd = isolate()->random_number_generator()->NextInt();
5130 set_hash_seed(Smi::FromInt(rnd & Name::kHashBitMask));
5131 } else {
5132 set_hash_seed(Smi::FromInt(FLAG_hash_seed));
5133 }
5134 }
5136 LOG(isolate_, IntPtrTEvent("heap-capacity", Capacity()));
5137 LOG(isolate_, IntPtrTEvent("heap-available", Available()));
5139 store_buffer()->SetUp();
5141 mark_compact_collector()->SetUp();
5143 return true;
5144 }
5147 bool Heap::CreateHeapObjects() {
5148 // Create initial maps.
5149 if (!CreateInitialMaps()) return false;
5152 // Create initial objects
5153 CreateInitialObjects();
5154 CHECK_EQ(0, gc_count_);
5156 set_native_contexts_list(undefined_value());
5157 set_array_buffers_list(undefined_value());
5158 set_allocation_sites_list(undefined_value());
5159 weak_object_to_code_table_ = undefined_value();
5164 void Heap::SetStackLimits() {
5165 DCHECK(isolate_ != NULL);
5166 DCHECK(isolate_ == isolate());
  // On 64-bit machines, pointers are generally out of range of Smis. We write
  // something that looks like an out-of-range Smi to the GC.

  // Set up the special root array entries containing the stack limits.
  // These are actually addresses, but the tag makes the GC ignore them.
5172 roots_[kStackLimitRootIndex] = reinterpret_cast<Object*>(
5173 (isolate_->stack_guard()->jslimit() & ~kSmiTagMask) | kSmiTag);
5174 roots_[kRealStackLimitRootIndex] = reinterpret_cast<Object*>(
5175 (isolate_->stack_guard()->real_jslimit() & ~kSmiTagMask) | kSmiTag);
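
// TearDown reverses SetUp(): it optionally reports cumulative GC statistics
// and maximum committed memory, then releases every space, the store buffer,
// the incremental marking state and finally the memory allocator itself.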
5179 void Heap::TearDown() {
5181 if (FLAG_verify_heap) {
5186 UpdateMaximumCommitted();
5188 if (FLAG_print_cumulative_gc_stat) {
5190 PrintF("gc_count=%d ", gc_count_);
5191 PrintF("mark_sweep_count=%d ", ms_count_);
5192 PrintF("max_gc_pause=%.1f ", get_max_gc_pause());
5193 PrintF("total_gc_time=%.1f ", total_gc_time_ms_);
5194 PrintF("min_in_mutator=%.1f ", get_min_in_mutator());
5195 PrintF("max_alive_after_gc=%" V8_PTR_PREFIX "d ", get_max_alive_after_gc());
5196 PrintF("total_marking_time=%.1f ", tracer_.cumulative_sweeping_duration());
5197 PrintF("total_sweeping_time=%.1f ", tracer_.cumulative_sweeping_duration());
5201 if (FLAG_print_max_heap_committed) {
5203 PrintF("maximum_committed_by_heap=%" V8_PTR_PREFIX "d ",
5204 MaximumCommittedMemory());
5205 PrintF("maximum_committed_by_new_space=%" V8_PTR_PREFIX "d ",
5206 new_space_.MaximumCommittedMemory());
5207 PrintF("maximum_committed_by_old_pointer_space=%" V8_PTR_PREFIX "d ",
5208 old_data_space_->MaximumCommittedMemory());
5209 PrintF("maximum_committed_by_old_data_space=%" V8_PTR_PREFIX "d ",
5210 old_pointer_space_->MaximumCommittedMemory());
5211 PrintF("maximum_committed_by_old_data_space=%" V8_PTR_PREFIX "d ",
5212 old_pointer_space_->MaximumCommittedMemory());
5213 PrintF("maximum_committed_by_code_space=%" V8_PTR_PREFIX "d ",
5214 code_space_->MaximumCommittedMemory());
5215 PrintF("maximum_committed_by_map_space=%" V8_PTR_PREFIX "d ",
5216 map_space_->MaximumCommittedMemory());
5217 PrintF("maximum_committed_by_cell_space=%" V8_PTR_PREFIX "d ",
5218 cell_space_->MaximumCommittedMemory());
5219 PrintF("maximum_committed_by_property_space=%" V8_PTR_PREFIX "d ",
5220 property_cell_space_->MaximumCommittedMemory());
5221 PrintF("maximum_committed_by_lo_space=%" V8_PTR_PREFIX "d ",
5222 lo_space_->MaximumCommittedMemory());
5226 if (FLAG_verify_predictable) {
5227 PrintAlloctionsHash();
5230 TearDownArrayBuffers();
5232 isolate_->global_handles()->TearDown();
5234 external_string_table_.TearDown();
5236 mark_compact_collector()->TearDown();
5238 new_space_.TearDown();
5240 if (old_pointer_space_ != NULL) {
5241 old_pointer_space_->TearDown();
5242 delete old_pointer_space_;
5243 old_pointer_space_ = NULL;
5246 if (old_data_space_ != NULL) {
5247 old_data_space_->TearDown();
5248 delete old_data_space_;
5249 old_data_space_ = NULL;
5252 if (code_space_ != NULL) {
5253 code_space_->TearDown();
5258 if (map_space_ != NULL) {
5259 map_space_->TearDown();
5264 if (cell_space_ != NULL) {
5265 cell_space_->TearDown();
5270 if (property_cell_space_ != NULL) {
5271 property_cell_space_->TearDown();
5272 delete property_cell_space_;
5273 property_cell_space_ = NULL;
5276 if (lo_space_ != NULL) {
5277 lo_space_->TearDown();
5282 store_buffer()->TearDown();
5283 incremental_marking()->TearDown();
5285 isolate_->memory_allocator()->TearDown();
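
// GC prologue and epilogue callbacks are stored as (callback, gc_type,
// pass_isolate) triples; registering the same callback twice is a bug and is
// caught by the DCHECKs below. A minimal usage sketch, with a hypothetical
// callback name:
//
//   static void OnGC(v8::Isolate* isolate, v8::GCType type,
//                    v8::GCCallbackFlags flags) { /* sample heap stats */ }
//   heap->AddGCPrologueCallback(OnGC, kGCTypeAll, true);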
5289 void Heap::AddGCPrologueCallback(v8::Isolate::GCPrologueCallback callback,
5290 GCType gc_type, bool pass_isolate) {
5291 DCHECK(callback != NULL);
5292 GCPrologueCallbackPair pair(callback, gc_type, pass_isolate);
5293 DCHECK(!gc_prologue_callbacks_.Contains(pair));
5294 return gc_prologue_callbacks_.Add(pair);
5298 void Heap::RemoveGCPrologueCallback(v8::Isolate::GCPrologueCallback callback) {
5299 DCHECK(callback != NULL);
5300 for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
5301 if (gc_prologue_callbacks_[i].callback == callback) {
5302 gc_prologue_callbacks_.Remove(i);
5310 void Heap::AddGCEpilogueCallback(v8::Isolate::GCEpilogueCallback callback,
5311 GCType gc_type, bool pass_isolate) {
5312 DCHECK(callback != NULL);
5313 GCEpilogueCallbackPair pair(callback, gc_type, pass_isolate);
5314 DCHECK(!gc_epilogue_callbacks_.Contains(pair));
5315 return gc_epilogue_callbacks_.Add(pair);
5319 void Heap::RemoveGCEpilogueCallback(v8::Isolate::GCEpilogueCallback callback) {
5320 DCHECK(callback != NULL);
5321 for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
5322 if (gc_epilogue_callbacks_[i].callback == callback) {
5323 gc_epilogue_callbacks_.Remove(i);
5331 // TODO(ishell): Find a better place for this.
5332 void Heap::AddWeakObjectToCodeDependency(Handle<Object> obj,
5333 Handle<DependentCode> dep) {
5334 DCHECK(!InNewSpace(*obj));
5335 DCHECK(!InNewSpace(*dep));
5336 // This handle scope keeps the table handle local to this function, which
5337 // allows us to safely skip write barriers in table update operations.
5338 HandleScope scope(isolate());
5339 Handle<WeakHashTable> table(WeakHashTable::cast(weak_object_to_code_table_),
5341 table = WeakHashTable::Put(table, obj, dep);
5343 if (ShouldZapGarbage() && weak_object_to_code_table_ != *table) {
5344 WeakHashTable::cast(weak_object_to_code_table_)->Zap(the_hole_value());
5346 set_weak_object_to_code_table(*table);
5347 DCHECK_EQ(*dep, table->Lookup(obj));
5351 DependentCode* Heap::LookupWeakObjectToCodeDependency(Handle<Object> obj) {
5352 Object* dep = WeakHashTable::cast(weak_object_to_code_table_)->Lookup(obj);
5353 if (dep->IsDependentCode()) return DependentCode::cast(dep);
5354 return DependentCode::cast(empty_fixed_array());
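
// The weak object-to-code table starts out as the undefined sentinel; it is
// lazily replaced by a real WeakHashTable the first time a dependency needs
// to be recorded.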
5358 void Heap::EnsureWeakObjectToCodeTable() {
5359 if (!weak_object_to_code_table()->IsHashTable()) {
5360 set_weak_object_to_code_table(
5361 *WeakHashTable::New(isolate(), 16, USE_DEFAULT_MINIMUM_CAPACITY,
5367 void Heap::FatalProcessOutOfMemory(const char* location, bool take_snapshot) {
5368 v8::internal::V8::FatalProcessOutOfMemory(location, take_snapshot);
5373 class PrintHandleVisitor : public ObjectVisitor {
5375 void VisitPointers(Object** start, Object** end) {
5376 for (Object** p = start; p < end; p++)
5377 PrintF(" handle %p to %p\n", reinterpret_cast<void*>(p),
5378 reinterpret_cast<void*>(*p));
5383 void Heap::PrintHandles() {
5384 PrintF("Handles:\n");
5385 PrintHandleVisitor v;
5386 isolate_->handle_scope_implementer()->Iterate(&v);
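
// The AllSpaces, PagedSpaces and OldSpaces helpers walk the heap's spaces in
// AllocationSpace order; each call to next() returns the next space of the
// relevant kind and NULL once all of them have been visited.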
5392 Space* AllSpaces::next() {
5393 switch (counter_++) {
5395 return heap_->new_space();
5396 case OLD_POINTER_SPACE:
5397 return heap_->old_pointer_space();
5398 case OLD_DATA_SPACE:
5399 return heap_->old_data_space();
5401 return heap_->code_space();
5403 return heap_->map_space();
5405 return heap_->cell_space();
5406 case PROPERTY_CELL_SPACE:
5407 return heap_->property_cell_space();
5409 return heap_->lo_space();
5416 PagedSpace* PagedSpaces::next() {
5417 switch (counter_++) {
5418 case OLD_POINTER_SPACE:
5419 return heap_->old_pointer_space();
5420 case OLD_DATA_SPACE:
5421 return heap_->old_data_space();
5423 return heap_->code_space();
5425 return heap_->map_space();
5427 return heap_->cell_space();
5428 case PROPERTY_CELL_SPACE:
5429 return heap_->property_cell_space();
5436 OldSpace* OldSpaces::next() {
5437 switch (counter_++) {
5438 case OLD_POINTER_SPACE:
5439 return heap_->old_pointer_space();
5440 case OLD_DATA_SPACE:
5441 return heap_->old_data_space();
5443 return heap_->code_space();
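
// SpaceIterator hands out one ObjectIterator per space. The optional size
// function is forwarded to every per-space iterator so callers can override
// how object sizes are computed during iteration.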
5450 SpaceIterator::SpaceIterator(Heap* heap)
5452 current_space_(FIRST_SPACE),
5457 SpaceIterator::SpaceIterator(Heap* heap, HeapObjectCallback size_func)
5459 current_space_(FIRST_SPACE),
5461 size_func_(size_func) {}
5464 SpaceIterator::~SpaceIterator() {
5465 // Delete active iterator if any.
5470 bool SpaceIterator::has_next() {
5471 // Iterate until no more spaces.
5472 return current_space_ != LAST_SPACE;
5476 ObjectIterator* SpaceIterator::next() {
5477 if (iterator_ != NULL) {
5480 // Move to the next space
5482 if (current_space_ > LAST_SPACE) {
5487 // Return iterator for the new current space.
5488 return CreateIterator();
5492 // Create an iterator for the space to iterate.
5493 ObjectIterator* SpaceIterator::CreateIterator() {
5494 DCHECK(iterator_ == NULL);
5496 switch (current_space_) {
5498 iterator_ = new SemiSpaceIterator(heap_->new_space(), size_func_);
5500 case OLD_POINTER_SPACE:
5502 new HeapObjectIterator(heap_->old_pointer_space(), size_func_);
5504 case OLD_DATA_SPACE:
5505 iterator_ = new HeapObjectIterator(heap_->old_data_space(), size_func_);
5508 iterator_ = new HeapObjectIterator(heap_->code_space(), size_func_);
5511 iterator_ = new HeapObjectIterator(heap_->map_space(), size_func_);
5514 iterator_ = new HeapObjectIterator(heap_->cell_space(), size_func_);
5516 case PROPERTY_CELL_SPACE:
5518 new HeapObjectIterator(heap_->property_cell_space(), size_func_);
5521 iterator_ = new LargeObjectIterator(heap_->lo_space(), size_func_);
  // Return the newly allocated iterator.
5526 DCHECK(iterator_ != NULL);
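
// A HeapObjectsFilter decides which objects a HeapIterator should skip.
// UnreachableObjectsFilter marks everything reachable from the roots and then
// skips any object whose mark bit was never set.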
5531 class HeapObjectsFilter {
5533 virtual ~HeapObjectsFilter() {}
5534 virtual bool SkipObject(HeapObject* object) = 0;
5538 class UnreachableObjectsFilter : public HeapObjectsFilter {
5540 explicit UnreachableObjectsFilter(Heap* heap) : heap_(heap) {
5541 MarkReachableObjects();
5544 ~UnreachableObjectsFilter() {
5545 heap_->mark_compact_collector()->ClearMarkbits();
5548 bool SkipObject(HeapObject* object) {
5549 MarkBit mark_bit = Marking::MarkBitFrom(object);
5550 return !mark_bit.Get();
5554 class MarkingVisitor : public ObjectVisitor {
5556 MarkingVisitor() : marking_stack_(10) {}
5558 void VisitPointers(Object** start, Object** end) {
5559 for (Object** p = start; p < end; p++) {
5560 if (!(*p)->IsHeapObject()) continue;
5561 HeapObject* obj = HeapObject::cast(*p);
5562 MarkBit mark_bit = Marking::MarkBitFrom(obj);
5563 if (!mark_bit.Get()) {
5565 marking_stack_.Add(obj);
5570 void TransitiveClosure() {
5571 while (!marking_stack_.is_empty()) {
5572 HeapObject* obj = marking_stack_.RemoveLast();
5578 List<HeapObject*> marking_stack_;
5581 void MarkReachableObjects() {
5582 MarkingVisitor visitor;
5583 heap_->IterateRoots(&visitor, VISIT_ALL);
5584 visitor.TransitiveClosure();
5588 DisallowHeapAllocation no_allocation_;
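
// HeapIterator walks every object in every space, optionally filtering out
// objects that are unreachable from the roots.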
5592 HeapIterator::HeapIterator(Heap* heap)
5593 : make_heap_iterable_helper_(heap),
5594 no_heap_allocation_(),
5596 filtering_(HeapIterator::kNoFiltering),
5602 HeapIterator::HeapIterator(Heap* heap,
5603 HeapIterator::HeapObjectsFiltering filtering)
5604 : make_heap_iterable_helper_(heap),
5605 no_heap_allocation_(),
5607 filtering_(filtering),
5613 HeapIterator::~HeapIterator() { Shutdown(); }
5616 void HeapIterator::Init() {
5617 // Start the iteration.
5618 space_iterator_ = new SpaceIterator(heap_);
5619 switch (filtering_) {
5620 case kFilterUnreachable:
5621 filter_ = new UnreachableObjectsFilter(heap_);
5626 object_iterator_ = space_iterator_->next();
5630 void HeapIterator::Shutdown() {
  // Assert that in filtering mode we have iterated through all
  // objects. Otherwise, the heap will be left in an inconsistent state.
5634 if (filtering_ != kNoFiltering) {
5635 DCHECK(object_iterator_ == NULL);
5638 // Make sure the last iterator is deallocated.
5639 delete space_iterator_;
5640 space_iterator_ = NULL;
5641 object_iterator_ = NULL;
5647 HeapObject* HeapIterator::next() {
5648 if (filter_ == NULL) return NextObject();
5650 HeapObject* obj = NextObject();
5651 while (obj != NULL && filter_->SkipObject(obj)) obj = NextObject();
5656 HeapObject* HeapIterator::NextObject() {
5657 // No iterator means we are done.
5658 if (object_iterator_ == NULL) return NULL;
5660 if (HeapObject* obj = object_iterator_->next_object()) {
5661 // If the current iterator has more objects we are fine.
  // Go through the spaces looking for one that has objects.
5665 while (space_iterator_->has_next()) {
5666 object_iterator_ = space_iterator_->next();
5667 if (HeapObject* obj = object_iterator_->next_object()) {
5672 // Done with the last space.
5673 object_iterator_ = NULL;
5678 void HeapIterator::reset() {
5679 // Restart the iterator.
5687 Object* const PathTracer::kAnyGlobalObject = NULL;
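
// PathTracer performs a depth-first walk from the given roots, temporarily
// marking visited objects by adding kMarkTag to their map words, and keeps the
// current chain of objects on object_stack_ so the path to the target can be
// printed.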
5689 class PathTracer::MarkVisitor : public ObjectVisitor {
5691 explicit MarkVisitor(PathTracer* tracer) : tracer_(tracer) {}
5692 void VisitPointers(Object** start, Object** end) {
5693 // Scan all HeapObject pointers in [start, end)
5694 for (Object** p = start; !tracer_->found() && (p < end); p++) {
5695 if ((*p)->IsHeapObject()) tracer_->MarkRecursively(p, this);
5700 PathTracer* tracer_;
5704 class PathTracer::UnmarkVisitor : public ObjectVisitor {
5706 explicit UnmarkVisitor(PathTracer* tracer) : tracer_(tracer) {}
5707 void VisitPointers(Object** start, Object** end) {
5708 // Scan all HeapObject pointers in [start, end)
5709 for (Object** p = start; p < end; p++) {
5710 if ((*p)->IsHeapObject()) tracer_->UnmarkRecursively(p, this);
5715 PathTracer* tracer_;
5719 void PathTracer::VisitPointers(Object** start, Object** end) {
5720 bool done = ((what_to_find_ == FIND_FIRST) && found_target_);
5721 // Visit all HeapObject pointers in [start, end)
5722 for (Object** p = start; !done && (p < end); p++) {
5723 if ((*p)->IsHeapObject()) {
5725 done = ((what_to_find_ == FIND_FIRST) && found_target_);
5731 void PathTracer::Reset() {
5732 found_target_ = false;
5733 object_stack_.Clear();
5737 void PathTracer::TracePathFrom(Object** root) {
5738 DCHECK((search_target_ == kAnyGlobalObject) ||
5739 search_target_->IsHeapObject());
5740 found_target_in_trace_ = false;
5743 MarkVisitor mark_visitor(this);
5744 MarkRecursively(root, &mark_visitor);
5746 UnmarkVisitor unmark_visitor(this);
5747 UnmarkRecursively(root, &unmark_visitor);
5753 static bool SafeIsNativeContext(HeapObject* obj) {
5754 return obj->map() == obj->GetHeap()->raw_unchecked_native_context_map();
5758 void PathTracer::MarkRecursively(Object** p, MarkVisitor* mark_visitor) {
5759 if (!(*p)->IsHeapObject()) return;
5761 HeapObject* obj = HeapObject::cast(*p);
5763 MapWord map_word = obj->map_word();
5764 if (!map_word.ToMap()->IsHeapObject()) return; // visited before
5766 if (found_target_in_trace_) return; // stop if target found
5767 object_stack_.Add(obj);
5768 if (((search_target_ == kAnyGlobalObject) && obj->IsJSGlobalObject()) ||
5769 (obj == search_target_)) {
5770 found_target_in_trace_ = true;
5771 found_target_ = true;
5775 bool is_native_context = SafeIsNativeContext(obj);
5778 Map* map = Map::cast(map_word.ToMap());
5780 MapWord marked_map_word =
5781 MapWord::FromRawValue(obj->map_word().ToRawValue() + kMarkTag);
5782 obj->set_map_word(marked_map_word);
5784 // Scan the object body.
5785 if (is_native_context && (visit_mode_ == VISIT_ONLY_STRONG)) {
    // This is specialized to scan Contexts properly.
5788 reinterpret_cast<Object**>(obj->address() + Context::kHeaderSize);
5790 reinterpret_cast<Object**>(obj->address() + Context::kHeaderSize +
5791 Context::FIRST_WEAK_SLOT * kPointerSize);
5792 mark_visitor->VisitPointers(start, end);
5794 obj->IterateBody(map->instance_type(), obj->SizeFromMap(map), mark_visitor);
5797 // Scan the map after the body because the body is a lot more interesting
5798 // when doing leak detection.
5799 MarkRecursively(reinterpret_cast<Object**>(&map), mark_visitor);
5801 if (!found_target_in_trace_) { // don't pop if found the target
5802 object_stack_.RemoveLast();
5807 void PathTracer::UnmarkRecursively(Object** p, UnmarkVisitor* unmark_visitor) {
5808 if (!(*p)->IsHeapObject()) return;
5810 HeapObject* obj = HeapObject::cast(*p);
5812 MapWord map_word = obj->map_word();
5813 if (map_word.ToMap()->IsHeapObject()) return; // unmarked already
5815 MapWord unmarked_map_word =
5816 MapWord::FromRawValue(map_word.ToRawValue() - kMarkTag);
5817 obj->set_map_word(unmarked_map_word);
5819 Map* map = Map::cast(unmarked_map_word.ToMap());
5821 UnmarkRecursively(reinterpret_cast<Object**>(&map), unmark_visitor);
5823 obj->IterateBody(map->instance_type(), obj->SizeFromMap(map), unmark_visitor);
5827 void PathTracer::ProcessResults() {
5828 if (found_target_) {
5829 OFStream os(stdout);
5830 os << "=====================================\n"
5831 << "==== Path to object ====\n"
5832 << "=====================================\n\n";
5834 DCHECK(!object_stack_.is_empty());
5835 for (int i = 0; i < object_stack_.length(); i++) {
5836 if (i > 0) os << "\n |\n |\n V\n\n";
5837 object_stack_[i]->Print(os);
5839 os << "=====================================\n";
5844 // Triggers a depth-first traversal of reachable objects from one
// given root object and finds a path to a specific heap object and prints it.
5847 void Heap::TracePathToObjectFrom(Object* target, Object* root) {
5848 PathTracer tracer(target, PathTracer::FIND_ALL, VISIT_ALL);
5849 tracer.VisitPointer(&root);
5853 // Triggers a depth-first traversal of reachable objects from roots
5854 // and finds a path to a specific heap object and prints it.
5855 void Heap::TracePathToObject(Object* target) {
5856 PathTracer tracer(target, PathTracer::FIND_ALL, VISIT_ALL);
5857 IterateRoots(&tracer, VISIT_ONLY_STRONG);
5861 // Triggers a depth-first traversal of reachable objects from roots
5862 // and finds a path to any global object and prints it. Useful for
5863 // determining the source for leaks of global objects.
5864 void Heap::TracePathToGlobal() {
5865 PathTracer tracer(PathTracer::kAnyGlobalObject, PathTracer::FIND_ALL,
5867 IterateRoots(&tracer, VISIT_ONLY_STRONG);
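
// Accumulates per-GC timing statistics that are reported from TearDown() when
// --print-cumulative-gc-stat is enabled.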
5872 void Heap::UpdateCumulativeGCStatistics(double duration,
5873 double spent_in_mutator,
5874 double marking_time) {
5875 if (FLAG_print_cumulative_gc_stat) {
5876 total_gc_time_ms_ += duration;
5877 max_gc_pause_ = Max(max_gc_pause_, duration);
5878 max_alive_after_gc_ = Max(max_alive_after_gc_, SizeOfObjects());
5879 min_in_mutator_ = Min(min_in_mutator_, spent_in_mutator);
5880 } else if (FLAG_trace_gc_verbose) {
5881 total_gc_time_ms_ += duration;
5884 marking_time_ += marking_time;
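
// The keyed lookup cache maps (map, name) pairs to field offsets. The hash
// mixes the map's address with the name's hash; each hash value selects a
// small bucket of kEntriesPerBucket entries that is searched linearly.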
5888 int KeyedLookupCache::Hash(Handle<Map> map, Handle<Name> name) {
5889 DisallowHeapAllocation no_gc;
5890 // Uses only lower 32 bits if pointers are larger.
5891 uintptr_t addr_hash =
5892 static_cast<uint32_t>(reinterpret_cast<uintptr_t>(*map)) >> kMapHashShift;
5893 return static_cast<uint32_t>((addr_hash ^ name->Hash()) & kCapacityMask);
5897 int KeyedLookupCache::Lookup(Handle<Map> map, Handle<Name> name) {
5898 DisallowHeapAllocation no_gc;
5899 int index = (Hash(map, name) & kHashMask);
5900 for (int i = 0; i < kEntriesPerBucket; i++) {
5901 Key& key = keys_[index + i];
5902 if ((key.map == *map) && key.name->Equals(*name)) {
5903 return field_offsets_[index + i];
5910 void KeyedLookupCache::Update(Handle<Map> map, Handle<Name> name,
5912 DisallowHeapAllocation no_gc;
5913 if (!name->IsUniqueName()) {
5914 if (!StringTable::InternalizeStringIfExists(
5915 name->GetIsolate(), Handle<String>::cast(name)).ToHandle(&name)) {
5919 // This cache is cleared only between mark compact passes, so we expect the
5920 // cache to only contain old space names.
5921 DCHECK(!map->GetIsolate()->heap()->InNewSpace(*name));
5923 int index = (Hash(map, name) & kHashMask);
5924 // After a GC there will be free slots, so we use them in order (this may
5925 // help to get the most frequently used one in position 0).
5926 for (int i = 0; i < kEntriesPerBucket; i++) {
    Key& key = keys_[index + i];
    Object* free_entry_indicator = NULL;
    if (key.map == free_entry_indicator) {
      key.map = *map;
      key.name = *name;
      field_offsets_[index + i] = field_offset;
      return;
    }
5936 // No free entry found in this bucket, so we move them all down one and
5937 // put the new entry at position zero.
5938 for (int i = kEntriesPerBucket - 1; i > 0; i--) {
5939 Key& key = keys_[index + i];
5940 Key& key2 = keys_[index + i - 1];
5942 field_offsets_[index + i] = field_offsets_[index + i - 1];
5945 // Write the new first entry.
5946 Key& key = keys_[index];
5949 field_offsets_[index] = field_offset;
5953 void KeyedLookupCache::Clear() {
5954 for (int index = 0; index < kLength; index++) keys_[index].map = NULL;
5958 void DescriptorLookupCache::Clear() {
5959 for (int index = 0; index < kLength; index++) keys_[index].source = NULL;
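
// CleanUp compacts both string lists after a GC: entries that were replaced by
// the hole are dropped, and external strings that have been promoted out of
// new space are moved to the old-space list.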
5963 void ExternalStringTable::CleanUp() {
5965 for (int i = 0; i < new_space_strings_.length(); ++i) {
5966 if (new_space_strings_[i] == heap_->the_hole_value()) {
5969 DCHECK(new_space_strings_[i]->IsExternalString());
5970 if (heap_->InNewSpace(new_space_strings_[i])) {
5971 new_space_strings_[last++] = new_space_strings_[i];
5973 old_space_strings_.Add(new_space_strings_[i]);
5976 new_space_strings_.Rewind(last);
5977 new_space_strings_.Trim();
5980 for (int i = 0; i < old_space_strings_.length(); ++i) {
5981 if (old_space_strings_[i] == heap_->the_hole_value()) {
5984 DCHECK(old_space_strings_[i]->IsExternalString());
5985 DCHECK(!heap_->InNewSpace(old_space_strings_[i]));
5986 old_space_strings_[last++] = old_space_strings_[i];
5988 old_space_strings_.Rewind(last);
5989 old_space_strings_.Trim();
5991 if (FLAG_verify_heap) {
5998 void ExternalStringTable::TearDown() {
5999 for (int i = 0; i < new_space_strings_.length(); ++i) {
6000 heap_->FinalizeExternalString(ExternalString::cast(new_space_strings_[i]));
6002 new_space_strings_.Free();
6003 for (int i = 0; i < old_space_strings_.length(); ++i) {
6004 heap_->FinalizeExternalString(ExternalString::cast(old_space_strings_[i]));
6006 old_space_strings_.Free();
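
// Chunks are not released immediately; they are queued here and freed by
// FreeQueuedChunks(), which first compacts the store buffer and filters out
// slots pointing into chunks that are about to be freed.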
6010 void Heap::QueueMemoryChunkForFree(MemoryChunk* chunk) {
6011 chunk->set_next_chunk(chunks_queued_for_free_);
6012 chunks_queued_for_free_ = chunk;
6016 void Heap::FreeQueuedChunks() {
6017 if (chunks_queued_for_free_ == NULL) return;
6020 for (chunk = chunks_queued_for_free_; chunk != NULL; chunk = next) {
6021 next = chunk->next_chunk();
6022 chunk->SetFlag(MemoryChunk::ABOUT_TO_BE_FREED);
6024 if (chunk->owner()->identity() == LO_SPACE) {
      // StoreBuffer::Filter relies on MemoryChunk::FromAnyPointerAddress.
      // If FromAnyPointerAddress encounters a slot that belongs to a large
      // chunk queued for deletion, it will fail to find the chunk because it
      // tries to search the list of pages owned by the large object space,
      // and queued chunks were detached from that list.
      // To work around this we split the large chunk into normal
      // kPageSize-aligned pieces and initialize the size, owner and flags
      // fields of every piece. If FromAnyPointerAddress encounters a slot
      // that belongs to one of these smaller pieces it will treat it as a
      // slot on a normal Page.
6034 Address chunk_end = chunk->address() + chunk->size();
6035 MemoryChunk* inner =
6036 MemoryChunk::FromAddress(chunk->address() + Page::kPageSize);
6037 MemoryChunk* inner_last = MemoryChunk::FromAddress(chunk_end - 1);
6038 while (inner <= inner_last) {
        // The size of a large chunk is always a multiple of
        // OS::AllocateAlignment(), so there is always enough space for a
        // fake MemoryChunk header.
6042 Address area_end = Min(inner->address() + Page::kPageSize, chunk_end);
6043 // Guard against overflow.
6044 if (area_end < inner->address()) area_end = chunk_end;
6045 inner->SetArea(inner->address(), area_end);
6046 inner->set_size(Page::kPageSize);
6047 inner->set_owner(lo_space());
6048 inner->SetFlag(MemoryChunk::ABOUT_TO_BE_FREED);
6049 inner = MemoryChunk::FromAddress(inner->address() + Page::kPageSize);
6053 isolate_->heap()->store_buffer()->Compact();
6054 isolate_->heap()->store_buffer()->Filter(MemoryChunk::ABOUT_TO_BE_FREED);
6055 for (chunk = chunks_queued_for_free_; chunk != NULL; chunk = next) {
6056 next = chunk->next_chunk();
6057 isolate_->memory_allocator()->Free(chunk);
6059 chunks_queued_for_free_ = NULL;
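
// Keeps the addresses of recently unmapped pages in a small ring buffer,
// tagged so that they are recognizable in crash dumps.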
6063 void Heap::RememberUnmappedPage(Address page, bool compacted) {
6064 uintptr_t p = reinterpret_cast<uintptr_t>(page);
  // Tag the page pointer to make it findable in the dump file.
  if (compacted) {
    p ^= 0xc1ead & (Page::kPageSize - 1);  // Cleared.
  } else {
    p ^= 0x1d1ed & (Page::kPageSize - 1);  // I died.
  }
6071 remembered_unmapped_pages_[remembered_unmapped_pages_index_] =
6072 reinterpret_cast<Address>(p);
6073 remembered_unmapped_pages_index_++;
6074 remembered_unmapped_pages_index_ %= kRememberedUnmappedPages;
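
// Object statistics: counts and sizes are accumulated per instance type and
// reported as deltas against the previous checkpoint in
// CheckpointObjectStats().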
6078 void Heap::ClearObjectStats(bool clear_last_time_stats) {
6079 memset(object_counts_, 0, sizeof(object_counts_));
6080 memset(object_sizes_, 0, sizeof(object_sizes_));
6081 if (clear_last_time_stats) {
6082 memset(object_counts_last_time_, 0, sizeof(object_counts_last_time_));
6083 memset(object_sizes_last_time_, 0, sizeof(object_sizes_last_time_));
6088 static base::LazyMutex checkpoint_object_stats_mutex = LAZY_MUTEX_INITIALIZER;
6091 void Heap::CheckpointObjectStats() {
6092 base::LockGuard<base::Mutex> lock_guard(
6093 checkpoint_object_stats_mutex.Pointer());
6094 Counters* counters = isolate()->counters();
6095 #define ADJUST_LAST_TIME_OBJECT_COUNT(name) \
6096 counters->count_of_##name()->Increment( \
6097 static_cast<int>(object_counts_[name])); \
6098 counters->count_of_##name()->Decrement( \
6099 static_cast<int>(object_counts_last_time_[name])); \
6100 counters->size_of_##name()->Increment( \
6101 static_cast<int>(object_sizes_[name])); \
6102 counters->size_of_##name()->Decrement( \
6103 static_cast<int>(object_sizes_last_time_[name]));
6104 INSTANCE_TYPE_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
6105 #undef ADJUST_LAST_TIME_OBJECT_COUNT
6107 #define ADJUST_LAST_TIME_OBJECT_COUNT(name) \
6108 index = FIRST_CODE_KIND_SUB_TYPE + Code::name; \
6109 counters->count_of_CODE_TYPE_##name()->Increment( \
6110 static_cast<int>(object_counts_[index])); \
6111 counters->count_of_CODE_TYPE_##name()->Decrement( \
6112 static_cast<int>(object_counts_last_time_[index])); \
6113 counters->size_of_CODE_TYPE_##name()->Increment( \
6114 static_cast<int>(object_sizes_[index])); \
6115 counters->size_of_CODE_TYPE_##name()->Decrement( \
6116 static_cast<int>(object_sizes_last_time_[index]));
6117 CODE_KIND_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
6118 #undef ADJUST_LAST_TIME_OBJECT_COUNT
6119 #define ADJUST_LAST_TIME_OBJECT_COUNT(name) \
6120 index = FIRST_FIXED_ARRAY_SUB_TYPE + name; \
6121 counters->count_of_FIXED_ARRAY_##name()->Increment( \
6122 static_cast<int>(object_counts_[index])); \
6123 counters->count_of_FIXED_ARRAY_##name()->Decrement( \
6124 static_cast<int>(object_counts_last_time_[index])); \
6125 counters->size_of_FIXED_ARRAY_##name()->Increment( \
6126 static_cast<int>(object_sizes_[index])); \
6127 counters->size_of_FIXED_ARRAY_##name()->Decrement( \
6128 static_cast<int>(object_sizes_last_time_[index]));
6129 FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
6130 #undef ADJUST_LAST_TIME_OBJECT_COUNT
6131 #define ADJUST_LAST_TIME_OBJECT_COUNT(name) \
6133 FIRST_CODE_AGE_SUB_TYPE + Code::k##name##CodeAge - Code::kFirstCodeAge; \
6134 counters->count_of_CODE_AGE_##name()->Increment( \
6135 static_cast<int>(object_counts_[index])); \
6136 counters->count_of_CODE_AGE_##name()->Decrement( \
6137 static_cast<int>(object_counts_last_time_[index])); \
6138 counters->size_of_CODE_AGE_##name()->Increment( \
6139 static_cast<int>(object_sizes_[index])); \
6140 counters->size_of_CODE_AGE_##name()->Decrement( \
6141 static_cast<int>(object_sizes_last_time_[index]));
6142 CODE_AGE_LIST_COMPLETE(ADJUST_LAST_TIME_OBJECT_COUNT)
6143 #undef ADJUST_LAST_TIME_OBJECT_COUNT
6145 MemCopy(object_counts_last_time_, object_counts_, sizeof(object_counts_));
6146 MemCopy(object_sizes_last_time_, object_sizes_, sizeof(object_sizes_));
6150 } // namespace v8::internal