// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#include "accessors.h"
#include "bootstrapper.h"
#include "compilation-cache.h"
#include "cpu-profiler.h"
#include "deoptimizer.h"
#include "global-handles.h"
#include "heap-profiler.h"
#include "incremental-marking.h"
#include "isolate-inl.h"
#include "mark-compact.h"
#include "objects-visiting.h"
#include "objects-visiting-inl.h"
#include "runtime-profiler.h"
#include "scopeinfo.h"
#include "store-buffer.h"
#include "utils/random-number-generator.h"
#include "v8conversions.h"
#include "v8threads.h"
#include "vm-state-inl.h"
#if V8_TARGET_ARCH_ARM && !V8_INTERPRETED_REGEXP
#include "regexp-macro-assembler.h"
#include "arm/regexp-macro-assembler-arm.h"
#endif
#if V8_TARGET_ARCH_MIPS && !V8_INTERPRETED_REGEXP
#include "regexp-macro-assembler.h"
#include "mips/regexp-macro-assembler-mips.h"
#endif

namespace v8 {
namespace internal {


Heap::Heap()
    : isolate_(NULL),
      code_range_size_(kIs64BitArch ? 512 * MB : 0),
      // semispace_size_ should be a power of 2 and old_generation_size_
      // should be a multiple of Page::kPageSize.
      reserved_semispace_size_(8 * (kPointerSize / 4) * MB),
      max_semispace_size_(8 * (kPointerSize / 4) * MB),
      initial_semispace_size_(Page::kPageSize),
      max_old_generation_size_(700ul * (kPointerSize / 4) * MB),
      max_executable_size_(256ul * (kPointerSize / 4) * MB),
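      // With the (kPointerSize / 4) scaling above, a 32-bit build
      // (kPointerSize == 4) gets 8 MB semispaces, a 700 MB old generation
      // and a 256 MB executable limit, while a 64-bit build
      // (kPointerSize == 8) doubles each of these.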
      // Variables set based on semispace_size_ and old_generation_size_ in
      // ConfigureHeap (survived_since_last_expansion_,
      // external_allocation_limit_) will be 4 * reserved_semispace_size_
      // to ensure that the young generation can be aligned to its size.
      maximum_committed_(0),
      survived_since_last_expansion_(0),
      sweep_generation_(0),
      always_allocate_scope_depth_(0),
      linear_allocation_scope_depth_(0),
      contexts_disposed_(0),
      flush_monomorphic_ics_(false),
      scan_on_scavenge_pages_(0),
      new_space_(this),
      old_pointer_space_(NULL),
      old_data_space_(NULL),
      code_space_(NULL),
      map_space_(NULL),
      cell_space_(NULL),
      property_cell_space_(NULL),
      lo_space_(NULL),
      gc_state_(NOT_IN_GC),
      gc_post_processing_depth_(0),
      ms_count_(0),
      gc_count_(0),
      remembered_unmapped_pages_index_(0),
      unflattened_strings_length_(0),
#ifdef DEBUG
      allocation_timeout_(0),
      disallow_allocation_failure_(false),
#endif  // DEBUG
      new_space_high_promotion_mode_active_(false),
      old_generation_allocation_limit_(kMinimumOldGenerationAllocationLimit),
      size_of_old_gen_at_last_old_space_gc_(0),
      external_allocation_limit_(0),
      amount_of_external_allocated_memory_(0),
      amount_of_external_allocated_memory_at_last_global_gc_(0),
      old_gen_exhausted_(false),
      inline_allocation_disabled_(false),
      store_buffer_rebuilder_(store_buffer()),
      hidden_string_(NULL),
      gc_safe_size_of_old_object_(NULL),
      total_regexp_code_generated_(0),
      tracer_(NULL),
      young_survivors_after_last_gc_(0),
      high_survival_rate_period_length_(0),
      low_survival_rate_period_length_(0),
      survival_rate_(0),
      previous_survival_rate_trend_(Heap::STABLE),
      survival_rate_trend_(Heap::STABLE),
      total_gc_time_ms_(0.0),
      max_alive_after_gc_(0),
      min_in_mutator_(kMaxInt),
      alive_after_last_gc_(0),
      last_gc_end_timestamp_(0.0),
      mark_compact_collector_(this),
      store_buffer_(this),
      marking_(this),
      incremental_marking_(this),
      number_idle_notifications_(0),
      last_idle_notification_gc_count_(0),
      last_idle_notification_gc_count_init_(false),
      mark_sweeps_since_idle_round_started_(0),
      gc_count_at_last_idle_gc_(0),
      scavenges_since_last_idle_round_(kIdleScavengeThreshold),
      full_codegen_bytes_generated_(0),
      crankshaft_codegen_bytes_generated_(0),
      gcs_since_last_deopt_(0),
#ifdef VERIFY_HEAP
      no_weak_object_verification_scope_depth_(0),
#endif
      allocation_sites_scratchpad_length(0),
      promotion_queue_(this),
      external_string_table_(this),
      chunks_queued_for_free_(NULL),
      relocation_mutex_(NULL) {
  // Allow build-time customization of the max semispace size. Building
  // V8 with snapshots and a non-default max semispace size is much
  // easier if you can define it as part of the build environment.
#if defined(V8_MAX_SEMISPACE_SIZE)
  max_semispace_size_ = reserved_semispace_size_ = V8_MAX_SEMISPACE_SIZE;
#endif

  // Ensure old_generation_size_ is a multiple of kPageSize.
  ASSERT(MB >= Page::kPageSize);

  intptr_t max_virtual = OS::MaxVirtualMemory();

  if (max_virtual > 0) {
    if (code_range_size_ > 0) {
      // Reserve no more than 1/8 of the memory for the code range.
      code_range_size_ = Min(code_range_size_, max_virtual >> 3);
    }
  }

  memset(roots_, 0, sizeof(roots_[0]) * kRootListLength);
  native_contexts_list_ = NULL;
  array_buffers_list_ = Smi::FromInt(0);
  allocation_sites_list_ = Smi::FromInt(0);
  // Put a dummy entry in the remembered pages so we can find the list in the
  // minidump even if there are no real unmapped pages.
  RememberUnmappedPage(NULL, false);

  ClearObjectStats(true);
}


intptr_t Heap::Capacity() {
  if (!HasBeenSetUp()) return 0;

  return new_space_.Capacity() +
      old_pointer_space_->Capacity() +
      old_data_space_->Capacity() +
      code_space_->Capacity() +
      map_space_->Capacity() +
      cell_space_->Capacity() +
      property_cell_space_->Capacity();
}
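
// Note that Capacity() sums only new space and the paged spaces; large
// object space is excluded here, whereas CommittedMemory() below does
// include it.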


intptr_t Heap::CommittedMemory() {
  if (!HasBeenSetUp()) return 0;

  return new_space_.CommittedMemory() +
      old_pointer_space_->CommittedMemory() +
      old_data_space_->CommittedMemory() +
      code_space_->CommittedMemory() +
      map_space_->CommittedMemory() +
      cell_space_->CommittedMemory() +
      property_cell_space_->CommittedMemory() +
      lo_space_->CommittedMemory();
}


size_t Heap::CommittedPhysicalMemory() {
  if (!HasBeenSetUp()) return 0;

  return new_space_.CommittedPhysicalMemory() +
      old_pointer_space_->CommittedPhysicalMemory() +
      old_data_space_->CommittedPhysicalMemory() +
      code_space_->CommittedPhysicalMemory() +
      map_space_->CommittedPhysicalMemory() +
      cell_space_->CommittedPhysicalMemory() +
      property_cell_space_->CommittedPhysicalMemory() +
      lo_space_->CommittedPhysicalMemory();
}


intptr_t Heap::CommittedMemoryExecutable() {
  if (!HasBeenSetUp()) return 0;

  return isolate()->memory_allocator()->SizeExecutable();
}


void Heap::UpdateMaximumCommitted() {
  if (!HasBeenSetUp()) return;

  intptr_t current_committed_memory = CommittedMemory();
  if (current_committed_memory > maximum_committed_) {
    maximum_committed_ = current_committed_memory;
  }
}


intptr_t Heap::Available() {
  if (!HasBeenSetUp()) return 0;

  return new_space_.Available() +
      old_pointer_space_->Available() +
      old_data_space_->Available() +
      code_space_->Available() +
      map_space_->Available() +
      cell_space_->Available() +
      property_cell_space_->Available();
}


bool Heap::HasBeenSetUp() {
  return old_pointer_space_ != NULL &&
         old_data_space_ != NULL &&
         code_space_ != NULL &&
         map_space_ != NULL &&
         cell_space_ != NULL &&
         property_cell_space_ != NULL &&
         lo_space_ != NULL;
}


int Heap::GcSafeSizeOfOldObject(HeapObject* object) {
  if (IntrusiveMarking::IsMarked(object)) {
    return IntrusiveMarking::SizeOfMarkedObject(object);
  }
  return object->SizeFromMap(object->map());
}


GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space,
                                              const char** reason) {
  // Is global GC requested?
  if (space != NEW_SPACE) {
    isolate_->counters()->gc_compactor_caused_by_request()->Increment();
    *reason = "GC in old space requested";
    return MARK_COMPACTOR;
  }

  if (FLAG_gc_global || (FLAG_stress_compaction && (gc_count_ & 1) != 0)) {
    *reason = "GC in old space forced by flags";
    return MARK_COMPACTOR;
  }

  // Is enough data promoted to justify a global GC?
  if (OldGenerationAllocationLimitReached()) {
    isolate_->counters()->gc_compactor_caused_by_promoted_data()->Increment();
    *reason = "promotion limit reached";
    return MARK_COMPACTOR;
  }

  // Have allocation in OLD and LO failed?
  if (old_gen_exhausted_) {
    isolate_->counters()->
        gc_compactor_caused_by_oldspace_exhaustion()->Increment();
    *reason = "old generations exhausted";
    return MARK_COMPACTOR;
  }

  // Is there enough space left in OLD to guarantee that a scavenge can
  // succeed?
  //
  // Note that MemoryAllocator->MaxAvailable() undercounts the memory available
  // for object promotion. It counts only the bytes that the memory
  // allocator has not yet allocated from the OS and assigned to any space,
  // and does not count available bytes already in the old space or code
  // space. Undercounting is safe---we may get an unrequested full GC when
  // a scavenge would have succeeded.
  if (isolate_->memory_allocator()->MaxAvailable() <= new_space_.Size()) {
    isolate_->counters()->
        gc_compactor_caused_by_oldspace_exhaustion()->Increment();
    *reason = "scavenge might not succeed";
    return MARK_COMPACTOR;
  }

  // Default
  *reason = NULL;
  return SCAVENGER;
}
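
// In practice this means an allocation failure anywhere outside new space
// always escalates to a full mark-compact, while a new-space failure only
// does so when one of the promotion-limit or exhaustion checks above fires;
// otherwise a scavenge suffices.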


// TODO(1238405): Combine the infrastructure for --heap-stats and
// --log-gc to avoid the complicated preprocessor and flag testing.
void Heap::ReportStatisticsBeforeGC() {
  // Heap::ReportHeapStatistics will also log NewSpace statistics when
  // --log-gc is set. The following logic is used to avoid double logging.
#ifdef DEBUG
  if (FLAG_heap_stats || FLAG_log_gc) new_space_.CollectStatistics();
  if (FLAG_heap_stats) {
    ReportHeapStatistics("Before GC");
  } else if (FLAG_log_gc) {
    new_space_.ReportStatistics();
  }
  if (FLAG_heap_stats || FLAG_log_gc) new_space_.ClearHistograms();
#else
  if (FLAG_log_gc) {
    new_space_.CollectStatistics();
    new_space_.ReportStatistics();
    new_space_.ClearHistograms();
  }
#endif  // DEBUG
}


void Heap::PrintShortHeapStatistics() {
  if (!FLAG_trace_gc_verbose) return;
  PrintPID("Memory allocator, used: %6" V8_PTR_PREFIX "d KB"
           ", available: %6" V8_PTR_PREFIX "d KB\n",
           isolate_->memory_allocator()->Size() / KB,
           isolate_->memory_allocator()->Available() / KB);
  PrintPID("New space, used: %6" V8_PTR_PREFIX "d KB"
           ", available: %6" V8_PTR_PREFIX "d KB"
           ", committed: %6" V8_PTR_PREFIX "d KB\n",
           new_space_.Size() / KB,
           new_space_.Available() / KB,
           new_space_.CommittedMemory() / KB);
  PrintPID("Old pointers, used: %6" V8_PTR_PREFIX "d KB"
           ", available: %6" V8_PTR_PREFIX "d KB"
           ", committed: %6" V8_PTR_PREFIX "d KB\n",
           old_pointer_space_->SizeOfObjects() / KB,
           old_pointer_space_->Available() / KB,
           old_pointer_space_->CommittedMemory() / KB);
  PrintPID("Old data space, used: %6" V8_PTR_PREFIX "d KB"
           ", available: %6" V8_PTR_PREFIX "d KB"
           ", committed: %6" V8_PTR_PREFIX "d KB\n",
           old_data_space_->SizeOfObjects() / KB,
           old_data_space_->Available() / KB,
           old_data_space_->CommittedMemory() / KB);
  PrintPID("Code space, used: %6" V8_PTR_PREFIX "d KB"
           ", available: %6" V8_PTR_PREFIX "d KB"
           ", committed: %6" V8_PTR_PREFIX "d KB\n",
           code_space_->SizeOfObjects() / KB,
           code_space_->Available() / KB,
           code_space_->CommittedMemory() / KB);
  PrintPID("Map space, used: %6" V8_PTR_PREFIX "d KB"
           ", available: %6" V8_PTR_PREFIX "d KB"
           ", committed: %6" V8_PTR_PREFIX "d KB\n",
           map_space_->SizeOfObjects() / KB,
           map_space_->Available() / KB,
           map_space_->CommittedMemory() / KB);
  PrintPID("Cell space, used: %6" V8_PTR_PREFIX "d KB"
           ", available: %6" V8_PTR_PREFIX "d KB"
           ", committed: %6" V8_PTR_PREFIX "d KB\n",
           cell_space_->SizeOfObjects() / KB,
           cell_space_->Available() / KB,
           cell_space_->CommittedMemory() / KB);
  PrintPID("PropertyCell space, used: %6" V8_PTR_PREFIX "d KB"
           ", available: %6" V8_PTR_PREFIX "d KB"
           ", committed: %6" V8_PTR_PREFIX "d KB\n",
           property_cell_space_->SizeOfObjects() / KB,
           property_cell_space_->Available() / KB,
           property_cell_space_->CommittedMemory() / KB);
  PrintPID("Large object space, used: %6" V8_PTR_PREFIX "d KB"
           ", available: %6" V8_PTR_PREFIX "d KB"
           ", committed: %6" V8_PTR_PREFIX "d KB\n",
           lo_space_->SizeOfObjects() / KB,
           lo_space_->Available() / KB,
           lo_space_->CommittedMemory() / KB);
  PrintPID("All spaces, used: %6" V8_PTR_PREFIX "d KB"
           ", available: %6" V8_PTR_PREFIX "d KB"
           ", committed: %6" V8_PTR_PREFIX "d KB\n",
           this->SizeOfObjects() / KB,
           this->Available() / KB,
           this->CommittedMemory() / KB);
  PrintPID("External memory reported: %6" V8_PTR_PREFIX "d KB\n",
           static_cast<intptr_t>(amount_of_external_allocated_memory_ / KB));
  PrintPID("Total time spent in GC : %.1f ms\n", total_gc_time_ms_);
}


// TODO(1238405): Combine the infrastructure for --heap-stats and
// --log-gc to avoid the complicated preprocessor and flag testing.
void Heap::ReportStatisticsAfterGC() {
  // As with the before-GC report, we use some complicated logic to ensure
  // that NewSpace statistics are logged exactly once when --log-gc is set.
#ifdef DEBUG
  if (FLAG_heap_stats) {
    new_space_.CollectStatistics();
    ReportHeapStatistics("After GC");
  } else if (FLAG_log_gc) {
    new_space_.ReportStatistics();
  }
#else
  if (FLAG_log_gc) new_space_.ReportStatistics();
#endif  // DEBUG
}


void Heap::GarbageCollectionPrologue() {
  { AllowHeapAllocation for_the_first_part_of_prologue;
    ClearJSFunctionResultCaches();
    gc_count_++;
    unflattened_strings_length_ = 0;

    if (FLAG_flush_code && FLAG_flush_code_incrementally) {
      mark_compact_collector()->EnableCodeFlushing(true);
    }

#ifdef VERIFY_HEAP
    if (FLAG_verify_heap) {
      Verify();
    }
#endif
  }

  UpdateMaximumCommitted();

#ifdef DEBUG
  ASSERT(!AllowHeapAllocation::IsAllowed() && gc_state_ == NOT_IN_GC);

  if (FLAG_gc_verbose) Print();

  ReportStatisticsBeforeGC();
#endif  // DEBUG

  store_buffer()->GCPrologue();

  if (isolate()->concurrent_osr_enabled()) {
    isolate()->optimizing_compiler_thread()->AgeBufferedOsrJobs();
  }
}


intptr_t Heap::SizeOfObjects() {
  intptr_t total = 0;
  AllSpaces spaces(this);
  for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
    total += space->SizeOfObjects();
  }
  return total;
}


void Heap::ClearAllICsByKind(Code::Kind kind) {
  HeapObjectIterator it(code_space());

  for (Object* object = it.Next(); object != NULL; object = it.Next()) {
    Code* code = Code::cast(object);
    Code::Kind current_kind = code->kind();
    if (current_kind == Code::FUNCTION ||
        current_kind == Code::OPTIMIZED_FUNCTION) {
      code->ClearInlineCaches(kind);
    }
  }
}


void Heap::RepairFreeListsAfterBoot() {
  PagedSpaces spaces(this);
  for (PagedSpace* space = spaces.next();
       space != NULL;
       space = spaces.next()) {
    space->RepairFreeListsAfterBoot();
  }
}


void Heap::ProcessPretenuringFeedback() {
  if (FLAG_allocation_site_pretenuring &&
      new_space_high_promotion_mode_active_) {
    int tenure_decisions = 0;
    int dont_tenure_decisions = 0;
    int allocation_mementos_found = 0;
    int allocation_sites = 0;
    int active_allocation_sites = 0;

    // If the scratchpad overflowed, we have to iterate over the allocation
    // sites list.
    bool use_scratchpad =
        allocation_sites_scratchpad_length < kAllocationSiteScratchpadSize;

    int i = 0;
    Object* list_element = allocation_sites_list();
    bool trigger_deoptimization = false;
    while (use_scratchpad ?
           i < allocation_sites_scratchpad_length :
           list_element->IsAllocationSite()) {
      AllocationSite* site = use_scratchpad ?
          allocation_sites_scratchpad[i] : AllocationSite::cast(list_element);
      allocation_mementos_found += site->memento_found_count();
      if (site->memento_found_count() > 0) {
        active_allocation_sites++;
      }
      if (site->DigestPretenuringFeedback()) trigger_deoptimization = true;
      if (site->GetPretenureMode() == TENURED) {
        tenure_decisions++;
      } else {
        dont_tenure_decisions++;
      }
      allocation_sites++;
      if (use_scratchpad) {
        i++;
      } else {
        list_element = site->weak_next();
      }
    }

    if (trigger_deoptimization) isolate_->stack_guard()->DeoptMarkedCode();

    allocation_sites_scratchpad_length = 0;

    // TODO(mvstanton): Pretenure decisions are only made once for an
    // allocation site. Find a sane way to decide about revisiting the
    // decision later.

    if (FLAG_trace_track_allocation_sites &&
        (allocation_mementos_found > 0 ||
         tenure_decisions > 0 ||
         dont_tenure_decisions > 0)) {
      PrintF("GC: (mode, #visited allocation sites, #active allocation sites, "
             "#mementos, #tenure decisions, #donttenure decisions) "
             "(%s, %d, %d, %d, %d, %d)\n",
             use_scratchpad ? "use scratchpad" : "use list",
             allocation_sites,
             active_allocation_sites,
             allocation_mementos_found,
             tenure_decisions,
             dont_tenure_decisions);
    }
  }
}


void Heap::GarbageCollectionEpilogue() {
  store_buffer()->GCEpilogue();

  // In release mode, we only zap the from space under heap verification.
  if (Heap::ShouldZapGarbage()) {
    ZapFromSpace();
  }

#ifdef VERIFY_HEAP
  if (FLAG_verify_heap) {
    Verify();
  }
#endif

  AllowHeapAllocation for_the_rest_of_the_epilogue;

#ifdef DEBUG
  if (FLAG_print_global_handles) isolate_->global_handles()->Print();
  if (FLAG_print_handles) PrintHandles();
  if (FLAG_gc_verbose) Print();
  if (FLAG_code_stats) ReportCodeStatistics("After GC");
#endif
  if (FLAG_deopt_every_n_garbage_collections > 0) {
    if (++gcs_since_last_deopt_ == FLAG_deopt_every_n_garbage_collections) {
      Deoptimizer::DeoptimizeAll(isolate());
      gcs_since_last_deopt_ = 0;
    }
  }

  UpdateMaximumCommitted();

  isolate_->counters()->alive_after_last_gc()->Set(
      static_cast<int>(SizeOfObjects()));

  isolate_->counters()->string_table_capacity()->Set(
      string_table()->Capacity());
  isolate_->counters()->number_of_symbols()->Set(
      string_table()->NumberOfElements());

  if (full_codegen_bytes_generated_ + crankshaft_codegen_bytes_generated_ > 0) {
    isolate_->counters()->codegen_fraction_crankshaft()->AddSample(
        static_cast<int>((crankshaft_codegen_bytes_generated_ * 100.0) /
                         (crankshaft_codegen_bytes_generated_
                          + full_codegen_bytes_generated_)));
  }

  if (CommittedMemory() > 0) {
    isolate_->counters()->external_fragmentation_total()->AddSample(
        static_cast<int>(100 - (SizeOfObjects() * 100.0) / CommittedMemory()));

    isolate_->counters()->heap_fraction_new_space()->
        AddSample(static_cast<int>(
            (new_space()->CommittedMemory() * 100.0) / CommittedMemory()));
    isolate_->counters()->heap_fraction_old_pointer_space()->AddSample(
        static_cast<int>(
            (old_pointer_space()->CommittedMemory() * 100.0) /
            CommittedMemory()));
    isolate_->counters()->heap_fraction_old_data_space()->AddSample(
        static_cast<int>(
            (old_data_space()->CommittedMemory() * 100.0) /
            CommittedMemory()));
    isolate_->counters()->heap_fraction_code_space()->
        AddSample(static_cast<int>(
            (code_space()->CommittedMemory() * 100.0) / CommittedMemory()));
    isolate_->counters()->heap_fraction_map_space()->AddSample(
        static_cast<int>(
            (map_space()->CommittedMemory() * 100.0) / CommittedMemory()));
    isolate_->counters()->heap_fraction_cell_space()->AddSample(
        static_cast<int>(
            (cell_space()->CommittedMemory() * 100.0) / CommittedMemory()));
    isolate_->counters()->heap_fraction_property_cell_space()->
        AddSample(static_cast<int>(
            (property_cell_space()->CommittedMemory() * 100.0) /
            CommittedMemory()));
    isolate_->counters()->heap_fraction_lo_space()->
        AddSample(static_cast<int>(
            (lo_space()->CommittedMemory() * 100.0) / CommittedMemory()));

    isolate_->counters()->heap_sample_total_committed()->AddSample(
        static_cast<int>(CommittedMemory() / KB));
    isolate_->counters()->heap_sample_total_used()->AddSample(
        static_cast<int>(SizeOfObjects() / KB));
    isolate_->counters()->heap_sample_map_space_committed()->AddSample(
        static_cast<int>(map_space()->CommittedMemory() / KB));
    isolate_->counters()->heap_sample_cell_space_committed()->AddSample(
        static_cast<int>(cell_space()->CommittedMemory() / KB));
    isolate_->counters()->
        heap_sample_property_cell_space_committed()->
            AddSample(static_cast<int>(
                property_cell_space()->CommittedMemory() / KB));
    isolate_->counters()->heap_sample_code_space_committed()->AddSample(
        static_cast<int>(code_space()->CommittedMemory() / KB));

    isolate_->counters()->heap_sample_maximum_committed()->AddSample(
        static_cast<int>(MaximumCommittedMemory() / KB));
  }

#define UPDATE_COUNTERS_FOR_SPACE(space)                                       \
  isolate_->counters()->space##_bytes_available()->Set(                        \
      static_cast<int>(space()->Available()));                                 \
  isolate_->counters()->space##_bytes_committed()->Set(                        \
      static_cast<int>(space()->CommittedMemory()));                           \
  isolate_->counters()->space##_bytes_used()->Set(                             \
      static_cast<int>(space()->SizeOfObjects()));
#define UPDATE_FRAGMENTATION_FOR_SPACE(space)                                  \
  if (space()->CommittedMemory() > 0) {                                        \
    isolate_->counters()->external_fragmentation_##space()->AddSample(         \
        static_cast<int>(100 -                                                 \
            (space()->SizeOfObjects() * 100.0) / space()->CommittedMemory())); \
  }
#define UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(space)                     \
  UPDATE_COUNTERS_FOR_SPACE(space)                                             \
  UPDATE_FRAGMENTATION_FOR_SPACE(space)
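
// For instance, UPDATE_COUNTERS_FOR_SPACE(new_space) expands to updates of
// the new_space_bytes_available, new_space_bytes_committed and
// new_space_bytes_used counters via the ##-pasted counter names.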

  UPDATE_COUNTERS_FOR_SPACE(new_space)
  UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(old_pointer_space)
  UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(old_data_space)
  UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(code_space)
  UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(map_space)
  UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(cell_space)
  UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(property_cell_space)
  UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(lo_space)
#undef UPDATE_COUNTERS_FOR_SPACE
#undef UPDATE_FRAGMENTATION_FOR_SPACE
#undef UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE

#ifdef DEBUG
  ReportStatisticsAfterGC();
#endif  // DEBUG
#ifdef ENABLE_DEBUGGER_SUPPORT
  isolate_->debug()->AfterGarbageCollection();
#endif  // ENABLE_DEBUGGER_SUPPORT
}


void Heap::CollectAllGarbage(int flags,
                             const char* gc_reason,
                             const v8::GCCallbackFlags gc_callback_flags) {
  // Since we are ignoring the return value, the exact choice of space does
  // not matter, so long as we do not specify NEW_SPACE, which would not
  // cause a full GC.
  mark_compact_collector_.SetFlags(flags);
  CollectGarbage(OLD_POINTER_SPACE, gc_reason, gc_callback_flags);
  mark_compact_collector_.SetFlags(kNoGCFlags);
}


void Heap::CollectAllAvailableGarbage(const char* gc_reason) {
  // Since we are ignoring the return value, the exact choice of space does
  // not matter, so long as we do not specify NEW_SPACE, which would not
  // cause a full GC.
  // Major GC would invoke weak handle callbacks on weakly reachable
  // handles, but won't collect weakly reachable objects until the next
  // major GC. Therefore, if we collect aggressively and a weak handle
  // callback has been invoked, we rerun major GC to release objects which
  // become garbage.
  // Note: as weak callbacks can execute arbitrary code, we cannot
  // hope that eventually there will be no weak callback invocations.
  // Therefore stop recollecting after several attempts.
  if (isolate()->concurrent_recompilation_enabled()) {
    // The optimizing compiler may be unnecessarily holding on to memory.
    DisallowHeapAllocation no_recursive_gc;
    isolate()->optimizing_compiler_thread()->Flush();
  }
  mark_compact_collector()->SetFlags(kMakeHeapIterableMask |
                                     kReduceMemoryFootprintMask);
  isolate_->compilation_cache()->Clear();
  const int kMaxNumberOfAttempts = 7;
  const int kMinNumberOfAttempts = 2;
  for (int attempt = 0; attempt < kMaxNumberOfAttempts; attempt++) {
    if (!CollectGarbage(OLD_POINTER_SPACE, MARK_COMPACTOR, gc_reason, NULL) &&
        attempt + 1 >= kMinNumberOfAttempts) {
      break;
    }
  }
  mark_compact_collector()->SetFlags(kNoGCFlags);
  new_space_.Shrink();
  UncommitFromSpace();
  incremental_marking()->UncommitMarkingDeque();
}


bool Heap::CollectGarbage(AllocationSpace space,
                          GarbageCollector collector,
                          const char* gc_reason,
                          const char* collector_reason,
                          const v8::GCCallbackFlags gc_callback_flags) {
  // The VM is in the GC state until exiting this function.
  VMState<GC> state(isolate_);

#ifdef DEBUG
  // Reset the allocation timeout to the GC interval, but make sure to
  // allow at least a few allocations after a collection. The reason
  // for this is that we have a lot of allocation sequences and we
  // assume that a garbage collection will allow the subsequent
  // allocation attempts to go through.
  allocation_timeout_ = Max(6, FLAG_gc_interval);
#endif

  if (collector == SCAVENGER && !incremental_marking()->IsStopped()) {
    if (FLAG_trace_incremental_marking) {
      PrintF("[IncrementalMarking] Scavenge during marking.\n");
    }
  }

  if (collector == MARK_COMPACTOR &&
      !mark_compact_collector()->abort_incremental_marking() &&
      !incremental_marking()->IsStopped() &&
      !incremental_marking()->should_hurry() &&
      FLAG_incremental_marking_steps) {
    // Make progress in incremental marking.
    const intptr_t kStepSizeWhenDelayedByScavenge = 1 * MB;
    incremental_marking()->Step(kStepSizeWhenDelayedByScavenge,
                                IncrementalMarking::NO_GC_VIA_STACK_GUARD);
    if (!incremental_marking()->IsComplete()) {
      if (FLAG_trace_incremental_marking) {
        PrintF("[IncrementalMarking] Delaying MarkSweep.\n");
      }
      collector = SCAVENGER;
      collector_reason = "incremental marking delaying mark-sweep";
    }
  }

  bool next_gc_likely_to_collect_more = false;

  { GCTracer tracer(this, gc_reason, collector_reason);
    ASSERT(AllowHeapAllocation::IsAllowed());
    DisallowHeapAllocation no_allocation_during_gc;
    GarbageCollectionPrologue();
    // The GC count was incremented in the prologue. Tell the tracer about
    // it.
    tracer.set_gc_count(gc_count_);

    // Tell the tracer which collector we've selected.
    tracer.set_collector(collector);

    {
      HistogramTimerScope histogram_timer_scope(
          (collector == SCAVENGER) ? isolate_->counters()->gc_scavenger()
                                   : isolate_->counters()->gc_compactor());
      next_gc_likely_to_collect_more =
          PerformGarbageCollection(collector, &tracer, gc_callback_flags);
    }

    GarbageCollectionEpilogue();
  }

  // Start incremental marking for the next cycle. The heap snapshot
  // generator needs incremental marking to stay off after it aborted.
  if (!mark_compact_collector()->abort_incremental_marking() &&
      incremental_marking()->IsStopped() &&
      incremental_marking()->WorthActivating() &&
      NextGCIsLikelyToBeFull()) {
    incremental_marking()->Start();
  }

  return next_gc_likely_to_collect_more;
}


int Heap::NotifyContextDisposed() {
  if (isolate()->concurrent_recompilation_enabled()) {
    // Flush the queued recompilation tasks.
    isolate()->optimizing_compiler_thread()->Flush();
  }
  flush_monomorphic_ics_ = true;
  return ++contexts_disposed_;
}


void Heap::PerformScavenge() {
  GCTracer tracer(this, NULL, NULL);
  if (incremental_marking()->IsStopped()) {
    PerformGarbageCollection(SCAVENGER, &tracer);
  } else {
    PerformGarbageCollection(MARK_COMPACTOR, &tracer);
  }
}


void Heap::MoveElements(FixedArray* array,
                        int dst_index,
                        int src_index,
                        int len) {
  if (len == 0) return;

  ASSERT(array->map() != fixed_cow_array_map());
  Object** dst_objects = array->data_start() + dst_index;
  OS::MemMove(dst_objects,
              array->data_start() + src_index,
              len * kPointerSize);
  if (!InNewSpace(array)) {
    for (int i = 0; i < len; i++) {
      // TODO(hpayer): check store buffer for entries
      if (InNewSpace(dst_objects[i])) {
        RecordWrite(array->address(), array->OffsetOfElementAt(dst_index + i));
      }
    }
  }
  incremental_marking()->RecordWrites(array);
}
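
// Illustrative (hypothetical) call site: shifting a backing store left by
// one element, as Array.prototype.shift might, would be
//   heap->MoveElements(elms, 0 /* dst_index */, 1 /* src_index */, len - 1);
// and relies on the write-barrier bookkeeping above when the array lives
// outside new space.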


#ifdef VERIFY_HEAP
// Helper class for verifying the string table.
class StringTableVerifier : public ObjectVisitor {
 public:
  void VisitPointers(Object** start, Object** end) {
    // Visit all HeapObject pointers in [start, end).
    for (Object** p = start; p < end; p++) {
      if ((*p)->IsHeapObject()) {
        // Check that the string is actually internalized.
        CHECK((*p)->IsTheHole() || (*p)->IsUndefined() ||
              (*p)->IsInternalizedString());
      }
    }
  }
};


static void VerifyStringTable(Heap* heap) {
  StringTableVerifier verifier;
  heap->string_table()->IterateElements(&verifier);
}
#endif  // VERIFY_HEAP


static bool AbortIncrementalMarkingAndCollectGarbage(
    Heap* heap,
    AllocationSpace space,
    const char* gc_reason = NULL) {
  heap->mark_compact_collector()->SetFlags(Heap::kAbortIncrementalMarkingMask);
  bool result = heap->CollectGarbage(space, gc_reason);
  heap->mark_compact_collector()->SetFlags(Heap::kNoGCFlags);
  return result;
}


void Heap::ReserveSpace(int *sizes, Address *locations_out) {
  bool gc_performed = true;
  int counter = 0;
  static const int kThreshold = 20;
  while (gc_performed && counter++ < kThreshold) {
    gc_performed = false;
    ASSERT(NEW_SPACE == FIRST_PAGED_SPACE - 1);
    for (int space = NEW_SPACE; space <= LAST_PAGED_SPACE; space++) {
      if (sizes[space] != 0) {
        MaybeObject* allocation;
        if (space == NEW_SPACE) {
          allocation = new_space()->AllocateRaw(sizes[space]);
        } else {
          allocation = paged_space(space)->AllocateRaw(sizes[space]);
        }
        FreeListNode* node;
        if (!allocation->To<FreeListNode>(&node)) {
          if (space == NEW_SPACE) {
            Heap::CollectGarbage(NEW_SPACE,
                                 "failed to reserve space in the new space");
          } else {
            AbortIncrementalMarkingAndCollectGarbage(
                this,
                static_cast<AllocationSpace>(space),
                "failed to reserve space in paged space");
          }
          gc_performed = true;
          break;
        } else {
          // Mark with a free list node, in case we have a GC before
          // allocating later.
          node->set_size(this, sizes[space]);
          locations_out[space] = node->address();
        }
      }
    }
  }

  if (gc_performed) {
    // Failed to reserve the space after several attempts.
    V8::FatalProcessOutOfMemory("Heap::ReserveSpace");
  }
}
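
// Callers (such as the snapshot deserializer) pass the bytes needed per
// space in sizes[]; locations_out[] receives the address of a reserved
// free-list node per space, so that subsequent allocation into the
// reservation cannot fail.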


void Heap::EnsureFromSpaceIsCommitted() {
  if (new_space_.CommitFromSpaceIfNeeded()) return;

  // Committing memory to from space failed.
  // Memory is exhausted and we will die.
  V8::FatalProcessOutOfMemory("Committing semi space failed.");
}


void Heap::ClearJSFunctionResultCaches() {
  if (isolate_->bootstrapper()->IsActive()) return;

  Object* context = native_contexts_list_;
  while (!context->IsUndefined()) {
    // Get the caches for this context. GC can happen when the context
    // is not fully initialized, so the caches can be undefined.
    Object* caches_or_undefined =
        Context::cast(context)->get(Context::JSFUNCTION_RESULT_CACHES_INDEX);
    if (!caches_or_undefined->IsUndefined()) {
      FixedArray* caches = FixedArray::cast(caches_or_undefined);

      int length = caches->length();
      for (int i = 0; i < length; i++) {
        JSFunctionResultCache::cast(caches->get(i))->Clear();
      }
    }
    // Get the next context:
    context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
  }
}


void Heap::ClearNormalizedMapCaches() {
  if (isolate_->bootstrapper()->IsActive() &&
      !incremental_marking()->IsMarking()) {
    return;
  }

  Object* context = native_contexts_list_;
  while (!context->IsUndefined()) {
    // GC can happen when the context is not fully initialized,
    // so the cache can be undefined.
    Object* cache =
        Context::cast(context)->get(Context::NORMALIZED_MAP_CACHE_INDEX);
    if (!cache->IsUndefined()) {
      NormalizedMapCache::cast(cache)->Clear();
    }
    context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
  }
}


void Heap::UpdateSurvivalRateTrend(int start_new_space_size) {
  if (start_new_space_size == 0) return;

  double survival_rate =
      (static_cast<double>(young_survivors_after_last_gc_) * 100) /
      start_new_space_size;

  if (survival_rate > kYoungSurvivalRateHighThreshold) {
    high_survival_rate_period_length_++;
  } else {
    high_survival_rate_period_length_ = 0;
  }

  if (survival_rate < kYoungSurvivalRateLowThreshold) {
    low_survival_rate_period_length_++;
  } else {
    low_survival_rate_period_length_ = 0;
  }

  double survival_rate_diff = survival_rate_ - survival_rate;

  if (survival_rate_diff > kYoungSurvivalRateAllowedDeviation) {
    set_survival_rate_trend(DECREASING);
  } else if (survival_rate_diff < -kYoungSurvivalRateAllowedDeviation) {
    set_survival_rate_trend(INCREASING);
  } else {
    set_survival_rate_trend(STABLE);
  }

  survival_rate_ = survival_rate;
}
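
// Example: if 1.2 MB out of an 8 MB new space survived the last scavenge,
// survival_rate is 15.0; the trend only reads INCREASING or DECREASING when
// the difference from the previous rate exceeds
// kYoungSurvivalRateAllowedDeviation, otherwise it stays STABLE.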


bool Heap::PerformGarbageCollection(
    GarbageCollector collector,
    GCTracer* tracer,
    const v8::GCCallbackFlags gc_callback_flags) {
  bool next_gc_likely_to_collect_more = false;

  if (collector != SCAVENGER) {
    PROFILE(isolate_, CodeMovingGCEvent());
  }

#ifdef VERIFY_HEAP
  if (FLAG_verify_heap) {
    VerifyStringTable(this);
  }
#endif

  GCType gc_type =
      collector == MARK_COMPACTOR ? kGCTypeMarkSweepCompact : kGCTypeScavenge;

  {
    GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
    VMState<EXTERNAL> state(isolate_);
    HandleScope handle_scope(isolate_);
    CallGCPrologueCallbacks(gc_type, kNoGCCallbackFlags);
  }

  EnsureFromSpaceIsCommitted();

  int start_new_space_size = Heap::new_space()->SizeAsInt();

  if (IsHighSurvivalRate()) {
    // We speed up the incremental marker if it is running so that it
    // does not fall behind the rate of promotion, which would cause a
    // constantly growing old space.
    incremental_marking()->NotifyOfHighPromotionRate();
  }

  if (collector == MARK_COMPACTOR) {
    // Perform mark-sweep with optional compaction.
    MarkCompact(tracer);
    sweep_generation_++;

    UpdateSurvivalRateTrend(start_new_space_size);

    size_of_old_gen_at_last_old_space_gc_ = PromotedSpaceSizeOfObjects();

    old_generation_allocation_limit_ =
        OldGenerationAllocationLimit(size_of_old_gen_at_last_old_space_gc_);

    old_gen_exhausted_ = false;
  } else {
    tracer_ = tracer;
    Scavenge();
    tracer_ = NULL;

    UpdateSurvivalRateTrend(start_new_space_size);
  }

  if (!new_space_high_promotion_mode_active_ &&
      new_space_.Capacity() == new_space_.MaximumCapacity() &&
      IsStableOrIncreasingSurvivalTrend() &&
      IsHighSurvivalRate()) {
    // Stable high survival rates even though the young generation is at
    // maximum capacity indicate that most objects will be promoted.
    // To decrease scavenger pauses and final mark-sweep pauses, we
    // have to limit the maximal capacity of the young generation.
    SetNewSpaceHighPromotionModeActive(true);
    if (FLAG_trace_gc) {
      PrintPID("Limited new space size due to high promotion rate: %d MB\n",
               new_space_.InitialCapacity() / MB);
    }
    // The high promotion mode is our indicator to turn on pretenuring. We have
    // to deoptimize all optimized code in global pretenuring mode and all
    // code which should be tenured in local pretenuring mode.
    if (FLAG_pretenuring) {
      if (FLAG_allocation_site_pretenuring) {
        ResetAllAllocationSitesDependentCode(NOT_TENURED);
      } else {
        isolate_->stack_guard()->FullDeopt();
      }
    }
  } else if (new_space_high_promotion_mode_active_ &&
             IsStableOrDecreasingSurvivalTrend() &&
             IsLowSurvivalRate()) {
    // Decreasing low survival rates might indicate that the above high
    // promotion mode is over and we should allow the young generation
    // to grow again.
    SetNewSpaceHighPromotionModeActive(false);
    if (FLAG_trace_gc) {
      PrintPID("Unlimited new space size due to low promotion rate: %d MB\n",
               new_space_.MaximumCapacity() / MB);
    }
    // Trigger deoptimization here to turn off global pretenuring as soon as
    // possible.
    if (FLAG_pretenuring && !FLAG_allocation_site_pretenuring) {
      isolate_->stack_guard()->FullDeopt();
    }
  }

  if (new_space_high_promotion_mode_active_ &&
      new_space_.Capacity() > new_space_.InitialCapacity()) {
    new_space_.Shrink();
  }

  isolate_->counters()->objs_since_last_young()->Set(0);

  // Callbacks that fire after this point might trigger nested GCs and
  // restart incremental marking, so the assertion can't be moved down.
  ASSERT(collector == SCAVENGER || incremental_marking()->IsStopped());

  gc_post_processing_depth_++;
  { AllowHeapAllocation allow_allocation;
    GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
    next_gc_likely_to_collect_more =
        isolate_->global_handles()->PostGarbageCollectionProcessing(
            collector, tracer);
  }
  gc_post_processing_depth_--;

  isolate_->eternal_handles()->PostGarbageCollectionProcessing(this);

  // Update relocatables.
  Relocatable::PostGarbageCollectionProcessing(isolate_);

  if (collector == MARK_COMPACTOR) {
    // Register the amount of external allocated memory.
    amount_of_external_allocated_memory_at_last_global_gc_ =
        amount_of_external_allocated_memory_;
  }

  {
    GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
    VMState<EXTERNAL> state(isolate_);
    HandleScope handle_scope(isolate_);
    CallGCEpilogueCallbacks(gc_type, gc_callback_flags);
  }

#ifdef VERIFY_HEAP
  if (FLAG_verify_heap) {
    VerifyStringTable(this);
  }
#endif

  return next_gc_likely_to_collect_more;
}


void Heap::CallGCPrologueCallbacks(GCType gc_type, GCCallbackFlags flags) {
  for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
    if (gc_type & gc_prologue_callbacks_[i].gc_type) {
      if (!gc_prologue_callbacks_[i].pass_isolate_) {
        v8::GCPrologueCallback callback =
            reinterpret_cast<v8::GCPrologueCallback>(
                gc_prologue_callbacks_[i].callback);
        callback(gc_type, flags);
      } else {
        v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(this->isolate());
        gc_prologue_callbacks_[i].callback(isolate, gc_type, flags);
      }
    }
  }
}


void Heap::CallGCEpilogueCallbacks(GCType gc_type,
                                   GCCallbackFlags gc_callback_flags) {
  for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
    if (gc_type & gc_epilogue_callbacks_[i].gc_type) {
      if (!gc_epilogue_callbacks_[i].pass_isolate_) {
        v8::GCPrologueCallback callback =
            reinterpret_cast<v8::GCPrologueCallback>(
                gc_epilogue_callbacks_[i].callback);
        callback(gc_type, gc_callback_flags);
      } else {
        v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(this->isolate());
        gc_epilogue_callbacks_[i].callback(
            isolate, gc_type, gc_callback_flags);
      }
    }
  }
}


void Heap::MarkCompact(GCTracer* tracer) {
  gc_state_ = MARK_COMPACT;
  LOG(isolate_, ResourceEvent("markcompact", "begin"));

  uint64_t size_of_objects_before_gc = SizeOfObjects();

  mark_compact_collector_.Prepare(tracer);

  ms_count_++;
  tracer->set_full_gc_count(ms_count_);

  MarkCompactPrologue();

  mark_compact_collector_.CollectGarbage();

  LOG(isolate_, ResourceEvent("markcompact", "end"));

  gc_state_ = NOT_IN_GC;

  isolate_->counters()->objs_since_last_full()->Set(0);

  flush_monomorphic_ics_ = false;

  if (FLAG_allocation_site_pretenuring) {
    EvaluateOldSpaceLocalPretenuring(size_of_objects_before_gc);
  }
}


void Heap::MarkCompactPrologue() {
  // At any old GC, clear the keyed lookup cache to enable collection of
  // unused maps.
  isolate_->keyed_lookup_cache()->Clear();
  isolate_->context_slot_cache()->Clear();
  isolate_->descriptor_lookup_cache()->Clear();
  RegExpResultsCache::Clear(string_split_cache());
  RegExpResultsCache::Clear(regexp_multiple_cache());

  isolate_->compilation_cache()->MarkCompactPrologue();

  CompletelyClearInstanceofCache();

  FlushNumberStringCache();
  if (FLAG_cleanup_code_caches_at_gc) {
    polymorphic_code_cache()->set_cache(undefined_value());
  }

  ClearNormalizedMapCaches();
}


// Helper class for copying HeapObjects
class ScavengeVisitor: public ObjectVisitor {
 public:
  explicit ScavengeVisitor(Heap* heap) : heap_(heap) {}

  void VisitPointer(Object** p) { ScavengePointer(p); }

  void VisitPointers(Object** start, Object** end) {
    // Copy all HeapObject pointers in [start, end)
    for (Object** p = start; p < end; p++) ScavengePointer(p);
  }

 private:
  void ScavengePointer(Object** p) {
    Object* object = *p;
    if (!heap_->InNewSpace(object)) return;
    Heap::ScavengeObject(reinterpret_cast<HeapObject**>(p),
                         reinterpret_cast<HeapObject*>(object));
  }

  Heap* heap_;
};


#ifdef VERIFY_HEAP
// Visitor class to verify pointers in code or data space do not point into
// new space.
class VerifyNonPointerSpacePointersVisitor: public ObjectVisitor {
 public:
  explicit VerifyNonPointerSpacePointersVisitor(Heap* heap) : heap_(heap) {}
  void VisitPointers(Object** start, Object** end) {
    for (Object** current = start; current < end; current++) {
      if ((*current)->IsHeapObject()) {
        CHECK(!heap_->InNewSpace(HeapObject::cast(*current)));
      }
    }
  }

 private:
  Heap* heap_;
};


static void VerifyNonPointerSpacePointers(Heap* heap) {
  // Verify that there are no pointers to new space in spaces where we
  // do not expect them.
  VerifyNonPointerSpacePointersVisitor v(heap);
  HeapObjectIterator code_it(heap->code_space());
  for (HeapObject* object = code_it.Next();
       object != NULL; object = code_it.Next())
    object->Iterate(&v);

  // The old data space was normally swept conservatively so that the iterator
  // doesn't work, so we normally skip the next bit.
  if (!heap->old_data_space()->was_swept_conservatively()) {
    HeapObjectIterator data_it(heap->old_data_space());
    for (HeapObject* object = data_it.Next();
         object != NULL; object = data_it.Next())
      object->Iterate(&v);
  }
}
#endif  // VERIFY_HEAP


void Heap::CheckNewSpaceExpansionCriteria() {
  if (new_space_.Capacity() < new_space_.MaximumCapacity() &&
      survived_since_last_expansion_ > new_space_.Capacity() &&
      !new_space_high_promotion_mode_active_) {
    // Grow the size of new space if there is room to grow, enough data
    // has survived scavenge since the last expansion and we are not in
    // high promotion mode.
    new_space_.Grow();
    survived_since_last_expansion_ = 0;
  }
}


static bool IsUnscavengedHeapObject(Heap* heap, Object** p) {
  return heap->InNewSpace(*p) &&
         !HeapObject::cast(*p)->map_word().IsForwardingAddress();
}


void Heap::ScavengeStoreBufferCallback(
    Heap* heap,
    MemoryChunk* page,
    StoreBufferEvent event) {
  heap->store_buffer_rebuilder_.Callback(page, event);
}


void StoreBufferRebuilder::Callback(MemoryChunk* page, StoreBufferEvent event) {
  if (event == kStoreBufferStartScanningPagesEvent) {
    start_of_current_page_ = NULL;
    current_page_ = NULL;
  } else if (event == kStoreBufferScanningPageEvent) {
    if (current_page_ != NULL) {
      // If this page already overflowed the store buffer during this
      // iteration...
      if (current_page_->scan_on_scavenge()) {
        // ...then we should wipe out the entries that have been added for it.
        store_buffer_->SetTop(start_of_current_page_);
      } else if (store_buffer_->Top() - start_of_current_page_ >=
                 (store_buffer_->Limit() - store_buffer_->Top()) >> 2) {
        // Did we find too many pointers in the previous page? The heuristic
        // is that no page can take more than 1/5 the remaining slots in the
        // store buffer.
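        // For example, if 125 slots remained when scanning of this page
        // began and the page consumed 25 of them, then
        // Top() - start_of_current_page_ == 25 and
        // (Limit() - Top()) >> 2 == 100 / 4 == 25, so the page is flagged:
        // it used a full 1/5 of what was available.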
        current_page_->set_scan_on_scavenge(true);
        store_buffer_->SetTop(start_of_current_page_);
      } else {
        // In this case the page we scanned took a reasonable number of slots
        // in the store buffer. It has now been rehabilitated and is no
        // longer marked scan_on_scavenge.
        ASSERT(!current_page_->scan_on_scavenge());
      }
    }
    start_of_current_page_ = store_buffer_->Top();
    current_page_ = page;
  } else if (event == kStoreBufferFullEvent) {
    // The current page overflowed the store buffer again. Wipe out its entries
    // in the store buffer and mark it scan-on-scavenge again. This may happen
    // several times while scanning.
    if (current_page_ == NULL) {
      // Store Buffer overflowed while scanning promoted objects. These are not
      // in any particular page, though they are likely to be clustered by the
      // allocation routines.
      store_buffer_->EnsureSpace(StoreBuffer::kStoreBufferSize / 2);
    } else {
      // Store Buffer overflowed while scanning a particular old space page for
      // pointers to new space.
      ASSERT(current_page_ == page);
      ASSERT(page != NULL);
      current_page_->set_scan_on_scavenge(true);
      ASSERT(start_of_current_page_ != store_buffer_->Top());
      store_buffer_->SetTop(start_of_current_page_);
    }
  }
}


void PromotionQueue::Initialize() {
  // Assumes that a NewSpacePage exactly fits a number of promotion queue
  // entries (where each is a pair of intptr_t). This allows us to simplify
  // the test for when to switch pages.
  ASSERT((Page::kPageSize - MemoryChunk::kBodyOffset) % (2 * kPointerSize)
         == 0);
  limit_ = reinterpret_cast<intptr_t*>(heap_->new_space()->ToSpaceStart());
  front_ = rear_ =
      reinterpret_cast<intptr_t*>(heap_->new_space()->ToSpaceEnd());
  emergency_stack_ = NULL;
}
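
// Each queue entry is a two-word (size, HeapObject*) pair; the queue grows
// downward from ToSpaceEnd(), which is why RelocateQueueHead below pops the
// size word first and then the object pointer.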


void PromotionQueue::RelocateQueueHead() {
  ASSERT(emergency_stack_ == NULL);

  Page* p = Page::FromAllocationTop(reinterpret_cast<Address>(rear_));
  intptr_t* head_start = rear_;
  intptr_t* head_end =
      Min(front_, reinterpret_cast<intptr_t*>(p->area_end()));

  int entries_count =
      static_cast<int>(head_end - head_start) / kEntrySizeInWords;

  emergency_stack_ = new List<Entry>(2 * entries_count);

  while (head_start != head_end) {
    int size = static_cast<int>(*(head_start++));
    HeapObject* obj = reinterpret_cast<HeapObject*>(*(head_start++));
    emergency_stack_->Add(Entry(obj, size));
  }
  rear_ = head_end;
}


class ScavengeWeakObjectRetainer : public WeakObjectRetainer {
 public:
  explicit ScavengeWeakObjectRetainer(Heap* heap) : heap_(heap) { }

  virtual Object* RetainAs(Object* object) {
    if (!heap_->InFromSpace(object)) {
      return object;
    }

    MapWord map_word = HeapObject::cast(object)->map_word();
    if (map_word.IsForwardingAddress()) {
      return map_word.ToForwardingAddress();
    }
    return NULL;
  }

 private:
  Heap* heap_;
};


void Heap::Scavenge() {
  RelocationLock relocation_lock(this);

#ifdef VERIFY_HEAP
  if (FLAG_verify_heap) VerifyNonPointerSpacePointers(this);
#endif

  gc_state_ = SCAVENGE;

  // Implements Cheney's copying algorithm
  LOG(isolate_, ResourceEvent("scavenge", "begin"));

  // Clear descriptor cache.
  isolate_->descriptor_lookup_cache()->Clear();

  // Used for updating survived_since_last_expansion_ at function end.
  intptr_t survived_watermark = PromotedSpaceSizeOfObjects();

  CheckNewSpaceExpansionCriteria();

  SelectScavengingVisitorsTable();

  incremental_marking()->PrepareForScavenge();

  paged_space(OLD_DATA_SPACE)->EnsureSweeperProgress(new_space_.Size());
  paged_space(OLD_POINTER_SPACE)->EnsureSweeperProgress(new_space_.Size());

  // Flip the semispaces. After flipping, to space is empty, from space has
  // live objects.
  new_space_.Flip();
  new_space_.ResetAllocationInfo();

  // We need to sweep newly copied objects which can be either in the
  // to space or promoted to the old generation. For to-space
  // objects, we treat the bottom of the to space as a queue. Newly
  // copied and unswept objects lie between a 'front' mark and the
  // allocation pointer.
  //
  // Promoted objects can go into various old-generation spaces, and
  // can be allocated internally in the spaces (from the free list).
  // We treat the top of the to space as a queue of addresses of
  // promoted objects. The addresses of newly promoted and unswept
  // objects lie between a 'front' mark and a 'rear' mark that is
  // updated as a side effect of promoting an object.
  //
  // There is guaranteed to be enough room at the top of the to space
  // for the addresses of promoted objects: every object promoted
  // frees up its size in bytes from the top of the new space, and
  // objects are at least one pointer in size.
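  //
  // Illustration: to-space therefore serves as two queues at once. The
  // scavenge front (new_space_front) advances from ToSpaceStart() toward
  // the allocation top, while the promotion queue's entries grow downward
  // from ToSpaceEnd() (see PromotionQueue::Initialize above).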
  Address new_space_front = new_space_.ToSpaceStart();
  promotion_queue_.Initialize();

#ifdef DEBUG
  store_buffer()->Clean();
#endif

  ScavengeVisitor scavenge_visitor(this);
  // Copy roots.
  IterateRoots(&scavenge_visitor, VISIT_ALL_IN_SCAVENGE);

  // Copy objects reachable from the old generation.
  {
    StoreBufferRebuildScope scope(this,
                                  store_buffer(),
                                  &ScavengeStoreBufferCallback);
    store_buffer()->IteratePointersToNewSpace(&ScavengeObject);
  }

  // Copy objects reachable from simple cells by scavenging cell values
  // directly.
  HeapObjectIterator cell_iterator(cell_space_);
  for (HeapObject* heap_object = cell_iterator.Next();
       heap_object != NULL;
       heap_object = cell_iterator.Next()) {
    if (heap_object->IsCell()) {
      Cell* cell = Cell::cast(heap_object);
      Address value_address = cell->ValueAddress();
      scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(value_address));
    }
  }

  // Copy objects reachable from global property cells by scavenging global
  // property cell values directly.
  HeapObjectIterator js_global_property_cell_iterator(property_cell_space_);
  for (HeapObject* heap_object = js_global_property_cell_iterator.Next();
       heap_object != NULL;
       heap_object = js_global_property_cell_iterator.Next()) {
    if (heap_object->IsPropertyCell()) {
      PropertyCell* cell = PropertyCell::cast(heap_object);
      Address value_address = cell->ValueAddress();
      scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(value_address));
      Address type_address = cell->TypeAddress();
      scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(type_address));
    }
  }

  // Copy objects reachable from the code flushing candidates list.
  MarkCompactCollector* collector = mark_compact_collector();
  if (collector->is_code_flushing_enabled()) {
    collector->code_flusher()->IteratePointersToFromSpace(&scavenge_visitor);
  }

  // Scavenge objects reachable from the native contexts list directly.
  scavenge_visitor.VisitPointer(BitCast<Object**>(&native_contexts_list_));

  new_space_front = DoScavenge(&scavenge_visitor, new_space_front);

  while (isolate()->global_handles()->IterateObjectGroups(
             &scavenge_visitor, &IsUnscavengedHeapObject)) {
    new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
  }
  isolate()->global_handles()->RemoveObjectGroups();
  isolate()->global_handles()->RemoveImplicitRefGroups();

  isolate_->global_handles()->IdentifyNewSpaceWeakIndependentHandles(
      &IsUnscavengedHeapObject);
  isolate_->global_handles()->IterateNewSpaceWeakIndependentRoots(
      &scavenge_visitor);
  new_space_front = DoScavenge(&scavenge_visitor, new_space_front);

  UpdateNewSpaceReferencesInExternalStringTable(
      &UpdateNewSpaceReferenceInExternalStringTableEntry);

  promotion_queue_.Destroy();

  incremental_marking()->UpdateMarkingDequeAfterScavenge();

  ScavengeWeakObjectRetainer weak_object_retainer(this);
  ProcessWeakReferences(&weak_object_retainer);

  ASSERT(new_space_front == new_space_.top());

  // Set age mark.
  new_space_.set_age_mark(new_space_.top());

  new_space_.LowerInlineAllocationLimit(
      new_space_.inline_allocation_limit_step());

  // Update how much has survived scavenge.
  IncrementYoungSurvivorsCounter(static_cast<int>(
      (PromotedSpaceSizeOfObjects() - survived_watermark) + new_space_.Size()));

  ProcessPretenuringFeedback();

  LOG(isolate_, ResourceEvent("scavenge", "end"));

  gc_state_ = NOT_IN_GC;

  scavenges_since_last_idle_round_++;
}


String* Heap::UpdateNewSpaceReferenceInExternalStringTableEntry(Heap* heap,
                                                                Object** p) {
  MapWord first_word = HeapObject::cast(*p)->map_word();

  if (!first_word.IsForwardingAddress()) {
    // An unreachable external string can be finalized.
    heap->FinalizeExternalString(String::cast(*p));
    return NULL;
  }

  // String is still reachable.
  return String::cast(first_word.ToForwardingAddress());
}


void Heap::UpdateNewSpaceReferencesInExternalStringTable(
    ExternalStringTableUpdaterCallback updater_func) {
#ifdef VERIFY_HEAP
  if (FLAG_verify_heap) {
    external_string_table_.Verify();
  }
#endif

  if (external_string_table_.new_space_strings_.is_empty()) return;

  Object** start = &external_string_table_.new_space_strings_[0];
  Object** end = start + external_string_table_.new_space_strings_.length();
  Object** last = start;

  for (Object** p = start; p < end; ++p) {
    ASSERT(InFromSpace(*p));
    String* target = updater_func(this, p);

    if (target == NULL) continue;

    ASSERT(target->IsExternalString());

    if (InNewSpace(target)) {
      // String is still in new space. Update the table entry.
      *last = target;
      ++last;
    } else {
      // String got promoted. Move it to the old string list.
      external_string_table_.AddOldString(target);
    }
  }

  ASSERT(last <= end);
  external_string_table_.ShrinkNewStrings(static_cast<int>(last - start));
}


void Heap::UpdateReferencesInExternalStringTable(
    ExternalStringTableUpdaterCallback updater_func) {
  // Update old space string references.
  if (external_string_table_.old_space_strings_.length() > 0) {
    Object** start = &external_string_table_.old_space_strings_[0];
    Object** end = start + external_string_table_.old_space_strings_.length();
    for (Object** p = start; p < end; ++p) *p = updater_func(this, p);
  }

  UpdateNewSpaceReferencesInExternalStringTable(updater_func);
}


template <class T>
struct WeakListVisitor;


template <class T>
static Object* VisitWeakList(Heap* heap,
                             Object* list,
                             WeakObjectRetainer* retainer,
                             bool record_slots) {
  Object* undefined = heap->undefined_value();
  Object* head = undefined;
  T* tail = NULL;
  MarkCompactCollector* collector = heap->mark_compact_collector();
  while (list != undefined) {
    // Check whether to keep the candidate in the list.
    T* candidate = reinterpret_cast<T*>(list);
    Object* retained = retainer->RetainAs(list);
    if (retained != NULL) {
      if (head == undefined) {
        // First element in the list.
        head = retained;
      } else {
        // Subsequent elements in the list.
        ASSERT(tail != NULL);
        WeakListVisitor<T>::SetWeakNext(tail, retained);
        if (record_slots) {
          Object** next_slot =
              HeapObject::RawField(tail, WeakListVisitor<T>::WeakNextOffset());
          collector->RecordSlot(next_slot, next_slot, retained);
        }
      }
      // Retained object is new tail.
      ASSERT(!retained->IsUndefined());
      candidate = reinterpret_cast<T*>(retained);
      tail = candidate;

      // tail is a live object, visit it.
      WeakListVisitor<T>::VisitLiveObject(
          heap, tail, retainer, record_slots);
    } else {
      WeakListVisitor<T>::VisitPhantomObject(heap, candidate);
    }

    // Move to next element in the list.
    list = WeakListVisitor<T>::WeakNext(candidate);
  }

  // Terminate the list if there is one or more elements.
  if (tail != NULL) {
    WeakListVisitor<T>::SetWeakNext(tail, undefined);
  }
  return head;
}
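
// VisitWeakList walks a weak list threaded through the objects themselves
// via WeakListVisitor<T>'s WeakNext/SetWeakNext accessors: entries whose
// retainer returns NULL are unlinked (and visited as phantom objects),
// survivors are relinked, and updated next-slots are recorded with the
// compactor when record_slots is true. The specializations below supply the
// per-type link accessors.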


template <>
struct WeakListVisitor<JSFunction> {
  static void SetWeakNext(JSFunction* function, Object* next) {
    function->set_next_function_link(next);
  }

  static Object* WeakNext(JSFunction* function) {
    return function->next_function_link();
  }

  static int WeakNextOffset() {
    return JSFunction::kNextFunctionLinkOffset;
  }

  static void VisitLiveObject(Heap*, JSFunction*,
                              WeakObjectRetainer*, bool) {
  }

  static void VisitPhantomObject(Heap*, JSFunction*) {
  }
};


template <>
struct WeakListVisitor<Code> {
  static void SetWeakNext(Code* code, Object* next) {
    code->set_next_code_link(next);
  }

  static Object* WeakNext(Code* code) {
    return code->next_code_link();
  }

  static int WeakNextOffset() {
    return Code::kNextCodeLinkOffset;
  }

  static void VisitLiveObject(Heap*, Code*,
                              WeakObjectRetainer*, bool) {
  }

  static void VisitPhantomObject(Heap*, Code*) {
  }
};


template <>
struct WeakListVisitor<Context> {
  static void SetWeakNext(Context* context, Object* next) {
    context->set(Context::NEXT_CONTEXT_LINK,
                 next,
                 UPDATE_WRITE_BARRIER);
  }

  static Object* WeakNext(Context* context) {
    return context->get(Context::NEXT_CONTEXT_LINK);
  }

  static void VisitLiveObject(Heap* heap,
                              Context* context,
                              WeakObjectRetainer* retainer,
                              bool record_slots) {
    // Process the three weak lists linked off the context.
    DoWeakList<JSFunction>(heap, context, retainer, record_slots,
        Context::OPTIMIZED_FUNCTIONS_LIST);
    DoWeakList<Code>(heap, context, retainer, record_slots,
        Context::OPTIMIZED_CODE_LIST);
    DoWeakList<Code>(heap, context, retainer, record_slots,
        Context::DEOPTIMIZED_CODE_LIST);
  }

  template<class T>
  static void DoWeakList(Heap* heap,
                         Context* context,
                         WeakObjectRetainer* retainer,
                         bool record_slots,
                         int index) {
    // Visit the weak list, removing dead intermediate elements.
    Object* list_head = VisitWeakList<T>(heap, context->get(index), retainer,
        record_slots);

    // Update the list head.
    context->set(index, list_head, UPDATE_WRITE_BARRIER);

    if (record_slots) {
      // Record the updated slot if necessary.
      Object** head_slot = HeapObject::RawField(
          context, FixedArray::SizeFor(index));
      heap->mark_compact_collector()->RecordSlot(
          head_slot, head_slot, list_head);
    }
  }

  static void VisitPhantomObject(Heap*, Context*) {
  }

  static int WeakNextOffset() {
    return FixedArray::SizeFor(Context::NEXT_CONTEXT_LINK);
  }
};


void Heap::ProcessWeakReferences(WeakObjectRetainer* retainer) {
  // We don't record weak slots during marking or scavenges.
  // Instead we do it once when we complete the mark-compact cycle.
  // Note that the write barrier has no effect if we are already in the middle
  // of a compacting mark-sweep cycle and we have to record slots manually.
  bool record_slots =
      gc_state() == MARK_COMPACT &&
      mark_compact_collector()->is_compacting();
  ProcessArrayBuffers(retainer, record_slots);
  ProcessNativeContexts(retainer, record_slots);
  // TODO(mvstanton): AllocationSites only need to be processed during
  // MARK_COMPACT, as they live in old space. Verify and address.
  ProcessAllocationSites(retainer, record_slots);
}
void Heap::ProcessNativeContexts(WeakObjectRetainer* retainer,
                                 bool record_slots) {
  Object* head =
      VisitWeakList<Context>(
          this, native_contexts_list(), retainer, record_slots);
  // Update the head of the list of contexts.
  native_contexts_list_ = head;
}


template<>
struct WeakListVisitor<JSArrayBufferView> {
  static void SetWeakNext(JSArrayBufferView* obj, Object* next) {
    obj->set_weak_next(next);
  }

  static Object* WeakNext(JSArrayBufferView* obj) {
    return obj->weak_next();
  }

  static void VisitLiveObject(Heap*,
                              JSArrayBufferView* obj,
                              WeakObjectRetainer* retainer,
                              bool record_slots) {}

  static void VisitPhantomObject(Heap*, JSArrayBufferView*) {}

  static int WeakNextOffset() {
    return JSArrayBufferView::kWeakNextOffset;
  }
};


template<>
struct WeakListVisitor<JSArrayBuffer> {
  static void SetWeakNext(JSArrayBuffer* obj, Object* next) {
    obj->set_weak_next(next);
  }

  static Object* WeakNext(JSArrayBuffer* obj) {
    return obj->weak_next();
  }

  static void VisitLiveObject(Heap* heap,
                              JSArrayBuffer* array_buffer,
                              WeakObjectRetainer* retainer,
                              bool record_slots) {
    Object* typed_array_obj =
        VisitWeakList<JSArrayBufferView>(
            heap,
            array_buffer->weak_first_view(),
            retainer, record_slots);
    array_buffer->set_weak_first_view(typed_array_obj);
    if (typed_array_obj != heap->undefined_value() && record_slots) {
      Object** slot = HeapObject::RawField(
          array_buffer, JSArrayBuffer::kWeakFirstViewOffset);
      heap->mark_compact_collector()->RecordSlot(slot, slot, typed_array_obj);
    }
  }

  static void VisitPhantomObject(Heap* heap, JSArrayBuffer* phantom) {
    Runtime::FreeArrayBuffer(heap->isolate(), phantom);
  }

  static int WeakNextOffset() {
    return JSArrayBuffer::kWeakNextOffset;
  }
};


void Heap::ProcessArrayBuffers(WeakObjectRetainer* retainer,
                               bool record_slots) {
  Object* array_buffer_obj =
      VisitWeakList<JSArrayBuffer>(this,
                                   array_buffers_list(),
                                   retainer, record_slots);
  set_array_buffers_list(array_buffer_obj);
}


void Heap::TearDownArrayBuffers() {
  Object* undefined = undefined_value();
  for (Object* o = array_buffers_list(); o != undefined;) {
    JSArrayBuffer* buffer = JSArrayBuffer::cast(o);
    Runtime::FreeArrayBuffer(isolate(), buffer);
    o = buffer->weak_next();
  }
  array_buffers_list_ = undefined;
}


template<>
struct WeakListVisitor<AllocationSite> {
  static void SetWeakNext(AllocationSite* obj, Object* next) {
    obj->set_weak_next(next);
  }

  static Object* WeakNext(AllocationSite* obj) {
    return obj->weak_next();
  }

  static void VisitLiveObject(Heap* heap,
                              AllocationSite* site,
                              WeakObjectRetainer* retainer,
                              bool record_slots) {}

  static void VisitPhantomObject(Heap* heap, AllocationSite* phantom) {}

  static int WeakNextOffset() {
    return AllocationSite::kWeakNextOffset;
  }
};


void Heap::ProcessAllocationSites(WeakObjectRetainer* retainer,
                                  bool record_slots) {
  Object* allocation_site_obj =
      VisitWeakList<AllocationSite>(this,
                                    allocation_sites_list(),
                                    retainer, record_slots);
  set_allocation_sites_list(allocation_site_obj);
}


void Heap::ResetAllAllocationSitesDependentCode(PretenureFlag flag) {
  DisallowHeapAllocation no_allocation_scope;
  Object* cur = allocation_sites_list();
  bool marked = false;
  while (cur->IsAllocationSite()) {
    AllocationSite* casted = AllocationSite::cast(cur);
    if (casted->GetPretenureMode() == flag) {
      casted->ResetPretenureDecision();
      bool got_marked = casted->dependent_code()->MarkCodeForDeoptimization(
          isolate_,
          DependentCode::kAllocationSiteTenuringChangedGroup);
      if (got_marked) marked = true;
    }
    cur = casted->weak_next();
  }
  if (marked) isolate_->stack_guard()->DeoptMarkedCode();
}


void Heap::EvaluateOldSpaceLocalPretenuring(
    uint64_t size_of_objects_before_gc) {
  uint64_t size_of_objects_after_gc = SizeOfObjects();
  double old_generation_survival_rate =
      (static_cast<double>(size_of_objects_after_gc) * 100) /
          static_cast<double>(size_of_objects_before_gc);

  if (old_generation_survival_rate < kOldSurvivalRateLowThreshold) {
    // Too many objects died in the old generation; wrong pretenuring
    // decisions at allocation sites may be the cause. We have to deopt all
    // dependent code registered in the allocation sites to re-evaluate
    // our pretenuring decisions.
    ResetAllAllocationSitesDependentCode(TENURED);
    if (FLAG_trace_pretenuring) {
      PrintF("Deopt all allocation sites dependent code due to low survival "
             "rate in the old generation %f\n", old_generation_survival_rate);
    }
  }
}


void Heap::VisitExternalResources(v8::ExternalResourceVisitor* visitor) {
  DisallowHeapAllocation no_allocation;
  // All external strings are listed in the external string table.

  class ExternalStringTableVisitorAdapter : public ObjectVisitor {
   public:
    explicit ExternalStringTableVisitorAdapter(
        v8::ExternalResourceVisitor* visitor) : visitor_(visitor) {}
    virtual void VisitPointers(Object** start, Object** end) {
      for (Object** p = start; p < end; p++) {
        ASSERT((*p)->IsExternalString());
        visitor_->VisitExternalString(Utils::ToLocal(
            Handle<String>(String::cast(*p))));
      }
    }
   private:
    v8::ExternalResourceVisitor* visitor_;
  } external_string_table_visitor(visitor);

  external_string_table_.Iterate(&external_string_table_visitor);
}


class NewSpaceScavenger : public StaticNewSpaceVisitor<NewSpaceScavenger> {
 public:
  static inline void VisitPointer(Heap* heap, Object** p) {
    Object* object = *p;
    if (!heap->InNewSpace(object)) return;
    Heap::ScavengeObject(reinterpret_cast<HeapObject**>(p),
                         reinterpret_cast<HeapObject*>(object));
  }
};


Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
                         Address new_space_front) {
  do {
    SemiSpace::AssertValidRange(new_space_front, new_space_.top());
    // The addresses new_space_front and new_space_.top() define a
    // queue of unprocessed copied objects. Process them until the
    // queue is empty.
    while (new_space_front != new_space_.top()) {
      if (!NewSpacePage::IsAtEnd(new_space_front)) {
        HeapObject* object = HeapObject::FromAddress(new_space_front);
        new_space_front +=
            NewSpaceScavenger::IterateBody(object->map(), object);
      } else {
        new_space_front =
            NewSpacePage::FromLimit(new_space_front)->next_page()->area_start();
      }
    }

    // Promote and process all the to-be-promoted objects.
    {
      StoreBufferRebuildScope scope(this,
                                    store_buffer(),
                                    &ScavengeStoreBufferCallback);
      while (!promotion_queue()->is_empty()) {
        HeapObject* target;
        int size;
        promotion_queue()->remove(&target, &size);

        // The promoted object might already be partially visited during old
        // space pointer iteration. Thus we search specifically for pointers
        // into the from semispace instead of looking for pointers to new
        // space.
        ASSERT(!target->IsMap());
        IterateAndMarkPointersToFromSpace(target->address(),
                                          target->address() + size,
                                          &ScavengeObject);
      }
    }

    // Take another spin if there are now unswept objects in new space
    // (there are currently no more unswept promoted objects).
  } while (new_space_front != new_space_.top());

  return new_space_front;
}


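// DoScavenge above is a Cheney-style scan: the region between new_space_front
// (the scan pointer) and new_space_.top() (the allocation pointer) acts as an
// implicit work queue, because visiting an object may copy its children and
// thereby bump the allocation pointer. A minimal sketch of that control
// structure over a flat buffer (names invented, not V8 API):
#if 0
typedef size_t (*VisitFn)(char* object, char** top);

static void CheneyScan(char* scan, char** top, VisitFn visit) {
  while (scan != *top) {
    scan += visit(scan, top);  // Visiting may copy children, bumping *top.
  }
}
#endif

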
STATIC_ASSERT((FixedDoubleArray::kHeaderSize & kDoubleAlignmentMask) == 0);
STATIC_ASSERT((ConstantPoolArray::kHeaderSize & kDoubleAlignmentMask) == 0);


INLINE(static HeapObject* EnsureDoubleAligned(Heap* heap,
                                              HeapObject* object,
                                              int size));

static HeapObject* EnsureDoubleAligned(Heap* heap,
                                       HeapObject* object,
                                       int size) {
  if ((OffsetFrom(object->address()) & kDoubleAlignmentMask) != 0) {
    heap->CreateFillerObjectAt(object->address(), kPointerSize);
    return HeapObject::FromAddress(object->address() + kPointerSize);
  } else {
    heap->CreateFillerObjectAt(object->address() + size - kPointerSize,
                               kPointerSize);
    return object;
  }
}


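// On 32-bit targets a fresh heap object is only word (4-byte) aligned, so a
// double payload needs one word of slack: EnsureDoubleAligned either bumps
// the object forward one word (filler in front) or turns the trailing word
// into a filler. The underlying arithmetic, as a standalone sketch:
#if 0
static uintptr_t AlignUpToDouble(uintptr_t addr) {
  const uintptr_t kDoubleAlign = 8;
  return (addr + kDoubleAlign - 1) & ~(kDoubleAlign - 1);
}
// AlignUpToDouble(0x1004) == 0x1008: a one-word filler would sit at 0x1004,
// mirroring the first branch of EnsureDoubleAligned above.
#endif

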
enum LoggingAndProfiling {
  LOGGING_AND_PROFILING_ENABLED,
  LOGGING_AND_PROFILING_DISABLED
};


enum MarksHandling { TRANSFER_MARKS, IGNORE_MARKS };


template<MarksHandling marks_handling,
         LoggingAndProfiling logging_and_profiling_mode>
class ScavengingVisitor : public StaticVisitorBase {
 public:
  static void Initialize() {
    table_.Register(kVisitSeqOneByteString, &EvacuateSeqOneByteString);
    table_.Register(kVisitSeqTwoByteString, &EvacuateSeqTwoByteString);
    table_.Register(kVisitShortcutCandidate, &EvacuateShortcutCandidate);
    table_.Register(kVisitByteArray, &EvacuateByteArray);
    table_.Register(kVisitFixedArray, &EvacuateFixedArray);
    table_.Register(kVisitFixedDoubleArray, &EvacuateFixedDoubleArray);
    table_.Register(kVisitFixedTypedArray, &EvacuateFixedTypedArray);
    table_.Register(kVisitFixedFloat64Array, &EvacuateFixedFloat64Array);

    table_.Register(kVisitNativeContext,
                    &ObjectEvacuationStrategy<POINTER_OBJECT>::
                        template VisitSpecialized<Context::kSize>);

    table_.Register(kVisitConsString,
                    &ObjectEvacuationStrategy<POINTER_OBJECT>::
                        template VisitSpecialized<ConsString::kSize>);

    table_.Register(kVisitSlicedString,
                    &ObjectEvacuationStrategy<POINTER_OBJECT>::
                        template VisitSpecialized<SlicedString::kSize>);

    table_.Register(kVisitSymbol,
                    &ObjectEvacuationStrategy<POINTER_OBJECT>::
                        template VisitSpecialized<Symbol::kSize>);

    table_.Register(kVisitSharedFunctionInfo,
                    &ObjectEvacuationStrategy<POINTER_OBJECT>::
                        template VisitSpecialized<SharedFunctionInfo::kSize>);

    table_.Register(kVisitJSWeakMap,
                    &ObjectEvacuationStrategy<POINTER_OBJECT>::Visit);

    table_.Register(kVisitJSWeakSet,
                    &ObjectEvacuationStrategy<POINTER_OBJECT>::Visit);

    table_.Register(kVisitJSArrayBuffer,
                    &ObjectEvacuationStrategy<POINTER_OBJECT>::Visit);

    table_.Register(kVisitJSTypedArray,
                    &ObjectEvacuationStrategy<POINTER_OBJECT>::Visit);

    table_.Register(kVisitJSDataView,
                    &ObjectEvacuationStrategy<POINTER_OBJECT>::Visit);

    table_.Register(kVisitJSRegExp,
                    &ObjectEvacuationStrategy<POINTER_OBJECT>::Visit);

    if (marks_handling == IGNORE_MARKS) {
      table_.Register(kVisitJSFunction,
                      &ObjectEvacuationStrategy<POINTER_OBJECT>::
                          template VisitSpecialized<JSFunction::kSize>);
    } else {
      table_.Register(kVisitJSFunction, &EvacuateJSFunction);
    }

    table_.RegisterSpecializations<ObjectEvacuationStrategy<DATA_OBJECT>,
                                   kVisitDataObject,
                                   kVisitDataObjectGeneric>();

    table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>,
                                   kVisitJSObject,
                                   kVisitJSObjectGeneric>();

    table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>,
                                   kVisitStruct,
                                   kVisitStructGeneric>();
  }

  static VisitorDispatchTable<ScavengingCallback>* GetTable() {
    return &table_;
  }

 private:
  enum ObjectContents { DATA_OBJECT, POINTER_OBJECT };

  static void RecordCopiedObject(Heap* heap, HeapObject* obj) {
    bool should_record = false;
#ifdef DEBUG
    should_record = FLAG_heap_stats;
#endif
    should_record = should_record || FLAG_log_gc;
    if (should_record) {
      if (heap->new_space()->Contains(obj)) {
        heap->new_space()->RecordAllocation(obj);
      } else {
        heap->new_space()->RecordPromotion(obj);
      }
    }
  }


  // Helper function used by EvacuateObject to copy a source object to a
  // freshly allocated target object and update the forwarding pointer in
  // the source object.
  INLINE(static void MigrateObject(Heap* heap,
                                   HeapObject* source,
                                   HeapObject* target,
                                   int size)) {
    // Copy the content of source to target.
    heap->CopyBlock(target->address(), source->address(), size);

    // Set the forwarding address.
    source->set_map_word(MapWord::FromForwardingAddress(target));

    if (logging_and_profiling_mode == LOGGING_AND_PROFILING_ENABLED) {
      // Update NewSpace stats if necessary.
      RecordCopiedObject(heap, target);
      Isolate* isolate = heap->isolate();
      HeapProfiler* heap_profiler = isolate->heap_profiler();
      if (heap_profiler->is_tracking_object_moves()) {
        heap_profiler->ObjectMoveEvent(source->address(), target->address(),
                                       size);
      }
      if (isolate->logger()->is_logging_code_events() ||
          isolate->cpu_profiler()->is_profiling()) {
        if (target->IsSharedFunctionInfo()) {
          PROFILE(isolate, SharedFunctionInfoMoveEvent(
              source->address(), target->address()));
        }
      }
    }

    if (marks_handling == TRANSFER_MARKS) {
      if (Marking::TransferColor(source, target)) {
        MemoryChunk::IncrementLiveBytesFromGC(target->address(), size);
      }
    }
  }


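  // A sketch of the forwarding-address idiom used by MigrateObject, with a
  // plain pointer in place of V8's tagged MapWord. Cell and Forward are
  // invented names; illustrative only.
#if 0
  struct Cell { void* map_or_forwarding; char payload[8]; };

  static Cell* Forward(Cell* source, Cell* target) {
    *target = *source;                    // Copy the contents.
    source->map_or_forwarding = target;   // Leave a forwarding pointer behind.
    return target;
  }
#endif

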
  template<ObjectContents object_contents, int alignment>
  static inline void EvacuateObject(Map* map,
                                    HeapObject** slot,
                                    HeapObject* object,
                                    int object_size) {
    SLOW_ASSERT(object_size <= Page::kMaxRegularHeapObjectSize);
    SLOW_ASSERT(object->Size() == object_size);

    int allocation_size = object_size;
    if (alignment != kObjectAlignment) {
      ASSERT(alignment == kDoubleAlignment);
      allocation_size += kPointerSize;
    }

    Heap* heap = map->GetHeap();
    if (heap->ShouldBePromoted(object->address(), object_size)) {
      MaybeObject* maybe_result;

      if (object_contents == DATA_OBJECT) {
        ASSERT(heap->AllowedToBeMigrated(object, OLD_DATA_SPACE));
        maybe_result = heap->old_data_space()->AllocateRaw(allocation_size);
      } else {
        ASSERT(heap->AllowedToBeMigrated(object, OLD_POINTER_SPACE));
        maybe_result = heap->old_pointer_space()->AllocateRaw(allocation_size);
      }

      Object* result = NULL;  // Initialization to please compiler.
      if (maybe_result->ToObject(&result)) {
        HeapObject* target = HeapObject::cast(result);

        if (alignment != kObjectAlignment) {
          target = EnsureDoubleAligned(heap, target, allocation_size);
        }

        // Order is important: slot might be inside the target if the target
        // was allocated over a dead object and the slot comes from the store
        // buffer.
        *slot = target;
        MigrateObject(heap, object, target, object_size);

        if (object_contents == POINTER_OBJECT) {
          if (map->instance_type() == JS_FUNCTION_TYPE) {
            heap->promotion_queue()->insert(
                target, JSFunction::kNonWeakFieldsEndOffset);
          } else {
            heap->promotion_queue()->insert(target, object_size);
          }
        }

        heap->tracer()->increment_promoted_objects_size(object_size);
        return;
      }
    }
    ASSERT(heap->AllowedToBeMigrated(object, NEW_SPACE));
    MaybeObject* allocation = heap->new_space()->AllocateRaw(allocation_size);
    heap->promotion_queue()->SetNewLimit(heap->new_space()->top());
    Object* result = allocation->ToObjectUnchecked();
    HeapObject* target = HeapObject::cast(result);

    if (alignment != kObjectAlignment) {
      target = EnsureDoubleAligned(heap, target, allocation_size);
    }

    // Order is important: slot might be inside the target if the target
    // was allocated over a dead object and the slot comes from the store
    // buffer.
    *slot = target;
    MigrateObject(heap, object, target, object_size);
    return;
  }


  static inline void EvacuateJSFunction(Map* map,
                                        HeapObject** slot,
                                        HeapObject* object) {
    ObjectEvacuationStrategy<POINTER_OBJECT>::
        template VisitSpecialized<JSFunction::kSize>(map, slot, object);

    HeapObject* target = *slot;
    MarkBit mark_bit = Marking::MarkBitFrom(target);
    if (Marking::IsBlack(mark_bit)) {
      // This object is black and it might not be rescanned by the marker.
      // We should explicitly record the code entry slot for compaction
      // because promotion queue processing (IterateAndMarkPointersToFromSpace)
      // will miss it as it is not HeapObject-tagged.
      Address code_entry_slot =
          target->address() + JSFunction::kCodeEntryOffset;
      Code* code = Code::cast(Code::GetObjectFromEntryAddress(code_entry_slot));
      map->GetHeap()->mark_compact_collector()->
          RecordCodeEntrySlot(code_entry_slot, code);
    }
  }


  static inline void EvacuateFixedArray(Map* map,
                                        HeapObject** slot,
                                        HeapObject* object) {
    int object_size = FixedArray::BodyDescriptor::SizeOf(map, object);
    EvacuateObject<POINTER_OBJECT, kObjectAlignment>(
        map, slot, object, object_size);
  }


  static inline void EvacuateFixedDoubleArray(Map* map,
                                              HeapObject** slot,
                                              HeapObject* object) {
    int length = reinterpret_cast<FixedDoubleArray*>(object)->length();
    int object_size = FixedDoubleArray::SizeFor(length);
    EvacuateObject<DATA_OBJECT, kDoubleAlignment>(
        map, slot, object, object_size);
  }


  static inline void EvacuateFixedTypedArray(Map* map,
                                             HeapObject** slot,
                                             HeapObject* object) {
    int object_size = reinterpret_cast<FixedTypedArrayBase*>(object)->size();
    EvacuateObject<DATA_OBJECT, kObjectAlignment>(
        map, slot, object, object_size);
  }


  static inline void EvacuateFixedFloat64Array(Map* map,
                                               HeapObject** slot,
                                               HeapObject* object) {
    int object_size = reinterpret_cast<FixedFloat64Array*>(object)->size();
    EvacuateObject<DATA_OBJECT, kDoubleAlignment>(
        map, slot, object, object_size);
  }


  static inline void EvacuateByteArray(Map* map,
                                       HeapObject** slot,
                                       HeapObject* object) {
    int object_size = reinterpret_cast<ByteArray*>(object)->ByteArraySize();
    EvacuateObject<DATA_OBJECT, kObjectAlignment>(
        map, slot, object, object_size);
  }


  static inline void EvacuateSeqOneByteString(Map* map,
                                              HeapObject** slot,
                                              HeapObject* object) {
    int object_size = SeqOneByteString::cast(object)->
        SeqOneByteStringSize(map->instance_type());
    EvacuateObject<DATA_OBJECT, kObjectAlignment>(
        map, slot, object, object_size);
  }


  static inline void EvacuateSeqTwoByteString(Map* map,
                                              HeapObject** slot,
                                              HeapObject* object) {
    int object_size = SeqTwoByteString::cast(object)->
        SeqTwoByteStringSize(map->instance_type());
    EvacuateObject<DATA_OBJECT, kObjectAlignment>(
        map, slot, object, object_size);
  }


  static inline bool IsShortcutCandidate(int type) {
    return ((type & kShortcutTypeMask) == kShortcutTypeTag);
  }


  static inline void EvacuateShortcutCandidate(Map* map,
                                               HeapObject** slot,
                                               HeapObject* object) {
    ASSERT(IsShortcutCandidate(map->instance_type()));

    Heap* heap = map->GetHeap();

    if (marks_handling == IGNORE_MARKS &&
        ConsString::cast(object)->unchecked_second() ==
        heap->empty_string()) {
      HeapObject* first =
          HeapObject::cast(ConsString::cast(object)->unchecked_first());

      *slot = first;

      if (!heap->InNewSpace(first)) {
        object->set_map_word(MapWord::FromForwardingAddress(first));
        return;
      }

      MapWord first_word = first->map_word();
      if (first_word.IsForwardingAddress()) {
        HeapObject* target = first_word.ToForwardingAddress();

        *slot = target;
        object->set_map_word(MapWord::FromForwardingAddress(target));
        return;
      }

      heap->DoScavengeObject(first->map(), slot, first);
      object->set_map_word(MapWord::FromForwardingAddress(*slot));
      return;
    }

    int object_size = ConsString::kSize;
    EvacuateObject<POINTER_OBJECT, kObjectAlignment>(
        map, slot, object, object_size);
  }


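  // Effect of the short-circuit above: a ConsString whose second part is the
  // empty string scavenges to (a forwarding pointer to) its first part, so
  // cons("abc", "") collapses to "abc", and nested wrappers collapse one
  // level per DoScavengeObject dispatch.

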
  template<ObjectContents object_contents>
  class ObjectEvacuationStrategy {
   public:
    template<int object_size>
    static inline void VisitSpecialized(Map* map,
                                        HeapObject** slot,
                                        HeapObject* object) {
      EvacuateObject<object_contents, kObjectAlignment>(
          map, slot, object, object_size);
    }

    static inline void Visit(Map* map,
                             HeapObject** slot,
                             HeapObject* object) {
      int object_size = map->instance_size();
      EvacuateObject<object_contents, kObjectAlignment>(
          map, slot, object, object_size);
    }
  };

  static VisitorDispatchTable<ScavengingCallback> table_;
};


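// The scavenger dispatches through a table of function pointers indexed by
// visitor id, avoiding a switch over instance types in the hot copy loop. A
// self-contained sketch of the mechanism (names invented, not V8 API):
#if 0
typedef void (*Callback)(int* slot);

static void VisitSmall(int* slot) { *slot += 1; }
static void VisitLarge(int* slot) { *slot += 2; }

static Callback dispatch_table[] = { &VisitSmall, &VisitLarge };

static void Visit(int id, int* slot) {
  dispatch_table[id](slot);  // One indirect call, no type switch.
}
#endif

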
template<MarksHandling marks_handling,
         LoggingAndProfiling logging_and_profiling_mode>
VisitorDispatchTable<ScavengingCallback>
    ScavengingVisitor<marks_handling, logging_and_profiling_mode>::table_;


static void InitializeScavengingVisitorsTables() {
  ScavengingVisitor<TRANSFER_MARKS,
                    LOGGING_AND_PROFILING_DISABLED>::Initialize();
  ScavengingVisitor<IGNORE_MARKS, LOGGING_AND_PROFILING_DISABLED>::Initialize();
  ScavengingVisitor<TRANSFER_MARKS,
                    LOGGING_AND_PROFILING_ENABLED>::Initialize();
  ScavengingVisitor<IGNORE_MARKS, LOGGING_AND_PROFILING_ENABLED>::Initialize();
}


void Heap::SelectScavengingVisitorsTable() {
  bool logging_and_profiling =
      isolate()->logger()->is_logging() ||
      isolate()->cpu_profiler()->is_profiling() ||
      (isolate()->heap_profiler() != NULL &&
       isolate()->heap_profiler()->is_tracking_object_moves());

  if (!incremental_marking()->IsMarking()) {
    if (!logging_and_profiling) {
      scavenging_visitors_table_.CopyFrom(
          ScavengingVisitor<IGNORE_MARKS,
                            LOGGING_AND_PROFILING_DISABLED>::GetTable());
    } else {
      scavenging_visitors_table_.CopyFrom(
          ScavengingVisitor<IGNORE_MARKS,
                            LOGGING_AND_PROFILING_ENABLED>::GetTable());
    }
  } else {
    if (!logging_and_profiling) {
      scavenging_visitors_table_.CopyFrom(
          ScavengingVisitor<TRANSFER_MARKS,
                            LOGGING_AND_PROFILING_DISABLED>::GetTable());
    } else {
      scavenging_visitors_table_.CopyFrom(
          ScavengingVisitor<TRANSFER_MARKS,
                            LOGGING_AND_PROFILING_ENABLED>::GetTable());
    }

    if (incremental_marking()->IsCompacting()) {
      // When compacting, forbid short-circuiting of cons strings.
      // Scavenging code relies on the fact that a new space object
      // can't be evacuated into an evacuation candidate, but
      // short-circuiting violates this assumption.
      scavenging_visitors_table_.Register(
          StaticVisitorBase::kVisitShortcutCandidate,
          scavenging_visitors_table_.GetVisitorById(
              StaticVisitorBase::kVisitConsString));
    }
  }
}


void Heap::ScavengeObjectSlow(HeapObject** p, HeapObject* object) {
  SLOW_ASSERT(object->GetIsolate()->heap()->InFromSpace(object));
  MapWord first_word = object->map_word();
  SLOW_ASSERT(!first_word.IsForwardingAddress());
  Map* map = first_word.ToMap();
  map->GetHeap()->DoScavengeObject(map, p, object);
}


MaybeObject* Heap::AllocatePartialMap(InstanceType instance_type,
                                      int instance_size) {
  Object* result;
  MaybeObject* maybe_result = AllocateRaw(Map::kSize, MAP_SPACE, MAP_SPACE);
  if (!maybe_result->ToObject(&result)) return maybe_result;

  // Map::cast cannot be used due to the uninitialized map field.
  reinterpret_cast<Map*>(result)->set_map(raw_unchecked_meta_map());
  reinterpret_cast<Map*>(result)->set_instance_type(instance_type);
  reinterpret_cast<Map*>(result)->set_instance_size(instance_size);
  reinterpret_cast<Map*>(result)->set_visitor_id(
      StaticVisitorBase::GetVisitorId(instance_type, instance_size));
  reinterpret_cast<Map*>(result)->set_inobject_properties(0);
  reinterpret_cast<Map*>(result)->set_pre_allocated_property_fields(0);
  reinterpret_cast<Map*>(result)->set_unused_property_fields(0);
  reinterpret_cast<Map*>(result)->set_bit_field(0);
  reinterpret_cast<Map*>(result)->set_bit_field2(0);
  int bit_field3 = Map::EnumLengthBits::encode(kInvalidEnumCacheSentinel) |
                   Map::OwnsDescriptors::encode(true);
  reinterpret_cast<Map*>(result)->set_bit_field3(bit_field3);
  return result;
}


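// The allocation functions in this file share one control-flow pattern: a
// raw allocator returns a MaybeObject*, the caller unwraps it with
// ToObject()/To() and propagates the failure object on false, and field
// initialization only starts once no further allocation can fail. A hedged
// sketch of that pattern with invented names (AllocateThing, kThingSize,
// SOME_SPACE):
#if 0
MaybeObject* Heap::AllocateThing() {
  Object* result;
  { MaybeObject* maybe_result = AllocateRaw(kThingSize, SOME_SPACE, SOME_SPACE);
    if (!maybe_result->ToObject(&result)) return maybe_result;  // Propagate.
  }
  // Initialize fields of 'result' here; nothing below may allocate, so the
  // object is never observed half-initialized.
  return result;
}
#endif

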
MaybeObject* Heap::AllocateMap(InstanceType instance_type,
                               int instance_size,
                               ElementsKind elements_kind) {
  Object* result;
  MaybeObject* maybe_result = AllocateRaw(Map::kSize, MAP_SPACE, MAP_SPACE);
  if (!maybe_result->To(&result)) return maybe_result;

  Map* map = reinterpret_cast<Map*>(result);
  map->set_map_no_write_barrier(meta_map());
  map->set_instance_type(instance_type);
  map->set_visitor_id(
      StaticVisitorBase::GetVisitorId(instance_type, instance_size));
  map->set_prototype(null_value(), SKIP_WRITE_BARRIER);
  map->set_constructor(null_value(), SKIP_WRITE_BARRIER);
  map->set_instance_size(instance_size);
  map->set_inobject_properties(0);
  map->set_pre_allocated_property_fields(0);
  map->set_code_cache(empty_fixed_array(), SKIP_WRITE_BARRIER);
  map->set_dependent_code(DependentCode::cast(empty_fixed_array()),
                          SKIP_WRITE_BARRIER);
  map->init_back_pointer(undefined_value());
  map->set_unused_property_fields(0);
  map->set_instance_descriptors(empty_descriptor_array());
  map->set_bit_field(0);
  map->set_bit_field2(1 << Map::kIsExtensible);
  int bit_field3 = Map::EnumLengthBits::encode(kInvalidEnumCacheSentinel) |
                   Map::OwnsDescriptors::encode(true);
  map->set_bit_field3(bit_field3);
  map->set_elements_kind(elements_kind);

  return map;
}


MaybeObject* Heap::AllocateCodeCache() {
  CodeCache* code_cache;
  { MaybeObject* maybe_code_cache = AllocateStruct(CODE_CACHE_TYPE);
    if (!maybe_code_cache->To(&code_cache)) return maybe_code_cache;
  }
  code_cache->set_default_cache(empty_fixed_array(), SKIP_WRITE_BARRIER);
  code_cache->set_normal_type_cache(undefined_value(), SKIP_WRITE_BARRIER);
  return code_cache;
}


MaybeObject* Heap::AllocatePolymorphicCodeCache() {
  return AllocateStruct(POLYMORPHIC_CODE_CACHE_TYPE);
}


MaybeObject* Heap::AllocateAccessorPair() {
  AccessorPair* accessors;
  { MaybeObject* maybe_accessors = AllocateStruct(ACCESSOR_PAIR_TYPE);
    if (!maybe_accessors->To(&accessors)) return maybe_accessors;
  }
  accessors->set_getter(the_hole_value(), SKIP_WRITE_BARRIER);
  accessors->set_setter(the_hole_value(), SKIP_WRITE_BARRIER);
  accessors->set_access_flags(Smi::FromInt(0), SKIP_WRITE_BARRIER);
  return accessors;
}


MaybeObject* Heap::AllocateTypeFeedbackInfo() {
  TypeFeedbackInfo* info;
  { MaybeObject* maybe_info = AllocateStruct(TYPE_FEEDBACK_INFO_TYPE);
    if (!maybe_info->To(&info)) return maybe_info;
  }
  info->initialize_storage();
  info->set_type_feedback_cells(TypeFeedbackCells::cast(empty_fixed_array()),
                                SKIP_WRITE_BARRIER);
  return info;
}


MaybeObject* Heap::AllocateAliasedArgumentsEntry(int aliased_context_slot) {
  AliasedArgumentsEntry* entry;
  { MaybeObject* maybe_entry = AllocateStruct(ALIASED_ARGUMENTS_ENTRY_TYPE);
    if (!maybe_entry->To(&entry)) return maybe_entry;
  }
  entry->set_aliased_context_slot(aliased_context_slot);
  return entry;
}


const Heap::StringTypeTable Heap::string_type_table[] = {
#define STRING_TYPE_ELEMENT(type, size, name, camel_name)                     \
  {type, size, k##camel_name##MapRootIndex},
  STRING_TYPE_LIST(STRING_TYPE_ELEMENT)
#undef STRING_TYPE_ELEMENT
};


const Heap::ConstantStringTable Heap::constant_string_table[] = {
#define CONSTANT_STRING_ELEMENT(name, contents)                               \
  {contents, k##name##RootIndex},
  INTERNALIZED_STRING_LIST(CONSTANT_STRING_ELEMENT)
#undef CONSTANT_STRING_ELEMENT
};


const Heap::StructTable Heap::struct_table[] = {
#define STRUCT_TABLE_ELEMENT(NAME, Name, name)                                \
  { NAME##_TYPE, Name::kSize, k##Name##MapRootIndex },
  STRUCT_LIST(STRUCT_TABLE_ELEMENT)
#undef STRUCT_TABLE_ELEMENT
};


bool Heap::CreateInitialMaps() {
  Object* obj;
  { MaybeObject* maybe_obj = AllocatePartialMap(MAP_TYPE, Map::kSize);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  // Map::cast cannot be used due to the uninitialized map field.
  Map* new_meta_map = reinterpret_cast<Map*>(obj);
  set_meta_map(new_meta_map);
  new_meta_map->set_map(new_meta_map);

  { MaybeObject* maybe_obj =
        AllocatePartialMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_fixed_array_map(Map::cast(obj));

  { MaybeObject* maybe_obj = AllocatePartialMap(ODDBALL_TYPE, Oddball::kSize);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_oddball_map(Map::cast(obj));

  { MaybeObject* maybe_obj =
        AllocatePartialMap(CONSTANT_POOL_ARRAY_TYPE, kVariableSizeSentinel);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_constant_pool_array_map(Map::cast(obj));

  // Allocate the empty array.
  { MaybeObject* maybe_obj = AllocateEmptyFixedArray();
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_empty_fixed_array(FixedArray::cast(obj));

  { MaybeObject* maybe_obj = Allocate(oddball_map(), OLD_POINTER_SPACE);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_null_value(Oddball::cast(obj));
  Oddball::cast(obj)->set_kind(Oddball::kNull);

  { MaybeObject* maybe_obj = Allocate(oddball_map(), OLD_POINTER_SPACE);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_undefined_value(Oddball::cast(obj));
  Oddball::cast(obj)->set_kind(Oddball::kUndefined);
  ASSERT(!InNewSpace(undefined_value()));

  // Allocate the empty descriptor array.
  { MaybeObject* maybe_obj = AllocateEmptyFixedArray();
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_empty_descriptor_array(DescriptorArray::cast(obj));

  // Allocate the constant pool array.
  { MaybeObject* maybe_obj = AllocateEmptyConstantPoolArray();
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_empty_constant_pool_array(ConstantPoolArray::cast(obj));

  // Fix the instance_descriptors for the existing maps.
  meta_map()->set_code_cache(empty_fixed_array());
  meta_map()->set_dependent_code(DependentCode::cast(empty_fixed_array()));
  meta_map()->init_back_pointer(undefined_value());
  meta_map()->set_instance_descriptors(empty_descriptor_array());

  fixed_array_map()->set_code_cache(empty_fixed_array());
  fixed_array_map()->set_dependent_code(
      DependentCode::cast(empty_fixed_array()));
  fixed_array_map()->init_back_pointer(undefined_value());
  fixed_array_map()->set_instance_descriptors(empty_descriptor_array());

  oddball_map()->set_code_cache(empty_fixed_array());
  oddball_map()->set_dependent_code(DependentCode::cast(empty_fixed_array()));
  oddball_map()->init_back_pointer(undefined_value());
  oddball_map()->set_instance_descriptors(empty_descriptor_array());

  constant_pool_array_map()->set_code_cache(empty_fixed_array());
  constant_pool_array_map()->set_dependent_code(
      DependentCode::cast(empty_fixed_array()));
  constant_pool_array_map()->init_back_pointer(undefined_value());
  constant_pool_array_map()->set_instance_descriptors(empty_descriptor_array());

  // Fix prototype object for existing maps.
  meta_map()->set_prototype(null_value());
  meta_map()->set_constructor(null_value());

  fixed_array_map()->set_prototype(null_value());
  fixed_array_map()->set_constructor(null_value());

  oddball_map()->set_prototype(null_value());
  oddball_map()->set_constructor(null_value());

  constant_pool_array_map()->set_prototype(null_value());
  constant_pool_array_map()->set_constructor(null_value());

#define ALLOCATE_MAP(instance_type, size, field_name)                         \
    { Map* map;                                                               \
      if (!AllocateMap((instance_type), size)->To(&map)) return false;        \
      set_##field_name##_map(map);                                            \
    }

#define ALLOCATE_VARSIZE_MAP(instance_type, field_name)                       \
    ALLOCATE_MAP(instance_type, kVariableSizeSentinel, field_name)

  ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, fixed_cow_array)
  ASSERT(fixed_array_map() != fixed_cow_array_map());

  ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, scope_info)
  ALLOCATE_MAP(HEAP_NUMBER_TYPE, HeapNumber::kSize, heap_number)
  ALLOCATE_MAP(SYMBOL_TYPE, Symbol::kSize, symbol)
  ALLOCATE_MAP(FOREIGN_TYPE, Foreign::kSize, foreign)

  for (unsigned i = 0; i < ARRAY_SIZE(string_type_table); i++) {
    const StringTypeTable& entry = string_type_table[i];
    { MaybeObject* maybe_obj = AllocateMap(entry.type, entry.size);
      if (!maybe_obj->ToObject(&obj)) return false;
    }
    roots_[entry.index] = Map::cast(obj);
  }

  ALLOCATE_VARSIZE_MAP(STRING_TYPE, undetectable_string)
  undetectable_string_map()->set_is_undetectable();

  ALLOCATE_VARSIZE_MAP(ASCII_STRING_TYPE, undetectable_ascii_string);
  undetectable_ascii_string_map()->set_is_undetectable();

  ALLOCATE_VARSIZE_MAP(FIXED_DOUBLE_ARRAY_TYPE, fixed_double_array)
  ALLOCATE_VARSIZE_MAP(BYTE_ARRAY_TYPE, byte_array)
  ALLOCATE_VARSIZE_MAP(FREE_SPACE_TYPE, free_space)

#define ALLOCATE_EXTERNAL_ARRAY_MAP(Type, type, TYPE, ctype, size)            \
    ALLOCATE_MAP(EXTERNAL_##TYPE##_ARRAY_TYPE, ExternalArray::kAlignedSize,   \
        external_##type##_array)

  TYPED_ARRAYS(ALLOCATE_EXTERNAL_ARRAY_MAP)
#undef ALLOCATE_EXTERNAL_ARRAY_MAP

#define ALLOCATE_FIXED_TYPED_ARRAY_MAP(Type, type, TYPE, ctype, size)         \
    ALLOCATE_VARSIZE_MAP(FIXED_##TYPE##_ARRAY_TYPE,                           \
        fixed_##type##_array)

  TYPED_ARRAYS(ALLOCATE_FIXED_TYPED_ARRAY_MAP)
#undef ALLOCATE_FIXED_TYPED_ARRAY_MAP

  ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, non_strict_arguments_elements)

  ALLOCATE_VARSIZE_MAP(CODE_TYPE, code)

  ALLOCATE_MAP(CELL_TYPE, Cell::kSize, cell)
  ALLOCATE_MAP(PROPERTY_CELL_TYPE, PropertyCell::kSize, global_property_cell)
  ALLOCATE_MAP(FILLER_TYPE, kPointerSize, one_pointer_filler)
  ALLOCATE_MAP(FILLER_TYPE, 2 * kPointerSize, two_pointer_filler)

  for (unsigned i = 0; i < ARRAY_SIZE(struct_table); i++) {
    const StructTable& entry = struct_table[i];
    Map* map;
    if (!AllocateMap(entry.type, entry.size)->To(&map))
      return false;
    roots_[entry.index] = map;
  }

  ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, hash_table)

  ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, function_context)
  ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, catch_context)
  ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, with_context)
  ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, block_context)
  ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, module_context)
  ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, global_context)

  ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, native_context)
  native_context_map()->set_dictionary_map(true);
  native_context_map()->set_visitor_id(
      StaticVisitorBase::kVisitNativeContext);

  ALLOCATE_MAP(SHARED_FUNCTION_INFO_TYPE, SharedFunctionInfo::kAlignedSize,
               shared_function_info)

  ALLOCATE_MAP(JS_MESSAGE_OBJECT_TYPE, JSMessageObject::kSize,
               message_object)
  ALLOCATE_MAP(JS_OBJECT_TYPE, JSObject::kHeaderSize + kPointerSize,
               external)
  external_map()->set_is_extensible(false);
#undef ALLOCATE_VARSIZE_MAP
#undef ALLOCATE_MAP

  { ByteArray* byte_array;
    if (!AllocateByteArray(0, TENURED)->To(&byte_array)) return false;
    set_empty_byte_array(byte_array);
  }

#define ALLOCATE_EMPTY_EXTERNAL_ARRAY(Type, type, TYPE, ctype, size)          \
    { ExternalArray* obj;                                                     \
      if (!AllocateEmptyExternalArray(kExternal##Type##Array)->To(&obj))      \
          return false;                                                       \
      set_empty_external_##type##_array(obj);                                 \
    }

  TYPED_ARRAYS(ALLOCATE_EMPTY_EXTERNAL_ARRAY)
#undef ALLOCATE_EMPTY_EXTERNAL_ARRAY

  ASSERT(!InNewSpace(empty_fixed_array()));
  return true;
}


MaybeObject* Heap::AllocateHeapNumber(double value, PretenureFlag pretenure) {
  // Statically ensure that it is safe to allocate heap numbers in paged
  // spaces.
  int size = HeapNumber::kSize;
  STATIC_ASSERT(HeapNumber::kSize <= Page::kMaxRegularHeapObjectSize);

  AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);

  Object* result;
  { MaybeObject* maybe_result = AllocateRaw(size, space, OLD_DATA_SPACE);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }

  HeapObject::cast(result)->set_map_no_write_barrier(heap_number_map());
  HeapNumber::cast(result)->set_value(value);
  return result;
}


MaybeObject* Heap::AllocateCell(Object* value) {
  int size = Cell::kSize;
  STATIC_ASSERT(Cell::kSize <= Page::kMaxRegularHeapObjectSize);

  Object* result;
  { MaybeObject* maybe_result = AllocateRaw(size, CELL_SPACE, CELL_SPACE);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }
  HeapObject::cast(result)->set_map_no_write_barrier(cell_map());
  Cell::cast(result)->set_value(value);
  return result;
}


MaybeObject* Heap::AllocatePropertyCell() {
  int size = PropertyCell::kSize;
  STATIC_ASSERT(PropertyCell::kSize <= Page::kMaxRegularHeapObjectSize);

  Object* result;
  MaybeObject* maybe_result =
      AllocateRaw(size, PROPERTY_CELL_SPACE, PROPERTY_CELL_SPACE);
  if (!maybe_result->ToObject(&result)) return maybe_result;

  HeapObject::cast(result)->set_map_no_write_barrier(
      global_property_cell_map());
  PropertyCell* cell = PropertyCell::cast(result);
  cell->set_dependent_code(DependentCode::cast(empty_fixed_array()),
                           SKIP_WRITE_BARRIER);
  cell->set_value(the_hole_value());
  cell->set_type(HeapType::None());
  return result;
}


MaybeObject* Heap::AllocateBox(Object* value, PretenureFlag pretenure) {
  Box* result;
  MaybeObject* maybe_result = AllocateStruct(BOX_TYPE);
  if (!maybe_result->To(&result)) return maybe_result;
  result->set_value(value);
  return result;
}


MaybeObject* Heap::AllocateAllocationSite() {
  AllocationSite* site;
  MaybeObject* maybe_result = Allocate(allocation_site_map(),
                                       OLD_POINTER_SPACE);
  if (!maybe_result->To(&site)) return maybe_result;
  site->Initialize();

  // Link the site into the list of all allocation sites.
  site->set_weak_next(allocation_sites_list());
  set_allocation_sites_list(site);
  return site;
}


MaybeObject* Heap::CreateOddball(const char* to_string,
                                 Object* to_number,
                                 byte kind) {
  Object* result;
  { MaybeObject* maybe_result = Allocate(oddball_map(), OLD_POINTER_SPACE);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }
  return Oddball::cast(result)->Initialize(this, to_string, to_number, kind);
}


bool Heap::CreateApiObjects() {
  Object* obj;

  { MaybeObject* maybe_obj = AllocateMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  // Don't use Smi-only elements optimizations for objects with the neander
  // map. There are too many cases where element values are set directly with a
  // bottleneck to trap the Smi-only -> fast elements transition, and there
  // appears to be no benefit from optimizing this case.
  Map* new_neander_map = Map::cast(obj);
  new_neander_map->set_elements_kind(TERMINAL_FAST_ELEMENTS_KIND);
  set_neander_map(new_neander_map);

  { MaybeObject* maybe_obj = AllocateJSObjectFromMap(neander_map());
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  Object* elements;
  { MaybeObject* maybe_elements = AllocateFixedArray(2);
    if (!maybe_elements->ToObject(&elements)) return false;
  }
  FixedArray::cast(elements)->set(0, Smi::FromInt(0));
  JSObject::cast(obj)->set_elements(FixedArray::cast(elements));
  set_message_listeners(JSObject::cast(obj));

  return true;
}


void Heap::CreateJSEntryStub() {
  JSEntryStub stub;
  set_js_entry_code(*stub.GetCode(isolate()));
}


void Heap::CreateJSConstructEntryStub() {
  JSConstructEntryStub stub;
  set_js_construct_entry_code(*stub.GetCode(isolate()));
}


void Heap::CreateFixedStubs() {
  // Here we create roots for fixed stubs. They are needed at GC
  // for cooking and uncooking (check out frames.cc).
  // This eliminates the need for doing dictionary lookups in the
  // stub cache for these stubs.
  HandleScope scope(isolate());
  // gcc-4.4 has a problem generating correct code for the following snippet:
  // { JSEntryStub stub;
  //   js_entry_code_ = *stub.GetCode();
  // }
  // { JSConstructEntryStub stub;
  //   js_construct_entry_code_ = *stub.GetCode();
  // }
  // To work around the problem, make separate functions without inlining.
  Heap::CreateJSEntryStub();
  Heap::CreateJSConstructEntryStub();

  // Create stubs that should be there, so we don't unexpectedly have to
  // create them if we need them during the creation of another stub.
  // Stub creation mixes raw pointers and handles in an unsafe manner, so
  // we cannot create stubs while we are creating stubs.
  CodeStub::GenerateStubsAheadOfTime(isolate());
}


void Heap::CreateStubsRequiringBuiltins() {
  HandleScope scope(isolate());
  CodeStub::GenerateStubsRequiringBuiltinsAheadOfTime(isolate());
}


bool Heap::CreateInitialObjects() {
  Object* obj;

  // The -0 value must be set before NumberFromDouble works.
  { MaybeObject* maybe_obj = AllocateHeapNumber(-0.0, TENURED);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_minus_zero_value(HeapNumber::cast(obj));
  ASSERT(std::signbit(minus_zero_value()->Number()) != 0);

  { MaybeObject* maybe_obj = AllocateHeapNumber(OS::nan_value(), TENURED);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_nan_value(HeapNumber::cast(obj));

  { MaybeObject* maybe_obj = AllocateHeapNumber(V8_INFINITY, TENURED);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_infinity_value(HeapNumber::cast(obj));

  // The hole has not been created yet, but we want to put something
  // predictable in the gaps in the string table, so let's make that Smi zero.
  set_the_hole_value(reinterpret_cast<Oddball*>(Smi::FromInt(0)));

  // Allocate the initial string table.
  { MaybeObject* maybe_obj =
        StringTable::Allocate(this, kInitialStringTableSize);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  // Don't use set_string_table() due to asserts.
  roots_[kStringTableRootIndex] = obj;

  // Finish initializing oddballs after creating the string table.
  { MaybeObject* maybe_obj =
        undefined_value()->Initialize(this,
                                      "undefined",
                                      nan_value(),
                                      Oddball::kUndefined);
    if (!maybe_obj->ToObject(&obj)) return false;
  }

  // Initialize the null_value.
  { MaybeObject* maybe_obj = null_value()->Initialize(
      this, "null", Smi::FromInt(0), Oddball::kNull);
    if (!maybe_obj->ToObject(&obj)) return false;
  }

  { MaybeObject* maybe_obj = CreateOddball("true",
                                           Smi::FromInt(1),
                                           Oddball::kTrue);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_true_value(Oddball::cast(obj));

  { MaybeObject* maybe_obj = CreateOddball("false",
                                           Smi::FromInt(0),
                                           Oddball::kFalse);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_false_value(Oddball::cast(obj));

  { MaybeObject* maybe_obj = CreateOddball("hole",
                                           Smi::FromInt(-1),
                                           Oddball::kTheHole);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_the_hole_value(Oddball::cast(obj));

  { MaybeObject* maybe_obj = CreateOddball("uninitialized",
                                           Smi::FromInt(-1),
                                           Oddball::kUninitialized);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_uninitialized_value(Oddball::cast(obj));

  { MaybeObject* maybe_obj = CreateOddball("arguments_marker",
                                           Smi::FromInt(-4),
                                           Oddball::kArgumentMarker);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_arguments_marker(Oddball::cast(obj));

  { MaybeObject* maybe_obj = CreateOddball("no_interceptor_result_sentinel",
                                           Smi::FromInt(-2),
                                           Oddball::kOther);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_no_interceptor_result_sentinel(obj);

  { MaybeObject* maybe_obj = CreateOddball("termination_exception",
                                           Smi::FromInt(-3),
                                           Oddball::kOther);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_termination_exception(obj);

  for (unsigned i = 0; i < ARRAY_SIZE(constant_string_table); i++) {
    { MaybeObject* maybe_obj =
          InternalizeUtf8String(constant_string_table[i].contents);
      if (!maybe_obj->ToObject(&obj)) return false;
    }
    roots_[constant_string_table[i].index] = String::cast(obj);
  }

  // Allocate the hidden string which is used to identify the hidden properties
  // in JSObjects. The hash code has a special value so that it will not match
  // the empty string when searching for the property. It cannot be part of the
  // loop above because it needs to be allocated manually with the special
  // hash code in place. The hash code for the hidden_string is zero to ensure
  // that it will always be at the first entry in property descriptors.
  { MaybeObject* maybe_obj = AllocateOneByteInternalizedString(
      OneByteVector("", 0), String::kEmptyStringHash);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  hidden_string_ = String::cast(obj);

  // Allocate the code_stubs dictionary. The initial size is set to avoid
  // expanding the dictionary during bootstrapping.
  { MaybeObject* maybe_obj = UnseededNumberDictionary::Allocate(this, 128);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_code_stubs(UnseededNumberDictionary::cast(obj));

  // Allocate the non_monomorphic_cache used in stub-cache.cc. The initial size
  // is set to avoid expanding the dictionary during bootstrapping.
  { MaybeObject* maybe_obj = UnseededNumberDictionary::Allocate(this, 64);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_non_monomorphic_cache(UnseededNumberDictionary::cast(obj));

  { MaybeObject* maybe_obj = AllocatePolymorphicCodeCache();
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_polymorphic_code_cache(PolymorphicCodeCache::cast(obj));

  set_instanceof_cache_function(Smi::FromInt(0));
  set_instanceof_cache_map(Smi::FromInt(0));
  set_instanceof_cache_answer(Smi::FromInt(0));

  CreateFixedStubs();

  // Allocate the dictionary of intrinsic function names.
  { MaybeObject* maybe_obj =
        NameDictionary::Allocate(this, Runtime::kNumFunctions);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  { MaybeObject* maybe_obj = Runtime::InitializeIntrinsicFunctionNames(this,
                                                                       obj);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_intrinsic_function_names(NameDictionary::cast(obj));

  { MaybeObject* maybe_obj = AllocateInitialNumberStringCache();
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_number_string_cache(FixedArray::cast(obj));

  // Allocate the cache for single-character one-byte strings.
  { MaybeObject* maybe_obj =
        AllocateFixedArray(String::kMaxOneByteCharCode + 1, TENURED);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_single_character_string_cache(FixedArray::cast(obj));

  // Allocate the cache for string split.
  { MaybeObject* maybe_obj = AllocateFixedArray(
      RegExpResultsCache::kRegExpResultsCacheSize, TENURED);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_string_split_cache(FixedArray::cast(obj));

  { MaybeObject* maybe_obj = AllocateFixedArray(
      RegExpResultsCache::kRegExpResultsCacheSize, TENURED);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_regexp_multiple_cache(FixedArray::cast(obj));

  // Allocate the cache for external strings pointing to native source code.
  { MaybeObject* maybe_obj = AllocateFixedArray(Natives::GetBuiltinsCount());
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_natives_source_cache(FixedArray::cast(obj));

  { MaybeObject* maybe_obj = AllocateCell(undefined_value());
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_undefined_cell(Cell::cast(obj));

  // Allocate an object to hold object observation state.
  { MaybeObject* maybe_obj = AllocateMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  { MaybeObject* maybe_obj = AllocateJSObjectFromMap(Map::cast(obj));
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_observation_state(JSObject::cast(obj));

  { MaybeObject* maybe_obj = AllocateSymbol();
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  Symbol::cast(obj)->set_is_private(true);
  set_frozen_symbol(Symbol::cast(obj));

  { MaybeObject* maybe_obj = AllocateSymbol();
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  Symbol::cast(obj)->set_is_private(true);
  set_elements_transition_symbol(Symbol::cast(obj));

  { MaybeObject* maybe_obj = SeededNumberDictionary::Allocate(this, 0, TENURED);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  SeededNumberDictionary::cast(obj)->set_requires_slow_elements();
  set_empty_slow_element_dictionary(SeededNumberDictionary::cast(obj));

  { MaybeObject* maybe_obj = AllocateSymbol();
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  Symbol::cast(obj)->set_is_private(true);
  set_observed_symbol(Symbol::cast(obj));

  // Handling of script id generation is in Factory::NewScript.
  set_last_script_id(Smi::FromInt(v8::Script::kNoScriptId));

  // Initialize the keyed lookup cache.
  isolate_->keyed_lookup_cache()->Clear();

  // Initialize the context slot cache.
  isolate_->context_slot_cache()->Clear();

  // Initialize the descriptor cache.
  isolate_->descriptor_lookup_cache()->Clear();

  // Initialize the compilation cache.
  isolate_->compilation_cache()->Clear();

  return true;
}


bool Heap::RootCanBeWrittenAfterInitialization(Heap::RootListIndex root_index) {
  RootListIndex writable_roots[] = {
    kStoreBufferTopRootIndex,
    kStackLimitRootIndex,
    kNumberStringCacheRootIndex,
    kInstanceofCacheFunctionRootIndex,
    kInstanceofCacheMapRootIndex,
    kInstanceofCacheAnswerRootIndex,
    kCodeStubsRootIndex,
    kNonMonomorphicCacheRootIndex,
    kPolymorphicCodeCacheRootIndex,
    kLastScriptIdRootIndex,
    kEmptyScriptRootIndex,
    kRealStackLimitRootIndex,
    kArgumentsAdaptorDeoptPCOffsetRootIndex,
    kConstructStubDeoptPCOffsetRootIndex,
    kGetterStubDeoptPCOffsetRootIndex,
    kSetterStubDeoptPCOffsetRootIndex,
    kStringTableRootIndex,
  };

  for (unsigned int i = 0; i < ARRAY_SIZE(writable_roots); i++) {
    if (root_index == writable_roots[i])
      return true;
  }
  return false;
}


bool Heap::RootCanBeTreatedAsConstant(RootListIndex root_index) {
  return !RootCanBeWrittenAfterInitialization(root_index) &&
      !InNewSpace(roots_array_start()[root_index]);
}


Object* RegExpResultsCache::Lookup(Heap* heap,
                                   String* key_string,
                                   Object* key_pattern,
                                   ResultsCacheType type) {
  FixedArray* cache;
  if (!key_string->IsInternalizedString()) return Smi::FromInt(0);
  if (type == STRING_SPLIT_SUBSTRINGS) {
    ASSERT(key_pattern->IsString());
    if (!key_pattern->IsInternalizedString()) return Smi::FromInt(0);
    cache = heap->string_split_cache();
  } else {
    ASSERT(type == REGEXP_MULTIPLE_INDICES);
    ASSERT(key_pattern->IsFixedArray());
    cache = heap->regexp_multiple_cache();
  }

  uint32_t hash = key_string->Hash();
  uint32_t index = ((hash & (kRegExpResultsCacheSize - 1)) &
      ~(kArrayEntriesPerCacheEntry - 1));
  if (cache->get(index + kStringOffset) == key_string &&
      cache->get(index + kPatternOffset) == key_pattern) {
    return cache->get(index + kArrayOffset);
  }
  index =
      ((index + kArrayEntriesPerCacheEntry) & (kRegExpResultsCacheSize - 1));
  if (cache->get(index + kStringOffset) == key_string &&
      cache->get(index + kPatternOffset) == key_pattern) {
    return cache->get(index + kArrayOffset);
  }
  return Smi::FromInt(0);
}


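// A worked example of the probe arithmetic above, assuming for illustration
// kRegExpResultsCacheSize == 0x100 and kArrayEntriesPerCacheEntry == 4: a
// hash of 0x1234 gives primary slot (0x1234 & 0xff) & ~3 == 0x34, and the
// secondary probe is (0x34 + 4) & 0xff == 0x38. Each key gets exactly two
// entry-aligned slots, checked in order. Sketch under those assumptions:
#if 0
static uint32_t PrimarySlot(uint32_t hash) {
  return (hash & (0x100 - 1)) & ~(4u - 1);  // Entry-aligned slot in cache.
}
static uint32_t SecondarySlot(uint32_t primary) {
  return (primary + 4) & (0x100 - 1);       // Next entry, wrapping around.
}
#endif

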
void RegExpResultsCache::Enter(Heap* heap,
                               String* key_string,
                               Object* key_pattern,
                               FixedArray* value_array,
                               ResultsCacheType type) {
  FixedArray* cache;
  if (!key_string->IsInternalizedString()) return;
  if (type == STRING_SPLIT_SUBSTRINGS) {
    ASSERT(key_pattern->IsString());
    if (!key_pattern->IsInternalizedString()) return;
    cache = heap->string_split_cache();
  } else {
    ASSERT(type == REGEXP_MULTIPLE_INDICES);
    ASSERT(key_pattern->IsFixedArray());
    cache = heap->regexp_multiple_cache();
  }

  uint32_t hash = key_string->Hash();
  uint32_t index = ((hash & (kRegExpResultsCacheSize - 1)) &
      ~(kArrayEntriesPerCacheEntry - 1));
  if (cache->get(index + kStringOffset) == Smi::FromInt(0)) {
    cache->set(index + kStringOffset, key_string);
    cache->set(index + kPatternOffset, key_pattern);
    cache->set(index + kArrayOffset, value_array);
  } else {
    uint32_t index2 =
        ((index + kArrayEntriesPerCacheEntry) & (kRegExpResultsCacheSize - 1));
    if (cache->get(index2 + kStringOffset) == Smi::FromInt(0)) {
      cache->set(index2 + kStringOffset, key_string);
      cache->set(index2 + kPatternOffset, key_pattern);
      cache->set(index2 + kArrayOffset, value_array);
    } else {
      cache->set(index2 + kStringOffset, Smi::FromInt(0));
      cache->set(index2 + kPatternOffset, Smi::FromInt(0));
      cache->set(index2 + kArrayOffset, Smi::FromInt(0));
      cache->set(index + kStringOffset, key_string);
      cache->set(index + kPatternOffset, key_pattern);
      cache->set(index + kArrayOffset, value_array);
    }
  }
  // If the array is a reasonably short list of substrings, convert it into a
  // list of internalized strings.
  if (type == STRING_SPLIT_SUBSTRINGS && value_array->length() < 100) {
    for (int i = 0; i < value_array->length(); i++) {
      String* str = String::cast(value_array->get(i));
      Object* internalized_str;
      MaybeObject* maybe_string = heap->InternalizeString(str);
      if (maybe_string->ToObject(&internalized_str)) {
        value_array->set(i, internalized_str);
      }
    }
  }
  // Convert the backing store to a copy-on-write array.
  value_array->set_map_no_write_barrier(heap->fixed_cow_array_map());
}


void RegExpResultsCache::Clear(FixedArray* cache) {
  for (int i = 0; i < kRegExpResultsCacheSize; i++) {
    cache->set(i, Smi::FromInt(0));
  }
}


MaybeObject* Heap::AllocateInitialNumberStringCache() {
  MaybeObject* maybe_obj =
      AllocateFixedArray(kInitialNumberStringCacheSize * 2, TENURED);
  return maybe_obj;
}


int Heap::FullSizeNumberStringCacheLength() {
  // Compute the size of the number string cache based on the max newspace size.
  // The number string cache has a minimum size based on twice the initial cache
  // size to ensure that it is bigger after being made 'full size'.
  int number_string_cache_size = max_semispace_size_ / 512;
  number_string_cache_size = Max(kInitialNumberStringCacheSize * 2,
                                 Min(0x4000, number_string_cache_size));
  // There is a string and a number per entry, so the length is twice the
  // number of entries.
  return number_string_cache_size * 2;
}


void Heap::AllocateFullSizeNumberStringCache() {
  // The idea is to have a small number string cache in the snapshot to keep
  // boot-time memory usage down. If we expand the number string cache already
  // while creating the snapshot then that didn't work out.
  ASSERT(!Serializer::enabled() || FLAG_extra_code != NULL);
  MaybeObject* maybe_obj =
      AllocateFixedArray(FullSizeNumberStringCacheLength(), TENURED);
  Object* new_cache;
  if (maybe_obj->ToObject(&new_cache)) {
    // We don't bother to repopulate the cache with entries from the old cache.
    // It will be repopulated soon enough with new strings.
    set_number_string_cache(FixedArray::cast(new_cache));
  }
  // If allocation fails then we just return without doing anything. It is
  // only a cache, so best effort is OK here.
}


void Heap::FlushNumberStringCache() {
  // Flush the number to string cache.
  int len = number_string_cache()->length();
  for (int i = 0; i < len; i++) {
    number_string_cache()->set_undefined(i);
  }
}


static inline int double_get_hash(double d) {
  DoubleRepresentation rep(d);
  return static_cast<int>(rep.bits) ^ static_cast<int>(rep.bits >> 32);
}


static inline int smi_get_hash(Smi* smi) {
  return smi->value();
}


Object* Heap::GetNumberStringCache(Object* number) {
  int hash;
  int mask = (number_string_cache()->length() >> 1) - 1;
  if (number->IsSmi()) {
    hash = smi_get_hash(Smi::cast(number)) & mask;
  } else {
    hash = double_get_hash(number->Number()) & mask;
  }
  Object* key = number_string_cache()->get(hash * 2);
  if (key == number) {
    return String::cast(number_string_cache()->get(hash * 2 + 1));
  } else if (key->IsHeapNumber() &&
             number->IsHeapNumber() &&
             key->Number() == number->Number()) {
    return String::cast(number_string_cache()->get(hash * 2 + 1));
  }
  return undefined_value();
}


void Heap::SetNumberStringCache(Object* number, String* string) {
  int hash;
  int mask = (number_string_cache()->length() >> 1) - 1;
  if (number->IsSmi()) {
    hash = smi_get_hash(Smi::cast(number)) & mask;
  } else {
    hash = double_get_hash(number->Number()) & mask;
  }
  if (number_string_cache()->get(hash * 2) != undefined_value() &&
      number_string_cache()->length() != FullSizeNumberStringCacheLength()) {
    // The first time we have a hash collision, we move to the full sized
    // number string cache.
    AllocateFullSizeNumberStringCache();
    return;
  }
  number_string_cache()->set(hash * 2, number);
  number_string_cache()->set(hash * 2 + 1, string);
}


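// Layout note for the two functions above: the number string cache is a flat
// FixedArray of (key, value) pairs, so entry i occupies slot 2 * i (the
// number) and slot 2 * i + 1 (its cached string). The mask is
// (length / 2) - 1, which is why the cache length must be twice a power of
// two.

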
MaybeObject* Heap::NumberToString(Object* number,
                                  bool check_number_string_cache) {
  isolate_->counters()->number_to_string_runtime()->Increment();
  if (check_number_string_cache) {
    Object* cached = GetNumberStringCache(number);
    if (cached != undefined_value()) {
      return cached;
    }
  }

  char arr[100];
  Vector<char> buffer(arr, ARRAY_SIZE(arr));
  const char* str;
  if (number->IsSmi()) {
    int num = Smi::cast(number)->value();
    str = IntToCString(num, buffer);
  } else {
    double num = HeapNumber::cast(number)->value();
    str = DoubleToCString(num, buffer);
  }

  Object* js_string;

  // We tenure the allocated string since it is referenced from the
  // number-string cache which lives in the old space.
  MaybeObject* maybe_js_string =
      AllocateStringFromOneByte(CStrVector(str), TENURED);
  if (maybe_js_string->ToObject(&js_string)) {
    SetNumberStringCache(number, String::cast(js_string));
  }
  return maybe_js_string;
}


MaybeObject* Heap::Uint32ToString(uint32_t value,
                                  bool check_number_string_cache) {
  Object* number;
  MaybeObject* maybe = NumberFromUint32(value);
  if (!maybe->To<Object>(&number)) return maybe;
  return NumberToString(number, check_number_string_cache);
}


Map* Heap::MapForExternalArrayType(ExternalArrayType array_type) {
  return Map::cast(roots_[RootIndexForExternalArrayType(array_type)]);
}


Heap::RootListIndex Heap::RootIndexForExternalArrayType(
    ExternalArrayType array_type) {
  switch (array_type) {
#define ARRAY_TYPE_TO_ROOT_INDEX(Type, type, TYPE, ctype, size)               \
    case kExternal##Type##Array:                                              \
      return kExternal##Type##ArrayMapRootIndex;

    TYPED_ARRAYS(ARRAY_TYPE_TO_ROOT_INDEX)
#undef ARRAY_TYPE_TO_ROOT_INDEX

    default:
      UNREACHABLE();
      return kUndefinedValueRootIndex;
  }
}


3610 Map* Heap::MapForFixedTypedArray(ExternalArrayType array_type) {
3611 return Map::cast(roots_[RootIndexForFixedTypedArray(array_type)]);
3615 Heap::RootListIndex Heap::RootIndexForFixedTypedArray(
3616 ExternalArrayType array_type) {
3617 switch (array_type) {
3618 #define ARRAY_TYPE_TO_ROOT_INDEX(Type, type, TYPE, ctype, size) \
3619 case kExternal##Type##Array: \
3620 return kFixed##Type##ArrayMapRootIndex;
3622 TYPED_ARRAYS(ARRAY_TYPE_TO_ROOT_INDEX)
3623 #undef ARRAY_TYPE_TO_ROOT_INDEX
3625 default:
3626 UNREACHABLE();
3627 return kUndefinedValueRootIndex;
3628 }
3629 }
3632 Heap::RootListIndex Heap::RootIndexForEmptyExternalArray(
3633 ElementsKind elementsKind) {
3634 switch (elementsKind) {
3635 #define ELEMENT_KIND_TO_ROOT_INDEX(Type, type, TYPE, ctype, size) \
3636 case EXTERNAL_##TYPE##_ELEMENTS: \
3637 return kEmptyExternal##Type##ArrayRootIndex;
3639 TYPED_ARRAYS(ELEMENT_KIND_TO_ROOT_INDEX)
3640 #undef ELEMENT_KIND_TO_ROOT_INDEX
3642 default:
3643 UNREACHABLE();
3644 return kUndefinedValueRootIndex;
3645 }
3646 }
3649 ExternalArray* Heap::EmptyExternalArrayForMap(Map* map) {
3650 return ExternalArray::cast(
3651 roots_[RootIndexForEmptyExternalArray(map->elements_kind())]);
3652 }
3655 MaybeObject* Heap::NumberFromDouble(double value, PretenureFlag pretenure) {
3656 // We need to distinguish the minus zero value and this cannot be
3657 // done after conversion to int. Doing this by comparing bit
3658 // patterns is faster than using fpclassify() et al.
3659 if (IsMinusZero(value)) {
3660 return AllocateHeapNumber(-0.0, pretenure);
3661 }
3663 int int_value = FastD2I(value);
3664 if (value == int_value && Smi::IsValid(int_value)) {
3665 return Smi::FromInt(int_value);
3666 }
3668 // Materialize the value in the heap.
3669 return AllocateHeapNumber(value, pretenure);
3670 }
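// Editor's note: illustrative values, not part of the original source. Only
// doubles that round-trip through FastD2I and pass Smi::IsValid become
// Smis; -0.0 is filtered out first because it compares equal to 0 as an int:
//
//   NumberFromDouble(7.0);   // Smi::FromInt(7), no heap allocation
//   NumberFromDouble(-0.0);  // HeapNumber, preserving the sign bit
//   NumberFromDouble(0.5);   // HeapNumber, value is not integral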
3673 MaybeObject* Heap::AllocateForeign(Address address, PretenureFlag pretenure) {
3674 // Statically ensure that it is safe to allocate foreigns in paged spaces.
3675 STATIC_ASSERT(Foreign::kSize <= Page::kMaxRegularHeapObjectSize);
3676 AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
3677 Foreign* result;
3678 MaybeObject* maybe_result = Allocate(foreign_map(), space);
3679 if (!maybe_result->To(&result)) return maybe_result;
3680 result->set_foreign_address(address);
3681 return result;
3682 }
3685 MaybeObject* Heap::AllocateSharedFunctionInfo(Object* name) {
3686 SharedFunctionInfo* share;
3687 MaybeObject* maybe = Allocate(shared_function_info_map(), OLD_POINTER_SPACE);
3688 if (!maybe->To<SharedFunctionInfo>(&share)) return maybe;
3690 // Set pointer fields.
3691 share->set_name(name);
3692 Code* illegal = isolate_->builtins()->builtin(Builtins::kIllegal);
3693 share->set_code(illegal);
3694 share->set_optimized_code_map(Smi::FromInt(0));
3695 share->set_scope_info(ScopeInfo::Empty(isolate_));
3696 Code* construct_stub =
3697 isolate_->builtins()->builtin(Builtins::kJSConstructStubGeneric);
3698 share->set_construct_stub(construct_stub);
3699 share->set_instance_class_name(Object_string());
3700 share->set_function_data(undefined_value(), SKIP_WRITE_BARRIER);
3701 share->set_script(undefined_value(), SKIP_WRITE_BARRIER);
3702 share->set_debug_info(undefined_value(), SKIP_WRITE_BARRIER);
3703 share->set_inferred_name(empty_string(), SKIP_WRITE_BARRIER);
3704 share->set_initial_map(undefined_value(), SKIP_WRITE_BARRIER);
3705 share->set_ast_node_count(0);
3706 share->set_counters(0);
3708 // Set integer fields (smi or int, depending on the architecture).
3709 share->set_length(0);
3710 share->set_formal_parameter_count(0);
3711 share->set_expected_nof_properties(0);
3712 share->set_num_literals(0);
3713 share->set_start_position_and_type(0);
3714 share->set_end_position(0);
3715 share->set_function_token_position(0);
3716 // All compiler hints default to false or 0.
3717 share->set_compiler_hints(0);
3718 share->set_opt_count_and_bailout_reason(0);
3720 return share;
3721 }
3724 MaybeObject* Heap::AllocateJSMessageObject(String* type,
3725 JSArray* arguments,
3726 int start_position,
3727 int end_position,
3728 Object* script,
3729 Object* stack_trace,
3730 Object* stack_frames) {
3731 Object* result;
3732 { MaybeObject* maybe_result = Allocate(message_object_map(), NEW_SPACE);
3733 if (!maybe_result->ToObject(&result)) return maybe_result;
3734 }
3735 JSMessageObject* message = JSMessageObject::cast(result);
3736 message->set_properties(Heap::empty_fixed_array(), SKIP_WRITE_BARRIER);
3737 message->initialize_elements();
3738 message->set_elements(Heap::empty_fixed_array(), SKIP_WRITE_BARRIER);
3739 message->set_type(type);
3740 message->set_arguments(arguments);
3741 message->set_start_position(start_position);
3742 message->set_end_position(end_position);
3743 message->set_script(script);
3744 message->set_stack_trace(stack_trace);
3745 message->set_stack_frames(stack_frames);
3746 return result;
3747 }
3750 MaybeObject* Heap::AllocateExternalStringFromAscii(
3751 const ExternalAsciiString::Resource* resource) {
3752 size_t length = resource->length();
3753 if (length > static_cast<size_t>(String::kMaxLength)) {
3754 isolate()->context()->mark_out_of_memory();
3755 return Failure::OutOfMemoryException(0x5);
3756 }
3758 Map* map = external_ascii_string_map();
3759 Object* result;
3760 { MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
3761 if (!maybe_result->ToObject(&result)) return maybe_result;
3762 }
3764 ExternalAsciiString* external_string = ExternalAsciiString::cast(result);
3765 external_string->set_length(static_cast<int>(length));
3766 external_string->set_hash_field(String::kEmptyHashField);
3767 external_string->set_resource(resource);
3769 return result;
3770 }
3773 MaybeObject* Heap::AllocateExternalStringFromTwoByte(
3774 const ExternalTwoByteString::Resource* resource) {
3775 size_t length = resource->length();
3776 if (length > static_cast<size_t>(String::kMaxLength)) {
3777 isolate()->context()->mark_out_of_memory();
3778 return Failure::OutOfMemoryException(0x6);
3779 }
3781 // For small strings we check whether the resource contains only
3782 // one byte characters. If yes, we use a different string map.
3783 static const size_t kOneByteCheckLengthLimit = 32;
3784 bool is_one_byte = length <= kOneByteCheckLengthLimit &&
3785 String::IsOneByte(resource->data(), static_cast<int>(length));
3786 Map* map = is_one_byte ?
3787 external_string_with_one_byte_data_map() : external_string_map();
3788 Object* result;
3789 { MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
3790 if (!maybe_result->ToObject(&result)) return maybe_result;
3791 }
3793 ExternalTwoByteString* external_string = ExternalTwoByteString::cast(result);
3794 external_string->set_length(static_cast<int>(length));
3795 external_string->set_hash_field(String::kEmptyHashField);
3796 external_string->set_resource(resource);
3798 return result;
3799 }
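// Editor's note: an illustrative example, not part of the original source.
// The one-byte scan above is capped at kOneByteCheckLengthLimit (32)
// characters so that externalizing a long two-byte string stays cheap: a
// short resource whose data passes String::IsOneByte() gets
// external_string_with_one_byte_data_map(), while a longer resource is
// given the plain external_string_map() without being scanned at all.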
3802 MaybeObject* Heap::LookupSingleCharacterStringFromCode(uint16_t code) {
3803 if (code <= String::kMaxOneByteCharCode) {
3804 Object* value = single_character_string_cache()->get(code);
3805 if (value != undefined_value()) return value;
3807 uint8_t buffer[1];
3808 buffer[0] = static_cast<uint8_t>(code);
3809 Object* result;
3810 OneByteStringKey key(Vector<const uint8_t>(buffer, 1), HashSeed());
3811 MaybeObject* maybe_result = InternalizeStringWithKey(&key);
3813 if (!maybe_result->ToObject(&result)) return maybe_result;
3814 single_character_string_cache()->set(code, result);
3815 return result;
3816 }
3818 SeqTwoByteString* result;
3819 { MaybeObject* maybe_result = AllocateRawTwoByteString(1);
3820 if (!maybe_result->To<SeqTwoByteString>(&result)) return maybe_result;
3821 }
3822 result->SeqTwoByteStringSet(0, code);
3823 return result;
3824 }
3827 MaybeObject* Heap::AllocateByteArray(int length, PretenureFlag pretenure) {
3828 if (length < 0 || length > ByteArray::kMaxLength) {
3829 return Failure::OutOfMemoryException(0x7);
3830 }
3831 int size = ByteArray::SizeFor(length);
3832 AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
3833 Object* result;
3834 { MaybeObject* maybe_result = AllocateRaw(size, space, OLD_DATA_SPACE);
3835 if (!maybe_result->ToObject(&result)) return maybe_result;
3836 }
3838 reinterpret_cast<ByteArray*>(result)->set_map_no_write_barrier(
3839 byte_array_map());
3840 reinterpret_cast<ByteArray*>(result)->set_length(length);
3841 return result;
3842 }
3845 void Heap::CreateFillerObjectAt(Address addr, int size) {
3846 if (size == 0) return;
3847 HeapObject* filler = HeapObject::FromAddress(addr);
3848 if (size == kPointerSize) {
3849 filler->set_map_no_write_barrier(one_pointer_filler_map());
3850 } else if (size == 2 * kPointerSize) {
3851 filler->set_map_no_write_barrier(two_pointer_filler_map());
3852 } else {
3853 filler->set_map_no_write_barrier(free_space_map());
3854 FreeSpace::cast(filler)->set_size(size);
3855 }
3856 }
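// Editor's note: an illustrative sketch, not part of the original source.
// The filler map is chosen purely by size so the heap stays parseable:
//
//   CreateFillerObjectAt(addr, kPointerSize);      // one-pointer filler
//   CreateFillerObjectAt(addr, 2 * kPointerSize);  // two-pointer filler
//   CreateFillerObjectAt(addr, 64);                // FreeSpace of size 64
//
// Only the FreeSpace filler carries an explicit size field; the two small
// fillers encode their size in the map itself.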
3859 MaybeObject* Heap::AllocateExternalArray(int length,
3860 ExternalArrayType array_type,
3861 void* external_pointer,
3862 PretenureFlag pretenure) {
3863 int size = ExternalArray::kAlignedSize;
3864 AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
3865 Object* result;
3866 { MaybeObject* maybe_result = AllocateRaw(size, space, OLD_DATA_SPACE);
3867 if (!maybe_result->ToObject(&result)) return maybe_result;
3868 }
3870 reinterpret_cast<ExternalArray*>(result)->set_map_no_write_barrier(
3871 MapForExternalArrayType(array_type));
3872 reinterpret_cast<ExternalArray*>(result)->set_length(length);
3873 reinterpret_cast<ExternalArray*>(result)->set_external_pointer(
3874 external_pointer);
3875 return result;
3876 }
3879 static void ForFixedTypedArray(ExternalArrayType array_type,
3880 int* element_size,
3881 ElementsKind* element_kind) {
3882 switch (array_type) {
3883 #define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
3884 case kExternal##Type##Array: \
3885 *element_size = size; \
3886 *element_kind = TYPE##_ELEMENTS; \
3887 return;
3889 TYPED_ARRAYS(TYPED_ARRAY_CASE)
3890 #undef TYPED_ARRAY_CASE
3892 default:
3893 *element_size = 0; // Bogus
3894 *element_kind = UINT8_ELEMENTS; // Bogus
3895 UNREACHABLE();
3896 }
3897 }
3900 MaybeObject* Heap::AllocateFixedTypedArray(int length,
3901 ExternalArrayType array_type,
3902 PretenureFlag pretenure) {
3903 int element_size;
3904 ElementsKind elements_kind;
3905 ForFixedTypedArray(array_type, &element_size, &elements_kind);
3906 int size = OBJECT_POINTER_ALIGN(
3907 length * element_size + FixedTypedArrayBase::kDataOffset);
3908 #ifndef V8_HOST_ARCH_64_BIT
3909 if (array_type == kExternalFloat64Array) {
3910 size += kPointerSize;
3911 }
3912 #endif
3913 AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
3915 HeapObject* object;
3916 MaybeObject* maybe_object = AllocateRaw(size, space, OLD_DATA_SPACE);
3917 if (!maybe_object->To(&object)) return maybe_object;
3919 if (array_type == kExternalFloat64Array) {
3920 object = EnsureDoubleAligned(this, object, size);
3921 }
3923 FixedTypedArrayBase* elements =
3924 reinterpret_cast<FixedTypedArrayBase*>(object);
3925 elements->set_map(MapForFixedTypedArray(array_type));
3926 elements->set_length(length);
3928 return elements;
3929 }
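// Editor's note: an illustrative example, not part of the original source.
// The size computation above rounds the payload up to pointer alignment,
// e.g. for a kExternalUint8Array of length 10:
//
//   int size = OBJECT_POINTER_ALIGN(
//       10 * 1 + FixedTypedArrayBase::kDataOffset);
//
// For kExternalFloat64Array on 32-bit hosts one extra kPointerSize is
// reserved so EnsureDoubleAligned() can shift the object to an 8-byte
// boundary.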
3931 MaybeObject* Heap::CreateCode(const CodeDesc& desc,
3932 Code::Flags flags,
3933 Handle<Object> self_reference,
3934 bool immovable,
3935 bool crankshafted,
3936 int prologue_offset) {
3937 // Allocate ByteArray before the Code object, so that we do not risk
3938 // leaving uninitialized Code object (and breaking the heap).
3939 ByteArray* reloc_info;
3940 MaybeObject* maybe_reloc_info = AllocateByteArray(desc.reloc_size, TENURED);
3941 if (!maybe_reloc_info->To(&reloc_info)) return maybe_reloc_info;
3944 int body_size = RoundUp(desc.instr_size, kObjectAlignment);
3945 int obj_size = Code::SizeFor(body_size);
3946 ASSERT(IsAligned(static_cast<intptr_t>(obj_size), kCodeAlignment));
3947 MaybeObject* maybe_result;
3948 // Large code objects and code objects which should stay at a fixed address
3949 // are allocated in large object space.
3950 HeapObject* result;
3951 bool force_lo_space = obj_size > code_space()->AreaSize();
3952 if (force_lo_space) {
3953 maybe_result = lo_space_->AllocateRaw(obj_size, EXECUTABLE);
3954 } else {
3955 maybe_result = AllocateRaw(obj_size, CODE_SPACE, CODE_SPACE);
3956 }
3957 if (!maybe_result->To<HeapObject>(&result)) return maybe_result;
3959 if (immovable && !force_lo_space &&
3960 // Objects on the first page of each space are never moved.
3961 !code_space_->FirstPage()->Contains(result->address())) {
3962 // Discard the first code allocation, which was on a page where it could be
3963 // moved.
3964 CreateFillerObjectAt(result->address(), obj_size);
3965 maybe_result = lo_space_->AllocateRaw(obj_size, EXECUTABLE);
3966 if (!maybe_result->To<HeapObject>(&result)) return maybe_result;
3967 }
3969 // Initialize the object
3970 result->set_map_no_write_barrier(code_map());
3971 Code* code = Code::cast(result);
3972 ASSERT(!isolate_->code_range()->exists() ||
3973 isolate_->code_range()->contains(code->address()));
3974 code->set_instruction_size(desc.instr_size);
3975 code->set_relocation_info(reloc_info);
3976 code->set_flags(flags);
3977 code->set_raw_kind_specific_flags1(0);
3978 code->set_raw_kind_specific_flags2(0);
3979 if (code->is_call_stub() || code->is_keyed_call_stub()) {
3980 code->set_check_type(RECEIVER_MAP_CHECK);
3981 }
3982 code->set_is_crankshafted(crankshafted);
3983 code->set_deoptimization_data(empty_fixed_array(), SKIP_WRITE_BARRIER);
3984 code->set_raw_type_feedback_info(undefined_value());
3985 code->set_handler_table(empty_fixed_array(), SKIP_WRITE_BARRIER);
3986 code->set_gc_metadata(Smi::FromInt(0));
3987 code->set_ic_age(global_ic_age_);
3988 code->set_prologue_offset(prologue_offset);
3989 if (code->kind() == Code::OPTIMIZED_FUNCTION) {
3990 code->set_marked_for_deoptimization(false);
3991 }
3992 code->set_constant_pool(empty_constant_pool_array());
3994 #ifdef ENABLE_DEBUGGER_SUPPORT
3995 if (code->kind() == Code::FUNCTION) {
3996 code->set_has_debug_break_slots(
3997 isolate_->debugger()->IsDebuggerActive());
3998 }
3999 #endif
4001 // Allow self references to created code object by patching the handle to
4002 // point to the newly allocated Code object.
4003 if (!self_reference.is_null()) {
4004 *(self_reference.location()) = code;
4005 }
4006 // Migrate generated code.
4007 // The generated code can contain Object** values (typically from handles)
4008 // that are dereferenced during the copy to point directly to the actual heap
4009 // objects. These pointers can include references to the code object itself,
4010 // through the self_reference parameter.
4011 code->CopyFrom(desc);
4013 #ifdef VERIFY_HEAP
4014 if (FLAG_verify_heap) {
4015 code->Verify();
4016 }
4017 #endif
4018 return code;
4019 }
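// Editor's note: not part of the original source. The immovable case above
// relies on two properties of the heap: large objects never move, and
// objects on the first page of a space never move. An immovable code object
// therefore only needs re-allocating into large object space when it landed
// off the first page of code space; the discarded first allocation is
// overwritten with a filler so the page remains iterable.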
4022 MaybeObject* Heap::CopyCode(Code* code) {
4023 // Allocate an object the same size as the code object.
4024 int obj_size = code->Size();
4025 MaybeObject* maybe_result;
4026 if (obj_size > code_space()->AreaSize()) {
4027 maybe_result = lo_space_->AllocateRaw(obj_size, EXECUTABLE);
4028 } else {
4029 maybe_result = AllocateRaw(obj_size, CODE_SPACE, CODE_SPACE);
4030 }
4032 Object* result;
4033 if (!maybe_result->ToObject(&result)) return maybe_result;
4035 // Copy code object.
4036 Address old_addr = code->address();
4037 Address new_addr = reinterpret_cast<HeapObject*>(result)->address();
4038 CopyBlock(new_addr, old_addr, obj_size);
4039 // Relocate the copy.
4040 Code* new_code = Code::cast(result);
4041 ASSERT(!isolate_->code_range()->exists() ||
4042 isolate_->code_range()->contains(code->address()));
4043 new_code->Relocate(new_addr - old_addr);
4044 return new_code;
4045 }
4048 MaybeObject* Heap::CopyCode(Code* code, Vector<byte> reloc_info) {
4049 // Allocate ByteArray before the Code object, so that we do not risk
4050 // leaving uninitialized Code object (and breaking the heap).
4051 Object* reloc_info_array;
4052 { MaybeObject* maybe_reloc_info_array =
4053 AllocateByteArray(reloc_info.length(), TENURED);
4054 if (!maybe_reloc_info_array->ToObject(&reloc_info_array)) {
4055 return maybe_reloc_info_array;
4056 }
4057 }
4059 int new_body_size = RoundUp(code->instruction_size(), kObjectAlignment);
4061 int new_obj_size = Code::SizeFor(new_body_size);
4063 Address old_addr = code->address();
4065 size_t relocation_offset =
4066 static_cast<size_t>(code->instruction_end() - old_addr);
4068 MaybeObject* maybe_result;
4069 if (new_obj_size > code_space()->AreaSize()) {
4070 maybe_result = lo_space_->AllocateRaw(new_obj_size, EXECUTABLE);
4071 } else {
4072 maybe_result = AllocateRaw(new_obj_size, CODE_SPACE, CODE_SPACE);
4073 }
4075 Object* result;
4076 if (!maybe_result->ToObject(&result)) return maybe_result;
4078 // Copy code object.
4079 Address new_addr = reinterpret_cast<HeapObject*>(result)->address();
4081 // Copy header and instructions.
4082 CopyBytes(new_addr, old_addr, relocation_offset);
4084 Code* new_code = Code::cast(result);
4085 new_code->set_relocation_info(ByteArray::cast(reloc_info_array));
4087 // Copy patched rinfo.
4088 CopyBytes(new_code->relocation_start(),
4089 reloc_info.start(),
4090 static_cast<size_t>(reloc_info.length()));
4092 // Relocate the copy.
4093 ASSERT(!isolate_->code_range()->exists() ||
4094 isolate_->code_range()->contains(code->address()));
4095 new_code->Relocate(new_addr - old_addr);
4097 #ifdef VERIFY_HEAP
4098 if (FLAG_verify_heap) {
4099 code->Verify();
4100 }
4101 #endif
4102 return new_code;
4103 }
4106 void Heap::InitializeAllocationMemento(AllocationMemento* memento,
4107 AllocationSite* allocation_site) {
4108 memento->set_map_no_write_barrier(allocation_memento_map());
4109 ASSERT(allocation_site->map() == allocation_site_map());
4110 memento->set_allocation_site(allocation_site, SKIP_WRITE_BARRIER);
4111 if (FLAG_allocation_site_pretenuring) {
4112 allocation_site->IncrementMementoCreateCount();
4113 }
4114 }
4117 MaybeObject* Heap::AllocateWithAllocationSite(Map* map, AllocationSpace space,
4118 Handle<AllocationSite> allocation_site) {
4119 ASSERT(gc_state_ == NOT_IN_GC);
4120 ASSERT(map->instance_type() != MAP_TYPE);
4121 // If allocation failures are disallowed, we may allocate in a different
4122 // space when new space is full and the object is not a large object.
4123 AllocationSpace retry_space =
4124 (space != NEW_SPACE) ? space : TargetSpaceId(map->instance_type());
4125 int size = map->instance_size() + AllocationMemento::kSize;
4126 Object* result;
4127 MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
4128 if (!maybe_result->ToObject(&result)) return maybe_result;
4129 // No need for write barrier since object is white and map is in old space.
4130 HeapObject::cast(result)->set_map_no_write_barrier(map);
4131 AllocationMemento* alloc_memento = reinterpret_cast<AllocationMemento*>(
4132 reinterpret_cast<Address>(result) + map->instance_size());
4133 InitializeAllocationMemento(alloc_memento, *allocation_site);
4134 return result;
4135 }
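// Editor's note: an illustrative sketch, not part of the original source.
// The memento is laid out directly behind the object it tracks, so a single
// raw allocation covers both:
//
//   | JSObject: map->instance_size() bytes | AllocationMemento::kSize bytes |
//   ^ result                               ^ result + map->instance_size()
//
// which is why the memento pointer is computed by offsetting the object's
// address by its instance size.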
4138 MaybeObject* Heap::Allocate(Map* map, AllocationSpace space) {
4139 ASSERT(gc_state_ == NOT_IN_GC);
4140 ASSERT(map->instance_type() != MAP_TYPE);
4141 // If allocation failures are disallowed, we may allocate in a different
4142 // space when new space is full and the object is not a large object.
4143 AllocationSpace retry_space =
4144 (space != NEW_SPACE) ? space : TargetSpaceId(map->instance_type());
4145 int size = map->instance_size();
4146 Object* result;
4147 MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
4148 if (!maybe_result->ToObject(&result)) return maybe_result;
4149 // No need for write barrier since object is white and map is in old space.
4150 HeapObject::cast(result)->set_map_no_write_barrier(map);
4151 return result;
4152 }
4155 void Heap::InitializeFunction(JSFunction* function,
4156 SharedFunctionInfo* shared,
4157 Object* prototype) {
4158 ASSERT(!prototype->IsMap());
4159 function->initialize_properties();
4160 function->initialize_elements();
4161 function->set_shared(shared);
4162 function->set_code(shared->code());
4163 function->set_prototype_or_initial_map(prototype);
4164 function->set_context(undefined_value());
4165 function->set_literals_or_bindings(empty_fixed_array());
4166 function->set_next_function_link(undefined_value());
4167 }
4170 MaybeObject* Heap::AllocateFunction(Map* function_map,
4171 SharedFunctionInfo* shared,
4172 Object* prototype,
4173 PretenureFlag pretenure) {
4174 AllocationSpace space =
4175 (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
4176 Object* result;
4177 { MaybeObject* maybe_result = Allocate(function_map, space);
4178 if (!maybe_result->ToObject(&result)) return maybe_result;
4179 }
4180 InitializeFunction(JSFunction::cast(result), shared, prototype);
4181 return result;
4182 }
4185 MaybeObject* Heap::AllocateArgumentsObject(Object* callee, int length) {
4186 // To get fast allocation and map sharing for arguments objects we
4187 // allocate them based on an arguments boilerplate.
4189 JSObject* boilerplate;
4190 int arguments_object_size;
4191 bool strict_mode_callee = callee->IsJSFunction() &&
4192 !JSFunction::cast(callee)->shared()->is_classic_mode();
4193 if (strict_mode_callee) {
4194 boilerplate =
4195 isolate()->context()->native_context()->
4196 strict_mode_arguments_boilerplate();
4197 arguments_object_size = kArgumentsObjectSizeStrict;
4198 } else {
4199 boilerplate =
4200 isolate()->context()->native_context()->arguments_boilerplate();
4201 arguments_object_size = kArgumentsObjectSize;
4202 }
4204 // Check that the size of the boilerplate matches our
4205 // expectations. The ArgumentsAccessStub::GenerateNewObject relies
4206 // on the size being a known constant.
4207 ASSERT(arguments_object_size == boilerplate->map()->instance_size());
4209 // Do the allocation.
4210 Object* result;
4211 { MaybeObject* maybe_result =
4212 AllocateRaw(arguments_object_size, NEW_SPACE, OLD_POINTER_SPACE);
4213 if (!maybe_result->ToObject(&result)) return maybe_result;
4214 }
4216 // Copy the content. The arguments boilerplate doesn't have any
4217 // fields that point to new space so it's safe to skip the write
4218 // barrier here.
4219 CopyBlock(HeapObject::cast(result)->address(),
4220 boilerplate->address(),
4221 JSObject::kHeaderSize);
4223 // Set the length property.
4224 JSObject::cast(result)->InObjectPropertyAtPut(kArgumentsLengthIndex,
4225 Smi::FromInt(length),
4226 SKIP_WRITE_BARRIER);
4227 // Set the callee property for non-strict mode arguments object only.
4228 if (!strict_mode_callee) {
4229 JSObject::cast(result)->InObjectPropertyAtPut(kArgumentsCalleeIndex,
4230 callee);
4231 }
4233 // Check the state of the object
4234 ASSERT(JSObject::cast(result)->HasFastProperties());
4235 ASSERT(JSObject::cast(result)->HasFastObjectElements());
4237 return result;
4238 }
4241 void Heap::InitializeJSObjectFromMap(JSObject* obj,
4242 FixedArray* properties,
4243 Map* map) {
4244 obj->set_properties(properties);
4245 obj->initialize_elements();
4246 // TODO(1240798): Initialize the object's body using valid initial values
4247 // according to the object's initial map. For example, if the map's
4248 // instance type is JS_ARRAY_TYPE, the length field should be initialized
4249 // to a number (e.g. Smi::FromInt(0)) and the elements initialized to a
4250 // fixed array (e.g. Heap::empty_fixed_array()). Currently, the object
4251 // verification code has to cope with (temporarily) invalid objects. See
4252 // for example, JSArray::JSArrayVerify).
4253 Object* filler;
4254 // We cannot always fill with one_pointer_filler_map because objects
4255 // created from API functions expect their internal fields to be initialized
4256 // with undefined_value.
4257 // Pre-allocated fields need to be initialized with undefined_value as well
4258 // so that object accesses before the constructor completes (e.g. in the
4259 // debugger) will not cause a crash.
4260 if (map->constructor()->IsJSFunction() &&
4261 JSFunction::cast(map->constructor())->shared()->
4262 IsInobjectSlackTrackingInProgress()) {
4263 // We might want to shrink the object later.
4264 ASSERT(obj->GetInternalFieldCount() == 0);
4265 filler = Heap::one_pointer_filler_map();
4266 } else {
4267 filler = Heap::undefined_value();
4268 }
4269 obj->InitializeBody(map, Heap::undefined_value(), filler);
4270 }
4273 MaybeObject* Heap::AllocateJSObjectFromMap(
4274 Map* map, PretenureFlag pretenure, bool allocate_properties) {
4275 // JSFunctions should be allocated using AllocateFunction to be
4276 // properly initialized.
4277 ASSERT(map->instance_type() != JS_FUNCTION_TYPE);
4279 // Both types of global objects should be allocated using
4280 // AllocateGlobalObject to be properly initialized.
4281 ASSERT(map->instance_type() != JS_GLOBAL_OBJECT_TYPE);
4282 ASSERT(map->instance_type() != JS_BUILTINS_OBJECT_TYPE);
4284 // Allocate the backing storage for the properties.
4285 FixedArray* properties;
4286 if (allocate_properties) {
4287 int prop_size = map->InitialPropertiesLength();
4288 ASSERT(prop_size >= 0);
4289 { MaybeObject* maybe_properties = AllocateFixedArray(prop_size, pretenure);
4290 if (!maybe_properties->To(&properties)) return maybe_properties;
4291 }
4292 } else {
4293 properties = empty_fixed_array();
4294 }
4296 // Allocate the JSObject.
4297 int size = map->instance_size();
4298 AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, pretenure);
4299 Object* obj;
4300 MaybeObject* maybe_obj = Allocate(map, space);
4301 if (!maybe_obj->To(&obj)) return maybe_obj;
4303 // Initialize the JSObject.
4304 InitializeJSObjectFromMap(JSObject::cast(obj), properties, map);
4305 ASSERT(JSObject::cast(obj)->HasFastElements() ||
4306 JSObject::cast(obj)->HasExternalArrayElements());
4307 return obj;
4308 }
4311 MaybeObject* Heap::AllocateJSObjectFromMapWithAllocationSite(
4312 Map* map, Handle<AllocationSite> allocation_site) {
4313 // JSFunctions should be allocated using AllocateFunction to be
4314 // properly initialized.
4315 ASSERT(map->instance_type() != JS_FUNCTION_TYPE);
4317 // Both types of global objects should be allocated using
4318 // AllocateGlobalObject to be properly initialized.
4319 ASSERT(map->instance_type() != JS_GLOBAL_OBJECT_TYPE);
4320 ASSERT(map->instance_type() != JS_BUILTINS_OBJECT_TYPE);
4322 // Allocate the backing storage for the properties.
4323 int prop_size = map->InitialPropertiesLength();
4324 ASSERT(prop_size >= 0);
4325 FixedArray* properties;
4326 { MaybeObject* maybe_properties = AllocateFixedArray(prop_size);
4327 if (!maybe_properties->To(&properties)) return maybe_properties;
4328 }
4330 // Allocate the JSObject.
4331 int size = map->instance_size();
4332 AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, NOT_TENURED);
4333 Object* obj;
4334 MaybeObject* maybe_obj =
4335 AllocateWithAllocationSite(map, space, allocation_site);
4336 if (!maybe_obj->To(&obj)) return maybe_obj;
4338 // Initialize the JSObject.
4339 InitializeJSObjectFromMap(JSObject::cast(obj), properties, map);
4340 ASSERT(JSObject::cast(obj)->HasFastElements());
4341 return obj;
4342 }
4345 MaybeObject* Heap::AllocateJSObject(JSFunction* constructor,
4346 PretenureFlag pretenure) {
4347 ASSERT(constructor->has_initial_map());
4348 // Allocate the object based on the constructor's initial map.
4349 MaybeObject* result = AllocateJSObjectFromMap(
4350 constructor->initial_map(), pretenure);
4352 // Make sure result is NOT a global object if valid.
4353 Object* non_failure;
4354 ASSERT(!result->ToObject(&non_failure) || !non_failure->IsGlobalObject());
4356 return result;
4357 }
4360 MaybeObject* Heap::AllocateJSObjectWithAllocationSite(JSFunction* constructor,
4361 Handle<AllocationSite> allocation_site) {
4362 ASSERT(constructor->has_initial_map());
4363 // Allocate the object based on the constructor's initial map, or the payload
4364 // advice from the allocation site.
4365 Map* initial_map = constructor->initial_map();
4367 ElementsKind to_kind = allocation_site->GetElementsKind();
4368 AllocationSiteMode mode = TRACK_ALLOCATION_SITE;
4369 if (to_kind != initial_map->elements_kind()) {
4370 MaybeObject* maybe_new_map = initial_map->AsElementsKind(to_kind);
4371 if (!maybe_new_map->To(&initial_map)) return maybe_new_map;
4372 // Possibly alter the mode, since we found an updated elements kind
4373 // in the type info cell.
4374 mode = AllocationSite::GetMode(to_kind);
4375 }
4377 MaybeObject* result;
4378 if (mode == TRACK_ALLOCATION_SITE) {
4379 result = AllocateJSObjectFromMapWithAllocationSite(initial_map,
4380 allocation_site);
4381 } else {
4382 result = AllocateJSObjectFromMap(initial_map, NOT_TENURED);
4383 }
4385 // Make sure result is NOT a global object if valid.
4386 Object* non_failure;
4387 ASSERT(!result->ToObject(&non_failure) || !non_failure->IsGlobalObject());
4389 return result;
4390 }
4393 MaybeObject* Heap::AllocateJSModule(Context* context, ScopeInfo* scope_info) {
4394 // Allocate a fresh map. Modules do not have a prototype.
4395 Map* map;
4396 MaybeObject* maybe_map = AllocateMap(JS_MODULE_TYPE, JSModule::kSize);
4397 if (!maybe_map->To(&map)) return maybe_map;
4398 // Allocate the object based on the map.
4399 JSModule* module;
4400 MaybeObject* maybe_module = AllocateJSObjectFromMap(map, TENURED);
4401 if (!maybe_module->To(&module)) return maybe_module;
4402 module->set_context(context);
4403 module->set_scope_info(scope_info);
4404 return module;
4405 }
4408 MaybeObject* Heap::AllocateJSArrayAndStorage(
4409 ElementsKind elements_kind,
4410 int length,
4411 int capacity,
4412 ArrayStorageAllocationMode mode,
4413 PretenureFlag pretenure) {
4414 MaybeObject* maybe_array = AllocateJSArray(elements_kind, pretenure);
4415 JSArray* array;
4416 if (!maybe_array->To(&array)) return maybe_array;
4418 // TODO(mvstanton): this body of code is duplicate with AllocateJSArrayStorage
4419 // for performance reasons.
4420 ASSERT(capacity >= length);
4422 if (capacity == 0) {
4423 array->set_length(Smi::FromInt(0));
4424 array->set_elements(empty_fixed_array());
4425 return array;
4426 }
4428 FixedArrayBase* elms;
4429 MaybeObject* maybe_elms = NULL;
4430 if (IsFastDoubleElementsKind(elements_kind)) {
4431 if (mode == DONT_INITIALIZE_ARRAY_ELEMENTS) {
4432 maybe_elms = AllocateUninitializedFixedDoubleArray(capacity);
4433 } else {
4434 ASSERT(mode == INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
4435 maybe_elms = AllocateFixedDoubleArrayWithHoles(capacity);
4436 }
4437 } else {
4438 ASSERT(IsFastSmiOrObjectElementsKind(elements_kind));
4439 if (mode == DONT_INITIALIZE_ARRAY_ELEMENTS) {
4440 maybe_elms = AllocateUninitializedFixedArray(capacity);
4441 } else {
4442 ASSERT(mode == INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
4443 maybe_elms = AllocateFixedArrayWithHoles(capacity);
4444 }
4445 }
4446 if (!maybe_elms->To(&elms)) return maybe_elms;
4448 array->set_elements(elms);
4449 array->set_length(Smi::FromInt(length));
4450 return array;
4451 }
4454 MaybeObject* Heap::AllocateJSArrayStorage(
4455 JSArray* array,
4456 int length,
4457 int capacity,
4458 ArrayStorageAllocationMode mode) {
4459 ASSERT(capacity >= length);
4461 if (capacity == 0) {
4462 array->set_length(Smi::FromInt(0));
4463 array->set_elements(empty_fixed_array());
4464 return array;
4465 }
4467 FixedArrayBase* elms;
4468 MaybeObject* maybe_elms = NULL;
4469 ElementsKind elements_kind = array->GetElementsKind();
4470 if (IsFastDoubleElementsKind(elements_kind)) {
4471 if (mode == DONT_INITIALIZE_ARRAY_ELEMENTS) {
4472 maybe_elms = AllocateUninitializedFixedDoubleArray(capacity);
4473 } else {
4474 ASSERT(mode == INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
4475 maybe_elms = AllocateFixedDoubleArrayWithHoles(capacity);
4476 }
4477 } else {
4478 ASSERT(IsFastSmiOrObjectElementsKind(elements_kind));
4479 if (mode == DONT_INITIALIZE_ARRAY_ELEMENTS) {
4480 maybe_elms = AllocateUninitializedFixedArray(capacity);
4481 } else {
4482 ASSERT(mode == INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
4483 maybe_elms = AllocateFixedArrayWithHoles(capacity);
4484 }
4485 }
4486 if (!maybe_elms->To(&elms)) return maybe_elms;
4488 array->set_elements(elms);
4489 array->set_length(Smi::FromInt(length));
4490 return array;
4491 }
4494 MaybeObject* Heap::AllocateJSArrayWithElements(
4495 FixedArrayBase* elements,
4496 ElementsKind elements_kind,
4497 int length,
4498 PretenureFlag pretenure) {
4499 MaybeObject* maybe_array = AllocateJSArray(elements_kind, pretenure);
4500 JSArray* array;
4501 if (!maybe_array->To(&array)) return maybe_array;
4503 array->set_elements(elements);
4504 array->set_length(Smi::FromInt(length));
4505 array->ValidateElements();
4506 return array;
4507 }
4510 MaybeObject* Heap::AllocateJSProxy(Object* handler, Object* prototype) {
4511 // Allocate map.
4512 // TODO(rossberg): Once we optimize proxies, think about a scheme to share
4513 // maps. Will probably depend on the identity of the handler object, too.
4514 Map* map;
4515 MaybeObject* maybe_map_obj = AllocateMap(JS_PROXY_TYPE, JSProxy::kSize);
4516 if (!maybe_map_obj->To<Map>(&map)) return maybe_map_obj;
4517 map->set_prototype(prototype);
4519 // Allocate the proxy object.
4520 JSProxy* result;
4521 MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
4522 if (!maybe_result->To<JSProxy>(&result)) return maybe_result;
4523 result->InitializeBody(map->instance_size(), Smi::FromInt(0));
4524 result->set_handler(handler);
4525 result->set_hash(undefined_value(), SKIP_WRITE_BARRIER);
4526 return result;
4527 }
4530 MaybeObject* Heap::AllocateJSFunctionProxy(Object* handler,
4531 Object* call_trap,
4532 Object* construct_trap,
4533 Object* prototype) {
4534 // Allocate map.
4535 // TODO(rossberg): Once we optimize proxies, think about a scheme to share
4536 // maps. Will probably depend on the identity of the handler object, too.
4537 Map* map;
4538 MaybeObject* maybe_map_obj =
4539 AllocateMap(JS_FUNCTION_PROXY_TYPE, JSFunctionProxy::kSize);
4540 if (!maybe_map_obj->To<Map>(&map)) return maybe_map_obj;
4541 map->set_prototype(prototype);
4543 // Allocate the proxy object.
4544 JSFunctionProxy* result;
4545 MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
4546 if (!maybe_result->To<JSFunctionProxy>(&result)) return maybe_result;
4547 result->InitializeBody(map->instance_size(), Smi::FromInt(0));
4548 result->set_handler(handler);
4549 result->set_hash(undefined_value(), SKIP_WRITE_BARRIER);
4550 result->set_call_trap(call_trap);
4551 result->set_construct_trap(construct_trap);
4552 return result;
4553 }
4556 MaybeObject* Heap::CopyJSObject(JSObject* source, AllocationSite* site) {
4557 // Never used to copy functions. If functions need to be copied we
4558 // have to be careful to clear the literals array.
4559 SLOW_ASSERT(!source->IsJSFunction());
4561 // Make the clone.
4562 Map* map = source->map();
4563 int object_size = map->instance_size();
4564 Object* clone;
4566 ASSERT(site == NULL || AllocationSite::CanTrack(map->instance_type()));
4568 WriteBarrierMode wb_mode = UPDATE_WRITE_BARRIER;
4570 // If we're forced to always allocate, we use the general allocation
4571 // functions which may leave us with an object in old space.
4572 if (always_allocate()) {
4573 { MaybeObject* maybe_clone =
4574 AllocateRaw(object_size, NEW_SPACE, OLD_POINTER_SPACE);
4575 if (!maybe_clone->ToObject(&clone)) return maybe_clone;
4576 }
4577 Address clone_address = HeapObject::cast(clone)->address();
4578 CopyBlock(clone_address,
4579 source->address(),
4580 object_size);
4581 // Update write barrier for all fields that lie beyond the header.
4582 RecordWrites(clone_address,
4583 JSObject::kHeaderSize,
4584 (object_size - JSObject::kHeaderSize) / kPointerSize);
4585 } else {
4586 wb_mode = SKIP_WRITE_BARRIER;
4588 { int adjusted_object_size = site != NULL
4589 ? object_size + AllocationMemento::kSize
4590 : object_size;
4591 MaybeObject* maybe_clone =
4592 AllocateRaw(adjusted_object_size, NEW_SPACE, NEW_SPACE);
4593 if (!maybe_clone->ToObject(&clone)) return maybe_clone;
4594 }
4595 SLOW_ASSERT(InNewSpace(clone));
4596 // Since we know the clone is allocated in new space, we can copy
4597 // the contents without worrying about updating the write barrier.
4598 CopyBlock(HeapObject::cast(clone)->address(),
4599 source->address(),
4600 object_size);
4602 if (site != NULL) {
4603 AllocationMemento* alloc_memento = reinterpret_cast<AllocationMemento*>(
4604 reinterpret_cast<Address>(clone) + object_size);
4605 InitializeAllocationMemento(alloc_memento, site);
4606 }
4607 }
4609 SLOW_ASSERT(
4610 JSObject::cast(clone)->GetElementsKind() == source->GetElementsKind());
4611 FixedArrayBase* elements = FixedArrayBase::cast(source->elements());
4612 FixedArray* properties = FixedArray::cast(source->properties());
4613 // Update elements if necessary.
4614 if (elements->length() > 0) {
4615 Object* elem;
4616 { MaybeObject* maybe_elem;
4617 if (elements->map() == fixed_cow_array_map()) {
4618 maybe_elem = FixedArray::cast(elements);
4619 } else if (source->HasFastDoubleElements()) {
4620 maybe_elem = CopyFixedDoubleArray(FixedDoubleArray::cast(elements));
4622 maybe_elem = CopyFixedArray(FixedArray::cast(elements));
4623 }
4624 if (!maybe_elem->ToObject(&elem)) return maybe_elem;
4625 }
4626 JSObject::cast(clone)->set_elements(FixedArrayBase::cast(elem), wb_mode);
4627 }
4628 // Update properties if necessary.
4629 if (properties->length() > 0) {
4630 Object* prop;
4631 { MaybeObject* maybe_prop = CopyFixedArray(properties);
4632 if (!maybe_prop->ToObject(&prop)) return maybe_prop;
4633 }
4634 JSObject::cast(clone)->set_properties(FixedArray::cast(prop), wb_mode);
4635 }
4636 // Return the new clone.
4637 return clone;
4638 }
4641 MaybeObject* Heap::ReinitializeJSReceiver(
4642 JSReceiver* object, InstanceType type, int size) {
4643 ASSERT(type >= FIRST_JS_OBJECT_TYPE);
4645 // Allocate fresh map.
4646 // TODO(rossberg): Once we optimize proxies, cache these maps.
4647 Map* map;
4648 MaybeObject* maybe = AllocateMap(type, size);
4649 if (!maybe->To<Map>(&map)) return maybe;
4651 // Check that the receiver has at least the size of the fresh object.
4652 int size_difference = object->map()->instance_size() - map->instance_size();
4653 ASSERT(size_difference >= 0);
4655 map->set_prototype(object->map()->prototype());
4657 // Allocate the backing storage for the properties.
4658 int prop_size = map->unused_property_fields() - map->inobject_properties();
4659 Object* properties;
4660 maybe = AllocateFixedArray(prop_size, TENURED);
4661 if (!maybe->ToObject(&properties)) return maybe;
4663 // Functions require some allocation, which might fail here.
4664 SharedFunctionInfo* shared = NULL;
4665 if (type == JS_FUNCTION_TYPE) {
4666 String* name;
4667 OneByteStringKey key(STATIC_ASCII_VECTOR("<freezing call trap>"),
4668 HashSeed());
4669 maybe = InternalizeStringWithKey(&key);
4670 if (!maybe->To<String>(&name)) return maybe;
4671 maybe = AllocateSharedFunctionInfo(name);
4672 if (!maybe->To<SharedFunctionInfo>(&shared)) return maybe;
4673 }
4675 // Because of possible retries of this function after failure,
4676 // we must NOT fail after this point, where we have changed the type!
4678 // Reset the map for the object.
4679 object->set_map(map);
4680 JSObject* jsobj = JSObject::cast(object);
4682 // Reinitialize the object from the constructor map.
4683 InitializeJSObjectFromMap(jsobj, FixedArray::cast(properties), map);
4685 // Functions require some minimal initialization.
4686 if (type == JS_FUNCTION_TYPE) {
4687 map->set_function_with_prototype(true);
4688 InitializeFunction(JSFunction::cast(object), shared, the_hole_value());
4689 JSFunction::cast(object)->set_context(
4690 isolate()->context()->native_context());
4691 }
4693 // Put in filler if the new object is smaller than the old.
4694 if (size_difference > 0) {
4695 CreateFillerObjectAt(
4696 object->address() + map->instance_size(), size_difference);
4697 }
4699 return object;
4700 }
4703 MaybeObject* Heap::ReinitializeJSGlobalProxy(JSFunction* constructor,
4704 JSGlobalProxy* object) {
4705 ASSERT(constructor->has_initial_map());
4706 Map* map = constructor->initial_map();
4708 // Check that the already allocated object has the same size and type as
4709 // objects allocated using the constructor.
4710 ASSERT(map->instance_size() == object->map()->instance_size());
4711 ASSERT(map->instance_type() == object->map()->instance_type());
4713 // Allocate the backing storage for the properties.
4714 int prop_size = map->unused_property_fields() - map->inobject_properties();
4715 Object* properties;
4716 { MaybeObject* maybe_properties = AllocateFixedArray(prop_size, TENURED);
4717 if (!maybe_properties->ToObject(&properties)) return maybe_properties;
4718 }
4720 // Reset the map for the object.
4721 object->set_map(constructor->initial_map());
4723 // Reinitialize the object from the constructor map.
4724 InitializeJSObjectFromMap(object, FixedArray::cast(properties), map);
4725 return object;
4726 }
4729 MaybeObject* Heap::AllocateStringFromOneByte(Vector<const uint8_t> string,
4730 PretenureFlag pretenure) {
4731 int length = string.length();
4732 if (length == 1) {
4733 return Heap::LookupSingleCharacterStringFromCode(string[0]);
4734 }
4735 Object* result;
4736 { MaybeObject* maybe_result =
4737 AllocateRawOneByteString(string.length(), pretenure);
4738 if (!maybe_result->ToObject(&result)) return maybe_result;
4739 }
4741 // Copy the characters into the new object.
4742 CopyChars(SeqOneByteString::cast(result)->GetChars(),
4743 string.start(),
4744 length);
4745 return result;
4746 }
4749 MaybeObject* Heap::AllocateStringFromUtf8Slow(Vector<const char> string,
4750 int non_ascii_start,
4751 PretenureFlag pretenure) {
4752 // Continue counting the number of characters in the UTF-8 string, starting
4753 // from the first non-ascii character or word.
4754 Access<UnicodeCache::Utf8Decoder>
4755 decoder(isolate_->unicode_cache()->utf8_decoder());
4756 decoder->Reset(string.start() + non_ascii_start,
4757 string.length() - non_ascii_start);
4758 int utf16_length = decoder->Utf16Length();
4759 ASSERT(utf16_length > 0);
4760 // Allocate string.
4761 Object* result;
4762 {
4763 int chars = non_ascii_start + utf16_length;
4764 MaybeObject* maybe_result = AllocateRawTwoByteString(chars, pretenure);
4765 if (!maybe_result->ToObject(&result)) return maybe_result;
4766 }
4767 // Convert and copy the characters into the new object.
4768 SeqTwoByteString* twobyte = SeqTwoByteString::cast(result);
4769 // Copy ascii portion.
4770 uint16_t* data = twobyte->GetChars();
4771 if (non_ascii_start != 0) {
4772 const char* ascii_data = string.start();
4773 for (int i = 0; i < non_ascii_start; i++) {
4774 *data++ = *ascii_data++;
4775 }
4776 }
4777 // Now write the remainder.
4778 decoder->WriteUtf16(data, utf16_length);
4779 return result;
4780 }
4783 MaybeObject* Heap::AllocateStringFromTwoByte(Vector<const uc16> string,
4784 PretenureFlag pretenure) {
4785 // Check if the string is an ASCII string.
4786 Object* result;
4787 int length = string.length();
4788 const uc16* start = string.start();
4790 if (String::IsOneByte(start, length)) {
4791 MaybeObject* maybe_result = AllocateRawOneByteString(length, pretenure);
4792 if (!maybe_result->ToObject(&result)) return maybe_result;
4793 CopyChars(SeqOneByteString::cast(result)->GetChars(), start, length);
4794 } else { // It's not a one byte string.
4795 MaybeObject* maybe_result = AllocateRawTwoByteString(length, pretenure);
4796 if (!maybe_result->ToObject(&result)) return maybe_result;
4797 CopyChars(SeqTwoByteString::cast(result)->GetChars(), start, length);
4798 }
4799 return result;
4800 }
4803 Map* Heap::InternalizedStringMapForString(String* string) {
4804 // If the string is in new space it cannot be used as internalized.
4805 if (InNewSpace(string)) return NULL;
4807 // Find the corresponding internalized string map for strings.
4808 switch (string->map()->instance_type()) {
4809 case STRING_TYPE: return internalized_string_map();
4810 case ASCII_STRING_TYPE: return ascii_internalized_string_map();
4811 case CONS_STRING_TYPE: return cons_internalized_string_map();
4812 case CONS_ASCII_STRING_TYPE: return cons_ascii_internalized_string_map();
4813 case EXTERNAL_STRING_TYPE: return external_internalized_string_map();
4814 case EXTERNAL_ASCII_STRING_TYPE:
4815 return external_ascii_internalized_string_map();
4816 case EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE:
4817 return external_internalized_string_with_one_byte_data_map();
4818 case SHORT_EXTERNAL_STRING_TYPE:
4819 return short_external_internalized_string_map();
4820 case SHORT_EXTERNAL_ASCII_STRING_TYPE:
4821 return short_external_ascii_internalized_string_map();
4822 case SHORT_EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE:
4823 return short_external_internalized_string_with_one_byte_data_map();
4824 default: return NULL; // No match found.
4825 }
4826 }
4829 static inline void WriteOneByteData(Vector<const char> vector,
4830 uint8_t* chars,
4831 int len) {
4832 // Only works for ascii.
4833 ASSERT(vector.length() == len);
4834 OS::MemCopy(chars, vector.start(), len);
4835 }
4837 static inline void WriteTwoByteData(Vector<const char> vector,
4838 uint16_t* chars,
4839 int len) {
4840 const uint8_t* stream = reinterpret_cast<const uint8_t*>(vector.start());
4841 unsigned stream_length = vector.length();
4842 while (stream_length != 0) {
4843 unsigned consumed = 0;
4844 uint32_t c = unibrow::Utf8::ValueOf(stream, stream_length, &consumed);
4845 ASSERT(c != unibrow::Utf8::kBadChar);
4846 ASSERT(consumed <= stream_length);
4847 stream_length -= consumed;
4848 stream += consumed;
4849 if (c > unibrow::Utf16::kMaxNonSurrogateCharCode) {
4850 len -= 2;
4851 if (len < 0) break;
4852 *chars++ = unibrow::Utf16::LeadSurrogate(c);
4853 *chars++ = unibrow::Utf16::TrailSurrogate(c);
4854 } else {
4855 len -= 1;
4856 if (len < 0) break;
4857 *chars++ = c;
4858 }
4859 }
4860 ASSERT(stream_length == 0);
4861 ASSERT(len == 0);
4862 }
4865 static inline void WriteOneByteData(String* s, uint8_t* chars, int len) {
4866 ASSERT(s->length() == len);
4867 String::WriteToFlat(s, chars, 0, len);
4868 }
4871 static inline void WriteTwoByteData(String* s, uint16_t* chars, int len) {
4872 ASSERT(s->length() == len);
4873 String::WriteToFlat(s, chars, 0, len);
4874 }
4877 template<bool is_one_byte, typename T>
4878 MaybeObject* Heap::AllocateInternalizedStringImpl(
4879 T t, int chars, uint32_t hash_field) {
4881 // Compute map and object size.
4882 int size;
4883 Map* map;
4885 if (is_one_byte) {
4886 if (chars > SeqOneByteString::kMaxLength) {
4887 return Failure::OutOfMemoryException(0x9);
4888 }
4889 map = ascii_internalized_string_map();
4890 size = SeqOneByteString::SizeFor(chars);
4891 } else {
4892 if (chars > SeqTwoByteString::kMaxLength) {
4893 return Failure::OutOfMemoryException(0xa);
4894 }
4895 map = internalized_string_map();
4896 size = SeqTwoByteString::SizeFor(chars);
4897 }
4898 AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, TENURED);
4900 // Allocate string.
4901 Object* result;
4902 { MaybeObject* maybe_result = AllocateRaw(size, space, OLD_DATA_SPACE);
4903 if (!maybe_result->ToObject(&result)) return maybe_result;
4904 }
4906 reinterpret_cast<HeapObject*>(result)->set_map_no_write_barrier(map);
4907 // Set length and hash fields of the allocated string.
4908 String* answer = String::cast(result);
4909 answer->set_length(chars);
4910 answer->set_hash_field(hash_field);
4912 ASSERT_EQ(size, answer->Size());
4914 if (is_one_byte) {
4915 WriteOneByteData(t, SeqOneByteString::cast(answer)->GetChars(), chars);
4916 } else {
4917 WriteTwoByteData(t, SeqTwoByteString::cast(answer)->GetChars(), chars);
4918 }
4919 return answer;
4920 }
4923 // Need explicit instantiations.
4924 template
4925 MaybeObject* Heap::AllocateInternalizedStringImpl<true>(String*, int, uint32_t);
4926 template
4927 MaybeObject* Heap::AllocateInternalizedStringImpl<false>(
4928 String*, int, uint32_t);
4929 template
4930 MaybeObject* Heap::AllocateInternalizedStringImpl<false>(
4931 Vector<const char>, int, uint32_t);
4934 MaybeObject* Heap::AllocateRawOneByteString(int length,
4935 PretenureFlag pretenure) {
4936 if (length < 0 || length > SeqOneByteString::kMaxLength) {
4937 return Failure::OutOfMemoryException(0xb);
4938 }
4939 int size = SeqOneByteString::SizeFor(length);
4940 ASSERT(size <= SeqOneByteString::kMaxSize);
4941 AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
4943 Object* result;
4944 { MaybeObject* maybe_result = AllocateRaw(size, space, OLD_DATA_SPACE);
4945 if (!maybe_result->ToObject(&result)) return maybe_result;
4946 }
4948 // Partially initialize the object.
4949 HeapObject::cast(result)->set_map_no_write_barrier(ascii_string_map());
4950 String::cast(result)->set_length(length);
4951 String::cast(result)->set_hash_field(String::kEmptyHashField);
4952 ASSERT_EQ(size, HeapObject::cast(result)->Size());
4954 return result;
4955 }
4958 MaybeObject* Heap::AllocateRawTwoByteString(int length,
4959 PretenureFlag pretenure) {
4960 if (length < 0 || length > SeqTwoByteString::kMaxLength) {
4961 return Failure::OutOfMemoryException(0xc);
4962 }
4963 int size = SeqTwoByteString::SizeFor(length);
4964 ASSERT(size <= SeqTwoByteString::kMaxSize);
4965 AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
4967 Object* result;
4968 { MaybeObject* maybe_result = AllocateRaw(size, space, OLD_DATA_SPACE);
4969 if (!maybe_result->ToObject(&result)) return maybe_result;
4970 }
4972 // Partially initialize the object.
4973 HeapObject::cast(result)->set_map_no_write_barrier(string_map());
4974 String::cast(result)->set_length(length);
4975 String::cast(result)->set_hash_field(String::kEmptyHashField);
4976 ASSERT_EQ(size, HeapObject::cast(result)->Size());
4977 return result;
4978 }
4981 MaybeObject* Heap::AllocateJSArray(
4982 ElementsKind elements_kind,
4983 PretenureFlag pretenure) {
4984 Context* native_context = isolate()->context()->native_context();
4985 JSFunction* array_function = native_context->array_function();
4986 Map* map = array_function->initial_map();
4987 Map* transition_map = isolate()->get_initial_js_array_map(elements_kind);
4988 if (transition_map != NULL) map = transition_map;
4989 return AllocateJSObjectFromMap(map, pretenure);
4990 }
4993 MaybeObject* Heap::AllocateEmptyFixedArray() {
4994 int size = FixedArray::SizeFor(0);
4995 Object* result;
4996 { MaybeObject* maybe_result =
4997 AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE);
4998 if (!maybe_result->ToObject(&result)) return maybe_result;
4999 }
5000 // Initialize the object.
5001 reinterpret_cast<FixedArray*>(result)->set_map_no_write_barrier(
5002 fixed_array_map());
5003 reinterpret_cast<FixedArray*>(result)->set_length(0);
5004 return result;
5005 }
5008 MaybeObject* Heap::AllocateEmptyExternalArray(ExternalArrayType array_type) {
5009 return AllocateExternalArray(0, array_type, NULL, TENURED);
5010 }
5013 MaybeObject* Heap::CopyFixedArrayWithMap(FixedArray* src, Map* map) {
5014 int len = src->length();
5015 Object* obj;
5016 { MaybeObject* maybe_obj = AllocateRawFixedArray(len, NOT_TENURED);
5017 if (!maybe_obj->ToObject(&obj)) return maybe_obj;
5018 }
5019 if (InNewSpace(obj)) {
5020 HeapObject* dst = HeapObject::cast(obj);
5021 dst->set_map_no_write_barrier(map);
5022 CopyBlock(dst->address() + kPointerSize,
5023 src->address() + kPointerSize,
5024 FixedArray::SizeFor(len) - kPointerSize);
5025 return obj;
5026 }
5027 HeapObject::cast(obj)->set_map_no_write_barrier(map);
5028 FixedArray* result = FixedArray::cast(obj);
5029 result->set_length(len);
5031 // Copy the content.
5032 DisallowHeapAllocation no_gc;
5033 WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
5034 for (int i = 0; i < len; i++) result->set(i, src->get(i), mode);
5035 return result;
5036 }
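// Editor's note: an illustrative note, not part of the original source.
// The copy above takes two paths: a clone in new space is block-copied past
// its map word, because stores into new space need no write barrier, e.g.
//
//   CopyBlock(dst->address() + kPointerSize,
//             src->address() + kPointerSize,
//             FixedArray::SizeFor(len) - kPointerSize);
//
// whereas an old-space clone is filled element by element so each set() can
// honour the mode returned by GetWriteBarrierMode().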
5039 MaybeObject* Heap::CopyFixedDoubleArrayWithMap(FixedDoubleArray* src,
5040 Map* map) {
5041 int len = src->length();
5042 Object* obj;
5043 { MaybeObject* maybe_obj = AllocateRawFixedDoubleArray(len, NOT_TENURED);
5044 if (!maybe_obj->ToObject(&obj)) return maybe_obj;
5045 }
5046 HeapObject* dst = HeapObject::cast(obj);
5047 dst->set_map_no_write_barrier(map);
5048 CopyBlock(
5049 dst->address() + FixedDoubleArray::kLengthOffset,
5050 src->address() + FixedDoubleArray::kLengthOffset,
5051 FixedDoubleArray::SizeFor(len) - FixedDoubleArray::kLengthOffset);
5052 return obj;
5053 }
5056 MaybeObject* Heap::CopyConstantPoolArrayWithMap(ConstantPoolArray* src,
5057 Map* map) {
5058 int int64_entries = src->count_of_int64_entries();
5059 int ptr_entries = src->count_of_ptr_entries();
5060 int int32_entries = src->count_of_int32_entries();
5061 Object* obj;
5062 { MaybeObject* maybe_obj =
5063 AllocateConstantPoolArray(int64_entries, ptr_entries, int32_entries);
5064 if (!maybe_obj->ToObject(&obj)) return maybe_obj;
5065 }
5066 HeapObject* dst = HeapObject::cast(obj);
5067 dst->set_map_no_write_barrier(map);
5068 CopyBlock(
5069 dst->address() + ConstantPoolArray::kLengthOffset,
5070 src->address() + ConstantPoolArray::kLengthOffset,
5071 ConstantPoolArray::SizeFor(int64_entries, ptr_entries, int32_entries)
5072 - ConstantPoolArray::kLengthOffset);
5073 return obj;
5074 }
5077 MaybeObject* Heap::AllocateRawFixedArray(int length, PretenureFlag pretenure) {
5078 if (length < 0 || length > FixedArray::kMaxLength) {
5079 return Failure::OutOfMemoryException(0xe);
5080 }
5081 int size = FixedArray::SizeFor(length);
5082 AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, pretenure);
5084 return AllocateRaw(size, space, OLD_POINTER_SPACE);
5085 }
5088 MaybeObject* Heap::AllocateFixedArrayWithFiller(int length,
5089 PretenureFlag pretenure,
5090 Object* filler) {
5091 ASSERT(length >= 0);
5092 ASSERT(empty_fixed_array()->IsFixedArray());
5093 if (length == 0) return empty_fixed_array();
5095 ASSERT(!InNewSpace(filler));
5096 Object* result;
5097 { MaybeObject* maybe_result = AllocateRawFixedArray(length, pretenure);
5098 if (!maybe_result->ToObject(&result)) return maybe_result;
5099 }
5101 HeapObject::cast(result)->set_map_no_write_barrier(fixed_array_map());
5102 FixedArray* array = FixedArray::cast(result);
5103 array->set_length(length);
5104 MemsetPointer(array->data_start(), filler, length);
5105 return array;
5106 }
5109 MaybeObject* Heap::AllocateFixedArray(int length, PretenureFlag pretenure) {
5110 return AllocateFixedArrayWithFiller(length, pretenure, undefined_value());
5111 }
5114 MaybeObject* Heap::AllocateFixedArrayWithHoles(int length,
5115 PretenureFlag pretenure) {
5116 return AllocateFixedArrayWithFiller(length, pretenure, the_hole_value());
5117 }
5120 MaybeObject* Heap::AllocateUninitializedFixedArray(int length) {
5121 if (length == 0) return empty_fixed_array();
5123 Object* obj;
5124 { MaybeObject* maybe_obj = AllocateRawFixedArray(length, NOT_TENURED);
5125 if (!maybe_obj->ToObject(&obj)) return maybe_obj;
5126 }
5128 reinterpret_cast<FixedArray*>(obj)->set_map_no_write_barrier(
5129 fixed_array_map());
5130 FixedArray::cast(obj)->set_length(length);
5131 return obj;
5132 }
5135 MaybeObject* Heap::AllocateEmptyFixedDoubleArray() {
5136 int size = FixedDoubleArray::SizeFor(0);
5137 Object* result;
5138 { MaybeObject* maybe_result =
5139 AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE);
5140 if (!maybe_result->ToObject(&result)) return maybe_result;
5141 }
5142 // Initialize the object.
5143 reinterpret_cast<FixedDoubleArray*>(result)->set_map_no_write_barrier(
5144 fixed_double_array_map());
5145 reinterpret_cast<FixedDoubleArray*>(result)->set_length(0);
5146 return result;
5147 }
5150 MaybeObject* Heap::AllocateUninitializedFixedDoubleArray(
5151 int length,
5152 PretenureFlag pretenure) {
5153 if (length == 0) return empty_fixed_array();
5155 Object* elements_object;
5156 MaybeObject* maybe_obj = AllocateRawFixedDoubleArray(length, pretenure);
5157 if (!maybe_obj->ToObject(&elements_object)) return maybe_obj;
5158 FixedDoubleArray* elements =
5159 reinterpret_cast<FixedDoubleArray*>(elements_object);
5161 elements->set_map_no_write_barrier(fixed_double_array_map());
5162 elements->set_length(length);
5163 return elements;
5164 }
5167 MaybeObject* Heap::AllocateFixedDoubleArrayWithHoles(
5168 int length,
5169 PretenureFlag pretenure) {
5170 if (length == 0) return empty_fixed_array();
5172 Object* elements_object;
5173 MaybeObject* maybe_obj = AllocateRawFixedDoubleArray(length, pretenure);
5174 if (!maybe_obj->ToObject(&elements_object)) return maybe_obj;
5175 FixedDoubleArray* elements =
5176 reinterpret_cast<FixedDoubleArray*>(elements_object);
5178 for (int i = 0; i < length; ++i) {
5179 elements->set_the_hole(i);
5180 }
5182 elements->set_map_no_write_barrier(fixed_double_array_map());
5183 elements->set_length(length);
5184 return elements;
5185 }
5188 MaybeObject* Heap::AllocateRawFixedDoubleArray(int length,
5189 PretenureFlag pretenure) {
5190 if (length < 0 || length > FixedDoubleArray::kMaxLength) {
5191 return Failure::OutOfMemoryException(0xf);
5192 }
5193 int size = FixedDoubleArray::SizeFor(length);
5194 #ifndef V8_HOST_ARCH_64_BIT
5195 size += kPointerSize;
5196 #endif
5197 AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
5199 HeapObject* object;
5200 { MaybeObject* maybe_object = AllocateRaw(size, space, OLD_DATA_SPACE);
5201 if (!maybe_object->To<HeapObject>(&object)) return maybe_object;
5202 }
5204 return EnsureDoubleAligned(this, object, size);
5205 }
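// Editor's note: not part of the original source. On 32-bit hosts the raw
// size above is padded by one kPointerSize so EnsureDoubleAligned() can
// slide the array forward to an 8-byte boundary and fill the spare word
// with a one-pointer filler; 64-bit hosts allocate on 8-byte boundaries
// already, so no padding is reserved.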
5208 MaybeObject* Heap::AllocateConstantPoolArray(int number_of_int64_entries,
5209 int number_of_ptr_entries,
5210 int number_of_int32_entries) {
5211 ASSERT(number_of_int64_entries > 0 || number_of_ptr_entries > 0 ||
5212 number_of_int32_entries > 0);
5213 int size = ConstantPoolArray::SizeFor(number_of_int64_entries,
5214 number_of_ptr_entries,
5215 number_of_int32_entries);
5216 #ifndef V8_HOST_ARCH_64_BIT
5217 size += kPointerSize;
5218 #endif
5219 AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, TENURED);
5221 HeapObject* object;
5222 { MaybeObject* maybe_object = AllocateRaw(size, space, OLD_POINTER_SPACE);
5223 if (!maybe_object->To<HeapObject>(&object)) return maybe_object;
5224 }
5225 object = EnsureDoubleAligned(this, object, size);
5226 HeapObject::cast(object)->set_map_no_write_barrier(constant_pool_array_map());
5228 ConstantPoolArray* constant_pool =
5229 reinterpret_cast<ConstantPoolArray*>(object);
5230 constant_pool->SetEntryCounts(number_of_int64_entries,
5231 number_of_ptr_entries,
5232 number_of_int32_entries);
5233 if (number_of_ptr_entries > 0) {
5234 MemsetPointer(
5235 HeapObject::RawField(
5236 constant_pool,
5237 constant_pool->OffsetOfElementAt(constant_pool->first_ptr_index())),
5238 undefined_value(),
5239 number_of_ptr_entries);
5240 }
5241 return constant_pool;
5242 }
5245 MaybeObject* Heap::AllocateEmptyConstantPoolArray() {
5246 int size = ConstantPoolArray::SizeFor(0, 0, 0);
5247 Object* result;
5248 { MaybeObject* maybe_result =
5249 AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE);
5250 if (!maybe_result->ToObject(&result)) return maybe_result;
5251 }
5252 HeapObject::cast(result)->set_map_no_write_barrier(constant_pool_array_map());
5253 ConstantPoolArray::cast(result)->SetEntryCounts(0, 0, 0);
5254 return result;
5255 }
5258 MaybeObject* Heap::AllocateHashTable(int length, PretenureFlag pretenure) {
5259 Object* result;
5260 { MaybeObject* maybe_result = AllocateFixedArray(length, pretenure);
5261 if (!maybe_result->ToObject(&result)) return maybe_result;
5262 }
5263 reinterpret_cast<HeapObject*>(result)->set_map_no_write_barrier(
5264 hash_table_map());
5265 ASSERT(result->IsHashTable());
5266 return result;
5267 }
5270 MaybeObject* Heap::AllocateSymbol() {
5271 // Statically ensure that it is safe to allocate symbols in paged spaces.
5272 STATIC_ASSERT(Symbol::kSize <= Page::kMaxRegularHeapObjectSize);
5274 Object* result;
5275 MaybeObject* maybe =
5276 AllocateRaw(Symbol::kSize, OLD_POINTER_SPACE, OLD_POINTER_SPACE);
5277 if (!maybe->ToObject(&result)) return maybe;
5279 HeapObject::cast(result)->set_map_no_write_barrier(symbol_map());
5281 // Generate a random hash value.
5282 int hash;
5283 int attempts = 0;
5284 do {
5285 hash = isolate()->random_number_generator()->NextInt() & Name::kHashBitMask;
5286 attempts++;
5287 } while (hash == 0 && attempts < 30);
5288 if (hash == 0) hash = 1; // never return 0
5290 Symbol::cast(result)->set_hash_field(
5291 Name::kIsNotArrayIndexMask | (hash << Name::kHashShift));
5292 Symbol::cast(result)->set_name(undefined_value());
5293 Symbol::cast(result)->set_flags(Smi::FromInt(0));
5295 ASSERT(!Symbol::cast(result)->is_private());
5296 return result;
5297 }
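// Editor's note: an illustrative sketch, not part of the original source.
// A symbol's hash field combines the non-zero random hash with the
// "not an array index" tag:
//
//   hash_field = Name::kIsNotArrayIndexMask | (hash << Name::kHashShift);
//
// The loop above draws up to 30 random values and finally forces hash to 1,
// so a zero hash (the "never return 0" case) is impossible.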
5300 MaybeObject* Heap::AllocatePrivateSymbol() {
5301 MaybeObject* maybe = AllocateSymbol();
5302 Symbol* symbol;
5303 if (!maybe->To(&symbol)) return maybe;
5304 symbol->set_is_private(true);
5305 return symbol;
5306 }
5309 MaybeObject* Heap::AllocateNativeContext() {
5310 Object* result;
5311 { MaybeObject* maybe_result =
5312 AllocateFixedArray(Context::NATIVE_CONTEXT_SLOTS);
5313 if (!maybe_result->ToObject(&result)) return maybe_result;
5314 }
5315 Context* context = reinterpret_cast<Context*>(result);
5316 context->set_map_no_write_barrier(native_context_map());
5317 context->set_js_array_maps(undefined_value());
5318 ASSERT(context->IsNativeContext());
5319 ASSERT(result->IsContext());
5320 return result;
5321 }
5324 MaybeObject* Heap::AllocateGlobalContext(JSFunction* function,
5325 ScopeInfo* scope_info) {
5326 Object* result;
5327 { MaybeObject* maybe_result =
5328 AllocateFixedArray(scope_info->ContextLength(), TENURED);
5329 if (!maybe_result->ToObject(&result)) return maybe_result;
5330 }
5331 Context* context = reinterpret_cast<Context*>(result);
5332 context->set_map_no_write_barrier(global_context_map());
5333 context->set_closure(function);
5334 context->set_previous(function->context());
5335 context->set_extension(scope_info);
5336 context->set_global_object(function->context()->global_object());
5337 ASSERT(context->IsGlobalContext());
5338 ASSERT(result->IsContext());
5339 return context;
5340 }
5343 MaybeObject* Heap::AllocateModuleContext(ScopeInfo* scope_info) {
5344 Object* result;
5345 { MaybeObject* maybe_result =
5346 AllocateFixedArray(scope_info->ContextLength(), TENURED);
5347 if (!maybe_result->ToObject(&result)) return maybe_result;
5348 }
5349 Context* context = reinterpret_cast<Context*>(result);
5350 context->set_map_no_write_barrier(module_context_map());
5351 // Instance link will be set later.
5352 context->set_extension(Smi::FromInt(0));
5353 return context;
5354 }
5357 MaybeObject* Heap::AllocateFunctionContext(int length, JSFunction* function) {
5358 ASSERT(length >= Context::MIN_CONTEXT_SLOTS);
5359 Object* result;
5360 { MaybeObject* maybe_result = AllocateFixedArray(length);
5361 if (!maybe_result->ToObject(&result)) return maybe_result;
5362 }
5363 Context* context = reinterpret_cast<Context*>(result);
5364 context->set_map_no_write_barrier(function_context_map());
5365 context->set_closure(function);
5366 context->set_previous(function->context());
5367 context->set_extension(Smi::FromInt(0));
5368 context->set_global_object(function->context()->global_object());
5369 return context;
5370 }
MaybeObject* Heap::AllocateCatchContext(JSFunction* function,
                                        Context* previous,
                                        String* name,
                                        Object* thrown_object) {
  STATIC_ASSERT(Context::MIN_CONTEXT_SLOTS == Context::THROWN_OBJECT_INDEX);
  Object* result;
  { MaybeObject* maybe_result =
        AllocateFixedArray(Context::MIN_CONTEXT_SLOTS + 1);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }
  Context* context = reinterpret_cast<Context*>(result);
  context->set_map_no_write_barrier(catch_context_map());
  context->set_closure(function);
  context->set_previous(previous);
  context->set_extension(name);
  context->set_global_object(previous->global_object());
  context->set(Context::THROWN_OBJECT_INDEX, thrown_object);
  return context;
}


MaybeObject* Heap::AllocateWithContext(JSFunction* function,
                                       Context* previous,
                                       JSReceiver* extension) {
  Object* result;
  { MaybeObject* maybe_result = AllocateFixedArray(Context::MIN_CONTEXT_SLOTS);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }
  Context* context = reinterpret_cast<Context*>(result);
  context->set_map_no_write_barrier(with_context_map());
  context->set_closure(function);
  context->set_previous(previous);
  context->set_extension(extension);
  context->set_global_object(previous->global_object());
  return context;
}


MaybeObject* Heap::AllocateBlockContext(JSFunction* function,
                                        Context* previous,
                                        ScopeInfo* scope_info) {
  Object* result;
  { MaybeObject* maybe_result =
        AllocateFixedArrayWithHoles(scope_info->ContextLength());
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }
  Context* context = reinterpret_cast<Context*>(result);
  context->set_map_no_write_barrier(block_context_map());
  context->set_closure(function);
  context->set_previous(previous);
  context->set_extension(scope_info);
  context->set_global_object(previous->global_object());
  return context;
}


MaybeObject* Heap::AllocateScopeInfo(int length) {
  FixedArray* scope_info;
  MaybeObject* maybe_scope_info = AllocateFixedArray(length, TENURED);
  if (!maybe_scope_info->To(&scope_info)) return maybe_scope_info;
  scope_info->set_map_no_write_barrier(scope_info_map());
  return scope_info;
}


MaybeObject* Heap::AllocateExternal(void* value) {
  Foreign* foreign;
  { MaybeObject* maybe_result = AllocateForeign(static_cast<Address>(value));
    if (!maybe_result->To(&foreign)) return maybe_result;
  }
  JSObject* external;
  { MaybeObject* maybe_result = AllocateJSObjectFromMap(external_map());
    if (!maybe_result->To(&external)) return maybe_result;
  }
  external->SetInternalField(0, foreign);
  return external;
}


MaybeObject* Heap::AllocateStruct(InstanceType type) {
  Map* map;
  switch (type) {
#define MAKE_CASE(NAME, Name, name) \
    case NAME##_TYPE: map = name##_map(); break;
STRUCT_LIST(MAKE_CASE)
#undef MAKE_CASE
    default:
      UNREACHABLE();
      return Failure::InternalError();
  }
  int size = map->instance_size();
  AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, TENURED);
  Object* result;
  { MaybeObject* maybe_result = Allocate(map, space);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }
  Struct::cast(result)->InitializeBody(size);
  return result;
}


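// All of the allocators above share the same failure protocol: a
// MaybeObject* either wraps a real Object* or a retry-after-GC failure.
// A minimal caller-side sketch of that protocol (illustrative only):
//
//   Object* obj;
//   MaybeObject* maybe = heap->AllocateStruct(ACCESSOR_PAIR_TYPE);
//   if (!maybe->ToObject(&obj)) {
//     // Allocation failed; propagate so a GC can be triggered and the
//     // allocation retried from a safe point.
//     return maybe;
//   }
//   // obj is now a fully initialized Struct.

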
bool Heap::IsHeapIterable() {
  return (!old_pointer_space()->was_swept_conservatively() &&
          !old_data_space()->was_swept_conservatively());
}


void Heap::EnsureHeapIsIterable() {
  ASSERT(AllowHeapAllocation::IsAllowed());
  if (!IsHeapIterable()) {
    CollectAllGarbage(kMakeHeapIterableMask, "Heap::EnsureHeapIsIterable");
  }
  ASSERT(IsHeapIterable());
}


void Heap::AdvanceIdleIncrementalMarking(intptr_t step_size) {
  incremental_marking()->Step(step_size,
                              IncrementalMarking::NO_GC_VIA_STACK_GUARD);

  if (incremental_marking()->IsComplete()) {
    bool uncommit = false;
    if (gc_count_at_last_idle_gc_ == gc_count_) {
      // No GC since the last full GC, the mutator is probably not active.
      isolate_->compilation_cache()->Clear();
      uncommit = true;
    }
    CollectAllGarbage(kNoGCFlags, "idle notification: finalize incremental");
    mark_sweeps_since_idle_round_started_++;
    gc_count_at_last_idle_gc_ = gc_count_;
    if (uncommit) {
      new_space_.Shrink();
      UncommitFromSpace();
    }
  }
}


bool Heap::IdleNotification(int hint) {
  // Hints greater than this value indicate that
  // the embedder is requesting a lot of GC work.
  const int kMaxHint = 1000;
  const int kMinHintForIncrementalMarking = 10;
  // Minimal hint that allows to do full GC.
  const int kMinHintForFullGC = 100;
  intptr_t size_factor = Min(Max(hint, 20), kMaxHint) / 4;
  // The size factor is in range [5..250]. The numbers here are chosen from
  // experiments. If you change them, make sure to test with
  // chrome/performance_ui_tests --gtest_filter="GeneralMixMemoryTest.*".
  intptr_t step_size =
      size_factor * IncrementalMarking::kAllocatedThreshold;

  if (contexts_disposed_ > 0) {
    contexts_disposed_ = 0;
    int mark_sweep_time = Min(TimeMarkSweepWouldTakeInMs(), 1000);
    if (hint >= mark_sweep_time && !FLAG_expose_gc &&
        incremental_marking()->IsStopped()) {
      HistogramTimerScope scope(isolate_->counters()->gc_context());
      CollectAllGarbage(kReduceMemoryFootprintMask,
                        "idle notification: contexts disposed");
    } else {
      AdvanceIdleIncrementalMarking(step_size);
    }

    // After context disposal there is likely a lot of garbage remaining, reset
    // the idle notification counters in order to trigger more incremental GCs
    // on subsequent idle notifications.
    StartIdleRound();
    return false;
  }

  if (!FLAG_incremental_marking || Serializer::enabled()) {
    return IdleGlobalGC();
  }

  // By doing small chunks of GC work in each IdleNotification,
  // perform a round of incremental GCs and after that wait until
  // the mutator creates enough garbage to justify a new round.
  // An incremental GC progresses as follows:
  // 1. many incremental marking steps,
  // 2. one old space mark-sweep-compact,
  // 3. many lazy sweep steps.
  // Use mark-sweep-compact events to count incremental GCs in a round.

  if (incremental_marking()->IsStopped()) {
    if (!mark_compact_collector()->AreSweeperThreadsActivated() &&
        !IsSweepingComplete() &&
        !AdvanceSweepers(static_cast<int>(step_size))) {
      return false;
    }
  }

  if (mark_sweeps_since_idle_round_started_ >= kMaxMarkSweepsInIdleRound) {
    if (EnoughGarbageSinceLastIdleRound()) {
      StartIdleRound();
    } else {
      return true;
    }
  }

  int remaining_mark_sweeps = kMaxMarkSweepsInIdleRound -
                              mark_sweeps_since_idle_round_started_;

  if (incremental_marking()->IsStopped()) {
    // If there are no more than two GCs left in this idle round and we are
    // allowed to do a full GC, then make those GCs full in order to compact
    // the code space.
    // TODO(ulan): Once we enable code compaction for incremental marking,
    // we can get rid of this special case and always start incremental marking.
    if (remaining_mark_sweeps <= 2 && hint >= kMinHintForFullGC) {
      CollectAllGarbage(kReduceMemoryFootprintMask,
                        "idle notification: finalize idle round");
      mark_sweeps_since_idle_round_started_++;
    } else if (hint > kMinHintForIncrementalMarking) {
      incremental_marking()->Start();
    }
  }
  if (!incremental_marking()->IsStopped() &&
      hint > kMinHintForIncrementalMarking) {
    AdvanceIdleIncrementalMarking(step_size);
  }

  if (mark_sweeps_since_idle_round_started_ >= kMaxMarkSweepsInIdleRound) {
    FinishIdleRound();
    return true;
  }

  return false;
}


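// To make the hint scaling above concrete: a hint of 1000 (the maximum)
// gives size_factor = Min(Max(1000, 20), 1000) / 4 = 250, so one idle step
// covers 250 * IncrementalMarking::kAllocatedThreshold bytes of marking
// work, while the minimum effective hint of 20 gives a factor of 5.
// Illustrative arithmetic only:
//
//   intptr_t factor_small = Min(Max(20, 20), 1000) / 4;    // == 5
//   intptr_t factor_large = Min(Max(1000, 20), 1000) / 4;  // == 250

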
bool Heap::IdleGlobalGC() {
  static const int kIdlesBeforeScavenge = 4;
  static const int kIdlesBeforeMarkSweep = 7;
  static const int kIdlesBeforeMarkCompact = 8;
  static const int kMaxIdleCount = kIdlesBeforeMarkCompact + 1;
  static const unsigned int kGCsBetweenCleanup = 4;

  if (!last_idle_notification_gc_count_init_) {
    last_idle_notification_gc_count_ = gc_count_;
    last_idle_notification_gc_count_init_ = true;
  }

  bool uncommit = true;
  bool finished = false;

  // Reset the number of idle notifications received when a number of
  // GCs have taken place. This allows another round of cleanup based
  // on idle notifications if enough work has been carried out to
  // provoke a number of garbage collections.
  if (gc_count_ - last_idle_notification_gc_count_ < kGCsBetweenCleanup) {
    number_idle_notifications_ =
        Min(number_idle_notifications_ + 1, kMaxIdleCount);
  } else {
    number_idle_notifications_ = 0;
    last_idle_notification_gc_count_ = gc_count_;
  }

  if (number_idle_notifications_ == kIdlesBeforeScavenge) {
    CollectGarbage(NEW_SPACE, "idle notification");
    new_space_.Shrink();
    last_idle_notification_gc_count_ = gc_count_;
  } else if (number_idle_notifications_ == kIdlesBeforeMarkSweep) {
    // Before doing the mark-sweep collections we clear the
    // compilation cache to avoid hanging on to source code and
    // generated code for cached functions.
    isolate_->compilation_cache()->Clear();

    CollectAllGarbage(kReduceMemoryFootprintMask, "idle notification");
    new_space_.Shrink();
    last_idle_notification_gc_count_ = gc_count_;
  } else if (number_idle_notifications_ == kIdlesBeforeMarkCompact) {
    CollectAllGarbage(kReduceMemoryFootprintMask, "idle notification");
    new_space_.Shrink();
    last_idle_notification_gc_count_ = gc_count_;
    number_idle_notifications_ = 0;
    finished = true;
  } else if (number_idle_notifications_ > kIdlesBeforeMarkCompact) {
    // If we have received more than kIdlesBeforeMarkCompact idle
    // notifications we do not perform any cleanup because we don't
    // expect to gain much by doing so.
    finished = true;
  }

  if (uncommit) UncommitFromSpace();

  return finished;
}


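// The thresholds above give IdleGlobalGC a simple escalation schedule,
// driven purely by how many idle notifications arrive between GCs:
//
//   4th consecutive idle notification -> new-space scavenge
//   7th                               -> full mark-sweep (+ cache clear)
//   8th                               -> full mark-sweep, round finished
//   9th and later                     -> no work; nothing left to gain

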
void Heap::Print() {
  if (!HasBeenSetUp()) return;
  isolate()->PrintStack(stdout);
  AllSpaces spaces(this);
  for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
    space->Print();
  }
}


void Heap::ReportCodeStatistics(const char* title) {
  PrintF(">>>>>> Code Stats (%s) >>>>>>\n", title);
  PagedSpace::ResetCodeStatistics(isolate());
  // We do not look for code in new space, map space, or old space. If code
  // somehow ends up in those spaces, we would miss it here.
  code_space_->CollectCodeStatistics();
  lo_space_->CollectCodeStatistics();
  PagedSpace::ReportCodeStatistics(isolate());
}


// This function expects that NewSpace's allocated objects histogram is
// populated (via a call to CollectStatistics or else as a side effect of a
// just-completed scavenge collection).
void Heap::ReportHeapStatistics(const char* title) {
  PrintF(">>>>>> =============== %s (%d) =============== >>>>>>\n",
         title, gc_count_);
  PrintF("old_generation_allocation_limit_ %" V8_PTR_PREFIX "d\n",
         old_generation_allocation_limit_);

  PrintF("\n");
  PrintF("Number of handles : %d\n", HandleScope::NumberOfHandles(isolate_));
  isolate_->global_handles()->PrintStats();
  PrintF("\n");

  PrintF("Heap statistics : ");
  isolate_->memory_allocator()->ReportStatistics();
  PrintF("To space : ");
  new_space_.ReportStatistics();
  PrintF("Old pointer space : ");
  old_pointer_space_->ReportStatistics();
  PrintF("Old data space : ");
  old_data_space_->ReportStatistics();
  PrintF("Code space : ");
  code_space_->ReportStatistics();
  PrintF("Map space : ");
  map_space_->ReportStatistics();
  PrintF("Cell space : ");
  cell_space_->ReportStatistics();
  PrintF("PropertyCell space : ");
  property_cell_space_->ReportStatistics();
  PrintF("Large object space : ");
  lo_space_->ReportStatistics();
  PrintF(">>>>>> ========================================= >>>>>>\n");
}


bool Heap::Contains(HeapObject* value) {
  return Contains(value->address());
}


bool Heap::Contains(Address addr) {
  if (isolate_->memory_allocator()->IsOutsideAllocatedSpace(addr)) return false;
  return HasBeenSetUp() &&
      (new_space_.ToSpaceContains(addr) ||
       old_pointer_space_->Contains(addr) ||
       old_data_space_->Contains(addr) ||
       code_space_->Contains(addr) ||
       map_space_->Contains(addr) ||
       cell_space_->Contains(addr) ||
       property_cell_space_->Contains(addr) ||
       lo_space_->SlowContains(addr));
}


bool Heap::InSpace(HeapObject* value, AllocationSpace space) {
  return InSpace(value->address(), space);
}


bool Heap::InSpace(Address addr, AllocationSpace space) {
  if (isolate_->memory_allocator()->IsOutsideAllocatedSpace(addr)) return false;
  if (!HasBeenSetUp()) return false;

  switch (space) {
    case NEW_SPACE:
      return new_space_.ToSpaceContains(addr);
    case OLD_POINTER_SPACE:
      return old_pointer_space_->Contains(addr);
    case OLD_DATA_SPACE:
      return old_data_space_->Contains(addr);
    case CODE_SPACE:
      return code_space_->Contains(addr);
    case MAP_SPACE:
      return map_space_->Contains(addr);
    case CELL_SPACE:
      return cell_space_->Contains(addr);
    case PROPERTY_CELL_SPACE:
      return property_cell_space_->Contains(addr);
    case LO_SPACE:
      return lo_space_->SlowContains(addr);
  }

  return false;
}


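// A typical use of these predicates is asserting that an object landed in
// the space the allocator was asked for. Illustrative sketch only:
//
//   Object* result;
//   MaybeObject* maybe = heap->AllocateHashTable(8, TENURED);
//   if (!maybe->ToObject(&result)) return maybe;
//   ASSERT(heap->Contains(HeapObject::cast(result)));
//   ASSERT(heap->InSpace(HeapObject::cast(result), OLD_POINTER_SPACE));

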
void Heap::Verify() {
  CHECK(HasBeenSetUp());

  store_buffer()->Verify();

  VerifyPointersVisitor visitor;
  IterateRoots(&visitor, VISIT_ONLY_STRONG);

  new_space_.Verify();

  old_pointer_space_->Verify(&visitor);
  map_space_->Verify(&visitor);

  VerifyPointersVisitor no_dirty_regions_visitor;
  old_data_space_->Verify(&no_dirty_regions_visitor);
  code_space_->Verify(&no_dirty_regions_visitor);
  cell_space_->Verify(&no_dirty_regions_visitor);
  property_cell_space_->Verify(&no_dirty_regions_visitor);

  lo_space_->Verify();
}


MaybeObject* Heap::InternalizeUtf8String(Vector<const char> string) {
  Utf8StringKey key(string, HashSeed());
  return InternalizeStringWithKey(&key);
}


MaybeObject* Heap::InternalizeString(String* string) {
  if (string->IsInternalizedString()) return string;
  Object* result = NULL;
  Object* new_table;
  { MaybeObject* maybe_new_table =
        string_table()->LookupString(string, &result);
    if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
  }
  // Can't use set_string_table because StringTable::cast knows that
  // StringTable is a singleton and checks for identity.
  roots_[kStringTableRootIndex] = new_table;
  ASSERT(result != NULL);
  return result;
}


bool Heap::InternalizeStringIfExists(String* string, String** result) {
  if (string->IsInternalizedString()) {
    *result = string;
    return true;
  }
  return string_table()->LookupStringIfExists(string, result);
}


MaybeObject* Heap::InternalizeStringWithKey(HashTableKey* key) {
  Object* result = NULL;
  Object* new_table;
  { MaybeObject* maybe_new_table =
        string_table()->LookupKey(key, &result);
    if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
  }
  // Can't use set_string_table because StringTable::cast knows that
  // StringTable is a singleton and checks for identity.
  roots_[kStringTableRootIndex] = new_table;
  ASSERT(result != NULL);
  return result;
}


void Heap::ZapFromSpace() {
  NewSpacePageIterator it(new_space_.FromSpaceStart(),
                          new_space_.FromSpaceEnd());
  while (it.has_next()) {
    NewSpacePage* page = it.next();
    for (Address cursor = page->area_start(), limit = page->area_end();
         cursor < limit;
         cursor += kPointerSize) {
      Memory::Address_at(cursor) = kFromSpaceZapValue;
    }
  }
}


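// Zapping makes stale from-space pointers easy to spot in a debugger: any
// read through such a pointer yields the distinctive kFromSpaceZapValue
// pattern instead of plausible object data. Sketch of the invariant that
// holds immediately after zapping (illustrative only):
//
//   Address from_start = new_space_.FromSpaceStart();
//   ASSERT(Memory::Address_at(from_start) == kFromSpaceZapValue);

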
void Heap::IterateAndMarkPointersToFromSpace(Address start,
                                             Address end,
                                             ObjectSlotCallback callback) {
  Address slot_address = start;

  // We are not collecting slots on new space objects during mutation, thus
  // we have to scan for pointers to evacuation candidates when we promote
  // objects. But we should not record any slots in non-black objects. Grey
  // objects' slots would be rescanned anyway, and a white object might not
  // survive until the end of the collection, so it would be a violation of
  // the invariant to record its slots.
  bool record_slots = false;
  if (incremental_marking()->IsCompacting()) {
    MarkBit mark_bit = Marking::MarkBitFrom(HeapObject::FromAddress(start));
    record_slots = Marking::IsBlack(mark_bit);
  }

  while (slot_address < end) {
    Object** slot = reinterpret_cast<Object**>(slot_address);
    Object* object = *slot;
    // If the store buffer becomes overfull we mark pages as being exempt from
    // the store buffer. These pages are scanned to find pointers that point
    // to the new space. In that case we may hit newly promoted objects and
    // fix the pointers before the promotion queue gets to them. Thus the 'if'.
    if (object->IsHeapObject()) {
      if (Heap::InFromSpace(object)) {
        callback(reinterpret_cast<HeapObject**>(slot),
                 HeapObject::cast(object));
        Object* new_object = *slot;
        if (InNewSpace(new_object)) {
          SLOW_ASSERT(Heap::InToSpace(new_object));
          SLOW_ASSERT(new_object->IsHeapObject());
          store_buffer_.EnterDirectlyIntoStoreBuffer(
              reinterpret_cast<Address>(slot));
        }
        SLOW_ASSERT(!MarkCompactCollector::IsOnEvacuationCandidate(new_object));
      } else if (record_slots &&
                 MarkCompactCollector::IsOnEvacuationCandidate(object)) {
        mark_compact_collector()->RecordSlot(slot, slot, object);
      }
    }
    slot_address += kPointerSize;
  }
}


typedef bool (*CheckStoreBufferFilter)(Object** addr);


bool IsAMapPointerAddress(Object** addr) {
  uintptr_t a = reinterpret_cast<uintptr_t>(addr);
  int mod = a % Map::kSize;
  return mod >= Map::kPointerFieldsBeginOffset &&
         mod < Map::kPointerFieldsEndOffset;
}


bool EverythingsAPointer(Object** addr) {
  return true;
}


static void CheckStoreBuffer(Heap* heap,
                             Object** current,
                             Object** limit,
                             Object**** store_buffer_position,
                             Object*** store_buffer_top,
                             CheckStoreBufferFilter filter,
                             Address special_garbage_start,
                             Address special_garbage_end) {
  Map* free_space_map = heap->free_space_map();
  for ( ; current < limit; current++) {
    Object* o = *current;
    Address current_address = reinterpret_cast<Address>(current);
    // Skip free space.
    if (o == free_space_map) {
      FreeSpace* free_space =
          FreeSpace::cast(HeapObject::FromAddress(current_address));
      int skip = free_space->Size();
      ASSERT(current_address + skip <= reinterpret_cast<Address>(limit));
      ASSERT(skip > 0);
      current_address += skip - kPointerSize;
      current = reinterpret_cast<Object**>(current_address);
      continue;
    }
    // Skip the current linear allocation space between top and limit which is
    // unmarked with the free space map, but can contain junk.
    if (current_address == special_garbage_start &&
        special_garbage_end != special_garbage_start) {
      current_address = special_garbage_end - kPointerSize;
      current = reinterpret_cast<Object**>(current_address);
      continue;
    }
    if (!(*filter)(current)) continue;
    ASSERT(current_address < special_garbage_start ||
           current_address >= special_garbage_end);
    ASSERT(reinterpret_cast<uintptr_t>(o) != kFreeListZapValue);
    // We have to check that the pointer does not point into new space
    // without trying to cast it to a heap object since the hash field of
    // a string can contain values like 1 and 3 which are tagged null
    // pointers.
    if (!heap->InNewSpace(o)) continue;
    while (**store_buffer_position < current &&
           *store_buffer_position < store_buffer_top) {
      (*store_buffer_position)++;
    }
    if (**store_buffer_position != current ||
        *store_buffer_position == store_buffer_top) {
      Object** obj_start = current;
      while (!(*obj_start)->IsMap()) obj_start--;
      UNREACHABLE();
    }
  }
}


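// The check above is a classic merge of two sorted sequences: page slots
// are visited in address order while the (SortUniq'ed) store buffer is
// advanced in lockstep, and any new-space pointer whose slot is absent
// from the buffer is a missed intergenerational pointer. Schematically:
//
//   while (**pos < current && *pos < top) (*pos)++;     // catch up
//   if (**pos != current || *pos == top) UNREACHABLE();  // slot missing

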
// Check that the store buffer contains all intergenerational pointers by
// scanning a page and ensuring that all pointers to young space are in the
// store buffer.
void Heap::OldPointerSpaceCheckStoreBuffer() {
  OldSpace* space = old_pointer_space();
  PageIterator pages(space);

  store_buffer()->SortUniq();

  while (pages.has_next()) {
    Page* page = pages.next();
    Object** current = reinterpret_cast<Object**>(page->area_start());

    Address end = page->area_end();

    Object*** store_buffer_position = store_buffer()->Start();
    Object*** store_buffer_top = store_buffer()->Top();

    Object** limit = reinterpret_cast<Object**>(end);
    CheckStoreBuffer(this,
                     current,
                     limit,
                     &store_buffer_position,
                     store_buffer_top,
                     &EverythingsAPointer,
                     space->top(),
                     space->limit());
  }
}


void Heap::MapSpaceCheckStoreBuffer() {
  MapSpace* space = map_space();
  PageIterator pages(space);

  store_buffer()->SortUniq();

  while (pages.has_next()) {
    Page* page = pages.next();
    Object** current = reinterpret_cast<Object**>(page->area_start());

    Address end = page->area_end();

    Object*** store_buffer_position = store_buffer()->Start();
    Object*** store_buffer_top = store_buffer()->Top();

    Object** limit = reinterpret_cast<Object**>(end);
    CheckStoreBuffer(this,
                     current,
                     limit,
                     &store_buffer_position,
                     store_buffer_top,
                     &IsAMapPointerAddress,
                     space->top(),
                     space->limit());
  }
}


void Heap::LargeObjectSpaceCheckStoreBuffer() {
  LargeObjectIterator it(lo_space());
  for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
    // We only have code, sequential strings, or fixed arrays in large
    // object space, and only fixed arrays can possibly contain pointers to
    // the young generation.
    if (object->IsFixedArray()) {
      Object*** store_buffer_position = store_buffer()->Start();
      Object*** store_buffer_top = store_buffer()->Top();
      Object** current = reinterpret_cast<Object**>(object->address());
      Object** limit =
          reinterpret_cast<Object**>(object->address() + object->Size());
      CheckStoreBuffer(this,
                       current,
                       limit,
                       &store_buffer_position,
                       store_buffer_top,
                       &EverythingsAPointer,
                       NULL,
                       NULL);
    }
  }
}


void Heap::IterateRoots(ObjectVisitor* v, VisitMode mode) {
  IterateStrongRoots(v, mode);
  IterateWeakRoots(v, mode);
}


void Heap::IterateWeakRoots(ObjectVisitor* v, VisitMode mode) {
  v->VisitPointer(reinterpret_cast<Object**>(&roots_[kStringTableRootIndex]));
  v->Synchronize(VisitorSynchronization::kStringTable);
  if (mode != VISIT_ALL_IN_SCAVENGE &&
      mode != VISIT_ALL_IN_SWEEP_NEWSPACE) {
    // Scavenge collections have special processing for this.
    external_string_table_.Iterate(v);
  }
  v->Synchronize(VisitorSynchronization::kExternalStringsTable);
}


void Heap::IterateStrongRoots(ObjectVisitor* v, VisitMode mode) {
  v->VisitPointers(&roots_[0], &roots_[kStrongRootListLength]);
  v->Synchronize(VisitorSynchronization::kStrongRootList);

  v->VisitPointer(BitCast<Object**>(&hidden_string_));
  v->Synchronize(VisitorSynchronization::kInternalizedString);

  isolate_->bootstrapper()->Iterate(v);
  v->Synchronize(VisitorSynchronization::kBootstrapper);
  isolate_->Iterate(v);
  v->Synchronize(VisitorSynchronization::kTop);
  Relocatable::Iterate(isolate_, v);
  v->Synchronize(VisitorSynchronization::kRelocatable);

#ifdef ENABLE_DEBUGGER_SUPPORT
  isolate_->debug()->Iterate(v);
  if (isolate_->deoptimizer_data() != NULL) {
    isolate_->deoptimizer_data()->Iterate(v);
  }
#endif
  v->Synchronize(VisitorSynchronization::kDebug);
  isolate_->compilation_cache()->Iterate(v);
  v->Synchronize(VisitorSynchronization::kCompilationCache);

  // Iterate over local handles in handle scopes.
  isolate_->handle_scope_implementer()->Iterate(v);
  isolate_->IterateDeferredHandles(v);
  v->Synchronize(VisitorSynchronization::kHandleScope);

  // Iterate over the builtin code objects and code stubs in the
  // heap. Note that it is not necessary to iterate over code objects
  // on scavenge collections.
  if (mode != VISIT_ALL_IN_SCAVENGE) {
    isolate_->builtins()->IterateBuiltins(v);
  }
  v->Synchronize(VisitorSynchronization::kBuiltins);

  // Iterate over global handles.
  switch (mode) {
    case VISIT_ONLY_STRONG:
      isolate_->global_handles()->IterateStrongRoots(v);
      break;
    case VISIT_ALL_IN_SCAVENGE:
      isolate_->global_handles()->IterateNewSpaceStrongAndDependentRoots(v);
      break;
    case VISIT_ALL_IN_SWEEP_NEWSPACE:
    case VISIT_ALL:
      isolate_->global_handles()->IterateAllRoots(v);
      break;
  }
  v->Synchronize(VisitorSynchronization::kGlobalHandles);

  // Iterate over eternal handles.
  if (mode == VISIT_ALL_IN_SCAVENGE) {
    isolate_->eternal_handles()->IterateNewSpaceRoots(v);
  } else {
    isolate_->eternal_handles()->IterateAllRoots(v);
  }
  v->Synchronize(VisitorSynchronization::kEternalHandles);

  // Iterate over pointers being held by inactive threads.
  isolate_->thread_manager()->Iterate(v);
  v->Synchronize(VisitorSynchronization::kThreadManager);

  // Iterate over the pointers the Serialization/Deserialization code is
  // holding.
  // During garbage collection this keeps the partial snapshot cache alive.
  // During deserialization of the startup snapshot this creates the partial
  // snapshot cache and deserializes the objects it refers to. During
  // serialization this does nothing, since the partial snapshot cache is
  // empty. However the next thing we do is create the partial snapshot,
  // filling up the partial snapshot cache with objects it needs as we go.
  SerializerDeserializer::Iterate(isolate_, v);
  // We don't do a v->Synchronize call here, because in debug mode that will
  // output a flag to the snapshot. However at this point the serializer and
  // deserializer are deliberately a little unsynchronized (see above) so the
  // checking of the sync flag in the snapshot would fail.
}


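// Root iteration is the building block for the tools above and below (heap
// snapshots, verification, path tracing). A minimal custom visitor sketch,
// assuming only the ObjectVisitor interface used throughout this file:
//
//   class CountingVisitor : public ObjectVisitor {
//    public:
//     CountingVisitor() : count_(0) {}
//     void VisitPointers(Object** start, Object** end) {
//       for (Object** p = start; p < end; p++) {
//         if ((*p)->IsHeapObject()) count_++;
//       }
//     }
//     int count() const { return count_; }
//    private:
//     int count_;
//   };
//
//   CountingVisitor counter;
//   heap->IterateStrongRoots(&counter, VISIT_ONLY_STRONG);

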
// TODO(1236194): Since the heap size is configurable on the command line
// and through the API, we should gracefully handle the case that the heap
// size is not big enough to fit all the initial objects.
bool Heap::ConfigureHeap(int max_semispace_size,
                         intptr_t max_old_gen_size,
                         intptr_t max_executable_size) {
  if (HasBeenSetUp()) return false;

  if (FLAG_stress_compaction) {
    // This will cause more frequent GCs when stressing.
    max_semispace_size_ = Page::kPageSize;
  }

  if (max_semispace_size > 0) {
    if (max_semispace_size < Page::kPageSize) {
      max_semispace_size = Page::kPageSize;
      if (FLAG_trace_gc) {
        PrintPID("Max semispace size cannot be less than %dkbytes\n",
                 Page::kPageSize >> 10);
      }
    }
    max_semispace_size_ = max_semispace_size;
  }

  if (Snapshot::IsEnabled()) {
    // If we are using a snapshot we always reserve the default amount
    // of memory for each semispace because code in the snapshot has
    // write-barrier code that relies on the size and alignment of new
    // space. We therefore cannot use a larger max semispace size
    // than the default reserved semispace size.
    if (max_semispace_size_ > reserved_semispace_size_) {
      max_semispace_size_ = reserved_semispace_size_;
      if (FLAG_trace_gc) {
        PrintPID("Max semispace size cannot be more than %dkbytes\n",
                 reserved_semispace_size_ >> 10);
      }
    }
  } else {
    // If we are not using snapshots we reserve space for the actual
    // max semispace size.
    reserved_semispace_size_ = max_semispace_size_;
  }

  if (max_old_gen_size > 0) max_old_generation_size_ = max_old_gen_size;
  if (max_executable_size > 0) {
    max_executable_size_ = RoundUp(max_executable_size, Page::kPageSize);
  }

  // The max executable size must be less than or equal to the max old
  // generation size.
  if (max_executable_size_ > max_old_generation_size_) {
    max_executable_size_ = max_old_generation_size_;
  }

  // The new space size must be a power of two to support single-bit testing
  // for containment.
  max_semispace_size_ = RoundUpToPowerOf2(max_semispace_size_);
  reserved_semispace_size_ = RoundUpToPowerOf2(reserved_semispace_size_);
  initial_semispace_size_ = Min(initial_semispace_size_, max_semispace_size_);

  // The external allocation limit should be below 256 MB on all architectures
  // to avoid unnecessary low memory notifications, as that is the threshold
  // for some embedders.
  external_allocation_limit_ = 12 * max_semispace_size_;
  ASSERT(external_allocation_limit_ <= 256 * MB);

  // The old generation is paged and needs at least one page for each space.
  int paged_space_count = LAST_PAGED_SPACE - FIRST_PAGED_SPACE + 1;
  max_old_generation_size_ =
      Max(static_cast<intptr_t>(paged_space_count * Page::kPageSize),
          RoundUp(max_old_generation_size_, Page::kPageSize));

  // We rely on being able to allocate new arrays in paged spaces.
  ASSERT(Page::kMaxRegularHeapObjectSize >=
         (JSArray::kSize +
          FixedArray::SizeFor(JSObject::kInitialMaxFastElementArray) +
          AllocationMemento::kSize));

  configured_ = true;
  return true;
}


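// Worked example of the sizing rules above (values invented for
// illustration): a requested max semispace of 5000000 bytes is first
// rounded up to a power of two, and the derived external allocation limit
// stays under the 256 MB embedder threshold:
//
//   max_semispace_size_ = RoundUpToPowerOf2(5000000);        // 8388608 (8 MB)
//   external_allocation_limit_ = 12 * max_semispace_size_;   // 96 MB

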
bool Heap::ConfigureHeapDefault() {
  return ConfigureHeap(static_cast<intptr_t>(FLAG_max_new_space_size / 2) * KB,
                       static_cast<intptr_t>(FLAG_max_old_space_size) * MB,
                       static_cast<intptr_t>(FLAG_max_executable_size) * MB);
}


void Heap::RecordStats(HeapStats* stats, bool take_snapshot) {
  *stats->start_marker = HeapStats::kStartMarker;
  *stats->end_marker = HeapStats::kEndMarker;
  *stats->new_space_size = new_space_.SizeAsInt();
  *stats->new_space_capacity = static_cast<int>(new_space_.Capacity());
  *stats->old_pointer_space_size = old_pointer_space_->SizeOfObjects();
  *stats->old_pointer_space_capacity = old_pointer_space_->Capacity();
  *stats->old_data_space_size = old_data_space_->SizeOfObjects();
  *stats->old_data_space_capacity = old_data_space_->Capacity();
  *stats->code_space_size = code_space_->SizeOfObjects();
  *stats->code_space_capacity = code_space_->Capacity();
  *stats->map_space_size = map_space_->SizeOfObjects();
  *stats->map_space_capacity = map_space_->Capacity();
  *stats->cell_space_size = cell_space_->SizeOfObjects();
  *stats->cell_space_capacity = cell_space_->Capacity();
  *stats->property_cell_space_size = property_cell_space_->SizeOfObjects();
  *stats->property_cell_space_capacity = property_cell_space_->Capacity();
  *stats->lo_space_size = lo_space_->Size();
  isolate_->global_handles()->RecordStats(stats);
  *stats->memory_allocator_size = isolate()->memory_allocator()->Size();
  *stats->memory_allocator_capacity =
      isolate()->memory_allocator()->Size() +
      isolate()->memory_allocator()->Available();
  *stats->os_error = OS::GetLastError();
  if (take_snapshot) {
    HeapIterator iterator(this);
    for (HeapObject* obj = iterator.next();
         obj != NULL;
         obj = iterator.next()) {
      InstanceType type = obj->map()->instance_type();
      ASSERT(0 <= type && type <= LAST_TYPE);
      stats->objects_per_type[type]++;
      stats->size_per_type[type] += obj->Size();
    }
  }
}


intptr_t Heap::PromotedSpaceSizeOfObjects() {
  return old_pointer_space_->SizeOfObjects()
      + old_data_space_->SizeOfObjects()
      + code_space_->SizeOfObjects()
      + map_space_->SizeOfObjects()
      + cell_space_->SizeOfObjects()
      + property_cell_space_->SizeOfObjects()
      + lo_space_->SizeOfObjects();
}


bool Heap::AdvanceSweepers(int step_size) {
  ASSERT(isolate()->num_sweeper_threads() == 0);
  bool sweeping_complete = old_data_space()->AdvanceSweeper(step_size);
  sweeping_complete &= old_pointer_space()->AdvanceSweeper(step_size);
  return sweeping_complete;
}


int64_t Heap::PromotedExternalMemorySize() {
  if (amount_of_external_allocated_memory_
      <= amount_of_external_allocated_memory_at_last_global_gc_) return 0;
  return amount_of_external_allocated_memory_
      - amount_of_external_allocated_memory_at_last_global_gc_;
}


void Heap::EnableInlineAllocation() {
  if (!inline_allocation_disabled_) return;
  inline_allocation_disabled_ = false;

  // Update inline allocation limit for new space.
  new_space()->UpdateInlineAllocationLimit(0);
}


void Heap::DisableInlineAllocation() {
  if (inline_allocation_disabled_) return;
  inline_allocation_disabled_ = true;

  // Update inline allocation limit for new space.
  new_space()->UpdateInlineAllocationLimit(0);

  // Update inline allocation limit for old spaces.
  PagedSpaces spaces(this);
  for (PagedSpace* space = spaces.next();
       space != NULL;
       space = spaces.next()) {
    space->EmptyAllocationInfo();
  }
}


V8_DECLARE_ONCE(initialize_gc_once);

static void InitializeGCOnce() {
  InitializeScavengingVisitorsTables();
  NewSpaceScavenger::Initialize();
  MarkCompactCollector::Initialize();
}


bool Heap::SetUp() {
  allocation_timeout_ = FLAG_gc_interval;

  // Initialize heap spaces and initial maps and objects. Whenever something
  // goes wrong, just return false. The caller should check the results and
  // call Heap::TearDown() to release allocated memory.
  //
  // If the heap is not yet configured (e.g. through the API), configure it.
  // Configuration is based on the flags new-space-size (really the semispace
  // size) and old-space-size if set or the initial values of semispace_size_
  // and old_generation_size_ otherwise.
  if (!configured_) {
    if (!ConfigureHeapDefault()) return false;
  }

  CallOnce(&initialize_gc_once, &InitializeGCOnce);

  MarkMapPointersAsEncoded(false);

  // Set up memory allocator.
  if (!isolate_->memory_allocator()->SetUp(MaxReserved(), MaxExecutableSize()))
      return false;

  // Set up new space.
  if (!new_space_.SetUp(reserved_semispace_size_, max_semispace_size_)) {
    return false;
  }

  // Initialize old pointer space.
  old_pointer_space_ =
      new OldSpace(this,
                   max_old_generation_size_,
                   OLD_POINTER_SPACE,
                   NOT_EXECUTABLE);
  if (old_pointer_space_ == NULL) return false;
  if (!old_pointer_space_->SetUp()) return false;

  // Initialize old data space.
  old_data_space_ =
      new OldSpace(this,
                   max_old_generation_size_,
                   OLD_DATA_SPACE,
                   NOT_EXECUTABLE);
  if (old_data_space_ == NULL) return false;
  if (!old_data_space_->SetUp()) return false;

  // Initialize the code space, set its maximum capacity to the old
  // generation size. It needs executable memory.
  // On 64-bit platform(s), we put all code objects in a 2 GB range of
  // virtual address space, so that they can call each other with near calls.
  if (code_range_size_ > 0) {
    if (!isolate_->code_range()->SetUp(code_range_size_)) {
      return false;
    }
  }

  code_space_ =
      new OldSpace(this, max_old_generation_size_, CODE_SPACE, EXECUTABLE);
  if (code_space_ == NULL) return false;
  if (!code_space_->SetUp()) return false;

  // Initialize map space.
  map_space_ = new MapSpace(this, max_old_generation_size_, MAP_SPACE);
  if (map_space_ == NULL) return false;
  if (!map_space_->SetUp()) return false;

  // Initialize simple cell space.
  cell_space_ = new CellSpace(this, max_old_generation_size_, CELL_SPACE);
  if (cell_space_ == NULL) return false;
  if (!cell_space_->SetUp()) return false;

  // Initialize global property cell space.
  property_cell_space_ = new PropertyCellSpace(this, max_old_generation_size_,
                                               PROPERTY_CELL_SPACE);
  if (property_cell_space_ == NULL) return false;
  if (!property_cell_space_->SetUp()) return false;

  // The large object code space may contain code or data. We set the memory
  // to be non-executable here for safety, but this means we need to enable it
  // explicitly when allocating large code objects.
  lo_space_ = new LargeObjectSpace(this, max_old_generation_size_, LO_SPACE);
  if (lo_space_ == NULL) return false;
  if (!lo_space_->SetUp()) return false;

  // Set up the seed that is used to randomize the string hash function.
  ASSERT(hash_seed() == 0);
  if (FLAG_randomize_hashes) {
    if (FLAG_hash_seed == 0) {
      int rnd = isolate()->random_number_generator()->NextInt();
      set_hash_seed(Smi::FromInt(rnd & Name::kHashBitMask));
    } else {
      set_hash_seed(Smi::FromInt(FLAG_hash_seed));
    }
  }

  LOG(isolate_, IntPtrTEvent("heap-capacity", Capacity()));
  LOG(isolate_, IntPtrTEvent("heap-available", Available()));

  store_buffer()->SetUp();

  mark_compact_collector()->SetUp();

  if (FLAG_concurrent_recompilation) relocation_mutex_ = new Mutex;

  return true;
}


bool Heap::CreateHeapObjects() {
  // Create initial maps.
  if (!CreateInitialMaps()) return false;
  if (!CreateApiObjects()) return false;

  // Create initial objects
  if (!CreateInitialObjects()) return false;

  native_contexts_list_ = undefined_value();
  array_buffers_list_ = undefined_value();
  allocation_sites_list_ = undefined_value();
  weak_object_to_code_table_ = undefined_value();
  return true;
}


void Heap::SetStackLimits() {
  ASSERT(isolate_ != NULL);
  ASSERT(isolate_ == isolate());
  // On 64 bit machines, pointers are generally out of range of Smis. We write
  // something that looks like an out of range Smi to the GC.

  // Set up the special root array entries containing the stack limits.
  // These are actually addresses, but the tag makes the GC ignore it.
  roots_[kStackLimitRootIndex] =
      reinterpret_cast<Object*>(
          (isolate_->stack_guard()->jslimit() & ~kSmiTagMask) | kSmiTag);
  roots_[kRealStackLimitRootIndex] =
      reinterpret_cast<Object*>(
          (isolate_->stack_guard()->real_jslimit() & ~kSmiTagMask) | kSmiTag);
}


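// To see why this tagging works, recall that with kSmiTag == 0 and
// kSmiTagMask == 1 the expression simply clears the low-order tag bit, so
// a limit address such as 0x7fff0005 is stored as 0x7fff0004, which the GC
// scans past as if it were a small integer. Illustrative arithmetic only
// (address value invented):
//
//   uintptr_t limit = 0x7fff0005;
//   uintptr_t stored = (limit & ~kSmiTagMask) | kSmiTag;  // 0x7fff0004

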
void Heap::TearDown() {
  if (FLAG_verify_heap) {
    Verify();
  }

  UpdateMaximumCommitted();

  if (FLAG_print_cumulative_gc_stat) {
    PrintF("\n");
    PrintF("gc_count=%d ", gc_count_);
    PrintF("mark_sweep_count=%d ", ms_count_);
    PrintF("max_gc_pause=%.1f ", get_max_gc_pause());
    PrintF("total_gc_time=%.1f ", total_gc_time_ms_);
    PrintF("min_in_mutator=%.1f ", get_min_in_mutator());
    PrintF("max_alive_after_gc=%" V8_PTR_PREFIX "d ",
           get_max_alive_after_gc());
    PrintF("total_marking_time=%.1f ", marking_time());
    PrintF("total_sweeping_time=%.1f ", sweeping_time());
    PrintF("\n\n");
  }

  if (FLAG_print_max_heap_committed) {
    PrintF("\n");
    PrintF("maximum_committed_by_heap=%" V8_PTR_PREFIX "d ",
           MaximumCommittedMemory());
    PrintF("maximum_committed_by_new_space=%" V8_PTR_PREFIX "d ",
           new_space_.MaximumCommittedMemory());
    PrintF("maximum_committed_by_old_pointer_space=%" V8_PTR_PREFIX "d ",
           old_pointer_space_->MaximumCommittedMemory());
    PrintF("maximum_committed_by_old_data_space=%" V8_PTR_PREFIX "d ",
           old_data_space_->MaximumCommittedMemory());
    PrintF("maximum_committed_by_code_space=%" V8_PTR_PREFIX "d ",
           code_space_->MaximumCommittedMemory());
    PrintF("maximum_committed_by_map_space=%" V8_PTR_PREFIX "d ",
           map_space_->MaximumCommittedMemory());
    PrintF("maximum_committed_by_cell_space=%" V8_PTR_PREFIX "d ",
           cell_space_->MaximumCommittedMemory());
    PrintF("maximum_committed_by_property_space=%" V8_PTR_PREFIX "d ",
           property_cell_space_->MaximumCommittedMemory());
    PrintF("maximum_committed_by_lo_space=%" V8_PTR_PREFIX "d ",
           lo_space_->MaximumCommittedMemory());
    PrintF("\n\n");
  }

  TearDownArrayBuffers();

  isolate_->global_handles()->TearDown();

  external_string_table_.TearDown();

  mark_compact_collector()->TearDown();

  new_space_.TearDown();

  if (old_pointer_space_ != NULL) {
    old_pointer_space_->TearDown();
    delete old_pointer_space_;
    old_pointer_space_ = NULL;
  }

  if (old_data_space_ != NULL) {
    old_data_space_->TearDown();
    delete old_data_space_;
    old_data_space_ = NULL;
  }

  if (code_space_ != NULL) {
    code_space_->TearDown();
    delete code_space_;
    code_space_ = NULL;
  }

  if (map_space_ != NULL) {
    map_space_->TearDown();
    delete map_space_;
    map_space_ = NULL;
  }

  if (cell_space_ != NULL) {
    cell_space_->TearDown();
    delete cell_space_;
    cell_space_ = NULL;
  }

  if (property_cell_space_ != NULL) {
    property_cell_space_->TearDown();
    delete property_cell_space_;
    property_cell_space_ = NULL;
  }

  if (lo_space_ != NULL) {
    lo_space_->TearDown();
    delete lo_space_;
    lo_space_ = NULL;
  }

  store_buffer()->TearDown();
  incremental_marking()->TearDown();

  isolate_->memory_allocator()->TearDown();

  delete relocation_mutex_;
  relocation_mutex_ = NULL;
}


void Heap::AddGCPrologueCallback(v8::Isolate::GCPrologueCallback callback,
                                 GCType gc_type,
                                 bool pass_isolate) {
  ASSERT(callback != NULL);
  GCPrologueCallbackPair pair(callback, gc_type, pass_isolate);
  ASSERT(!gc_prologue_callbacks_.Contains(pair));
  return gc_prologue_callbacks_.Add(pair);
}


void Heap::RemoveGCPrologueCallback(v8::Isolate::GCPrologueCallback callback) {
  ASSERT(callback != NULL);
  for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
    if (gc_prologue_callbacks_[i].callback == callback) {
      gc_prologue_callbacks_.Remove(i);
      return;
    }
  }
  UNREACHABLE();
}


void Heap::AddGCEpilogueCallback(v8::Isolate::GCEpilogueCallback callback,
                                 GCType gc_type,
                                 bool pass_isolate) {
  ASSERT(callback != NULL);
  GCEpilogueCallbackPair pair(callback, gc_type, pass_isolate);
  ASSERT(!gc_epilogue_callbacks_.Contains(pair));
  return gc_epilogue_callbacks_.Add(pair);
}


void Heap::RemoveGCEpilogueCallback(v8::Isolate::GCEpilogueCallback callback) {
  ASSERT(callback != NULL);
  for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
    if (gc_epilogue_callbacks_[i].callback == callback) {
      gc_epilogue_callbacks_.Remove(i);
      return;
    }
  }
  UNREACHABLE();
}


MaybeObject* Heap::AddWeakObjectToCodeDependency(Object* obj,
                                                 DependentCode* dep) {
  ASSERT(!InNewSpace(obj));
  ASSERT(!InNewSpace(dep));
  MaybeObject* maybe_obj =
      WeakHashTable::cast(weak_object_to_code_table_)->Put(obj, dep);
  WeakHashTable* table;
  if (!maybe_obj->To(&table)) return maybe_obj;
  if (ShouldZapGarbage() && weak_object_to_code_table_ != table) {
    WeakHashTable::cast(weak_object_to_code_table_)->Zap(the_hole_value());
  }
  set_weak_object_to_code_table(table);
  ASSERT_EQ(dep, WeakHashTable::cast(weak_object_to_code_table_)->Lookup(obj));
  return weak_object_to_code_table_;
}


DependentCode* Heap::LookupWeakObjectToCodeDependency(Object* obj) {
  Object* dep = WeakHashTable::cast(weak_object_to_code_table_)->Lookup(obj);
  if (dep->IsDependentCode()) return DependentCode::cast(dep);
  return DependentCode::cast(empty_fixed_array());
}


void Heap::EnsureWeakObjectToCodeTable() {
  if (!weak_object_to_code_table()->IsHashTable()) {
    set_weak_object_to_code_table(*isolate()->factory()->NewWeakHashTable(16));
  }
}


void Heap::FatalProcessOutOfMemory(const char* location, bool take_snapshot) {
  v8::internal::V8::FatalProcessOutOfMemory(location, take_snapshot);
}


class PrintHandleVisitor: public ObjectVisitor {
 public:
  void VisitPointers(Object** start, Object** end) {
    for (Object** p = start; p < end; p++)
      PrintF("  handle %p to %p\n",
             reinterpret_cast<void*>(p),
             reinterpret_cast<void*>(*p));
  }
};


void Heap::PrintHandles() {
  PrintF("Handles:\n");
  PrintHandleVisitor v;
  isolate_->handle_scope_implementer()->Iterate(&v);
}


Space* AllSpaces::next() {
  switch (counter_++) {
    case NEW_SPACE:
      return heap_->new_space();
    case OLD_POINTER_SPACE:
      return heap_->old_pointer_space();
    case OLD_DATA_SPACE:
      return heap_->old_data_space();
    case CODE_SPACE:
      return heap_->code_space();
    case MAP_SPACE:
      return heap_->map_space();
    case CELL_SPACE:
      return heap_->cell_space();
    case PROPERTY_CELL_SPACE:
      return heap_->property_cell_space();
    case LO_SPACE:
      return heap_->lo_space();
    default:
      return NULL;
  }
}


PagedSpace* PagedSpaces::next() {
  switch (counter_++) {
    case OLD_POINTER_SPACE:
      return heap_->old_pointer_space();
    case OLD_DATA_SPACE:
      return heap_->old_data_space();
    case CODE_SPACE:
      return heap_->code_space();
    case MAP_SPACE:
      return heap_->map_space();
    case CELL_SPACE:
      return heap_->cell_space();
    case PROPERTY_CELL_SPACE:
      return heap_->property_cell_space();
    default:
      return NULL;
  }
}


OldSpace* OldSpaces::next() {
  switch (counter_++) {
    case OLD_POINTER_SPACE:
      return heap_->old_pointer_space();
    case OLD_DATA_SPACE:
      return heap_->old_data_space();
    case CODE_SPACE:
      return heap_->code_space();
    default:
      return NULL;
  }
}


SpaceIterator::SpaceIterator(Heap* heap)
    : heap_(heap),
      current_space_(FIRST_SPACE),
      iterator_(NULL),
      size_func_(NULL) {
}


SpaceIterator::SpaceIterator(Heap* heap, HeapObjectCallback size_func)
    : heap_(heap),
      current_space_(FIRST_SPACE),
      iterator_(NULL),
      size_func_(size_func) {
}


SpaceIterator::~SpaceIterator() {
  // Delete active iterator if any.
  delete iterator_;
}


bool SpaceIterator::has_next() {
  // Iterate until no more spaces.
  return current_space_ != LAST_SPACE;
}


ObjectIterator* SpaceIterator::next() {
  if (iterator_ != NULL) {
    delete iterator_;
    iterator_ = NULL;
    // Move to the next space
    current_space_++;
    if (current_space_ > LAST_SPACE) {
      return NULL;
    }
  }

  // Return iterator for the new current space.
  return CreateIterator();
}


// Create an iterator for the space to iterate.
ObjectIterator* SpaceIterator::CreateIterator() {
  ASSERT(iterator_ == NULL);

  switch (current_space_) {
    case NEW_SPACE:
      iterator_ = new SemiSpaceIterator(heap_->new_space(), size_func_);
      break;
    case OLD_POINTER_SPACE:
      iterator_ =
          new HeapObjectIterator(heap_->old_pointer_space(), size_func_);
      break;
    case OLD_DATA_SPACE:
      iterator_ = new HeapObjectIterator(heap_->old_data_space(), size_func_);
      break;
    case CODE_SPACE:
      iterator_ = new HeapObjectIterator(heap_->code_space(), size_func_);
      break;
    case MAP_SPACE:
      iterator_ = new HeapObjectIterator(heap_->map_space(), size_func_);
      break;
    case CELL_SPACE:
      iterator_ = new HeapObjectIterator(heap_->cell_space(), size_func_);
      break;
    case PROPERTY_CELL_SPACE:
      iterator_ = new HeapObjectIterator(heap_->property_cell_space(),
                                         size_func_);
      break;
    case LO_SPACE:
      iterator_ = new LargeObjectIterator(heap_->lo_space(), size_func_);
      break;
  }

  // Return the newly allocated iterator;
  ASSERT(iterator_ != NULL);
  return iterator_;
}


class HeapObjectsFilter {
 public:
  virtual ~HeapObjectsFilter() {}
  virtual bool SkipObject(HeapObject* object) = 0;
};


class UnreachableObjectsFilter : public HeapObjectsFilter {
 public:
  explicit UnreachableObjectsFilter(Heap* heap) : heap_(heap) {
    MarkReachableObjects();
  }

  ~UnreachableObjectsFilter() {
    heap_->mark_compact_collector()->ClearMarkbits();
  }

  bool SkipObject(HeapObject* object) {
    MarkBit mark_bit = Marking::MarkBitFrom(object);
    return !mark_bit.Get();
  }

 private:
  class MarkingVisitor : public ObjectVisitor {
   public:
    MarkingVisitor() : marking_stack_(10) {}

    void VisitPointers(Object** start, Object** end) {
      for (Object** p = start; p < end; p++) {
        if (!(*p)->IsHeapObject()) continue;
        HeapObject* obj = HeapObject::cast(*p);
        MarkBit mark_bit = Marking::MarkBitFrom(obj);
        if (!mark_bit.Get()) {
          mark_bit.Set();
          marking_stack_.Add(obj);
        }
      }
    }

    void TransitiveClosure() {
      while (!marking_stack_.is_empty()) {
        HeapObject* obj = marking_stack_.RemoveLast();
        obj->Iterate(this);
      }
    }

   private:
    List<HeapObject*> marking_stack_;
  };

  void MarkReachableObjects() {
    MarkingVisitor visitor;
    heap_->IterateRoots(&visitor, VISIT_ALL);
    visitor.TransitiveClosure();
  }

  Heap* heap_;
  DisallowHeapAllocation no_allocation_;
};


HeapIterator::HeapIterator(Heap* heap)
    : heap_(heap),
      filtering_(HeapIterator::kNoFiltering),
      filter_(NULL) {
  Init();
}


HeapIterator::HeapIterator(Heap* heap,
                           HeapIterator::HeapObjectsFiltering filtering)
    : heap_(heap),
      filtering_(filtering),
      filter_(NULL) {
  Init();
}


HeapIterator::~HeapIterator() {
  Shutdown();
}


void HeapIterator::Init() {
  // Start the iteration.
  space_iterator_ = new SpaceIterator(heap_);
  switch (filtering_) {
    case kFilterUnreachable:
      filter_ = new UnreachableObjectsFilter(heap_);
      break;
    default:
      break;
  }
  object_iterator_ = space_iterator_->next();
}


void HeapIterator::Shutdown() {
#ifdef DEBUG
  // Assert that in filtering mode we have iterated through all
  // objects. Otherwise, heap will be left in an inconsistent state.
  if (filtering_ != kNoFiltering) {
    ASSERT(object_iterator_ == NULL);
  }
#endif
  // Make sure the last iterator is deallocated.
  delete space_iterator_;
  space_iterator_ = NULL;
  object_iterator_ = NULL;
  delete filter_;
  filter_ = NULL;
}


HeapObject* HeapIterator::next() {
  if (filter_ == NULL) return NextObject();

  HeapObject* obj = NextObject();
  while (obj != NULL && filter_->SkipObject(obj)) obj = NextObject();
  return obj;
}


HeapObject* HeapIterator::NextObject() {
  // No iterator means we are done.
  if (object_iterator_ == NULL) return NULL;

  if (HeapObject* obj = object_iterator_->next_object()) {
    // If the current iterator has more objects we are fine.
    return obj;
  } else {
    // Go though the spaces looking for one that has objects.
    while (space_iterator_->has_next()) {
      object_iterator_ = space_iterator_->next();
      if (HeapObject* obj = object_iterator_->next_object()) {
        return obj;
      }
    }
  }
  // Done with the last space.
  object_iterator_ = NULL;
  return NULL;
}


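// Putting the pieces together, walking every live object in the heap is a
// short loop; with kFilterUnreachable the marking pass above restricts the
// walk to objects reachable from the roots. Illustrative sketch only:
//
//   heap->EnsureHeapIsIterable();
//   HeapIterator iterator(heap, HeapIterator::kFilterUnreachable);
//   for (HeapObject* obj = iterator.next(); obj != NULL;
//        obj = iterator.next()) {
//     // Inspect obj, e.g. obj->Size() or obj->map()->instance_type().
//   }

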
void HeapIterator::reset() {
  // Restart the iterator.
  Shutdown();
  Init();
}


Object* const PathTracer::kAnyGlobalObject = NULL;


class PathTracer::MarkVisitor: public ObjectVisitor {
 public:
  explicit MarkVisitor(PathTracer* tracer) : tracer_(tracer) {}
  void VisitPointers(Object** start, Object** end) {
    // Scan all HeapObject pointers in [start, end)
    for (Object** p = start; !tracer_->found() && (p < end); p++) {
      if ((*p)->IsHeapObject())
        tracer_->MarkRecursively(p, this);
    }
  }

 private:
  PathTracer* tracer_;
};


class PathTracer::UnmarkVisitor: public ObjectVisitor {
 public:
  explicit UnmarkVisitor(PathTracer* tracer) : tracer_(tracer) {}
  void VisitPointers(Object** start, Object** end) {
    // Scan all HeapObject pointers in [start, end)
    for (Object** p = start; p < end; p++) {
      if ((*p)->IsHeapObject())
        tracer_->UnmarkRecursively(p, this);
    }
  }

 private:
  PathTracer* tracer_;
};


void PathTracer::VisitPointers(Object** start, Object** end) {
  bool done = ((what_to_find_ == FIND_FIRST) && found_target_);
  // Visit all HeapObject pointers in [start, end)
  for (Object** p = start; !done && (p < end); p++) {
    if ((*p)->IsHeapObject()) {
      TracePathFrom(p);
      done = ((what_to_find_ == FIND_FIRST) && found_target_);
    }
  }
}


void PathTracer::Reset() {
  found_target_ = false;
  object_stack_.Clear();
}


void PathTracer::TracePathFrom(Object** root) {
  ASSERT((search_target_ == kAnyGlobalObject) ||
         search_target_->IsHeapObject());
  found_target_in_trace_ = false;
  Reset();

  MarkVisitor mark_visitor(this);
  MarkRecursively(root, &mark_visitor);

  UnmarkVisitor unmark_visitor(this);
  UnmarkRecursively(root, &unmark_visitor);

  ProcessResults();
}


static bool SafeIsNativeContext(HeapObject* obj) {
  return obj->map() == obj->GetHeap()->raw_unchecked_native_context_map();
}


void PathTracer::MarkRecursively(Object** p, MarkVisitor* mark_visitor) {
  if (!(*p)->IsHeapObject()) return;

  HeapObject* obj = HeapObject::cast(*p);

  Object* map = obj->map();

  if (!map->IsHeapObject()) return;  // visited before

  if (found_target_in_trace_) return;  // stop if target found
  object_stack_.Add(obj);
  if (((search_target_ == kAnyGlobalObject) && obj->IsJSGlobalObject()) ||
      (obj == search_target_)) {
    found_target_in_trace_ = true;
    found_target_ = true;
    return;
  }

  bool is_native_context = SafeIsNativeContext(obj);

  // not visited yet
  Map* map_p = reinterpret_cast<Map*>(HeapObject::cast(map));

  Address map_addr = map_p->address();

  obj->set_map_no_write_barrier(reinterpret_cast<Map*>(map_addr + kMarkTag));

  // Scan the object body.
  if (is_native_context && (visit_mode_ == VISIT_ONLY_STRONG)) {
    // This is specialized to scan Context's properly.
    Object** start = reinterpret_cast<Object**>(obj->address() +
                                                Context::kHeaderSize);
    Object** end = reinterpret_cast<Object**>(obj->address() +
        Context::kHeaderSize + Context::FIRST_WEAK_SLOT * kPointerSize);
    mark_visitor->VisitPointers(start, end);
  } else {
    obj->IterateBody(map_p->instance_type(),
                     obj->SizeFromMap(map_p),
                     mark_visitor);
  }

  // Scan the map after the body because the body is a lot more interesting
  // when doing leak detection.
  MarkRecursively(&map, mark_visitor);

  if (!found_target_in_trace_)  // don't pop if found the target
    object_stack_.RemoveLast();
}


void PathTracer::UnmarkRecursively(Object** p, UnmarkVisitor* unmark_visitor) {
  if (!(*p)->IsHeapObject()) return;

  HeapObject* obj = HeapObject::cast(*p);

  Object* map = obj->map();

  if (map->IsHeapObject()) return;  // unmarked already

  Address map_addr = reinterpret_cast<Address>(map);

  map_addr -= kMarkTag;

  ASSERT_TAG_ALIGNED(map_addr);

  HeapObject* map_p = HeapObject::FromAddress(map_addr);

  obj->set_map_no_write_barrier(reinterpret_cast<Map*>(map_p));

  UnmarkRecursively(reinterpret_cast<Object**>(&map_p), unmark_visitor);

  obj->IterateBody(Map::cast(map_p)->instance_type(),
                   obj->SizeFromMap(Map::cast(map_p)),
                   unmark_visitor);
}


void PathTracer::ProcessResults() {
  if (found_target_) {
    PrintF("=====================================\n");
    PrintF("====        Path to object       ====\n");
    PrintF("=====================================\n\n");

    ASSERT(!object_stack_.is_empty());
    for (int i = 0; i < object_stack_.length(); i++) {
      if (i > 0) PrintF("\n     |\n     |\n     V\n\n");
      Object* obj = object_stack_[i];
      obj->Print();
    }
    PrintF("=====================================\n");
  }
}


// Triggers a depth-first traversal of reachable objects from one
// given root object and finds a path to a specific heap object and
// prints it.
void Heap::TracePathToObjectFrom(Object* target, Object* root) {
  PathTracer tracer(target, PathTracer::FIND_ALL, VISIT_ALL);
  tracer.VisitPointer(&root);
}


// Triggers a depth-first traversal of reachable objects from roots
// and finds a path to a specific heap object and prints it.
void Heap::TracePathToObject(Object* target) {
  PathTracer tracer(target, PathTracer::FIND_ALL, VISIT_ALL);
  IterateRoots(&tracer, VISIT_ONLY_STRONG);
}


// Triggers a depth-first traversal of reachable objects from roots
// and finds a path to any global object and prints it. Useful for
// determining the source for leaks of global objects.
void Heap::TracePathToGlobal() {
  PathTracer tracer(PathTracer::kAnyGlobalObject,
                    PathTracer::FIND_ALL,
                    VISIT_ALL);
  IterateRoots(&tracer, VISIT_ONLY_STRONG);
}


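// These entry points are meant to be called by hand while hunting leaks,
// for example from a debugger prompt (illustrative only):
//
//   (gdb) call isolate->heap()->TracePathToGlobal()
//
// which prints chains of objects from the strong roots to global objects,
// using the mark/unmark machinery above.

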
static intptr_t CountTotalHolesSize(Heap* heap) {
  intptr_t holes_size = 0;
  OldSpaces spaces(heap);
  for (OldSpace* space = spaces.next();
       space != NULL;
       space = spaces.next()) {
    holes_size += space->Waste() + space->Available();
  }
  return holes_size;
}


GCTracer::GCTracer(Heap* heap,
                   const char* gc_reason,
                   const char* collector_reason)
    : start_time_(0.0),
      start_object_size_(0),
      start_memory_size_(0),
      gc_count_(0),
      full_gc_count_(0),
      allocated_since_last_gc_(0),
      spent_in_mutator_(0),
      promoted_objects_size_(0),
      nodes_died_in_new_space_(0),
      nodes_copied_in_new_space_(0),
      nodes_promoted_(0),
      heap_(heap),
      gc_reason_(gc_reason),
      collector_reason_(collector_reason) {
  if (!FLAG_trace_gc && !FLAG_print_cumulative_gc_stat) return;
  start_time_ = OS::TimeCurrentMillis();
  start_object_size_ = heap_->SizeOfObjects();
  start_memory_size_ = heap_->isolate()->memory_allocator()->Size();

  for (int i = 0; i < Scope::kNumberOfScopes; i++) {
    scopes_[i] = 0;
  }

  in_free_list_or_wasted_before_gc_ = CountTotalHolesSize(heap);

  allocated_since_last_gc_ =
      heap_->SizeOfObjects() - heap_->alive_after_last_gc_;

  if (heap_->last_gc_end_timestamp_ > 0) {
    spent_in_mutator_ = Max(start_time_ - heap_->last_gc_end_timestamp_, 0.0);
  }

  steps_count_ = heap_->incremental_marking()->steps_count();
  steps_took_ = heap_->incremental_marking()->steps_took();
  longest_step_ = heap_->incremental_marking()->longest_step();
  steps_count_since_last_gc_ =
      heap_->incremental_marking()->steps_count_since_last_gc();
  steps_took_since_last_gc_ =
      heap_->incremental_marking()->steps_took_since_last_gc();
}


GCTracer::~GCTracer() {
  // Print ONE line iff a tracing flag is set.
  if (!FLAG_trace_gc && !FLAG_print_cumulative_gc_stat) return;

  bool first_gc = (heap_->last_gc_end_timestamp_ == 0);

  heap_->alive_after_last_gc_ = heap_->SizeOfObjects();
  heap_->last_gc_end_timestamp_ = OS::TimeCurrentMillis();

  double time = heap_->last_gc_end_timestamp_ - start_time_;

  // Update cumulative GC statistics if required.
  if (FLAG_print_cumulative_gc_stat) {
    heap_->total_gc_time_ms_ += time;
    heap_->max_gc_pause_ = Max(heap_->max_gc_pause_, time);
    heap_->max_alive_after_gc_ = Max(heap_->max_alive_after_gc_,
                                     heap_->alive_after_last_gc_);
    if (!first_gc) {
      heap_->min_in_mutator_ = Min(heap_->min_in_mutator_,
                                   spent_in_mutator_);
    }
  } else if (FLAG_trace_gc_verbose) {
    heap_->total_gc_time_ms_ += time;
  }

  if (collector_ == SCAVENGER && FLAG_trace_gc_ignore_scavenger) return;

  heap_->AddMarkingTime(scopes_[Scope::MC_MARK]);

  if (FLAG_print_cumulative_gc_stat && !FLAG_trace_gc) return;
  PrintPID("%8.0f ms: ", heap_->isolate()->time_millis_since_init());

  if (!FLAG_trace_gc_nvp) {
    int external_time = static_cast<int>(scopes_[Scope::EXTERNAL]);

    double end_memory_size_mb =
        static_cast<double>(heap_->isolate()->memory_allocator()->Size()) / MB;

    PrintF("%s %.1f (%.1f) -> %.1f (%.1f) MB, ",
           CollectorString(),
           static_cast<double>(start_object_size_) / MB,
           static_cast<double>(start_memory_size_) / MB,
           SizeOfHeapObjects(),
           end_memory_size_mb);

    if (external_time > 0) PrintF("%d / ", external_time);
    PrintF("%.1f ms", time);
    if (steps_count_ > 0) {
      if (collector_ == SCAVENGER) {
        PrintF(" (+ %.1f ms in %d steps since last GC)",
               steps_took_since_last_gc_,
               steps_count_since_last_gc_);
      } else {
        PrintF(" (+ %.1f ms in %d steps since start of marking, "
               "biggest step %.1f ms)",
               steps_took_,
               steps_count_,
               longest_step_);
      }
    }

    if (gc_reason_ != NULL) {
      PrintF(" [%s]", gc_reason_);
    }

    if (collector_reason_ != NULL) {
      PrintF(" [%s]", collector_reason_);
    }

    PrintF(".\n");
  } else {
    PrintF("pause=%.1f ", time);
    PrintF("mutator=%.1f ", spent_in_mutator_);
    PrintF("gc=");
    switch (collector_) {
      case SCAVENGER:
        PrintF("s");
        break;
      case MARK_COMPACTOR:
        PrintF("ms");
        break;
      default:
        UNREACHABLE();
    }
    PrintF(" ");

    PrintF("external=%.1f ", scopes_[Scope::EXTERNAL]);
    PrintF("mark=%.1f ", scopes_[Scope::MC_MARK]);
    PrintF("sweep=%.1f ", scopes_[Scope::MC_SWEEP]);
    PrintF("sweepns=%.1f ", scopes_[Scope::MC_SWEEP_NEWSPACE]);
    PrintF("evacuate=%.1f ", scopes_[Scope::MC_EVACUATE_PAGES]);
    PrintF("new_new=%.1f ", scopes_[Scope::MC_UPDATE_NEW_TO_NEW_POINTERS]);
    PrintF("root_new=%.1f ", scopes_[Scope::MC_UPDATE_ROOT_TO_NEW_POINTERS]);
    PrintF("old_new=%.1f ", scopes_[Scope::MC_UPDATE_OLD_TO_NEW_POINTERS]);
    PrintF("compaction_ptrs=%.1f ",
           scopes_[Scope::MC_UPDATE_POINTERS_TO_EVACUATED]);
    PrintF("intracompaction_ptrs=%.1f ",
           scopes_[Scope::MC_UPDATE_POINTERS_BETWEEN_EVACUATED]);
    PrintF("misc_compaction=%.1f ", scopes_[Scope::MC_UPDATE_MISC_POINTERS]);
    PrintF("weakcollection_process=%.1f ",
           scopes_[Scope::MC_WEAKCOLLECTION_PROCESS]);
    PrintF("weakcollection_clear=%.1f ",
           scopes_[Scope::MC_WEAKCOLLECTION_CLEAR]);

    PrintF("total_size_before=%" V8_PTR_PREFIX "d ", start_object_size_);
    PrintF("total_size_after=%" V8_PTR_PREFIX "d ", heap_->SizeOfObjects());
    PrintF("holes_size_before=%" V8_PTR_PREFIX "d ",
           in_free_list_or_wasted_before_gc_);
    PrintF("holes_size_after=%" V8_PTR_PREFIX "d ", CountTotalHolesSize(heap_));

    PrintF("allocated=%" V8_PTR_PREFIX "d ", allocated_since_last_gc_);
    PrintF("promoted=%" V8_PTR_PREFIX "d ", promoted_objects_size_);
    PrintF("nodes_died_in_new=%d ", nodes_died_in_new_space_);
    PrintF("nodes_copied_in_new=%d ", nodes_copied_in_new_space_);
    PrintF("nodes_promoted=%d ", nodes_promoted_);

    if (collector_ == SCAVENGER) {
      PrintF("stepscount=%d ", steps_count_since_last_gc_);
      PrintF("stepstook=%.1f ", steps_took_since_last_gc_);
    } else {
      PrintF("stepscount=%d ", steps_count_);
      PrintF("stepstook=%.1f ", steps_took_);
      PrintF("longeststep=%.1f ", longest_step_);
    }

    PrintF("\n");
  }

  heap_->PrintShortHeapStatistics();
}


const char* GCTracer::CollectorString() {
  switch (collector_) {
    case SCAVENGER:
      return "Scavenge";
    case MARK_COMPACTOR:
      return "Mark-sweep";
  }
  return "Unknown GC";
}


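// The keyed lookup cache maps a (map, property name) pair to a field offset.
// Hash() selects a bucket; Lookup() and Update() probe the kEntriesPerBucket
// consecutive entries of that bucket.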
int KeyedLookupCache::Hash(Map* map, Name* name) {
  // Uses only lower 32 bits if pointers are larger.
  uintptr_t addr_hash =
      static_cast<uint32_t>(reinterpret_cast<uintptr_t>(map)) >> kMapHashShift;
  return static_cast<uint32_t>((addr_hash ^ name->Hash()) & kCapacityMask);
}


int KeyedLookupCache::Lookup(Map* map, Name* name) {
  int index = (Hash(map, name) & kHashMask);
  for (int i = 0; i < kEntriesPerBucket; i++) {
    Key& key = keys_[index + i];
    if ((key.map == map) && key.name->Equals(name)) {
      return field_offsets_[index + i];
    }
  }
  return kNotFound;
}


void KeyedLookupCache::Update(Map* map, Name* name, int field_offset) {
  if (!name->IsUniqueName()) {
    String* internalized_string;
    if (!map->GetIsolate()->heap()->InternalizeStringIfExists(
            String::cast(name), &internalized_string)) {
      return;
    }
    name = internalized_string;
  }
  // This cache is cleared only between mark compact passes, so we expect the
  // cache to only contain old space names.
  ASSERT(!map->GetIsolate()->heap()->InNewSpace(name));

  int index = (Hash(map, name) & kHashMask);
  // After a GC there will be free slots, so we use them in order (this may
  // help to get the most frequently used one in position 0).
  for (int i = 0; i < kEntriesPerBucket; i++) {
    Key& key = keys_[index + i];
    Object* free_entry_indicator = NULL;
    if (key.map == free_entry_indicator) {
      key.map = map;
      key.name = name;
      field_offsets_[index + i] = field_offset;
      return;
    }
  }

  // No free entry found in this bucket, so we move them all down one and
  // put the new entry at position zero.
  for (int i = kEntriesPerBucket - 1; i > 0; i--) {
    Key& key = keys_[index + i];
    Key& key2 = keys_[index + i - 1];
    key = key2;
    field_offsets_[index + i] = field_offsets_[index + i - 1];
  }

  // Write the new first entry.
  Key& key = keys_[index];
  key.map = map;
  key.name = name;
  field_offsets_[index] = field_offset;
}


void KeyedLookupCache::Clear() {
  for (int index = 0; index < kLength; index++) keys_[index].map = NULL;
}


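// The descriptor lookup cache is invalidated the same way, by nulling out
// the source map of every key.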
void DescriptorLookupCache::Clear() {
  for (int index = 0; index < kLength; index++) keys_[index].source = NULL;
}


#ifdef DEBUG
void Heap::GarbageCollectionGreedyCheck() {
  ASSERT(FLAG_gc_greedy);
  if (isolate_->bootstrapper()->IsActive()) return;
  if (disallow_allocation_failure()) return;
  CollectGarbage(NEW_SPACE);
}
#endif


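// Removes entries that the GC overwrote with the hole and moves external
// strings that have been promoted out of new space into the old space list.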
void ExternalStringTable::CleanUp() {
  int last = 0;
  for (int i = 0; i < new_space_strings_.length(); ++i) {
    if (new_space_strings_[i] == heap_->the_hole_value()) {
      continue;
    }
    ASSERT(new_space_strings_[i]->IsExternalString());
    if (heap_->InNewSpace(new_space_strings_[i])) {
      new_space_strings_[last++] = new_space_strings_[i];
    } else {
      old_space_strings_.Add(new_space_strings_[i]);
    }
  }
  new_space_strings_.Rewind(last);
  new_space_strings_.Trim();

  last = 0;
  for (int i = 0; i < old_space_strings_.length(); ++i) {
    if (old_space_strings_[i] == heap_->the_hole_value()) {
      continue;
    }
    ASSERT(old_space_strings_[i]->IsExternalString());
    ASSERT(!heap_->InNewSpace(old_space_strings_[i]));
    old_space_strings_[last++] = old_space_strings_[i];
  }
  old_space_strings_.Rewind(last);
  old_space_strings_.Trim();
#ifdef VERIFY_HEAP
  if (FLAG_verify_heap) {
    Verify();
  }
#endif
}


void ExternalStringTable::TearDown() {
  for (int i = 0; i < new_space_strings_.length(); ++i) {
    heap_->FinalizeExternalString(ExternalString::cast(new_space_strings_[i]));
  }
  new_space_strings_.Free();
  for (int i = 0; i < old_space_strings_.length(); ++i) {
    heap_->FinalizeExternalString(ExternalString::cast(old_space_strings_[i]));
  }
  old_space_strings_.Free();
}


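// Chunks queued here are not released immediately; FreeQueuedChunks() walks
// this singly linked list later and returns the memory to the allocator.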
void Heap::QueueMemoryChunkForFree(MemoryChunk* chunk) {
  chunk->set_next_chunk(chunks_queued_for_free_);
  chunks_queued_for_free_ = chunk;
}


void Heap::FreeQueuedChunks() {
  if (chunks_queued_for_free_ == NULL) return;
  MemoryChunk* next;
  MemoryChunk* chunk;
  for (chunk = chunks_queued_for_free_; chunk != NULL; chunk = next) {
    next = chunk->next_chunk();
    chunk->SetFlag(MemoryChunk::ABOUT_TO_BE_FREED);

    if (chunk->owner()->identity() == LO_SPACE) {
      // StoreBuffer::Filter relies on MemoryChunk::FromAnyPointerAddress.
      // If FromAnyPointerAddress encounters a slot that belongs to a large
      // chunk queued for deletion it will fail to find the chunk because
      // it tries to perform a search in the list of pages owned by the large
      // object space, and queued chunks were detached from that list.
      // To work around this we split the large chunk into normal kPageSize
      // aligned pieces and initialize the size, owner and flags field of
      // every piece. If FromAnyPointerAddress encounters a slot that belongs
      // to one of these smaller pieces it will treat it as a slot on a
      // normal Page.
      Address chunk_end = chunk->address() + chunk->size();
      MemoryChunk* inner = MemoryChunk::FromAddress(
          chunk->address() + Page::kPageSize);
      MemoryChunk* inner_last = MemoryChunk::FromAddress(chunk_end - 1);
      while (inner <= inner_last) {
        // Size of a large chunk is always a multiple of
        // OS::AllocateAlignment() so there is always
        // enough space for a fake MemoryChunk header.
        Address area_end = Min(inner->address() + Page::kPageSize, chunk_end);
        // Guard against overflow.
        if (area_end < inner->address()) area_end = chunk_end;
        inner->SetArea(inner->address(), area_end);
        inner->set_size(Page::kPageSize);
        inner->set_owner(lo_space());
        inner->SetFlag(MemoryChunk::ABOUT_TO_BE_FREED);
        inner = MemoryChunk::FromAddress(
            inner->address() + Page::kPageSize);
      }
    }
  }
  isolate_->heap()->store_buffer()->Compact();
  isolate_->heap()->store_buffer()->Filter(MemoryChunk::ABOUT_TO_BE_FREED);
  for (chunk = chunks_queued_for_free_; chunk != NULL; chunk = next) {
    next = chunk->next_chunk();
    isolate_->memory_allocator()->Free(chunk);
  }
  chunks_queued_for_free_ = NULL;
}


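// Keeps a small ring buffer of recently unmapped pages so that stale
// pointers into them can be recognized when inspecting a crash dump.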
void Heap::RememberUnmappedPage(Address page, bool compacted) {
  uintptr_t p = reinterpret_cast<uintptr_t>(page);
  // Tag the page pointer to make it findable in the dump file.
  if (compacted) {
    p ^= 0xc1ead & (Page::kPageSize - 1);  // Cleared.
  } else {
    p ^= 0x1d1ed & (Page::kPageSize - 1);  // I died.
  }
  remembered_unmapped_pages_[remembered_unmapped_pages_index_] =
      reinterpret_cast<Address>(p);
  remembered_unmapped_pages_index_++;
  remembered_unmapped_pages_index_ %= kRememberedUnmappedPages;
}


void Heap::ClearObjectStats(bool clear_last_time_stats) {
  memset(object_counts_, 0, sizeof(object_counts_));
  memset(object_sizes_, 0, sizeof(object_sizes_));
  if (clear_last_time_stats) {
    memset(object_counts_last_time_, 0, sizeof(object_counts_last_time_));
    memset(object_sizes_last_time_, 0, sizeof(object_sizes_last_time_));
  }
}


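// CheckpointObjectStats() below publishes the per-type object counts and
// sizes to the isolate's counters. Each counter is incremented by the
// current value and decremented by the value recorded at the previous
// checkpoint, i.e. adjusted by the delta since the last call.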
static LazyMutex checkpoint_object_stats_mutex = LAZY_MUTEX_INITIALIZER;


void Heap::CheckpointObjectStats() {
  LockGuard<Mutex> lock_guard(checkpoint_object_stats_mutex.Pointer());
  Counters* counters = isolate()->counters();
#define ADJUST_LAST_TIME_OBJECT_COUNT(name)          \
  counters->count_of_##name()->Increment(            \
      static_cast<int>(object_counts_[name]));       \
  counters->count_of_##name()->Decrement(            \
      static_cast<int>(object_counts_last_time_[name])); \
  counters->size_of_##name()->Increment(             \
      static_cast<int>(object_sizes_[name]));        \
  counters->size_of_##name()->Decrement(             \
      static_cast<int>(object_sizes_last_time_[name]));
  INSTANCE_TYPE_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
#undef ADJUST_LAST_TIME_OBJECT_COUNT
  int index;
#define ADJUST_LAST_TIME_OBJECT_COUNT(name)          \
  index = FIRST_CODE_KIND_SUB_TYPE + Code::name;     \
  counters->count_of_CODE_TYPE_##name()->Increment(  \
      static_cast<int>(object_counts_[index]));      \
  counters->count_of_CODE_TYPE_##name()->Decrement(  \
      static_cast<int>(object_counts_last_time_[index])); \
  counters->size_of_CODE_TYPE_##name()->Increment(   \
      static_cast<int>(object_sizes_[index]));       \
  counters->size_of_CODE_TYPE_##name()->Decrement(   \
      static_cast<int>(object_sizes_last_time_[index]));
  CODE_KIND_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
#undef ADJUST_LAST_TIME_OBJECT_COUNT
#define ADJUST_LAST_TIME_OBJECT_COUNT(name)           \
  index = FIRST_FIXED_ARRAY_SUB_TYPE + name;          \
  counters->count_of_FIXED_ARRAY_##name()->Increment( \
      static_cast<int>(object_counts_[index]));       \
  counters->count_of_FIXED_ARRAY_##name()->Decrement( \
      static_cast<int>(object_counts_last_time_[index])); \
  counters->size_of_FIXED_ARRAY_##name()->Increment(  \
      static_cast<int>(object_sizes_[index]));        \
  counters->size_of_FIXED_ARRAY_##name()->Decrement(  \
      static_cast<int>(object_sizes_last_time_[index]));
  FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
#undef ADJUST_LAST_TIME_OBJECT_COUNT
#define ADJUST_LAST_TIME_OBJECT_COUNT(name)          \
  index =                                            \
      FIRST_CODE_AGE_SUB_TYPE + Code::k##name##CodeAge - Code::kFirstCodeAge; \
  counters->count_of_CODE_AGE_##name()->Increment(   \
      static_cast<int>(object_counts_[index]));      \
  counters->count_of_CODE_AGE_##name()->Decrement(   \
      static_cast<int>(object_counts_last_time_[index])); \
  counters->size_of_CODE_AGE_##name()->Increment(    \
      static_cast<int>(object_sizes_[index]));       \
  counters->size_of_CODE_AGE_##name()->Decrement(    \
      static_cast<int>(object_sizes_last_time_[index]));
  CODE_AGE_LIST_COMPLETE(ADJUST_LAST_TIME_OBJECT_COUNT)
#undef ADJUST_LAST_TIME_OBJECT_COUNT

  OS::MemCopy(object_counts_last_time_, object_counts_, sizeof(object_counts_));
  OS::MemCopy(object_sizes_last_time_, object_sizes_, sizeof(object_sizes_));
}


} }  // namespace v8::internal