1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are met:
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission.
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 #include "accessors.h"
32 #include "bootstrapper.h"
34 #include "compilation-cache.h"
35 #include "cpu-profiler.h"
37 #include "deoptimizer.h"
38 #include "global-handles.h"
39 #include "heap-profiler.h"
40 #include "incremental-marking.h"
41 #include "isolate-inl.h"
42 #include "mark-compact.h"
44 #include "objects-visiting.h"
45 #include "objects-visiting-inl.h"
47 #include "runtime-profiler.h"
48 #include "scopeinfo.h"
50 #include "store-buffer.h"
51 #include "utils/random-number-generator.h"
52 #include "v8conversions.h"
53 #include "v8threads.h"
55 #include "vm-state-inl.h"
56 #if V8_TARGET_ARCH_ARM && !V8_INTERPRETED_REGEXP
57 #include "regexp-macro-assembler.h"
58 #include "arm/regexp-macro-assembler-arm.h"
60 #if V8_TARGET_ARCH_MIPS && !V8_INTERPRETED_REGEXP
61 #include "regexp-macro-assembler.h"
62 #include "mips/regexp-macro-assembler-mips.h"
71 code_range_size_(kIs64BitArch ? 512 * MB : 0),
72 // semispace_size_ should be a power of 2 and old_generation_size_ should be
73 // a multiple of Page::kPageSize.
74 reserved_semispace_size_(8 * (kPointerSize / 4) * MB),
75 max_semispace_size_(8 * (kPointerSize / 4) * MB),
76 initial_semispace_size_(Page::kPageSize),
77 max_old_generation_size_(700ul * (kPointerSize / 4) * MB),
78 max_executable_size_(256ul * (kPointerSize / 4) * MB),
79 // Variables set based on semispace_size_ and old_generation_size_ in
80 // ConfigureHeap (survived_since_last_expansion_, external_allocation_limit_)
81 // Will be 4 * reserved_semispace_size_ to ensure that young
82 // generation can be aligned to its size.
83 maximum_committed_(0),
84 survived_since_last_expansion_(0),
86 always_allocate_scope_depth_(0),
87 linear_allocation_scope_depth_(0),
88 contexts_disposed_(0),
90 flush_monomorphic_ics_(false),
91 scan_on_scavenge_pages_(0),
93 old_pointer_space_(NULL),
94 old_data_space_(NULL),
98 property_cell_space_(NULL),
100 gc_state_(NOT_IN_GC),
101 gc_post_processing_depth_(0),
104 remembered_unmapped_pages_index_(0),
105 unflattened_strings_length_(0),
107 allocation_timeout_(0),
108 disallow_allocation_failure_(false),
110 new_space_high_promotion_mode_active_(false),
111 old_generation_allocation_limit_(kMinimumOldGenerationAllocationLimit),
112 size_of_old_gen_at_last_old_space_gc_(0),
113 external_allocation_limit_(0),
114 amount_of_external_allocated_memory_(0),
115 amount_of_external_allocated_memory_at_last_global_gc_(0),
116 old_gen_exhausted_(false),
117 inline_allocation_disabled_(false),
118 store_buffer_rebuilder_(store_buffer()),
119 hidden_string_(NULL),
120 gc_safe_size_of_old_object_(NULL),
121 total_regexp_code_generated_(0),
123 young_survivors_after_last_gc_(0),
124 high_survival_rate_period_length_(0),
125 low_survival_rate_period_length_(0),
127 previous_survival_rate_trend_(Heap::STABLE),
128 survival_rate_trend_(Heap::STABLE),
130 total_gc_time_ms_(0.0),
131 max_alive_after_gc_(0),
132 min_in_mutator_(kMaxInt),
133 alive_after_last_gc_(0),
134 last_gc_end_timestamp_(0.0),
137 mark_compact_collector_(this),
140 incremental_marking_(this),
141 number_idle_notifications_(0),
142 last_idle_notification_gc_count_(0),
143 last_idle_notification_gc_count_init_(false),
144 mark_sweeps_since_idle_round_started_(0),
145 gc_count_at_last_idle_gc_(0),
146 scavenges_since_last_idle_round_(kIdleScavengeThreshold),
147 full_codegen_bytes_generated_(0),
148 crankshaft_codegen_bytes_generated_(0),
149 gcs_since_last_deopt_(0),
151 no_weak_object_verification_scope_depth_(0),
153 allocation_sites_scratchpad_length_(0),
154 promotion_queue_(this),
156 external_string_table_(this),
157 chunks_queued_for_free_(NULL),
158 relocation_mutex_(NULL) {
159 // Allow build-time customization of the max semispace size. Building
160 // V8 with snapshots and a non-default max semispace size is much
161 // easier if you can define it as part of the build environment.
162 #if defined(V8_MAX_SEMISPACE_SIZE)
163 max_semispace_size_ = reserved_semispace_size_ = V8_MAX_SEMISPACE_SIZE;
166 // Ensure old_generation_size_ is a multiple of kPageSize.
167 ASSERT(MB >= Page::kPageSize);
169 intptr_t max_virtual = OS::MaxVirtualMemory();
171 if (max_virtual > 0) {
172 if (code_range_size_ > 0) {
173 // Reserve no more than 1/8 of the memory for the code range.
174 code_range_size_ = Min(code_range_size_, max_virtual >> 3);
178 memset(roots_, 0, sizeof(roots_[0]) * kRootListLength);
179 native_contexts_list_ = NULL;
180 array_buffers_list_ = Smi::FromInt(0);
181 allocation_sites_list_ = Smi::FromInt(0);
182 // Put a dummy entry in the remembered pages so we can find the list in the
183 // minidump even if there are no real unmapped pages.
184 RememberUnmappedPage(NULL, false);
186 ClearObjectStats(true);
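// Worked example (illustrative note, not part of the original source): the
// pointer-size scaling in the initializer list above yields these defaults.
//
//   kPointerSize == 4 (32-bit): max_semispace_size_      =    8 MB
//                               max_old_generation_size_ =  700 MB
//                               max_executable_size_     =  256 MB
//                               code_range_size_         =    0
//   kPointerSize == 8 (64-bit): max_semispace_size_      =   16 MB
//                               max_old_generation_size_ = 1400 MB
//                               max_executable_size_     =  512 MB
//                               code_range_size_         =  512 MB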
190 intptr_t Heap::Capacity() {
191 if (!HasBeenSetUp()) return 0;
193 return new_space_.Capacity() +
194 old_pointer_space_->Capacity() +
195 old_data_space_->Capacity() +
196 code_space_->Capacity() +
197 map_space_->Capacity() +
198 cell_space_->Capacity() +
199 property_cell_space_->Capacity();
203 intptr_t Heap::CommittedMemory() {
204 if (!HasBeenSetUp()) return 0;
206 return new_space_.CommittedMemory() +
207 old_pointer_space_->CommittedMemory() +
208 old_data_space_->CommittedMemory() +
209 code_space_->CommittedMemory() +
210 map_space_->CommittedMemory() +
211 cell_space_->CommittedMemory() +
212 property_cell_space_->CommittedMemory() +
217 size_t Heap::CommittedPhysicalMemory() {
218 if (!HasBeenSetUp()) return 0;
220 return new_space_.CommittedPhysicalMemory() +
221 old_pointer_space_->CommittedPhysicalMemory() +
222 old_data_space_->CommittedPhysicalMemory() +
223 code_space_->CommittedPhysicalMemory() +
224 map_space_->CommittedPhysicalMemory() +
225 cell_space_->CommittedPhysicalMemory() +
226 property_cell_space_->CommittedPhysicalMemory() +
227 lo_space_->CommittedPhysicalMemory();
231 intptr_t Heap::CommittedMemoryExecutable() {
232 if (!HasBeenSetUp()) return 0;
234 return isolate()->memory_allocator()->SizeExecutable();
238 void Heap::UpdateMaximumCommitted() {
239 if (!HasBeenSetUp()) return;
241 intptr_t current_committed_memory = CommittedMemory();
242 if (current_committed_memory > maximum_committed_) {
243 maximum_committed_ = current_committed_memory;
248 intptr_t Heap::Available() {
249 if (!HasBeenSetUp()) return 0;
251 return new_space_.Available() +
252 old_pointer_space_->Available() +
253 old_data_space_->Available() +
254 code_space_->Available() +
255 map_space_->Available() +
256 cell_space_->Available() +
257 property_cell_space_->Available();
261 bool Heap::HasBeenSetUp() {
262 return old_pointer_space_ != NULL &&
263 old_data_space_ != NULL &&
264 code_space_ != NULL &&
265 map_space_ != NULL &&
266 cell_space_ != NULL &&
267 property_cell_space_ != NULL &&
272 int Heap::GcSafeSizeOfOldObject(HeapObject* object) {
273 if (IntrusiveMarking::IsMarked(object)) {
274 return IntrusiveMarking::SizeOfMarkedObject(object);
276 return object->SizeFromMap(object->map());
280 GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space,
281 const char** reason) {
282 // Is global GC requested?
283 if (space != NEW_SPACE) {
284 isolate_->counters()->gc_compactor_caused_by_request()->Increment();
285 *reason = "GC in old space requested";
286 return MARK_COMPACTOR;
289 if (FLAG_gc_global || (FLAG_stress_compaction && (gc_count_ & 1) != 0)) {
290 *reason = "GC in old space forced by flags";
291 return MARK_COMPACTOR;
294 // Is enough data promoted to justify a global GC?
295 if (OldGenerationAllocationLimitReached()) {
296 isolate_->counters()->gc_compactor_caused_by_promoted_data()->Increment();
297 *reason = "promotion limit reached";
298 return MARK_COMPACTOR;
301 // Have allocation in OLD and LO failed?
302 if (old_gen_exhausted_) {
303 isolate_->counters()->
304 gc_compactor_caused_by_oldspace_exhaustion()->Increment();
305 *reason = "old generations exhausted";
306 return MARK_COMPACTOR;
309 // Is there enough space left in OLD to guarantee that a scavenge can succeed?
312 // Note that MemoryAllocator->MaxAvailable() undercounts the memory available
313 // for object promotion. It counts only the bytes that the memory
314 // allocator has not yet allocated from the OS and assigned to any space,
315 // and does not count available bytes already in the old space or code
316 // space. Undercounting is safe---we may get an unrequested full GC when
317 // a scavenge would have succeeded.
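// Illustrative example (not in the original source): if new space currently
// holds 8 MB of live objects but the memory allocator has only 6 MB that it
// has not yet handed out to any space, promotion of all survivors cannot be
// guaranteed, so a full mark-compact is requested even though the old spaces
// may still have free-list memory that could absorb part of the promotion.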
318 if (isolate_->memory_allocator()->MaxAvailable() <= new_space_.Size()) {
319 isolate_->counters()->
320 gc_compactor_caused_by_oldspace_exhaustion()->Increment();
321 *reason = "scavenge might not succeed";
322 return MARK_COMPACTOR;
331 // TODO(1238405): Combine the infrastructure for --heap-stats and
332 // --log-gc to avoid the complicated preprocessor and flag testing.
333 void Heap::ReportStatisticsBeforeGC() {
334 // Heap::ReportHeapStatistics will also log NewSpace statistics when
335 // compiled with --log-gc set. The following logic is used to avoid double logging.
338 if (FLAG_heap_stats || FLAG_log_gc) new_space_.CollectStatistics();
339 if (FLAG_heap_stats) {
340 ReportHeapStatistics("Before GC");
341 } else if (FLAG_log_gc) {
342 new_space_.ReportStatistics();
344 if (FLAG_heap_stats || FLAG_log_gc) new_space_.ClearHistograms();
347 new_space_.CollectStatistics();
348 new_space_.ReportStatistics();
349 new_space_.ClearHistograms();
355 void Heap::PrintShortHeapStatistics() {
356 if (!FLAG_trace_gc_verbose) return;
357 PrintPID("Memory allocator, used: %6" V8_PTR_PREFIX "d KB"
358 ", available: %6" V8_PTR_PREFIX "d KB\n",
359 isolate_->memory_allocator()->Size() / KB,
360 isolate_->memory_allocator()->Available() / KB);
361 PrintPID("New space, used: %6" V8_PTR_PREFIX "d KB"
362 ", available: %6" V8_PTR_PREFIX "d KB"
363 ", committed: %6" V8_PTR_PREFIX "d KB\n",
364 new_space_.Size() / KB,
365 new_space_.Available() / KB,
366 new_space_.CommittedMemory() / KB);
367 PrintPID("Old pointers, used: %6" V8_PTR_PREFIX "d KB"
368 ", available: %6" V8_PTR_PREFIX "d KB"
369 ", committed: %6" V8_PTR_PREFIX "d KB\n",
370 old_pointer_space_->SizeOfObjects() / KB,
371 old_pointer_space_->Available() / KB,
372 old_pointer_space_->CommittedMemory() / KB);
373 PrintPID("Old data space, used: %6" V8_PTR_PREFIX "d KB"
374 ", available: %6" V8_PTR_PREFIX "d KB"
375 ", committed: %6" V8_PTR_PREFIX "d KB\n",
376 old_data_space_->SizeOfObjects() / KB,
377 old_data_space_->Available() / KB,
378 old_data_space_->CommittedMemory() / KB);
379 PrintPID("Code space, used: %6" V8_PTR_PREFIX "d KB"
380 ", available: %6" V8_PTR_PREFIX "d KB"
381 ", committed: %6" V8_PTR_PREFIX "d KB\n",
382 code_space_->SizeOfObjects() / KB,
383 code_space_->Available() / KB,
384 code_space_->CommittedMemory() / KB);
385 PrintPID("Map space, used: %6" V8_PTR_PREFIX "d KB"
386 ", available: %6" V8_PTR_PREFIX "d KB"
387 ", committed: %6" V8_PTR_PREFIX "d KB\n",
388 map_space_->SizeOfObjects() / KB,
389 map_space_->Available() / KB,
390 map_space_->CommittedMemory() / KB);
391 PrintPID("Cell space, used: %6" V8_PTR_PREFIX "d KB"
392 ", available: %6" V8_PTR_PREFIX "d KB"
393 ", committed: %6" V8_PTR_PREFIX "d KB\n",
394 cell_space_->SizeOfObjects() / KB,
395 cell_space_->Available() / KB,
396 cell_space_->CommittedMemory() / KB);
397 PrintPID("PropertyCell space, used: %6" V8_PTR_PREFIX "d KB"
398 ", available: %6" V8_PTR_PREFIX "d KB"
399 ", committed: %6" V8_PTR_PREFIX "d KB\n",
400 property_cell_space_->SizeOfObjects() / KB,
401 property_cell_space_->Available() / KB,
402 property_cell_space_->CommittedMemory() / KB);
403 PrintPID("Large object space, used: %6" V8_PTR_PREFIX "d KB"
404 ", available: %6" V8_PTR_PREFIX "d KB"
405 ", committed: %6" V8_PTR_PREFIX "d KB\n",
406 lo_space_->SizeOfObjects() / KB,
407 lo_space_->Available() / KB,
408 lo_space_->CommittedMemory() / KB);
409 PrintPID("All spaces, used: %6" V8_PTR_PREFIX "d KB"
410 ", available: %6" V8_PTR_PREFIX "d KB"
411 ", committed: %6" V8_PTR_PREFIX "d KB\n",
412 this->SizeOfObjects() / KB,
413 this->Available() / KB,
414 this->CommittedMemory() / KB);
415 PrintPID("External memory reported: %6" V8_PTR_PREFIX "d KB\n",
416 static_cast<intptr_t>(amount_of_external_allocated_memory_ / KB));
417 PrintPID("Total time spent in GC : %.1f ms\n", total_gc_time_ms_);
421 // TODO(1238405): Combine the infrastructure for --heap-stats and
422 // --log-gc to avoid the complicated preprocessor and flag testing.
423 void Heap::ReportStatisticsAfterGC() {
424 // As in the before-GC report, we use some complicated logic to ensure that
425 // NewSpace statistics are logged exactly once when --log-gc is turned on.
427 if (FLAG_heap_stats) {
428 new_space_.CollectStatistics();
429 ReportHeapStatistics("After GC");
430 } else if (FLAG_log_gc) {
431 new_space_.ReportStatistics();
434 if (FLAG_log_gc) new_space_.ReportStatistics();
439 void Heap::GarbageCollectionPrologue() {
440 { AllowHeapAllocation for_the_first_part_of_prologue;
441 ClearJSFunctionResultCaches();
443 unflattened_strings_length_ = 0;
445 if (FLAG_flush_code && FLAG_flush_code_incrementally) {
446 mark_compact_collector()->EnableCodeFlushing(true);
450 if (FLAG_verify_heap) {
456 UpdateMaximumCommitted();
459 ASSERT(!AllowHeapAllocation::IsAllowed() && gc_state_ == NOT_IN_GC);
461 if (FLAG_gc_verbose) Print();
463 ReportStatisticsBeforeGC();
466 store_buffer()->GCPrologue();
468 if (isolate()->concurrent_osr_enabled()) {
469 isolate()->optimizing_compiler_thread()->AgeBufferedOsrJobs();
474 intptr_t Heap::SizeOfObjects() {
476 AllSpaces spaces(this);
477 for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
478 total += space->SizeOfObjects();
484 void Heap::ClearAllICsByKind(Code::Kind kind) {
485 HeapObjectIterator it(code_space());
487 for (Object* object = it.Next(); object != NULL; object = it.Next()) {
488 Code* code = Code::cast(object);
489 Code::Kind current_kind = code->kind();
490 if (current_kind == Code::FUNCTION ||
491 current_kind == Code::OPTIMIZED_FUNCTION) {
492 code->ClearInlineCaches(kind);
498 void Heap::RepairFreeListsAfterBoot() {
499 PagedSpaces spaces(this);
500 for (PagedSpace* space = spaces.next();
502 space = spaces.next()) {
503 space->RepairFreeListsAfterBoot();
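// Descriptive note (added): ProcessPretenuringFeedback below walks the
// allocation sites (either the bounded scratchpad or, if it overflowed, the
// full weak allocation-sites list), folds the memento counts gathered during
// the last scavenge into each site's pretenuring decision, and asks the
// stack guard to deoptimize marked code when a decision changes.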
508 void Heap::ProcessPretenuringFeedback() {
509 if (FLAG_allocation_site_pretenuring) {
510 int tenure_decisions = 0;
511 int dont_tenure_decisions = 0;
512 int allocation_mementos_found = 0;
513 int allocation_sites = 0;
514 int active_allocation_sites = 0;
516 // If the scratchpad overflowed, we have to iterate over the allocation sites list instead.
518 bool use_scratchpad =
519 allocation_sites_scratchpad_length_ < kAllocationSiteScratchpadSize;
522 Object* list_element = allocation_sites_list();
523 bool trigger_deoptimization = false;
524 while (use_scratchpad ?
525 i < allocation_sites_scratchpad_length_ :
526 list_element->IsAllocationSite()) {
527 AllocationSite* site = use_scratchpad ?
528 AllocationSite::cast(allocation_sites_scratchpad()->get(i)) :
529 AllocationSite::cast(list_element);
530 allocation_mementos_found += site->memento_found_count();
531 if (site->memento_found_count() > 0) {
532 active_allocation_sites++;
534 if (site->DigestPretenuringFeedback()) trigger_deoptimization = true;
535 if (site->GetPretenureMode() == TENURED) {
538 dont_tenure_decisions++;
541 if (use_scratchpad) {
544 list_element = site->weak_next();
548 if (trigger_deoptimization) isolate_->stack_guard()->DeoptMarkedCode();
550 FlushAllocationSitesScratchpad();
552 if (FLAG_trace_pretenuring_statistics &&
553 (allocation_mementos_found > 0 ||
554 tenure_decisions > 0 ||
555 dont_tenure_decisions > 0)) {
556 PrintF("GC: (mode, #visited allocation sites, #active allocation sites, "
557 "#mementos, #tenure decisions, #donttenure decisions) "
558 "(%s, %d, %d, %d, %d, %d)\n",
559 use_scratchpad ? "use scratchpad" : "use list",
561 active_allocation_sites,
562 allocation_mementos_found,
564 dont_tenure_decisions);
570 void Heap::GarbageCollectionEpilogue() {
571 store_buffer()->GCEpilogue();
573 // In release mode, we only zap the from space under heap verification.
574 if (Heap::ShouldZapGarbage()) {
579 if (FLAG_verify_heap) {
584 AllowHeapAllocation for_the_rest_of_the_epilogue;
587 if (FLAG_print_global_handles) isolate_->global_handles()->Print();
588 if (FLAG_print_handles) PrintHandles();
589 if (FLAG_gc_verbose) Print();
590 if (FLAG_code_stats) ReportCodeStatistics("After GC");
592 if (FLAG_deopt_every_n_garbage_collections > 0) {
593 if (++gcs_since_last_deopt_ == FLAG_deopt_every_n_garbage_collections) {
594 Deoptimizer::DeoptimizeAll(isolate());
595 gcs_since_last_deopt_ = 0;
599 UpdateMaximumCommitted();
601 isolate_->counters()->alive_after_last_gc()->Set(
602 static_cast<int>(SizeOfObjects()));
604 isolate_->counters()->string_table_capacity()->Set(
605 string_table()->Capacity());
606 isolate_->counters()->number_of_symbols()->Set(
607 string_table()->NumberOfElements());
609 if (full_codegen_bytes_generated_ + crankshaft_codegen_bytes_generated_ > 0) {
610 isolate_->counters()->codegen_fraction_crankshaft()->AddSample(
611 static_cast<int>((crankshaft_codegen_bytes_generated_ * 100.0) /
612 (crankshaft_codegen_bytes_generated_
613 + full_codegen_bytes_generated_)));
616 if (CommittedMemory() > 0) {
617 isolate_->counters()->external_fragmentation_total()->AddSample(
618 static_cast<int>(100 - (SizeOfObjects() * 100.0) / CommittedMemory()));
620 isolate_->counters()->heap_fraction_new_space()->
621 AddSample(static_cast<int>(
622 (new_space()->CommittedMemory() * 100.0) / CommittedMemory()));
623 isolate_->counters()->heap_fraction_old_pointer_space()->AddSample(
625 (old_pointer_space()->CommittedMemory() * 100.0) /
627 isolate_->counters()->heap_fraction_old_data_space()->AddSample(
629 (old_data_space()->CommittedMemory() * 100.0) /
631 isolate_->counters()->heap_fraction_code_space()->
632 AddSample(static_cast<int>(
633 (code_space()->CommittedMemory() * 100.0) / CommittedMemory()));
634 isolate_->counters()->heap_fraction_map_space()->AddSample(
636 (map_space()->CommittedMemory() * 100.0) / CommittedMemory()));
637 isolate_->counters()->heap_fraction_cell_space()->AddSample(
639 (cell_space()->CommittedMemory() * 100.0) / CommittedMemory()));
640 isolate_->counters()->heap_fraction_property_cell_space()->
641 AddSample(static_cast<int>(
642 (property_cell_space()->CommittedMemory() * 100.0) /
644 isolate_->counters()->heap_fraction_lo_space()->
645 AddSample(static_cast<int>(
646 (lo_space()->CommittedMemory() * 100.0) / CommittedMemory()));
648 isolate_->counters()->heap_sample_total_committed()->AddSample(
649 static_cast<int>(CommittedMemory() / KB));
650 isolate_->counters()->heap_sample_total_used()->AddSample(
651 static_cast<int>(SizeOfObjects() / KB));
652 isolate_->counters()->heap_sample_map_space_committed()->AddSample(
653 static_cast<int>(map_space()->CommittedMemory() / KB));
654 isolate_->counters()->heap_sample_cell_space_committed()->AddSample(
655 static_cast<int>(cell_space()->CommittedMemory() / KB));
656 isolate_->counters()->
657 heap_sample_property_cell_space_committed()->
658 AddSample(static_cast<int>(
659 property_cell_space()->CommittedMemory() / KB));
660 isolate_->counters()->heap_sample_code_space_committed()->AddSample(
661 static_cast<int>(code_space()->CommittedMemory() / KB));
663 isolate_->counters()->heap_sample_maximum_committed()->AddSample(
664 static_cast<int>(MaximumCommittedMemory() / KB));
667 #define UPDATE_COUNTERS_FOR_SPACE(space) \
668 isolate_->counters()->space##_bytes_available()->Set( \
669 static_cast<int>(space()->Available())); \
670 isolate_->counters()->space##_bytes_committed()->Set( \
671 static_cast<int>(space()->CommittedMemory())); \
672 isolate_->counters()->space##_bytes_used()->Set( \
673 static_cast<int>(space()->SizeOfObjects()));
674 #define UPDATE_FRAGMENTATION_FOR_SPACE(space) \
675 if (space()->CommittedMemory() > 0) { \
676 isolate_->counters()->external_fragmentation_##space()->AddSample( \
677 static_cast<int>(100 - \
678 (space()->SizeOfObjects() * 100.0) / space()->CommittedMemory())); \
680 #define UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(space) \
681 UPDATE_COUNTERS_FOR_SPACE(space) \
682 UPDATE_FRAGMENTATION_FOR_SPACE(space)
684 UPDATE_COUNTERS_FOR_SPACE(new_space)
685 UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(old_pointer_space)
686 UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(old_data_space)
687 UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(code_space)
688 UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(map_space)
689 UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(cell_space)
690 UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(property_cell_space)
691 UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(lo_space)
692 #undef UPDATE_COUNTERS_FOR_SPACE
693 #undef UPDATE_FRAGMENTATION_FOR_SPACE
694 #undef UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE
697 ReportStatisticsAfterGC();
699 #ifdef ENABLE_DEBUGGER_SUPPORT
700 isolate_->debug()->AfterGarbageCollection();
701 #endif // ENABLE_DEBUGGER_SUPPORT
705 void Heap::CollectAllGarbage(int flags,
706 const char* gc_reason,
707 const v8::GCCallbackFlags gc_callback_flags) {
708 // Since we are ignoring the return value, the exact choice of space does
709 // not matter, so long as we do not specify NEW_SPACE, which would not cause a full GC.
711 mark_compact_collector_.SetFlags(flags);
712 CollectGarbage(OLD_POINTER_SPACE, gc_reason, gc_callback_flags);
713 mark_compact_collector_.SetFlags(kNoGCFlags);
717 void Heap::CollectAllAvailableGarbage(const char* gc_reason) {
718 // Since we are ignoring the return value, the exact choice of space does
719 // not matter, so long as we do not specify NEW_SPACE, which would not cause a full GC.
721 // Major GC would invoke weak handle callbacks on weakly reachable
722 // handles, but won't collect weakly reachable objects until next
723 // major GC. Therefore if we collect aggressively and weak handle callback
724 // has been invoked, we rerun major GC to release objects which become garbage.
726 // Note: as weak callbacks can execute arbitrary code, we cannot
727 // hope that eventually there will be no weak callbacks invocations.
728 // Therefore stop recollecting after several attempts.
729 if (isolate()->concurrent_recompilation_enabled()) {
730 // The optimizing compiler may be unnecessarily holding on to memory.
731 DisallowHeapAllocation no_recursive_gc;
732 isolate()->optimizing_compiler_thread()->Flush();
734 mark_compact_collector()->SetFlags(kMakeHeapIterableMask |
735 kReduceMemoryFootprintMask);
736 isolate_->compilation_cache()->Clear();
737 const int kMaxNumberOfAttempts = 7;
738 const int kMinNumberOfAttempts = 2;
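// Added note: the loop below always performs at least kMinNumberOfAttempts
// full collections and keeps going (up to kMaxNumberOfAttempts) as long as
// CollectGarbage reports that another collection is likely to free more
// memory, e.g. because weak handle callbacks released further objects.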
739 for (int attempt = 0; attempt < kMaxNumberOfAttempts; attempt++) {
740 if (!CollectGarbage(MARK_COMPACTOR, gc_reason, NULL) &&
741 attempt + 1 >= kMinNumberOfAttempts) {
745 mark_compact_collector()->SetFlags(kNoGCFlags);
748 incremental_marking()->UncommitMarkingDeque();
752 bool Heap::CollectGarbage(GarbageCollector collector,
753 const char* gc_reason,
754 const char* collector_reason,
755 const v8::GCCallbackFlags gc_callback_flags) {
756 // The VM is in the GC state until exiting this function.
757 VMState<GC> state(isolate_);
760 // Reset the allocation timeout to the GC interval, but make sure to
761 // allow at least a few allocations after a collection. The reason
762 // for this is that we have a lot of allocation sequences and we
763 // assume that a garbage collection will allow the subsequent
764 // allocation attempts to go through.
765 allocation_timeout_ = Max(6, FLAG_gc_interval);
768 // There may be an allocation memento behind every object in new space.
769 // If we evacuate a new space that is not full, or if we are on the last page of
770 // the new space, then there may be uninitialized memory behind the top
771 // pointer of the new space page. We store a filler object there to
772 // identify the unused space.
773 Address from_top = new_space_.top();
774 Address from_limit = new_space_.limit();
775 if (from_top < from_limit) {
776 int remaining_in_page = static_cast<int>(from_limit - from_top);
777 CreateFillerObjectAt(from_top, remaining_in_page);
780 if (collector == SCAVENGER && !incremental_marking()->IsStopped()) {
781 if (FLAG_trace_incremental_marking) {
782 PrintF("[IncrementalMarking] Scavenge during marking.\n");
786 if (collector == MARK_COMPACTOR &&
787 !mark_compact_collector()->abort_incremental_marking() &&
788 !incremental_marking()->IsStopped() &&
789 !incremental_marking()->should_hurry() &&
790 FLAG_incremental_marking_steps) {
791 // Make progress in incremental marking.
792 const intptr_t kStepSizeWhenDelayedByScavenge = 1 * MB;
793 incremental_marking()->Step(kStepSizeWhenDelayedByScavenge,
794 IncrementalMarking::NO_GC_VIA_STACK_GUARD);
795 if (!incremental_marking()->IsComplete()) {
796 if (FLAG_trace_incremental_marking) {
797 PrintF("[IncrementalMarking] Delaying MarkSweep.\n");
799 collector = SCAVENGER;
800 collector_reason = "incremental marking delaying mark-sweep";
804 bool next_gc_likely_to_collect_more = false;
806 { GCTracer tracer(this, gc_reason, collector_reason);
807 ASSERT(AllowHeapAllocation::IsAllowed());
808 DisallowHeapAllocation no_allocation_during_gc;
809 GarbageCollectionPrologue();
810 // The GC count was incremented in the prologue. Tell the tracer about it.
812 tracer.set_gc_count(gc_count_);
814 // Tell the tracer which collector we've selected.
815 tracer.set_collector(collector);
818 HistogramTimerScope histogram_timer_scope(
819 (collector == SCAVENGER) ? isolate_->counters()->gc_scavenger()
820 : isolate_->counters()->gc_compactor());
821 next_gc_likely_to_collect_more =
822 PerformGarbageCollection(collector, &tracer, gc_callback_flags);
825 GarbageCollectionEpilogue();
828 // Start incremental marking for the next cycle. The heap snapshot
829 // generator needs incremental marking to stay off after it aborted.
830 if (!mark_compact_collector()->abort_incremental_marking() &&
831 incremental_marking()->IsStopped() &&
832 incremental_marking()->WorthActivating() &&
833 NextGCIsLikelyToBeFull()) {
834 incremental_marking()->Start();
837 return next_gc_likely_to_collect_more;
841 int Heap::NotifyContextDisposed() {
842 if (isolate()->concurrent_recompilation_enabled()) {
843 // Flush the queued recompilation tasks.
844 isolate()->optimizing_compiler_thread()->Flush();
846 flush_monomorphic_ics_ = true;
848 return ++contexts_disposed_;
852 void Heap::PerformScavenge() {
853 GCTracer tracer(this, NULL, NULL);
854 if (incremental_marking()->IsStopped()) {
855 PerformGarbageCollection(SCAVENGER, &tracer);
857 PerformGarbageCollection(MARK_COMPACTOR, &tracer);
862 void Heap::MoveElements(FixedArray* array,
866 if (len == 0) return;
868 ASSERT(array->map() != fixed_cow_array_map());
869 Object** dst_objects = array->data_start() + dst_index;
870 OS::MemMove(dst_objects,
871 array->data_start() + src_index,
873 if (!InNewSpace(array)) {
874 for (int i = 0; i < len; i++) {
875 // TODO(hpayer): check store buffer for entries
876 if (InNewSpace(dst_objects[i])) {
877 RecordWrite(array->address(), array->OffsetOfElementAt(dst_index + i));
881 incremental_marking()->RecordWrites(array);
886 // Helper class for verifying the string table.
887 class StringTableVerifier : public ObjectVisitor {
889 void VisitPointers(Object** start, Object** end) {
890 // Visit all HeapObject pointers in [start, end).
891 for (Object** p = start; p < end; p++) {
892 if ((*p)->IsHeapObject()) {
893 // Check that the string is actually internalized.
894 CHECK((*p)->IsTheHole() || (*p)->IsUndefined() ||
895 (*p)->IsInternalizedString());
902 static void VerifyStringTable(Heap* heap) {
903 StringTableVerifier verifier;
904 heap->string_table()->IterateElements(&verifier);
906 #endif // VERIFY_HEAP
909 static bool AbortIncrementalMarkingAndCollectGarbage(
911 AllocationSpace space,
912 const char* gc_reason = NULL) {
913 heap->mark_compact_collector()->SetFlags(Heap::kAbortIncrementalMarkingMask);
914 bool result = heap->CollectGarbage(space, gc_reason);
915 heap->mark_compact_collector()->SetFlags(Heap::kNoGCFlags);
920 void Heap::ReserveSpace(int *sizes, Address *locations_out) {
921 bool gc_performed = true;
923 static const int kThreshold = 20;
924 while (gc_performed && counter++ < kThreshold) {
925 gc_performed = false;
926 ASSERT(NEW_SPACE == FIRST_PAGED_SPACE - 1);
927 for (int space = NEW_SPACE; space <= LAST_PAGED_SPACE; space++) {
928 if (sizes[space] != 0) {
929 MaybeObject* allocation;
930 if (space == NEW_SPACE) {
931 allocation = new_space()->AllocateRaw(sizes[space]);
933 allocation = paged_space(space)->AllocateRaw(sizes[space]);
936 if (!allocation->To<FreeListNode>(&node)) {
937 if (space == NEW_SPACE) {
938 Heap::CollectGarbage(NEW_SPACE,
939 "failed to reserve space in the new space");
941 AbortIncrementalMarkingAndCollectGarbage(
943 static_cast<AllocationSpace>(space),
944 "failed to reserve space in paged space");
949 // Mark with a free list node, in case we have a GC before allocating.
951 node->set_size(this, sizes[space]);
952 locations_out[space] = node->address();
959 // Failed to reserve the space after several attempts.
960 V8::FatalProcessOutOfMemory("Heap::ReserveSpace");
965 void Heap::EnsureFromSpaceIsCommitted() {
966 if (new_space_.CommitFromSpaceIfNeeded()) return;
968 // Committing memory to from space failed.
969 // Memory is exhausted and we will die.
970 V8::FatalProcessOutOfMemory("Committing semi space failed.");
974 void Heap::ClearJSFunctionResultCaches() {
975 if (isolate_->bootstrapper()->IsActive()) return;
977 Object* context = native_contexts_list_;
978 while (!context->IsUndefined()) {
979 // Get the caches for this context. GC can happen when the context
980 // is not fully initialized, so the caches can be undefined.
981 Object* caches_or_undefined =
982 Context::cast(context)->get(Context::JSFUNCTION_RESULT_CACHES_INDEX);
983 if (!caches_or_undefined->IsUndefined()) {
984 FixedArray* caches = FixedArray::cast(caches_or_undefined);
986 int length = caches->length();
987 for (int i = 0; i < length; i++) {
988 JSFunctionResultCache::cast(caches->get(i))->Clear();
991 // Get the next context:
992 context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
997 void Heap::ClearNormalizedMapCaches() {
998 if (isolate_->bootstrapper()->IsActive() &&
999 !incremental_marking()->IsMarking()) {
1003 Object* context = native_contexts_list_;
1004 while (!context->IsUndefined()) {
1005 // GC can happen when the context is not fully initialized,
1006 // so the cache can be undefined.
1008 Context::cast(context)->get(Context::NORMALIZED_MAP_CACHE_INDEX);
1009 if (!cache->IsUndefined()) {
1010 NormalizedMapCache::cast(cache)->Clear();
1012 context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
1017 void Heap::UpdateSurvivalRateTrend(int start_new_space_size) {
1018 if (start_new_space_size == 0) return;
1020 double survival_rate =
1021 (static_cast<double>(young_survivors_after_last_gc_) * 100) /
1022 start_new_space_size;
1024 if (survival_rate > kYoungSurvivalRateHighThreshold) {
1025 high_survival_rate_period_length_++;
1027 high_survival_rate_period_length_ = 0;
1030 if (survival_rate < kYoungSurvivalRateLowThreshold) {
1031 low_survival_rate_period_length_++;
1033 low_survival_rate_period_length_ = 0;
1036 double survival_rate_diff = survival_rate_ - survival_rate;
1038 if (survival_rate_diff > kYoungSurvivalRateAllowedDeviation) {
1039 set_survival_rate_trend(DECREASING);
1040 } else if (survival_rate_diff < -kYoungSurvivalRateAllowedDeviation) {
1041 set_survival_rate_trend(INCREASING);
1043 set_survival_rate_trend(STABLE);
1046 survival_rate_ = survival_rate;
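// A minimal standalone sketch of the bookkeeping above (illustrative only;
// the real thresholds and the trend enum live in heap.h):
//
//   double SurvivalRatePercent(intptr_t survived_bytes, intptr_t start_size) {
//     return start_size == 0 ? 0.0
//                            : static_cast<double>(survived_bytes) * 100 /
//                                  static_cast<double>(start_size);
//   }
//   // trend: previous_rate - current_rate >  deviation  => DECREASING
//   //        previous_rate - current_rate < -deviation  => INCREASING
//   //        otherwise                                   => STABLE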
1049 bool Heap::PerformGarbageCollection(
1050 GarbageCollector collector,
1052 const v8::GCCallbackFlags gc_callback_flags) {
1053 bool next_gc_likely_to_collect_more = false;
1055 if (collector != SCAVENGER) {
1056 PROFILE(isolate_, CodeMovingGCEvent());
1060 if (FLAG_verify_heap) {
1061 VerifyStringTable(this);
1066 collector == MARK_COMPACTOR ? kGCTypeMarkSweepCompact : kGCTypeScavenge;
1069 GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
1070 VMState<EXTERNAL> state(isolate_);
1071 HandleScope handle_scope(isolate_);
1072 CallGCPrologueCallbacks(gc_type, kNoGCCallbackFlags);
1075 EnsureFromSpaceIsCommitted();
1077 int start_new_space_size = Heap::new_space()->SizeAsInt();
1079 if (IsHighSurvivalRate()) {
1080 // We speed up the incremental marker if it is running so that it
1081 // does not fall behind the rate of promotion, which would cause a
1082 // constantly growing old space.
1083 incremental_marking()->NotifyOfHighPromotionRate();
1086 if (collector == MARK_COMPACTOR) {
1087 // Perform mark-sweep with optional compaction.
1088 MarkCompact(tracer);
1089 sweep_generation_++;
1091 UpdateSurvivalRateTrend(start_new_space_size);
1093 size_of_old_gen_at_last_old_space_gc_ = PromotedSpaceSizeOfObjects();
1095 old_generation_allocation_limit_ =
1096 OldGenerationAllocationLimit(size_of_old_gen_at_last_old_space_gc_);
1098 old_gen_exhausted_ = false;
1104 UpdateSurvivalRateTrend(start_new_space_size);
1107 if (!new_space_high_promotion_mode_active_ &&
1108 new_space_.Capacity() == new_space_.MaximumCapacity() &&
1109 IsStableOrIncreasingSurvivalTrend() &&
1110 IsHighSurvivalRate()) {
1111 // Stable high survival rates even though young generation is at
1112 // maximum capacity indicates that most objects will be promoted.
1113 // To decrease scavenger pauses and final mark-sweep pauses, we
1114 // have to limit maximal capacity of the young generation.
1115 SetNewSpaceHighPromotionModeActive(true);
1116 if (FLAG_trace_gc) {
1117 PrintPID("Limited new space size due to high promotion rate: %d MB\n",
1118 new_space_.InitialCapacity() / MB);
1120 // The high promotion mode is our indicator to turn on pretenuring. We have
1121 // to deoptimize all optimized code in global pretenuring mode and all
1122 // code which should be tenured in local pretenuring mode.
1123 if (FLAG_pretenuring) {
1124 if (!FLAG_allocation_site_pretenuring) {
1125 isolate_->stack_guard()->FullDeopt();
1128 } else if (new_space_high_promotion_mode_active_ &&
1129 IsStableOrDecreasingSurvivalTrend() &&
1130 IsLowSurvivalRate()) {
1131 // Decreasing low survival rates might indicate that the above high
1132 // promotion mode is over and we should allow the young generation to grow freely again.
1134 SetNewSpaceHighPromotionModeActive(false);
1135 if (FLAG_trace_gc) {
1136 PrintPID("Unlimited new space size due to low promotion rate: %d MB\n",
1137 new_space_.MaximumCapacity() / MB);
1139 // Trigger deoptimization here to turn off global pretenuring as soon as possible.
1141 if (FLAG_pretenuring && !FLAG_allocation_site_pretenuring) {
1142 isolate_->stack_guard()->FullDeopt();
1146 if (new_space_high_promotion_mode_active_ &&
1147 new_space_.Capacity() > new_space_.InitialCapacity()) {
1148 new_space_.Shrink();
1151 isolate_->counters()->objs_since_last_young()->Set(0);
1153 // Callbacks that fire after this point might trigger nested GCs and
1154 // restart incremental marking, so the assertion can't be moved down.
1155 ASSERT(collector == SCAVENGER || incremental_marking()->IsStopped());
1157 gc_post_processing_depth_++;
1158 { AllowHeapAllocation allow_allocation;
1159 GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
1160 next_gc_likely_to_collect_more =
1161 isolate_->global_handles()->PostGarbageCollectionProcessing(
1164 gc_post_processing_depth_--;
1166 isolate_->eternal_handles()->PostGarbageCollectionProcessing(this);
1168 // Update relocatables.
1169 Relocatable::PostGarbageCollectionProcessing(isolate_);
1171 if (collector == MARK_COMPACTOR) {
1172 // Register the amount of external allocated memory.
1173 amount_of_external_allocated_memory_at_last_global_gc_ =
1174 amount_of_external_allocated_memory_;
1178 GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
1179 VMState<EXTERNAL> state(isolate_);
1180 HandleScope handle_scope(isolate_);
1181 CallGCEpilogueCallbacks(gc_type, gc_callback_flags);
1185 if (FLAG_verify_heap) {
1186 VerifyStringTable(this);
1190 return next_gc_likely_to_collect_more;
1194 void Heap::CallGCPrologueCallbacks(GCType gc_type, GCCallbackFlags flags) {
1195 for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
1196 if (gc_type & gc_prologue_callbacks_[i].gc_type) {
1197 if (!gc_prologue_callbacks_[i].pass_isolate_) {
1198 v8::GCPrologueCallback callback =
1199 reinterpret_cast<v8::GCPrologueCallback>(
1200 gc_prologue_callbacks_[i].callback);
1201 callback(gc_type, flags);
1203 v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(this->isolate());
1204 gc_prologue_callbacks_[i].callback(isolate, gc_type, flags);
1211 void Heap::CallGCEpilogueCallbacks(GCType gc_type,
1212 GCCallbackFlags gc_callback_flags) {
1213 for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
1214 if (gc_type & gc_epilogue_callbacks_[i].gc_type) {
1215 if (!gc_epilogue_callbacks_[i].pass_isolate_) {
1216 v8::GCPrologueCallback callback =
1217 reinterpret_cast<v8::GCPrologueCallback>(
1218 gc_epilogue_callbacks_[i].callback);
1219 callback(gc_type, gc_callback_flags);
1221 v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(this->isolate());
1222 gc_epilogue_callbacks_[i].callback(
1223 isolate, gc_type, gc_callback_flags);
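// Hedged usage sketch (embedder side, via the public API in v8.h; the exact
// signatures here are assumptions, consult v8.h for the authoritative ones):
//
//   static void OnFullGC(v8::Isolate* isolate, v8::GCType type,
//                        v8::GCCallbackFlags flags) {
//     // e.g. record statistics or drop embedder caches
//   }
//   isolate->AddGCPrologueCallback(OnFullGC, v8::kGCTypeMarkSweepCompact);
//   ...
//   isolate->RemoveGCPrologueCallback(OnFullGC);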
1230 void Heap::MarkCompact(GCTracer* tracer) {
1231 gc_state_ = MARK_COMPACT;
1232 LOG(isolate_, ResourceEvent("markcompact", "begin"));
1234 uint64_t size_of_objects_before_gc = SizeOfObjects();
1236 mark_compact_collector_.Prepare(tracer);
1239 tracer->set_full_gc_count(ms_count_);
1241 MarkCompactPrologue();
1243 mark_compact_collector_.CollectGarbage();
1245 LOG(isolate_, ResourceEvent("markcompact", "end"));
1247 gc_state_ = NOT_IN_GC;
1249 isolate_->counters()->objs_since_last_full()->Set(0);
1251 flush_monomorphic_ics_ = false;
1253 if (FLAG_allocation_site_pretenuring) {
1254 EvaluateOldSpaceLocalPretenuring(size_of_objects_before_gc);
1259 void Heap::MarkCompactPrologue() {
1260 // At any old GC clear the keyed lookup cache to enable collection of unused maps.
1262 isolate_->keyed_lookup_cache()->Clear();
1263 isolate_->context_slot_cache()->Clear();
1264 isolate_->descriptor_lookup_cache()->Clear();
1265 RegExpResultsCache::Clear(string_split_cache());
1266 RegExpResultsCache::Clear(regexp_multiple_cache());
1268 isolate_->compilation_cache()->MarkCompactPrologue();
1270 CompletelyClearInstanceofCache();
1272 FlushNumberStringCache();
1273 if (FLAG_cleanup_code_caches_at_gc) {
1274 polymorphic_code_cache()->set_cache(undefined_value());
1277 ClearNormalizedMapCaches();
1281 // Helper class for copying HeapObjects
1282 class ScavengeVisitor: public ObjectVisitor {
1284 explicit ScavengeVisitor(Heap* heap) : heap_(heap) {}
1286 void VisitPointer(Object** p) { ScavengePointer(p); }
1288 void VisitPointers(Object** start, Object** end) {
1289 // Copy all HeapObject pointers in [start, end)
1290 for (Object** p = start; p < end; p++) ScavengePointer(p);
1294 void ScavengePointer(Object** p) {
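// Added comment: only pointers that refer into new space are forwarded;
// ScavengeObject copies or promotes the target and rewrites the slot *p
// with its new location, everything else is left untouched.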
1295 Object* object = *p;
1296 if (!heap_->InNewSpace(object)) return;
1297 Heap::ScavengeObject(reinterpret_cast<HeapObject**>(p),
1298 reinterpret_cast<HeapObject*>(object));
1306 // Visitor class to verify that pointers in code or data space do not point into new space.
1308 class VerifyNonPointerSpacePointersVisitor: public ObjectVisitor {
1310 explicit VerifyNonPointerSpacePointersVisitor(Heap* heap) : heap_(heap) {}
1311 void VisitPointers(Object** start, Object**end) {
1312 for (Object** current = start; current < end; current++) {
1313 if ((*current)->IsHeapObject()) {
1314 CHECK(!heap_->InNewSpace(HeapObject::cast(*current)));
1324 static void VerifyNonPointerSpacePointers(Heap* heap) {
1325 // Verify that there are no pointers to new space in spaces where we
1326 // do not expect them.
1327 VerifyNonPointerSpacePointersVisitor v(heap);
1328 HeapObjectIterator code_it(heap->code_space());
1329 for (HeapObject* object = code_it.Next();
1330 object != NULL; object = code_it.Next())
1331 object->Iterate(&v);
1333 // The old data space was normally swept conservatively, so the iterator
1334 // does not work on it and we normally skip the next bit.
1335 if (!heap->old_data_space()->was_swept_conservatively()) {
1336 HeapObjectIterator data_it(heap->old_data_space());
1337 for (HeapObject* object = data_it.Next();
1338 object != NULL; object = data_it.Next())
1339 object->Iterate(&v);
1342 #endif // VERIFY_HEAP
1345 void Heap::CheckNewSpaceExpansionCriteria() {
1346 if (new_space_.Capacity() < new_space_.MaximumCapacity() &&
1347 survived_since_last_expansion_ > new_space_.Capacity() &&
1348 !new_space_high_promotion_mode_active_) {
1349 // Grow the size of new space if there is room to grow, enough data
1350 // has survived scavenge since the last expansion and we are not in
1351 // high promotion mode.
1353 survived_since_last_expansion_ = 0;
1358 static bool IsUnscavengedHeapObject(Heap* heap, Object** p) {
1359 return heap->InNewSpace(*p) &&
1360 !HeapObject::cast(*p)->map_word().IsForwardingAddress();
1364 void Heap::ScavengeStoreBufferCallback(
1367 StoreBufferEvent event) {
1368 heap->store_buffer_rebuilder_.Callback(page, event);
1372 void StoreBufferRebuilder::Callback(MemoryChunk* page, StoreBufferEvent event) {
1373 if (event == kStoreBufferStartScanningPagesEvent) {
1374 start_of_current_page_ = NULL;
1375 current_page_ = NULL;
1376 } else if (event == kStoreBufferScanningPageEvent) {
1377 if (current_page_ != NULL) {
1378 // If this page already overflowed the store buffer during this iteration.
1379 if (current_page_->scan_on_scavenge()) {
1380 // Then we should wipe out the entries that have been added for it.
1381 store_buffer_->SetTop(start_of_current_page_);
1382 } else if (store_buffer_->Top() - start_of_current_page_ >=
1383 (store_buffer_->Limit() - store_buffer_->Top()) >> 2) {
1384 // Did we find too many pointers in the previous page? The heuristic is
1385 // that no page can take more than 1/5 of the remaining slots in the store buffer.
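// Worked example (illustrative numbers): suppose 5000 slots were free before
// this page was scanned and the page added 1000 entries, leaving 4000 free;
// 1000 >= (4000 >> 2), so the page's entries are wiped from the buffer and
// the page is marked scan_on_scavenge instead.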
1387 current_page_->set_scan_on_scavenge(true);
1388 store_buffer_->SetTop(start_of_current_page_);
1390 // In this case the page we scanned took a reasonable number of slots in
1391 // the store buffer. It has now been rehabilitated and is no longer
1392 // marked scan_on_scavenge.
1393 ASSERT(!current_page_->scan_on_scavenge());
1396 start_of_current_page_ = store_buffer_->Top();
1397 current_page_ = page;
1398 } else if (event == kStoreBufferFullEvent) {
1399 // The current page overflowed the store buffer again. Wipe out its entries
1400 // in the store buffer and mark it scan-on-scavenge again. This may happen
1401 // several times while scanning.
1402 if (current_page_ == NULL) {
1403 // Store Buffer overflowed while scanning promoted objects. These are not
1404 // in any particular page, though they are likely to be clustered by the
1405 // allocation routines.
1406 store_buffer_->EnsureSpace(StoreBuffer::kStoreBufferSize / 2);
1408 // Store Buffer overflowed while scanning a particular old space page for
1409 // pointers to new space.
1410 ASSERT(current_page_ == page);
1411 ASSERT(page != NULL);
1412 current_page_->set_scan_on_scavenge(true);
1413 ASSERT(start_of_current_page_ != store_buffer_->Top());
1414 store_buffer_->SetTop(start_of_current_page_);
1422 void PromotionQueue::Initialize() {
1423 // Assumes that a NewSpacePage exactly fits a number of promotion queue
1424 // entries (where each is a pair of intptr_t). This allows us to simplify
1425 // the test for when to switch pages.
1426 ASSERT((Page::kPageSize - MemoryChunk::kBodyOffset) % (2 * kPointerSize)
1428 limit_ = reinterpret_cast<intptr_t*>(heap_->new_space()->ToSpaceStart());
1430 reinterpret_cast<intptr_t*>(heap_->new_space()->ToSpaceEnd());
1431 emergency_stack_ = NULL;
1436 void PromotionQueue::RelocateQueueHead() {
1437 ASSERT(emergency_stack_ == NULL);
1439 Page* p = Page::FromAllocationTop(reinterpret_cast<Address>(rear_));
1440 intptr_t* head_start = rear_;
1441 intptr_t* head_end =
1442 Min(front_, reinterpret_cast<intptr_t*>(p->area_end()));
1445 static_cast<int>(head_end - head_start) / kEntrySizeInWords;
1447 emergency_stack_ = new List<Entry>(2 * entries_count);
1449 while (head_start != head_end) {
1450 int size = static_cast<int>(*(head_start++));
1451 HeapObject* obj = reinterpret_cast<HeapObject*>(*(head_start++));
1452 emergency_stack_->Add(Entry(obj, size));
1458 class ScavengeWeakObjectRetainer : public WeakObjectRetainer {
1460 explicit ScavengeWeakObjectRetainer(Heap* heap) : heap_(heap) { }
1462 virtual Object* RetainAs(Object* object) {
1463 if (!heap_->InFromSpace(object)) {
1467 MapWord map_word = HeapObject::cast(object)->map_word();
1468 if (map_word.IsForwardingAddress()) {
1469 return map_word.ToForwardingAddress();
1479 void Heap::Scavenge() {
1480 RelocationLock relocation_lock(this);
1483 if (FLAG_verify_heap) VerifyNonPointerSpacePointers(this);
1486 gc_state_ = SCAVENGE;
1488 // Implements Cheney's copying algorithm
1489 LOG(isolate_, ResourceEvent("scavenge", "begin"));
1491 // Clear descriptor cache.
1492 isolate_->descriptor_lookup_cache()->Clear();
1494 // Used for updating survived_since_last_expansion_ at function end.
1495 intptr_t survived_watermark = PromotedSpaceSizeOfObjects();
1497 CheckNewSpaceExpansionCriteria();
1499 SelectScavengingVisitorsTable();
1501 incremental_marking()->PrepareForScavenge();
1503 paged_space(OLD_DATA_SPACE)->EnsureSweeperProgress(new_space_.Size());
1504 paged_space(OLD_POINTER_SPACE)->EnsureSweeperProgress(new_space_.Size());
1506 // Flip the semispaces. After flipping, to space is empty, from space has the live objects.
1509 new_space_.ResetAllocationInfo();
1511 // We need to sweep newly copied objects which can be either in the
1512 // to space or promoted to the old generation. For to-space
1513 // objects, we treat the bottom of the to space as a queue. Newly
1514 // copied and unswept objects lie between a 'front' mark and the
1515 // allocation pointer.
1517 // Promoted objects can go into various old-generation spaces, and
1518 // can be allocated internally in the spaces (from the free list).
1519 // We treat the top of the to space as a queue of addresses of
1520 // promoted objects. The addresses of newly promoted and unswept
1521 // objects lie between a 'front' mark and a 'rear' mark that is
1522 // updated as a side effect of promoting an object.
1524 // There is guaranteed to be enough room at the top of the to space
1525 // for the addresses of promoted objects: every object promoted
1526 // frees up its size in bytes from the top of the new space, and
1527 // objects are at least one pointer in size.
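// Illustrative picture of to space during the scavenge (added, schematic):
//
//   ToSpaceStart()                                            ToSpaceEnd()
//   | scanned copies | unscanned copies | free ... | promotion queue entries |
//                    ^ new_space_front  ^ allocation top      (grows downward)
//
// DoScavenge repeatedly advances new_space_front over freshly copied objects
// (Cheney's scan pointer) and drains the promotion queue until both catch up.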
1528 Address new_space_front = new_space_.ToSpaceStart();
1529 promotion_queue_.Initialize();
1532 store_buffer()->Clean();
1535 ScavengeVisitor scavenge_visitor(this);
1537 IterateRoots(&scavenge_visitor, VISIT_ALL_IN_SCAVENGE);
1539 // Copy objects reachable from the old generation.
1541 StoreBufferRebuildScope scope(this,
1543 &ScavengeStoreBufferCallback);
1544 store_buffer()->IteratePointersToNewSpace(&ScavengeObject);
1547 // Copy objects reachable from simple cells by scavenging cell values directly.
1549 HeapObjectIterator cell_iterator(cell_space_);
1550 for (HeapObject* heap_object = cell_iterator.Next();
1551 heap_object != NULL;
1552 heap_object = cell_iterator.Next()) {
1553 if (heap_object->IsCell()) {
1554 Cell* cell = Cell::cast(heap_object);
1555 Address value_address = cell->ValueAddress();
1556 scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(value_address));
1560 // Copy objects reachable from global property cells by scavenging global
1561 // property cell values directly.
1562 HeapObjectIterator js_global_property_cell_iterator(property_cell_space_);
1563 for (HeapObject* heap_object = js_global_property_cell_iterator.Next();
1564 heap_object != NULL;
1565 heap_object = js_global_property_cell_iterator.Next()) {
1566 if (heap_object->IsPropertyCell()) {
1567 PropertyCell* cell = PropertyCell::cast(heap_object);
1568 Address value_address = cell->ValueAddress();
1569 scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(value_address));
1570 Address type_address = cell->TypeAddress();
1571 scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(type_address));
1575 // Copy objects reachable from the code flushing candidates list.
1576 MarkCompactCollector* collector = mark_compact_collector();
1577 if (collector->is_code_flushing_enabled()) {
1578 collector->code_flusher()->IteratePointersToFromSpace(&scavenge_visitor);
1581 // Scavenge object reachable from the native contexts list directly.
1582 scavenge_visitor.VisitPointer(BitCast<Object**>(&native_contexts_list_));
1584 new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
1586 while (isolate()->global_handles()->IterateObjectGroups(
1587 &scavenge_visitor, &IsUnscavengedHeapObject)) {
1588 new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
1590 isolate()->global_handles()->RemoveObjectGroups();
1591 isolate()->global_handles()->RemoveImplicitRefGroups();
1593 isolate_->global_handles()->IdentifyNewSpaceWeakIndependentHandles(
1594 &IsUnscavengedHeapObject);
1595 isolate_->global_handles()->IterateNewSpaceWeakIndependentRoots(
1597 new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
1599 UpdateNewSpaceReferencesInExternalStringTable(
1600 &UpdateNewSpaceReferenceInExternalStringTableEntry);
1602 promotion_queue_.Destroy();
1604 incremental_marking()->UpdateMarkingDequeAfterScavenge();
1606 ScavengeWeakObjectRetainer weak_object_retainer(this);
1607 ProcessWeakReferences(&weak_object_retainer);
1609 ASSERT(new_space_front == new_space_.top());
1612 new_space_.set_age_mark(new_space_.top());
1614 new_space_.LowerInlineAllocationLimit(
1615 new_space_.inline_allocation_limit_step());
1617 // Update how much has survived scavenge.
1618 IncrementYoungSurvivorsCounter(static_cast<int>(
1619 (PromotedSpaceSizeOfObjects() - survived_watermark) + new_space_.Size()));
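// Worked example (illustrative): if the promoted spaces grew from 100 MB to
// 103 MB during this scavenge and 2 MB of live objects remain in new space
// afterwards, 5 MB is recorded as having survived this young-generation GC.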
1621 ProcessPretenuringFeedback();
1623 LOG(isolate_, ResourceEvent("scavenge", "end"));
1625 gc_state_ = NOT_IN_GC;
1627 scavenges_since_last_idle_round_++;
1631 String* Heap::UpdateNewSpaceReferenceInExternalStringTableEntry(Heap* heap,
1633 MapWord first_word = HeapObject::cast(*p)->map_word();
1635 if (!first_word.IsForwardingAddress()) {
1636 // Unreachable external string can be finalized.
1637 heap->FinalizeExternalString(String::cast(*p));
1641 // String is still reachable.
1642 return String::cast(first_word.ToForwardingAddress());
1646 void Heap::UpdateNewSpaceReferencesInExternalStringTable(
1647 ExternalStringTableUpdaterCallback updater_func) {
1649 if (FLAG_verify_heap) {
1650 external_string_table_.Verify();
1654 if (external_string_table_.new_space_strings_.is_empty()) return;
1656 Object** start = &external_string_table_.new_space_strings_[0];
1657 Object** end = start + external_string_table_.new_space_strings_.length();
1658 Object** last = start;
1660 for (Object** p = start; p < end; ++p) {
1661 ASSERT(InFromSpace(*p));
1662 String* target = updater_func(this, p);
1664 if (target == NULL) continue;
1666 ASSERT(target->IsExternalString());
1668 if (InNewSpace(target)) {
1669 // String is still in new space. Update the table entry.
1673 // String got promoted. Move it to the old string list.
1674 external_string_table_.AddOldString(target);
1678 ASSERT(last <= end);
1679 external_string_table_.ShrinkNewStrings(static_cast<int>(last - start));
1683 void Heap::UpdateReferencesInExternalStringTable(
1684 ExternalStringTableUpdaterCallback updater_func) {
1686 // Update old space string references.
1687 if (external_string_table_.old_space_strings_.length() > 0) {
1688 Object** start = &external_string_table_.old_space_strings_[0];
1689 Object** end = start + external_string_table_.old_space_strings_.length();
1690 for (Object** p = start; p < end; ++p) *p = updater_func(this, p);
1693 UpdateNewSpaceReferencesInExternalStringTable(updater_func);
1698 struct WeakListVisitor;
1702 static Object* VisitWeakList(Heap* heap,
1704 WeakObjectRetainer* retainer,
1705 bool record_slots) {
1706 Object* undefined = heap->undefined_value();
1707 Object* head = undefined;
1709 MarkCompactCollector* collector = heap->mark_compact_collector();
1710 while (list != undefined) {
1711 // Check whether to keep the candidate in the list.
1712 T* candidate = reinterpret_cast<T*>(list);
1713 Object* retained = retainer->RetainAs(list);
1714 if (retained != NULL) {
1715 if (head == undefined) {
1716 // First element in the list.
1719 // Subsequent elements in the list.
1720 ASSERT(tail != NULL);
1721 WeakListVisitor<T>::SetWeakNext(tail, retained);
1723 Object** next_slot =
1724 HeapObject::RawField(tail, WeakListVisitor<T>::WeakNextOffset());
1725 collector->RecordSlot(next_slot, next_slot, retained);
1728 // Retained object is new tail.
1729 ASSERT(!retained->IsUndefined());
1730 candidate = reinterpret_cast<T*>(retained);
1734 // tail is a live object, visit it.
1735 WeakListVisitor<T>::VisitLiveObject(
1736 heap, tail, retainer, record_slots);
1738 WeakListVisitor<T>::VisitPhantomObject(heap, candidate);
1741 // Move to next element in the list.
1742 list = WeakListVisitor<T>::WeakNext(candidate);
1745 // Terminate the list if there are one or more elements.
1747 WeakListVisitor<T>::SetWeakNext(tail, undefined);
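// Added note: VisitWeakList is parameterized by WeakListVisitor<T> traits
// (SetWeakNext / WeakNext / WeakNextOffset / VisitLiveObject /
// VisitPhantomObject). The specializations below wire it up for JSFunction,
// Code, Context, JSArrayBufferView, JSArrayBuffer and AllocationSite, so one
// template handles each of the weakly linked lists used here.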
1754 struct WeakListVisitor<JSFunction> {
1755 static void SetWeakNext(JSFunction* function, Object* next) {
1756 function->set_next_function_link(next);
1759 static Object* WeakNext(JSFunction* function) {
1760 return function->next_function_link();
1763 static int WeakNextOffset() {
1764 return JSFunction::kNextFunctionLinkOffset;
1767 static void VisitLiveObject(Heap*, JSFunction*,
1768 WeakObjectRetainer*, bool) {
1771 static void VisitPhantomObject(Heap*, JSFunction*) {
1777 struct WeakListVisitor<Code> {
1778 static void SetWeakNext(Code* code, Object* next) {
1779 code->set_next_code_link(next);
1782 static Object* WeakNext(Code* code) {
1783 return code->next_code_link();
1786 static int WeakNextOffset() {
1787 return Code::kNextCodeLinkOffset;
1790 static void VisitLiveObject(Heap*, Code*,
1791 WeakObjectRetainer*, bool) {
1794 static void VisitPhantomObject(Heap*, Code*) {
1800 struct WeakListVisitor<Context> {
1801 static void SetWeakNext(Context* context, Object* next) {
1802 context->set(Context::NEXT_CONTEXT_LINK,
1804 UPDATE_WRITE_BARRIER);
1807 static Object* WeakNext(Context* context) {
1808 return context->get(Context::NEXT_CONTEXT_LINK);
1811 static void VisitLiveObject(Heap* heap,
1813 WeakObjectRetainer* retainer,
1814 bool record_slots) {
1815 // Process the three weak lists linked off the context.
1816 DoWeakList<JSFunction>(heap, context, retainer, record_slots,
1817 Context::OPTIMIZED_FUNCTIONS_LIST);
1818 DoWeakList<Code>(heap, context, retainer, record_slots,
1819 Context::OPTIMIZED_CODE_LIST);
1820 DoWeakList<Code>(heap, context, retainer, record_slots,
1821 Context::DEOPTIMIZED_CODE_LIST);
1825 static void DoWeakList(Heap* heap,
1827 WeakObjectRetainer* retainer,
1830 // Visit the weak list, removing dead intermediate elements.
1831 Object* list_head = VisitWeakList<T>(heap, context->get(index), retainer,
1834 // Update the list head.
1835 context->set(index, list_head, UPDATE_WRITE_BARRIER);
1838 // Record the updated slot if necessary.
1839 Object** head_slot = HeapObject::RawField(
1840 context, FixedArray::SizeFor(index));
1841 heap->mark_compact_collector()->RecordSlot(
1842 head_slot, head_slot, list_head);
1846 static void VisitPhantomObject(Heap*, Context*) {
1849 static int WeakNextOffset() {
1850 return FixedArray::SizeFor(Context::NEXT_CONTEXT_LINK);
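// Each WeakListVisitor specialization is a small traits (policy) struct: it
// tells the generic VisitWeakList how to read and write the weak "next" link
// for one element type and what to do with live and phantom elements. The
// sketch below illustrates the same pattern in self-contained form; the names
// (ListTraits, Widget, CountList) are hypothetical, not V8 APIs.
namespace weak_traits_example {

template <class T>
struct ListTraits;  // Each list element type provides a specialization.

struct Widget {
  Widget* next_widget;
};

template <>
struct ListTraits<Widget> {
  static Widget* Next(Widget* w) { return w->next_widget; }
  static void SetNext(Widget* w, Widget* n) { w->next_widget = n; }
};

// Generic code only talks to the traits, never to the element type directly,
// so supporting a new list type requires nothing but a new specialization.
template <class T>
static int CountList(T* head) {
  int count = 0;
  for (T* it = head; it != NULL; it = ListTraits<T>::Next(it)) count++;
  return count;
}

}  // namespace weak_traits_example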
1855 void Heap::ProcessWeakReferences(WeakObjectRetainer* retainer) {
1856 // We don't record weak slots during marking or scavenges.
1857 // Instead we do it once when we complete the mark-compact cycle.
1858 // Note that the write barrier has no effect if we are already in the middle
1859 // of a compacting mark-sweep cycle, so we have to record slots manually.
1861 bool record_slots = gc_state() == MARK_COMPACT &&
1862 mark_compact_collector()->is_compacting();
1863 ProcessArrayBuffers(retainer, record_slots);
1864 ProcessNativeContexts(retainer, record_slots);
1865 // TODO(mvstanton): AllocationSites only need to be processed during
1866 // MARK_COMPACT, as they live in old space. Verify and address.
1867 ProcessAllocationSites(retainer, record_slots);
1870 void Heap::ProcessNativeContexts(WeakObjectRetainer* retainer,
1871 bool record_slots) {
1873 Object* head = VisitWeakList<Context>(
1874 this, native_contexts_list(), retainer, record_slots);
1875 // Update the head of the list of contexts.
1876 native_contexts_list_ = head;
1881 struct WeakListVisitor<JSArrayBufferView> {
1882 static void SetWeakNext(JSArrayBufferView* obj, Object* next) {
1883 obj->set_weak_next(next);
1886 static Object* WeakNext(JSArrayBufferView* obj) {
1887 return obj->weak_next();
1890 static void VisitLiveObject(Heap*,
1891 JSArrayBufferView* obj,
1892 WeakObjectRetainer* retainer,
1893 bool record_slots) {}
1895 static void VisitPhantomObject(Heap*, JSArrayBufferView*) {}
1897 static int WeakNextOffset() {
1898 return JSArrayBufferView::kWeakNextOffset;
1904 struct WeakListVisitor<JSArrayBuffer> {
1905 static void SetWeakNext(JSArrayBuffer* obj, Object* next) {
1906 obj->set_weak_next(next);
1909 static Object* WeakNext(JSArrayBuffer* obj) {
1910 return obj->weak_next();
1913 static void VisitLiveObject(Heap* heap,
1914 JSArrayBuffer* array_buffer,
1915 WeakObjectRetainer* retainer,
1916 bool record_slots) {
1917 Object* typed_array_obj =
1918 VisitWeakList<JSArrayBufferView>(
1920 array_buffer->weak_first_view(),
1921 retainer, record_slots);
1922 array_buffer->set_weak_first_view(typed_array_obj);
1923 if (typed_array_obj != heap->undefined_value() && record_slots) {
1924 Object** slot = HeapObject::RawField(
1925 array_buffer, JSArrayBuffer::kWeakFirstViewOffset);
1926 heap->mark_compact_collector()->RecordSlot(slot, slot, typed_array_obj);
1930 static void VisitPhantomObject(Heap* heap, JSArrayBuffer* phantom) {
1931 Runtime::FreeArrayBuffer(heap->isolate(), phantom);
1934 static int WeakNextOffset() {
1935 return JSArrayBuffer::kWeakNextOffset;
1940 void Heap::ProcessArrayBuffers(WeakObjectRetainer* retainer,
1941 bool record_slots) {
1942 Object* array_buffer_obj =
1943 VisitWeakList<JSArrayBuffer>(this,
1944 array_buffers_list(),
1945 retainer, record_slots);
1946 set_array_buffers_list(array_buffer_obj);
1950 void Heap::TearDownArrayBuffers() {
1951 Object* undefined = undefined_value();
1952 for (Object* o = array_buffers_list(); o != undefined;) {
1953 JSArrayBuffer* buffer = JSArrayBuffer::cast(o);
1954 Runtime::FreeArrayBuffer(isolate(), buffer);
1955 o = buffer->weak_next();
1957 array_buffers_list_ = undefined;
1962 struct WeakListVisitor<AllocationSite> {
1963 static void SetWeakNext(AllocationSite* obj, Object* next) {
1964 obj->set_weak_next(next);
1967 static Object* WeakNext(AllocationSite* obj) {
1968 return obj->weak_next();
1971 static void VisitLiveObject(Heap* heap,
1972 AllocationSite* site,
1973 WeakObjectRetainer* retainer,
1974 bool record_slots) {}
1976 static void VisitPhantomObject(Heap* heap, AllocationSite* phantom) {}
1978 static int WeakNextOffset() {
1979 return AllocationSite::kWeakNextOffset;
1984 void Heap::ProcessAllocationSites(WeakObjectRetainer* retainer,
1985 bool record_slots) {
1986 Object* allocation_site_obj =
1987 VisitWeakList<AllocationSite>(this,
1988 allocation_sites_list(),
1989 retainer, record_slots);
1990 set_allocation_sites_list(allocation_site_obj);
1994 void Heap::ResetAllAllocationSitesDependentCode(PretenureFlag flag) {
1995 DisallowHeapAllocation no_allocation_scope;
1996 Object* cur = allocation_sites_list();
1997 bool marked = false;
1998 while (cur->IsAllocationSite()) {
1999 AllocationSite* casted = AllocationSite::cast(cur);
2000 if (casted->GetPretenureMode() == flag) {
2001 casted->ResetPretenureDecision();
2002 bool got_marked = casted->dependent_code()->MarkCodeForDeoptimization(
2004 DependentCode::kAllocationSiteTenuringChangedGroup);
2005 if (got_marked) marked = true;
2007 cur = casted->weak_next();
2009 if (marked) isolate_->stack_guard()->DeoptMarkedCode();
2013 void Heap::EvaluateOldSpaceLocalPretenuring(
2014 uint64_t size_of_objects_before_gc) {
2015 uint64_t size_of_objects_after_gc = SizeOfObjects();
2016 double old_generation_survival_rate =
2017 (static_cast<double>(size_of_objects_after_gc) * 100) /
2018 static_cast<double>(size_of_objects_before_gc);
2020 if (old_generation_survival_rate < kOldSurvivalRateLowThreshold) {
2021 // Too many objects died in the old generation, pretenuring of wrong
2022 // allocation sites may be the cause for that. We have to deopt all
2023 // dependent code registered in the allocation sites to re-evaluate
2024 // our pretenuring decisions.
2025 ResetAllAllocationSitesDependentCode(TENURED);
2026 if (FLAG_trace_pretenuring) {
2027 PrintF("Deopt all allocation sites dependent code due to low survival "
2028 "rate in the old generation %f\n", old_generation_survival_rate);
2034 void Heap::VisitExternalResources(v8::ExternalResourceVisitor* visitor) {
2035 DisallowHeapAllocation no_allocation;
2036 // All external strings are listed in the external string table.
2038 class ExternalStringTableVisitorAdapter : public ObjectVisitor {
2040 explicit ExternalStringTableVisitorAdapter(
2041 v8::ExternalResourceVisitor* visitor) : visitor_(visitor) {}
2042 virtual void VisitPointers(Object** start, Object** end) {
2043 for (Object** p = start; p < end; p++) {
2044 ASSERT((*p)->IsExternalString());
2045 visitor_->VisitExternalString(Utils::ToLocal(
2046 Handle<String>(String::cast(*p))));
2050 v8::ExternalResourceVisitor* visitor_;
2051 } external_string_table_visitor(visitor);
2053 external_string_table_.Iterate(&external_string_table_visitor);
2057 class NewSpaceScavenger : public StaticNewSpaceVisitor<NewSpaceScavenger> {
2059 static inline void VisitPointer(Heap* heap, Object** p) {
2060 Object* object = *p;
2061 if (!heap->InNewSpace(object)) return;
2062 Heap::ScavengeObject(reinterpret_cast<HeapObject**>(p),
2063 reinterpret_cast<HeapObject*>(object));
2068 Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
2069 Address new_space_front) {
2071 SemiSpace::AssertValidRange(new_space_front, new_space_.top());
2072 // The addresses new_space_front and new_space_.top() define a
2073 // queue of unprocessed copied objects. Process them until the queue is empty.
2075 while (new_space_front != new_space_.top()) {
2076 if (!NewSpacePage::IsAtEnd(new_space_front)) {
2077 HeapObject* object = HeapObject::FromAddress(new_space_front);
2079 NewSpaceScavenger::IterateBody(object->map(), object);
2082 NewSpacePage::FromLimit(new_space_front)->next_page()->area_start();
2086 // Promote and process all the to-be-promoted objects.
2088 StoreBufferRebuildScope scope(this,
2090 &ScavengeStoreBufferCallback);
2091 while (!promotion_queue()->is_empty()) {
2094 promotion_queue()->remove(&target, &size);
2096 // A promoted object might already be partially visited
2097 // during old space pointer iteration. Thus we search specifically
2098 // for pointers into the from-space rather than for pointers into new space.
2100 ASSERT(!target->IsMap());
2101 IterateAndMarkPointersToFromSpace(target->address(),
2102 target->address() + size,
2107 // Take another spin if there are now unswept objects in new space
2108 // (there are currently no more unswept promoted objects).
2109 } while (new_space_front != new_space_.top());
2111 return new_space_front;
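// DoScavenge is a Cheney-style scan: everything between new_space_front and
// new_space_.top() is an already-copied object whose fields still have to be
// visited, and visiting may copy further objects, which pushes the top pointer
// out. The loop ends only when the scan pointer catches up with the allocation
// pointer. The sketch below shows the same two-pointer scheme over a plain
// work list; it is illustrative only and all names are hypothetical.
namespace cheney_scan_example {

struct ExampleObj {
  ExampleObj* child;  // A single outgoing reference, for simplicity.
  bool copied;
};

static const int kWorkListCapacity = 1024;

// Visits the graph reachable from |root| breadth-first. |work_list| plays the
// role of to-space: |scan| is new_space_front, |top| is new_space_.top().
static int ScanAll(ExampleObj* root, ExampleObj** work_list) {
  int scan = 0;
  int top = 0;
  if (root != NULL && !root->copied) {
    root->copied = true;
    work_list[top++] = root;
  }
  while (scan < top) {  // The queue of unprocessed copied objects.
    ExampleObj* current = work_list[scan++];
    ExampleObj* child = current->child;
    if (child != NULL && !child->copied && top < kWorkListCapacity) {
      child->copied = true;      // "Copy" the child and enqueue it, which
      work_list[top++] = child;  // advances the allocation pointer.
    }
  }
  return top;  // Number of objects processed.
}

}  // namespace cheney_scan_example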
2115 STATIC_ASSERT((FixedDoubleArray::kHeaderSize & kDoubleAlignmentMask) == 0);
2116 STATIC_ASSERT((ConstantPoolArray::kHeaderSize & kDoubleAlignmentMask) == 0);
2119 INLINE(static HeapObject* EnsureDoubleAligned(Heap* heap,
2123 static HeapObject* EnsureDoubleAligned(Heap* heap,
2126 if ((OffsetFrom(object->address()) & kDoubleAlignmentMask) != 0) {
2127 heap->CreateFillerObjectAt(object->address(), kPointerSize);
2128 return HeapObject::FromAddress(object->address() + kPointerSize);
2130 heap->CreateFillerObjectAt(object->address() + size - kPointerSize,
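// EnsureDoubleAligned relies on the caller over-allocating by one word; the
// one-word filler then goes either before or after the payload so that the
// payload itself starts on an 8-byte boundary. The underlying address
// arithmetic is just "round up to a power-of-two alignment"; a self-contained
// sketch (illustrative only, hypothetical name):
namespace alignment_example {

// Rounds |address| up to the next multiple of |alignment|, which must be a
// power of two.
static unsigned long RoundUpToAlignment(unsigned long address,
                                        unsigned long alignment) {
  unsigned long mask = alignment - 1;
  return (address + mask) & ~mask;
}

// Example: RoundUpToAlignment(0x1004, 8) == 0x1008, while an already aligned
// address such as 0x1010 is returned unchanged.

}  // namespace alignment_example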
2137 enum LoggingAndProfiling {
2138 LOGGING_AND_PROFILING_ENABLED,
2139 LOGGING_AND_PROFILING_DISABLED
2143 enum MarksHandling { TRANSFER_MARKS, IGNORE_MARKS };
2146 template<MarksHandling marks_handling,
2147 LoggingAndProfiling logging_and_profiling_mode>
2148 class ScavengingVisitor : public StaticVisitorBase {
2150 static void Initialize() {
2151 table_.Register(kVisitSeqOneByteString, &EvacuateSeqOneByteString);
2152 table_.Register(kVisitSeqTwoByteString, &EvacuateSeqTwoByteString);
2153 table_.Register(kVisitShortcutCandidate, &EvacuateShortcutCandidate);
2154 table_.Register(kVisitByteArray, &EvacuateByteArray);
2155 table_.Register(kVisitFixedArray, &EvacuateFixedArray);
2156 table_.Register(kVisitFixedDoubleArray, &EvacuateFixedDoubleArray);
2157 table_.Register(kVisitFixedTypedArray, &EvacuateFixedTypedArray);
2158 table_.Register(kVisitFixedFloat64Array, &EvacuateFixedFloat64Array);
2160 table_.Register(kVisitNativeContext,
2161 &ObjectEvacuationStrategy<POINTER_OBJECT>::
2162 template VisitSpecialized<Context::kSize>);
2164 table_.Register(kVisitConsString,
2165 &ObjectEvacuationStrategy<POINTER_OBJECT>::
2166 template VisitSpecialized<ConsString::kSize>);
2168 table_.Register(kVisitSlicedString,
2169 &ObjectEvacuationStrategy<POINTER_OBJECT>::
2170 template VisitSpecialized<SlicedString::kSize>);
2172 table_.Register(kVisitSymbol,
2173 &ObjectEvacuationStrategy<POINTER_OBJECT>::
2174 template VisitSpecialized<Symbol::kSize>);
2176 table_.Register(kVisitSharedFunctionInfo,
2177 &ObjectEvacuationStrategy<POINTER_OBJECT>::
2178 template VisitSpecialized<SharedFunctionInfo::kSize>);
2180 table_.Register(kVisitJSWeakMap,
2181 &ObjectEvacuationStrategy<POINTER_OBJECT>::
2184 table_.Register(kVisitJSWeakSet,
2185 &ObjectEvacuationStrategy<POINTER_OBJECT>::
2188 table_.Register(kVisitJSArrayBuffer,
2189 &ObjectEvacuationStrategy<POINTER_OBJECT>::
2192 table_.Register(kVisitJSTypedArray,
2193 &ObjectEvacuationStrategy<POINTER_OBJECT>::
2196 table_.Register(kVisitJSDataView,
2197 &ObjectEvacuationStrategy<POINTER_OBJECT>::
2200 table_.Register(kVisitJSRegExp,
2201 &ObjectEvacuationStrategy<POINTER_OBJECT>::
2204 if (marks_handling == IGNORE_MARKS) {
2205 table_.Register(kVisitJSFunction,
2206 &ObjectEvacuationStrategy<POINTER_OBJECT>::
2207 template VisitSpecialized<JSFunction::kSize>);
2209 table_.Register(kVisitJSFunction, &EvacuateJSFunction);
2212 table_.RegisterSpecializations<ObjectEvacuationStrategy<DATA_OBJECT>,
2214 kVisitDataObjectGeneric>();
2216 table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>,
2218 kVisitJSObjectGeneric>();
2220 table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>,
2222 kVisitStructGeneric>();
2225 static VisitorDispatchTable<ScavengingCallback>* GetTable() {
2230 enum ObjectContents { DATA_OBJECT, POINTER_OBJECT };
2232 static void RecordCopiedObject(Heap* heap, HeapObject* obj) {
2233 bool should_record = false;
2235 should_record = FLAG_heap_stats;
2237 should_record = should_record || FLAG_log_gc;
2238 if (should_record) {
2239 if (heap->new_space()->Contains(obj)) {
2240 heap->new_space()->RecordAllocation(obj);
2242 heap->new_space()->RecordPromotion(obj);
2247 // Helper function used by CopyObject to copy a source object to an
2248 // allocated target object and update the forwarding pointer in the source
2249 // object. Returns the target object.
2250 INLINE(static void MigrateObject(Heap* heap,
2254 // Copy the content of source to target.
2255 heap->CopyBlock(target->address(), source->address(), size);
2257 // Set the forwarding address.
2258 source->set_map_word(MapWord::FromForwardingAddress(target));
2260 if (logging_and_profiling_mode == LOGGING_AND_PROFILING_ENABLED) {
2261 // Update NewSpace stats if necessary.
2262 RecordCopiedObject(heap, target);
2263 Isolate* isolate = heap->isolate();
2264 HeapProfiler* heap_profiler = isolate->heap_profiler();
2265 if (heap_profiler->is_tracking_object_moves()) {
2266 heap_profiler->ObjectMoveEvent(source->address(), target->address(),
2269 if (isolate->logger()->is_logging_code_events() ||
2270 isolate->cpu_profiler()->is_profiling()) {
2271 if (target->IsSharedFunctionInfo()) {
2272 PROFILE(isolate, SharedFunctionInfoMoveEvent(
2273 source->address(), target->address()));
2278 if (marks_handling == TRANSFER_MARKS) {
2279 if (Marking::TransferColor(source, target)) {
2280 MemoryChunk::IncrementLiveBytesFromGC(target->address(), size);
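// MigrateObject copies the object's payload and then overwrites the source's
// map word with a forwarding address, so that any later visitor reaching the
// stale copy finds the new location instead of copying the object a second
// time. A minimal sketch of the forwarding idea (illustrative only,
// hypothetical names):
namespace forwarding_example {

struct SmallObj {
  SmallObj* forwarded_to;  // NULL until the object has been moved.
  int payload;
};

static SmallObj* MoveOnce(SmallObj* source, SmallObj* target) {
  if (source->forwarded_to != NULL) {
    return source->forwarded_to;      // Already moved: follow the pointer.
  }
  target->payload = source->payload;  // Copy the contents.
  target->forwarded_to = NULL;
  source->forwarded_to = target;      // Leave a forwarding address behind.
  return target;
}

}  // namespace forwarding_example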
2286 template<ObjectContents object_contents, int alignment>
2287 static inline void EvacuateObject(Map* map,
2291 SLOW_ASSERT(object_size <= Page::kMaxRegularHeapObjectSize);
2292 SLOW_ASSERT(object->Size() == object_size);
2294 int allocation_size = object_size;
2295 if (alignment != kObjectAlignment) {
2296 ASSERT(alignment == kDoubleAlignment);
2297 allocation_size += kPointerSize;
2300 Heap* heap = map->GetHeap();
2301 if (heap->ShouldBePromoted(object->address(), object_size)) {
2302 MaybeObject* maybe_result;
2304 if (object_contents == DATA_OBJECT) {
2305 ASSERT(heap->AllowedToBeMigrated(object, OLD_DATA_SPACE));
2306 maybe_result = heap->old_data_space()->AllocateRaw(allocation_size);
2308 ASSERT(heap->AllowedToBeMigrated(object, OLD_POINTER_SPACE));
2309 maybe_result = heap->old_pointer_space()->AllocateRaw(allocation_size);
2312 Object* result = NULL; // Initialization to please compiler.
2313 if (maybe_result->ToObject(&result)) {
2314 HeapObject* target = HeapObject::cast(result);
2316 if (alignment != kObjectAlignment) {
2317 target = EnsureDoubleAligned(heap, target, allocation_size);
2320 // Order is important: slot might be inside of the target if target
2321 // was allocated over a dead object and slot comes from the store buffer.
2324 MigrateObject(heap, object, target, object_size);
2326 if (object_contents == POINTER_OBJECT) {
2327 if (map->instance_type() == JS_FUNCTION_TYPE) {
2328 heap->promotion_queue()->insert(
2329 target, JSFunction::kNonWeakFieldsEndOffset);
2331 heap->promotion_queue()->insert(target, object_size);
2335 heap->tracer()->increment_promoted_objects_size(object_size);
2339 ASSERT(heap->AllowedToBeMigrated(object, NEW_SPACE));
2340 MaybeObject* allocation = heap->new_space()->AllocateRaw(allocation_size);
2341 heap->promotion_queue()->SetNewLimit(heap->new_space()->top());
2342 Object* result = allocation->ToObjectUnchecked();
2343 HeapObject* target = HeapObject::cast(result);
2345 if (alignment != kObjectAlignment) {
2346 target = EnsureDoubleAligned(heap, target, allocation_size);
2349 // Order is important: slot might be inside of the target if target
2350 // was allocated over a dead object and slot comes from the store buffer.
2353 MigrateObject(heap, object, target, object_size);
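// EvacuateObject makes the copy-or-promote decision: objects the heap wants to
// promote are allocated in one of the old spaces -- data space for objects
// without outgoing pointers, pointer space otherwise -- and everything else is
// copied within new space. A compact sketch of that decision (illustrative
// only, hypothetical names):
namespace evacuate_decision_example {

enum ExampleDestination {
  KEEP_IN_NEW_SPACE,
  PROMOTE_TO_OLD_DATA_SPACE,
  PROMOTE_TO_OLD_POINTER_SPACE
};

static ExampleDestination ChooseDestination(bool should_be_promoted,
                                            bool contains_pointers) {
  if (!should_be_promoted) return KEEP_IN_NEW_SPACE;
  // Pointer-free objects go to the old data space, which the collector does
  // not have to scan for references.
  return contains_pointers ? PROMOTE_TO_OLD_POINTER_SPACE
                           : PROMOTE_TO_OLD_DATA_SPACE;
}

}  // namespace evacuate_decision_example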
2358 static inline void EvacuateJSFunction(Map* map,
2360 HeapObject* object) {
2361 ObjectEvacuationStrategy<POINTER_OBJECT>::
2362 template VisitSpecialized<JSFunction::kSize>(map, slot, object);
2364 HeapObject* target = *slot;
2365 MarkBit mark_bit = Marking::MarkBitFrom(target);
2366 if (Marking::IsBlack(mark_bit)) {
2367 // This object is black and it might not be rescanned by the marker.
2368 // We should explicitly record the code entry slot for compaction because
2369 // promotion queue processing (IterateAndMarkPointersToFromSpace) will
2370 // miss it, as it is not HeapObject-tagged.
2371 Address code_entry_slot =
2372 target->address() + JSFunction::kCodeEntryOffset;
2373 Code* code = Code::cast(Code::GetObjectFromEntryAddress(code_entry_slot));
2374 map->GetHeap()->mark_compact_collector()->
2375 RecordCodeEntrySlot(code_entry_slot, code);
2380 static inline void EvacuateFixedArray(Map* map,
2382 HeapObject* object) {
2383 int object_size = FixedArray::BodyDescriptor::SizeOf(map, object);
2384 EvacuateObject<POINTER_OBJECT, kObjectAlignment>(
2385 map, slot, object, object_size);
2389 static inline void EvacuateFixedDoubleArray(Map* map,
2391 HeapObject* object) {
2392 int length = reinterpret_cast<FixedDoubleArray*>(object)->length();
2393 int object_size = FixedDoubleArray::SizeFor(length);
2394 EvacuateObject<DATA_OBJECT, kDoubleAlignment>(
2395 map, slot, object, object_size);
2399 static inline void EvacuateFixedTypedArray(Map* map,
2401 HeapObject* object) {
2402 int object_size = reinterpret_cast<FixedTypedArrayBase*>(object)->size();
2403 EvacuateObject<DATA_OBJECT, kObjectAlignment>(
2404 map, slot, object, object_size);
2408 static inline void EvacuateFixedFloat64Array(Map* map,
2410 HeapObject* object) {
2411 int object_size = reinterpret_cast<FixedFloat64Array*>(object)->size();
2412 EvacuateObject<DATA_OBJECT, kDoubleAlignment>(
2413 map, slot, object, object_size);
2417 static inline void EvacuateByteArray(Map* map,
2419 HeapObject* object) {
2420 int object_size = reinterpret_cast<ByteArray*>(object)->ByteArraySize();
2421 EvacuateObject<DATA_OBJECT, kObjectAlignment>(
2422 map, slot, object, object_size);
2426 static inline void EvacuateSeqOneByteString(Map* map,
2428 HeapObject* object) {
2429 int object_size = SeqOneByteString::cast(object)->
2430 SeqOneByteStringSize(map->instance_type());
2431 EvacuateObject<DATA_OBJECT, kObjectAlignment>(
2432 map, slot, object, object_size);
2436 static inline void EvacuateSeqTwoByteString(Map* map,
2438 HeapObject* object) {
2439 int object_size = SeqTwoByteString::cast(object)->
2440 SeqTwoByteStringSize(map->instance_type());
2441 EvacuateObject<DATA_OBJECT, kObjectAlignment>(
2442 map, slot, object, object_size);
2446 static inline bool IsShortcutCandidate(int type) {
2447 return ((type & kShortcutTypeMask) == kShortcutTypeTag);
2450 static inline void EvacuateShortcutCandidate(Map* map,
2452 HeapObject* object) {
2453 ASSERT(IsShortcutCandidate(map->instance_type()));
2455 Heap* heap = map->GetHeap();
2457 if (marks_handling == IGNORE_MARKS &&
2458 ConsString::cast(object)->unchecked_second() ==
2459 heap->empty_string()) {
2461 HeapObject::cast(ConsString::cast(object)->unchecked_first());
2465 if (!heap->InNewSpace(first)) {
2466 object->set_map_word(MapWord::FromForwardingAddress(first));
2470 MapWord first_word = first->map_word();
2471 if (first_word.IsForwardingAddress()) {
2472 HeapObject* target = first_word.ToForwardingAddress();
2475 object->set_map_word(MapWord::FromForwardingAddress(target));
2479 heap->DoScavengeObject(first->map(), slot, first);
2480 object->set_map_word(MapWord::FromForwardingAddress(*slot));
2484 int object_size = ConsString::kSize;
2485 EvacuateObject<POINTER_OBJECT, kObjectAlignment>(
2486 map, slot, object, object_size);
2489 template<ObjectContents object_contents>
2490 class ObjectEvacuationStrategy {
2492 template<int object_size>
2493 static inline void VisitSpecialized(Map* map,
2495 HeapObject* object) {
2496 EvacuateObject<object_contents, kObjectAlignment>(
2497 map, slot, object, object_size);
2500 static inline void Visit(Map* map,
2502 HeapObject* object) {
2503 int object_size = map->instance_size();
2504 EvacuateObject<object_contents, kObjectAlignment>(
2505 map, slot, object, object_size);
2509 static VisitorDispatchTable<ScavengingCallback> table_;
2513 template<MarksHandling marks_handling,
2514 LoggingAndProfiling logging_and_profiling_mode>
2515 VisitorDispatchTable<ScavengingCallback>
2516 ScavengingVisitor<marks_handling, logging_and_profiling_mode>::table_;
2519 static void InitializeScavengingVisitorsTables() {
2520 ScavengingVisitor<TRANSFER_MARKS,
2521 LOGGING_AND_PROFILING_DISABLED>::Initialize();
2522 ScavengingVisitor<IGNORE_MARKS, LOGGING_AND_PROFILING_DISABLED>::Initialize();
2523 ScavengingVisitor<TRANSFER_MARKS,
2524 LOGGING_AND_PROFILING_ENABLED>::Initialize();
2525 ScavengingVisitor<IGNORE_MARKS, LOGGING_AND_PROFILING_ENABLED>::Initialize();
2529 void Heap::SelectScavengingVisitorsTable() {
2530 bool logging_and_profiling =
2531 isolate()->logger()->is_logging() ||
2532 isolate()->cpu_profiler()->is_profiling() ||
2533 (isolate()->heap_profiler() != NULL &&
2534 isolate()->heap_profiler()->is_tracking_object_moves());
2536 if (!incremental_marking()->IsMarking()) {
2537 if (!logging_and_profiling) {
2538 scavenging_visitors_table_.CopyFrom(
2539 ScavengingVisitor<IGNORE_MARKS,
2540 LOGGING_AND_PROFILING_DISABLED>::GetTable());
2542 scavenging_visitors_table_.CopyFrom(
2543 ScavengingVisitor<IGNORE_MARKS,
2544 LOGGING_AND_PROFILING_ENABLED>::GetTable());
2547 if (!logging_and_profiling) {
2548 scavenging_visitors_table_.CopyFrom(
2549 ScavengingVisitor<TRANSFER_MARKS,
2550 LOGGING_AND_PROFILING_DISABLED>::GetTable());
2552 scavenging_visitors_table_.CopyFrom(
2553 ScavengingVisitor<TRANSFER_MARKS,
2554 LOGGING_AND_PROFILING_ENABLED>::GetTable());
2557 if (incremental_marking()->IsCompacting()) {
2558 // When compacting, forbid short-circuiting of cons strings.
2559 // The scavenging code relies on the fact that a new space object
2560 // cannot be evacuated into an evacuation candidate, and
2561 // short-circuiting violates this assumption.
2562 scavenging_visitors_table_.Register(
2563 StaticVisitorBase::kVisitShortcutCandidate,
2564 scavenging_visitors_table_.GetVisitorById(
2565 StaticVisitorBase::kVisitConsString));
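// Four ScavengingVisitor instantiations exist (marks handling x logging), and
// SelectScavengingVisitorsTable copies the one matching the current
// configuration into the table the scavenger dispatches through. The selection
// itself is a two-boolean lookup; a self-contained sketch (illustrative only,
// hypothetical names):
namespace table_selection_example {

enum ExampleTable {
  IGNORE_MARKS_NO_LOGGING,
  IGNORE_MARKS_WITH_LOGGING,
  TRANSFER_MARKS_NO_LOGGING,
  TRANSFER_MARKS_WITH_LOGGING
};

static ExampleTable SelectTable(bool incremental_marking_on, bool logging_on) {
  if (!incremental_marking_on) {
    // Outside incremental marking there are no mark bits to transfer.
    return logging_on ? IGNORE_MARKS_WITH_LOGGING : IGNORE_MARKS_NO_LOGGING;
  }
  return logging_on ? TRANSFER_MARKS_WITH_LOGGING : TRANSFER_MARKS_NO_LOGGING;
}

}  // namespace table_selection_example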
2571 void Heap::ScavengeObjectSlow(HeapObject** p, HeapObject* object) {
2572 SLOW_ASSERT(object->GetIsolate()->heap()->InFromSpace(object));
2573 MapWord first_word = object->map_word();
2574 SLOW_ASSERT(!first_word.IsForwardingAddress());
2575 Map* map = first_word.ToMap();
2576 map->GetHeap()->DoScavengeObject(map, p, object);
2580 MaybeObject* Heap::AllocatePartialMap(InstanceType instance_type,
2581 int instance_size) {
2583 MaybeObject* maybe_result = AllocateRaw(Map::kSize, MAP_SPACE, MAP_SPACE);
2584 if (!maybe_result->ToObject(&result)) return maybe_result;
2586 // Map::cast cannot be used due to uninitialized map field.
2587 reinterpret_cast<Map*>(result)->set_map(raw_unchecked_meta_map());
2588 reinterpret_cast<Map*>(result)->set_instance_type(instance_type);
2589 reinterpret_cast<Map*>(result)->set_instance_size(instance_size);
2590 reinterpret_cast<Map*>(result)->set_visitor_id(
2591 StaticVisitorBase::GetVisitorId(instance_type, instance_size));
2592 reinterpret_cast<Map*>(result)->set_inobject_properties(0);
2593 reinterpret_cast<Map*>(result)->set_pre_allocated_property_fields(0);
2594 reinterpret_cast<Map*>(result)->set_unused_property_fields(0);
2595 reinterpret_cast<Map*>(result)->set_bit_field(0);
2596 reinterpret_cast<Map*>(result)->set_bit_field2(0);
2597 int bit_field3 = Map::EnumLengthBits::encode(kInvalidEnumCacheSentinel) |
2598 Map::OwnsDescriptors::encode(true);
2599 reinterpret_cast<Map*>(result)->set_bit_field3(bit_field3);
2604 MaybeObject* Heap::AllocateMap(InstanceType instance_type,
2606 ElementsKind elements_kind) {
2608 MaybeObject* maybe_result = AllocateRaw(Map::kSize, MAP_SPACE, MAP_SPACE);
2609 if (!maybe_result->To(&result)) return maybe_result;
2611 Map* map = reinterpret_cast<Map*>(result);
2612 map->set_map_no_write_barrier(meta_map());
2613 map->set_instance_type(instance_type);
2614 map->set_visitor_id(
2615 StaticVisitorBase::GetVisitorId(instance_type, instance_size));
2616 map->set_prototype(null_value(), SKIP_WRITE_BARRIER);
2617 map->set_constructor(null_value(), SKIP_WRITE_BARRIER);
2618 map->set_instance_size(instance_size);
2619 map->set_inobject_properties(0);
2620 map->set_pre_allocated_property_fields(0);
2621 map->set_code_cache(empty_fixed_array(), SKIP_WRITE_BARRIER);
2622 map->set_dependent_code(DependentCode::cast(empty_fixed_array()),
2623 SKIP_WRITE_BARRIER);
2624 map->init_back_pointer(undefined_value());
2625 map->set_unused_property_fields(0);
2626 map->set_instance_descriptors(empty_descriptor_array());
2627 map->set_bit_field(0);
2628 map->set_bit_field2(1 << Map::kIsExtensible);
2629 int bit_field3 = Map::EnumLengthBits::encode(kInvalidEnumCacheSentinel) |
2630 Map::OwnsDescriptors::encode(true);
2631 map->set_bit_field3(bit_field3);
2632 map->set_elements_kind(elements_kind);
2638 MaybeObject* Heap::AllocateCodeCache() {
2639 CodeCache* code_cache;
2640 { MaybeObject* maybe_code_cache = AllocateStruct(CODE_CACHE_TYPE);
2641 if (!maybe_code_cache->To(&code_cache)) return maybe_code_cache;
2643 code_cache->set_default_cache(empty_fixed_array(), SKIP_WRITE_BARRIER);
2644 code_cache->set_normal_type_cache(undefined_value(), SKIP_WRITE_BARRIER);
2649 MaybeObject* Heap::AllocatePolymorphicCodeCache() {
2650 return AllocateStruct(POLYMORPHIC_CODE_CACHE_TYPE);
2654 MaybeObject* Heap::AllocateAccessorPair() {
2655 AccessorPair* accessors;
2656 { MaybeObject* maybe_accessors = AllocateStruct(ACCESSOR_PAIR_TYPE);
2657 if (!maybe_accessors->To(&accessors)) return maybe_accessors;
2659 accessors->set_getter(the_hole_value(), SKIP_WRITE_BARRIER);
2660 accessors->set_setter(the_hole_value(), SKIP_WRITE_BARRIER);
2661 accessors->set_access_flags(Smi::FromInt(0), SKIP_WRITE_BARRIER);
2666 MaybeObject* Heap::AllocateTypeFeedbackInfo() {
2667 TypeFeedbackInfo* info;
2668 { MaybeObject* maybe_info = AllocateStruct(TYPE_FEEDBACK_INFO_TYPE);
2669 if (!maybe_info->To(&info)) return maybe_info;
2671 info->initialize_storage();
2672 info->set_type_feedback_cells(TypeFeedbackCells::cast(empty_fixed_array()),
2673 SKIP_WRITE_BARRIER);
2678 MaybeObject* Heap::AllocateAliasedArgumentsEntry(int aliased_context_slot) {
2679 AliasedArgumentsEntry* entry;
2680 { MaybeObject* maybe_entry = AllocateStruct(ALIASED_ARGUMENTS_ENTRY_TYPE);
2681 if (!maybe_entry->To(&entry)) return maybe_entry;
2683 entry->set_aliased_context_slot(aliased_context_slot);
2688 const Heap::StringTypeTable Heap::string_type_table[] = {
2689 #define STRING_TYPE_ELEMENT(type, size, name, camel_name) \
2690 {type, size, k##camel_name##MapRootIndex},
2691 STRING_TYPE_LIST(STRING_TYPE_ELEMENT)
2692 #undef STRING_TYPE_ELEMENT
2696 const Heap::ConstantStringTable Heap::constant_string_table[] = {
2697 #define CONSTANT_STRING_ELEMENT(name, contents) \
2698 {contents, k##name##RootIndex},
2699 INTERNALIZED_STRING_LIST(CONSTANT_STRING_ELEMENT)
2700 #undef CONSTANT_STRING_ELEMENT
2704 const Heap::StructTable Heap::struct_table[] = {
2705 #define STRUCT_TABLE_ELEMENT(NAME, Name, name) \
2706 { NAME##_TYPE, Name::kSize, k##Name##MapRootIndex },
2707 STRUCT_LIST(STRUCT_TABLE_ELEMENT)
2708 #undef STRUCT_TABLE_ELEMENT
2712 bool Heap::CreateInitialMaps() {
2714 { MaybeObject* maybe_obj = AllocatePartialMap(MAP_TYPE, Map::kSize);
2715 if (!maybe_obj->ToObject(&obj)) return false;
2717 // Map::cast cannot be used due to uninitialized map field.
2718 Map* new_meta_map = reinterpret_cast<Map*>(obj);
2719 set_meta_map(new_meta_map);
2720 new_meta_map->set_map(new_meta_map);
2722 { MaybeObject* maybe_obj =
2723 AllocatePartialMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2724 if (!maybe_obj->ToObject(&obj)) return false;
2726 set_fixed_array_map(Map::cast(obj));
2728 { MaybeObject* maybe_obj = AllocatePartialMap(ODDBALL_TYPE, Oddball::kSize);
2729 if (!maybe_obj->ToObject(&obj)) return false;
2731 set_oddball_map(Map::cast(obj));
2733 { MaybeObject* maybe_obj =
2734 AllocatePartialMap(CONSTANT_POOL_ARRAY_TYPE, kVariableSizeSentinel);
2735 if (!maybe_obj->ToObject(&obj)) return false;
2737 set_constant_pool_array_map(Map::cast(obj));
2739 // Allocate the empty array.
2740 { MaybeObject* maybe_obj = AllocateEmptyFixedArray();
2741 if (!maybe_obj->ToObject(&obj)) return false;
2743 set_empty_fixed_array(FixedArray::cast(obj));
2745 { MaybeObject* maybe_obj = Allocate(oddball_map(), OLD_POINTER_SPACE);
2746 if (!maybe_obj->ToObject(&obj)) return false;
2748 set_null_value(Oddball::cast(obj));
2749 Oddball::cast(obj)->set_kind(Oddball::kNull);
2751 { MaybeObject* maybe_obj = Allocate(oddball_map(), OLD_POINTER_SPACE);
2752 if (!maybe_obj->ToObject(&obj)) return false;
2754 set_undefined_value(Oddball::cast(obj));
2755 Oddball::cast(obj)->set_kind(Oddball::kUndefined);
2756 ASSERT(!InNewSpace(undefined_value()));
2758 // Allocate the empty descriptor array.
2759 { MaybeObject* maybe_obj = AllocateEmptyFixedArray();
2760 if (!maybe_obj->ToObject(&obj)) return false;
2762 set_empty_descriptor_array(DescriptorArray::cast(obj));
2764 // Allocate the constant pool array.
2765 { MaybeObject* maybe_obj = AllocateEmptyConstantPoolArray();
2766 if (!maybe_obj->ToObject(&obj)) return false;
2768 set_empty_constant_pool_array(ConstantPoolArray::cast(obj));
2770 // Fix the instance_descriptors for the existing maps.
2771 meta_map()->set_code_cache(empty_fixed_array());
2772 meta_map()->set_dependent_code(DependentCode::cast(empty_fixed_array()));
2773 meta_map()->init_back_pointer(undefined_value());
2774 meta_map()->set_instance_descriptors(empty_descriptor_array());
2776 fixed_array_map()->set_code_cache(empty_fixed_array());
2777 fixed_array_map()->set_dependent_code(
2778 DependentCode::cast(empty_fixed_array()));
2779 fixed_array_map()->init_back_pointer(undefined_value());
2780 fixed_array_map()->set_instance_descriptors(empty_descriptor_array());
2782 oddball_map()->set_code_cache(empty_fixed_array());
2783 oddball_map()->set_dependent_code(DependentCode::cast(empty_fixed_array()));
2784 oddball_map()->init_back_pointer(undefined_value());
2785 oddball_map()->set_instance_descriptors(empty_descriptor_array());
2787 constant_pool_array_map()->set_code_cache(empty_fixed_array());
2788 constant_pool_array_map()->set_dependent_code(
2789 DependentCode::cast(empty_fixed_array()));
2790 constant_pool_array_map()->init_back_pointer(undefined_value());
2791 constant_pool_array_map()->set_instance_descriptors(empty_descriptor_array());
2793 // Fix prototype object for existing maps.
2794 meta_map()->set_prototype(null_value());
2795 meta_map()->set_constructor(null_value());
2797 fixed_array_map()->set_prototype(null_value());
2798 fixed_array_map()->set_constructor(null_value());
2800 oddball_map()->set_prototype(null_value());
2801 oddball_map()->set_constructor(null_value());
2803 constant_pool_array_map()->set_prototype(null_value());
2804 constant_pool_array_map()->set_constructor(null_value());
2807 #define ALLOCATE_MAP(instance_type, size, field_name) \
2809 if (!AllocateMap((instance_type), size)->To(&map)) return false; \
2810 set_##field_name##_map(map); \
2813 #define ALLOCATE_VARSIZE_MAP(instance_type, field_name) \
2814 ALLOCATE_MAP(instance_type, kVariableSizeSentinel, field_name)
2816 ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, fixed_cow_array)
2817 ASSERT(fixed_array_map() != fixed_cow_array_map());
2819 ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, scope_info)
2820 ALLOCATE_MAP(HEAP_NUMBER_TYPE, HeapNumber::kSize, heap_number)
2821 ALLOCATE_MAP(FLOAT32x4_TYPE, Float32x4::kSize, float32x4)
2822 ALLOCATE_MAP(INT32x4_TYPE, Int32x4::kSize, int32x4)
2823 ALLOCATE_MAP(SYMBOL_TYPE, Symbol::kSize, symbol)
2824 ALLOCATE_MAP(FOREIGN_TYPE, Foreign::kSize, foreign)
2826 for (unsigned i = 0; i < ARRAY_SIZE(string_type_table); i++) {
2827 const StringTypeTable& entry = string_type_table[i];
2828 { MaybeObject* maybe_obj = AllocateMap(entry.type, entry.size);
2829 if (!maybe_obj->ToObject(&obj)) return false;
2831 roots_[entry.index] = Map::cast(obj);
2834 ALLOCATE_VARSIZE_MAP(STRING_TYPE, undetectable_string)
2835 undetectable_string_map()->set_is_undetectable();
2837 ALLOCATE_VARSIZE_MAP(ASCII_STRING_TYPE, undetectable_ascii_string);
2838 undetectable_ascii_string_map()->set_is_undetectable();
2840 ALLOCATE_VARSIZE_MAP(FIXED_DOUBLE_ARRAY_TYPE, fixed_double_array)
2841 ALLOCATE_VARSIZE_MAP(BYTE_ARRAY_TYPE, byte_array)
2842 ALLOCATE_VARSIZE_MAP(FREE_SPACE_TYPE, free_space)
2844 #define ALLOCATE_EXTERNAL_ARRAY_MAP(Type, type, TYPE, ctype, size) \
2845 ALLOCATE_MAP(EXTERNAL_##TYPE##_ARRAY_TYPE, ExternalArray::kAlignedSize, \
2846 external_##type##_array)
2848 TYPED_ARRAYS(ALLOCATE_EXTERNAL_ARRAY_MAP)
2849 #undef ALLOCATE_EXTERNAL_ARRAY_MAP
2851 #define ALLOCATE_FIXED_TYPED_ARRAY_MAP(Type, type, TYPE, ctype, size) \
2852 ALLOCATE_VARSIZE_MAP(FIXED_##TYPE##_ARRAY_TYPE, \
2853 fixed_##type##_array)
2855 TYPED_ARRAYS(ALLOCATE_FIXED_TYPED_ARRAY_MAP)
2856 #undef ALLOCATE_FIXED_TYPED_ARRAY_MAP
2858 ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, non_strict_arguments_elements)
2860 ALLOCATE_VARSIZE_MAP(CODE_TYPE, code)
2862 ALLOCATE_MAP(CELL_TYPE, Cell::kSize, cell)
2863 ALLOCATE_MAP(PROPERTY_CELL_TYPE, PropertyCell::kSize, global_property_cell)
2864 ALLOCATE_MAP(FILLER_TYPE, kPointerSize, one_pointer_filler)
2865 ALLOCATE_MAP(FILLER_TYPE, 2 * kPointerSize, two_pointer_filler)
2868 for (unsigned i = 0; i < ARRAY_SIZE(struct_table); i++) {
2869 const StructTable& entry = struct_table[i];
2871 if (!AllocateMap(entry.type, entry.size)->To(&map))
2873 roots_[entry.index] = map;
2876 ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, hash_table)
2878 ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, function_context)
2879 ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, catch_context)
2880 ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, with_context)
2881 ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, block_context)
2882 ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, module_context)
2883 ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, global_context)
2885 ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, native_context)
2886 native_context_map()->set_dictionary_map(true);
2887 native_context_map()->set_visitor_id(
2888 StaticVisitorBase::kVisitNativeContext);
2890 ALLOCATE_MAP(SHARED_FUNCTION_INFO_TYPE, SharedFunctionInfo::kAlignedSize,
2891 shared_function_info)
2893 ALLOCATE_MAP(JS_MESSAGE_OBJECT_TYPE, JSMessageObject::kSize,
2895 ALLOCATE_MAP(JS_OBJECT_TYPE, JSObject::kHeaderSize + kPointerSize,
2897 external_map()->set_is_extensible(false);
2898 #undef ALLOCATE_VARSIZE_MAP
2903 { ByteArray* byte_array;
2904 if (!AllocateByteArray(0, TENURED)->To(&byte_array)) return false;
2905 set_empty_byte_array(byte_array);
2908 #define ALLOCATE_EMPTY_EXTERNAL_ARRAY(Type, type, TYPE, ctype, size) \
2909 { ExternalArray* obj; \
2910 if (!AllocateEmptyExternalArray(kExternal##Type##Array)->To(&obj)) \
2912 set_empty_external_##type##_array(obj); \
2915 TYPED_ARRAYS(ALLOCATE_EMPTY_EXTERNAL_ARRAY)
2916 #undef ALLOCATE_EMPTY_EXTERNAL_ARRAY
2918 ASSERT(!InNewSpace(empty_fixed_array()));
2923 MaybeObject* Heap::AllocateHeapNumber(double value, PretenureFlag pretenure) {
2924 // Statically ensure that it is safe to allocate heap numbers in paged spaces.
2926 int size = HeapNumber::kSize;
2927 STATIC_ASSERT(HeapNumber::kSize <= Page::kMaxRegularHeapObjectSize);
2929 AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
2932 { MaybeObject* maybe_result = AllocateRaw(size, space, OLD_DATA_SPACE);
2933 if (!maybe_result->ToObject(&result)) return maybe_result;
2936 HeapObject::cast(result)->set_map_no_write_barrier(heap_number_map());
2937 HeapNumber::cast(result)->set_value(value);
2942 MaybeObject* Heap::AllocateFloat32x4(float32x4_value_t value,
2943 PretenureFlag pretenure) {
2944 // Statically ensure that it is safe to allocate float32x4 objects in paged spaces.
2946 int size = Float32x4::kSize;
2947 STATIC_ASSERT(Float32x4::kSize <= Page::kMaxRegularHeapObjectSize);
2949 AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
2952 { MaybeObject* maybe_result = AllocateRaw(size, space, OLD_DATA_SPACE);
2953 if (!maybe_result->ToObject(&result)) return maybe_result;
2956 HeapObject::cast(result)->set_map_no_write_barrier(float32x4_map());
2957 Float32x4::cast(result)->set_value(value);
2962 MaybeObject* Heap::AllocateInt32x4(int32x4_value_t value,
2963 PretenureFlag pretenure) {
2964 // Statically ensure that it is safe to allocate int32x4 objects in paged spaces.
2966 int size = Int32x4::kSize;
2967 STATIC_ASSERT(Int32x4::kSize <= Page::kMaxRegularHeapObjectSize);
2969 AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
2972 { MaybeObject* maybe_result = AllocateRaw(size, space, OLD_DATA_SPACE);
2973 if (!maybe_result->ToObject(&result)) return maybe_result;
2976 HeapObject::cast(result)->set_map_no_write_barrier(int32x4_map());
2977 Int32x4::cast(result)->set_value(value);
2982 MaybeObject* Heap::AllocateCell(Object* value) {
2983 int size = Cell::kSize;
2984 STATIC_ASSERT(Cell::kSize <= Page::kMaxRegularHeapObjectSize);
2987 { MaybeObject* maybe_result = AllocateRaw(size, CELL_SPACE, CELL_SPACE);
2988 if (!maybe_result->ToObject(&result)) return maybe_result;
2990 HeapObject::cast(result)->set_map_no_write_barrier(cell_map());
2991 Cell::cast(result)->set_value(value);
2996 MaybeObject* Heap::AllocatePropertyCell() {
2997 int size = PropertyCell::kSize;
2998 STATIC_ASSERT(PropertyCell::kSize <= Page::kMaxRegularHeapObjectSize);
3001 MaybeObject* maybe_result =
3002 AllocateRaw(size, PROPERTY_CELL_SPACE, PROPERTY_CELL_SPACE);
3003 if (!maybe_result->ToObject(&result)) return maybe_result;
3005 HeapObject::cast(result)->set_map_no_write_barrier(
3006 global_property_cell_map());
3007 PropertyCell* cell = PropertyCell::cast(result);
3008 cell->set_dependent_code(DependentCode::cast(empty_fixed_array()),
3009 SKIP_WRITE_BARRIER);
3010 cell->set_value(the_hole_value());
3011 cell->set_type(HeapType::None());
3016 MaybeObject* Heap::AllocateBox(Object* value, PretenureFlag pretenure) {
3018 MaybeObject* maybe_result = AllocateStruct(BOX_TYPE);
3019 if (!maybe_result->To(&result)) return maybe_result;
3020 result->set_value(value);
3025 MaybeObject* Heap::AllocateAllocationSite() {
3026 AllocationSite* site;
3027 MaybeObject* maybe_result = Allocate(allocation_site_map(),
3029 if (!maybe_result->To(&site)) return maybe_result;
3033 site->set_weak_next(allocation_sites_list());
3034 set_allocation_sites_list(site);
3039 MaybeObject* Heap::CreateOddball(const char* to_string,
3043 { MaybeObject* maybe_result = Allocate(oddball_map(), OLD_POINTER_SPACE);
3044 if (!maybe_result->ToObject(&result)) return maybe_result;
3046 return Oddball::cast(result)->Initialize(this, to_string, to_number, kind);
3050 bool Heap::CreateApiObjects() {
3053 { MaybeObject* maybe_obj = AllocateMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
3054 if (!maybe_obj->ToObject(&obj)) return false;
3056 // Don't use Smi-only elements optimizations for objects with the neander
3057 // map. There are too many cases where element values are set directly,
3058 // without a bottleneck that could trap the Smi-only -> fast elements
3059 // transition, and there appears to be no benefit in optimizing this case.
3060 Map* new_neander_map = Map::cast(obj);
3061 new_neander_map->set_elements_kind(TERMINAL_FAST_ELEMENTS_KIND);
3062 set_neander_map(new_neander_map);
3064 { MaybeObject* maybe_obj = AllocateJSObjectFromMap(neander_map());
3065 if (!maybe_obj->ToObject(&obj)) return false;
3068 { MaybeObject* maybe_elements = AllocateFixedArray(2);
3069 if (!maybe_elements->ToObject(&elements)) return false;
3071 FixedArray::cast(elements)->set(0, Smi::FromInt(0));
3072 JSObject::cast(obj)->set_elements(FixedArray::cast(elements));
3073 set_message_listeners(JSObject::cast(obj));
3079 void Heap::CreateJSEntryStub() {
3081 set_js_entry_code(*stub.GetCode(isolate()));
3085 void Heap::CreateJSConstructEntryStub() {
3086 JSConstructEntryStub stub;
3087 set_js_construct_entry_code(*stub.GetCode(isolate()));
3091 void Heap::CreateFixedStubs() {
3092 // Here we create roots for fixed stubs. They are needed at GC
3093 // for cooking and uncooking (check out frames.cc).
3094 // This eliminates the need for doing a dictionary lookup in the
3095 // stub cache for these stubs.
3096 HandleScope scope(isolate());
3097 // gcc-4.4 has problem generating correct code of following snippet:
3098 // { JSEntryStub stub;
3099 // js_entry_code_ = *stub.GetCode();
3101 // { JSConstructEntryStub stub;
3102 // js_construct_entry_code_ = *stub.GetCode();
3104 // To work around the problem, make separate functions without inlining.
3105 Heap::CreateJSEntryStub();
3106 Heap::CreateJSConstructEntryStub();
3108 // Create stubs that should be there, so we don't unexpectedly have to
3109 // create them if we need them during the creation of another stub.
3110 // Stub creation mixes raw pointers and handles in an unsafe manner so
3111 // we cannot create stubs while we are creating stubs.
3112 CodeStub::GenerateStubsAheadOfTime(isolate());
3116 bool Heap::CreateInitialObjects() {
3119 // The -0 value must be set before NumberFromDouble works.
3120 { MaybeObject* maybe_obj = AllocateHeapNumber(-0.0, TENURED);
3121 if (!maybe_obj->ToObject(&obj)) return false;
3123 set_minus_zero_value(HeapNumber::cast(obj));
3124 ASSERT(std::signbit(minus_zero_value()->Number()) != 0);
3126 { MaybeObject* maybe_obj = AllocateHeapNumber(OS::nan_value(), TENURED);
3127 if (!maybe_obj->ToObject(&obj)) return false;
3129 set_nan_value(HeapNumber::cast(obj));
3131 { MaybeObject* maybe_obj = AllocateHeapNumber(V8_INFINITY, TENURED);
3132 if (!maybe_obj->ToObject(&obj)) return false;
3134 set_infinity_value(HeapNumber::cast(obj));
3136 // The hole has not been created yet, but we want to put something
3137 // predictable in the gaps in the string table, so let's make that Smi zero.
3138 set_the_hole_value(reinterpret_cast<Oddball*>(Smi::FromInt(0)));
3140 // Allocate initial string table.
3141 { MaybeObject* maybe_obj =
3142 StringTable::Allocate(this, kInitialStringTableSize);
3143 if (!maybe_obj->ToObject(&obj)) return false;
3145 // Don't use set_string_table() due to asserts.
3146 roots_[kStringTableRootIndex] = obj;
3148 // Finish initializing oddballs after creating the string table.
3149 { MaybeObject* maybe_obj =
3150 undefined_value()->Initialize(this,
3153 Oddball::kUndefined);
3154 if (!maybe_obj->ToObject(&obj)) return false;
3157 // Initialize the null_value.
3158 { MaybeObject* maybe_obj = null_value()->Initialize(
3159 this, "null", Smi::FromInt(0), Oddball::kNull);
3160 if (!maybe_obj->ToObject(&obj)) return false;
3163 { MaybeObject* maybe_obj = CreateOddball("true",
3166 if (!maybe_obj->ToObject(&obj)) return false;
3168 set_true_value(Oddball::cast(obj));
3170 { MaybeObject* maybe_obj = CreateOddball("false",
3173 if (!maybe_obj->ToObject(&obj)) return false;
3175 set_false_value(Oddball::cast(obj));
3177 { MaybeObject* maybe_obj = CreateOddball("hole",
3180 if (!maybe_obj->ToObject(&obj)) return false;
3182 set_the_hole_value(Oddball::cast(obj));
3184 { MaybeObject* maybe_obj = CreateOddball("uninitialized",
3186 Oddball::kUninitialized);
3187 if (!maybe_obj->ToObject(&obj)) return false;
3189 set_uninitialized_value(Oddball::cast(obj));
3191 { MaybeObject* maybe_obj = CreateOddball("arguments_marker",
3193 Oddball::kArgumentMarker);
3194 if (!maybe_obj->ToObject(&obj)) return false;
3196 set_arguments_marker(Oddball::cast(obj));
3198 { MaybeObject* maybe_obj = CreateOddball("no_interceptor_result_sentinel",
3201 if (!maybe_obj->ToObject(&obj)) return false;
3203 set_no_interceptor_result_sentinel(obj);
3205 { MaybeObject* maybe_obj = CreateOddball("termination_exception",
3208 if (!maybe_obj->ToObject(&obj)) return false;
3210 set_termination_exception(obj);
3212 for (unsigned i = 0; i < ARRAY_SIZE(constant_string_table); i++) {
3213 { MaybeObject* maybe_obj =
3214 InternalizeUtf8String(constant_string_table[i].contents);
3215 if (!maybe_obj->ToObject(&obj)) return false;
3217 roots_[constant_string_table[i].index] = String::cast(obj);
3220 // Allocate the hidden string which is used to identify the hidden properties
3221 // in JSObjects. The hash code has a special value so that it will not match
3222 // the empty string when searching for the property. It cannot be part of the
3223 // loop above because it needs to be allocated manually with the special
3224 // hash code in place. The hash code for the hidden_string is zero to ensure
3225 // that it will always be at the first entry in property descriptors.
3226 { MaybeObject* maybe_obj = AllocateOneByteInternalizedString(
3227 OneByteVector("", 0), String::kEmptyStringHash);
3228 if (!maybe_obj->ToObject(&obj)) return false;
3230 hidden_string_ = String::cast(obj);
3232 // Allocate the code_stubs dictionary. The initial size is set to avoid
3233 // expanding the dictionary during bootstrapping.
3234 { MaybeObject* maybe_obj = UnseededNumberDictionary::Allocate(this, 128);
3235 if (!maybe_obj->ToObject(&obj)) return false;
3237 set_code_stubs(UnseededNumberDictionary::cast(obj));
3240 // Allocate the non_monomorphic_cache used in stub-cache.cc. The initial size
3241 // is set to avoid expanding the dictionary during bootstrapping.
3242 { MaybeObject* maybe_obj = UnseededNumberDictionary::Allocate(this, 64);
3243 if (!maybe_obj->ToObject(&obj)) return false;
3245 set_non_monomorphic_cache(UnseededNumberDictionary::cast(obj));
3247 { MaybeObject* maybe_obj = AllocatePolymorphicCodeCache();
3248 if (!maybe_obj->ToObject(&obj)) return false;
3250 set_polymorphic_code_cache(PolymorphicCodeCache::cast(obj));
3252 set_instanceof_cache_function(Smi::FromInt(0));
3253 set_instanceof_cache_map(Smi::FromInt(0));
3254 set_instanceof_cache_answer(Smi::FromInt(0));
3258 // Allocate the dictionary of intrinsic function names.
3259 { MaybeObject* maybe_obj =
3260 NameDictionary::Allocate(this, Runtime::kNumFunctions);
3261 if (!maybe_obj->ToObject(&obj)) return false;
3263 { MaybeObject* maybe_obj = Runtime::InitializeIntrinsicFunctionNames(this,
3265 if (!maybe_obj->ToObject(&obj)) return false;
3267 set_intrinsic_function_names(NameDictionary::cast(obj));
3269 { MaybeObject* maybe_obj = AllocateInitialNumberStringCache();
3270 if (!maybe_obj->ToObject(&obj)) return false;
3272 set_number_string_cache(FixedArray::cast(obj));
3274 // Allocate cache for single character one byte strings.
3275 { MaybeObject* maybe_obj =
3276 AllocateFixedArray(String::kMaxOneByteCharCode + 1, TENURED);
3277 if (!maybe_obj->ToObject(&obj)) return false;
3279 set_single_character_string_cache(FixedArray::cast(obj));
3281 // Allocate cache for string split.
3282 { MaybeObject* maybe_obj = AllocateFixedArray(
3283 RegExpResultsCache::kRegExpResultsCacheSize, TENURED);
3284 if (!maybe_obj->ToObject(&obj)) return false;
3286 set_string_split_cache(FixedArray::cast(obj));
3288 { MaybeObject* maybe_obj = AllocateFixedArray(
3289 RegExpResultsCache::kRegExpResultsCacheSize, TENURED);
3290 if (!maybe_obj->ToObject(&obj)) return false;
3292 set_regexp_multiple_cache(FixedArray::cast(obj));
3294 // Allocate cache for external strings pointing to native source code.
3295 { MaybeObject* maybe_obj = AllocateFixedArray(Natives::GetBuiltinsCount());
3296 if (!maybe_obj->ToObject(&obj)) return false;
3298 set_natives_source_cache(FixedArray::cast(obj));
3300 { MaybeObject* maybe_obj = AllocateCell(undefined_value());
3301 if (!maybe_obj->ToObject(&obj)) return false;
3303 set_undefined_cell(Cell::cast(obj));
3305 // Allocate object to hold object observation state.
3306 { MaybeObject* maybe_obj = AllocateMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
3307 if (!maybe_obj->ToObject(&obj)) return false;
3309 { MaybeObject* maybe_obj = AllocateJSObjectFromMap(Map::cast(obj));
3310 if (!maybe_obj->ToObject(&obj)) return false;
3312 set_observation_state(JSObject::cast(obj));
3314 { MaybeObject* maybe_obj = AllocateSymbol();
3315 if (!maybe_obj->ToObject(&obj)) return false;
3317 Symbol::cast(obj)->set_is_private(true);
3318 set_frozen_symbol(Symbol::cast(obj));
3320 { MaybeObject* maybe_obj = AllocateSymbol();
3321 if (!maybe_obj->ToObject(&obj)) return false;
3323 Symbol::cast(obj)->set_is_private(true);
3324 set_elements_transition_symbol(Symbol::cast(obj));
3326 { MaybeObject* maybe_obj = SeededNumberDictionary::Allocate(this, 0, TENURED);
3327 if (!maybe_obj->ToObject(&obj)) return false;
3329 SeededNumberDictionary::cast(obj)->set_requires_slow_elements();
3330 set_empty_slow_element_dictionary(SeededNumberDictionary::cast(obj));
3332 { MaybeObject* maybe_obj = AllocateSymbol();
3333 if (!maybe_obj->ToObject(&obj)) return false;
3335 Symbol::cast(obj)->set_is_private(true);
3336 set_observed_symbol(Symbol::cast(obj));
3338 { MaybeObject* maybe_obj = AllocateFixedArray(0, TENURED);
3339 if (!maybe_obj->ToObject(&obj)) return false;
3341 set_materialized_objects(FixedArray::cast(obj));
3343 // Handling of script id generation is in Factory::NewScript.
3344 set_last_script_id(Smi::FromInt(v8::Script::kNoScriptId));
3346 { MaybeObject* maybe_obj = AllocateAllocationSitesScratchpad();
3347 if (!maybe_obj->ToObject(&obj)) return false;
3349 set_allocation_sites_scratchpad(FixedArray::cast(obj));
3350 InitializeAllocationSitesScratchpad();
3352 // Initialize keyed lookup cache.
3353 isolate_->keyed_lookup_cache()->Clear();
3355 // Initialize context slot cache.
3356 isolate_->context_slot_cache()->Clear();
3358 // Initialize descriptor cache.
3359 isolate_->descriptor_lookup_cache()->Clear();
3361 // Initialize compilation cache.
3362 isolate_->compilation_cache()->Clear();
3368 bool Heap::RootCanBeWrittenAfterInitialization(Heap::RootListIndex root_index) {
3369 RootListIndex writable_roots[] = {
3370 kStoreBufferTopRootIndex,
3371 kStackLimitRootIndex,
3372 kNumberStringCacheRootIndex,
3373 kInstanceofCacheFunctionRootIndex,
3374 kInstanceofCacheMapRootIndex,
3375 kInstanceofCacheAnswerRootIndex,
3376 kCodeStubsRootIndex,
3377 kNonMonomorphicCacheRootIndex,
3378 kPolymorphicCodeCacheRootIndex,
3379 kLastScriptIdRootIndex,
3380 kEmptyScriptRootIndex,
3381 kRealStackLimitRootIndex,
3382 kArgumentsAdaptorDeoptPCOffsetRootIndex,
3383 kConstructStubDeoptPCOffsetRootIndex,
3384 kGetterStubDeoptPCOffsetRootIndex,
3385 kSetterStubDeoptPCOffsetRootIndex,
3386 kStringTableRootIndex,
3389 for (unsigned int i = 0; i < ARRAY_SIZE(writable_roots); i++) {
3390 if (root_index == writable_roots[i])
3397 bool Heap::RootCanBeTreatedAsConstant(RootListIndex root_index) {
3398 return !RootCanBeWrittenAfterInitialization(root_index) &&
3399 !InNewSpace(roots_array_start()[root_index]);
3403 Object* RegExpResultsCache::Lookup(Heap* heap,
3405 Object* key_pattern,
3406 ResultsCacheType type) {
3408 if (!key_string->IsInternalizedString()) return Smi::FromInt(0);
3409 if (type == STRING_SPLIT_SUBSTRINGS) {
3410 ASSERT(key_pattern->IsString());
3411 if (!key_pattern->IsInternalizedString()) return Smi::FromInt(0);
3412 cache = heap->string_split_cache();
3414 ASSERT(type == REGEXP_MULTIPLE_INDICES);
3415 ASSERT(key_pattern->IsFixedArray());
3416 cache = heap->regexp_multiple_cache();
3419 uint32_t hash = key_string->Hash();
3420 uint32_t index = ((hash & (kRegExpResultsCacheSize - 1)) &
3421 ~(kArrayEntriesPerCacheEntry - 1));
3422 if (cache->get(index + kStringOffset) == key_string &&
3423 cache->get(index + kPatternOffset) == key_pattern) {
3424 return cache->get(index + kArrayOffset);
3427 ((index + kArrayEntriesPerCacheEntry) & (kRegExpResultsCacheSize - 1));
3428 if (cache->get(index + kStringOffset) == key_string &&
3429 cache->get(index + kPatternOffset) == key_pattern) {
3430 return cache->get(index + kArrayOffset);
3432 return Smi::FromInt(0);
3436 void RegExpResultsCache::Enter(Heap* heap,
3438 Object* key_pattern,
3439 FixedArray* value_array,
3440 ResultsCacheType type) {
3442 if (!key_string->IsInternalizedString()) return;
3443 if (type == STRING_SPLIT_SUBSTRINGS) {
3444 ASSERT(key_pattern->IsString());
3445 if (!key_pattern->IsInternalizedString()) return;
3446 cache = heap->string_split_cache();
3448 ASSERT(type == REGEXP_MULTIPLE_INDICES);
3449 ASSERT(key_pattern->IsFixedArray());
3450 cache = heap->regexp_multiple_cache();
3453 uint32_t hash = key_string->Hash();
3454 uint32_t index = ((hash & (kRegExpResultsCacheSize - 1)) &
3455 ~(kArrayEntriesPerCacheEntry - 1));
3456 if (cache->get(index + kStringOffset) == Smi::FromInt(0)) {
3457 cache->set(index + kStringOffset, key_string);
3458 cache->set(index + kPatternOffset, key_pattern);
3459 cache->set(index + kArrayOffset, value_array);
3462 ((index + kArrayEntriesPerCacheEntry) & (kRegExpResultsCacheSize - 1));
3463 if (cache->get(index2 + kStringOffset) == Smi::FromInt(0)) {
3464 cache->set(index2 + kStringOffset, key_string);
3465 cache->set(index2 + kPatternOffset, key_pattern);
3466 cache->set(index2 + kArrayOffset, value_array);
3468 cache->set(index2 + kStringOffset, Smi::FromInt(0));
3469 cache->set(index2 + kPatternOffset, Smi::FromInt(0));
3470 cache->set(index2 + kArrayOffset, Smi::FromInt(0));
3471 cache->set(index + kStringOffset, key_string);
3472 cache->set(index + kPatternOffset, key_pattern);
3473 cache->set(index + kArrayOffset, value_array);
3476 // If the array is a reasonably short list of substrings, convert it into a
3477 // list of internalized strings.
3478 if (type == STRING_SPLIT_SUBSTRINGS && value_array->length() < 100) {
3479 for (int i = 0; i < value_array->length(); i++) {
3480 String* str = String::cast(value_array->get(i));
3481 Object* internalized_str;
3482 MaybeObject* maybe_string = heap->InternalizeString(str);
3483 if (maybe_string->ToObject(&internalized_str)) {
3484 value_array->set(i, internalized_str);
3488 // Convert backing store to a copy-on-write array.
3489 value_array->set_map_no_write_barrier(heap->fixed_cow_array_map());
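// Both results caches share one flat layout: each logical entry occupies a
// fixed number of consecutive array slots, the hash selects a primary entry,
// and exactly one secondary probe (one entry further on) is tried before the
// insert evicts. The sketch below shows the same insert policy over a plain
// array; it is illustrative only and the names and sizes are hypothetical.
namespace two_probe_cache_example {

static const int kEntries = 8;  // Must be a power of two.

struct ExampleEntry {
  int key;    // 0 means "empty" in this sketch.
  int value;
};

static void Insert(ExampleEntry* cache, int key, int value, unsigned hash) {
  int primary = hash & (kEntries - 1);
  int secondary = (primary + 1) & (kEntries - 1);
  int slot;
  if (cache[primary].key == 0) {
    slot = primary;            // Primary entry is free.
  } else if (cache[secondary].key == 0) {
    slot = secondary;          // Fall back to the secondary probe.
  } else {
    cache[secondary].key = 0;  // Evict the secondary entry and overwrite
    slot = primary;            // the primary one.
  }
  cache[slot].key = key;
  cache[slot].value = value;
}

}  // namespace two_probe_cache_example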
3493 void RegExpResultsCache::Clear(FixedArray* cache) {
3494 for (int i = 0; i < kRegExpResultsCacheSize; i++) {
3495 cache->set(i, Smi::FromInt(0));
3500 MaybeObject* Heap::AllocateInitialNumberStringCache() {
3501 MaybeObject* maybe_obj =
3502 AllocateFixedArray(kInitialNumberStringCacheSize * 2, TENURED);
3507 int Heap::FullSizeNumberStringCacheLength() {
3508 // Compute the size of the number string cache based on the max newspace size.
3509 // The number string cache has a minimum size based on twice the initial cache
3510 // size to ensure that it is bigger after being made 'full size'.
3511 int number_string_cache_size = max_semispace_size_ / 512;
3512 number_string_cache_size = Max(kInitialNumberStringCacheSize * 2,
3513 Min(0x4000, number_string_cache_size));
3514 // There is a string and a number per entry so the length is twice the number
3516 return number_string_cache_size * 2;
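// Worked example of the clamping above (illustrative only; the 8 MB semispace
// figure is an assumed input, and the initial cache size times two is assumed
// to be the smaller of the two bounds):
//
//   max_semispace_size_ = 8 MB  ->  8 * 1024 * 1024 / 512 = 16384 entries
//   clamped: Max(kInitialNumberStringCacheSize * 2, Min(0x4000, 16384)) = 16384
//   returned length = 16384 * 2 = 32768 array slots
//   (one number and one string per logical entry)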
3520 void Heap::AllocateFullSizeNumberStringCache() {
3521 // The idea is to have a small number string cache in the snapshot to keep
3522 // boot-time memory usage down. If we have already expanded the number string
3523 // cache while creating the snapshot, that plan has not worked out.
3524 ASSERT(!Serializer::enabled() || FLAG_extra_code != NULL);
3525 MaybeObject* maybe_obj =
3526 AllocateFixedArray(FullSizeNumberStringCacheLength(), TENURED);
3528 if (maybe_obj->ToObject(&new_cache)) {
3529 // We don't bother to repopulate the cache with entries from the old cache.
3530 // It will be repopulated soon enough with new strings.
3531 set_number_string_cache(FixedArray::cast(new_cache));
3533 // If allocation fails then we just return without doing anything. It is only
3534 // a cache, so best effort is OK here.
3538 void Heap::FlushNumberStringCache() {
3539 // Flush the number to string cache.
3540 int len = number_string_cache()->length();
3541 for (int i = 0; i < len; i++) {
3542 number_string_cache()->set_undefined(i);
3547 static inline int double_get_hash(double d) {
3548 DoubleRepresentation rep(d);
3549 return static_cast<int>(rep.bits) ^ static_cast<int>(rep.bits >> 32);
3553 static inline int smi_get_hash(Smi* smi) {
3554 return smi->value();
3558 Object* Heap::GetNumberStringCache(Object* number) {
3560 int mask = (number_string_cache()->length() >> 1) - 1;
3561 if (number->IsSmi()) {
3562 hash = smi_get_hash(Smi::cast(number)) & mask;
3564 hash = double_get_hash(number->Number()) & mask;
3566 Object* key = number_string_cache()->get(hash * 2);
3567 if (key == number) {
3568 return String::cast(number_string_cache()->get(hash * 2 + 1));
3569 } else if (key->IsHeapNumber() &&
3570 number->IsHeapNumber() &&
3571 key->Number() == number->Number()) {
3572 return String::cast(number_string_cache()->get(hash * 2 + 1));
3574 return undefined_value();
3578 void Heap::SetNumberStringCache(Object* number, String* string) {
3580 int mask = (number_string_cache()->length() >> 1) - 1;
3581 if (number->IsSmi()) {
3582 hash = smi_get_hash(Smi::cast(number)) & mask;
3584 hash = double_get_hash(number->Number()) & mask;
3586 if (number_string_cache()->get(hash * 2) != undefined_value() &&
3587 number_string_cache()->length() != FullSizeNumberStringCacheLength()) {
3588 // The first time we have a hash collision, we move to the full sized
3589 // number string cache.
3590 AllocateFullSizeNumberStringCache();
3593 number_string_cache()->set(hash * 2, number);
3594 number_string_cache()->set(hash * 2 + 1, string);
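// GetNumberStringCache and SetNumberStringCache implement a direct-mapped
// cache: the hash selects one slot, the key is stored at slot * 2 and the
// value at slot * 2 + 1, and a colliding key simply overwrites the old pair
// (after the one-time upgrade to the full-size cache). A self-contained sketch
// of the same layout with integer keys (illustrative only, hypothetical
// names):
namespace number_string_cache_example {

static const int kSlots = 16;  // Must be a power of two.

struct ExamplePair {
  int key;
  const char* value;  // NULL means "empty".
};

static void Put(ExamplePair* cache, int key, const char* value) {
  int slot = key & (kSlots - 1);  // Direct-mapped: exactly one candidate slot.
  cache[slot].key = key;          // A collision just overwrites the old pair.
  cache[slot].value = value;
}

static const char* Get(const ExamplePair* cache, int key) {
  int slot = key & (kSlots - 1);
  if (cache[slot].value != NULL && cache[slot].key == key) {
    return cache[slot].value;  // Hit: the stored key matches.
  }
  return NULL;                 // Miss.
}

}  // namespace number_string_cache_example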
3598 MaybeObject* Heap::NumberToString(Object* number,
3599 bool check_number_string_cache) {
3600 isolate_->counters()->number_to_string_runtime()->Increment();
3601 if (check_number_string_cache) {
3602 Object* cached = GetNumberStringCache(number);
3603 if (cached != undefined_value()) {
3609 Vector<char> buffer(arr, ARRAY_SIZE(arr));
3611 if (number->IsSmi()) {
3612 int num = Smi::cast(number)->value();
3613 str = IntToCString(num, buffer);
3615 double num = HeapNumber::cast(number)->value();
3616 str = DoubleToCString(num, buffer);
3621 // We tenure the allocated string since it is referenced from the
3622 // number-string cache which lives in the old space.
3623 MaybeObject* maybe_js_string =
3624 AllocateStringFromOneByte(CStrVector(str), TENURED);
3625 if (maybe_js_string->ToObject(&js_string)) {
3626 SetNumberStringCache(number, String::cast(js_string));
3628 return maybe_js_string;
3632 MaybeObject* Heap::Uint32ToString(uint32_t value,
3633 bool check_number_string_cache) {
3635 MaybeObject* maybe = NumberFromUint32(value);
3636 if (!maybe->To<Object>(&number)) return maybe;
3637 return NumberToString(number, check_number_string_cache);
3641 MaybeObject* Heap::AllocateAllocationSitesScratchpad() {
3642 MaybeObject* maybe_obj =
3643 AllocateFixedArray(kAllocationSiteScratchpadSize, TENURED);
3648 void Heap::FlushAllocationSitesScratchpad() {
3649 for (int i = 0; i < allocation_sites_scratchpad_length_; i++) {
3650 allocation_sites_scratchpad()->set_undefined(i);
3652 allocation_sites_scratchpad_length_ = 0;
3656 void Heap::InitializeAllocationSitesScratchpad() {
3657 ASSERT(allocation_sites_scratchpad()->length() ==
3658 kAllocationSiteScratchpadSize);
3659 for (int i = 0; i < kAllocationSiteScratchpadSize; i++) {
3660 allocation_sites_scratchpad()->set_undefined(i);
3665 void Heap::AddAllocationSiteToScratchpad(AllocationSite* site) {
3666 if (allocation_sites_scratchpad_length_ < kAllocationSiteScratchpadSize) {
3667 allocation_sites_scratchpad()->set(
3668 allocation_sites_scratchpad_length_, site);
3669 allocation_sites_scratchpad_length_++;
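// The scratchpad has a fixed capacity; once it is full, newly created
// allocation sites are simply not recorded until the scratchpad is flushed.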
3674 Map* Heap::MapForExternalArrayType(ExternalArrayType array_type) {
3675 return Map::cast(roots_[RootIndexForExternalArrayType(array_type)]);
3679 Heap::RootListIndex Heap::RootIndexForExternalArrayType(
3680 ExternalArrayType array_type) {
3681 switch (array_type) {
3682 #define ARRAY_TYPE_TO_ROOT_INDEX(Type, type, TYPE, ctype, size) \
3683 case kExternal##Type##Array: \
3684 return kExternal##Type##ArrayMapRootIndex;
3686 TYPED_ARRAYS(ARRAY_TYPE_TO_ROOT_INDEX)
3687 #undef ARRAY_TYPE_TO_ROOT_INDEX
3691 return kUndefinedValueRootIndex;
3696 Map* Heap::MapForFixedTypedArray(ExternalArrayType array_type) {
3697 return Map::cast(roots_[RootIndexForFixedTypedArray(array_type)]);
3701 Heap::RootListIndex Heap::RootIndexForFixedTypedArray(
3702 ExternalArrayType array_type) {
3703 switch (array_type) {
3704 #define ARRAY_TYPE_TO_ROOT_INDEX(Type, type, TYPE, ctype, size) \
3705 case kExternal##Type##Array: \
3706 return kFixed##Type##ArrayMapRootIndex;
3708 TYPED_ARRAYS(ARRAY_TYPE_TO_ROOT_INDEX)
3709 #undef ARRAY_TYPE_TO_ROOT_INDEX
3713 return kUndefinedValueRootIndex;
3718 Heap::RootListIndex Heap::RootIndexForEmptyExternalArray(
3719 ElementsKind elementsKind) {
3720 switch (elementsKind) {
3721 #define ELEMENT_KIND_TO_ROOT_INDEX(Type, type, TYPE, ctype, size) \
3722 case EXTERNAL_##TYPE##_ELEMENTS: \
3723 return kEmptyExternal##Type##ArrayRootIndex;
3725 TYPED_ARRAYS(ELEMENT_KIND_TO_ROOT_INDEX)
3726 #undef ELEMENT_KIND_TO_ROOT_INDEX
3730 return kUndefinedValueRootIndex;
3735 ExternalArray* Heap::EmptyExternalArrayForMap(Map* map) {
3736 return ExternalArray::cast(
3737 roots_[RootIndexForEmptyExternalArray(map->elements_kind())]);
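// A worked example of the Smi/HeapNumber split below: NumberFromDouble(7.0)
// returns Smi::FromInt(7) as long as 7 is in Smi range, whereas
// NumberFromDouble(-0.0) and NumberFromDouble(1.5) must allocate a HeapNumber.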
3741 MaybeObject* Heap::NumberFromDouble(double value, PretenureFlag pretenure) {
3742 // We need to distinguish the minus zero value and this cannot be
3743 // done after conversion to int. Doing this by comparing bit
3744 // patterns is faster than using fpclassify() et al.
3745 if (IsMinusZero(value)) {
3746 return AllocateHeapNumber(-0.0, pretenure);
3749 int int_value = FastD2I(value);
3750 if (value == int_value && Smi::IsValid(int_value)) {
3751 return Smi::FromInt(int_value);
3754 // Materialize the value in the heap.
3755 return AllocateHeapNumber(value, pretenure);
3759 MaybeObject* Heap::AllocateForeign(Address address, PretenureFlag pretenure) {
3760 // Statically ensure that it is safe to allocate foreigns in paged spaces.
3761 STATIC_ASSERT(Foreign::kSize <= Page::kMaxRegularHeapObjectSize);
3762 AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
3764 MaybeObject* maybe_result = Allocate(foreign_map(), space);
3765 if (!maybe_result->To(&result)) return maybe_result;
3766 result->set_foreign_address(address);
3771 MaybeObject* Heap::AllocateSharedFunctionInfo(Object* name) {
3772 SharedFunctionInfo* share;
3773 MaybeObject* maybe = Allocate(shared_function_info_map(), OLD_POINTER_SPACE);
3774 if (!maybe->To<SharedFunctionInfo>(&share)) return maybe;
3776 // Set pointer fields.
3777 share->set_name(name);
3778 Code* illegal = isolate_->builtins()->builtin(Builtins::kIllegal);
3779 share->set_code(illegal);
3780 share->set_optimized_code_map(Smi::FromInt(0));
3781 share->set_scope_info(ScopeInfo::Empty(isolate_));
3782 Code* construct_stub =
3783 isolate_->builtins()->builtin(Builtins::kJSConstructStubGeneric);
3784 share->set_construct_stub(construct_stub);
3785 share->set_instance_class_name(Object_string());
3786 share->set_function_data(undefined_value(), SKIP_WRITE_BARRIER);
3787 share->set_script(undefined_value(), SKIP_WRITE_BARRIER);
3788 share->set_debug_info(undefined_value(), SKIP_WRITE_BARRIER);
3789 share->set_inferred_name(empty_string(), SKIP_WRITE_BARRIER);
3790 share->set_initial_map(undefined_value(), SKIP_WRITE_BARRIER);
3791 share->set_ast_node_count(0);
3792 share->set_counters(0);
3794 // Set integer fields (smi or int, depending on the architecture).
3795 share->set_length(0);
3796 share->set_formal_parameter_count(0);
3797 share->set_expected_nof_properties(0);
3798 share->set_num_literals(0);
3799 share->set_start_position_and_type(0);
3800 share->set_end_position(0);
3801 share->set_function_token_position(0);
3802 // All compiler hints default to false or 0.
3803 share->set_compiler_hints(0);
3804 share->set_opt_count_and_bailout_reason(0);
3810 MaybeObject* Heap::AllocateJSMessageObject(String* type,
3815 Object* stack_trace,
3816 Object* stack_frames) {
3818 { MaybeObject* maybe_result = Allocate(message_object_map(), NEW_SPACE);
3819 if (!maybe_result->ToObject(&result)) return maybe_result;
3821 JSMessageObject* message = JSMessageObject::cast(result);
3822 message->set_properties(Heap::empty_fixed_array(), SKIP_WRITE_BARRIER);
3823 message->initialize_elements();
3824 message->set_elements(Heap::empty_fixed_array(), SKIP_WRITE_BARRIER);
3825 message->set_type(type);
3826 message->set_arguments(arguments);
3827 message->set_start_position(start_position);
3828 message->set_end_position(end_position);
3829 message->set_script(script);
3830 message->set_stack_trace(stack_trace);
3831 message->set_stack_frames(stack_frames);
3836 MaybeObject* Heap::AllocateExternalStringFromAscii(
3837 const ExternalAsciiString::Resource* resource) {
3838 size_t length = resource->length();
3839 if (length > static_cast<size_t>(String::kMaxLength)) {
3840 isolate()->context()->mark_out_of_memory();
3841 return Failure::OutOfMemoryException(0x5);
3844 Map* map = external_ascii_string_map();
3846 { MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
3847 if (!maybe_result->ToObject(&result)) return maybe_result;
3850 ExternalAsciiString* external_string = ExternalAsciiString::cast(result);
3851 external_string->set_length(static_cast<int>(length));
3852 external_string->set_hash_field(String::kEmptyHashField);
3853 external_string->set_resource(resource);
3859 MaybeObject* Heap::AllocateExternalStringFromTwoByte(
3860 const ExternalTwoByteString::Resource* resource) {
3861 size_t length = resource->length();
3862 if (length > static_cast<size_t>(String::kMaxLength)) {
3863 isolate()->context()->mark_out_of_memory();
3864 return Failure::OutOfMemoryException(0x6);
3867 // For small strings we check whether the resource contains only
3868 // one-byte characters. If so, we use a different string map.
3869 static const size_t kOneByteCheckLengthLimit = 32;
3870 bool is_one_byte = length <= kOneByteCheckLengthLimit &&
3871 String::IsOneByte(resource->data(), static_cast<int>(length));
3872 Map* map = is_one_byte ?
3873 external_string_with_one_byte_data_map() : external_string_map();
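// The *_with_one_byte_data map carries a hint that the two-byte resource
// actually holds only one-byte characters, so later operations can prefer
// one-byte representations without re-scanning the string.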
3875 { MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
3876 if (!maybe_result->ToObject(&result)) return maybe_result;
3879 ExternalTwoByteString* external_string = ExternalTwoByteString::cast(result);
3880 external_string->set_length(static_cast<int>(length));
3881 external_string->set_hash_field(String::kEmptyHashField);
3882 external_string->set_resource(resource);
3888 MaybeObject* Heap::LookupSingleCharacterStringFromCode(uint16_t code) {
3889 if (code <= String::kMaxOneByteCharCode) {
3890 Object* value = single_character_string_cache()->get(code);
3891 if (value != undefined_value()) return value;
3894 buffer[0] = static_cast<uint8_t>(code);
3896 OneByteStringKey key(Vector<const uint8_t>(buffer, 1), HashSeed());
3897 MaybeObject* maybe_result = InternalizeStringWithKey(&key);
3899 if (!maybe_result->ToObject(&result)) return maybe_result;
3900 single_character_string_cache()->set(code, result);
3904 SeqTwoByteString* result;
3905 { MaybeObject* maybe_result = AllocateRawTwoByteString(1);
3906 if (!maybe_result->To<SeqTwoByteString>(&result)) return maybe_result;
3908 result->SeqTwoByteStringSet(0, code);
3913 MaybeObject* Heap::AllocateByteArray(int length, PretenureFlag pretenure) {
3914 if (length < 0 || length > ByteArray::kMaxLength) {
3915 return Failure::OutOfMemoryException(0x7);
3917 int size = ByteArray::SizeFor(length);
3918 AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
3920 { MaybeObject* maybe_result = AllocateRaw(size, space, OLD_DATA_SPACE);
3921 if (!maybe_result->ToObject(&result)) return maybe_result;
3924 reinterpret_cast<ByteArray*>(result)->set_map_no_write_barrier(
3926 reinterpret_cast<ByteArray*>(result)->set_length(length);
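// Filler objects keep the heap iterable when a gap is left behind (e.g. by a
// discarded allocation): one- and two-pointer gaps get dedicated filler maps,
// while anything larger becomes a FreeSpace object carrying its own size.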
3931 void Heap::CreateFillerObjectAt(Address addr, int size) {
3932 if (size == 0) return;
3933 HeapObject* filler = HeapObject::FromAddress(addr);
3934 if (size == kPointerSize) {
3935 filler->set_map_no_write_barrier(one_pointer_filler_map());
3936 } else if (size == 2 * kPointerSize) {
3937 filler->set_map_no_write_barrier(two_pointer_filler_map());
3939 filler->set_map_no_write_barrier(free_space_map());
3940 FreeSpace::cast(filler)->set_size(size);
3945 MaybeObject* Heap::AllocateExternalArray(int length,
3946 ExternalArrayType array_type,
3947 void* external_pointer,
3948 PretenureFlag pretenure) {
3949 int size = ExternalArray::kAlignedSize;
3950 AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
3952 { MaybeObject* maybe_result = AllocateRaw(size, space, OLD_DATA_SPACE);
3953 if (!maybe_result->ToObject(&result)) return maybe_result;
3956 reinterpret_cast<ExternalArray*>(result)->set_map_no_write_barrier(
3957 MapForExternalArrayType(array_type));
3958 reinterpret_cast<ExternalArray*>(result)->set_length(length);
3959 reinterpret_cast<ExternalArray*>(result)->set_external_pointer(
3965 static void ForFixedTypedArray(ExternalArrayType array_type,
3967 ElementsKind* element_kind) {
3968 switch (array_type) {
3969 #define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
3970 case kExternal##Type##Array: \
3971 *element_size = size; \
3972 *element_kind = TYPE##_ELEMENTS; \
3975 TYPED_ARRAYS(TYPED_ARRAY_CASE)
3976 #undef TYPED_ARRAY_CASE
3979 *element_size = 0; // Bogus
3980 *element_kind = UINT8_ELEMENTS; // Bogus
3986 MaybeObject* Heap::AllocateFixedTypedArray(int length,
3987 ExternalArrayType array_type,
3988 PretenureFlag pretenure) {
3990 ElementsKind elements_kind;
3991 ForFixedTypedArray(array_type, &element_size, &elements_kind);
3992 int size = OBJECT_POINTER_ALIGN(
3993 length * element_size + FixedTypedArrayBase::kDataOffset);
3994 #ifndef V8_HOST_ARCH_64_BIT
3995 if (array_type == kExternalFloat64Array) {
3996 size += kPointerSize;
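// On 32-bit hosts doubles require stricter alignment than the allocator
// guarantees, so reserve one extra word of slack; EnsureDoubleAligned below
// uses it to shift the object to an 8-byte boundary and fills the gap.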
3999 AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
4002 MaybeObject* maybe_object = AllocateRaw(size, space, OLD_DATA_SPACE);
4003 if (!maybe_object->To(&object)) return maybe_object;
4005 if (array_type == kExternalFloat64Array) {
4006 object = EnsureDoubleAligned(this, object, size);
4009 FixedTypedArrayBase* elements =
4010 reinterpret_cast<FixedTypedArrayBase*>(object);
4011 elements->set_map(MapForFixedTypedArray(array_type));
4012 elements->set_length(length);
4017 MaybeObject* Heap::CreateCode(const CodeDesc& desc,
4019 Handle<Object> self_reference,
4022 int prologue_offset) {
4023 // Allocate ByteArray before the Code object, so that we do not risk
4024 // leaving an uninitialized Code object behind (and breaking the heap).
4025 ByteArray* reloc_info;
4026 MaybeObject* maybe_reloc_info = AllocateByteArray(desc.reloc_size, TENURED);
4027 if (!maybe_reloc_info->To(&reloc_info)) return maybe_reloc_info;
4030 int body_size = RoundUp(desc.instr_size, kObjectAlignment);
4031 int obj_size = Code::SizeFor(body_size);
4032 ASSERT(IsAligned(static_cast<intptr_t>(obj_size), kCodeAlignment));
4033 MaybeObject* maybe_result;
4034 // Large code objects and code objects which should stay at a fixed address
4035 // are allocated in large object space.
4037 bool force_lo_space = obj_size > code_space()->AreaSize();
4038 if (force_lo_space) {
4039 maybe_result = lo_space_->AllocateRaw(obj_size, EXECUTABLE);
4041 maybe_result = AllocateRaw(obj_size, CODE_SPACE, CODE_SPACE);
4043 if (!maybe_result->To<HeapObject>(&result)) return maybe_result;
4045 if (immovable && !force_lo_space &&
4046 // Objects on the first page of each space are never moved.
4047 !code_space_->FirstPage()->Contains(result->address())) {
4048 // Discard the first code allocation, which was on a page where it could be moved.
4050 CreateFillerObjectAt(result->address(), obj_size);
4051 maybe_result = lo_space_->AllocateRaw(obj_size, EXECUTABLE);
4052 if (!maybe_result->To<HeapObject>(&result)) return maybe_result;
4055 // Initialize the object
4056 result->set_map_no_write_barrier(code_map());
4057 Code* code = Code::cast(result);
4058 ASSERT(!isolate_->code_range()->exists() ||
4059 isolate_->code_range()->contains(code->address()));
4060 code->set_instruction_size(desc.instr_size);
4061 code->set_relocation_info(reloc_info);
4062 code->set_flags(flags);
4063 code->set_raw_kind_specific_flags1(0);
4064 code->set_raw_kind_specific_flags2(0);
4065 code->set_is_crankshafted(crankshafted);
4066 code->set_deoptimization_data(empty_fixed_array(), SKIP_WRITE_BARRIER);
4067 code->set_raw_type_feedback_info(undefined_value());
4068 code->set_handler_table(empty_fixed_array(), SKIP_WRITE_BARRIER);
4069 code->set_gc_metadata(Smi::FromInt(0));
4070 code->set_ic_age(global_ic_age_);
4071 code->set_prologue_offset(prologue_offset);
4072 if (code->kind() == Code::OPTIMIZED_FUNCTION) {
4073 code->set_marked_for_deoptimization(false);
4075 code->set_constant_pool(empty_constant_pool_array());
4077 #ifdef ENABLE_DEBUGGER_SUPPORT
4078 if (code->kind() == Code::FUNCTION) {
4079 code->set_has_debug_break_slots(
4080 isolate_->debugger()->IsDebuggerActive());
4084 // Allow self references to created code object by patching the handle to
4085 // point to the newly allocated Code object.
4086 if (!self_reference.is_null()) {
4087 *(self_reference.location()) = code;
4089 // Migrate generated code.
4090 // The generated code can contain Object** values (typically from handles)
4091 // that are dereferenced during the copy to point directly to the actual heap
4092 // objects. These pointers can include references to the code object itself,
4093 // through the self_reference parameter.
4094 code->CopyFrom(desc);
4097 if (FLAG_verify_heap) {
4105 MaybeObject* Heap::CopyCode(Code* code) {
4106 // Allocate an object the same size as the code object.
4107 int obj_size = code->Size();
4108 MaybeObject* maybe_result;
4109 if (obj_size > code_space()->AreaSize()) {
4110 maybe_result = lo_space_->AllocateRaw(obj_size, EXECUTABLE);
4112 maybe_result = AllocateRaw(obj_size, CODE_SPACE, CODE_SPACE);
4116 if (!maybe_result->ToObject(&result)) return maybe_result;
4118 // Copy code object.
4119 Address old_addr = code->address();
4120 Address new_addr = reinterpret_cast<HeapObject*>(result)->address();
4121 CopyBlock(new_addr, old_addr, obj_size);
4122 // Relocate the copy.
4123 Code* new_code = Code::cast(result);
4124 ASSERT(!isolate_->code_range()->exists() ||
4125 isolate_->code_range()->contains(code->address()));
4126 new_code->Relocate(new_addr - old_addr);
4131 MaybeObject* Heap::CopyCode(Code* code, Vector<byte> reloc_info) {
4132 // Allocate ByteArray before the Code object, so that we do not risk
4133 // leaving an uninitialized Code object behind (and breaking the heap).
4134 Object* reloc_info_array;
4135 { MaybeObject* maybe_reloc_info_array =
4136 AllocateByteArray(reloc_info.length(), TENURED);
4137 if (!maybe_reloc_info_array->ToObject(&reloc_info_array)) {
4138 return maybe_reloc_info_array;
4142 int new_body_size = RoundUp(code->instruction_size(), kObjectAlignment);
4144 int new_obj_size = Code::SizeFor(new_body_size);
4146 Address old_addr = code->address();
4148 size_t relocation_offset =
4149 static_cast<size_t>(code->instruction_end() - old_addr);
4151 MaybeObject* maybe_result;
4152 if (new_obj_size > code_space()->AreaSize()) {
4153 maybe_result = lo_space_->AllocateRaw(new_obj_size, EXECUTABLE);
4155 maybe_result = AllocateRaw(new_obj_size, CODE_SPACE, CODE_SPACE);
4159 if (!maybe_result->ToObject(&result)) return maybe_result;
4161 // Copy code object.
4162 Address new_addr = reinterpret_cast<HeapObject*>(result)->address();
4164 // Copy header and instructions.
4165 CopyBytes(new_addr, old_addr, relocation_offset);
4167 Code* new_code = Code::cast(result);
4168 new_code->set_relocation_info(ByteArray::cast(reloc_info_array));
4170 // Copy patched rinfo.
4171 CopyBytes(new_code->relocation_start(),
4173 static_cast<size_t>(reloc_info.length()));
4175 // Relocate the copy.
4176 ASSERT(!isolate_->code_range()->exists() ||
4177 isolate_->code_range()->contains(code->address()));
4178 new_code->Relocate(new_addr - old_addr);
4181 if (FLAG_verify_heap) {
4189 void Heap::InitializeAllocationMemento(AllocationMemento* memento,
4190 AllocationSite* allocation_site) {
4191 memento->set_map_no_write_barrier(allocation_memento_map());
4192 ASSERT(allocation_site->map() == allocation_site_map());
4193 memento->set_allocation_site(allocation_site, SKIP_WRITE_BARRIER);
4194 if (FLAG_allocation_site_pretenuring) {
4195 allocation_site->IncrementMementoCreateCount();
4200 MaybeObject* Heap::AllocateWithAllocationSite(Map* map, AllocationSpace space,
4201 Handle<AllocationSite> allocation_site) {
4202 ASSERT(gc_state_ == NOT_IN_GC);
4203 ASSERT(map->instance_type() != MAP_TYPE);
4204 // If allocation failures are disallowed, we may allocate in a different
4205 // space when new space is full and the object is not a large object.
4206 AllocationSpace retry_space =
4207 (space != NEW_SPACE) ? space : TargetSpaceId(map->instance_type());
4208 int size = map->instance_size() + AllocationMemento::kSize;
4210 MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
4211 if (!maybe_result->ToObject(&result)) return maybe_result;
4212 // No need for write barrier since object is white and map is in old space.
4213 HeapObject::cast(result)->set_map_no_write_barrier(map);
4214 AllocationMemento* alloc_memento = reinterpret_cast<AllocationMemento*>(
4215 reinterpret_cast<Address>(result) + map->instance_size());
4216 InitializeAllocationMemento(alloc_memento, *allocation_site);
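// The memento lives in the AllocationMemento::kSize bytes reserved directly
// behind the object itself, so the GC can later find it at a fixed offset and
// credit the allocation to its AllocationSite (e.g. for pretenuring).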
4221 MaybeObject* Heap::Allocate(Map* map, AllocationSpace space) {
4222 ASSERT(gc_state_ == NOT_IN_GC);
4223 ASSERT(map->instance_type() != MAP_TYPE);
4224 // If allocation failures are disallowed, we may allocate in a different
4225 // space when new space is full and the object is not a large object.
4226 AllocationSpace retry_space =
4227 (space != NEW_SPACE) ? space : TargetSpaceId(map->instance_type());
4228 int size = map->instance_size();
4230 MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
4231 if (!maybe_result->ToObject(&result)) return maybe_result;
4232 // No need for write barrier since object is white and map is in old space.
4233 HeapObject::cast(result)->set_map_no_write_barrier(map);
4238 void Heap::InitializeFunction(JSFunction* function,
4239 SharedFunctionInfo* shared,
4240 Object* prototype) {
4241 ASSERT(!prototype->IsMap());
4242 function->initialize_properties();
4243 function->initialize_elements();
4244 function->set_shared(shared);
4245 function->set_code(shared->code());
4246 function->set_prototype_or_initial_map(prototype);
4247 function->set_context(undefined_value());
4248 function->set_literals_or_bindings(empty_fixed_array());
4249 function->set_next_function_link(undefined_value());
4253 MaybeObject* Heap::AllocateFunction(Map* function_map,
4254 SharedFunctionInfo* shared,
4256 PretenureFlag pretenure) {
4257 AllocationSpace space =
4258 (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
4260 { MaybeObject* maybe_result = Allocate(function_map, space);
4261 if (!maybe_result->ToObject(&result)) return maybe_result;
4263 InitializeFunction(JSFunction::cast(result), shared, prototype);
4268 MaybeObject* Heap::AllocateArgumentsObject(Object* callee, int length) {
4269 // To get fast allocation and map sharing for arguments objects we
4270 // allocate them based on an arguments boilerplate.
4272 JSObject* boilerplate;
4273 int arguments_object_size;
4274 bool strict_mode_callee = callee->IsJSFunction() &&
4275 !JSFunction::cast(callee)->shared()->is_classic_mode();
4276 if (strict_mode_callee) {
4278 isolate()->context()->native_context()->
4279 strict_mode_arguments_boilerplate();
4280 arguments_object_size = kArgumentsObjectSizeStrict;
4283 isolate()->context()->native_context()->arguments_boilerplate();
4284 arguments_object_size = kArgumentsObjectSize;
4287 // Check that the size of the boilerplate matches our
4288 // expectations. The ArgumentsAccessStub::GenerateNewObject relies
4289 // on the size being a known constant.
4290 ASSERT(arguments_object_size == boilerplate->map()->instance_size());
4292 // Do the allocation.
4294 { MaybeObject* maybe_result =
4295 AllocateRaw(arguments_object_size, NEW_SPACE, OLD_POINTER_SPACE);
4296 if (!maybe_result->ToObject(&result)) return maybe_result;
4299 // Copy the content. The arguments boilerplate doesn't have any
4300 // fields that point to new space, so it's safe to skip the write barrier.
4302 CopyBlock(HeapObject::cast(result)->address(),
4303 boilerplate->address(),
4304 JSObject::kHeaderSize);
4306 // Set the length property.
4307 JSObject::cast(result)->InObjectPropertyAtPut(kArgumentsLengthIndex,
4308 Smi::FromInt(length),
4309 SKIP_WRITE_BARRIER);
4310 // Set the callee property for non-strict mode arguments object only.
4311 if (!strict_mode_callee) {
4312 JSObject::cast(result)->InObjectPropertyAtPut(kArgumentsCalleeIndex,
4316 // Check the state of the object
4317 ASSERT(JSObject::cast(result)->HasFastProperties());
4318 ASSERT(JSObject::cast(result)->HasFastObjectElements());
4324 void Heap::InitializeJSObjectFromMap(JSObject* obj,
4325 FixedArray* properties,
4327 obj->set_properties(properties);
4328 obj->initialize_elements();
4329 // TODO(1240798): Initialize the object's body using valid initial values
4330 // according to the object's initial map. For example, if the map's
4331 // instance type is JS_ARRAY_TYPE, the length field should be initialized
4332 // to a number (e.g. Smi::FromInt(0)) and the elements initialized to a
4333 // fixed array (e.g. Heap::empty_fixed_array()). Currently, the object
4334 // verification code has to cope with (temporarily) invalid objects. See
4335 // for example, JSArray::JSArrayVerify.
4337 // We cannot always fill with one_pointer_filler_map because objects
4338 // created from API functions expect their internal fields to be initialized
4339 // with undefined_value.
4340 // Pre-allocated fields need to be initialized with undefined_value as well
4341 // so that object accesses before the constructor completes (e.g. in the
4342 // debugger) will not cause a crash.
4343 if (map->constructor()->IsJSFunction() &&
4344 JSFunction::cast(map->constructor())->shared()->
4345 IsInobjectSlackTrackingInProgress()) {
4346 // We might want to shrink the object later.
4347 ASSERT(obj->GetInternalFieldCount() == 0);
4348 filler = Heap::one_pointer_filler_map();
4350 filler = Heap::undefined_value();
4352 obj->InitializeBody(map, Heap::undefined_value(), filler);
4356 MaybeObject* Heap::AllocateJSObjectFromMap(
4357 Map* map, PretenureFlag pretenure, bool allocate_properties) {
4358 // JSFunctions should be allocated using AllocateFunction to be
4359 // properly initialized.
4360 ASSERT(map->instance_type() != JS_FUNCTION_TYPE);
4362 // Both types of global objects should be allocated using
4363 // AllocateGlobalObject to be properly initialized.
4364 ASSERT(map->instance_type() != JS_GLOBAL_OBJECT_TYPE);
4365 ASSERT(map->instance_type() != JS_BUILTINS_OBJECT_TYPE);
4367 // Allocate the backing storage for the properties.
4368 FixedArray* properties;
4369 if (allocate_properties) {
4370 int prop_size = map->InitialPropertiesLength();
4371 ASSERT(prop_size >= 0);
4372 { MaybeObject* maybe_properties = AllocateFixedArray(prop_size, pretenure);
4373 if (!maybe_properties->To(&properties)) return maybe_properties;
4376 properties = empty_fixed_array();
4379 // Allocate the JSObject.
4380 int size = map->instance_size();
4381 AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, pretenure);
4383 MaybeObject* maybe_obj = Allocate(map, space);
4384 if (!maybe_obj->To(&obj)) return maybe_obj;
4386 // Initialize the JSObject.
4387 InitializeJSObjectFromMap(JSObject::cast(obj), properties, map);
4388 ASSERT(JSObject::cast(obj)->HasFastElements() ||
4389 JSObject::cast(obj)->HasExternalArrayElements());
4394 MaybeObject* Heap::AllocateJSObjectFromMapWithAllocationSite(
4395 Map* map, Handle<AllocationSite> allocation_site) {
4396 // JSFunctions should be allocated using AllocateFunction to be
4397 // properly initialized.
4398 ASSERT(map->instance_type() != JS_FUNCTION_TYPE);
4400 // Both types of global objects should be allocated using
4401 // AllocateGlobalObject to be properly initialized.
4402 ASSERT(map->instance_type() != JS_GLOBAL_OBJECT_TYPE);
4403 ASSERT(map->instance_type() != JS_BUILTINS_OBJECT_TYPE);
4405 // Allocate the backing storage for the properties.
4406 int prop_size = map->InitialPropertiesLength();
4407 ASSERT(prop_size >= 0);
4408 FixedArray* properties;
4409 { MaybeObject* maybe_properties = AllocateFixedArray(prop_size);
4410 if (!maybe_properties->To(&properties)) return maybe_properties;
4413 // Allocate the JSObject.
4414 int size = map->instance_size();
4415 AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, NOT_TENURED);
4417 MaybeObject* maybe_obj =
4418 AllocateWithAllocationSite(map, space, allocation_site);
4419 if (!maybe_obj->To(&obj)) return maybe_obj;
4421 // Initialize the JSObject.
4422 InitializeJSObjectFromMap(JSObject::cast(obj), properties, map);
4423 ASSERT(JSObject::cast(obj)->HasFastElements());
4428 MaybeObject* Heap::AllocateJSObject(JSFunction* constructor,
4429 PretenureFlag pretenure) {
4430 ASSERT(constructor->has_initial_map());
4431 // Allocate the object based on the constructor's initial map.
4432 MaybeObject* result = AllocateJSObjectFromMap(
4433 constructor->initial_map(), pretenure);
4435 // Make sure result is NOT a global object if valid.
4436 Object* non_failure;
4437 ASSERT(!result->ToObject(&non_failure) || !non_failure->IsGlobalObject());
4443 MaybeObject* Heap::AllocateJSObjectWithAllocationSite(JSFunction* constructor,
4444 Handle<AllocationSite> allocation_site) {
4445 ASSERT(constructor->has_initial_map());
4446 // Allocate the object based on the constructor's initial map, adjusted by the elements kind advice from the allocation site.
4448 Map* initial_map = constructor->initial_map();
4450 ElementsKind to_kind = allocation_site->GetElementsKind();
4451 AllocationSiteMode mode = TRACK_ALLOCATION_SITE;
4452 if (to_kind != initial_map->elements_kind()) {
4453 MaybeObject* maybe_new_map = initial_map->AsElementsKind(to_kind);
4454 if (!maybe_new_map->To(&initial_map)) return maybe_new_map;
4455 // Possibly alter the mode, since we found an updated elements kind
4456 // in the type info cell.
4457 mode = AllocationSite::GetMode(to_kind);
4460 MaybeObject* result;
4461 if (mode == TRACK_ALLOCATION_SITE) {
4462 result = AllocateJSObjectFromMapWithAllocationSite(initial_map,
4465 result = AllocateJSObjectFromMap(initial_map, NOT_TENURED);
4468 // Make sure result is NOT a global object if valid.
4469 Object* non_failure;
4470 ASSERT(!result->ToObject(&non_failure) || !non_failure->IsGlobalObject());
4476 MaybeObject* Heap::AllocateJSModule(Context* context, ScopeInfo* scope_info) {
4477 // Allocate a fresh map. Modules do not have a prototype.
4479 MaybeObject* maybe_map = AllocateMap(JS_MODULE_TYPE, JSModule::kSize);
4480 if (!maybe_map->To(&map)) return maybe_map;
4481 // Allocate the object based on the map.
4483 MaybeObject* maybe_module = AllocateJSObjectFromMap(map, TENURED);
4484 if (!maybe_module->To(&module)) return maybe_module;
4485 module->set_context(context);
4486 module->set_scope_info(scope_info);
4491 MaybeObject* Heap::AllocateJSArrayAndStorage(
4492 ElementsKind elements_kind,
4495 ArrayStorageAllocationMode mode,
4496 PretenureFlag pretenure) {
4497 MaybeObject* maybe_array = AllocateJSArray(elements_kind, pretenure);
4499 if (!maybe_array->To(&array)) return maybe_array;
4501 // TODO(mvstanton): this body of code is duplicate with AllocateJSArrayStorage
4502 // for performance reasons.
4503 ASSERT(capacity >= length);
4505 if (capacity == 0) {
4506 array->set_length(Smi::FromInt(0));
4507 array->set_elements(empty_fixed_array());
4511 FixedArrayBase* elms;
4512 MaybeObject* maybe_elms = NULL;
4513 if (IsFastDoubleElementsKind(elements_kind)) {
4514 if (mode == DONT_INITIALIZE_ARRAY_ELEMENTS) {
4515 maybe_elms = AllocateUninitializedFixedDoubleArray(capacity);
4517 ASSERT(mode == INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
4518 maybe_elms = AllocateFixedDoubleArrayWithHoles(capacity);
4521 ASSERT(IsFastSmiOrObjectElementsKind(elements_kind));
4522 if (mode == DONT_INITIALIZE_ARRAY_ELEMENTS) {
4523 maybe_elms = AllocateUninitializedFixedArray(capacity);
4525 ASSERT(mode == INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
4526 maybe_elms = AllocateFixedArrayWithHoles(capacity);
4529 if (!maybe_elms->To(&elms)) return maybe_elms;
4531 array->set_elements(elms);
4532 array->set_length(Smi::FromInt(length));
4537 MaybeObject* Heap::AllocateJSArrayStorage(
4541 ArrayStorageAllocationMode mode) {
4542 ASSERT(capacity >= length);
4544 if (capacity == 0) {
4545 array->set_length(Smi::FromInt(0));
4546 array->set_elements(empty_fixed_array());
4550 FixedArrayBase* elms;
4551 MaybeObject* maybe_elms = NULL;
4552 ElementsKind elements_kind = array->GetElementsKind();
4553 if (IsFastDoubleElementsKind(elements_kind)) {
4554 if (mode == DONT_INITIALIZE_ARRAY_ELEMENTS) {
4555 maybe_elms = AllocateUninitializedFixedDoubleArray(capacity);
4557 ASSERT(mode == INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
4558 maybe_elms = AllocateFixedDoubleArrayWithHoles(capacity);
4561 ASSERT(IsFastSmiOrObjectElementsKind(elements_kind));
4562 if (mode == DONT_INITIALIZE_ARRAY_ELEMENTS) {
4563 maybe_elms = AllocateUninitializedFixedArray(capacity);
4565 ASSERT(mode == INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
4566 maybe_elms = AllocateFixedArrayWithHoles(capacity);
4569 if (!maybe_elms->To(&elms)) return maybe_elms;
4571 array->set_elements(elms);
4572 array->set_length(Smi::FromInt(length));
4577 MaybeObject* Heap::AllocateJSArrayWithElements(
4578 FixedArrayBase* elements,
4579 ElementsKind elements_kind,
4581 PretenureFlag pretenure) {
4582 MaybeObject* maybe_array = AllocateJSArray(elements_kind, pretenure);
4584 if (!maybe_array->To(&array)) return maybe_array;
4586 array->set_elements(elements);
4587 array->set_length(Smi::FromInt(length));
4588 array->ValidateElements();
4593 MaybeObject* Heap::AllocateJSProxy(Object* handler, Object* prototype) {
4595 // TODO(rossberg): Once we optimize proxies, think about a scheme to share
4596 // maps. Will probably depend on the identity of the handler object, too.
4598 MaybeObject* maybe_map_obj = AllocateMap(JS_PROXY_TYPE, JSProxy::kSize);
4599 if (!maybe_map_obj->To<Map>(&map)) return maybe_map_obj;
4600 map->set_prototype(prototype);
4602 // Allocate the proxy object.
4604 MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
4605 if (!maybe_result->To<JSProxy>(&result)) return maybe_result;
4606 result->InitializeBody(map->instance_size(), Smi::FromInt(0));
4607 result->set_handler(handler);
4608 result->set_hash(undefined_value(), SKIP_WRITE_BARRIER);
4613 MaybeObject* Heap::AllocateJSFunctionProxy(Object* handler,
4615 Object* construct_trap,
4616 Object* prototype) {
4618 // TODO(rossberg): Once we optimize proxies, think about a scheme to share
4619 // maps. Will probably depend on the identity of the handler object, too.
4621 MaybeObject* maybe_map_obj =
4622 AllocateMap(JS_FUNCTION_PROXY_TYPE, JSFunctionProxy::kSize);
4623 if (!maybe_map_obj->To<Map>(&map)) return maybe_map_obj;
4624 map->set_prototype(prototype);
4626 // Allocate the proxy object.
4627 JSFunctionProxy* result;
4628 MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
4629 if (!maybe_result->To<JSFunctionProxy>(&result)) return maybe_result;
4630 result->InitializeBody(map->instance_size(), Smi::FromInt(0));
4631 result->set_handler(handler);
4632 result->set_hash(undefined_value(), SKIP_WRITE_BARRIER);
4633 result->set_call_trap(call_trap);
4634 result->set_construct_trap(construct_trap);
4639 MaybeObject* Heap::CopyJSObject(JSObject* source, AllocationSite* site) {
4640 // Never used to copy functions. If functions need to be copied we
4641 // have to be careful to clear the literals array.
4642 SLOW_ASSERT(!source->IsJSFunction());
4645 Map* map = source->map();
4646 int object_size = map->instance_size();
4649 ASSERT(site == NULL || AllocationSite::CanTrack(map->instance_type()));
4651 WriteBarrierMode wb_mode = UPDATE_WRITE_BARRIER;
4653 // If we're forced to always allocate, we use the general allocation
4654 // functions which may leave us with an object in old space.
4655 if (always_allocate()) {
4656 { MaybeObject* maybe_clone =
4657 AllocateRaw(object_size, NEW_SPACE, OLD_POINTER_SPACE);
4658 if (!maybe_clone->ToObject(&clone)) return maybe_clone;
4660 Address clone_address = HeapObject::cast(clone)->address();
4661 CopyBlock(clone_address,
4664 // Update write barrier for all fields that lie beyond the header.
4665 RecordWrites(clone_address,
4666 JSObject::kHeaderSize,
4667 (object_size - JSObject::kHeaderSize) / kPointerSize);
4669 wb_mode = SKIP_WRITE_BARRIER;
4671 { int adjusted_object_size = site != NULL
4672 ? object_size + AllocationMemento::kSize
4674 MaybeObject* maybe_clone =
4675 AllocateRaw(adjusted_object_size, NEW_SPACE, NEW_SPACE);
4676 if (!maybe_clone->ToObject(&clone)) return maybe_clone;
4678 SLOW_ASSERT(InNewSpace(clone));
4679 // Since we know the clone is allocated in new space, we can copy
4680 // the contents without worrying about updating the write barrier.
4681 CopyBlock(HeapObject::cast(clone)->address(),
4686 AllocationMemento* alloc_memento = reinterpret_cast<AllocationMemento*>(
4687 reinterpret_cast<Address>(clone) + object_size);
4688 InitializeAllocationMemento(alloc_memento, site);
4693 JSObject::cast(clone)->GetElementsKind() == source->GetElementsKind());
4694 FixedArrayBase* elements = FixedArrayBase::cast(source->elements());
4695 FixedArray* properties = FixedArray::cast(source->properties());
4696 // Update elements if necessary.
4697 if (elements->length() > 0) {
4699 { MaybeObject* maybe_elem;
4700 if (elements->map() == fixed_cow_array_map()) {
4701 maybe_elem = FixedArray::cast(elements);
4702 } else if (source->HasFastDoubleElements()) {
4703 maybe_elem = CopyFixedDoubleArray(FixedDoubleArray::cast(elements));
4705 maybe_elem = CopyFixedArray(FixedArray::cast(elements));
4707 if (!maybe_elem->ToObject(&elem)) return maybe_elem;
4709 JSObject::cast(clone)->set_elements(FixedArrayBase::cast(elem), wb_mode);
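// Note that copy-on-write element stores are shared with the source object
// rather than copied; a real copy is only made if one of the arrays is
// written to later.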
4711 // Update properties if necessary.
4712 if (properties->length() > 0) {
4714 { MaybeObject* maybe_prop = CopyFixedArray(properties);
4715 if (!maybe_prop->ToObject(&prop)) return maybe_prop;
4717 JSObject::cast(clone)->set_properties(FixedArray::cast(prop), wb_mode);
4719 // Return the new clone.
4724 MaybeObject* Heap::ReinitializeJSReceiver(
4725 JSReceiver* object, InstanceType type, int size) {
4726 ASSERT(type >= FIRST_JS_OBJECT_TYPE);
4728 // Allocate fresh map.
4729 // TODO(rossberg): Once we optimize proxies, cache these maps.
4731 MaybeObject* maybe = AllocateMap(type, size);
4732 if (!maybe->To<Map>(&map)) return maybe;
4734 // Check that the receiver is at least as large as the fresh object.
4735 int size_difference = object->map()->instance_size() - map->instance_size();
4736 ASSERT(size_difference >= 0);
4738 map->set_prototype(object->map()->prototype());
4740 // Allocate the backing storage for the properties.
4741 int prop_size = map->unused_property_fields() - map->inobject_properties();
4743 maybe = AllocateFixedArray(prop_size, TENURED);
4744 if (!maybe->ToObject(&properties)) return maybe;
4746 // Functions require some allocation, which might fail here.
4747 SharedFunctionInfo* shared = NULL;
4748 if (type == JS_FUNCTION_TYPE) {
4750 OneByteStringKey key(STATIC_ASCII_VECTOR("<freezing call trap>"),
4752 maybe = InternalizeStringWithKey(&key);
4753 if (!maybe->To<String>(&name)) return maybe;
4754 maybe = AllocateSharedFunctionInfo(name);
4755 if (!maybe->To<SharedFunctionInfo>(&shared)) return maybe;
4758 // Because of possible retries of this function after failure,
4759 // we must NOT fail after this point, where we have changed the type!
4761 // Reset the map for the object.
4762 object->set_map(map);
4763 JSObject* jsobj = JSObject::cast(object);
4765 // Reinitialize the object from the constructor map.
4766 InitializeJSObjectFromMap(jsobj, FixedArray::cast(properties), map);
4768 // Functions require some minimal initialization.
4769 if (type == JS_FUNCTION_TYPE) {
4770 map->set_function_with_prototype(true);
4771 InitializeFunction(JSFunction::cast(object), shared, the_hole_value());
4772 JSFunction::cast(object)->set_context(
4773 isolate()->context()->native_context());
4776 // Put in filler if the new object is smaller than the old.
4777 if (size_difference > 0) {
4778 CreateFillerObjectAt(
4779 object->address() + map->instance_size(), size_difference);
4786 MaybeObject* Heap::ReinitializeJSGlobalProxy(JSFunction* constructor,
4787 JSGlobalProxy* object) {
4788 ASSERT(constructor->has_initial_map());
4789 Map* map = constructor->initial_map();
4791 // Check that the already allocated object has the same size and type as
4792 // objects allocated using the constructor.
4793 ASSERT(map->instance_size() == object->map()->instance_size());
4794 ASSERT(map->instance_type() == object->map()->instance_type());
4796 // Allocate the backing storage for the properties.
4797 int prop_size = map->unused_property_fields() - map->inobject_properties();
4799 { MaybeObject* maybe_properties = AllocateFixedArray(prop_size, TENURED);
4800 if (!maybe_properties->ToObject(&properties)) return maybe_properties;
4803 // Reset the map for the object.
4804 object->set_map(constructor->initial_map());
4806 // Reinitialize the object from the constructor map.
4807 InitializeJSObjectFromMap(object, FixedArray::cast(properties), map);
4812 MaybeObject* Heap::AllocateStringFromOneByte(Vector<const uint8_t> string,
4813 PretenureFlag pretenure) {
4814 int length = string.length();
4816 return Heap::LookupSingleCharacterStringFromCode(string[0]);
4819 { MaybeObject* maybe_result =
4820 AllocateRawOneByteString(string.length(), pretenure);
4821 if (!maybe_result->ToObject(&result)) return maybe_result;
4824 // Copy the characters into the new object.
4825 CopyChars(SeqOneByteString::cast(result)->GetChars(),
4832 MaybeObject* Heap::AllocateStringFromUtf8Slow(Vector<const char> string,
4833 int non_ascii_start,
4834 PretenureFlag pretenure) {
4835 // Continue counting the number of characters in the UTF-8 string, starting
4836 // from the first non-ASCII character or word.
4837 Access<UnicodeCache::Utf8Decoder>
4838 decoder(isolate_->unicode_cache()->utf8_decoder());
4839 decoder->Reset(string.start() + non_ascii_start,
4840 string.length() - non_ascii_start);
4841 int utf16_length = decoder->Utf16Length();
4842 ASSERT(utf16_length > 0);
4846 int chars = non_ascii_start + utf16_length;
4847 MaybeObject* maybe_result = AllocateRawTwoByteString(chars, pretenure);
4848 if (!maybe_result->ToObject(&result)) return maybe_result;
4850 // Convert and copy the characters into the new object.
4851 SeqTwoByteString* twobyte = SeqTwoByteString::cast(result);
4852 // Copy the ASCII prefix.
4853 uint16_t* data = twobyte->GetChars();
4854 if (non_ascii_start != 0) {
4855 const char* ascii_data = string.start();
4856 for (int i = 0; i < non_ascii_start; i++) {
4857 *data++ = *ascii_data++;
4860 // Now write the remainder.
4861 decoder->WriteUtf16(data, utf16_length);
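// Example: for the UTF-8 input "a\xCE\xB2c" ("a", U+03B2, "c"),
// non_ascii_start is 1 and the decoder reports a UTF-16 length of 2, so a
// three-character two-byte string is allocated; the ASCII prefix is copied
// verbatim and the decoder writes the remaining two code units.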
4866 MaybeObject* Heap::AllocateStringFromTwoByte(Vector<const uc16> string,
4867 PretenureFlag pretenure) {
4868 // Check whether the string consists of one-byte characters only.
4870 int length = string.length();
4871 const uc16* start = string.start();
4873 if (String::IsOneByte(start, length)) {
4874 MaybeObject* maybe_result = AllocateRawOneByteString(length, pretenure);
4875 if (!maybe_result->ToObject(&result)) return maybe_result;
4876 CopyChars(SeqOneByteString::cast(result)->GetChars(), start, length);
4877 } else { // It's not a one-byte string.
4878 MaybeObject* maybe_result = AllocateRawTwoByteString(length, pretenure);
4879 if (!maybe_result->ToObject(&result)) return maybe_result;
4880 CopyChars(SeqTwoByteString::cast(result)->GetChars(), start, length);
4886 Map* Heap::InternalizedStringMapForString(String* string) {
4887 // If the string is in new space it cannot be used as internalized.
4888 if (InNewSpace(string)) return NULL;
4890 // Find the corresponding internalized string map for strings.
4891 switch (string->map()->instance_type()) {
4892 case STRING_TYPE: return internalized_string_map();
4893 case ASCII_STRING_TYPE: return ascii_internalized_string_map();
4894 case CONS_STRING_TYPE: return cons_internalized_string_map();
4895 case CONS_ASCII_STRING_TYPE: return cons_ascii_internalized_string_map();
4896 case EXTERNAL_STRING_TYPE: return external_internalized_string_map();
4897 case EXTERNAL_ASCII_STRING_TYPE:
4898 return external_ascii_internalized_string_map();
4899 case EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE:
4900 return external_internalized_string_with_one_byte_data_map();
4901 case SHORT_EXTERNAL_STRING_TYPE:
4902 return short_external_internalized_string_map();
4903 case SHORT_EXTERNAL_ASCII_STRING_TYPE:
4904 return short_external_ascii_internalized_string_map();
4905 case SHORT_EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE:
4906 return short_external_internalized_string_with_one_byte_data_map();
4907 default: return NULL; // No match found.
4912 static inline void WriteOneByteData(Vector<const char> vector,
4915 // Only works for ASCII data.
4916 ASSERT(vector.length() == len);
4917 OS::MemCopy(chars, vector.start(), len);
4920 static inline void WriteTwoByteData(Vector<const char> vector,
4923 const uint8_t* stream = reinterpret_cast<const uint8_t*>(vector.start());
4924 unsigned stream_length = vector.length();
4925 while (stream_length != 0) {
4926 unsigned consumed = 0;
4927 uint32_t c = unibrow::Utf8::ValueOf(stream, stream_length, &consumed);
4928 ASSERT(c != unibrow::Utf8::kBadChar);
4929 ASSERT(consumed <= stream_length);
4930 stream_length -= consumed;
4932 if (c > unibrow::Utf16::kMaxNonSurrogateCharCode) {
4935 *chars++ = unibrow::Utf16::LeadSurrogate(c);
4936 *chars++ = unibrow::Utf16::TrailSurrogate(c);
4943 ASSERT(stream_length == 0);
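// Code points outside the BMP expand into a surrogate pair, i.e. two output
// slots, which is why string sizes are computed from the UTF-16 length rather
// than from the code point count.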
4948 static inline void WriteOneByteData(String* s, uint8_t* chars, int len) {
4949 ASSERT(s->length() == len);
4950 String::WriteToFlat(s, chars, 0, len);
4954 static inline void WriteTwoByteData(String* s, uint16_t* chars, int len) {
4955 ASSERT(s->length() == len);
4956 String::WriteToFlat(s, chars, 0, len);
4960 template<bool is_one_byte, typename T>
4961 MaybeObject* Heap::AllocateInternalizedStringImpl(
4962 T t, int chars, uint32_t hash_field) {
4964 // Compute map and object size.
4969 if (chars > SeqOneByteString::kMaxLength) {
4970 return Failure::OutOfMemoryException(0x9);
4972 map = ascii_internalized_string_map();
4973 size = SeqOneByteString::SizeFor(chars);
4975 if (chars > SeqTwoByteString::kMaxLength) {
4976 return Failure::OutOfMemoryException(0xa);
4978 map = internalized_string_map();
4979 size = SeqTwoByteString::SizeFor(chars);
4981 AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, TENURED);
4985 { MaybeObject* maybe_result = AllocateRaw(size, space, OLD_DATA_SPACE);
4986 if (!maybe_result->ToObject(&result)) return maybe_result;
4989 reinterpret_cast<HeapObject*>(result)->set_map_no_write_barrier(map);
4990 // Set length and hash fields of the allocated string.
4991 String* answer = String::cast(result);
4992 answer->set_length(chars);
4993 answer->set_hash_field(hash_field);
4995 ASSERT_EQ(size, answer->Size());
4998 WriteOneByteData(t, SeqOneByteString::cast(answer)->GetChars(), chars);
5000 WriteTwoByteData(t, SeqTwoByteString::cast(answer)->GetChars(), chars);
5006 // Need explicit instantiations.
5008 MaybeObject* Heap::AllocateInternalizedStringImpl<true>(String*, int, uint32_t);
5010 MaybeObject* Heap::AllocateInternalizedStringImpl<false>(
5011 String*, int, uint32_t);
5013 MaybeObject* Heap::AllocateInternalizedStringImpl<false>(
5014 Vector<const char>, int, uint32_t);
5017 MaybeObject* Heap::AllocateRawOneByteString(int length,
5018 PretenureFlag pretenure) {
5019 if (length < 0 || length > SeqOneByteString::kMaxLength) {
5020 return Failure::OutOfMemoryException(0xb);
5022 int size = SeqOneByteString::SizeFor(length);
5023 ASSERT(size <= SeqOneByteString::kMaxSize);
5024 AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
5027 { MaybeObject* maybe_result = AllocateRaw(size, space, OLD_DATA_SPACE);
5028 if (!maybe_result->ToObject(&result)) return maybe_result;
5031 // Partially initialize the object.
5032 HeapObject::cast(result)->set_map_no_write_barrier(ascii_string_map());
5033 String::cast(result)->set_length(length);
5034 String::cast(result)->set_hash_field(String::kEmptyHashField);
5035 ASSERT_EQ(size, HeapObject::cast(result)->Size());
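// "Partially" because the character payload is left uninitialized; it holds
// no pointers, so this is safe for the GC, and callers fill it in right away.
// The two-byte variant below behaves the same way.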
5041 MaybeObject* Heap::AllocateRawTwoByteString(int length,
5042 PretenureFlag pretenure) {
5043 if (length < 0 || length > SeqTwoByteString::kMaxLength) {
5044 return Failure::OutOfMemoryException(0xc);
5046 int size = SeqTwoByteString::SizeFor(length);
5047 ASSERT(size <= SeqTwoByteString::kMaxSize);
5048 AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
5051 { MaybeObject* maybe_result = AllocateRaw(size, space, OLD_DATA_SPACE);
5052 if (!maybe_result->ToObject(&result)) return maybe_result;
5055 // Partially initialize the object.
5056 HeapObject::cast(result)->set_map_no_write_barrier(string_map());
5057 String::cast(result)->set_length(length);
5058 String::cast(result)->set_hash_field(String::kEmptyHashField);
5059 ASSERT_EQ(size, HeapObject::cast(result)->Size());
5064 MaybeObject* Heap::AllocateJSArray(
5065 ElementsKind elements_kind,
5066 PretenureFlag pretenure) {
5067 Context* native_context = isolate()->context()->native_context();
5068 JSFunction* array_function = native_context->array_function();
5069 Map* map = array_function->initial_map();
5070 Map* transition_map = isolate()->get_initial_js_array_map(elements_kind);
5071 if (transition_map != NULL) map = transition_map;
5072 return AllocateJSObjectFromMap(map, pretenure);
5076 MaybeObject* Heap::AllocateEmptyFixedArray() {
5077 int size = FixedArray::SizeFor(0);
5079 { MaybeObject* maybe_result =
5080 AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE);
5081 if (!maybe_result->ToObject(&result)) return maybe_result;
5083 // Initialize the object.
5084 reinterpret_cast<FixedArray*>(result)->set_map_no_write_barrier(
5086 reinterpret_cast<FixedArray*>(result)->set_length(0);
5091 MaybeObject* Heap::AllocateEmptyExternalArray(ExternalArrayType array_type) {
5092 return AllocateExternalArray(0, array_type, NULL, TENURED);
5096 MaybeObject* Heap::CopyFixedArrayWithMap(FixedArray* src, Map* map) {
5097 int len = src->length();
5099 { MaybeObject* maybe_obj = AllocateRawFixedArray(len, NOT_TENURED);
5100 if (!maybe_obj->ToObject(&obj)) return maybe_obj;
5102 if (InNewSpace(obj)) {
5103 HeapObject* dst = HeapObject::cast(obj);
5104 dst->set_map_no_write_barrier(map);
5105 CopyBlock(dst->address() + kPointerSize,
5106 src->address() + kPointerSize,
5107 FixedArray::SizeFor(len) - kPointerSize);
5110 HeapObject::cast(obj)->set_map_no_write_barrier(map);
5111 FixedArray* result = FixedArray::cast(obj);
5112 result->set_length(len);
5115 DisallowHeapAllocation no_gc;
5116 WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
5117 for (int i = 0; i < len; i++) result->set(i, src->get(i), mode);
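// Two copy strategies are used above: a raw block copy when the destination
// is in new space (no write barrier is needed there), and an element-wise
// copy using the destination's write barrier mode otherwise.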
5122 MaybeObject* Heap::CopyFixedDoubleArrayWithMap(FixedDoubleArray* src,
5124 int len = src->length();
5126 { MaybeObject* maybe_obj = AllocateRawFixedDoubleArray(len, NOT_TENURED);
5127 if (!maybe_obj->ToObject(&obj)) return maybe_obj;
5129 HeapObject* dst = HeapObject::cast(obj);
5130 dst->set_map_no_write_barrier(map);
5132 dst->address() + FixedDoubleArray::kLengthOffset,
5133 src->address() + FixedDoubleArray::kLengthOffset,
5134 FixedDoubleArray::SizeFor(len) - FixedDoubleArray::kLengthOffset);
5139 MaybeObject* Heap::CopyConstantPoolArrayWithMap(ConstantPoolArray* src,
5141 int int64_entries = src->count_of_int64_entries();
5142 int ptr_entries = src->count_of_ptr_entries();
5143 int int32_entries = src->count_of_int32_entries();
5145 { MaybeObject* maybe_obj =
5146 AllocateConstantPoolArray(int64_entries, ptr_entries, int32_entries);
5147 if (!maybe_obj->ToObject(&obj)) return maybe_obj;
5149 HeapObject* dst = HeapObject::cast(obj);
5150 dst->set_map_no_write_barrier(map);
5152 dst->address() + ConstantPoolArray::kLengthOffset,
5153 src->address() + ConstantPoolArray::kLengthOffset,
5154 ConstantPoolArray::SizeFor(int64_entries, ptr_entries, int32_entries)
5155 - ConstantPoolArray::kLengthOffset);
5160 MaybeObject* Heap::AllocateRawFixedArray(int length, PretenureFlag pretenure) {
5161 if (length < 0 || length > FixedArray::kMaxLength) {
5162 return Failure::OutOfMemoryException(0xe);
5164 int size = FixedArray::SizeFor(length);
5165 AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, pretenure);
5167 return AllocateRaw(size, space, OLD_POINTER_SPACE);
5171 MaybeObject* Heap::AllocateFixedArrayWithFiller(int length,
5172 PretenureFlag pretenure,
5174 ASSERT(length >= 0);
5175 ASSERT(empty_fixed_array()->IsFixedArray());
5176 if (length == 0) return empty_fixed_array();
5178 ASSERT(!InNewSpace(filler));
5180 { MaybeObject* maybe_result = AllocateRawFixedArray(length, pretenure);
5181 if (!maybe_result->ToObject(&result)) return maybe_result;
5184 HeapObject::cast(result)->set_map_no_write_barrier(fixed_array_map());
5185 FixedArray* array = FixedArray::cast(result);
5186 array->set_length(length);
5187 MemsetPointer(array->data_start(), filler, length);
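// MemsetPointer skips the write barrier; that is only safe because the filler
// (undefined or the hole) lives in old space, hence the
// ASSERT(!InNewSpace(filler)) above.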
5192 MaybeObject* Heap::AllocateFixedArray(int length, PretenureFlag pretenure) {
5193 return AllocateFixedArrayWithFiller(length, pretenure, undefined_value());
5197 MaybeObject* Heap::AllocateFixedArrayWithHoles(int length,
5198 PretenureFlag pretenure) {
5199 return AllocateFixedArrayWithFiller(length, pretenure, the_hole_value());
5203 MaybeObject* Heap::AllocateUninitializedFixedArray(int length) {
5204 if (length == 0) return empty_fixed_array();
5207 { MaybeObject* maybe_obj = AllocateRawFixedArray(length, NOT_TENURED);
5208 if (!maybe_obj->ToObject(&obj)) return maybe_obj;
5211 reinterpret_cast<FixedArray*>(obj)->set_map_no_write_barrier(
5213 FixedArray::cast(obj)->set_length(length);
5218 MaybeObject* Heap::AllocateEmptyFixedDoubleArray() {
5219 int size = FixedDoubleArray::SizeFor(0);
5221 { MaybeObject* maybe_result =
5222 AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE);
5223 if (!maybe_result->ToObject(&result)) return maybe_result;
5225 // Initialize the object.
5226 reinterpret_cast<FixedDoubleArray*>(result)->set_map_no_write_barrier(
5227 fixed_double_array_map());
5228 reinterpret_cast<FixedDoubleArray*>(result)->set_length(0);
5233 MaybeObject* Heap::AllocateUninitializedFixedDoubleArray(
5235 PretenureFlag pretenure) {
5236 if (length == 0) return empty_fixed_array();
5238 Object* elements_object;
5239 MaybeObject* maybe_obj = AllocateRawFixedDoubleArray(length, pretenure);
5240 if (!maybe_obj->ToObject(&elements_object)) return maybe_obj;
5241 FixedDoubleArray* elements =
5242 reinterpret_cast<FixedDoubleArray*>(elements_object);
5244 elements->set_map_no_write_barrier(fixed_double_array_map());
5245 elements->set_length(length);
5250 MaybeObject* Heap::AllocateFixedDoubleArrayWithHoles(
5252 PretenureFlag pretenure) {
5253 if (length == 0) return empty_fixed_array();
5255 Object* elements_object;
5256 MaybeObject* maybe_obj = AllocateRawFixedDoubleArray(length, pretenure);
5257 if (!maybe_obj->ToObject(&elements_object)) return maybe_obj;
5258 FixedDoubleArray* elements =
5259 reinterpret_cast<FixedDoubleArray*>(elements_object);
5261 for (int i = 0; i < length; ++i) {
5262 elements->set_the_hole(i);
5265 elements->set_map_no_write_barrier(fixed_double_array_map());
5266 elements->set_length(length);
5271 MaybeObject* Heap::AllocateRawFixedDoubleArray(int length,
5272 PretenureFlag pretenure) {
5273 if (length < 0 || length > FixedDoubleArray::kMaxLength) {
5274 return Failure::OutOfMemoryException(0xf);
5276 int size = FixedDoubleArray::SizeFor(length);
5277 #ifndef V8_HOST_ARCH_64_BIT
5278 size += kPointerSize;
5280 AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
5283 { MaybeObject* maybe_object = AllocateRaw(size, space, OLD_DATA_SPACE);
5284 if (!maybe_object->To<HeapObject>(&object)) return maybe_object;
5287 return EnsureDoubleAligned(this, object, size);
5291 MaybeObject* Heap::AllocateConstantPoolArray(int number_of_int64_entries,
5292 int number_of_ptr_entries,
5293 int number_of_int32_entries) {
5294 ASSERT(number_of_int64_entries > 0 || number_of_ptr_entries > 0 ||
5295 number_of_int32_entries > 0);
5296 int size = ConstantPoolArray::SizeFor(number_of_int64_entries,
5297 number_of_ptr_entries,
5298 number_of_int32_entries);
5299 #ifndef V8_HOST_ARCH_64_BIT
5300 size += kPointerSize;
5302 AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, TENURED);
5305 { MaybeObject* maybe_object = AllocateRaw(size, space, OLD_POINTER_SPACE);
5306 if (!maybe_object->To<HeapObject>(&object)) return maybe_object;
5308 object = EnsureDoubleAligned(this, object, size);
5309 HeapObject::cast(object)->set_map_no_write_barrier(constant_pool_array_map());
5311 ConstantPoolArray* constant_pool =
5312 reinterpret_cast<ConstantPoolArray*>(object);
5313 constant_pool->SetEntryCounts(number_of_int64_entries,
5314 number_of_ptr_entries,
5315 number_of_int32_entries);
5316 if (number_of_ptr_entries > 0) {
5318 HeapObject::RawField(
5320 constant_pool->OffsetOfElementAt(constant_pool->first_ptr_index())),
5322 number_of_ptr_entries);
5324 return constant_pool;
5328 MaybeObject* Heap::AllocateEmptyConstantPoolArray() {
5329 int size = ConstantPoolArray::SizeFor(0, 0, 0);
5331 { MaybeObject* maybe_result =
5332 AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE);
5333 if (!maybe_result->ToObject(&result)) return maybe_result;
5335 HeapObject::cast(result)->set_map_no_write_barrier(constant_pool_array_map());
5336 ConstantPoolArray::cast(result)->SetEntryCounts(0, 0, 0);
5341 MaybeObject* Heap::AllocateHashTable(int length, PretenureFlag pretenure) {
5343 { MaybeObject* maybe_result = AllocateFixedArray(length, pretenure);
5344 if (!maybe_result->ToObject(&result)) return maybe_result;
5346 reinterpret_cast<HeapObject*>(result)->set_map_no_write_barrier(
5348 ASSERT(result->IsHashTable());
5353 MaybeObject* Heap::AllocateSymbol() {
5354 // Statically ensure that it is safe to allocate symbols in paged spaces.
5355 STATIC_ASSERT(Symbol::kSize <= Page::kMaxRegularHeapObjectSize);
5358 MaybeObject* maybe =
5359 AllocateRaw(Symbol::kSize, OLD_POINTER_SPACE, OLD_POINTER_SPACE);
5360 if (!maybe->ToObject(&result)) return maybe;
5362 HeapObject::cast(result)->set_map_no_write_barrier(symbol_map());
5364 // Generate a random hash value.
5368 hash = isolate()->random_number_generator()->NextInt() & Name::kHashBitMask;
5370 } while (hash == 0 && attempts < 30);
5371 if (hash == 0) hash = 1; // never return 0
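// Tag the field as not holding an array index so the random hash is never
// misinterpreted as a cached array index.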
5373 Symbol::cast(result)->set_hash_field(
5374 Name::kIsNotArrayIndexMask | (hash << Name::kHashShift));
5375 Symbol::cast(result)->set_name(undefined_value());
5376 Symbol::cast(result)->set_flags(Smi::FromInt(0));
5378 ASSERT(!Symbol::cast(result)->is_private());
5383 MaybeObject* Heap::AllocatePrivateSymbol() {
5384 MaybeObject* maybe = AllocateSymbol();
5386 if (!maybe->To(&symbol)) return maybe;
5387 symbol->set_is_private(true);
5392 MaybeObject* Heap::AllocateNativeContext() {
5394 { MaybeObject* maybe_result =
5395 AllocateFixedArray(Context::NATIVE_CONTEXT_SLOTS);
5396 if (!maybe_result->ToObject(&result)) return maybe_result;
5398 Context* context = reinterpret_cast<Context*>(result);
5399 context->set_map_no_write_barrier(native_context_map());
5400 context->set_js_array_maps(undefined_value());
5401 ASSERT(context->IsNativeContext());
5402 ASSERT(result->IsContext());
5407 MaybeObject* Heap::AllocateGlobalContext(JSFunction* function,
5408 ScopeInfo* scope_info) {
5410 { MaybeObject* maybe_result =
5411 AllocateFixedArray(scope_info->ContextLength(), TENURED);
5412 if (!maybe_result->ToObject(&result)) return maybe_result;
5414 Context* context = reinterpret_cast<Context*>(result);
5415 context->set_map_no_write_barrier(global_context_map());
5416 context->set_closure(function);
5417 context->set_previous(function->context());
5418 context->set_extension(scope_info);
5419 context->set_global_object(function->context()->global_object());
5420 ASSERT(context->IsGlobalContext());
5421 ASSERT(result->IsContext());
5426 MaybeObject* Heap::AllocateModuleContext(ScopeInfo* scope_info) {
5428 { MaybeObject* maybe_result =
5429 AllocateFixedArray(scope_info->ContextLength(), TENURED);
5430 if (!maybe_result->ToObject(&result)) return maybe_result;
5432 Context* context = reinterpret_cast<Context*>(result);
5433 context->set_map_no_write_barrier(module_context_map());
5434 // Instance link will be set later.
5435 context->set_extension(Smi::FromInt(0));
5440 MaybeObject* Heap::AllocateFunctionContext(int length, JSFunction* function) {
5441 ASSERT(length >= Context::MIN_CONTEXT_SLOTS);
5443 { MaybeObject* maybe_result = AllocateFixedArray(length);
5444 if (!maybe_result->ToObject(&result)) return maybe_result;
5446 Context* context = reinterpret_cast<Context*>(result);
5447 context->set_map_no_write_barrier(function_context_map());
5448 context->set_closure(function);
5449 context->set_previous(function->context());
5450 context->set_extension(Smi::FromInt(0));
5451 context->set_global_object(function->context()->global_object());
5456 MaybeObject* Heap::AllocateCatchContext(JSFunction* function,
5459 Object* thrown_object) {
5460 STATIC_ASSERT(Context::MIN_CONTEXT_SLOTS == Context::THROWN_OBJECT_INDEX);
5462 { MaybeObject* maybe_result =
5463 AllocateFixedArray(Context::MIN_CONTEXT_SLOTS + 1);
5464 if (!maybe_result->ToObject(&result)) return maybe_result;
5466 Context* context = reinterpret_cast<Context*>(result);
5467 context->set_map_no_write_barrier(catch_context_map());
5468 context->set_closure(function);
5469 context->set_previous(previous);
5470 context->set_extension(name);
5471 context->set_global_object(previous->global_object());
5472 context->set(Context::THROWN_OBJECT_INDEX, thrown_object);
5477 MaybeObject* Heap::AllocateWithContext(JSFunction* function,
5479 JSReceiver* extension) {
5481 { MaybeObject* maybe_result = AllocateFixedArray(Context::MIN_CONTEXT_SLOTS);
5482 if (!maybe_result->ToObject(&result)) return maybe_result;
5484 Context* context = reinterpret_cast<Context*>(result);
5485 context->set_map_no_write_barrier(with_context_map());
5486 context->set_closure(function);
5487 context->set_previous(previous);
5488 context->set_extension(extension);
5489 context->set_global_object(previous->global_object());
5494 MaybeObject* Heap::AllocateBlockContext(JSFunction* function,
5496 ScopeInfo* scope_info) {
5498 { MaybeObject* maybe_result =
5499 AllocateFixedArrayWithHoles(scope_info->ContextLength());
5500 if (!maybe_result->ToObject(&result)) return maybe_result;
5502 Context* context = reinterpret_cast<Context*>(result);
5503 context->set_map_no_write_barrier(block_context_map());
5504 context->set_closure(function);
5505 context->set_previous(previous);
5506 context->set_extension(scope_info);
5507 context->set_global_object(previous->global_object());
5512 MaybeObject* Heap::AllocateScopeInfo(int length) {
5513 FixedArray* scope_info;
5514 MaybeObject* maybe_scope_info = AllocateFixedArray(length, TENURED);
5515 if (!maybe_scope_info->To(&scope_info)) return maybe_scope_info;
5516 scope_info->set_map_no_write_barrier(scope_info_map());
5521 MaybeObject* Heap::AllocateExternal(void* value) {
5523 { MaybeObject* maybe_result = AllocateForeign(static_cast<Address>(value));
5524 if (!maybe_result->To(&foreign)) return maybe_result;
5527 { MaybeObject* maybe_result = AllocateJSObjectFromMap(external_map());
5528 if (!maybe_result->To(&external)) return maybe_result;
5530 external->SetInternalField(0, foreign);
5535 MaybeObject* Heap::AllocateStruct(InstanceType type) {
5538 #define MAKE_CASE(NAME, Name, name) \
5539 case NAME##_TYPE: map = name##_map(); break;
5540 STRUCT_LIST(MAKE_CASE)
5544 return Failure::InternalError();
5546 int size = map->instance_size();
5547 AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, TENURED);
5549 { MaybeObject* maybe_result = Allocate(map, space);
5550 if (!maybe_result->ToObject(&result)) return maybe_result;
5552 Struct::cast(result)->InitializeBody(size);
5557 bool Heap::IsHeapIterable() {
5558 return (!old_pointer_space()->was_swept_conservatively() &&
5559 !old_data_space()->was_swept_conservatively());
5563 void Heap::EnsureHeapIsIterable() {
5564 ASSERT(AllowHeapAllocation::IsAllowed());
5565 if (!IsHeapIterable()) {
5566 CollectAllGarbage(kMakeHeapIterableMask, "Heap::EnsureHeapIsIterable");
5568 ASSERT(IsHeapIterable());
5572 void Heap::AdvanceIdleIncrementalMarking(intptr_t step_size) {
5573 incremental_marking()->Step(step_size,
5574 IncrementalMarking::NO_GC_VIA_STACK_GUARD);
5576 if (incremental_marking()->IsComplete()) {
5577 bool uncommit = false;
5578 if (gc_count_at_last_idle_gc_ == gc_count_) {
5579 // No GC since the last full GC, the mutator is probably not active.
5580 isolate_->compilation_cache()->Clear();
5583 CollectAllGarbage(kNoGCFlags, "idle notification: finalize incremental");
5584 mark_sweeps_since_idle_round_started_++;
5585 gc_count_at_last_idle_gc_ = gc_count_;
5587 new_space_.Shrink();
5588 UncommitFromSpace();
5594 bool Heap::IdleNotification(int hint) {
5595 // Hints greater than this value indicate that
5596 // the embedder is requesting a lot of GC work.
5597 const int kMaxHint = 1000;
5598 const int kMinHintForIncrementalMarking = 10;
5599 // Minimal hint that allows a full GC to be performed.
5600 const int kMinHintForFullGC = 100;
5601 intptr_t size_factor = Min(Max(hint, 20), kMaxHint) / 4;
5602 // The size factor is in range [5..250]. The numbers here are chosen from
5603 // experiments. If you change them, make sure to test with
5604 // chrome/performance_ui_tests --gtest_filter="GeneralMixMemoryTest.*".
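// For example, the maximum hint of 1000 yields a size factor of 250, while
// any hint of 20 or less yields the minimum size factor of 5.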
5605 intptr_t step_size =
5606 size_factor * IncrementalMarking::kAllocatedThreshold;
5608 if (contexts_disposed_ > 0) {
5609 contexts_disposed_ = 0;
5610 int mark_sweep_time = Min(TimeMarkSweepWouldTakeInMs(), 1000);
5611 if (hint >= mark_sweep_time && !FLAG_expose_gc &&
5612 incremental_marking()->IsStopped()) {
5613 HistogramTimerScope scope(isolate_->counters()->gc_context());
5614 CollectAllGarbage(kReduceMemoryFootprintMask,
5615 "idle notification: contexts disposed");
5617 AdvanceIdleIncrementalMarking(step_size);
5620 // After context disposal there is likely a lot of garbage remaining, reset
5621 // the idle notification counters in order to trigger more incremental GCs
5622 // on subsequent idle notifications.
5627 if (!FLAG_incremental_marking || Serializer::enabled()) {
5628 return IdleGlobalGC();
5631 // By doing small chunks of GC work in each IdleNotification, we
5632 // perform a round of incremental GCs and after that wait until
5633 // the mutator creates enough garbage to justify a new round.
5634 // An incremental GC progresses as follows:
5635 // 1. many incremental marking steps,
5636 // 2. one old space mark-sweep-compact,
5637 // 3. many lazy sweep steps.
5638 // Use mark-sweep-compact events to count incremental GCs in a round.
5640 if (incremental_marking()->IsStopped()) {
5641 if (!mark_compact_collector()->AreSweeperThreadsActivated() &&
5642 !IsSweepingComplete() &&
5643 !AdvanceSweepers(static_cast<int>(step_size))) {
5648 if (mark_sweeps_since_idle_round_started_ >= kMaxMarkSweepsInIdleRound) {
5649 if (EnoughGarbageSinceLastIdleRound()) {
5656 int remaining_mark_sweeps = kMaxMarkSweepsInIdleRound -
5657 mark_sweeps_since_idle_round_started_;
5659 if (incremental_marking()->IsStopped()) {
5660 // If there are no more than two GCs left in this idle round and we are
5661 // allowed to do a full GC, then make those GCs full in order to compact the code space.
5663 // TODO(ulan): Once we enable code compaction for incremental marking,
5664 // we can get rid of this special case and always start incremental marking.
5665 if (remaining_mark_sweeps <= 2 && hint >= kMinHintForFullGC) {
5666 CollectAllGarbage(kReduceMemoryFootprintMask,
5667 "idle notification: finalize idle round");
5668 mark_sweeps_since_idle_round_started_++;
5669 } else if (hint > kMinHintForIncrementalMarking) {
5670 incremental_marking()->Start();
5673 if (!incremental_marking()->IsStopped() &&
5674 hint > kMinHintForIncrementalMarking) {
5675 AdvanceIdleIncrementalMarking(step_size);
5678 if (mark_sweeps_since_idle_round_started_ >= kMaxMarkSweepsInIdleRound) {
5687 bool Heap::IdleGlobalGC() {
5688 static const int kIdlesBeforeScavenge = 4;
5689 static const int kIdlesBeforeMarkSweep = 7;
5690 static const int kIdlesBeforeMarkCompact = 8;
5691 static const int kMaxIdleCount = kIdlesBeforeMarkCompact + 1;
5692 static const unsigned int kGCsBetweenCleanup = 4;
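// Consecutive idle notifications escalate the cleanup: first a new-space
// scavenge, then mark-sweeps that also clear the compilation cache, and
// finally a mark-compact, after which the notification count is reset.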
5694 if (!last_idle_notification_gc_count_init_) {
5695 last_idle_notification_gc_count_ = gc_count_;
5696 last_idle_notification_gc_count_init_ = true;
5699 bool uncommit = true;
5700 bool finished = false;
5702 // Reset the number of idle notifications received when a number of
5703 // GCs have taken place. This allows another round of cleanup based
5704 // on idle notifications if enough work has been carried out to
5705 // provoke a number of garbage collections.
5706 if (gc_count_ - last_idle_notification_gc_count_ < kGCsBetweenCleanup) {
5707 number_idle_notifications_ =
5708 Min(number_idle_notifications_ + 1, kMaxIdleCount);
5710 number_idle_notifications_ = 0;
5711 last_idle_notification_gc_count_ = gc_count_;
5714 if (number_idle_notifications_ == kIdlesBeforeScavenge) {
5715 CollectGarbage(NEW_SPACE, "idle notification");
5716 new_space_.Shrink();
5717 last_idle_notification_gc_count_ = gc_count_;
5718 } else if (number_idle_notifications_ == kIdlesBeforeMarkSweep) {
5719 // Before doing the mark-sweep collections we clear the
5720 // compilation cache to avoid hanging on to source code and
5721 // generated code for cached functions.
5722 isolate_->compilation_cache()->Clear();
5724 CollectAllGarbage(kReduceMemoryFootprintMask, "idle notification");
5725 new_space_.Shrink();
5726 last_idle_notification_gc_count_ = gc_count_;
5728 } else if (number_idle_notifications_ == kIdlesBeforeMarkCompact) {
5729 CollectAllGarbage(kReduceMemoryFootprintMask, "idle notification");
5730 new_space_.Shrink();
5731 last_idle_notification_gc_count_ = gc_count_;
5732 number_idle_notifications_ = 0;
5734 } else if (number_idle_notifications_ > kIdlesBeforeMarkCompact) {
5735 // If we have received more than kIdlesBeforeMarkCompact idle
5736 // notifications we do not perform any cleanup because we don't
5737 // expect to gain much by doing so.
5741 if (uncommit) UncommitFromSpace();
5749 void Heap::Print() {
5750 if (!HasBeenSetUp()) return;
5751 isolate()->PrintStack(stdout);
5752 AllSpaces spaces(this);
5753 for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
5759 void Heap::ReportCodeStatistics(const char* title) {
5760 PrintF(">>>>>> Code Stats (%s) >>>>>>\n", title);
5761 PagedSpace::ResetCodeStatistics(isolate());
5762 // We do not look for code in new space, map space, or old space. If code
5763 // somehow ends up in those spaces, we would miss it here.
5764 code_space_->CollectCodeStatistics();
5765 lo_space_->CollectCodeStatistics();
5766 PagedSpace::ReportCodeStatistics(isolate());
5770 // This function expects that NewSpace's allocated objects histogram is
5771 // populated (via a call to CollectStatistics or else as a side effect of a
5772 // just-completed scavenge collection).
5773 void Heap::ReportHeapStatistics(const char* title) {
5775 PrintF(">>>>>> =============== %s (%d) =============== >>>>>>\n",
5777 PrintF("old_generation_allocation_limit_ %" V8_PTR_PREFIX "d\n",
5778 old_generation_allocation_limit_);
5781 PrintF("Number of handles : %d\n", HandleScope::NumberOfHandles(isolate_));
5782 isolate_->global_handles()->PrintStats();
5785 PrintF("Heap statistics : ");
5786 isolate_->memory_allocator()->ReportStatistics();
5787 PrintF("To space : ");
5788 new_space_.ReportStatistics();
5789 PrintF("Old pointer space : ");
5790 old_pointer_space_->ReportStatistics();
5791 PrintF("Old data space : ");
5792 old_data_space_->ReportStatistics();
5793 PrintF("Code space : ");
5794 code_space_->ReportStatistics();
5795 PrintF("Map space : ");
5796 map_space_->ReportStatistics();
5797 PrintF("Cell space : ");
5798 cell_space_->ReportStatistics();
5799 PrintF("PropertyCell space : ");
5800 property_cell_space_->ReportStatistics();
5801 PrintF("Large object space : ");
5802 lo_space_->ReportStatistics();
5803 PrintF(">>>>>> ========================================= >>>>>>\n");
5808 bool Heap::Contains(HeapObject* value) {
5809 return Contains(value->address());
5813 bool Heap::Contains(Address addr) {
5814 if (isolate_->memory_allocator()->IsOutsideAllocatedSpace(addr)) return false;
5815 return HasBeenSetUp() &&
5816 (new_space_.ToSpaceContains(addr) ||
5817 old_pointer_space_->Contains(addr) ||
5818 old_data_space_->Contains(addr) ||
5819 code_space_->Contains(addr) ||
5820 map_space_->Contains(addr) ||
5821 cell_space_->Contains(addr) ||
5822 property_cell_space_->Contains(addr) ||
5823 lo_space_->SlowContains(addr));
5827 bool Heap::InSpace(HeapObject* value, AllocationSpace space) {
5828 return InSpace(value->address(), space);
5832 bool Heap::InSpace(Address addr, AllocationSpace space) {
5833 if (isolate_->memory_allocator()->IsOutsideAllocatedSpace(addr)) return false;
5834 if (!HasBeenSetUp()) return false;
5838 return new_space_.ToSpaceContains(addr);
5839 case OLD_POINTER_SPACE:
5840 return old_pointer_space_->Contains(addr);
5841 case OLD_DATA_SPACE:
5842 return old_data_space_->Contains(addr);
5844 return code_space_->Contains(addr);
5846 return map_space_->Contains(addr);
5848 return cell_space_->Contains(addr);
5849 case PROPERTY_CELL_SPACE:
5850 return property_cell_space_->Contains(addr);
5852 return lo_space_->SlowContains(addr);
5860 void Heap::Verify() {
5861 CHECK(HasBeenSetUp());
5863 store_buffer()->Verify();
5865 VerifyPointersVisitor visitor;
5866 IterateRoots(&visitor, VISIT_ONLY_STRONG);
5868 new_space_.Verify();
5870 old_pointer_space_->Verify(&visitor);
5871 map_space_->Verify(&visitor);
5873 VerifyPointersVisitor no_dirty_regions_visitor;
5874 old_data_space_->Verify(&no_dirty_regions_visitor);
5875 code_space_->Verify(&no_dirty_regions_visitor);
5876 cell_space_->Verify(&no_dirty_regions_visitor);
5877 property_cell_space_->Verify(&no_dirty_regions_visitor);
5879 lo_space_->Verify();
5884 MaybeObject* Heap::InternalizeUtf8String(Vector<const char> string) {
5885 Utf8StringKey key(string, HashSeed());
5886 return InternalizeStringWithKey(&key);
5890 MaybeObject* Heap::InternalizeString(String* string) {
5891 if (string->IsInternalizedString()) return string;
5892 Object* result = NULL;
5894 { MaybeObject* maybe_new_table =
5895 string_table()->LookupString(string, &result);
5896 if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
5898 // Can't use set_string_table because StringTable::cast knows that
5899 // StringTable is a singleton and checks for identity.
5900 roots_[kStringTableRootIndex] = new_table;
5901 ASSERT(result != NULL);
5906 bool Heap::InternalizeStringIfExists(String* string, String** result) {
5907 if (string->IsInternalizedString()) {
5911 return string_table()->LookupStringIfExists(string, result);
5915 MaybeObject* Heap::InternalizeStringWithKey(HashTableKey* key) {
5916 Object* result = NULL;
5918 { MaybeObject* maybe_new_table =
5919 string_table()->LookupKey(key, &result);
5920 if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
5922 // Can't use set_string_table because StringTable::cast knows that
5923 // StringTable is a singleton and checks for identity.
5924 roots_[kStringTableRootIndex] = new_table;
5925 ASSERT(result != NULL);
5930 void Heap::ZapFromSpace() {
5931 NewSpacePageIterator it(new_space_.FromSpaceStart(),
5932 new_space_.FromSpaceEnd());
5933 while (it.has_next()) {
5934 NewSpacePage* page = it.next();
5935 for (Address cursor = page->area_start(), limit = page->area_end();
5937 cursor += kPointerSize) {
5938 Memory::Address_at(cursor) = kFromSpaceZapValue;
5944 void Heap::IterateAndMarkPointersToFromSpace(Address start,
5946 ObjectSlotCallback callback) {
5947 Address slot_address = start;
5949 // We do not collect slots on new space objects during mutation, so we
5950 // have to scan for pointers to evacuation candidates when we promote
5951 // objects. But we should not record any slots in non-black objects:
5952 // a grey object's slots will be rescanned anyway, and a white object
5953 // might not survive until the end of the collection, so recording its
5954 // slots would violate the invariant.
5955 bool record_slots = false;
5956 if (incremental_marking()->IsCompacting()) {
5957 MarkBit mark_bit = Marking::MarkBitFrom(HeapObject::FromAddress(start));
5958 record_slots = Marking::IsBlack(mark_bit);
5961 while (slot_address < end) {
5962 Object** slot = reinterpret_cast<Object**>(slot_address);
5963 Object* object = *slot;
5964 // If the store buffer becomes overfull we mark pages as being exempt from
5965 // the store buffer. These pages are scanned to find pointers that point
5966 // to the new space. In that case we may hit newly promoted objects and
5967 // fix the pointers before the promotion queue gets to them. Thus the 'if'.
5968 if (object->IsHeapObject()) {
5969 if (Heap::InFromSpace(object)) {
5970 callback(reinterpret_cast<HeapObject**>(slot),
5971 HeapObject::cast(object));
5972 Object* new_object = *slot;
5973 if (InNewSpace(new_object)) {
5974 SLOW_ASSERT(Heap::InToSpace(new_object));
5975 SLOW_ASSERT(new_object->IsHeapObject());
5976 store_buffer_.EnterDirectlyIntoStoreBuffer(
5977 reinterpret_cast<Address>(slot));
5979 SLOW_ASSERT(!MarkCompactCollector::IsOnEvacuationCandidate(new_object));
5980 } else if (record_slots &&
5981 MarkCompactCollector::IsOnEvacuationCandidate(object)) {
5982 mark_compact_collector()->RecordSlot(slot, slot, object);
5985 slot_address += kPointerSize;
5991 typedef bool (*CheckStoreBufferFilter)(Object** addr);
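// Returns true if addr lands on one of the pointer fields of a Map, assuming
// map space pages are packed with Map-sized objects.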
5994 bool IsAMapPointerAddress(Object** addr) {
5995 uintptr_t a = reinterpret_cast<uintptr_t>(addr);
5996 int mod = a % Map::kSize;
5997 return mod >= Map::kPointerFieldsBeginOffset &&
5998 mod < Map::kPointerFieldsEndOffset;
6002 bool EverythingsAPointer(Object** addr) {
6007 static void CheckStoreBuffer(Heap* heap,
6010 Object**** store_buffer_position,
6011 Object*** store_buffer_top,
6012 CheckStoreBufferFilter filter,
6013 Address special_garbage_start,
6014 Address special_garbage_end) {
6015 Map* free_space_map = heap->free_space_map();
6016 for ( ; current < limit; current++) {
6017 Object* o = *current;
6018 Address current_address = reinterpret_cast<Address>(current);
6020 if (o == free_space_map) {
6021 Address current_address = reinterpret_cast<Address>(current);
6022 FreeSpace* free_space =
6023 FreeSpace::cast(HeapObject::FromAddress(current_address));
6024 int skip = free_space->Size();
6025 ASSERT(current_address + skip <= reinterpret_cast<Address>(limit));
6027 current_address += skip - kPointerSize;
6028 current = reinterpret_cast<Object**>(current_address);
6031 // Skip the current linear allocation space between top and limit, which is
6032 // not marked with the free space map but can contain junk.
6033 if (current_address == special_garbage_start &&
6034 special_garbage_end != special_garbage_start) {
6035 current_address = special_garbage_end - kPointerSize;
6036 current = reinterpret_cast<Object**>(current_address);
6039 if (!(*filter)(current)) continue;
6040 ASSERT(current_address < special_garbage_start ||
6041 current_address >= special_garbage_end);
6042 ASSERT(reinterpret_cast<uintptr_t>(o) != kFreeListZapValue);
6043 // We have to check that the pointer does not point into new space
6044 // without trying to cast it to a heap object since the hash field of
6045 // a string can contain values like 1 and 3 which are tagged null pointers.
6047 if (!heap->InNewSpace(o)) continue;
6048 while (**store_buffer_position < current &&
6049 *store_buffer_position < store_buffer_top) {
6050 (*store_buffer_position)++;
6052 if (**store_buffer_position != current ||
6053 *store_buffer_position == store_buffer_top) {
6054 Object** obj_start = current;
6055 while (!(*obj_start)->IsMap()) obj_start--;
6062 // Check that the store buffer contains all intergenerational pointers by
6063 // scanning a page and ensuring that all pointers to young space are in the store buffer.
6065 void Heap::OldPointerSpaceCheckStoreBuffer() {
6066 OldSpace* space = old_pointer_space();
6067 PageIterator pages(space);
6069 store_buffer()->SortUniq();
6071 while (pages.has_next()) {
6072 Page* page = pages.next();
6073 Object** current = reinterpret_cast<Object**>(page->area_start());
6075 Address end = page->area_end();
6077 Object*** store_buffer_position = store_buffer()->Start();
6078 Object*** store_buffer_top = store_buffer()->Top();
6080 Object** limit = reinterpret_cast<Object**>(end);
6081 CheckStoreBuffer(this,
6084 &store_buffer_position,
6086 &EverythingsAPointer,
6093 void Heap::MapSpaceCheckStoreBuffer() {
6094 MapSpace* space = map_space();
6095 PageIterator pages(space);
6097 store_buffer()->SortUniq();
6099 while (pages.has_next()) {
6100 Page* page = pages.next();
6101 Object** current = reinterpret_cast<Object**>(page->area_start());
6103 Address end = page->area_end();
6105 Object*** store_buffer_position = store_buffer()->Start();
6106 Object*** store_buffer_top = store_buffer()->Top();
6108 Object** limit = reinterpret_cast<Object**>(end);
6109 CheckStoreBuffer(this,
6112 &store_buffer_position,
6114 &IsAMapPointerAddress,
6121 void Heap::LargeObjectSpaceCheckStoreBuffer() {
6122 LargeObjectIterator it(lo_space());
6123 for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
6124 // We only have code, sequential strings, or fixed arrays in large
6125 // object space, and only fixed arrays can possibly contain pointers to
6126 // the young generation.
6127 if (object->IsFixedArray()) {
6128 Object*** store_buffer_position = store_buffer()->Start();
6129 Object*** store_buffer_top = store_buffer()->Top();
6130 Object** current = reinterpret_cast<Object**>(object->address());
6132 reinterpret_cast<Object**>(object->address() + object->Size());
6133 CheckStoreBuffer(this,
6136 &store_buffer_position,
6138 &EverythingsAPointer,
6147 void Heap::IterateRoots(ObjectVisitor* v, VisitMode mode) {
6148 IterateStrongRoots(v, mode);
6149 IterateWeakRoots(v, mode);
6153 void Heap::IterateWeakRoots(ObjectVisitor* v, VisitMode mode) {
6154 v->VisitPointer(reinterpret_cast<Object**>(&roots_[kStringTableRootIndex]));
6155 v->Synchronize(VisitorSynchronization::kStringTable);
6156 if (mode != VISIT_ALL_IN_SCAVENGE &&
6157 mode != VISIT_ALL_IN_SWEEP_NEWSPACE) {
6158 // Scavenge collections have special processing for this.
6159 external_string_table_.Iterate(v);
6161 v->Synchronize(VisitorSynchronization::kExternalStringsTable);
6165 void Heap::IterateStrongRoots(ObjectVisitor* v, VisitMode mode) {
6166 v->VisitPointers(&roots_[0], &roots_[kStrongRootListLength]);
6167 v->Synchronize(VisitorSynchronization::kStrongRootList);
6169 v->VisitPointer(BitCast<Object**>(&hidden_string_));
6170 v->Synchronize(VisitorSynchronization::kInternalizedString);
6172 isolate_->bootstrapper()->Iterate(v);
6173 v->Synchronize(VisitorSynchronization::kBootstrapper);
6174 isolate_->Iterate(v);
6175 v->Synchronize(VisitorSynchronization::kTop);
6176 Relocatable::Iterate(isolate_, v);
6177 v->Synchronize(VisitorSynchronization::kRelocatable);
6179 #ifdef ENABLE_DEBUGGER_SUPPORT
6180 isolate_->debug()->Iterate(v);
6181 if (isolate_->deoptimizer_data() != NULL) {
6182 isolate_->deoptimizer_data()->Iterate(v);
6185 v->Synchronize(VisitorSynchronization::kDebug);
6186 isolate_->compilation_cache()->Iterate(v);
6187 v->Synchronize(VisitorSynchronization::kCompilationCache);
6189 // Iterate over local handles in handle scopes.
6190 isolate_->handle_scope_implementer()->Iterate(v);
6191 isolate_->IterateDeferredHandles(v);
6192 v->Synchronize(VisitorSynchronization::kHandleScope);
6194 // Iterate over the builtin code objects and code stubs in the
6195 // heap. Note that it is not necessary to iterate over code objects
6196 // on scavenge collections.
6197 if (mode != VISIT_ALL_IN_SCAVENGE) {
6198 isolate_->builtins()->IterateBuiltins(v);
6200 v->Synchronize(VisitorSynchronization::kBuiltins);
6202 // Iterate over global handles.
6204 case VISIT_ONLY_STRONG:
6205 isolate_->global_handles()->IterateStrongRoots(v);
6207 case VISIT_ALL_IN_SCAVENGE:
6208 isolate_->global_handles()->IterateNewSpaceStrongAndDependentRoots(v);
6210 case VISIT_ALL_IN_SWEEP_NEWSPACE:
6212 isolate_->global_handles()->IterateAllRoots(v);
6215 v->Synchronize(VisitorSynchronization::kGlobalHandles);
6217 // Iterate over eternal handles.
6218 if (mode == VISIT_ALL_IN_SCAVENGE) {
6219 isolate_->eternal_handles()->IterateNewSpaceRoots(v);
6221 isolate_->eternal_handles()->IterateAllRoots(v);
6223 v->Synchronize(VisitorSynchronization::kEternalHandles);
6225 // Iterate over pointers being held by inactive threads.
6226 isolate_->thread_manager()->Iterate(v);
6227 v->Synchronize(VisitorSynchronization::kThreadManager);
6229 // Iterate over the pointers the Serialization/Deserialization code is holding.
6231 // During garbage collection this keeps the partial snapshot cache alive.
6232 // During deserialization of the startup snapshot this creates the partial
6233 // snapshot cache and deserializes the objects it refers to. During
6234 // serialization this does nothing, since the partial snapshot cache is
6235 // empty. However the next thing we do is create the partial snapshot,
6236 // filling up the partial snapshot cache with objects it needs as we go.
6237 SerializerDeserializer::Iterate(isolate_, v);
6238 // We don't do a v->Synchronize call here, because in debug mode that will
6239 // output a flag to the snapshot. However at this point the serializer and
6240 // deserializer are deliberately a little unsynchronized (see above) so the
6241 // checking of the sync flag in the snapshot would fail.
6245 // TODO(1236194): Since the heap size is configurable on the command line
6246 // and through the API, we should gracefully handle the case that the heap
6247 // size is not big enough to fit all the initial objects.
6248 bool Heap::ConfigureHeap(int max_semispace_size,
6249 intptr_t max_old_gen_size,
6250 intptr_t max_executable_size) {
6251 if (HasBeenSetUp()) return false;
6253 if (FLAG_stress_compaction) {
6254 // This will cause more frequent GCs when stressing.
6255 max_semispace_size_ = Page::kPageSize;
6258 if (max_semispace_size > 0) {
6259 if (max_semispace_size < Page::kPageSize) {
6260 max_semispace_size = Page::kPageSize;
6261 if (FLAG_trace_gc) {
6262 PrintPID("Max semispace size cannot be less than %dkbytes\n",
6263 Page::kPageSize >> 10);
6266 max_semispace_size_ = max_semispace_size;
6269 if (Snapshot::IsEnabled()) {
6270 // If we are using a snapshot we always reserve the default amount
6271 // of memory for each semispace because code in the snapshot has
6272 // write-barrier code that relies on the size and alignment of new
6273 // space. We therefore cannot use a larger max semispace size
6274 // than the default reserved semispace size.
6275 if (max_semispace_size_ > reserved_semispace_size_) {
6276 max_semispace_size_ = reserved_semispace_size_;
6277 if (FLAG_trace_gc) {
6278 PrintPID("Max semispace size cannot be more than %dkbytes\n",
6279 reserved_semispace_size_ >> 10);
6283 // If we are not using snapshots we reserve space for the actual
6284 // max semispace size.
6285 reserved_semispace_size_ = max_semispace_size_;
6288 if (max_old_gen_size > 0) max_old_generation_size_ = max_old_gen_size;
6289 if (max_executable_size > 0) {
6290 max_executable_size_ = RoundUp(max_executable_size, Page::kPageSize);
6293 // The max executable size must be less than or equal to the max old generation size.
6295 if (max_executable_size_ > max_old_generation_size_) {
6296 max_executable_size_ = max_old_generation_size_;
6299 // The new space size must be a power of two to support single-bit testing for containment.
6301 max_semispace_size_ = RoundUpToPowerOf2(max_semispace_size_);
6302 reserved_semispace_size_ = RoundUpToPowerOf2(reserved_semispace_size_);
6303 initial_semispace_size_ = Min(initial_semispace_size_, max_semispace_size_);
6305 // The external allocation limit should be below 256 MB on all architectures
6306 // to avoid unnecessary low memory notifications, as that is the threshold
6307 // for some embedders.
6308 external_allocation_limit_ = 12 * max_semispace_size_;
6309 ASSERT(external_allocation_limit_ <= 256 * MB);
6311 // The old generation is paged and needs at least one page for each space.
6312 int paged_space_count = LAST_PAGED_SPACE - FIRST_PAGED_SPACE + 1;
6313 max_old_generation_size_ = Max(static_cast<intptr_t>(paged_space_count *
6315 RoundUp(max_old_generation_size_,
6318 // We rely on being able to allocate new arrays in paged spaces.
6319 ASSERT(Page::kMaxRegularHeapObjectSize >=
6321 FixedArray::SizeFor(JSObject::kInitialMaxFastElementArray) +
6322 AllocationMemento::kSize));
6329 bool Heap::ConfigureHeapDefault() {
6330 return ConfigureHeap(static_cast<intptr_t>(FLAG_max_new_space_size / 2) * KB,
6331 static_cast<intptr_t>(FLAG_max_old_space_size) * MB,
6332 static_cast<intptr_t>(FLAG_max_executable_size) * MB);
6336 void Heap::RecordStats(HeapStats* stats, bool take_snapshot) {
6337 *stats->start_marker = HeapStats::kStartMarker;
6338 *stats->end_marker = HeapStats::kEndMarker;
6339 *stats->new_space_size = new_space_.SizeAsInt();
6340 *stats->new_space_capacity = static_cast<int>(new_space_.Capacity());
6341 *stats->old_pointer_space_size = old_pointer_space_->SizeOfObjects();
6342 *stats->old_pointer_space_capacity = old_pointer_space_->Capacity();
6343 *stats->old_data_space_size = old_data_space_->SizeOfObjects();
6344 *stats->old_data_space_capacity = old_data_space_->Capacity();
6345 *stats->code_space_size = code_space_->SizeOfObjects();
6346 *stats->code_space_capacity = code_space_->Capacity();
6347 *stats->map_space_size = map_space_->SizeOfObjects();
6348 *stats->map_space_capacity = map_space_->Capacity();
6349 *stats->cell_space_size = cell_space_->SizeOfObjects();
6350 *stats->cell_space_capacity = cell_space_->Capacity();
6351 *stats->property_cell_space_size = property_cell_space_->SizeOfObjects();
6352 *stats->property_cell_space_capacity = property_cell_space_->Capacity();
6353 *stats->lo_space_size = lo_space_->Size();
6354 isolate_->global_handles()->RecordStats(stats);
6355 *stats->memory_allocator_size = isolate()->memory_allocator()->Size();
6356 *stats->memory_allocator_capacity =
6357 isolate()->memory_allocator()->Size() +
6358 isolate()->memory_allocator()->Available();
6359 *stats->os_error = OS::GetLastError();
6361 if (take_snapshot) {
6362 HeapIterator iterator(this);
6363 for (HeapObject* obj = iterator.next();
6365 obj = iterator.next()) {
6366 InstanceType type = obj->map()->instance_type();
6367 ASSERT(0 <= type && type <= LAST_TYPE);
6368 stats->objects_per_type[type]++;
6369 stats->size_per_type[type] += obj->Size();
6375 intptr_t Heap::PromotedSpaceSizeOfObjects() {
6376 return old_pointer_space_->SizeOfObjects()
6377 + old_data_space_->SizeOfObjects()
6378 + code_space_->SizeOfObjects()
6379 + map_space_->SizeOfObjects()
6380 + cell_space_->SizeOfObjects()
6381 + property_cell_space_->SizeOfObjects()
6382 + lo_space_->SizeOfObjects();
6386 bool Heap::AdvanceSweepers(int step_size) {
6387 ASSERT(isolate()->num_sweeper_threads() == 0);
6388 bool sweeping_complete = old_data_space()->AdvanceSweeper(step_size);
6389 sweeping_complete &= old_pointer_space()->AdvanceSweeper(step_size);
6390 return sweeping_complete;
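// Report only the external memory that has been added since the last global
// GC; a negative delta (external memory shrank) is reported as zero.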
6394 int64_t Heap::PromotedExternalMemorySize() {
6395 if (amount_of_external_allocated_memory_
6396 <= amount_of_external_allocated_memory_at_last_global_gc_) return 0;
6397 return amount_of_external_allocated_memory_
6398 - amount_of_external_allocated_memory_at_last_global_gc_;
6402 void Heap::EnableInlineAllocation() {
6403 if (!inline_allocation_disabled_) return;
6404 inline_allocation_disabled_ = false;
6406 // Update inline allocation limit for new space.
6407 new_space()->UpdateInlineAllocationLimit(0);
6411 void Heap::DisableInlineAllocation() {
6412 if (inline_allocation_disabled_) return;
6413 inline_allocation_disabled_ = true;
6415 // Update inline allocation limit for new space.
6416 new_space()->UpdateInlineAllocationLimit(0);
6418 // Update inline allocation limit for old spaces.
6419 PagedSpaces spaces(this);
6420 for (PagedSpace* space = spaces.next();
6422 space = spaces.next()) {
6423 space->EmptyAllocationInfo();
6428 V8_DECLARE_ONCE(initialize_gc_once);
6430 static void InitializeGCOnce() {
6431 InitializeScavengingVisitorsTables();
6432 NewSpaceScavenger::Initialize();
6433 MarkCompactCollector::Initialize();
6437 bool Heap::SetUp() {
6439 allocation_timeout_ = FLAG_gc_interval;
6442 // Initialize heap spaces and initial maps and objects. Whenever something
6443 // goes wrong, just return false. The caller should check the results and
6444 // call Heap::TearDown() to release allocated memory.
6446 // If the heap is not yet configured (e.g. through the API), configure it.
6447 // Configuration is based on the flags new-space-size (really the semispace
6448 // size) and old-space-size if set or the initial values of semispace_size_
6449 // and old_generation_size_ otherwise.
6451 if (!ConfigureHeapDefault()) return false;
6454 CallOnce(&initialize_gc_once, &InitializeGCOnce);
6456 MarkMapPointersAsEncoded(false);
6458 // Set up memory allocator.
6459 if (!isolate_->memory_allocator()->SetUp(MaxReserved(), MaxExecutableSize()))
6462 // Set up new space.
6463 if (!new_space_.SetUp(reserved_semispace_size_, max_semispace_size_)) {
6467 // Initialize old pointer space.
6468 old_pointer_space_ =
6470 max_old_generation_size_,
6473 if (old_pointer_space_ == NULL) return false;
6474 if (!old_pointer_space_->SetUp()) return false;
6476 // Initialize old data space.
6479 max_old_generation_size_,
6482 if (old_data_space_ == NULL) return false;
6483 if (!old_data_space_->SetUp()) return false;
6485 // Initialize the code space, set its maximum capacity to the old
6486 // generation size. It needs executable memory.
6487 // On 64-bit platform(s), we put all code objects in a 2 GB range of
6488 // virtual address space, so that they can call each other with near calls.
6489 if (code_range_size_ > 0) {
6490 if (!isolate_->code_range()->SetUp(code_range_size_)) {
6496 new OldSpace(this, max_old_generation_size_, CODE_SPACE, EXECUTABLE);
6497 if (code_space_ == NULL) return false;
6498 if (!code_space_->SetUp()) return false;
6500 // Initialize map space.
6501 map_space_ = new MapSpace(this, max_old_generation_size_, MAP_SPACE);
6502 if (map_space_ == NULL) return false;
6503 if (!map_space_->SetUp()) return false;
6505 // Initialize simple cell space.
6506 cell_space_ = new CellSpace(this, max_old_generation_size_, CELL_SPACE);
6507 if (cell_space_ == NULL) return false;
6508 if (!cell_space_->SetUp()) return false;
6510 // Initialize global property cell space.
6511 property_cell_space_ = new PropertyCellSpace(this, max_old_generation_size_,
6512 PROPERTY_CELL_SPACE);
6513 if (property_cell_space_ == NULL) return false;
6514 if (!property_cell_space_->SetUp()) return false;
6516 // The large object space may contain code or data. We set the memory
6517 // to be non-executable here for safety, but this means we need to enable it
6518 // explicitly when allocating large code objects.
6519 lo_space_ = new LargeObjectSpace(this, max_old_generation_size_, LO_SPACE);
6520 if (lo_space_ == NULL) return false;
6521 if (!lo_space_->SetUp()) return false;
6523 // Set up the seed that is used to randomize the string hash function.
6524 ASSERT(hash_seed() == 0);
6525 if (FLAG_randomize_hashes) {
6526 if (FLAG_hash_seed == 0) {
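// Mask the random value down to the hash-field width so the seed fits in a Smi.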
6527 int rnd = isolate()->random_number_generator()->NextInt();
6528 set_hash_seed(Smi::FromInt(rnd & Name::kHashBitMask));
6530 set_hash_seed(Smi::FromInt(FLAG_hash_seed));
6534 LOG(isolate_, IntPtrTEvent("heap-capacity", Capacity()));
6535 LOG(isolate_, IntPtrTEvent("heap-available", Available()));
6537 store_buffer()->SetUp();
6539 mark_compact_collector()->SetUp();
6541 if (FLAG_concurrent_recompilation) relocation_mutex_ = new Mutex;
6547 bool Heap::CreateHeapObjects() {
6548 // Create initial maps.
6549 if (!CreateInitialMaps()) return false;
6550 if (!CreateApiObjects()) return false;
6552 // Create initial objects
6553 if (!CreateInitialObjects()) return false;
6555 native_contexts_list_ = undefined_value();
6556 array_buffers_list_ = undefined_value();
6557 allocation_sites_list_ = undefined_value();
6558 weak_object_to_code_table_ = undefined_value();
6563 void Heap::SetStackLimits() {
6564 ASSERT(isolate_ != NULL);
6565 ASSERT(isolate_ == isolate());
6566 // On 64 bit machines, pointers are generally out of range of Smis. We write
6567 // something that looks like an out of range Smi to the GC.
6569 // Set up the special root array entries containing the stack limits.
6570 // These are actually addresses, but the tag makes the GC ignore it.
6571 roots_[kStackLimitRootIndex] =
6572 reinterpret_cast<Object*>(
6573 (isolate_->stack_guard()->jslimit() & ~kSmiTagMask) | kSmiTag);
6574 roots_[kRealStackLimitRootIndex] =
6575 reinterpret_cast<Object*>(
6576 (isolate_->stack_guard()->real_jslimit() & ~kSmiTagMask) | kSmiTag);
6580 void Heap::TearDown() {
6582 if (FLAG_verify_heap) {
6587 UpdateMaximumCommitted();
6589 if (FLAG_print_cumulative_gc_stat) {
6591 PrintF("gc_count=%d ", gc_count_);
6592 PrintF("mark_sweep_count=%d ", ms_count_);
6593 PrintF("max_gc_pause=%.1f ", get_max_gc_pause());
6594 PrintF("total_gc_time=%.1f ", total_gc_time_ms_);
6595 PrintF("min_in_mutator=%.1f ", get_min_in_mutator());
6596 PrintF("max_alive_after_gc=%" V8_PTR_PREFIX "d ",
6597 get_max_alive_after_gc());
6598 PrintF("total_marking_time=%.1f ", marking_time());
6599 PrintF("total_sweeping_time=%.1f ", sweeping_time());
6603 if (FLAG_print_max_heap_committed) {
6605 PrintF("maximum_committed_by_heap=%" V8_PTR_PREFIX "d ",
6606 MaximumCommittedMemory());
6607 PrintF("maximum_committed_by_new_space=%" V8_PTR_PREFIX "d ",
6608 new_space_.MaximumCommittedMemory());
6609 PrintF("maximum_committed_by_old_pointer_space=%" V8_PTR_PREFIX "d ",
6610 old_data_space_->MaximumCommittedMemory());
6611 PrintF("maximum_committed_by_old_data_space=%" V8_PTR_PREFIX "d ",
6612 old_pointer_space_->MaximumCommittedMemory());
6613 PrintF("maximum_committed_by_old_data_space=%" V8_PTR_PREFIX "d ",
6614 old_pointer_space_->MaximumCommittedMemory());
6615 PrintF("maximum_committed_by_code_space=%" V8_PTR_PREFIX "d ",
6616 code_space_->MaximumCommittedMemory());
6617 PrintF("maximum_committed_by_map_space=%" V8_PTR_PREFIX "d ",
6618 map_space_->MaximumCommittedMemory());
6619 PrintF("maximum_committed_by_cell_space=%" V8_PTR_PREFIX "d ",
6620 cell_space_->MaximumCommittedMemory());
6621 PrintF("maximum_committed_by_property_space=%" V8_PTR_PREFIX "d ",
6622 property_cell_space_->MaximumCommittedMemory());
6623 PrintF("maximum_committed_by_lo_space=%" V8_PTR_PREFIX "d ",
6624 lo_space_->MaximumCommittedMemory());
6628 TearDownArrayBuffers();
6630 isolate_->global_handles()->TearDown();
6632 external_string_table_.TearDown();
6634 mark_compact_collector()->TearDown();
6636 new_space_.TearDown();
6638 if (old_pointer_space_ != NULL) {
6639 old_pointer_space_->TearDown();
6640 delete old_pointer_space_;
6641 old_pointer_space_ = NULL;
6644 if (old_data_space_ != NULL) {
6645 old_data_space_->TearDown();
6646 delete old_data_space_;
6647 old_data_space_ = NULL;
6650 if (code_space_ != NULL) {
6651 code_space_->TearDown();
6656 if (map_space_ != NULL) {
6657 map_space_->TearDown();
6662 if (cell_space_ != NULL) {
6663 cell_space_->TearDown();
6668 if (property_cell_space_ != NULL) {
6669 property_cell_space_->TearDown();
6670 delete property_cell_space_;
6671 property_cell_space_ = NULL;
6674 if (lo_space_ != NULL) {
6675 lo_space_->TearDown();
6680 store_buffer()->TearDown();
6681 incremental_marking()->TearDown();
6683 isolate_->memory_allocator()->TearDown();
6685 delete relocation_mutex_;
6686 relocation_mutex_ = NULL;
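// Embedder-side usage, sketched for illustration (the exact entry point on
// v8::Isolate is assumed here):
//   static void OnGCPrologue(v8::Isolate* isolate, v8::GCType type,
//                            v8::GCCallbackFlags flags) { /* ... */ }
//   isolate->AddGCPrologueCallback(OnGCPrologue, v8::kGCTypeMarkSweepCompact);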
6690 void Heap::AddGCPrologueCallback(v8::Isolate::GCPrologueCallback callback,
6692 bool pass_isolate) {
6693 ASSERT(callback != NULL);
6694 GCPrologueCallbackPair pair(callback, gc_type, pass_isolate);
6695 ASSERT(!gc_prologue_callbacks_.Contains(pair));
6696 return gc_prologue_callbacks_.Add(pair);
6700 void Heap::RemoveGCPrologueCallback(v8::Isolate::GCPrologueCallback callback) {
6701 ASSERT(callback != NULL);
6702 for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
6703 if (gc_prologue_callbacks_[i].callback == callback) {
6704 gc_prologue_callbacks_.Remove(i);
6712 void Heap::AddGCEpilogueCallback(v8::Isolate::GCEpilogueCallback callback,
6714 bool pass_isolate) {
6715 ASSERT(callback != NULL);
6716 GCEpilogueCallbackPair pair(callback, gc_type, pass_isolate);
6717 ASSERT(!gc_epilogue_callbacks_.Contains(pair));
6718 return gc_epilogue_callbacks_.Add(pair);
6722 void Heap::RemoveGCEpilogueCallback(v8::Isolate::GCEpilogueCallback callback) {
6723 ASSERT(callback != NULL);
6724 for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
6725 if (gc_epilogue_callbacks_[i].callback == callback) {
6726 gc_epilogue_callbacks_.Remove(i);
6734 MaybeObject* Heap::AddWeakObjectToCodeDependency(Object* obj,
6735 DependentCode* dep) {
6736 ASSERT(!InNewSpace(obj));
6737 ASSERT(!InNewSpace(dep));
6738 MaybeObject* maybe_obj =
6739 WeakHashTable::cast(weak_object_to_code_table_)->Put(obj, dep);
6740 WeakHashTable* table;
6741 if (!maybe_obj->To(&table)) return maybe_obj;
6742 if (ShouldZapGarbage() && weak_object_to_code_table_ != table) {
6743 WeakHashTable::cast(weak_object_to_code_table_)->Zap(the_hole_value());
6745 set_weak_object_to_code_table(table);
6746 ASSERT_EQ(dep, WeakHashTable::cast(weak_object_to_code_table_)->Lookup(obj));
6747 return weak_object_to_code_table_;
6751 DependentCode* Heap::LookupWeakObjectToCodeDependency(Object* obj) {
6752 Object* dep = WeakHashTable::cast(weak_object_to_code_table_)->Lookup(obj);
6753 if (dep->IsDependentCode()) return DependentCode::cast(dep);
6754 return DependentCode::cast(empty_fixed_array());
6758 void Heap::EnsureWeakObjectToCodeTable() {
6759 if (!weak_object_to_code_table()->IsHashTable()) {
6760 set_weak_object_to_code_table(*isolate()->factory()->NewWeakHashTable(16));
6765 void Heap::FatalProcessOutOfMemory(const char* location, bool take_snapshot) {
6766 v8::internal::V8::FatalProcessOutOfMemory(location, take_snapshot);
6771 class PrintHandleVisitor: public ObjectVisitor {
6773 void VisitPointers(Object** start, Object** end) {
6774 for (Object** p = start; p < end; p++)
6775 PrintF(" handle %p to %p\n",
6776 reinterpret_cast<void*>(p),
6777 reinterpret_cast<void*>(*p));
6782 void Heap::PrintHandles() {
6783 PrintF("Handles:\n");
6784 PrintHandleVisitor v;
6785 isolate_->handle_scope_implementer()->Iterate(&v);
6791 Space* AllSpaces::next() {
6792 switch (counter_++) {
6794 return heap_->new_space();
6795 case OLD_POINTER_SPACE:
6796 return heap_->old_pointer_space();
6797 case OLD_DATA_SPACE:
6798 return heap_->old_data_space();
6800 return heap_->code_space();
6802 return heap_->map_space();
6804 return heap_->cell_space();
6805 case PROPERTY_CELL_SPACE:
6806 return heap_->property_cell_space();
6808 return heap_->lo_space();
6815 PagedSpace* PagedSpaces::next() {
6816 switch (counter_++) {
6817 case OLD_POINTER_SPACE:
6818 return heap_->old_pointer_space();
6819 case OLD_DATA_SPACE:
6820 return heap_->old_data_space();
6822 return heap_->code_space();
6824 return heap_->map_space();
6826 return heap_->cell_space();
6827 case PROPERTY_CELL_SPACE:
6828 return heap_->property_cell_space();
6836 OldSpace* OldSpaces::next() {
6837 switch (counter_++) {
6838 case OLD_POINTER_SPACE:
6839 return heap_->old_pointer_space();
6840 case OLD_DATA_SPACE:
6841 return heap_->old_data_space();
6843 return heap_->code_space();
6850 SpaceIterator::SpaceIterator(Heap* heap)
6852 current_space_(FIRST_SPACE),
6858 SpaceIterator::SpaceIterator(Heap* heap, HeapObjectCallback size_func)
6860 current_space_(FIRST_SPACE),
6862 size_func_(size_func) {
6866 SpaceIterator::~SpaceIterator() {
6867 // Delete active iterator if any.
6872 bool SpaceIterator::has_next() {
6873 // Iterate until no more spaces.
6874 return current_space_ != LAST_SPACE;
6878 ObjectIterator* SpaceIterator::next() {
6879 if (iterator_ != NULL) {
6882 // Move to the next space
6884 if (current_space_ > LAST_SPACE) {
6889 // Return iterator for the new current space.
6890 return CreateIterator();
6894 // Create an iterator for the space to iterate.
6895 ObjectIterator* SpaceIterator::CreateIterator() {
6896 ASSERT(iterator_ == NULL);
6898 switch (current_space_) {
6900 iterator_ = new SemiSpaceIterator(heap_->new_space(), size_func_);
6902 case OLD_POINTER_SPACE:
6904 new HeapObjectIterator(heap_->old_pointer_space(), size_func_);
6906 case OLD_DATA_SPACE:
6907 iterator_ = new HeapObjectIterator(heap_->old_data_space(), size_func_);
6910 iterator_ = new HeapObjectIterator(heap_->code_space(), size_func_);
6913 iterator_ = new HeapObjectIterator(heap_->map_space(), size_func_);
6916 iterator_ = new HeapObjectIterator(heap_->cell_space(), size_func_);
6918 case PROPERTY_CELL_SPACE:
6919 iterator_ = new HeapObjectIterator(heap_->property_cell_space(),
6923 iterator_ = new LargeObjectIterator(heap_->lo_space(), size_func_);
6927 // Return the newly allocated iterator.
6928 ASSERT(iterator_ != NULL);
6933 class HeapObjectsFilter {
6935 virtual ~HeapObjectsFilter() {}
6936 virtual bool SkipObject(HeapObject* object) = 0;
6940 class UnreachableObjectsFilter : public HeapObjectsFilter {
6942 explicit UnreachableObjectsFilter(Heap* heap) : heap_(heap) {
6943 MarkReachableObjects();
6946 ~UnreachableObjectsFilter() {
6947 heap_->mark_compact_collector()->ClearMarkbits();
6950 bool SkipObject(HeapObject* object) {
6951 MarkBit mark_bit = Marking::MarkBitFrom(object);
6952 return !mark_bit.Get();
6956 class MarkingVisitor : public ObjectVisitor {
6958 MarkingVisitor() : marking_stack_(10) {}
6960 void VisitPointers(Object** start, Object** end) {
6961 for (Object** p = start; p < end; p++) {
6962 if (!(*p)->IsHeapObject()) continue;
6963 HeapObject* obj = HeapObject::cast(*p);
6964 MarkBit mark_bit = Marking::MarkBitFrom(obj);
6965 if (!mark_bit.Get()) {
6967 marking_stack_.Add(obj);
6972 void TransitiveClosure() {
6973 while (!marking_stack_.is_empty()) {
6974 HeapObject* obj = marking_stack_.RemoveLast();
6980 List<HeapObject*> marking_stack_;
6983 void MarkReachableObjects() {
6984 MarkingVisitor visitor;
6985 heap_->IterateRoots(&visitor, VISIT_ALL);
6986 visitor.TransitiveClosure();
6990 DisallowHeapAllocation no_allocation_;
6994 HeapIterator::HeapIterator(Heap* heap)
6996 filtering_(HeapIterator::kNoFiltering),
7002 HeapIterator::HeapIterator(Heap* heap,
7003 HeapIterator::HeapObjectsFiltering filtering)
7005 filtering_(filtering),
7011 HeapIterator::~HeapIterator() {
7016 void HeapIterator::Init() {
7017 // Start the iteration.
7018 space_iterator_ = new SpaceIterator(heap_);
7019 switch (filtering_) {
7020 case kFilterUnreachable:
7021 filter_ = new UnreachableObjectsFilter(heap_);
7026 object_iterator_ = space_iterator_->next();
7030 void HeapIterator::Shutdown() {
7032 // Assert that in filtering mode we have iterated through all
7033 // objects. Otherwise, the heap will be left in an inconsistent state.
7034 if (filtering_ != kNoFiltering) {
7035 ASSERT(object_iterator_ == NULL);
7038 // Make sure the last iterator is deallocated.
7039 delete space_iterator_;
7040 space_iterator_ = NULL;
7041 object_iterator_ = NULL;
7047 HeapObject* HeapIterator::next() {
7048 if (filter_ == NULL) return NextObject();
7050 HeapObject* obj = NextObject();
7051 while (obj != NULL && filter_->SkipObject(obj)) obj = NextObject();
7056 HeapObject* HeapIterator::NextObject() {
7057 // No iterator means we are done.
7058 if (object_iterator_ == NULL) return NULL;
7060 if (HeapObject* obj = object_iterator_->next_object()) {
7061 // If the current iterator has more objects we are fine.
7064 // Go through the spaces looking for one that has objects.
7065 while (space_iterator_->has_next()) {
7066 object_iterator_ = space_iterator_->next();
7067 if (HeapObject* obj = object_iterator_->next_object()) {
7072 // Done with the last space.
7073 object_iterator_ = NULL;
7078 void HeapIterator::reset() {
7079 // Restart the iterator.
7087 Object* const PathTracer::kAnyGlobalObject = NULL;
7089 class PathTracer::MarkVisitor: public ObjectVisitor {
7091 explicit MarkVisitor(PathTracer* tracer) : tracer_(tracer) {}
7092 void VisitPointers(Object** start, Object** end) {
7093 // Scan all HeapObject pointers in [start, end)
7094 for (Object** p = start; !tracer_->found() && (p < end); p++) {
7095 if ((*p)->IsHeapObject())
7096 tracer_->MarkRecursively(p, this);
7101 PathTracer* tracer_;
7105 class PathTracer::UnmarkVisitor: public ObjectVisitor {
7107 explicit UnmarkVisitor(PathTracer* tracer) : tracer_(tracer) {}
7108 void VisitPointers(Object** start, Object** end) {
7109 // Scan all HeapObject pointers in [start, end)
7110 for (Object** p = start; p < end; p++) {
7111 if ((*p)->IsHeapObject())
7112 tracer_->UnmarkRecursively(p, this);
7117 PathTracer* tracer_;
7121 void PathTracer::VisitPointers(Object** start, Object** end) {
7122 bool done = ((what_to_find_ == FIND_FIRST) && found_target_);
7123 // Visit all HeapObject pointers in [start, end)
7124 for (Object** p = start; !done && (p < end); p++) {
7125 if ((*p)->IsHeapObject()) {
7127 done = ((what_to_find_ == FIND_FIRST) && found_target_);
7133 void PathTracer::Reset() {
7134 found_target_ = false;
7135 object_stack_.Clear();
7139 void PathTracer::TracePathFrom(Object** root) {
7140 ASSERT((search_target_ == kAnyGlobalObject) ||
7141 search_target_->IsHeapObject());
7142 found_target_in_trace_ = false;
7145 MarkVisitor mark_visitor(this);
7146 MarkRecursively(root, &mark_visitor);
7148 UnmarkVisitor unmark_visitor(this);
7149 UnmarkRecursively(root, &unmark_visitor);
7155 static bool SafeIsNativeContext(HeapObject* obj) {
7156 return obj->map() == obj->GetHeap()->raw_unchecked_native_context_map();
7160 void PathTracer::MarkRecursively(Object** p, MarkVisitor* mark_visitor) {
7161 if (!(*p)->IsHeapObject()) return;
7163 HeapObject* obj = HeapObject::cast(*p);
7165 Object* map = obj->map();
7167 if (!map->IsHeapObject()) return; // visited before
7169 if (found_target_in_trace_) return; // stop if target found
7170 object_stack_.Add(obj);
7171 if (((search_target_ == kAnyGlobalObject) && obj->IsJSGlobalObject()) ||
7172 (obj == search_target_)) {
7173 found_target_in_trace_ = true;
7174 found_target_ = true;
7178 bool is_native_context = SafeIsNativeContext(obj);
7181 Map* map_p = reinterpret_cast<Map*>(HeapObject::cast(map));
7183 Address map_addr = map_p->address();
7185 obj->set_map_no_write_barrier(reinterpret_cast<Map*>(map_addr + kMarkTag));
7187 // Scan the object body.
7188 if (is_native_context && (visit_mode_ == VISIT_ONLY_STRONG)) {
7189 // This is specialized to scan Context's properly.
7190 Object** start = reinterpret_cast<Object**>(obj->address() +
7191 Context::kHeaderSize);
7192 Object** end = reinterpret_cast<Object**>(obj->address() +
7193 Context::kHeaderSize + Context::FIRST_WEAK_SLOT * kPointerSize);
7194 mark_visitor->VisitPointers(start, end);
7196 obj->IterateBody(map_p->instance_type(),
7197 obj->SizeFromMap(map_p),
7201 // Scan the map after the body because the body is a lot more interesting
7202 // when doing leak detection.
7203 MarkRecursively(&map, mark_visitor);
7205 if (!found_target_in_trace_) // don't pop if found the target
7206 object_stack_.RemoveLast();
7210 void PathTracer::UnmarkRecursively(Object** p, UnmarkVisitor* unmark_visitor) {
7211 if (!(*p)->IsHeapObject()) return;
7213 HeapObject* obj = HeapObject::cast(*p);
7215 Object* map = obj->map();
7217 if (map->IsHeapObject()) return; // unmarked already
7219 Address map_addr = reinterpret_cast<Address>(map);
7221 map_addr -= kMarkTag;
7223 ASSERT_TAG_ALIGNED(map_addr);
7225 HeapObject* map_p = HeapObject::FromAddress(map_addr);
7227 obj->set_map_no_write_barrier(reinterpret_cast<Map*>(map_p));
7229 UnmarkRecursively(reinterpret_cast<Object**>(&map_p), unmark_visitor);
7231 obj->IterateBody(Map::cast(map_p)->instance_type(),
7232 obj->SizeFromMap(Map::cast(map_p)),
7237 void PathTracer::ProcessResults() {
7238 if (found_target_) {
7239 PrintF("=====================================\n");
7240 PrintF("==== Path to object ====\n");
7241 PrintF("=====================================\n\n");
7243 ASSERT(!object_stack_.is_empty());
7244 for (int i = 0; i < object_stack_.length(); i++) {
7245 if (i > 0) PrintF("\n |\n |\n V\n\n");
7246 Object* obj = object_stack_[i];
7249 PrintF("=====================================\n");
7254 // Triggers a depth-first traversal of reachable objects from one
7255 // given root object and finds a path to a specific heap object and prints it.
7257 void Heap::TracePathToObjectFrom(Object* target, Object* root) {
7258 PathTracer tracer(target, PathTracer::FIND_ALL, VISIT_ALL);
7259 tracer.VisitPointer(&root);
7263 // Triggers a depth-first traversal of reachable objects from roots
7264 // and finds a path to a specific heap object and prints it.
7265 void Heap::TracePathToObject(Object* target) {
7266 PathTracer tracer(target, PathTracer::FIND_ALL, VISIT_ALL);
7267 IterateRoots(&tracer, VISIT_ONLY_STRONG);
7271 // Triggers a depth-first traversal of reachable objects from roots
7272 // and finds a path to any global object and prints it. Useful for
7273 // determining the source for leaks of global objects.
7274 void Heap::TracePathToGlobal() {
7275 PathTracer tracer(PathTracer::kAnyGlobalObject,
7276 PathTracer::FIND_ALL,
7278 IterateRoots(&tracer, VISIT_ONLY_STRONG);
7283 static intptr_t CountTotalHolesSize(Heap* heap) {
7284 intptr_t holes_size = 0;
7285 OldSpaces spaces(heap);
7286 for (OldSpace* space = spaces.next();
7288 space = spaces.next()) {
7289 holes_size += space->Waste() + space->Available();
7295 GCTracer::GCTracer(Heap* heap,
7296 const char* gc_reason,
7297 const char* collector_reason)
7299 start_object_size_(0),
7300 start_memory_size_(0),
7303 allocated_since_last_gc_(0),
7304 spent_in_mutator_(0),
7305 promoted_objects_size_(0),
7306 nodes_died_in_new_space_(0),
7307 nodes_copied_in_new_space_(0),
7310 gc_reason_(gc_reason),
7311 collector_reason_(collector_reason) {
7312 if (!FLAG_trace_gc && !FLAG_print_cumulative_gc_stat) return;
7313 start_time_ = OS::TimeCurrentMillis();
7314 start_object_size_ = heap_->SizeOfObjects();
7315 start_memory_size_ = heap_->isolate()->memory_allocator()->Size();
7317 for (int i = 0; i < Scope::kNumberOfScopes; i++) {
7318 scopes_[i] = 0;
7319 }
7321 in_free_list_or_wasted_before_gc_ = CountTotalHolesSize(heap);
7323 allocated_since_last_gc_ =
7324 heap_->SizeOfObjects() - heap_->alive_after_last_gc_;
7326 if (heap_->last_gc_end_timestamp_ > 0) {
7327 spent_in_mutator_ = Max(start_time_ - heap_->last_gc_end_timestamp_, 0.0);
7328 }
7330 steps_count_ = heap_->incremental_marking()->steps_count();
7331 steps_took_ = heap_->incremental_marking()->steps_took();
7332 longest_step_ = heap_->incremental_marking()->longest_step();
7333 steps_count_since_last_gc_ =
7334 heap_->incremental_marking()->steps_count_since_last_gc();
7335 steps_took_since_last_gc_ =
7336 heap_->incremental_marking()->steps_took_since_last_gc();
7337 }
7340 GCTracer::~GCTracer() {
7341 // Printf ONE line iff flag is set.
7342 if (!FLAG_trace_gc && !FLAG_print_cumulative_gc_stat) return;
7344 bool first_gc = (heap_->last_gc_end_timestamp_ == 0);
7346 heap_->alive_after_last_gc_ = heap_->SizeOfObjects();
7347 heap_->last_gc_end_timestamp_ = OS::TimeCurrentMillis();
7349 double time = heap_->last_gc_end_timestamp_ - start_time_;
7351 // Update cumulative GC statistics if required.
7352 if (FLAG_print_cumulative_gc_stat) {
7353 heap_->total_gc_time_ms_ += time;
7354 heap_->max_gc_pause_ = Max(heap_->max_gc_pause_, time);
7355 heap_->max_alive_after_gc_ = Max(heap_->max_alive_after_gc_,
7356 heap_->alive_after_last_gc_);
7357 if (!first_gc) {
7358 heap_->min_in_mutator_ = Min(heap_->min_in_mutator_,
7359 spent_in_mutator_);
7360 }
7361 } else if (FLAG_trace_gc_verbose) {
7362 heap_->total_gc_time_ms_ += time;
7363 }
7365 if (collector_ == SCAVENGER && FLAG_trace_gc_ignore_scavenger) return;
7367 heap_->AddMarkingTime(scopes_[Scope::MC_MARK]);
7369 if (FLAG_print_cumulative_gc_stat && !FLAG_trace_gc) return;
7370 PrintPID("%8.0f ms: ", heap_->isolate()->time_millis_since_init());
7372 if (!FLAG_trace_gc_nvp) {
7373 int external_time = static_cast<int>(scopes_[Scope::EXTERNAL]);
7375 double end_memory_size_mb =
7376 static_cast<double>(heap_->isolate()->memory_allocator()->Size()) / MB;
7378 PrintF("%s %.1f (%.1f) -> %.1f (%.1f) MB, ",
7379 CollectorString(),
7380 static_cast<double>(start_object_size_) / MB,
7381 static_cast<double>(start_memory_size_) / MB,
7382 SizeOfHeapObjects(),
7383 end_memory_size_mb);
7385 if (external_time > 0) PrintF("%d / ", external_time);
7386 PrintF("%.1f ms", time);
7387 if (steps_count_ > 0) {
7388 if (collector_ == SCAVENGER) {
7389 PrintF(" (+ %.1f ms in %d steps since last GC)",
7390 steps_took_since_last_gc_,
7391 steps_count_since_last_gc_);
7392 } else {
7393 PrintF(" (+ %.1f ms in %d steps since start of marking, "
7394 "biggest step %.1f ms)",
7395 steps_took_,
7396 steps_count_,
7397 longest_step_);
7398 }
7399 }
7401 if (gc_reason_ != NULL) {
7402 PrintF(" [%s]", gc_reason_);
7403 }
7405 if (collector_reason_ != NULL) {
7406 PrintF(" [%s]", collector_reason_);
7407 }
7409 PrintF(".\n");
7410 } else {
7411 PrintF("pause=%.1f ", time);
7412 PrintF("mutator=%.1f ", spent_in_mutator_);
7413 PrintF("gc=");
7414 switch (collector_) {
7415 case SCAVENGER:
7416 PrintF("s");
7417 break;
7418 case MARK_COMPACTOR:
7419 PrintF("ms");
7420 break;
7421 default:
7422 UNREACHABLE();
7423 }
7424 PrintF(" ");
7426 PrintF("external=%.1f ", scopes_[Scope::EXTERNAL]);
7427 PrintF("mark=%.1f ", scopes_[Scope::MC_MARK]);
7428 PrintF("sweep=%.1f ", scopes_[Scope::MC_SWEEP]);
7429 PrintF("sweepns=%.1f ", scopes_[Scope::MC_SWEEP_NEWSPACE]);
7430 PrintF("evacuate=%.1f ", scopes_[Scope::MC_EVACUATE_PAGES]);
7431 PrintF("new_new=%.1f ", scopes_[Scope::MC_UPDATE_NEW_TO_NEW_POINTERS]);
7432 PrintF("root_new=%.1f ", scopes_[Scope::MC_UPDATE_ROOT_TO_NEW_POINTERS]);
7433 PrintF("old_new=%.1f ", scopes_[Scope::MC_UPDATE_OLD_TO_NEW_POINTERS]);
7434 PrintF("compaction_ptrs=%.1f ",
7435 scopes_[Scope::MC_UPDATE_POINTERS_TO_EVACUATED]);
7436 PrintF("intracompaction_ptrs=%.1f ",
7437 scopes_[Scope::MC_UPDATE_POINTERS_BETWEEN_EVACUATED]);
7438 PrintF("misc_compaction=%.1f ", scopes_[Scope::MC_UPDATE_MISC_POINTERS]);
7439 PrintF("weakcollection_process=%.1f ",
7440 scopes_[Scope::MC_WEAKCOLLECTION_PROCESS]);
7441 PrintF("weakcollection_clear=%.1f ",
7442 scopes_[Scope::MC_WEAKCOLLECTION_CLEAR]);
7444 PrintF("total_size_before=%" V8_PTR_PREFIX "d ", start_object_size_);
7445 PrintF("total_size_after=%" V8_PTR_PREFIX "d ", heap_->SizeOfObjects());
7446 PrintF("holes_size_before=%" V8_PTR_PREFIX "d ",
7447 in_free_list_or_wasted_before_gc_);
7448 PrintF("holes_size_after=%" V8_PTR_PREFIX "d ", CountTotalHolesSize(heap_));
7450 PrintF("allocated=%" V8_PTR_PREFIX "d ", allocated_since_last_gc_);
7451 PrintF("promoted=%" V8_PTR_PREFIX "d ", promoted_objects_size_);
7452 PrintF("nodes_died_in_new=%d ", nodes_died_in_new_space_);
7453 PrintF("nodes_copied_in_new=%d ", nodes_copied_in_new_space_);
7454 PrintF("nodes_promoted=%d ", nodes_promoted_);
7456 if (collector_ == SCAVENGER) {
7457 PrintF("stepscount=%d ", steps_count_since_last_gc_);
7458 PrintF("stepstook=%.1f ", steps_took_since_last_gc_);
7459 } else {
7460 PrintF("stepscount=%d ", steps_count_);
7461 PrintF("stepstook=%.1f ", steps_took_);
7462 PrintF("longeststep=%.1f ", longest_step_);
7463 }
7465 PrintF("\n");
7466 }
7468 heap_->PrintShortHeapStatistics();
7469 }
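// Illustrative sketch (not part of the original file): the tracer derives the
// GC pause and the intervening mutator time from three timestamps -- the end
// of the previous GC, the start of this GC, and the end of this GC. A
// stripped-down version of that bookkeeping, with hypothetical names:
#if 0
struct SketchPauseStats {
  double last_gc_end_ms;  // 0 until the first GC has finished
  double total_gc_ms;
  double max_pause_ms;
};

static void SketchRecordGC(SketchPauseStats* stats,
                           double start_ms, double end_ms) {
  double pause_ms = end_ms - start_ms;
  double mutator_ms =
      stats->last_gc_end_ms > 0 ? start_ms - stats->last_gc_end_ms : 0.0;
  stats->total_gc_ms += pause_ms;
  if (pause_ms > stats->max_pause_ms) stats->max_pause_ms = pause_ms;
  stats->last_gc_end_ms = end_ms;
  (void) mutator_ms;  // reported alongside the pause in the real tracer
}
#endif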
7472 const char* GCTracer::CollectorString() {
7473 switch (collector_) {
7474 case SCAVENGER:
7475 return "Scavenge";
7476 case MARK_COMPACTOR:
7477 return "Mark-sweep";
7478 }
7479 return "Unknown GC";
7480 }
7483 int KeyedLookupCache::Hash(Map* map, Name* name) {
7484 // Uses only lower 32 bits if pointers are larger.
7485 uintptr_t addr_hash =
7486 static_cast<uint32_t>(reinterpret_cast<uintptr_t>(map)) >> kMapHashShift;
7487 return static_cast<uint32_t>((addr_hash ^ name->Hash()) & kCapacityMask);
7488 }
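// Illustrative sketch (not part of the original file): the hash mixes the
// map's address (alignment bits shifted away) with the name's hash and masks
// the result to the cache capacity; Lookup and Update then probe a small,
// bucket-aligned group of entries starting at that index. With hypothetical
// constants standing in for kMapHashShift, kCapacityMask and
// kEntriesPerBucket:
#if 0
static const int kSketchMapHashShift = 5;      // drop pointer alignment bits
static const int kSketchCapacity = 256;        // number of entries, power of 2
static const int kSketchEntriesPerBucket = 4;

static int SketchBucketIndex(uintptr_t map_address, uint32_t name_hash) {
  uint32_t addr_hash =
      static_cast<uint32_t>(map_address) >> kSketchMapHashShift;
  int raw = static_cast<int>((addr_hash ^ name_hash) & (kSketchCapacity - 1));
  // Align down to a bucket boundary so a whole bucket fits in the table,
  // mirroring the (Hash(map, name) & kHashMask) step in Lookup and Update.
  return raw & ~(kSketchEntriesPerBucket - 1);
}
#endif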
7491 int KeyedLookupCache::Lookup(Map* map, Name* name) {
7492 int index = (Hash(map, name) & kHashMask);
7493 for (int i = 0; i < kEntriesPerBucket; i++) {
7494 Key& key = keys_[index + i];
7495 if ((key.map == map) && key.name->Equals(name)) {
7496 return field_offsets_[index + i];
7497 }
7498 }
7499 return kNotFound;
7500 }
7503 void KeyedLookupCache::Update(Map* map, Name* name, int field_offset) {
7504 if (!name->IsUniqueName()) {
7505 String* internalized_string;
7506 if (!map->GetIsolate()->heap()->InternalizeStringIfExists(
7507 String::cast(name), &internalized_string)) {
7508 return;
7509 }
7510 name = internalized_string;
7511 }
7512 // This cache is cleared only between mark compact passes, so we expect the
7513 // cache to only contain old space names.
7514 ASSERT(!map->GetIsolate()->heap()->InNewSpace(name));
7516 int index = (Hash(map, name) & kHashMask);
7517 // After a GC there will be free slots, so we use them in order (this may
7518 // help to get the most frequently used one in position 0).
7519 for (int i = 0; i < kEntriesPerBucket; i++) {
7520 Key& key = keys_[index];
7521 Object* free_entry_indicator = NULL;
7522 if (key.map == free_entry_indicator) {
7523 key.map = map;
7524 key.name = name;
7525 field_offsets_[index + i] = field_offset;
7526 return;
7527 }
7528 }
7529 // No free entry found in this bucket, so we move them all down one and
7530 // put the new entry at position zero.
7531 for (int i = kEntriesPerBucket - 1; i > 0; i--) {
7532 Key& key = keys_[index + i];
7533 Key& key2 = keys_[index + i - 1];
7534 key = key2;
7535 field_offsets_[index + i] = field_offsets_[index + i - 1];
7536 }
7538 // Write the new first entry.
7539 Key& key = keys_[index];
7540 key.map = map;
7541 key.name = name;
7542 field_offsets_[index] = field_offset;
7543 }
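// Illustrative sketch (not part of the original file): the replacement policy
// the comments above describe, restated on a plain array -- take the first
// free slot if there is one, otherwise shift the bucket down one slot and
// install the new entry at the front. Names here are hypothetical.
#if 0
static const int kSketchBucketSize = 4;

struct SketchEntry {
  const void* map;   // NULL marks a free slot
  const void* name;
  int field_offset;
};

static void SketchInsert(SketchEntry bucket[kSketchBucketSize],
                         const void* map, const void* name, int field_offset) {
  for (int i = 0; i < kSketchBucketSize; i++) {
    if (bucket[i].map == NULL) {  // free slots are used in order
      bucket[i].map = map;
      bucket[i].name = name;
      bucket[i].field_offset = field_offset;
      return;
    }
  }
  // No free slot: shift everything down one (dropping the last entry) and
  // write the new entry at position zero.
  for (int i = kSketchBucketSize - 1; i > 0; i--) {
    bucket[i] = bucket[i - 1];
  }
  bucket[0].map = map;
  bucket[0].name = name;
  bucket[0].field_offset = field_offset;
}
#endif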
7546 void KeyedLookupCache::Clear() {
7547 for (int index = 0; index < kLength; index++) keys_[index].map = NULL;
7548 }
7551 void DescriptorLookupCache::Clear() {
7552 for (int index = 0; index < kLength; index++) keys_[index].source = NULL;
7553 }
7556 #ifdef DEBUG
7557 void Heap::GarbageCollectionGreedyCheck() {
7558 ASSERT(FLAG_gc_greedy);
7559 if (isolate_->bootstrapper()->IsActive()) return;
7560 if (disallow_allocation_failure()) return;
7561 CollectGarbage(NEW_SPACE);
7562 }
7563 #endif
7566 void ExternalStringTable::CleanUp() {
7567 int last = 0;
7568 for (int i = 0; i < new_space_strings_.length(); ++i) {
7569 if (new_space_strings_[i] == heap_->the_hole_value()) {
7570 continue;
7571 }
7572 ASSERT(new_space_strings_[i]->IsExternalString());
7573 if (heap_->InNewSpace(new_space_strings_[i])) {
7574 new_space_strings_[last++] = new_space_strings_[i];
7575 } else {
7576 old_space_strings_.Add(new_space_strings_[i]);
7577 }
7578 }
7579 new_space_strings_.Rewind(last);
7580 new_space_strings_.Trim();
7582 last = 0;
7583 for (int i = 0; i < old_space_strings_.length(); ++i) {
7584 if (old_space_strings_[i] == heap_->the_hole_value()) {
7585 continue;
7586 }
7587 ASSERT(old_space_strings_[i]->IsExternalString());
7588 ASSERT(!heap_->InNewSpace(old_space_strings_[i]));
7589 old_space_strings_[last++] = old_space_strings_[i];
7590 }
7591 old_space_strings_.Rewind(last);
7592 old_space_strings_.Trim();
7593 #ifdef VERIFY_HEAP
7594 if (FLAG_verify_heap) {
7595 Verify();
7596 }
7597 #endif
7598 }
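// Illustrative sketch (not part of the original file): CleanUp compacts each
// list in place with a write index -- surviving entries are copied down to
// |last| and the dead tail is dropped with Rewind and Trim. The same pattern
// on a plain array, with hypothetical names:
#if 0
static int SketchCompact(int entries[], int length, int hole_marker) {
  int last = 0;
  for (int i = 0; i < length; ++i) {
    if (entries[i] == hole_marker) continue;  // skip cleared slots
    entries[last++] = entries[i];             // keep, shifted down
  }
  return last;  // new length; analogous to Rewind(last) followed by Trim()
}
#endif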
7601 void ExternalStringTable::TearDown() {
7602 for (int i = 0; i < new_space_strings_.length(); ++i) {
7603 heap_->FinalizeExternalString(ExternalString::cast(new_space_strings_[i]));
7604 }
7605 new_space_strings_.Free();
7606 for (int i = 0; i < old_space_strings_.length(); ++i) {
7607 heap_->FinalizeExternalString(ExternalString::cast(old_space_strings_[i]));
7608 }
7609 old_space_strings_.Free();
7610 }
7613 void Heap::QueueMemoryChunkForFree(MemoryChunk* chunk) {
7614 chunk->set_next_chunk(chunks_queued_for_free_);
7615 chunks_queued_for_free_ = chunk;
7616 }
7619 void Heap::FreeQueuedChunks() {
7620 if (chunks_queued_for_free_ == NULL) return;
7621 MemoryChunk* next;
7622 MemoryChunk* chunk;
7623 for (chunk = chunks_queued_for_free_; chunk != NULL; chunk = next) {
7624 next = chunk->next_chunk();
7625 chunk->SetFlag(MemoryChunk::ABOUT_TO_BE_FREED);
7627 if (chunk->owner()->identity() == LO_SPACE) {
7628 // StoreBuffer::Filter relies on MemoryChunk::FromAnyPointerAddress.
7629 // If FromAnyPointerAddress encounters a slot that belongs to a large
7630 // chunk queued for deletion it will fail to find the chunk because
7631 // it tries to perform a search in the list of pages owned by the large
7632 // object space, and queued chunks were detached from that list.
7633 // To work around this we split the large chunk into normal kPageSize aligned
7634 // pieces and initialize the size, owner and flags fields of every piece.
7635 // If FromAnyPointerAddress encounters a slot that belongs to one of
7636 // these smaller pieces it will treat it as a slot on a normal Page.
7637 Address chunk_end = chunk->address() + chunk->size();
7638 MemoryChunk* inner = MemoryChunk::FromAddress(
7639 chunk->address() + Page::kPageSize);
7640 MemoryChunk* inner_last = MemoryChunk::FromAddress(chunk_end - 1);
7641 while (inner <= inner_last) {
7642 // Size of a large chunk is always a multiple of
7643 // OS::AllocateAlignment() so there is always
7644 // enough space for a fake MemoryChunk header.
7645 Address area_end = Min(inner->address() + Page::kPageSize, chunk_end);
7646 // Guard against overflow.
7647 if (area_end < inner->address()) area_end = chunk_end;
7648 inner->SetArea(inner->address(), area_end);
7649 inner->set_size(Page::kPageSize);
7650 inner->set_owner(lo_space());
7651 inner->SetFlag(MemoryChunk::ABOUT_TO_BE_FREED);
7652 inner = MemoryChunk::FromAddress(
7653 inner->address() + Page::kPageSize);
7654 }
7655 }
7656 }
7657 isolate_->heap()->store_buffer()->Compact();
7658 isolate_->heap()->store_buffer()->Filter(MemoryChunk::ABOUT_TO_BE_FREED);
7659 for (chunk = chunks_queued_for_free_; chunk != NULL; chunk = next) {
7660 next = chunk->next_chunk();
7661 isolate_->memory_allocator()->Free(chunk);
7662 }
7663 chunks_queued_for_free_ = NULL;
7664 }
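// Illustrative sketch (not part of the original file): the splitting loop
// above stamps a fake header at every Page::kPageSize boundary of a large
// chunk, so that lookups which mask an arbitrary slot address down to a page
// boundary always land on an initialized header. The address arithmetic, with
// a hypothetical page size:
#if 0
static const uintptr_t kSketchLargePageSize = 1 << 20;  // stand-in for Page::kPageSize

static uintptr_t SketchHeaderFor(uintptr_t slot_address) {
  // Masking off the low bits yields the start of the page-sized piece that
  // contains |slot_address|; that is where a fake header was written.
  return slot_address & ~(kSketchLargePageSize - 1);
}
#endif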
7667 void Heap::RememberUnmappedPage(Address page, bool compacted) {
7668 uintptr_t p = reinterpret_cast<uintptr_t>(page);
7669 // Tag the page pointer to make it findable in the dump file.
7670 if (compacted) {
7671 p ^= 0xc1ead & (Page::kPageSize - 1); // Cleared.
7672 } else {
7673 p ^= 0x1d1ed & (Page::kPageSize - 1); // I died.
7674 }
7675 remembered_unmapped_pages_[remembered_unmapped_pages_index_] =
7676 reinterpret_cast<Address>(p);
7677 remembered_unmapped_pages_index_++;
7678 remembered_unmapped_pages_index_ %= kRememberedUnmappedPages;
7679 }
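// Illustrative sketch (not part of the original file): because |page| is page
// aligned, XOR-ing in a tag that is masked to the in-page offset bits only
// disturbs the low bits -- the page address can still be recovered by masking,
// and the distinctive tag values ("c1ead", "1d1ed") are easy to spot when
// scanning a crash dump. With a hypothetical page size:
#if 0
static const uintptr_t kSketchDumpPageSize = 1 << 20;

static uintptr_t SketchTagUnmappedPage(uintptr_t page_start, uintptr_t tag) {
  uintptr_t tagged = page_start ^ (tag & (kSketchDumpPageSize - 1));
  // Recovering the original page is just masking the tag back out:
  //   (tagged & ~(kSketchDumpPageSize - 1)) == page_start
  return tagged;
}
#endif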
7682 void Heap::ClearObjectStats(bool clear_last_time_stats) {
7683 memset(object_counts_, 0, sizeof(object_counts_));
7684 memset(object_sizes_, 0, sizeof(object_sizes_));
7685 if (clear_last_time_stats) {
7686 memset(object_counts_last_time_, 0, sizeof(object_counts_last_time_));
7687 memset(object_sizes_last_time_, 0, sizeof(object_sizes_last_time_));
7688 }
7689 }
7692 static LazyMutex checkpoint_object_stats_mutex = LAZY_MUTEX_INITIALIZER;
7695 void Heap::CheckpointObjectStats() {
7696 LockGuard<Mutex> lock_guard(checkpoint_object_stats_mutex.Pointer());
7697 Counters* counters = isolate()->counters();
7698 #define ADJUST_LAST_TIME_OBJECT_COUNT(name) \
7699 counters->count_of_##name()->Increment( \
7700 static_cast<int>(object_counts_[name])); \
7701 counters->count_of_##name()->Decrement( \
7702 static_cast<int>(object_counts_last_time_[name])); \
7703 counters->size_of_##name()->Increment( \
7704 static_cast<int>(object_sizes_[name])); \
7705 counters->size_of_##name()->Decrement( \
7706 static_cast<int>(object_sizes_last_time_[name]));
7707 INSTANCE_TYPE_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
7708 #undef ADJUST_LAST_TIME_OBJECT_COUNT
7709 int index;
7710 #define ADJUST_LAST_TIME_OBJECT_COUNT(name) \
7711 index = FIRST_CODE_KIND_SUB_TYPE + Code::name; \
7712 counters->count_of_CODE_TYPE_##name()->Increment( \
7713 static_cast<int>(object_counts_[index])); \
7714 counters->count_of_CODE_TYPE_##name()->Decrement( \
7715 static_cast<int>(object_counts_last_time_[index])); \
7716 counters->size_of_CODE_TYPE_##name()->Increment( \
7717 static_cast<int>(object_sizes_[index])); \
7718 counters->size_of_CODE_TYPE_##name()->Decrement( \
7719 static_cast<int>(object_sizes_last_time_[index]));
7720 CODE_KIND_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
7721 #undef ADJUST_LAST_TIME_OBJECT_COUNT
7722 #define ADJUST_LAST_TIME_OBJECT_COUNT(name) \
7723 index = FIRST_FIXED_ARRAY_SUB_TYPE + name; \
7724 counters->count_of_FIXED_ARRAY_##name()->Increment( \
7725 static_cast<int>(object_counts_[index])); \
7726 counters->count_of_FIXED_ARRAY_##name()->Decrement( \
7727 static_cast<int>(object_counts_last_time_[index])); \
7728 counters->size_of_FIXED_ARRAY_##name()->Increment( \
7729 static_cast<int>(object_sizes_[index])); \
7730 counters->size_of_FIXED_ARRAY_##name()->Decrement( \
7731 static_cast<int>(object_sizes_last_time_[index]));
7732 FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
7733 #undef ADJUST_LAST_TIME_OBJECT_COUNT
7734 #define ADJUST_LAST_TIME_OBJECT_COUNT(name) \
7735 index = \
7736 FIRST_CODE_AGE_SUB_TYPE + Code::k##name##CodeAge - Code::kFirstCodeAge; \
7737 counters->count_of_CODE_AGE_##name()->Increment( \
7738 static_cast<int>(object_counts_[index])); \
7739 counters->count_of_CODE_AGE_##name()->Decrement( \
7740 static_cast<int>(object_counts_last_time_[index])); \
7741 counters->size_of_CODE_AGE_##name()->Increment( \
7742 static_cast<int>(object_sizes_[index])); \
7743 counters->size_of_CODE_AGE_##name()->Decrement( \
7744 static_cast<int>(object_sizes_last_time_[index]));
7745 CODE_AGE_LIST_COMPLETE(ADJUST_LAST_TIME_OBJECT_COUNT)
7746 #undef ADJUST_LAST_TIME_OBJECT_COUNT
7748 OS::MemCopy(object_counts_last_time_, object_counts_, sizeof(object_counts_));
7749 OS::MemCopy(object_sizes_last_time_, object_sizes_, sizeof(object_sizes_));
7750 }
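// Illustrative sketch (not part of the original file): each checkpoint above
// adjusts a counter by the delta since the previous checkpoint (increment by
// the current value, decrement by the last snapshot) and then snapshots the
// current values, so the external counters always reflect the latest totals
// without ever being reset. The same idea with plain ints:
#if 0
static void SketchCheckpoint(int counters[], const int current[],
                             int last_time[], int n) {
  for (int i = 0; i < n; i++) {
    counters[i] += current[i] - last_time[i];  // apply only the delta
    last_time[i] = current[i];                 // snapshot for the next round
  }
}
#endif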
7753 } } // namespace v8::internal