1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission.
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 #include "accessors.h"
32 #include "bootstrapper.h"
34 #include "compilation-cache.h"
35 #include "cpu-profiler.h"
37 #include "deoptimizer.h"
38 #include "global-handles.h"
39 #include "heap-profiler.h"
40 #include "incremental-marking.h"
41 #include "mark-compact.h"
43 #include "objects-visiting.h"
44 #include "objects-visiting-inl.h"
46 #include "runtime-profiler.h"
47 #include "scopeinfo.h"
49 #include "store-buffer.h"
50 #include "v8threads.h"
52 #include "vm-state-inl.h"
53 #if V8_TARGET_ARCH_ARM && !V8_INTERPRETED_REGEXP
54 #include "regexp-macro-assembler.h"
55 #include "arm/regexp-macro-assembler-arm.h"
57 #if V8_TARGET_ARCH_MIPS && !V8_INTERPRETED_REGEXP
58 #include "regexp-macro-assembler.h"
59 #include "mips/regexp-macro-assembler-mips.h"
68 // semispace_size_ should be a power of 2 and old_generation_size_ should be
69 // a multiple of Page::kPageSize.
70 #if V8_TARGET_ARCH_X64
71 #define LUMP_OF_MEMORY (2 * MB)
72 code_range_size_(512*MB),
74 #define LUMP_OF_MEMORY MB
78 reserved_semispace_size_(4 * Max(LUMP_OF_MEMORY, Page::kPageSize)),
79 max_semispace_size_(4 * Max(LUMP_OF_MEMORY, Page::kPageSize)),
80 initial_semispace_size_(Page::kPageSize),
81 max_old_generation_size_(192*MB),
82 max_executable_size_(max_old_generation_size_),
84 reserved_semispace_size_(8 * Max(LUMP_OF_MEMORY, Page::kPageSize)),
85 max_semispace_size_(8 * Max(LUMP_OF_MEMORY, Page::kPageSize)),
86 initial_semispace_size_(Page::kPageSize),
87 max_old_generation_size_(700ul * LUMP_OF_MEMORY),
88 max_executable_size_(256l * LUMP_OF_MEMORY),
91 // Variables set based on semispace_size_ and old_generation_size_ in
92 // ConfigureHeap (survived_since_last_expansion_, external_allocation_limit_)
93 // Will be 4 * reserved_semispace_size_ to ensure that young
94 // generation can be aligned to its size.
95 survived_since_last_expansion_(0),
97 always_allocate_scope_depth_(0),
98 linear_allocation_scope_depth_(0),
99 contexts_disposed_(0),
101 flush_monomorphic_ics_(false),
102 scan_on_scavenge_pages_(0),
104 old_pointer_space_(NULL),
105 old_data_space_(NULL),
109 property_cell_space_(NULL),
111 gc_state_(NOT_IN_GC),
112 gc_post_processing_depth_(0),
115 remembered_unmapped_pages_index_(0),
116 unflattened_strings_length_(0),
118 allocation_timeout_(0),
119 disallow_allocation_failure_(false),
121 new_space_high_promotion_mode_active_(false),
122 old_generation_allocation_limit_(kMinimumOldGenerationAllocationLimit),
123 size_of_old_gen_at_last_old_space_gc_(0),
124 external_allocation_limit_(0),
125 amount_of_external_allocated_memory_(0),
126 amount_of_external_allocated_memory_at_last_global_gc_(0),
127 old_gen_exhausted_(false),
128 store_buffer_rebuilder_(store_buffer()),
129 hidden_string_(NULL),
130 global_gc_prologue_callback_(NULL),
131 global_gc_epilogue_callback_(NULL),
132 gc_safe_size_of_old_object_(NULL),
133 total_regexp_code_generated_(0),
135 young_survivors_after_last_gc_(0),
136 high_survival_rate_period_length_(0),
137 low_survival_rate_period_length_(0),
139 previous_survival_rate_trend_(Heap::STABLE),
140 survival_rate_trend_(Heap::STABLE),
142 total_gc_time_ms_(0.0),
143 max_alive_after_gc_(0),
144 min_in_mutator_(kMaxInt),
145 alive_after_last_gc_(0),
146 last_gc_end_timestamp_(0.0),
151 incremental_marking_(this),
152 number_idle_notifications_(0),
153 last_idle_notification_gc_count_(0),
154 last_idle_notification_gc_count_init_(false),
155 mark_sweeps_since_idle_round_started_(0),
156 gc_count_at_last_idle_gc_(0),
157 scavenges_since_last_idle_round_(kIdleScavengeThreshold),
158 gcs_since_last_deopt_(0),
160 no_weak_embedded_maps_verification_scope_depth_(0),
162 promotion_queue_(this),
164 chunks_queued_for_free_(NULL),
165 relocation_mutex_(NULL) {
166 // Allow build-time customization of the max semispace size. Building
167 // V8 with snapshots and a non-default max semispace size is much
168 // easier if you can define it as part of the build environment.
169 #if defined(V8_MAX_SEMISPACE_SIZE)
170 max_semispace_size_ = reserved_semispace_size_ = V8_MAX_SEMISPACE_SIZE;
173 intptr_t max_virtual = OS::MaxVirtualMemory();
175 if (max_virtual > 0) {
176 if (code_range_size_ > 0) {
177 // Reserve no more than 1/8 of the memory for the code range.
178 code_range_size_ = Min(code_range_size_, max_virtual >> 3);
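// For example, with 4 GB of addressable virtual memory this caps the code
// range at 512 MB, which matches the default code_range_size_ chosen for
// x64 above.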
182 memset(roots_, 0, sizeof(roots_[0]) * kRootListLength);
183 native_contexts_list_ = NULL;
184 array_buffers_list_ = Smi::FromInt(0);
185 mark_compact_collector_.heap_ = this;
186 external_string_table_.heap_ = this;
187 // Put a dummy entry in the remembered pages so we can find the list in the
188 // minidump even if there are no real unmapped pages.
189 RememberUnmappedPage(NULL, false);
191 ClearObjectStats(true);
195 intptr_t Heap::Capacity() {
196 if (!HasBeenSetUp()) return 0;
198 return new_space_.Capacity() +
199 old_pointer_space_->Capacity() +
200 old_data_space_->Capacity() +
201 code_space_->Capacity() +
202 map_space_->Capacity() +
203 cell_space_->Capacity() +
204 property_cell_space_->Capacity();
208 intptr_t Heap::CommittedMemory() {
209 if (!HasBeenSetUp()) return 0;
211 return new_space_.CommittedMemory() +
212 old_pointer_space_->CommittedMemory() +
213 old_data_space_->CommittedMemory() +
214 code_space_->CommittedMemory() +
215 map_space_->CommittedMemory() +
216 cell_space_->CommittedMemory() +
217 property_cell_space_->CommittedMemory() +
222 size_t Heap::CommittedPhysicalMemory() {
223 if (!HasBeenSetUp()) return 0;
225 return new_space_.CommittedPhysicalMemory() +
226 old_pointer_space_->CommittedPhysicalMemory() +
227 old_data_space_->CommittedPhysicalMemory() +
228 code_space_->CommittedPhysicalMemory() +
229 map_space_->CommittedPhysicalMemory() +
230 cell_space_->CommittedPhysicalMemory() +
231 property_cell_space_->CommittedPhysicalMemory() +
232 lo_space_->CommittedPhysicalMemory();
236 intptr_t Heap::CommittedMemoryExecutable() {
237 if (!HasBeenSetUp()) return 0;
239 return isolate()->memory_allocator()->SizeExecutable();
243 intptr_t Heap::Available() {
244 if (!HasBeenSetUp()) return 0;
246 return new_space_.Available() +
247 old_pointer_space_->Available() +
248 old_data_space_->Available() +
249 code_space_->Available() +
250 map_space_->Available() +
251 cell_space_->Available() +
252 property_cell_space_->Available();
256 bool Heap::HasBeenSetUp() {
257 return old_pointer_space_ != NULL &&
258 old_data_space_ != NULL &&
259 code_space_ != NULL &&
260 map_space_ != NULL &&
261 cell_space_ != NULL &&
262 property_cell_space_ != NULL &&
267 int Heap::GcSafeSizeOfOldObject(HeapObject* object) {
268 if (IntrusiveMarking::IsMarked(object)) {
269 return IntrusiveMarking::SizeOfMarkedObject(object);
271 return object->SizeFromMap(object->map());
275 GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space,
276 const char** reason) {
277 // Is global GC requested?
278 if (space != NEW_SPACE) {
279 isolate_->counters()->gc_compactor_caused_by_request()->Increment();
280 *reason = "GC in old space requested";
281 return MARK_COMPACTOR;
284 if (FLAG_gc_global || (FLAG_stress_compaction && (gc_count_ & 1) != 0)) {
285 *reason = "GC in old space forced by flags";
286 return MARK_COMPACTOR;
289 // Is enough data promoted to justify a global GC?
290 if (OldGenerationAllocationLimitReached()) {
291 isolate_->counters()->gc_compactor_caused_by_promoted_data()->Increment();
292 *reason = "promotion limit reached";
293 return MARK_COMPACTOR;
296 // Have allocation in OLD and LO failed?
297 if (old_gen_exhausted_) {
298 isolate_->counters()->
299 gc_compactor_caused_by_oldspace_exhaustion()->Increment();
300 *reason = "old generations exhausted";
301 return MARK_COMPACTOR;
304 // Is there enough space left in OLD to guarantee that a scavenge can succeed?
307 // Note that MemoryAllocator->MaxAvailable() undercounts the memory available
308 // for object promotion. It counts only the bytes that the memory
309 // allocator has not yet allocated from the OS and assigned to any space,
310 // and does not count available bytes already in the old space or code
311 // space. Undercounting is safe---we may get an unrequested full GC when
312 // a scavenge would have succeeded.
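// For example, if the allocator has only 4 MB left to hand out but new space
// currently holds 8 MB of objects, we conservatively pick a full mark-compact
// even though the old spaces themselves might still have had room for the
// promoted objects.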
313 if (isolate_->memory_allocator()->MaxAvailable() <= new_space_.Size()) {
314 isolate_->counters()->
315 gc_compactor_caused_by_oldspace_exhaustion()->Increment();
316 *reason = "scavenge might not succeed";
317 return MARK_COMPACTOR;
326 // TODO(1238405): Combine the infrastructure for --heap-stats and
327 // --log-gc to avoid the complicated preprocessor and flag testing.
328 void Heap::ReportStatisticsBeforeGC() {
329 // Heap::ReportHeapStatistics will also log NewSpace statistics when
330 // --log-gc is set. The following logic is used to avoid double logging.
333 if (FLAG_heap_stats || FLAG_log_gc) new_space_.CollectStatistics();
334 if (FLAG_heap_stats) {
335 ReportHeapStatistics("Before GC");
336 } else if (FLAG_log_gc) {
337 new_space_.ReportStatistics();
339 if (FLAG_heap_stats || FLAG_log_gc) new_space_.ClearHistograms();
342 new_space_.CollectStatistics();
343 new_space_.ReportStatistics();
344 new_space_.ClearHistograms();
350 void Heap::PrintShortHeapStatistics() {
351 if (!FLAG_trace_gc_verbose) return;
352 PrintPID("Memory allocator, used: %6" V8_PTR_PREFIX "d KB"
353 ", available: %6" V8_PTR_PREFIX "d KB\n",
354 isolate_->memory_allocator()->Size() / KB,
355 isolate_->memory_allocator()->Available() / KB);
356 PrintPID("New space, used: %6" V8_PTR_PREFIX "d KB"
357 ", available: %6" V8_PTR_PREFIX "d KB"
358 ", committed: %6" V8_PTR_PREFIX "d KB\n",
359 new_space_.Size() / KB,
360 new_space_.Available() / KB,
361 new_space_.CommittedMemory() / KB);
362 PrintPID("Old pointers, used: %6" V8_PTR_PREFIX "d KB"
363 ", available: %6" V8_PTR_PREFIX "d KB"
364 ", committed: %6" V8_PTR_PREFIX "d KB\n",
365 old_pointer_space_->SizeOfObjects() / KB,
366 old_pointer_space_->Available() / KB,
367 old_pointer_space_->CommittedMemory() / KB);
368 PrintPID("Old data space, used: %6" V8_PTR_PREFIX "d KB"
369 ", available: %6" V8_PTR_PREFIX "d KB"
370 ", committed: %6" V8_PTR_PREFIX "d KB\n",
371 old_data_space_->SizeOfObjects() / KB,
372 old_data_space_->Available() / KB,
373 old_data_space_->CommittedMemory() / KB);
374 PrintPID("Code space, used: %6" V8_PTR_PREFIX "d KB"
375 ", available: %6" V8_PTR_PREFIX "d KB"
376 ", committed: %6" V8_PTR_PREFIX "d KB\n",
377 code_space_->SizeOfObjects() / KB,
378 code_space_->Available() / KB,
379 code_space_->CommittedMemory() / KB);
380 PrintPID("Map space, used: %6" V8_PTR_PREFIX "d KB"
381 ", available: %6" V8_PTR_PREFIX "d KB"
382 ", committed: %6" V8_PTR_PREFIX "d KB\n",
383 map_space_->SizeOfObjects() / KB,
384 map_space_->Available() / KB,
385 map_space_->CommittedMemory() / KB);
386 PrintPID("Cell space, used: %6" V8_PTR_PREFIX "d KB"
387 ", available: %6" V8_PTR_PREFIX "d KB"
388 ", committed: %6" V8_PTR_PREFIX "d KB\n",
389 cell_space_->SizeOfObjects() / KB,
390 cell_space_->Available() / KB,
391 cell_space_->CommittedMemory() / KB);
392 PrintPID("PropertyCell space, used: %6" V8_PTR_PREFIX "d KB"
393 ", available: %6" V8_PTR_PREFIX "d KB"
394 ", committed: %6" V8_PTR_PREFIX "d KB\n",
395 property_cell_space_->SizeOfObjects() / KB,
396 property_cell_space_->Available() / KB,
397 property_cell_space_->CommittedMemory() / KB);
398 PrintPID("Large object space, used: %6" V8_PTR_PREFIX "d KB"
399 ", available: %6" V8_PTR_PREFIX "d KB"
400 ", committed: %6" V8_PTR_PREFIX "d KB\n",
401 lo_space_->SizeOfObjects() / KB,
402 lo_space_->Available() / KB,
403 lo_space_->CommittedMemory() / KB);
404 PrintPID("All spaces, used: %6" V8_PTR_PREFIX "d KB"
405 ", available: %6" V8_PTR_PREFIX "d KB"
406 ", committed: %6" V8_PTR_PREFIX "d KB\n",
407 this->SizeOfObjects() / KB,
408 this->Available() / KB,
409 this->CommittedMemory() / KB);
410 PrintPID("External memory reported: %6" V8_PTR_PREFIX "d KB\n",
411 amount_of_external_allocated_memory_ / KB);
412 PrintPID("Total time spent in GC : %.1f ms\n", total_gc_time_ms_);
416 // TODO(1238405): Combine the infrastructure for --heap-stats and
417 // --log-gc to avoid the complicated preprocessor and flag testing.
418 void Heap::ReportStatisticsAfterGC() {
419 // Similar to the before-GC case, we use some complicated logic to ensure that
420 // NewSpace statistics are logged exactly once when --log-gc is turned on.
422 if (FLAG_heap_stats) {
423 new_space_.CollectStatistics();
424 ReportHeapStatistics("After GC");
425 } else if (FLAG_log_gc) {
426 new_space_.ReportStatistics();
429 if (FLAG_log_gc) new_space_.ReportStatistics();
434 void Heap::GarbageCollectionPrologue() {
435 { AllowHeapAllocation for_the_first_part_of_prologue;
436 isolate_->transcendental_cache()->Clear();
437 ClearJSFunctionResultCaches();
439 unflattened_strings_length_ = 0;
441 if (FLAG_flush_code && FLAG_flush_code_incrementally) {
442 mark_compact_collector()->EnableCodeFlushing(true);
446 if (FLAG_verify_heap) {
453 ASSERT(!AllowHeapAllocation::IsAllowed() && gc_state_ == NOT_IN_GC);
455 if (FLAG_gc_verbose) Print();
457 ReportStatisticsBeforeGC();
460 store_buffer()->GCPrologue();
464 intptr_t Heap::SizeOfObjects() {
466 AllSpaces spaces(this);
467 for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
468 total += space->SizeOfObjects();
474 void Heap::RepairFreeListsAfterBoot() {
475 PagedSpaces spaces(this);
476 for (PagedSpace* space = spaces.next();
478 space = spaces.next()) {
479 space->RepairFreeListsAfterBoot();
484 void Heap::GarbageCollectionEpilogue() {
485 store_buffer()->GCEpilogue();
487 // In release mode, we only zap the from space under heap verification.
488 if (Heap::ShouldZapGarbage()) {
493 if (FLAG_verify_heap) {
498 AllowHeapAllocation for_the_rest_of_the_epilogue;
501 if (FLAG_print_global_handles) isolate_->global_handles()->Print();
502 if (FLAG_print_handles) PrintHandles();
503 if (FLAG_gc_verbose) Print();
504 if (FLAG_code_stats) ReportCodeStatistics("After GC");
506 if (FLAG_deopt_every_n_garbage_collections > 0) {
507 if (++gcs_since_last_deopt_ == FLAG_deopt_every_n_garbage_collections) {
508 Deoptimizer::DeoptimizeAll(isolate());
509 gcs_since_last_deopt_ = 0;
513 isolate_->counters()->alive_after_last_gc()->Set(
514 static_cast<int>(SizeOfObjects()));
516 isolate_->counters()->string_table_capacity()->Set(
517 string_table()->Capacity());
518 isolate_->counters()->number_of_symbols()->Set(
519 string_table()->NumberOfElements());
521 if (CommittedMemory() > 0) {
522 isolate_->counters()->external_fragmentation_total()->AddSample(
523 static_cast<int>(100 - (SizeOfObjects() * 100.0) / CommittedMemory()));
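// The sample is the percentage of committed memory not occupied by live
// objects; e.g. 60 MB of objects in 100 MB of committed memory yields 40.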
525 isolate_->counters()->heap_fraction_map_space()->AddSample(
527 (map_space()->CommittedMemory() * 100.0) / CommittedMemory()));
528 isolate_->counters()->heap_fraction_cell_space()->AddSample(
530 (cell_space()->CommittedMemory() * 100.0) / CommittedMemory()));
531 isolate_->counters()->heap_fraction_property_cell_space()->
532 AddSample(static_cast<int>(
533 (property_cell_space()->CommittedMemory() * 100.0) /
536 isolate_->counters()->heap_sample_total_committed()->AddSample(
537 static_cast<int>(CommittedMemory() / KB));
538 isolate_->counters()->heap_sample_total_used()->AddSample(
539 static_cast<int>(SizeOfObjects() / KB));
540 isolate_->counters()->heap_sample_map_space_committed()->AddSample(
541 static_cast<int>(map_space()->CommittedMemory() / KB));
542 isolate_->counters()->heap_sample_cell_space_committed()->AddSample(
543 static_cast<int>(cell_space()->CommittedMemory() / KB));
544 isolate_->counters()->
545 heap_sample_property_cell_space_committed()->
546 AddSample(static_cast<int>(
547 property_cell_space()->CommittedMemory() / KB));
550 #define UPDATE_COUNTERS_FOR_SPACE(space) \
551 isolate_->counters()->space##_bytes_available()->Set( \
552 static_cast<int>(space()->Available())); \
553 isolate_->counters()->space##_bytes_committed()->Set( \
554 static_cast<int>(space()->CommittedMemory())); \
555 isolate_->counters()->space##_bytes_used()->Set( \
556 static_cast<int>(space()->SizeOfObjects()));
557 #define UPDATE_FRAGMENTATION_FOR_SPACE(space) \
558 if (space()->CommittedMemory() > 0) { \
559 isolate_->counters()->external_fragmentation_##space()->AddSample( \
560 static_cast<int>(100 - \
561 (space()->SizeOfObjects() * 100.0) / space()->CommittedMemory())); \
563 #define UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(space) \
564 UPDATE_COUNTERS_FOR_SPACE(space) \
565 UPDATE_FRAGMENTATION_FOR_SPACE(space)
567 UPDATE_COUNTERS_FOR_SPACE(new_space)
568 UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(old_pointer_space)
569 UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(old_data_space)
570 UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(code_space)
571 UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(map_space)
572 UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(cell_space)
573 UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(property_cell_space)
574 UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(lo_space)
575 #undef UPDATE_COUNTERS_FOR_SPACE
576 #undef UPDATE_FRAGMENTATION_FOR_SPACE
577 #undef UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE
580 ReportStatisticsAfterGC();
582 #ifdef ENABLE_DEBUGGER_SUPPORT
583 isolate_->debug()->AfterGarbageCollection();
584 #endif // ENABLE_DEBUGGER_SUPPORT
586 error_object_list_.DeferredFormatStackTrace(isolate());
590 void Heap::CollectAllGarbage(int flags, const char* gc_reason) {
591 // Since we are ignoring the return value, the exact choice of space does
592 // not matter, so long as we do not specify NEW_SPACE, which would not cause a full GC.
594 mark_compact_collector_.SetFlags(flags);
595 CollectGarbage(OLD_POINTER_SPACE, gc_reason);
596 mark_compact_collector_.SetFlags(kNoGCFlags);
600 void Heap::CollectAllAvailableGarbage(const char* gc_reason) {
601 // Since we are ignoring the return value, the exact choice of space does
602 // not matter, so long as we do not specify NEW_SPACE, which would not cause a full GC.
604 // Major GC would invoke weak handle callbacks on weakly reachable
605 // handles, but won't collect weakly reachable objects until next
606 // major GC. Therefore if we collect aggressively and weak handle callback
607 // has been invoked, we rerun major GC to release objects which become garbage.
609 // Note: as weak callbacks can execute arbitrary code, we cannot
610 // hope that eventually there will be no weak callback invocations.
611 // Therefore stop recollecting after several attempts.
612 mark_compact_collector()->SetFlags(kMakeHeapIterableMask |
613 kReduceMemoryFootprintMask);
614 isolate_->compilation_cache()->Clear();
615 const int kMaxNumberOfAttempts = 7;
616 for (int attempt = 0; attempt < kMaxNumberOfAttempts; attempt++) {
617 if (!CollectGarbage(OLD_POINTER_SPACE, MARK_COMPACTOR, gc_reason, NULL)) {
621 mark_compact_collector()->SetFlags(kNoGCFlags);
624 incremental_marking()->UncommitMarkingDeque();
628 bool Heap::CollectGarbage(AllocationSpace space,
629 GarbageCollector collector,
630 const char* gc_reason,
631 const char* collector_reason) {
632 // The VM is in the GC state until exiting this function.
633 VMState<GC> state(isolate_);
636 // Reset the allocation timeout to the GC interval, but make sure to
637 // allow at least a few allocations after a collection. The reason
638 // for this is that we have a lot of allocation sequences and we
639 // assume that a garbage collection will allow the subsequent
640 // allocation attempts to go through.
641 allocation_timeout_ = Max(6, FLAG_gc_interval);
644 if (collector == SCAVENGER && !incremental_marking()->IsStopped()) {
645 if (FLAG_trace_incremental_marking) {
646 PrintF("[IncrementalMarking] Scavenge during marking.\n");
650 if (collector == MARK_COMPACTOR &&
651 !mark_compact_collector()->abort_incremental_marking() &&
652 !incremental_marking()->IsStopped() &&
653 !incremental_marking()->should_hurry() &&
654 FLAG_incremental_marking_steps) {
655 // Make progress in incremental marking.
656 const intptr_t kStepSizeWhenDelayedByScavenge = 1 * MB;
657 incremental_marking()->Step(kStepSizeWhenDelayedByScavenge,
658 IncrementalMarking::NO_GC_VIA_STACK_GUARD);
659 if (!incremental_marking()->IsComplete()) {
660 if (FLAG_trace_incremental_marking) {
661 PrintF("[IncrementalMarking] Delaying MarkSweep.\n");
663 collector = SCAVENGER;
664 collector_reason = "incremental marking delaying mark-sweep";
668 bool next_gc_likely_to_collect_more = false;
670 { GCTracer tracer(this, gc_reason, collector_reason);
671 ASSERT(AllowHeapAllocation::IsAllowed());
672 DisallowHeapAllocation no_allocation_during_gc;
673 GarbageCollectionPrologue();
674 // The GC count was incremented in the prologue. Tell the tracer about it.
676 tracer.set_gc_count(gc_count_);
678 // Tell the tracer which collector we've selected.
679 tracer.set_collector(collector);
682 HistogramTimerScope histogram_timer_scope(
683 (collector == SCAVENGER) ? isolate_->counters()->gc_scavenger()
684 : isolate_->counters()->gc_compactor());
685 next_gc_likely_to_collect_more =
686 PerformGarbageCollection(collector, &tracer);
689 GarbageCollectionEpilogue();
692 // Start incremental marking for the next cycle. The heap snapshot
693 // generator needs incremental marking to stay off after it aborted.
694 if (!mark_compact_collector()->abort_incremental_marking() &&
695 incremental_marking()->IsStopped() &&
696 incremental_marking()->WorthActivating() &&
697 NextGCIsLikelyToBeFull()) {
698 incremental_marking()->Start();
701 return next_gc_likely_to_collect_more;
705 void Heap::PerformScavenge() {
706 GCTracer tracer(this, NULL, NULL);
707 if (incremental_marking()->IsStopped()) {
708 PerformGarbageCollection(SCAVENGER, &tracer);
710 PerformGarbageCollection(MARK_COMPACTOR, &tracer);
715 void Heap::MoveElements(FixedArray* array,
719 if (len == 0) return;
721 ASSERT(array->map() != HEAP->fixed_cow_array_map());
722 Object** dst_objects = array->data_start() + dst_index;
723 OS::MemMove(dst_objects,
724 array->data_start() + src_index,
726 if (!InNewSpace(array)) {
727 for (int i = 0; i < len; i++) {
728 // TODO(hpayer): check store buffer for entries
729 if (InNewSpace(dst_objects[i])) {
730 RecordWrite(array->address(), array->OffsetOfElementAt(dst_index + i));
734 incremental_marking()->RecordWrites(array);
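// OS::MemMove bypasses the write barrier, so the RecordWrite calls above and
// the RecordWrites call here re-establish the store buffer and incremental
// marking invariants for the moved slots.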
739 // Helper class for verifying the string table.
740 class StringTableVerifier : public ObjectVisitor {
742 void VisitPointers(Object** start, Object** end) {
743 // Visit all HeapObject pointers in [start, end).
744 for (Object** p = start; p < end; p++) {
745 if ((*p)->IsHeapObject()) {
746 // Check that the string is actually internalized.
747 CHECK((*p)->IsTheHole() || (*p)->IsUndefined() ||
748 (*p)->IsInternalizedString());
755 static void VerifyStringTable() {
756 StringTableVerifier verifier;
757 HEAP->string_table()->IterateElements(&verifier);
759 #endif // VERIFY_HEAP
762 static bool AbortIncrementalMarkingAndCollectGarbage(
764 AllocationSpace space,
765 const char* gc_reason = NULL) {
766 heap->mark_compact_collector()->SetFlags(Heap::kAbortIncrementalMarkingMask);
767 bool result = heap->CollectGarbage(space, gc_reason);
768 heap->mark_compact_collector()->SetFlags(Heap::kNoGCFlags);
773 void Heap::ReserveSpace(
775 Address *locations_out) {
776 bool gc_performed = true;
778 static const int kThreshold = 20;
779 while (gc_performed && counter++ < kThreshold) {
780 gc_performed = false;
781 ASSERT(NEW_SPACE == FIRST_PAGED_SPACE - 1);
782 for (int space = NEW_SPACE; space <= LAST_PAGED_SPACE; space++) {
783 if (sizes[space] != 0) {
784 MaybeObject* allocation;
785 if (space == NEW_SPACE) {
786 allocation = new_space()->AllocateRaw(sizes[space]);
788 allocation = paged_space(space)->AllocateRaw(sizes[space]);
791 if (!allocation->To<FreeListNode>(&node)) {
792 if (space == NEW_SPACE) {
793 Heap::CollectGarbage(NEW_SPACE,
794 "failed to reserve space in the new space");
796 AbortIncrementalMarkingAndCollectGarbage(
798 static_cast<AllocationSpace>(space),
799 "failed to reserve space in paged space");
804 // Mark with a free list node, in case we have a GC before deserializing.
806 node->set_size(this, sizes[space]);
807 locations_out[space] = node->address();
814 // Failed to reserve the space after several attempts.
815 V8::FatalProcessOutOfMemory("Heap::ReserveSpace");
820 void Heap::EnsureFromSpaceIsCommitted() {
821 if (new_space_.CommitFromSpaceIfNeeded()) return;
823 // Committing memory to from space failed.
824 // Memory is exhausted and we will die.
825 V8::FatalProcessOutOfMemory("Committing semi space failed.");
829 void Heap::ClearJSFunctionResultCaches() {
830 if (isolate_->bootstrapper()->IsActive()) return;
832 Object* context = native_contexts_list_;
833 while (!context->IsUndefined()) {
834 // Get the caches for this context. GC can happen when the context
835 // is not fully initialized, so the caches can be undefined.
836 Object* caches_or_undefined =
837 Context::cast(context)->get(Context::JSFUNCTION_RESULT_CACHES_INDEX);
838 if (!caches_or_undefined->IsUndefined()) {
839 FixedArray* caches = FixedArray::cast(caches_or_undefined);
841 int length = caches->length();
842 for (int i = 0; i < length; i++) {
843 JSFunctionResultCache::cast(caches->get(i))->Clear();
846 // Get the next context:
847 context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
852 void Heap::ClearNormalizedMapCaches() {
853 if (isolate_->bootstrapper()->IsActive() &&
854 !incremental_marking()->IsMarking()) {
858 Object* context = native_contexts_list_;
859 while (!context->IsUndefined()) {
860 // GC can happen when the context is not fully initialized,
861 // so the cache can be undefined.
863 Context::cast(context)->get(Context::NORMALIZED_MAP_CACHE_INDEX);
864 if (!cache->IsUndefined()) {
865 NormalizedMapCache::cast(cache)->Clear();
867 context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
872 void Heap::UpdateSurvivalRateTrend(int start_new_space_size) {
873 double survival_rate =
874 (static_cast<double>(young_survivors_after_last_gc_) * 100) /
875 start_new_space_size;
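// Example: if 2 MB out of a 20 MB new space survived the last scavenge,
// survival_rate is 10 (percent); a swing of more than
// kYoungSurvivalRateAllowedDeviation percentage points relative to the
// previous rate flips the trend to INCREASING or DECREASING below.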
877 if (survival_rate > kYoungSurvivalRateHighThreshold) {
878 high_survival_rate_period_length_++;
880 high_survival_rate_period_length_ = 0;
883 if (survival_rate < kYoungSurvivalRateLowThreshold) {
884 low_survival_rate_period_length_++;
886 low_survival_rate_period_length_ = 0;
889 double survival_rate_diff = survival_rate_ - survival_rate;
891 if (survival_rate_diff > kYoungSurvivalRateAllowedDeviation) {
892 set_survival_rate_trend(DECREASING);
893 } else if (survival_rate_diff < -kYoungSurvivalRateAllowedDeviation) {
894 set_survival_rate_trend(INCREASING);
896 set_survival_rate_trend(STABLE);
899 survival_rate_ = survival_rate;
902 bool Heap::PerformGarbageCollection(GarbageCollector collector,
904 bool next_gc_likely_to_collect_more = false;
906 if (collector != SCAVENGER) {
907 PROFILE(isolate_, CodeMovingGCEvent());
911 if (FLAG_verify_heap) {
917 collector == MARK_COMPACTOR ? kGCTypeMarkSweepCompact : kGCTypeScavenge;
920 GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
921 VMState<EXTERNAL> state(isolate_);
922 CallGCPrologueCallbacks(gc_type, kNoGCCallbackFlags);
925 EnsureFromSpaceIsCommitted();
927 int start_new_space_size = Heap::new_space()->SizeAsInt();
929 if (IsHighSurvivalRate()) {
930 // We speed up the incremental marker if it is running so that it
931 // does not fall behind the rate of promotion, which would cause a
932 // constantly growing old space.
933 incremental_marking()->NotifyOfHighPromotionRate();
936 if (collector == MARK_COMPACTOR) {
937 // Perform mark-sweep with optional compaction.
941 UpdateSurvivalRateTrend(start_new_space_size);
943 size_of_old_gen_at_last_old_space_gc_ = PromotedSpaceSizeOfObjects();
945 old_generation_allocation_limit_ =
946 OldGenerationAllocationLimit(size_of_old_gen_at_last_old_space_gc_);
948 old_gen_exhausted_ = false;
954 UpdateSurvivalRateTrend(start_new_space_size);
957 if (!new_space_high_promotion_mode_active_ &&
958 new_space_.Capacity() == new_space_.MaximumCapacity() &&
959 IsStableOrIncreasingSurvivalTrend() &&
960 IsHighSurvivalRate()) {
961 // Stable high survival rates even though young generation is at
962 // maximum capacity indicates that most objects will be promoted.
963 // To decrease scavenger pauses and final mark-sweep pauses, we
964 // have to limit maximal capacity of the young generation.
965 SetNewSpaceHighPromotionModeActive(true);
967 PrintPID("Limited new space size due to high promotion rate: %d MB\n",
968 new_space_.InitialCapacity() / MB);
970 // Support for global pre-tenuring uses the high promotion mode as a
971 // heuristic indicator of whether to pretenure or not; we trigger
972 // deoptimization here to take advantage of pre-tenuring as soon as possible.
974 if (FLAG_pretenuring) {
975 isolate_->stack_guard()->FullDeopt();
977 } else if (new_space_high_promotion_mode_active_ &&
978 IsStableOrDecreasingSurvivalTrend() &&
979 IsLowSurvivalRate()) {
980 // Decreasing low survival rates might indicate that the above high
981 // promotion mode is over and we should allow the young generation to grow freely.
983 SetNewSpaceHighPromotionModeActive(false);
985 PrintPID("Unlimited new space size due to low promotion rate: %d MB\n",
986 new_space_.MaximumCapacity() / MB);
988 // Trigger deoptimization here to turn off pre-tenuring as soon as possible.
990 if (FLAG_pretenuring) {
991 isolate_->stack_guard()->FullDeopt();
995 if (new_space_high_promotion_mode_active_ &&
996 new_space_.Capacity() > new_space_.InitialCapacity()) {
1000 isolate_->counters()->objs_since_last_young()->Set(0);
1002 // Callbacks that fire after this point might trigger nested GCs and
1003 // restart incremental marking, so the assertion can't be moved down.
1004 ASSERT(collector == SCAVENGER || incremental_marking()->IsStopped());
1006 gc_post_processing_depth_++;
1007 { AllowHeapAllocation allow_allocation;
1008 GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
1009 next_gc_likely_to_collect_more =
1010 isolate_->global_handles()->PostGarbageCollectionProcessing(
1013 gc_post_processing_depth_--;
1015 // Update relocatables.
1016 Relocatable::PostGarbageCollectionProcessing();
1018 if (collector == MARK_COMPACTOR) {
1019 // Register the amount of external allocated memory.
1020 amount_of_external_allocated_memory_at_last_global_gc_ =
1021 amount_of_external_allocated_memory_;
1025 GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
1026 VMState<EXTERNAL> state(isolate_);
1027 CallGCEpilogueCallbacks(gc_type);
1031 if (FLAG_verify_heap) {
1032 VerifyStringTable();
1036 return next_gc_likely_to_collect_more;
1040 void Heap::CallGCPrologueCallbacks(GCType gc_type, GCCallbackFlags flags) {
1041 if (gc_type == kGCTypeMarkSweepCompact && global_gc_prologue_callback_) {
1042 global_gc_prologue_callback_();
1044 for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
1045 if (gc_type & gc_prologue_callbacks_[i].gc_type) {
1046 gc_prologue_callbacks_[i].callback(gc_type, flags);
1052 void Heap::CallGCEpilogueCallbacks(GCType gc_type) {
1053 for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
1054 if (gc_type & gc_epilogue_callbacks_[i].gc_type) {
1055 gc_epilogue_callbacks_[i].callback(gc_type, kNoGCCallbackFlags);
1058 if (gc_type == kGCTypeMarkSweepCompact && global_gc_epilogue_callback_) {
1059 global_gc_epilogue_callback_();
1064 void Heap::MarkCompact(GCTracer* tracer) {
1065 gc_state_ = MARK_COMPACT;
1066 LOG(isolate_, ResourceEvent("markcompact", "begin"));
1068 mark_compact_collector_.Prepare(tracer);
1071 tracer->set_full_gc_count(ms_count_);
1073 MarkCompactPrologue();
1075 mark_compact_collector_.CollectGarbage();
1077 LOG(isolate_, ResourceEvent("markcompact", "end"));
1079 gc_state_ = NOT_IN_GC;
1081 isolate_->counters()->objs_since_last_full()->Set(0);
1083 contexts_disposed_ = 0;
1085 flush_monomorphic_ics_ = false;
1089 void Heap::MarkCompactPrologue() {
1090 // At any old GC clear the keyed lookup cache to enable collection of unused maps.
1092 isolate_->keyed_lookup_cache()->Clear();
1093 isolate_->context_slot_cache()->Clear();
1094 isolate_->descriptor_lookup_cache()->Clear();
1095 RegExpResultsCache::Clear(string_split_cache());
1096 RegExpResultsCache::Clear(regexp_multiple_cache());
1098 isolate_->compilation_cache()->MarkCompactPrologue();
1100 CompletelyClearInstanceofCache();
1102 FlushNumberStringCache();
1103 if (FLAG_cleanup_code_caches_at_gc) {
1104 polymorphic_code_cache()->set_cache(undefined_value());
1107 ClearNormalizedMapCaches();
1111 Object* Heap::FindCodeObject(Address a) {
1112 return isolate()->inner_pointer_to_code_cache()->
1113 GcSafeFindCodeForInnerPointer(a);
1117 // Helper class for copying HeapObjects
1118 class ScavengeVisitor: public ObjectVisitor {
1120 explicit ScavengeVisitor(Heap* heap) : heap_(heap) {}
1122 void VisitPointer(Object** p) { ScavengePointer(p); }
1124 void VisitPointers(Object** start, Object** end) {
1125 // Copy all HeapObject pointers in [start, end)
1126 for (Object** p = start; p < end; p++) ScavengePointer(p);
1130 void ScavengePointer(Object** p) {
1131 Object* object = *p;
1132 if (!heap_->InNewSpace(object)) return;
1133 Heap::ScavengeObject(reinterpret_cast<HeapObject**>(p),
1134 reinterpret_cast<HeapObject*>(object));
1142 // Visitor class to verify pointers in code or data space do not point into new space.
1144 class VerifyNonPointerSpacePointersVisitor: public ObjectVisitor {
1146 void VisitPointers(Object** start, Object**end) {
1147 for (Object** current = start; current < end; current++) {
1148 if ((*current)->IsHeapObject()) {
1149 CHECK(!HEAP->InNewSpace(HeapObject::cast(*current)));
1156 static void VerifyNonPointerSpacePointers() {
1157 // Verify that there are no pointers to new space in spaces where we
1158 // do not expect them.
1159 VerifyNonPointerSpacePointersVisitor v;
1160 HeapObjectIterator code_it(HEAP->code_space());
1161 for (HeapObject* object = code_it.Next();
1162 object != NULL; object = code_it.Next())
1163 object->Iterate(&v);
1165 // The old data space is normally swept conservatively, so the iterator
1166 // does not work and we normally skip the next bit.
1167 if (!HEAP->old_data_space()->was_swept_conservatively()) {
1168 HeapObjectIterator data_it(HEAP->old_data_space());
1169 for (HeapObject* object = data_it.Next();
1170 object != NULL; object = data_it.Next())
1171 object->Iterate(&v);
1174 #endif // VERIFY_HEAP
1177 void Heap::CheckNewSpaceExpansionCriteria() {
1178 if (new_space_.Capacity() < new_space_.MaximumCapacity() &&
1179 survived_since_last_expansion_ > new_space_.Capacity() &&
1180 !new_space_high_promotion_mode_active_) {
1181 // Grow the size of new space if there is room to grow, enough data
1182 // has survived scavenge since the last expansion and we are not in
1183 // high promotion mode.
1185 survived_since_last_expansion_ = 0;
1190 static bool IsUnscavengedHeapObject(Heap* heap, Object** p) {
1191 return heap->InNewSpace(*p) &&
1192 !HeapObject::cast(*p)->map_word().IsForwardingAddress();
1196 void Heap::ScavengeStoreBufferCallback(
1199 StoreBufferEvent event) {
1200 heap->store_buffer_rebuilder_.Callback(page, event);
1204 void StoreBufferRebuilder::Callback(MemoryChunk* page, StoreBufferEvent event) {
1205 if (event == kStoreBufferStartScanningPagesEvent) {
1206 start_of_current_page_ = NULL;
1207 current_page_ = NULL;
1208 } else if (event == kStoreBufferScanningPageEvent) {
1209 if (current_page_ != NULL) {
1210 // If this page already overflowed the store buffer during this iteration.
1211 if (current_page_->scan_on_scavenge()) {
1212 // Then we should wipe out the entries that have been added for it.
1213 store_buffer_->SetTop(start_of_current_page_);
1214 } else if (store_buffer_->Top() - start_of_current_page_ >=
1215 (store_buffer_->Limit() - store_buffer_->Top()) >> 2) {
1216 // Did we find too many pointers in the previous page? The heuristic is
1217 // that no page can take more than 1/5 of the remaining slots in the store buffer.
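// In other words: with E entries from this page and R slots still free,
// E >= R / 4 implies E / (E + R) >= 1/5, i.e. the page claims at least a
// fifth of the slots measured from where its entries start.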
1219 current_page_->set_scan_on_scavenge(true);
1220 store_buffer_->SetTop(start_of_current_page_);
1222 // In this case the page we scanned took a reasonable number of slots in
1223 // the store buffer. It has now been rehabilitated and is no longer
1224 // marked scan_on_scavenge.
1225 ASSERT(!current_page_->scan_on_scavenge());
1228 start_of_current_page_ = store_buffer_->Top();
1229 current_page_ = page;
1230 } else if (event == kStoreBufferFullEvent) {
1231 // The current page overflowed the store buffer again. Wipe out its entries
1232 // in the store buffer and mark it scan-on-scavenge again. This may happen
1233 // several times while scanning.
1234 if (current_page_ == NULL) {
1235 // Store Buffer overflowed while scanning promoted objects. These are not
1236 // in any particular page, though they are likely to be clustered by the
1237 // allocation routines.
1238 store_buffer_->EnsureSpace(StoreBuffer::kStoreBufferSize / 2);
1240 // Store Buffer overflowed while scanning a particular old space page for
1241 // pointers to new space.
1242 ASSERT(current_page_ == page);
1243 ASSERT(page != NULL);
1244 current_page_->set_scan_on_scavenge(true);
1245 ASSERT(start_of_current_page_ != store_buffer_->Top());
1246 store_buffer_->SetTop(start_of_current_page_);
1254 void PromotionQueue::Initialize() {
1255 // Assumes that a NewSpacePage exactly fits a number of promotion queue
1256 // entries (where each is a pair of intptr_t). This allows us to simplify
1257 // the test for when to switch pages.
1258 ASSERT((Page::kPageSize - MemoryChunk::kBodyOffset) % (2 * kPointerSize)
1260 limit_ = reinterpret_cast<intptr_t*>(heap_->new_space()->ToSpaceStart());
1262 reinterpret_cast<intptr_t*>(heap_->new_space()->ToSpaceEnd());
1263 emergency_stack_ = NULL;
1268 void PromotionQueue::RelocateQueueHead() {
1269 ASSERT(emergency_stack_ == NULL);
1271 Page* p = Page::FromAllocationTop(reinterpret_cast<Address>(rear_));
1272 intptr_t* head_start = rear_;
1273 intptr_t* head_end =
1274 Min(front_, reinterpret_cast<intptr_t*>(p->area_end()));
1277 static_cast<int>(head_end - head_start) / kEntrySizeInWords;
1279 emergency_stack_ = new List<Entry>(2 * entries_count);
1281 while (head_start != head_end) {
1282 int size = static_cast<int>(*(head_start++));
1283 HeapObject* obj = reinterpret_cast<HeapObject*>(*(head_start++));
1284 emergency_stack_->Add(Entry(obj, size));
1290 class ScavengeWeakObjectRetainer : public WeakObjectRetainer {
1292 explicit ScavengeWeakObjectRetainer(Heap* heap) : heap_(heap) { }
1294 virtual Object* RetainAs(Object* object) {
1295 if (!heap_->InFromSpace(object)) {
1299 MapWord map_word = HeapObject::cast(object)->map_word();
1300 if (map_word.IsForwardingAddress()) {
1301 return map_word.ToForwardingAddress();
1311 void Heap::Scavenge() {
1312 RelocationLock relocation_lock(this);
1315 if (FLAG_verify_heap) VerifyNonPointerSpacePointers();
1318 gc_state_ = SCAVENGE;
1320 // Implements Cheney's copying algorithm
1321 LOG(isolate_, ResourceEvent("scavenge", "begin"));
1323 // Clear descriptor cache.
1324 isolate_->descriptor_lookup_cache()->Clear();
1326 // Used for updating survived_since_last_expansion_ at function end.
1327 intptr_t survived_watermark = PromotedSpaceSizeOfObjects();
1329 CheckNewSpaceExpansionCriteria();
1331 SelectScavengingVisitorsTable();
1333 incremental_marking()->PrepareForScavenge();
1335 paged_space(OLD_DATA_SPACE)->EnsureSweeperProgress(new_space_.Size());
1336 paged_space(OLD_POINTER_SPACE)->EnsureSweeperProgress(new_space_.Size());
1338 // Flip the semispaces. After flipping, to space is empty, from space has live objects.
1341 new_space_.ResetAllocationInfo();
1343 // We need to sweep newly copied objects which can be either in the
1344 // to space or promoted to the old generation. For to-space
1345 // objects, we treat the bottom of the to space as a queue. Newly
1346 // copied and unswept objects lie between a 'front' mark and the
1347 // allocation pointer.
1349 // Promoted objects can go into various old-generation spaces, and
1350 // can be allocated internally in the spaces (from the free list).
1351 // We treat the top of the to space as a queue of addresses of
1352 // promoted objects. The addresses of newly promoted and unswept
1353 // objects lie between a 'front' mark and a 'rear' mark that is
1354 // updated as a side effect of promoting an object.
1356 // There is guaranteed to be enough room at the top of the to space
1357 // for the addresses of promoted objects: every object promoted
1358 // frees up its size in bytes from the top of the new space, and
1359 // objects are at least one pointer in size.
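// Roughly, during the scavenge to-space is filled from both ends: copied
// objects (scanned up to new_space_front, unscanned up to the allocation
// top) grow upwards from ToSpaceStart, while the promotion queue entries
// grow downwards from ToSpaceEnd.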
1360 Address new_space_front = new_space_.ToSpaceStart();
1361 promotion_queue_.Initialize();
1364 store_buffer()->Clean();
1367 ScavengeVisitor scavenge_visitor(this);
1369 IterateRoots(&scavenge_visitor, VISIT_ALL_IN_SCAVENGE);
1371 // Copy objects reachable from the old generation.
1373 StoreBufferRebuildScope scope(this,
1375 &ScavengeStoreBufferCallback);
1376 store_buffer()->IteratePointersToNewSpace(&ScavengeObject);
1379 // Copy objects reachable from simple cells by scavenging cell values directly.
1381 HeapObjectIterator cell_iterator(cell_space_);
1382 for (HeapObject* heap_object = cell_iterator.Next();
1383 heap_object != NULL;
1384 heap_object = cell_iterator.Next()) {
1385 if (heap_object->IsCell()) {
1386 Cell* cell = Cell::cast(heap_object);
1387 Address value_address = cell->ValueAddress();
1388 scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(value_address));
1392 // Copy objects reachable from global property cells by scavenging global
1393 // property cell values directly.
1394 HeapObjectIterator js_global_property_cell_iterator(property_cell_space_);
1395 for (HeapObject* heap_object = js_global_property_cell_iterator.Next();
1396 heap_object != NULL;
1397 heap_object = js_global_property_cell_iterator.Next()) {
1398 if (heap_object->IsPropertyCell()) {
1399 PropertyCell* cell = PropertyCell::cast(heap_object);
1400 Address value_address = cell->ValueAddress();
1401 scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(value_address));
1402 Address type_address = cell->TypeAddress();
1403 scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(type_address));
1407 // Copy objects reachable from the code flushing candidates list.
1408 MarkCompactCollector* collector = mark_compact_collector();
1409 if (collector->is_code_flushing_enabled()) {
1410 collector->code_flusher()->IteratePointersToFromSpace(&scavenge_visitor);
1413 // Scavenge object reachable from the native contexts list directly.
1414 scavenge_visitor.VisitPointer(BitCast<Object**>(&native_contexts_list_));
1416 new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
1418 while (isolate()->global_handles()->IterateObjectGroups(
1419 &scavenge_visitor, &IsUnscavengedHeapObject)) {
1420 new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
1422 isolate()->global_handles()->RemoveObjectGroups();
1423 isolate()->global_handles()->RemoveImplicitRefGroups();
1425 isolate_->global_handles()->IdentifyNewSpaceWeakIndependentHandles(
1426 &IsUnscavengedHeapObject);
1427 isolate_->global_handles()->IterateNewSpaceWeakIndependentRoots(
1429 new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
1431 UpdateNewSpaceReferencesInExternalStringTable(
1432 &UpdateNewSpaceReferenceInExternalStringTableEntry);
1434 error_object_list_.UpdateReferencesInNewSpace(this);
1436 promotion_queue_.Destroy();
1438 if (!FLAG_watch_ic_patching) {
1439 isolate()->runtime_profiler()->UpdateSamplesAfterScavenge();
1441 incremental_marking()->UpdateMarkingDequeAfterScavenge();
1443 ScavengeWeakObjectRetainer weak_object_retainer(this);
1444 ProcessWeakReferences(&weak_object_retainer);
1446 ASSERT(new_space_front == new_space_.top());
1449 new_space_.set_age_mark(new_space_.top());
1451 new_space_.LowerInlineAllocationLimit(
1452 new_space_.inline_allocation_limit_step());
1454 // Update how much has survived scavenge.
1455 IncrementYoungSurvivorsCounter(static_cast<int>(
1456 (PromotedSpaceSizeOfObjects() - survived_watermark) + new_space_.Size()));
1458 LOG(isolate_, ResourceEvent("scavenge", "end"));
1460 gc_state_ = NOT_IN_GC;
1462 scavenges_since_last_idle_round_++;
1466 String* Heap::UpdateNewSpaceReferenceInExternalStringTableEntry(Heap* heap,
1468 MapWord first_word = HeapObject::cast(*p)->map_word();
1470 if (!first_word.IsForwardingAddress()) {
1471 // Unreachable external string can be finalized.
1472 heap->FinalizeExternalString(String::cast(*p));
1476 // String is still reachable.
1477 return String::cast(first_word.ToForwardingAddress());
1481 void Heap::UpdateNewSpaceReferencesInExternalStringTable(
1482 ExternalStringTableUpdaterCallback updater_func) {
1484 if (FLAG_verify_heap) {
1485 external_string_table_.Verify();
1489 if (external_string_table_.new_space_strings_.is_empty()) return;
1491 Object** start = &external_string_table_.new_space_strings_[0];
1492 Object** end = start + external_string_table_.new_space_strings_.length();
1493 Object** last = start;
1495 for (Object** p = start; p < end; ++p) {
1496 ASSERT(InFromSpace(*p));
1497 String* target = updater_func(this, p);
1499 if (target == NULL) continue;
1501 ASSERT(target->IsExternalString());
1503 if (InNewSpace(target)) {
1504 // String is still in new space. Update the table entry.
1508 // String got promoted. Move it to the old string list.
1509 external_string_table_.AddOldString(target);
1513 ASSERT(last <= end);
1514 external_string_table_.ShrinkNewStrings(static_cast<int>(last - start));
1518 void Heap::UpdateReferencesInExternalStringTable(
1519 ExternalStringTableUpdaterCallback updater_func) {
1521 // Update old space string references.
1522 if (external_string_table_.old_space_strings_.length() > 0) {
1523 Object** start = &external_string_table_.old_space_strings_[0];
1524 Object** end = start + external_string_table_.old_space_strings_.length();
1525 for (Object** p = start; p < end; ++p) *p = updater_func(this, p);
1528 UpdateNewSpaceReferencesInExternalStringTable(updater_func);
1533 struct WeakListVisitor;
1537 static Object* VisitWeakList(Heap* heap,
1539 WeakObjectRetainer* retainer,
1540 bool record_slots) {
1541 Object* undefined = heap->undefined_value();
1542 Object* head = undefined;
1544 MarkCompactCollector* collector = heap->mark_compact_collector();
1545 while (list != undefined) {
1546 // Check whether to keep the candidate in the list.
1547 T* candidate = reinterpret_cast<T*>(list);
1548 Object* retained = retainer->RetainAs(list);
1549 if (retained != NULL) {
1550 if (head == undefined) {
1551 // First element in the list.
1554 // Subsequent elements in the list.
1555 ASSERT(tail != NULL);
1556 WeakListVisitor<T>::SetWeakNext(tail, retained);
1558 Object** next_slot =
1559 HeapObject::RawField(tail, WeakListVisitor<T>::WeakNextOffset());
1560 collector->RecordSlot(next_slot, next_slot, retained);
1563 // Retained object is new tail.
1564 ASSERT(!retained->IsUndefined());
1565 candidate = reinterpret_cast<T*>(retained);
1569 // tail is a live object, visit it.
1570 WeakListVisitor<T>::VisitLiveObject(
1571 heap, tail, retainer, record_slots);
1573 WeakListVisitor<T>::VisitPhantomObject(heap, candidate);
1576 // Move to next element in the list.
1577 list = WeakListVisitor<T>::WeakNext(candidate);
1580 // Terminate the list if there are one or more elements.
1582 WeakListVisitor<T>::SetWeakNext(tail, undefined);
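// Each specialization of WeakListVisitor<T> below supplies the pieces
// VisitWeakList needs for a given weakly linked type: how to read and write
// the next-link field (WeakNext/SetWeakNext), the offset of that field for
// slot recording (WeakNextOffset), and hooks for elements that survive
// (VisitLiveObject) or die (VisitPhantomObject).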
1589 struct WeakListVisitor<JSFunction> {
1590 static void SetWeakNext(JSFunction* function, Object* next) {
1591 function->set_next_function_link(next);
1594 static Object* WeakNext(JSFunction* function) {
1595 return function->next_function_link();
1598 static int WeakNextOffset() {
1599 return JSFunction::kNextFunctionLinkOffset;
1602 static void VisitLiveObject(Heap*, JSFunction*,
1603 WeakObjectRetainer*, bool) {
1606 static void VisitPhantomObject(Heap*, JSFunction*) {
1612 struct WeakListVisitor<Context> {
1613 static void SetWeakNext(Context* context, Object* next) {
1614 context->set(Context::NEXT_CONTEXT_LINK,
1616 UPDATE_WRITE_BARRIER);
1619 static Object* WeakNext(Context* context) {
1620 return context->get(Context::NEXT_CONTEXT_LINK);
1623 static void VisitLiveObject(Heap* heap,
1625 WeakObjectRetainer* retainer,
1626 bool record_slots) {
1627 // Process the weak list of optimized functions for the context.
1628 Object* function_list_head =
1629 VisitWeakList<JSFunction>(
1631 context->get(Context::OPTIMIZED_FUNCTIONS_LIST),
1634 context->set(Context::OPTIMIZED_FUNCTIONS_LIST,
1636 UPDATE_WRITE_BARRIER);
1638 Object** optimized_functions =
1639 HeapObject::RawField(
1640 context, FixedArray::SizeFor(Context::OPTIMIZED_FUNCTIONS_LIST));
1641 heap->mark_compact_collector()->RecordSlot(
1642 optimized_functions, optimized_functions, function_list_head);
1646 static void VisitPhantomObject(Heap*, Context*) {
1649 static int WeakNextOffset() {
1650 return FixedArray::SizeFor(Context::NEXT_CONTEXT_LINK);
1655 void Heap::ProcessWeakReferences(WeakObjectRetainer* retainer) {
1656 // We don't record weak slots during marking or scavenges.
1657 // Instead we do it once when we complete the mark-compact cycle.
1658 // Note that the write barrier has no effect if we are already in the middle of
1659 // a compacting mark-sweep cycle and we have to record slots manually.
1661 gc_state() == MARK_COMPACT &&
1662 mark_compact_collector()->is_compacting();
1663 ProcessArrayBuffers(retainer, record_slots);
1664 ProcessNativeContexts(retainer, record_slots);
1667 void Heap::ProcessNativeContexts(WeakObjectRetainer* retainer,
1668 bool record_slots) {
1670 VisitWeakList<Context>(
1671 this, native_contexts_list(), retainer, record_slots);
1672 // Update the head of the list of contexts.
1673 native_contexts_list_ = head;
1678 struct WeakListVisitor<JSArrayBufferView> {
1679 static void SetWeakNext(JSArrayBufferView* obj, Object* next) {
1680 obj->set_weak_next(next);
1683 static Object* WeakNext(JSArrayBufferView* obj) {
1684 return obj->weak_next();
1687 static void VisitLiveObject(Heap*,
1688 JSArrayBufferView* obj,
1689 WeakObjectRetainer* retainer,
1690 bool record_slots) {}
1692 static void VisitPhantomObject(Heap*, JSArrayBufferView*) {}
1694 static int WeakNextOffset() {
1695 return JSArrayBufferView::kWeakNextOffset;
1701 struct WeakListVisitor<JSArrayBuffer> {
1702 static void SetWeakNext(JSArrayBuffer* obj, Object* next) {
1703 obj->set_weak_next(next);
1706 static Object* WeakNext(JSArrayBuffer* obj) {
1707 return obj->weak_next();
1710 static void VisitLiveObject(Heap* heap,
1711 JSArrayBuffer* array_buffer,
1712 WeakObjectRetainer* retainer,
1713 bool record_slots) {
1714 Object* typed_array_obj =
1715 VisitWeakList<JSArrayBufferView>(
1717 array_buffer->weak_first_view(),
1718 retainer, record_slots);
1719 array_buffer->set_weak_first_view(typed_array_obj);
1720 if (typed_array_obj != heap->undefined_value() && record_slots) {
1721 Object** slot = HeapObject::RawField(
1722 array_buffer, JSArrayBuffer::kWeakFirstViewOffset);
1723 heap->mark_compact_collector()->RecordSlot(slot, slot, typed_array_obj);
1727 static void VisitPhantomObject(Heap* heap, JSArrayBuffer* phantom) {
1728 Runtime::FreeArrayBuffer(heap->isolate(), phantom);
1731 static int WeakNextOffset() {
1732 return JSArrayBuffer::kWeakNextOffset;
1737 void Heap::ProcessArrayBuffers(WeakObjectRetainer* retainer,
1738 bool record_slots) {
1739 Object* array_buffer_obj =
1740 VisitWeakList<JSArrayBuffer>(this,
1741 array_buffers_list(),
1742 retainer, record_slots);
1743 set_array_buffers_list(array_buffer_obj);
1747 void Heap::TearDownArrayBuffers() {
1748 Object* undefined = undefined_value();
1749 for (Object* o = array_buffers_list(); o != undefined;) {
1750 JSArrayBuffer* buffer = JSArrayBuffer::cast(o);
1751 Runtime::FreeArrayBuffer(isolate(), buffer);
1752 o = buffer->weak_next();
1754 array_buffers_list_ = undefined;
1758 void Heap::VisitExternalResources(v8::ExternalResourceVisitor* visitor) {
1759 DisallowHeapAllocation no_allocation;
1761 // Both the external string table and the string table may contain
1762 // external strings, but neither lists them exhaustively, nor is the
1763 // intersection set empty. Therefore we iterate over the external string
1764 // table first, ignoring internalized strings, and then over the
1765 // internalized string table.
1767 class ExternalStringTableVisitorAdapter : public ObjectVisitor {
1769 explicit ExternalStringTableVisitorAdapter(
1770 v8::ExternalResourceVisitor* visitor) : visitor_(visitor) {}
1771 virtual void VisitPointers(Object** start, Object** end) {
1772 for (Object** p = start; p < end; p++) {
1773 // Visit non-internalized external strings,
1774 // since internalized strings are listed in the string table.
1775 if (!(*p)->IsInternalizedString()) {
1776 ASSERT((*p)->IsExternalString());
1777 visitor_->VisitExternalString(Utils::ToLocal(
1778 Handle<String>(String::cast(*p))));
1783 v8::ExternalResourceVisitor* visitor_;
1784 } external_string_table_visitor(visitor);
1786 external_string_table_.Iterate(&external_string_table_visitor);
1788 class StringTableVisitorAdapter : public ObjectVisitor {
1790 explicit StringTableVisitorAdapter(
1791 v8::ExternalResourceVisitor* visitor) : visitor_(visitor) {}
1792 virtual void VisitPointers(Object** start, Object** end) {
1793 for (Object** p = start; p < end; p++) {
1794 if ((*p)->IsExternalString()) {
1795 ASSERT((*p)->IsInternalizedString());
1796 visitor_->VisitExternalString(Utils::ToLocal(
1797 Handle<String>(String::cast(*p))));
1802 v8::ExternalResourceVisitor* visitor_;
1803 } string_table_visitor(visitor);
1805 string_table()->IterateElements(&string_table_visitor);
1809 class NewSpaceScavenger : public StaticNewSpaceVisitor<NewSpaceScavenger> {
1811 static inline void VisitPointer(Heap* heap, Object** p) {
1812 Object* object = *p;
1813 if (!heap->InNewSpace(object)) return;
1814 Heap::ScavengeObject(reinterpret_cast<HeapObject**>(p),
1815 reinterpret_cast<HeapObject*>(object));
1820 Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
1821 Address new_space_front) {
1823 SemiSpace::AssertValidRange(new_space_front, new_space_.top());
1824 // The addresses new_space_front and new_space_.top() define a
1825 // queue of unprocessed copied objects. Process them until the queue is empty.
1827 while (new_space_front != new_space_.top()) {
1828 if (!NewSpacePage::IsAtEnd(new_space_front)) {
1829 HeapObject* object = HeapObject::FromAddress(new_space_front);
1831 NewSpaceScavenger::IterateBody(object->map(), object);
1834 NewSpacePage::FromLimit(new_space_front)->next_page()->area_start();
1838 // Promote and process all the to-be-promoted objects.
1840 StoreBufferRebuildScope scope(this,
1842 &ScavengeStoreBufferCallback);
1843 while (!promotion_queue()->is_empty()) {
1846 promotion_queue()->remove(&target, &size);
1848 // Promoted object might be already partially visited
1849 // during old space pointer iteration. Thus we search specifically
1850 // for pointers to the from semispace instead of looking for pointers to new space.
1852 ASSERT(!target->IsMap());
1853 IterateAndMarkPointersToFromSpace(target->address(),
1854 target->address() + size,
1859 // Take another spin if there are now unswept objects in new space
1860 // (there are currently no more unswept promoted objects).
1861 } while (new_space_front != new_space_.top());
1863 return new_space_front;
1867 STATIC_ASSERT((FixedDoubleArray::kHeaderSize & kDoubleAlignmentMask) == 0);
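// EnsureDoubleAligned turns a freshly allocated, kPointerSize-aligned chunk
// of 'size' bytes into a double-aligned object: if the address is misaligned
// it puts a one-word filler at the front and returns the object shifted by
// kPointerSize; otherwise it fills the spare word at the end. The caller is
// expected to have allocated kPointerSize extra bytes.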
static HeapObject* EnsureDoubleAligned(Heap* heap,
                                       HeapObject* object,
                                       int size) {
  if ((OffsetFrom(object->address()) & kDoubleAlignmentMask) != 0) {
    heap->CreateFillerObjectAt(object->address(), kPointerSize);
    return HeapObject::FromAddress(object->address() + kPointerSize);
  } else {
    heap->CreateFillerObjectAt(object->address() + size - kPointerSize,
                               kPointerSize);
    return object;
  }
}
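// Example of the alignment fix-up above: with 4-byte pointers and 8-byte
// doubles, an allocation starting at an address ending in ...4 is misaligned
// for a double array, so a one-word filler is written there and the object
// starts one word later; an allocation already ending in ...8 instead has
// its unused trailing word turned into a filler.  Either way the extra
// kPointerSize reserved by the caller is accounted for.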
1888 enum LoggingAndProfiling {
1889 LOGGING_AND_PROFILING_ENABLED,
1890 LOGGING_AND_PROFILING_DISABLED
1894 enum MarksHandling { TRANSFER_MARKS, IGNORE_MARKS };
1897 template<MarksHandling marks_handling,
1898 LoggingAndProfiling logging_and_profiling_mode>
1899 class ScavengingVisitor : public StaticVisitorBase {
1901 static void Initialize() {
1902 table_.Register(kVisitSeqOneByteString, &EvacuateSeqOneByteString);
1903 table_.Register(kVisitSeqTwoByteString, &EvacuateSeqTwoByteString);
1904 table_.Register(kVisitShortcutCandidate, &EvacuateShortcutCandidate);
1905 table_.Register(kVisitByteArray, &EvacuateByteArray);
1906 table_.Register(kVisitFixedArray, &EvacuateFixedArray);
1907 table_.Register(kVisitFixedDoubleArray, &EvacuateFixedDoubleArray);
1909 table_.Register(kVisitNativeContext,
1910 &ObjectEvacuationStrategy<POINTER_OBJECT>::
1911 template VisitSpecialized<Context::kSize>);
1913 table_.Register(kVisitConsString,
1914 &ObjectEvacuationStrategy<POINTER_OBJECT>::
1915 template VisitSpecialized<ConsString::kSize>);
1917 table_.Register(kVisitSlicedString,
1918 &ObjectEvacuationStrategy<POINTER_OBJECT>::
1919 template VisitSpecialized<SlicedString::kSize>);
1921 table_.Register(kVisitSymbol,
1922 &ObjectEvacuationStrategy<POINTER_OBJECT>::
1923 template VisitSpecialized<Symbol::kSize>);
1925 table_.Register(kVisitSharedFunctionInfo,
1926 &ObjectEvacuationStrategy<POINTER_OBJECT>::
1927 template VisitSpecialized<SharedFunctionInfo::kSize>);
    table_.Register(kVisitJSWeakMap,
                    &ObjectEvacuationStrategy<POINTER_OBJECT>::
                    Visit);

    table_.Register(kVisitJSArrayBuffer,
                    &ObjectEvacuationStrategy<POINTER_OBJECT>::
                    Visit);

    table_.Register(kVisitJSTypedArray,
                    &ObjectEvacuationStrategy<POINTER_OBJECT>::
                    Visit);

    table_.Register(kVisitJSDataView,
                    &ObjectEvacuationStrategy<POINTER_OBJECT>::
                    Visit);

    table_.Register(kVisitJSRegExp,
                    &ObjectEvacuationStrategy<POINTER_OBJECT>::
                    Visit);
1949 if (marks_handling == IGNORE_MARKS) {
1950 table_.Register(kVisitJSFunction,
1951 &ObjectEvacuationStrategy<POINTER_OBJECT>::
1952 template VisitSpecialized<JSFunction::kSize>);
1954 table_.Register(kVisitJSFunction, &EvacuateJSFunction);
1957 table_.RegisterSpecializations<ObjectEvacuationStrategy<DATA_OBJECT>,
1959 kVisitDataObjectGeneric>();
1961 table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>,
1963 kVisitJSObjectGeneric>();
1965 table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>,
1967 kVisitStructGeneric>();
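    // The registrations above populate a per-instantiation dispatch table:
    // the scavenger reads the visitor id recorded in each map and jumps
    // straight to the matching Evacuate* callback, so no virtual dispatch
    // or instance-type switch is needed on the hot copying path.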
1970 static VisitorDispatchTable<ScavengingCallback>* GetTable() {
1975 enum ObjectContents { DATA_OBJECT, POINTER_OBJECT };
1976 enum SizeRestriction { SMALL, UNKNOWN_SIZE };
  static void RecordCopiedObject(Heap* heap, HeapObject* obj) {
    bool should_record = false;
#ifdef DEBUG
    should_record = FLAG_heap_stats;
#endif
    should_record = should_record || FLAG_log_gc;
    if (should_record) {
      if (heap->new_space()->Contains(obj)) {
        heap->new_space()->RecordAllocation(obj);
      } else {
        heap->new_space()->RecordPromotion(obj);
      }
    }
  }
  // Helper function used by CopyObject to copy a source object to an
  // allocated target object and update the forwarding pointer in the source
  // object.  Returns the target object.
  INLINE(static void MigrateObject(Heap* heap,
                                   HeapObject* source,
                                   HeapObject* target,
                                   int size)) {
    // Copy the content of source to target.
    heap->CopyBlock(target->address(), source->address(), size);
2003 // Set the forwarding address.
2004 source->set_map_word(MapWord::FromForwardingAddress(target));
2006 if (logging_and_profiling_mode == LOGGING_AND_PROFILING_ENABLED) {
2007 // Update NewSpace stats if necessary.
2008 RecordCopiedObject(heap, target);
2009 HEAP_PROFILE(heap, ObjectMoveEvent(source->address(), target->address()));
2010 Isolate* isolate = heap->isolate();
2011 if (isolate->logger()->is_logging_code_events() ||
2012 isolate->cpu_profiler()->is_profiling()) {
2013 if (target->IsSharedFunctionInfo()) {
2014 PROFILE(isolate, SharedFunctionInfoMoveEvent(
2015 source->address(), target->address()));
    if (marks_handling == TRANSFER_MARKS) {
      if (Marking::TransferColor(source, target)) {
        MemoryChunk::IncrementLiveBytesFromGC(target->address(), size);
      }
    }
  }
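  // After MigrateObject the from-space copy is dead: its map word has been
  // overwritten with a forwarding address, so any later visitor that reaches
  // the old copy (for example via EvacuateShortcutCandidate below) sees
  // MapWord::IsForwardingAddress() and redirects its slot to the new copy.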
2028 template<ObjectContents object_contents,
2029 SizeRestriction size_restriction,
2031 static inline void EvacuateObject(Map* map,
2035 SLOW_ASSERT((size_restriction != SMALL) ||
2036 (object_size <= Page::kMaxNonCodeHeapObjectSize));
2037 SLOW_ASSERT(object->Size() == object_size);
2039 int allocation_size = object_size;
2040 if (alignment != kObjectAlignment) {
2041 ASSERT(alignment == kDoubleAlignment);
2042 allocation_size += kPointerSize;
2045 Heap* heap = map->GetHeap();
2046 if (heap->ShouldBePromoted(object->address(), object_size)) {
2047 MaybeObject* maybe_result;
2049 if ((size_restriction != SMALL) &&
2050 (allocation_size > Page::kMaxNonCodeHeapObjectSize)) {
2051 maybe_result = heap->lo_space()->AllocateRaw(allocation_size,
2054 if (object_contents == DATA_OBJECT) {
2055 maybe_result = heap->old_data_space()->AllocateRaw(allocation_size);
2058 heap->old_pointer_space()->AllocateRaw(allocation_size);
2062 Object* result = NULL; // Initialization to please compiler.
2063 if (maybe_result->ToObject(&result)) {
2064 HeapObject* target = HeapObject::cast(result);
2066 if (alignment != kObjectAlignment) {
2067 target = EnsureDoubleAligned(heap, target, allocation_size);
        // Order is important: slot might be inside of the target if target
        // was allocated over a dead object and slot comes from the store
        // buffer.
        *slot = target;
        MigrateObject(heap, object, target, object_size);
2076 if (object_contents == POINTER_OBJECT) {
2077 if (map->instance_type() == JS_FUNCTION_TYPE) {
2078 heap->promotion_queue()->insert(
2079 target, JSFunction::kNonWeakFieldsEndOffset);
2081 heap->promotion_queue()->insert(target, object_size);
2085 heap->tracer()->increment_promoted_objects_size(object_size);
2089 MaybeObject* allocation = heap->new_space()->AllocateRaw(allocation_size);
2090 heap->promotion_queue()->SetNewLimit(heap->new_space()->top());
2091 Object* result = allocation->ToObjectUnchecked();
2092 HeapObject* target = HeapObject::cast(result);
2094 if (alignment != kObjectAlignment) {
2095 target = EnsureDoubleAligned(heap, target, allocation_size);
    // Order is important: slot might be inside of the target if target
    // was allocated over a dead object and slot comes from the store
    // buffer.
    *slot = target;
    MigrateObject(heap, object, target, object_size);
  static inline void EvacuateJSFunction(Map* map,
                                        HeapObject** slot,
                                        HeapObject* object) {
2110 ObjectEvacuationStrategy<POINTER_OBJECT>::
2111 template VisitSpecialized<JSFunction::kSize>(map, slot, object);
2113 HeapObject* target = *slot;
2114 MarkBit mark_bit = Marking::MarkBitFrom(target);
2115 if (Marking::IsBlack(mark_bit)) {
2116 // This object is black and it might not be rescanned by marker.
2117 // We should explicitly record code entry slot for compaction because
2118 // promotion queue processing (IterateAndMarkPointersToFromSpace) will
2119 // miss it as it is not HeapObject-tagged.
2120 Address code_entry_slot =
2121 target->address() + JSFunction::kCodeEntryOffset;
2122 Code* code = Code::cast(Code::GetObjectFromEntryAddress(code_entry_slot));
2123 map->GetHeap()->mark_compact_collector()->
2124 RecordCodeEntrySlot(code_entry_slot, code);
2129 static inline void EvacuateFixedArray(Map* map,
2131 HeapObject* object) {
2132 int object_size = FixedArray::BodyDescriptor::SizeOf(map, object);
    EvacuateObject<POINTER_OBJECT, UNKNOWN_SIZE, kObjectAlignment>(
        map, slot, object, object_size);
2140 static inline void EvacuateFixedDoubleArray(Map* map,
2142 HeapObject* object) {
2143 int length = reinterpret_cast<FixedDoubleArray*>(object)->length();
2144 int object_size = FixedDoubleArray::SizeFor(length);
    EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE, kDoubleAlignment>(
        map, slot, object, object_size);
2153 static inline void EvacuateByteArray(Map* map,
2155 HeapObject* object) {
2156 int object_size = reinterpret_cast<ByteArray*>(object)->ByteArraySize();
2157 EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE, kObjectAlignment>(
2158 map, slot, object, object_size);
2162 static inline void EvacuateSeqOneByteString(Map* map,
2164 HeapObject* object) {
2165 int object_size = SeqOneByteString::cast(object)->
2166 SeqOneByteStringSize(map->instance_type());
2167 EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE, kObjectAlignment>(
2168 map, slot, object, object_size);
2172 static inline void EvacuateSeqTwoByteString(Map* map,
2174 HeapObject* object) {
2175 int object_size = SeqTwoByteString::cast(object)->
2176 SeqTwoByteStringSize(map->instance_type());
2177 EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE, kObjectAlignment>(
2178 map, slot, object, object_size);
2182 static inline bool IsShortcutCandidate(int type) {
2183 return ((type & kShortcutTypeMask) == kShortcutTypeTag);
  static inline void EvacuateShortcutCandidate(Map* map,
                                               HeapObject** slot,
                                               HeapObject* object) {
2189 ASSERT(IsShortcutCandidate(map->instance_type()));
2191 Heap* heap = map->GetHeap();
    if (marks_handling == IGNORE_MARKS &&
        ConsString::cast(object)->unchecked_second() ==
        heap->empty_string()) {
      HeapObject* first =
          HeapObject::cast(ConsString::cast(object)->unchecked_first());

      *slot = first;

      if (!heap->InNewSpace(first)) {
        object->set_map_word(MapWord::FromForwardingAddress(first));
        return;
      }

      MapWord first_word = first->map_word();
      if (first_word.IsForwardingAddress()) {
        HeapObject* target = first_word.ToForwardingAddress();
        *slot = target;
        object->set_map_word(MapWord::FromForwardingAddress(target));
        return;
      }

      heap->DoScavengeObject(first->map(), slot, first);
      object->set_map_word(MapWord::FromForwardingAddress(*slot));
      return;
    }
2220 int object_size = ConsString::kSize;
2221 EvacuateObject<POINTER_OBJECT, SMALL, kObjectAlignment>(
2222 map, slot, object, object_size);
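  // Short-circuiting example: scavenging the cons string ("abc" + "") with
  // IGNORE_MARKS rewrites the slot to point directly at "abc" and leaves a
  // forwarding map word behind, so later references to the cons wrapper are
  // redirected as well.  When incremental marking is compacting, this
  // shortcut is disabled (see SelectScavengingVisitorsTable below).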
2225 template<ObjectContents object_contents>
2226 class ObjectEvacuationStrategy {
2228 template<int object_size>
2229 static inline void VisitSpecialized(Map* map,
2231 HeapObject* object) {
2232 EvacuateObject<object_contents, SMALL, kObjectAlignment>(
2233 map, slot, object, object_size);
2236 static inline void Visit(Map* map,
2238 HeapObject* object) {
2239 int object_size = map->instance_size();
2240 EvacuateObject<object_contents, SMALL, kObjectAlignment>(
2241 map, slot, object, object_size);
2245 static VisitorDispatchTable<ScavengingCallback> table_;
2249 template<MarksHandling marks_handling,
2250 LoggingAndProfiling logging_and_profiling_mode>
2251 VisitorDispatchTable<ScavengingCallback>
2252 ScavengingVisitor<marks_handling, logging_and_profiling_mode>::table_;
2255 static void InitializeScavengingVisitorsTables() {
2256 ScavengingVisitor<TRANSFER_MARKS,
2257 LOGGING_AND_PROFILING_DISABLED>::Initialize();
2258 ScavengingVisitor<IGNORE_MARKS, LOGGING_AND_PROFILING_DISABLED>::Initialize();
2259 ScavengingVisitor<TRANSFER_MARKS,
2260 LOGGING_AND_PROFILING_ENABLED>::Initialize();
2261 ScavengingVisitor<IGNORE_MARKS, LOGGING_AND_PROFILING_ENABLED>::Initialize();
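// Four static tables are initialized here: {TRANSFER_MARKS, IGNORE_MARKS} x
// {logging/profiling enabled, disabled}.  SelectScavengingVisitorsTable
// copies the variant matching the current incremental-marking and profiling
// state into scavenging_visitors_table_ before each scavenge.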
2265 void Heap::SelectScavengingVisitorsTable() {
2266 bool logging_and_profiling =
2267 isolate()->logger()->is_logging() ||
2268 isolate()->cpu_profiler()->is_profiling() ||
2269 (isolate()->heap_profiler() != NULL &&
2270 isolate()->heap_profiler()->is_profiling());
2272 if (!incremental_marking()->IsMarking()) {
2273 if (!logging_and_profiling) {
2274 scavenging_visitors_table_.CopyFrom(
2275 ScavengingVisitor<IGNORE_MARKS,
2276 LOGGING_AND_PROFILING_DISABLED>::GetTable());
2278 scavenging_visitors_table_.CopyFrom(
2279 ScavengingVisitor<IGNORE_MARKS,
2280 LOGGING_AND_PROFILING_ENABLED>::GetTable());
2283 if (!logging_and_profiling) {
2284 scavenging_visitors_table_.CopyFrom(
2285 ScavengingVisitor<TRANSFER_MARKS,
2286 LOGGING_AND_PROFILING_DISABLED>::GetTable());
2288 scavenging_visitors_table_.CopyFrom(
2289 ScavengingVisitor<TRANSFER_MARKS,
2290 LOGGING_AND_PROFILING_ENABLED>::GetTable());
2293 if (incremental_marking()->IsCompacting()) {
      // When compacting, forbid short-circuiting of cons strings.
      // Scavenging code relies on the fact that a new space object
      // cannot be evacuated into an evacuation candidate, but
      // short-circuiting violates this assumption.
2298 scavenging_visitors_table_.Register(
2299 StaticVisitorBase::kVisitShortcutCandidate,
2300 scavenging_visitors_table_.GetVisitorById(
2301 StaticVisitorBase::kVisitConsString));
2307 void Heap::ScavengeObjectSlow(HeapObject** p, HeapObject* object) {
2308 SLOW_ASSERT(HEAP->InFromSpace(object));
2309 MapWord first_word = object->map_word();
2310 SLOW_ASSERT(!first_word.IsForwardingAddress());
2311 Map* map = first_word.ToMap();
2312 map->GetHeap()->DoScavengeObject(map, p, object);
2316 MaybeObject* Heap::AllocatePartialMap(InstanceType instance_type,
2317 int instance_size) {
  Object* result;
  MaybeObject* maybe_result = AllocateRawMap();
2320 if (!maybe_result->ToObject(&result)) return maybe_result;
2322 // Map::cast cannot be used due to uninitialized map field.
2323 reinterpret_cast<Map*>(result)->set_map(raw_unchecked_meta_map());
2324 reinterpret_cast<Map*>(result)->set_instance_type(instance_type);
2325 reinterpret_cast<Map*>(result)->set_instance_size(instance_size);
2326 reinterpret_cast<Map*>(result)->set_visitor_id(
2327 StaticVisitorBase::GetVisitorId(instance_type, instance_size));
2328 reinterpret_cast<Map*>(result)->set_inobject_properties(0);
2329 reinterpret_cast<Map*>(result)->set_pre_allocated_property_fields(0);
2330 reinterpret_cast<Map*>(result)->set_unused_property_fields(0);
2331 reinterpret_cast<Map*>(result)->set_bit_field(0);
2332 reinterpret_cast<Map*>(result)->set_bit_field2(0);
  int bit_field3 = Map::EnumLengthBits::encode(Map::kInvalidEnumCache) |
                   Map::OwnsDescriptors::encode(true);
  reinterpret_cast<Map*>(result)->set_bit_field3(bit_field3);
  return result;
}
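// AllocatePartialMap is only safe during bootstrapping: the meta map and the
// root objects do not exist yet, so fields that would normally point at them
// are left unset and patched later in CreateInitialMaps.  AllocateMap below
// is the full version used once the initial maps and roots are in place.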
MaybeObject* Heap::AllocateMap(InstanceType instance_type,
                               int instance_size,
                               ElementsKind elements_kind) {
  Map* result;
  MaybeObject* maybe_result = AllocateRawMap();
  if (!maybe_result->To(&result)) return maybe_result;
2347 Map* map = reinterpret_cast<Map*>(result);
2348 map->set_map_no_write_barrier(meta_map());
2349 map->set_instance_type(instance_type);
2350 map->set_visitor_id(
2351 StaticVisitorBase::GetVisitorId(instance_type, instance_size));
2352 map->set_prototype(null_value(), SKIP_WRITE_BARRIER);
2353 map->set_constructor(null_value(), SKIP_WRITE_BARRIER);
2354 map->set_instance_size(instance_size);
2355 map->set_inobject_properties(0);
2356 map->set_pre_allocated_property_fields(0);
2357 map->set_code_cache(empty_fixed_array(), SKIP_WRITE_BARRIER);
2358 map->set_dependent_code(DependentCode::cast(empty_fixed_array()),
2359 SKIP_WRITE_BARRIER);
2360 map->init_back_pointer(undefined_value());
2361 map->set_unused_property_fields(0);
2362 map->set_instance_descriptors(empty_descriptor_array());
2363 map->set_bit_field(0);
2364 map->set_bit_field2(1 << Map::kIsExtensible);
2365 int bit_field3 = Map::EnumLengthBits::encode(Map::kInvalidEnumCache) |
2366 Map::OwnsDescriptors::encode(true);
  map->set_bit_field3(bit_field3);
  map->set_elements_kind(elements_kind);
  return map;
}
2374 MaybeObject* Heap::AllocateCodeCache() {
2375 CodeCache* code_cache;
2376 { MaybeObject* maybe_code_cache = AllocateStruct(CODE_CACHE_TYPE);
2377 if (!maybe_code_cache->To(&code_cache)) return maybe_code_cache;
2379 code_cache->set_default_cache(empty_fixed_array(), SKIP_WRITE_BARRIER);
2380 code_cache->set_normal_type_cache(undefined_value(), SKIP_WRITE_BARRIER);
2385 MaybeObject* Heap::AllocatePolymorphicCodeCache() {
2386 return AllocateStruct(POLYMORPHIC_CODE_CACHE_TYPE);
2390 MaybeObject* Heap::AllocateAccessorPair() {
2391 AccessorPair* accessors;
2392 { MaybeObject* maybe_accessors = AllocateStruct(ACCESSOR_PAIR_TYPE);
2393 if (!maybe_accessors->To(&accessors)) return maybe_accessors;
2395 accessors->set_getter(the_hole_value(), SKIP_WRITE_BARRIER);
2396 accessors->set_setter(the_hole_value(), SKIP_WRITE_BARRIER);
2401 MaybeObject* Heap::AllocateTypeFeedbackInfo() {
2402 TypeFeedbackInfo* info;
2403 { MaybeObject* maybe_info = AllocateStruct(TYPE_FEEDBACK_INFO_TYPE);
2404 if (!maybe_info->To(&info)) return maybe_info;
2406 info->initialize_storage();
2407 info->set_type_feedback_cells(TypeFeedbackCells::cast(empty_fixed_array()),
2408 SKIP_WRITE_BARRIER);
2413 MaybeObject* Heap::AllocateAliasedArgumentsEntry(int aliased_context_slot) {
2414 AliasedArgumentsEntry* entry;
2415 { MaybeObject* maybe_entry = AllocateStruct(ALIASED_ARGUMENTS_ENTRY_TYPE);
2416 if (!maybe_entry->To(&entry)) return maybe_entry;
2418 entry->set_aliased_context_slot(aliased_context_slot);
2423 const Heap::StringTypeTable Heap::string_type_table[] = {
2424 #define STRING_TYPE_ELEMENT(type, size, name, camel_name) \
2425 {type, size, k##camel_name##MapRootIndex},
2426 STRING_TYPE_LIST(STRING_TYPE_ELEMENT)
2427 #undef STRING_TYPE_ELEMENT
2431 const Heap::ConstantStringTable Heap::constant_string_table[] = {
2432 #define CONSTANT_STRING_ELEMENT(name, contents) \
2433 {contents, k##name##RootIndex},
2434 INTERNALIZED_STRING_LIST(CONSTANT_STRING_ELEMENT)
2435 #undef CONSTANT_STRING_ELEMENT
2439 const Heap::StructTable Heap::struct_table[] = {
2440 #define STRUCT_TABLE_ELEMENT(NAME, Name, name) \
2441 { NAME##_TYPE, Name::kSize, k##Name##MapRootIndex },
2442 STRUCT_LIST(STRUCT_TABLE_ELEMENT)
2443 #undef STRUCT_TABLE_ELEMENT
2447 bool Heap::CreateInitialMaps() {
2449 { MaybeObject* maybe_obj = AllocatePartialMap(MAP_TYPE, Map::kSize);
2450 if (!maybe_obj->ToObject(&obj)) return false;
2452 // Map::cast cannot be used due to uninitialized map field.
2453 Map* new_meta_map = reinterpret_cast<Map*>(obj);
2454 set_meta_map(new_meta_map);
2455 new_meta_map->set_map(new_meta_map);
2457 { MaybeObject* maybe_obj =
2458 AllocatePartialMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2459 if (!maybe_obj->ToObject(&obj)) return false;
2461 set_fixed_array_map(Map::cast(obj));
2463 { MaybeObject* maybe_obj = AllocatePartialMap(ODDBALL_TYPE, Oddball::kSize);
2464 if (!maybe_obj->ToObject(&obj)) return false;
2466 set_oddball_map(Map::cast(obj));
2468 // Allocate the empty array.
2469 { MaybeObject* maybe_obj = AllocateEmptyFixedArray();
2470 if (!maybe_obj->ToObject(&obj)) return false;
2472 set_empty_fixed_array(FixedArray::cast(obj));
2474 { MaybeObject* maybe_obj = Allocate(oddball_map(), OLD_POINTER_SPACE);
2475 if (!maybe_obj->ToObject(&obj)) return false;
2477 set_null_value(Oddball::cast(obj));
2478 Oddball::cast(obj)->set_kind(Oddball::kNull);
2480 { MaybeObject* maybe_obj = Allocate(oddball_map(), OLD_POINTER_SPACE);
2481 if (!maybe_obj->ToObject(&obj)) return false;
2483 set_undefined_value(Oddball::cast(obj));
2484 Oddball::cast(obj)->set_kind(Oddball::kUndefined);
2485 ASSERT(!InNewSpace(undefined_value()));
2487 // Allocate the empty descriptor array.
2488 { MaybeObject* maybe_obj = AllocateEmptyFixedArray();
2489 if (!maybe_obj->ToObject(&obj)) return false;
2491 set_empty_descriptor_array(DescriptorArray::cast(obj));
2493 // Fix the instance_descriptors for the existing maps.
2494 meta_map()->set_code_cache(empty_fixed_array());
2495 meta_map()->set_dependent_code(DependentCode::cast(empty_fixed_array()));
2496 meta_map()->init_back_pointer(undefined_value());
2497 meta_map()->set_instance_descriptors(empty_descriptor_array());
2499 fixed_array_map()->set_code_cache(empty_fixed_array());
2500 fixed_array_map()->set_dependent_code(
2501 DependentCode::cast(empty_fixed_array()));
2502 fixed_array_map()->init_back_pointer(undefined_value());
2503 fixed_array_map()->set_instance_descriptors(empty_descriptor_array());
2505 oddball_map()->set_code_cache(empty_fixed_array());
2506 oddball_map()->set_dependent_code(DependentCode::cast(empty_fixed_array()));
2507 oddball_map()->init_back_pointer(undefined_value());
2508 oddball_map()->set_instance_descriptors(empty_descriptor_array());
2510 // Fix prototype object for existing maps.
2511 meta_map()->set_prototype(null_value());
2512 meta_map()->set_constructor(null_value());
2514 fixed_array_map()->set_prototype(null_value());
2515 fixed_array_map()->set_constructor(null_value());
2517 oddball_map()->set_prototype(null_value());
2518 oddball_map()->set_constructor(null_value());
2520 { MaybeObject* maybe_obj =
2521 AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2522 if (!maybe_obj->ToObject(&obj)) return false;
2524 set_fixed_cow_array_map(Map::cast(obj));
2525 ASSERT(fixed_array_map() != fixed_cow_array_map());
2527 { MaybeObject* maybe_obj =
2528 AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2529 if (!maybe_obj->ToObject(&obj)) return false;
2531 set_scope_info_map(Map::cast(obj));
2533 { MaybeObject* maybe_obj = AllocateMap(HEAP_NUMBER_TYPE, HeapNumber::kSize);
2534 if (!maybe_obj->ToObject(&obj)) return false;
2536 set_heap_number_map(Map::cast(obj));
2538 { MaybeObject* maybe_obj = AllocateMap(SYMBOL_TYPE, Symbol::kSize);
2539 if (!maybe_obj->ToObject(&obj)) return false;
2541 set_symbol_map(Map::cast(obj));
2543 { MaybeObject* maybe_obj = AllocateMap(FOREIGN_TYPE, Foreign::kSize);
2544 if (!maybe_obj->ToObject(&obj)) return false;
2546 set_foreign_map(Map::cast(obj));
2548 for (unsigned i = 0; i < ARRAY_SIZE(string_type_table); i++) {
2549 const StringTypeTable& entry = string_type_table[i];
2550 { MaybeObject* maybe_obj = AllocateMap(entry.type, entry.size);
2551 if (!maybe_obj->ToObject(&obj)) return false;
2553 roots_[entry.index] = Map::cast(obj);
2556 { MaybeObject* maybe_obj = AllocateMap(STRING_TYPE, kVariableSizeSentinel);
2557 if (!maybe_obj->ToObject(&obj)) return false;
2559 set_undetectable_string_map(Map::cast(obj));
2560 Map::cast(obj)->set_is_undetectable();
2562 { MaybeObject* maybe_obj =
2563 AllocateMap(ASCII_STRING_TYPE, kVariableSizeSentinel);
2564 if (!maybe_obj->ToObject(&obj)) return false;
2566 set_undetectable_ascii_string_map(Map::cast(obj));
2567 Map::cast(obj)->set_is_undetectable();
2569 { MaybeObject* maybe_obj =
2570 AllocateMap(FIXED_DOUBLE_ARRAY_TYPE, kVariableSizeSentinel);
2571 if (!maybe_obj->ToObject(&obj)) return false;
2573 set_fixed_double_array_map(Map::cast(obj));
2575 { MaybeObject* maybe_obj =
2576 AllocateMap(BYTE_ARRAY_TYPE, kVariableSizeSentinel);
2577 if (!maybe_obj->ToObject(&obj)) return false;
2579 set_byte_array_map(Map::cast(obj));
2581 { MaybeObject* maybe_obj =
2582 AllocateMap(FREE_SPACE_TYPE, kVariableSizeSentinel);
2583 if (!maybe_obj->ToObject(&obj)) return false;
2585 set_free_space_map(Map::cast(obj));
2587 { MaybeObject* maybe_obj = AllocateByteArray(0, TENURED);
2588 if (!maybe_obj->ToObject(&obj)) return false;
2590 set_empty_byte_array(ByteArray::cast(obj));
2592 { MaybeObject* maybe_obj =
2593 AllocateMap(EXTERNAL_PIXEL_ARRAY_TYPE, ExternalArray::kAlignedSize);
2594 if (!maybe_obj->ToObject(&obj)) return false;
2596 set_external_pixel_array_map(Map::cast(obj));
2598 { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_BYTE_ARRAY_TYPE,
2599 ExternalArray::kAlignedSize);
2600 if (!maybe_obj->ToObject(&obj)) return false;
2602 set_external_byte_array_map(Map::cast(obj));
2604 { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE,
2605 ExternalArray::kAlignedSize);
2606 if (!maybe_obj->ToObject(&obj)) return false;
2608 set_external_unsigned_byte_array_map(Map::cast(obj));
2610 { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_SHORT_ARRAY_TYPE,
2611 ExternalArray::kAlignedSize);
2612 if (!maybe_obj->ToObject(&obj)) return false;
2614 set_external_short_array_map(Map::cast(obj));
2616 { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE,
2617 ExternalArray::kAlignedSize);
2618 if (!maybe_obj->ToObject(&obj)) return false;
2620 set_external_unsigned_short_array_map(Map::cast(obj));
2622 { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_INT_ARRAY_TYPE,
2623 ExternalArray::kAlignedSize);
2624 if (!maybe_obj->ToObject(&obj)) return false;
2626 set_external_int_array_map(Map::cast(obj));
2628 { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_UNSIGNED_INT_ARRAY_TYPE,
2629 ExternalArray::kAlignedSize);
2630 if (!maybe_obj->ToObject(&obj)) return false;
2632 set_external_unsigned_int_array_map(Map::cast(obj));
2634 { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_FLOAT_ARRAY_TYPE,
2635 ExternalArray::kAlignedSize);
2636 if (!maybe_obj->ToObject(&obj)) return false;
2638 set_external_float_array_map(Map::cast(obj));
2640 { MaybeObject* maybe_obj =
2641 AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2642 if (!maybe_obj->ToObject(&obj)) return false;
2644 set_non_strict_arguments_elements_map(Map::cast(obj));
2646 { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_DOUBLE_ARRAY_TYPE,
2647 ExternalArray::kAlignedSize);
2648 if (!maybe_obj->ToObject(&obj)) return false;
2650 set_external_double_array_map(Map::cast(obj));
2652 { MaybeObject* maybe_obj = AllocateEmptyExternalArray(kExternalByteArray);
2653 if (!maybe_obj->ToObject(&obj)) return false;
2655 set_empty_external_byte_array(ExternalArray::cast(obj));
2657 { MaybeObject* maybe_obj =
2658 AllocateEmptyExternalArray(kExternalUnsignedByteArray);
2659 if (!maybe_obj->ToObject(&obj)) return false;
2661 set_empty_external_unsigned_byte_array(ExternalArray::cast(obj));
2663 { MaybeObject* maybe_obj = AllocateEmptyExternalArray(kExternalShortArray);
2664 if (!maybe_obj->ToObject(&obj)) return false;
2666 set_empty_external_short_array(ExternalArray::cast(obj));
2668 { MaybeObject* maybe_obj = AllocateEmptyExternalArray(
2669 kExternalUnsignedShortArray);
2670 if (!maybe_obj->ToObject(&obj)) return false;
2672 set_empty_external_unsigned_short_array(ExternalArray::cast(obj));
2674 { MaybeObject* maybe_obj = AllocateEmptyExternalArray(kExternalIntArray);
2675 if (!maybe_obj->ToObject(&obj)) return false;
2677 set_empty_external_int_array(ExternalArray::cast(obj));
2679 { MaybeObject* maybe_obj =
2680 AllocateEmptyExternalArray(kExternalUnsignedIntArray);
2681 if (!maybe_obj->ToObject(&obj)) return false;
2683 set_empty_external_unsigned_int_array(ExternalArray::cast(obj));
2685 { MaybeObject* maybe_obj = AllocateEmptyExternalArray(kExternalFloatArray);
2686 if (!maybe_obj->ToObject(&obj)) return false;
2688 set_empty_external_float_array(ExternalArray::cast(obj));
2690 { MaybeObject* maybe_obj = AllocateEmptyExternalArray(kExternalDoubleArray);
2691 if (!maybe_obj->ToObject(&obj)) return false;
2693 set_empty_external_double_array(ExternalArray::cast(obj));
2695 { MaybeObject* maybe_obj = AllocateEmptyExternalArray(kExternalPixelArray);
2696 if (!maybe_obj->ToObject(&obj)) return false;
2698 set_empty_external_pixel_array(ExternalArray::cast(obj));
2700 { MaybeObject* maybe_obj = AllocateMap(CODE_TYPE, kVariableSizeSentinel);
2701 if (!maybe_obj->ToObject(&obj)) return false;
2703 set_code_map(Map::cast(obj));
2705 { MaybeObject* maybe_obj = AllocateMap(CELL_TYPE, Cell::kSize);
2706 if (!maybe_obj->ToObject(&obj)) return false;
2708 set_cell_map(Map::cast(obj));
2710 { MaybeObject* maybe_obj = AllocateMap(PROPERTY_CELL_TYPE,
2711 PropertyCell::kSize);
2712 if (!maybe_obj->ToObject(&obj)) return false;
2714 set_global_property_cell_map(Map::cast(obj));
2716 { MaybeObject* maybe_obj = AllocateMap(FILLER_TYPE, kPointerSize);
2717 if (!maybe_obj->ToObject(&obj)) return false;
2719 set_one_pointer_filler_map(Map::cast(obj));
2721 { MaybeObject* maybe_obj = AllocateMap(FILLER_TYPE, 2 * kPointerSize);
2722 if (!maybe_obj->ToObject(&obj)) return false;
2724 set_two_pointer_filler_map(Map::cast(obj));
2726 for (unsigned i = 0; i < ARRAY_SIZE(struct_table); i++) {
2727 const StructTable& entry = struct_table[i];
2728 { MaybeObject* maybe_obj = AllocateMap(entry.type, entry.size);
2729 if (!maybe_obj->ToObject(&obj)) return false;
2731 roots_[entry.index] = Map::cast(obj);
2734 { MaybeObject* maybe_obj =
2735 AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2736 if (!maybe_obj->ToObject(&obj)) return false;
2738 set_hash_table_map(Map::cast(obj));
2740 { MaybeObject* maybe_obj =
2741 AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2742 if (!maybe_obj->ToObject(&obj)) return false;
2744 set_function_context_map(Map::cast(obj));
2746 { MaybeObject* maybe_obj =
2747 AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2748 if (!maybe_obj->ToObject(&obj)) return false;
2750 set_catch_context_map(Map::cast(obj));
2752 { MaybeObject* maybe_obj =
2753 AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2754 if (!maybe_obj->ToObject(&obj)) return false;
2756 set_with_context_map(Map::cast(obj));
2758 { MaybeObject* maybe_obj =
2759 AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2760 if (!maybe_obj->ToObject(&obj)) return false;
2762 set_block_context_map(Map::cast(obj));
2764 { MaybeObject* maybe_obj =
2765 AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2766 if (!maybe_obj->ToObject(&obj)) return false;
2768 set_module_context_map(Map::cast(obj));
2770 { MaybeObject* maybe_obj =
2771 AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2772 if (!maybe_obj->ToObject(&obj)) return false;
2774 set_global_context_map(Map::cast(obj));
2776 { MaybeObject* maybe_obj =
2777 AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2778 if (!maybe_obj->ToObject(&obj)) return false;
2780 Map* native_context_map = Map::cast(obj);
2781 native_context_map->set_dictionary_map(true);
2782 native_context_map->set_visitor_id(StaticVisitorBase::kVisitNativeContext);
2783 set_native_context_map(native_context_map);
2785 { MaybeObject* maybe_obj = AllocateMap(SHARED_FUNCTION_INFO_TYPE,
2786 SharedFunctionInfo::kAlignedSize);
2787 if (!maybe_obj->ToObject(&obj)) return false;
2789 set_shared_function_info_map(Map::cast(obj));
2791 { MaybeObject* maybe_obj = AllocateMap(JS_MESSAGE_OBJECT_TYPE,
2792 JSMessageObject::kSize);
2793 if (!maybe_obj->ToObject(&obj)) return false;
2795 set_message_object_map(Map::cast(obj));
2798 { MaybeObject* maybe_obj =
2799 AllocateMap(JS_OBJECT_TYPE, JSObject::kHeaderSize + kPointerSize);
2800 if (!maybe_obj->To(&external_map)) return false;
2802 external_map->set_is_extensible(false);
2803 set_external_map(external_map);
2805 ASSERT(!InNewSpace(empty_fixed_array()));
2810 MaybeObject* Heap::AllocateHeapNumber(double value, PretenureFlag pretenure) {
2811 // Statically ensure that it is safe to allocate heap numbers in paged
2813 STATIC_ASSERT(HeapNumber::kSize <= Page::kNonCodeObjectAreaSize);
2814 AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
2817 { MaybeObject* maybe_result =
2818 AllocateRaw(HeapNumber::kSize, space, OLD_DATA_SPACE);
2819 if (!maybe_result->ToObject(&result)) return maybe_result;
2822 HeapObject::cast(result)->set_map_no_write_barrier(heap_number_map());
2823 HeapNumber::cast(result)->set_value(value);
2828 MaybeObject* Heap::AllocateHeapNumber(double value) {
2829 // Use general version, if we're forced to always allocate.
2830 if (always_allocate()) return AllocateHeapNumber(value, TENURED);
2832 // This version of AllocateHeapNumber is optimized for
2833 // allocation in new space.
2834 STATIC_ASSERT(HeapNumber::kSize <= Page::kMaxNonCodeHeapObjectSize);
2836 { MaybeObject* maybe_result = new_space_.AllocateRaw(HeapNumber::kSize);
2837 if (!maybe_result->ToObject(&result)) return maybe_result;
2839 HeapObject::cast(result)->set_map_no_write_barrier(heap_number_map());
2840 HeapNumber::cast(result)->set_value(value);
2845 MaybeObject* Heap::AllocateCell(Object* value) {
2847 { MaybeObject* maybe_result = AllocateRawCell();
2848 if (!maybe_result->ToObject(&result)) return maybe_result;
2850 HeapObject::cast(result)->set_map_no_write_barrier(cell_map());
2851 Cell::cast(result)->set_value(value);
2856 MaybeObject* Heap::AllocatePropertyCell(Object* value) {
2858 { MaybeObject* maybe_result = AllocateRawPropertyCell();
2859 if (!maybe_result->ToObject(&result)) return maybe_result;
2861 HeapObject::cast(result)->set_map_no_write_barrier(
2862 global_property_cell_map());
2863 PropertyCell* cell = PropertyCell::cast(result);
2864 cell->set_dependent_code(DependentCode::cast(empty_fixed_array()),
2865 SKIP_WRITE_BARRIER);
2866 cell->set_value(value);
2867 cell->set_type(Type::None());
2872 MaybeObject* Heap::AllocateBox(Object* value, PretenureFlag pretenure) {
2874 MaybeObject* maybe_result = AllocateStruct(BOX_TYPE);
2875 if (!maybe_result->To(&result)) return maybe_result;
2876 result->set_value(value);
2881 MaybeObject* Heap::CreateOddball(const char* to_string,
2885 { MaybeObject* maybe_result = Allocate(oddball_map(), OLD_POINTER_SPACE);
2886 if (!maybe_result->ToObject(&result)) return maybe_result;
2888 return Oddball::cast(result)->Initialize(to_string, to_number, kind);
2892 bool Heap::CreateApiObjects() {
2895 { MaybeObject* maybe_obj = AllocateMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
2896 if (!maybe_obj->ToObject(&obj)) return false;
  // Don't use Smi-only elements optimizations for objects with the neander
  // map. There are too many cases where element values are set directly with a
  // bottleneck to trap the Smi-only -> fast elements transition, and there
  // appears to be no benefit from optimizing this case.
2902 Map* new_neander_map = Map::cast(obj);
2903 new_neander_map->set_elements_kind(TERMINAL_FAST_ELEMENTS_KIND);
2904 set_neander_map(new_neander_map);
2906 { MaybeObject* maybe_obj = AllocateJSObjectFromMap(neander_map());
2907 if (!maybe_obj->ToObject(&obj)) return false;
2910 { MaybeObject* maybe_elements = AllocateFixedArray(2);
2911 if (!maybe_elements->ToObject(&elements)) return false;
2913 FixedArray::cast(elements)->set(0, Smi::FromInt(0));
2914 JSObject::cast(obj)->set_elements(FixedArray::cast(elements));
2915 set_message_listeners(JSObject::cast(obj));
void Heap::CreateJSEntryStub() {
  JSEntryStub stub;
  set_js_entry_code(*stub.GetCode(isolate()));
}
2927 void Heap::CreateJSConstructEntryStub() {
2928 JSConstructEntryStub stub;
2929 set_js_construct_entry_code(*stub.GetCode(isolate()));
2933 void Heap::CreateFixedStubs() {
  // Here we create roots for fixed stubs. They are needed at GC
  // for cooking and uncooking (check out frames.cc).
  // This eliminates the need for doing dictionary lookups in the
  // stub cache for these stubs.
2938 HandleScope scope(isolate());
  // gcc-4.4 has a problem generating correct code for the following snippet:
  // { JSEntryStub stub;
  //   js_entry_code_ = *stub.GetCode();
  // }
  // { JSConstructEntryStub stub;
  //   js_construct_entry_code_ = *stub.GetCode();
  // }
  // To work around the problem, make separate functions without inlining.
2947 Heap::CreateJSEntryStub();
2948 Heap::CreateJSConstructEntryStub();
2950 // Create stubs that should be there, so we don't unexpectedly have to
2951 // create them if we need them during the creation of another stub.
2952 // Stub creation mixes raw pointers and handles in an unsafe manner so
2953 // we cannot create stubs while we are creating stubs.
2954 CodeStub::GenerateStubsAheadOfTime(isolate());
2958 bool Heap::CreateInitialObjects() {
2961 // The -0 value must be set before NumberFromDouble works.
2962 { MaybeObject* maybe_obj = AllocateHeapNumber(-0.0, TENURED);
2963 if (!maybe_obj->ToObject(&obj)) return false;
2965 set_minus_zero_value(HeapNumber::cast(obj));
2966 ASSERT(std::signbit(minus_zero_value()->Number()) != 0);
2968 { MaybeObject* maybe_obj = AllocateHeapNumber(OS::nan_value(), TENURED);
2969 if (!maybe_obj->ToObject(&obj)) return false;
2971 set_nan_value(HeapNumber::cast(obj));
2973 { MaybeObject* maybe_obj = AllocateHeapNumber(V8_INFINITY, TENURED);
2974 if (!maybe_obj->ToObject(&obj)) return false;
2976 set_infinity_value(HeapNumber::cast(obj));
  // The hole has not been created yet, but we want to put something
  // predictable in the gaps in the string table, so let's make that Smi zero.
2980 set_the_hole_value(reinterpret_cast<Oddball*>(Smi::FromInt(0)));
2982 // Allocate initial string table.
2983 { MaybeObject* maybe_obj =
2984 StringTable::Allocate(this, kInitialStringTableSize);
2985 if (!maybe_obj->ToObject(&obj)) return false;
2987 // Don't use set_string_table() due to asserts.
2988 roots_[kStringTableRootIndex] = obj;
2990 // Finish initializing oddballs after creating the string table.
2991 { MaybeObject* maybe_obj =
2992 undefined_value()->Initialize("undefined",
2994 Oddball::kUndefined);
2995 if (!maybe_obj->ToObject(&obj)) return false;
2998 // Initialize the null_value.
2999 { MaybeObject* maybe_obj =
3000 null_value()->Initialize("null", Smi::FromInt(0), Oddball::kNull);
3001 if (!maybe_obj->ToObject(&obj)) return false;
3004 { MaybeObject* maybe_obj = CreateOddball("true",
3007 if (!maybe_obj->ToObject(&obj)) return false;
3009 set_true_value(Oddball::cast(obj));
3011 { MaybeObject* maybe_obj = CreateOddball("false",
3014 if (!maybe_obj->ToObject(&obj)) return false;
3016 set_false_value(Oddball::cast(obj));
3018 { MaybeObject* maybe_obj = CreateOddball("hole",
3021 if (!maybe_obj->ToObject(&obj)) return false;
3023 set_the_hole_value(Oddball::cast(obj));
3025 { MaybeObject* maybe_obj = CreateOddball("uninitialized",
3027 Oddball::kUninitialized);
3028 if (!maybe_obj->ToObject(&obj)) return false;
3030 set_uninitialized_value(Oddball::cast(obj));
3032 { MaybeObject* maybe_obj = CreateOddball("arguments_marker",
3034 Oddball::kArgumentMarker);
3035 if (!maybe_obj->ToObject(&obj)) return false;
3037 set_arguments_marker(Oddball::cast(obj));
3039 { MaybeObject* maybe_obj = CreateOddball("no_interceptor_result_sentinel",
3042 if (!maybe_obj->ToObject(&obj)) return false;
3044 set_no_interceptor_result_sentinel(obj);
3046 { MaybeObject* maybe_obj = CreateOddball("termination_exception",
3049 if (!maybe_obj->ToObject(&obj)) return false;
3051 set_termination_exception(obj);
3053 for (unsigned i = 0; i < ARRAY_SIZE(constant_string_table); i++) {
3054 { MaybeObject* maybe_obj =
3055 InternalizeUtf8String(constant_string_table[i].contents);
3056 if (!maybe_obj->ToObject(&obj)) return false;
3058 roots_[constant_string_table[i].index] = String::cast(obj);
3061 // Allocate the hidden string which is used to identify the hidden properties
3062 // in JSObjects. The hash code has a special value so that it will not match
3063 // the empty string when searching for the property. It cannot be part of the
3064 // loop above because it needs to be allocated manually with the special
3065 // hash code in place. The hash code for the hidden_string is zero to ensure
3066 // that it will always be at the first entry in property descriptors.
3067 { MaybeObject* maybe_obj = AllocateOneByteInternalizedString(
3068 OneByteVector("", 0), String::kEmptyStringHash);
3069 if (!maybe_obj->ToObject(&obj)) return false;
3071 hidden_string_ = String::cast(obj);
3073 // Allocate the code_stubs dictionary. The initial size is set to avoid
3074 // expanding the dictionary during bootstrapping.
3075 { MaybeObject* maybe_obj = UnseededNumberDictionary::Allocate(this, 128);
3076 if (!maybe_obj->ToObject(&obj)) return false;
3078 set_code_stubs(UnseededNumberDictionary::cast(obj));
3081 // Allocate the non_monomorphic_cache used in stub-cache.cc. The initial size
3082 // is set to avoid expanding the dictionary during bootstrapping.
3083 { MaybeObject* maybe_obj = UnseededNumberDictionary::Allocate(this, 64);
3084 if (!maybe_obj->ToObject(&obj)) return false;
3086 set_non_monomorphic_cache(UnseededNumberDictionary::cast(obj));
3088 { MaybeObject* maybe_obj = AllocatePolymorphicCodeCache();
3089 if (!maybe_obj->ToObject(&obj)) return false;
3091 set_polymorphic_code_cache(PolymorphicCodeCache::cast(obj));
3093 set_instanceof_cache_function(Smi::FromInt(0));
3094 set_instanceof_cache_map(Smi::FromInt(0));
3095 set_instanceof_cache_answer(Smi::FromInt(0));
3099 // Allocate the dictionary of intrinsic function names.
3100 { MaybeObject* maybe_obj =
3101 NameDictionary::Allocate(this, Runtime::kNumFunctions);
3102 if (!maybe_obj->ToObject(&obj)) return false;
3104 { MaybeObject* maybe_obj = Runtime::InitializeIntrinsicFunctionNames(this,
3106 if (!maybe_obj->ToObject(&obj)) return false;
3108 set_intrinsic_function_names(NameDictionary::cast(obj));
3110 { MaybeObject* maybe_obj = AllocateInitialNumberStringCache();
3111 if (!maybe_obj->ToObject(&obj)) return false;
3113 set_number_string_cache(FixedArray::cast(obj));
3115 // Allocate cache for single character one byte strings.
3116 { MaybeObject* maybe_obj =
3117 AllocateFixedArray(String::kMaxOneByteCharCode + 1, TENURED);
3118 if (!maybe_obj->ToObject(&obj)) return false;
3120 set_single_character_string_cache(FixedArray::cast(obj));
3122 // Allocate cache for string split.
3123 { MaybeObject* maybe_obj = AllocateFixedArray(
3124 RegExpResultsCache::kRegExpResultsCacheSize, TENURED);
3125 if (!maybe_obj->ToObject(&obj)) return false;
3127 set_string_split_cache(FixedArray::cast(obj));
3129 { MaybeObject* maybe_obj = AllocateFixedArray(
3130 RegExpResultsCache::kRegExpResultsCacheSize, TENURED);
3131 if (!maybe_obj->ToObject(&obj)) return false;
3133 set_regexp_multiple_cache(FixedArray::cast(obj));
3135 // Allocate cache for external strings pointing to native source code.
3136 { MaybeObject* maybe_obj = AllocateFixedArray(Natives::GetBuiltinsCount());
3137 if (!maybe_obj->ToObject(&obj)) return false;
3139 set_natives_source_cache(FixedArray::cast(obj));
3141 // Allocate object to hold object observation state.
3142 { MaybeObject* maybe_obj = AllocateMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
3143 if (!maybe_obj->ToObject(&obj)) return false;
3145 { MaybeObject* maybe_obj = AllocateJSObjectFromMap(Map::cast(obj));
3146 if (!maybe_obj->ToObject(&obj)) return false;
3148 set_observation_state(JSObject::cast(obj));
3150 { MaybeObject* maybe_obj = AllocateSymbol();
3151 if (!maybe_obj->ToObject(&obj)) return false;
3153 set_frozen_symbol(Symbol::cast(obj));
3155 { MaybeObject* maybe_obj = SeededNumberDictionary::Allocate(this, 0, TENURED);
3156 if (!maybe_obj->ToObject(&obj)) return false;
3158 SeededNumberDictionary::cast(obj)->set_requires_slow_elements();
3159 set_empty_slow_element_dictionary(SeededNumberDictionary::cast(obj));
3161 // Handling of script id generation is in Factory::NewScript.
3162 set_last_script_id(Smi::FromInt(v8::Script::kNoScriptId));
3164 // Initialize keyed lookup cache.
3165 isolate_->keyed_lookup_cache()->Clear();
3167 // Initialize context slot cache.
3168 isolate_->context_slot_cache()->Clear();
3170 // Initialize descriptor cache.
3171 isolate_->descriptor_lookup_cache()->Clear();
3173 // Initialize compilation cache.
3174 isolate_->compilation_cache()->Clear();
3180 bool Heap::RootCanBeWrittenAfterInitialization(Heap::RootListIndex root_index) {
3181 RootListIndex writable_roots[] = {
3182 kStoreBufferTopRootIndex,
3183 kStackLimitRootIndex,
3184 kNumberStringCacheRootIndex,
3185 kInstanceofCacheFunctionRootIndex,
3186 kInstanceofCacheMapRootIndex,
3187 kInstanceofCacheAnswerRootIndex,
3188 kCodeStubsRootIndex,
3189 kNonMonomorphicCacheRootIndex,
3190 kPolymorphicCodeCacheRootIndex,
3191 kLastScriptIdRootIndex,
3192 kEmptyScriptRootIndex,
3193 kRealStackLimitRootIndex,
3194 kArgumentsAdaptorDeoptPCOffsetRootIndex,
3195 kConstructStubDeoptPCOffsetRootIndex,
3196 kGetterStubDeoptPCOffsetRootIndex,
3197 kSetterStubDeoptPCOffsetRootIndex,
3198 kStringTableRootIndex,
3201 for (unsigned int i = 0; i < ARRAY_SIZE(writable_roots); i++) {
3202 if (root_index == writable_roots[i])
3209 Object* RegExpResultsCache::Lookup(Heap* heap,
3211 Object* key_pattern,
3212 ResultsCacheType type) {
3214 if (!key_string->IsInternalizedString()) return Smi::FromInt(0);
3215 if (type == STRING_SPLIT_SUBSTRINGS) {
3216 ASSERT(key_pattern->IsString());
3217 if (!key_pattern->IsInternalizedString()) return Smi::FromInt(0);
3218 cache = heap->string_split_cache();
3220 ASSERT(type == REGEXP_MULTIPLE_INDICES);
3221 ASSERT(key_pattern->IsFixedArray());
3222 cache = heap->regexp_multiple_cache();
3225 uint32_t hash = key_string->Hash();
3226 uint32_t index = ((hash & (kRegExpResultsCacheSize - 1)) &
3227 ~(kArrayEntriesPerCacheEntry - 1));
3228 if (cache->get(index + kStringOffset) == key_string &&
3229 cache->get(index + kPatternOffset) == key_pattern) {
3230 return cache->get(index + kArrayOffset);
3233 ((index + kArrayEntriesPerCacheEntry) & (kRegExpResultsCacheSize - 1));
3234 if (cache->get(index + kStringOffset) == key_string &&
3235 cache->get(index + kPatternOffset) == key_pattern) {
3236 return cache->get(index + kArrayOffset);
3238 return Smi::FromInt(0);
3242 void RegExpResultsCache::Enter(Heap* heap,
3244 Object* key_pattern,
3245 FixedArray* value_array,
3246 ResultsCacheType type) {
3248 if (!key_string->IsInternalizedString()) return;
3249 if (type == STRING_SPLIT_SUBSTRINGS) {
3250 ASSERT(key_pattern->IsString());
3251 if (!key_pattern->IsInternalizedString()) return;
3252 cache = heap->string_split_cache();
3254 ASSERT(type == REGEXP_MULTIPLE_INDICES);
3255 ASSERT(key_pattern->IsFixedArray());
3256 cache = heap->regexp_multiple_cache();
3259 uint32_t hash = key_string->Hash();
3260 uint32_t index = ((hash & (kRegExpResultsCacheSize - 1)) &
3261 ~(kArrayEntriesPerCacheEntry - 1));
3262 if (cache->get(index + kStringOffset) == Smi::FromInt(0)) {
3263 cache->set(index + kStringOffset, key_string);
3264 cache->set(index + kPatternOffset, key_pattern);
3265 cache->set(index + kArrayOffset, value_array);
3268 ((index + kArrayEntriesPerCacheEntry) & (kRegExpResultsCacheSize - 1));
3269 if (cache->get(index2 + kStringOffset) == Smi::FromInt(0)) {
3270 cache->set(index2 + kStringOffset, key_string);
3271 cache->set(index2 + kPatternOffset, key_pattern);
3272 cache->set(index2 + kArrayOffset, value_array);
3274 cache->set(index2 + kStringOffset, Smi::FromInt(0));
3275 cache->set(index2 + kPatternOffset, Smi::FromInt(0));
3276 cache->set(index2 + kArrayOffset, Smi::FromInt(0));
3277 cache->set(index + kStringOffset, key_string);
3278 cache->set(index + kPatternOffset, key_pattern);
3279 cache->set(index + kArrayOffset, value_array);
3282 // If the array is a reasonably short list of substrings, convert it into a
3283 // list of internalized strings.
3284 if (type == STRING_SPLIT_SUBSTRINGS && value_array->length() < 100) {
3285 for (int i = 0; i < value_array->length(); i++) {
3286 String* str = String::cast(value_array->get(i));
3287 Object* internalized_str;
3288 MaybeObject* maybe_string = heap->InternalizeString(str);
3289 if (maybe_string->ToObject(&internalized_str)) {
3290 value_array->set(i, internalized_str);
3294 // Convert backing store to a copy-on-write array.
3295 value_array->set_map_no_write_barrier(heap->fixed_cow_array_map());
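// The results cache behaves like a small 2-way set-associative cache: the
// string hash selects a primary entry of kArrayEntriesPerCacheEntry slots,
// the next entry serves as the secondary slot on a conflict, and a second
// conflict evicts the secondary entry and overwrites the primary one.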
3299 void RegExpResultsCache::Clear(FixedArray* cache) {
3300 for (int i = 0; i < kRegExpResultsCacheSize; i++) {
3301 cache->set(i, Smi::FromInt(0));
3306 MaybeObject* Heap::AllocateInitialNumberStringCache() {
3307 MaybeObject* maybe_obj =
3308 AllocateFixedArray(kInitialNumberStringCacheSize * 2, TENURED);
3313 int Heap::FullSizeNumberStringCacheLength() {
3314 // Compute the size of the number string cache based on the max newspace size.
3315 // The number string cache has a minimum size based on twice the initial cache
3316 // size to ensure that it is bigger after being made 'full size'.
3317 int number_string_cache_size = max_semispace_size_ / 512;
3318 number_string_cache_size = Max(kInitialNumberStringCacheSize * 2,
3319 Min(0x4000, number_string_cache_size));
3320 // There is a string and a number per entry so the length is twice the number
3322 return number_string_cache_size * 2;
3326 void Heap::AllocateFullSizeNumberStringCache() {
  // The idea is to have a small number string cache in the snapshot to keep
  // boot-time memory usage down.  If the cache has already been grown to full
  // size while the snapshot is being created, that idea has not worked out.
3330 ASSERT(!Serializer::enabled() || FLAG_extra_code != NULL);
3331 MaybeObject* maybe_obj =
3332 AllocateFixedArray(FullSizeNumberStringCacheLength(), TENURED);
3334 if (maybe_obj->ToObject(&new_cache)) {
3335 // We don't bother to repopulate the cache with entries from the old cache.
3336 // It will be repopulated soon enough with new strings.
3337 set_number_string_cache(FixedArray::cast(new_cache));
3339 // If allocation fails then we just return without doing anything. It is only
3340 // a cache, so best effort is OK here.
3344 void Heap::FlushNumberStringCache() {
3345 // Flush the number to string cache.
3346 int len = number_string_cache()->length();
3347 for (int i = 0; i < len; i++) {
3348 number_string_cache()->set_undefined(this, i);
3353 static inline int double_get_hash(double d) {
3354 DoubleRepresentation rep(d);
3355 return static_cast<int>(rep.bits) ^ static_cast<int>(rep.bits >> 32);
3359 static inline int smi_get_hash(Smi* smi) {
3360 return smi->value();
Object* Heap::GetNumberStringCache(Object* number) {
  int hash;
  int mask = (number_string_cache()->length() >> 1) - 1;
  if (number->IsSmi()) {
    hash = smi_get_hash(Smi::cast(number)) & mask;
  } else {
    hash = double_get_hash(number->Number()) & mask;
  }
3372 Object* key = number_string_cache()->get(hash * 2);
3373 if (key == number) {
3374 return String::cast(number_string_cache()->get(hash * 2 + 1));
3375 } else if (key->IsHeapNumber() &&
3376 number->IsHeapNumber() &&
3377 key->Number() == number->Number()) {
    return String::cast(number_string_cache()->get(hash * 2 + 1));
  }
  return undefined_value();
}
void Heap::SetNumberStringCache(Object* number, String* string) {
  int hash;
  int mask = (number_string_cache()->length() >> 1) - 1;
  if (number->IsSmi()) {
    hash = smi_get_hash(Smi::cast(number)) & mask;
  } else {
    hash = double_get_hash(number->Number()) & mask;
  }
3392 if (number_string_cache()->get(hash * 2) != undefined_value() &&
3393 number_string_cache()->length() != FullSizeNumberStringCacheLength()) {
3394 // The first time we have a hash collision, we move to the full sized
3395 // number string cache.
3396 AllocateFullSizeNumberStringCache();
  }
  number_string_cache()->set(hash * 2, number);
  number_string_cache()->set(hash * 2 + 1, string);
}
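// The number-string cache is a flat FixedArray of (key, value) pairs: the
// number is stored at index hash * 2 and the cached string at hash * 2 + 1,
// which is why its length is always twice the number of entries.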
3404 MaybeObject* Heap::NumberToString(Object* number,
3405 bool check_number_string_cache,
3406 PretenureFlag pretenure) {
3407 isolate_->counters()->number_to_string_runtime()->Increment();
3408 if (check_number_string_cache) {
3409 Object* cached = GetNumberStringCache(number);
3410 if (cached != undefined_value()) {
3416 Vector<char> buffer(arr, ARRAY_SIZE(arr));
3418 if (number->IsSmi()) {
3419 int num = Smi::cast(number)->value();
3420 str = IntToCString(num, buffer);
3422 double num = HeapNumber::cast(number)->value();
3423 str = DoubleToCString(num, buffer);
  Object* js_string;
  MaybeObject* maybe_js_string =
      AllocateStringFromOneByte(CStrVector(str), pretenure);
  if (maybe_js_string->ToObject(&js_string)) {
    SetNumberStringCache(number, String::cast(js_string));
  }
  return maybe_js_string;
}
3436 MaybeObject* Heap::Uint32ToString(uint32_t value,
3437 bool check_number_string_cache) {
3439 MaybeObject* maybe = NumberFromUint32(value);
3440 if (!maybe->To<Object>(&number)) return maybe;
3441 return NumberToString(number, check_number_string_cache);
3445 Map* Heap::MapForExternalArrayType(ExternalArrayType array_type) {
3446 return Map::cast(roots_[RootIndexForExternalArrayType(array_type)]);
3450 Heap::RootListIndex Heap::RootIndexForExternalArrayType(
3451 ExternalArrayType array_type) {
3452 switch (array_type) {
3453 case kExternalByteArray:
3454 return kExternalByteArrayMapRootIndex;
3455 case kExternalUnsignedByteArray:
3456 return kExternalUnsignedByteArrayMapRootIndex;
3457 case kExternalShortArray:
3458 return kExternalShortArrayMapRootIndex;
3459 case kExternalUnsignedShortArray:
3460 return kExternalUnsignedShortArrayMapRootIndex;
3461 case kExternalIntArray:
3462 return kExternalIntArrayMapRootIndex;
3463 case kExternalUnsignedIntArray:
3464 return kExternalUnsignedIntArrayMapRootIndex;
3465 case kExternalFloatArray:
3466 return kExternalFloatArrayMapRootIndex;
3467 case kExternalDoubleArray:
3468 return kExternalDoubleArrayMapRootIndex;
3469 case kExternalPixelArray:
3470 return kExternalPixelArrayMapRootIndex;
3473 return kUndefinedValueRootIndex;
3477 Heap::RootListIndex Heap::RootIndexForEmptyExternalArray(
3478 ElementsKind elementsKind) {
3479 switch (elementsKind) {
3480 case EXTERNAL_BYTE_ELEMENTS:
3481 return kEmptyExternalByteArrayRootIndex;
3482 case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
3483 return kEmptyExternalUnsignedByteArrayRootIndex;
3484 case EXTERNAL_SHORT_ELEMENTS:
3485 return kEmptyExternalShortArrayRootIndex;
3486 case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
3487 return kEmptyExternalUnsignedShortArrayRootIndex;
3488 case EXTERNAL_INT_ELEMENTS:
3489 return kEmptyExternalIntArrayRootIndex;
3490 case EXTERNAL_UNSIGNED_INT_ELEMENTS:
3491 return kEmptyExternalUnsignedIntArrayRootIndex;
3492 case EXTERNAL_FLOAT_ELEMENTS:
3493 return kEmptyExternalFloatArrayRootIndex;
3494 case EXTERNAL_DOUBLE_ELEMENTS:
3495 return kEmptyExternalDoubleArrayRootIndex;
3496 case EXTERNAL_PIXEL_ELEMENTS:
3497 return kEmptyExternalPixelArrayRootIndex;
3500 return kUndefinedValueRootIndex;
3504 ExternalArray* Heap::EmptyExternalArrayForMap(Map* map) {
3505 return ExternalArray::cast(
3506 roots_[RootIndexForEmptyExternalArray(map->elements_kind())]);
3512 MaybeObject* Heap::NumberFromDouble(double value, PretenureFlag pretenure) {
3513 // We need to distinguish the minus zero value and this cannot be
3514 // done after conversion to int. Doing this by comparing bit
3515 // patterns is faster than using fpclassify() et al.
3516 static const DoubleRepresentation minus_zero(-0.0);
3518 DoubleRepresentation rep(value);
3519 if (rep.bits == minus_zero.bits) {
3520 return AllocateHeapNumber(-0.0, pretenure);
3523 int int_value = FastD2I(value);
3524 if (value == int_value && Smi::IsValid(int_value)) {
3525 return Smi::FromInt(int_value);
3528 // Materialize the value in the heap.
3529 return AllocateHeapNumber(value, pretenure);
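// For example, NumberFromDouble(2.0) returns the Smi 2 without allocating,
// while NumberFromDouble(-0.0) must allocate a HeapNumber because a Smi
// cannot represent the minus-zero bit pattern that the comparison above
// detects.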
3533 MaybeObject* Heap::AllocateForeign(Address address, PretenureFlag pretenure) {
3534 // Statically ensure that it is safe to allocate foreigns in paged spaces.
3535 STATIC_ASSERT(Foreign::kSize <= Page::kMaxNonCodeHeapObjectSize);
3536 AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
3538 MaybeObject* maybe_result = Allocate(foreign_map(), space);
3539 if (!maybe_result->To(&result)) return maybe_result;
3540 result->set_foreign_address(address);
3545 MaybeObject* Heap::AllocateSharedFunctionInfo(Object* name) {
3546 SharedFunctionInfo* share;
3547 MaybeObject* maybe = Allocate(shared_function_info_map(), OLD_POINTER_SPACE);
3548 if (!maybe->To<SharedFunctionInfo>(&share)) return maybe;
3550 // Set pointer fields.
3551 share->set_name(name);
3552 Code* illegal = isolate_->builtins()->builtin(Builtins::kIllegal);
3553 share->set_code(illegal);
3554 share->set_optimized_code_map(Smi::FromInt(0));
3555 share->set_scope_info(ScopeInfo::Empty(isolate_));
3556 Code* construct_stub =
3557 isolate_->builtins()->builtin(Builtins::kJSConstructStubGeneric);
3558 share->set_construct_stub(construct_stub);
3559 share->set_instance_class_name(Object_string());
3560 share->set_function_data(undefined_value(), SKIP_WRITE_BARRIER);
3561 share->set_script(undefined_value(), SKIP_WRITE_BARRIER);
3562 share->set_debug_info(undefined_value(), SKIP_WRITE_BARRIER);
3563 share->set_inferred_name(empty_string(), SKIP_WRITE_BARRIER);
3564 share->set_initial_map(undefined_value(), SKIP_WRITE_BARRIER);
3565 share->set_ast_node_count(0);
3566 share->set_stress_deopt_counter(FLAG_deopt_every_n_times);
3567 share->set_counters(0);
3569 // Set integer fields (smi or int, depending on the architecture).
3570 share->set_length(0);
3571 share->set_formal_parameter_count(0);
3572 share->set_expected_nof_properties(0);
3573 share->set_num_literals(0);
3574 share->set_start_position_and_type(0);
3575 share->set_end_position(0);
3576 share->set_function_token_position(0);
3577 // All compiler hints default to false or 0.
3578 share->set_compiler_hints(0);
3579 share->set_opt_count(0);
3585 MaybeObject* Heap::AllocateJSMessageObject(String* type,
3590 Object* stack_trace,
3591 Object* stack_frames) {
3593 { MaybeObject* maybe_result = Allocate(message_object_map(), NEW_SPACE);
3594 if (!maybe_result->ToObject(&result)) return maybe_result;
3596 JSMessageObject* message = JSMessageObject::cast(result);
3597 message->set_properties(Heap::empty_fixed_array(), SKIP_WRITE_BARRIER);
3598 message->initialize_elements();
3599 message->set_elements(Heap::empty_fixed_array(), SKIP_WRITE_BARRIER);
3600 message->set_type(type);
3601 message->set_arguments(arguments);
3602 message->set_start_position(start_position);
3603 message->set_end_position(end_position);
3604 message->set_script(script);
3605 message->set_stack_trace(stack_trace);
3606 message->set_stack_frames(stack_frames);
3612 // Returns true for a character in a range. Both limits are inclusive.
3613 static inline bool Between(uint32_t character, uint32_t from, uint32_t to) {
3614 // This makes use of the unsigned wraparound.
3615 return character - from <= to - from;
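// Editorial sketch, not part of the original heap.cc: the single unsigned
// comparison above covers both bounds. When character < from, the subtraction
// wraps around to a value near 2^32, which is always greater than to - from,
// so the test fails without a separate lower-bound check.
static bool SketchInRange(uint32_t character, uint32_t from, uint32_t to) {
  // Equivalent to (from <= character && character <= to) when from <= to.
  return character - from <= to - from;
}
// Example: SketchInRange('5', '0', '9') is true, while SketchInRange('/', '0',
// '9') wraps around and is false.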
3619 MUST_USE_RESULT static inline MaybeObject* MakeOrFindTwoCharacterString(
3624 // Numeric strings have a different hash algorithm not known by
3625 // LookupTwoCharsStringIfExists, so we skip this step for such strings.
3626 if ((!Between(c1, '0', '9') || !Between(c2, '0', '9')) &&
3627 heap->string_table()->LookupTwoCharsStringIfExists(c1, c2, &result)) {
3629 // Now that we know the length is 2, we might as well make use of that fact
3630 // when building the new string.
3631 } else if (static_cast<unsigned>(c1 | c2) <= String::kMaxOneByteCharCodeU) {
3633 ASSERT(IsPowerOf2(String::kMaxOneByteCharCodeU + 1)); // because of this.
3635 { MaybeObject* maybe_result = heap->AllocateRawOneByteString(2);
3636 if (!maybe_result->ToObject(&result)) return maybe_result;
3638 uint8_t* dest = SeqOneByteString::cast(result)->GetChars();
3639 dest[0] = static_cast<uint8_t>(c1);
3640 dest[1] = static_cast<uint8_t>(c2);
3644 { MaybeObject* maybe_result = heap->AllocateRawTwoByteString(2);
3645 if (!maybe_result->ToObject(&result)) return maybe_result;
3647 uc16* dest = SeqTwoByteString::cast(result)->GetChars();
3655 MaybeObject* Heap::AllocateConsString(String* first, String* second) {
3656 int first_length = first->length();
3657 if (first_length == 0) {
3661 int second_length = second->length();
3662 if (second_length == 0) {
3666 int length = first_length + second_length;
3668 // Optimization for two-character strings often used as keys in a
3669 // decompression dictionary. Check whether we already have the string in
3670 // the string table to prevent creation of many unnecessary strings.
3672 uint16_t c1 = first->Get(0);
3673 uint16_t c2 = second->Get(0);
3674 return MakeOrFindTwoCharacterString(this, c1, c2);
3677 bool first_is_one_byte = first->IsOneByteRepresentation();
3678 bool second_is_one_byte = second->IsOneByteRepresentation();
3679 bool is_one_byte = first_is_one_byte && second_is_one_byte;
3680 // Make sure that an out of memory exception is thrown if the length
3681 // of the new cons string is too large.
3682 if (length > String::kMaxLength || length < 0) {
3683 isolate()->context()->mark_out_of_memory();
3684 return Failure::OutOfMemoryException(0x4);
3687 bool is_one_byte_data_in_two_byte_string = false;
3689 // At least one of the strings uses two-byte representation so we
3690 // can't use the fast case code for short ASCII strings below, but
3691 // we can try to save memory if all chars actually fit in ASCII.
3692 is_one_byte_data_in_two_byte_string =
3693 first->HasOnlyOneByteChars() && second->HasOnlyOneByteChars();
3694 if (is_one_byte_data_in_two_byte_string) {
3695 isolate_->counters()->string_add_runtime_ext_to_ascii()->Increment();
3699 // If the resulting string is small make a flat string.
3700 if (length < ConsString::kMinLength) {
3701 // Note that neither of the two inputs can be a slice because:
3702 STATIC_ASSERT(ConsString::kMinLength <= SlicedString::kMinLength);
3703 ASSERT(first->IsFlat());
3704 ASSERT(second->IsFlat());
3707 { MaybeObject* maybe_result = AllocateRawOneByteString(length);
3708 if (!maybe_result->ToObject(&result)) return maybe_result;
3710 // Copy the characters into the new object.
3711 uint8_t* dest = SeqOneByteString::cast(result)->GetChars();
3714 if (first->IsExternalString()) {
3715 src = ExternalAsciiString::cast(first)->GetChars();
3717 src = SeqOneByteString::cast(first)->GetChars();
3719 for (int i = 0; i < first_length; i++) *dest++ = src[i];
3720 // Copy second part.
3721 if (second->IsExternalString()) {
3722 src = ExternalAsciiString::cast(second)->GetChars();
3724 src = SeqOneByteString::cast(second)->GetChars();
3726 for (int i = 0; i < second_length; i++) *dest++ = src[i];
3729 if (is_one_byte_data_in_two_byte_string) {
3731 { MaybeObject* maybe_result = AllocateRawOneByteString(length);
3732 if (!maybe_result->ToObject(&result)) return maybe_result;
3734 // Copy the characters into the new object.
3735 uint8_t* dest = SeqOneByteString::cast(result)->GetChars();
3736 String::WriteToFlat(first, dest, 0, first_length);
3737 String::WriteToFlat(second, dest + first_length, 0, second_length);
3738 isolate_->counters()->string_add_runtime_ext_to_ascii()->Increment();
3743 { MaybeObject* maybe_result = AllocateRawTwoByteString(length);
3744 if (!maybe_result->ToObject(&result)) return maybe_result;
3746 // Copy the characters into the new object.
3747 uc16* dest = SeqTwoByteString::cast(result)->GetChars();
3748 String::WriteToFlat(first, dest, 0, first_length);
3749 String::WriteToFlat(second, dest + first_length, 0, second_length);
3754 Map* map = (is_one_byte || is_one_byte_data_in_two_byte_string) ?
3755 cons_ascii_string_map() : cons_string_map();
3758 { MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
3759 if (!maybe_result->ToObject(&result)) return maybe_result;
3762 DisallowHeapAllocation no_gc;
3763 ConsString* cons_string = ConsString::cast(result);
3764 WriteBarrierMode mode = cons_string->GetWriteBarrierMode(no_gc);
3765 cons_string->set_length(length);
3766 cons_string->set_hash_field(String::kEmptyHashField);
3767 cons_string->set_first(first, mode);
3768 cons_string->set_second(second, mode);
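// Editorial sketch, not part of the original heap.cc: the function above
// either copies short results into a fresh flat string or builds a two-pointer
// ConsString, so concatenating long strings stays O(1) while short results
// avoid the extra indirection. A simplified mirror of that choice; the
// min_cons_length and max_length parameters stand in for
// ConsString::kMinLength and String::kMaxLength, whose values are not
// restated here.
static const char* SketchConsStrategy(int first_length, int second_length,
                                      int min_cons_length, int max_length) {
  int length = first_length + second_length;
  if (length > max_length || length < 0) return "fail: result too long";
  if (length < min_cons_length) return "copy characters into a flat string";
  return "allocate a ConsString holding both halves";  // no character copying
}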
3773 MaybeObject* Heap::AllocateSubString(String* buffer,
3776 PretenureFlag pretenure) {
3777 int length = end - start;
3779 return empty_string();
3780 } else if (length == 1) {
3781 return LookupSingleCharacterStringFromCode(buffer->Get(start));
3782 } else if (length == 2) {
3783 // Optimization for two-character strings often used as keys in a
3784 // decompression dictionary. Check whether we already have the string in
3785 // the string table to prevent creation of many unnecessary strings.
3786 uint16_t c1 = buffer->Get(start);
3787 uint16_t c2 = buffer->Get(start + 1);
3788 return MakeOrFindTwoCharacterString(this, c1, c2);
3791 // Make an attempt to flatten the buffer to reduce access time.
3792 buffer = buffer->TryFlattenGetString();
3794 if (!FLAG_string_slices ||
3795 !buffer->IsFlat() ||
3796 length < SlicedString::kMinLength ||
3797 pretenure == TENURED) {
3799 // WriteToFlat takes care of the case when an indirect string has a
3800 // different encoding from its underlying string. These encodings may
3801 // differ because of externalization.
3802 bool is_one_byte = buffer->IsOneByteRepresentation();
3803 { MaybeObject* maybe_result = is_one_byte
3804 ? AllocateRawOneByteString(length, pretenure)
3805 : AllocateRawTwoByteString(length, pretenure);
3806 if (!maybe_result->ToObject(&result)) return maybe_result;
3808 String* string_result = String::cast(result);
3809 // Copy the characters into the new object.
3811 ASSERT(string_result->IsOneByteRepresentation());
3812 uint8_t* dest = SeqOneByteString::cast(string_result)->GetChars();
3813 String::WriteToFlat(buffer, dest, start, end);
3815 ASSERT(string_result->IsTwoByteRepresentation());
3816 uc16* dest = SeqTwoByteString::cast(string_result)->GetChars();
3817 String::WriteToFlat(buffer, dest, start, end);
3822 ASSERT(buffer->IsFlat());
3824 if (FLAG_verify_heap) {
3825 buffer->StringVerify();
3830 // When slicing an indirect string we use its encoding for a newly created
3831 // slice and don't check the encoding of the underlying string. This is safe
3832 // even if the encodings are different because of externalization. If an
3833 // indirect ASCII string is pointing to a two-byte string, the two-byte char
3834 // codes of the underlying string must still fit into ASCII (because
3835 // externalization must not change char codes).
3836 { Map* map = buffer->IsOneByteRepresentation()
3837 ? sliced_ascii_string_map()
3838 : sliced_string_map();
3839 MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
3840 if (!maybe_result->ToObject(&result)) return maybe_result;
3843 DisallowHeapAllocation no_gc;
3844 SlicedString* sliced_string = SlicedString::cast(result);
3845 sliced_string->set_length(length);
3846 sliced_string->set_hash_field(String::kEmptyHashField);
3847 if (buffer->IsConsString()) {
3848 ConsString* cons = ConsString::cast(buffer);
3849 ASSERT(cons->second()->length() == 0);
3850 sliced_string->set_parent(cons->first());
3851 sliced_string->set_offset(start);
3852 } else if (buffer->IsSlicedString()) {
3853 // Prevent nesting sliced strings.
3854 SlicedString* parent_slice = SlicedString::cast(buffer);
3855 sliced_string->set_parent(parent_slice->parent());
3856 sliced_string->set_offset(start + parent_slice->offset());
3858 sliced_string->set_parent(buffer);
3859 sliced_string->set_offset(start);
3861 ASSERT(sliced_string->parent()->IsSeqString() ||
3862 sliced_string->parent()->IsExternalString());
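// Editorial sketch, not part of the original heap.cc: the branches above keep
// slices exactly one level deep. Slicing a flattened cons string retargets the
// slice at its first part, and slicing an existing slice adds the offsets, so
// every slice points directly at a sequential or external parent.
static int SketchComposedSliceOffset(int parent_offset, int start) {
  // A slice taken at `start` inside another slice begins at
  // parent_offset + start of the ultimate backing string.
  return parent_offset + start;
}
// Example: slicing [3, 8) out of a slice that itself starts at offset 10 of a
// flat string yields a slice at offset 13 of that flat string.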
3867 MaybeObject* Heap::AllocateExternalStringFromAscii(
3868 const ExternalAsciiString::Resource* resource) {
3869 size_t length = resource->length();
3870 if (length > static_cast<size_t>(String::kMaxLength)) {
3871 isolate()->context()->mark_out_of_memory();
3872 return Failure::OutOfMemoryException(0x5);
3875 Map* map = external_ascii_string_map();
3877 { MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
3878 if (!maybe_result->ToObject(&result)) return maybe_result;
3881 ExternalAsciiString* external_string = ExternalAsciiString::cast(result);
3882 external_string->set_length(static_cast<int>(length));
3883 external_string->set_hash_field(String::kEmptyHashField);
3884 external_string->set_resource(resource);
3890 MaybeObject* Heap::AllocateExternalStringFromTwoByte(
3891 const ExternalTwoByteString::Resource* resource) {
3892 size_t length = resource->length();
3893 if (length > static_cast<size_t>(String::kMaxLength)) {
3894 isolate()->context()->mark_out_of_memory();
3895 return Failure::OutOfMemoryException(0x6);
3898 // For small strings we check whether the resource contains only
3899 // one byte characters. If yes, we use a different string map.
3900 static const size_t kOneByteCheckLengthLimit = 32;
3901 bool is_one_byte = length <= kOneByteCheckLengthLimit &&
3902 String::IsOneByte(resource->data(), static_cast<int>(length));
3903 Map* map = is_one_byte ?
3904 external_string_with_one_byte_data_map() : external_string_map();
3906 { MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
3907 if (!maybe_result->ToObject(&result)) return maybe_result;
3910 ExternalTwoByteString* external_string = ExternalTwoByteString::cast(result);
3911 external_string->set_length(static_cast<int>(length));
3912 external_string->set_hash_field(String::kEmptyHashField);
3913 external_string->set_resource(resource);
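// Editorial sketch, not part of the original heap.cc: for short external
// two-byte strings the code above scans the payload once and, if every code
// unit would fit in a single byte, records that fact in the chosen map. A
// minimal standalone version of such a scan; 0xFF stands in for
// String::kMaxOneByteCharCode.
static bool SketchAllUnitsFitInOneByte(const uint16_t* data, size_t length) {
  for (size_t i = 0; i < length; i++) {
    if (data[i] > 0xFF) return false;  // needs a full two-byte code unit
  }
  return true;
}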
3919 MaybeObject* Heap::LookupSingleCharacterStringFromCode(uint16_t code) {
3920 if (code <= String::kMaxOneByteCharCode) {
3921 Object* value = single_character_string_cache()->get(code);
3922 if (value != undefined_value()) return value;
3925 buffer[0] = static_cast<uint8_t>(code);
3927 MaybeObject* maybe_result =
3928 InternalizeOneByteString(Vector<const uint8_t>(buffer, 1));
3930 if (!maybe_result->ToObject(&result)) return maybe_result;
3931 single_character_string_cache()->set(code, result);
3936 { MaybeObject* maybe_result = AllocateRawTwoByteString(1);
3937 if (!maybe_result->ToObject(&result)) return maybe_result;
3939 String* answer = String::cast(result);
3940 answer->Set(0, code);
3945 MaybeObject* Heap::AllocateByteArray(int length, PretenureFlag pretenure) {
3946 if (length < 0 || length > ByteArray::kMaxLength) {
3947 return Failure::OutOfMemoryException(0x7);
3949 if (pretenure == NOT_TENURED) {
3950 return AllocateByteArray(length);
3952 int size = ByteArray::SizeFor(length);
3954 { MaybeObject* maybe_result = (size <= Page::kMaxNonCodeHeapObjectSize)
3955 ? old_data_space_->AllocateRaw(size)
3956 : lo_space_->AllocateRaw(size, NOT_EXECUTABLE);
3957 if (!maybe_result->ToObject(&result)) return maybe_result;
3960 reinterpret_cast<ByteArray*>(result)->set_map_no_write_barrier(
3962 reinterpret_cast<ByteArray*>(result)->set_length(length);
3967 MaybeObject* Heap::AllocateByteArray(int length) {
3968 if (length < 0 || length > ByteArray::kMaxLength) {
3969 return Failure::OutOfMemoryException(0x8);
3971 int size = ByteArray::SizeFor(length);
3972 AllocationSpace space =
3973 (size > Page::kMaxNonCodeHeapObjectSize) ? LO_SPACE : NEW_SPACE;
3975 { MaybeObject* maybe_result = AllocateRaw(size, space, OLD_DATA_SPACE);
3976 if (!maybe_result->ToObject(&result)) return maybe_result;
3979 reinterpret_cast<ByteArray*>(result)->set_map_no_write_barrier(
3981 reinterpret_cast<ByteArray*>(result)->set_length(length);
3986 void Heap::CreateFillerObjectAt(Address addr, int size) {
3987 if (size == 0) return;
3988 HeapObject* filler = HeapObject::FromAddress(addr);
3989 if (size == kPointerSize) {
3990 filler->set_map_no_write_barrier(one_pointer_filler_map());
3991 } else if (size == 2 * kPointerSize) {
3992 filler->set_map_no_write_barrier(two_pointer_filler_map());
3994 filler->set_map_no_write_barrier(free_space_map());
3995 FreeSpace::cast(filler)->set_size(size);
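// Editorial sketch, not part of the original heap.cc: the filler chosen above
// depends only on the size of the hole, so a heap iterator can always read a
// valid map from any address. A hedged mirror of the three-way choice, with
// pointer_size standing in for kPointerSize.
enum SketchFillerKind {
  kSketchOnePointerFiller,
  kSketchTwoPointerFiller,
  kSketchFreeSpaceFiller
};
static SketchFillerKind SketchChooseFiller(int size, int pointer_size) {
  if (size == pointer_size) return kSketchOnePointerFiller;
  if (size == 2 * pointer_size) return kSketchTwoPointerFiller;
  return kSketchFreeSpaceFiller;  // a FreeSpace object records its own size
}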
4000 MaybeObject* Heap::AllocateExternalArray(int length,
4001 ExternalArrayType array_type,
4002 void* external_pointer,
4003 PretenureFlag pretenure) {
4004 AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
4006 { MaybeObject* maybe_result = AllocateRaw(ExternalArray::kAlignedSize,
4009 if (!maybe_result->ToObject(&result)) return maybe_result;
4012 reinterpret_cast<ExternalArray*>(result)->set_map_no_write_barrier(
4013 MapForExternalArrayType(array_type));
4014 reinterpret_cast<ExternalArray*>(result)->set_length(length);
4015 reinterpret_cast<ExternalArray*>(result)->set_external_pointer(
4022 MaybeObject* Heap::CreateCode(const CodeDesc& desc,
4024 Handle<Object> self_reference,
4026 bool crankshafted) {
4027 // Allocate ByteArray before the Code object, so that we do not risk
4028 // leaving an uninitialized Code object (and breaking the heap).
4029 ByteArray* reloc_info;
4030 MaybeObject* maybe_reloc_info = AllocateByteArray(desc.reloc_size, TENURED);
4031 if (!maybe_reloc_info->To(&reloc_info)) return maybe_reloc_info;
4034 int body_size = RoundUp(desc.instr_size, kObjectAlignment);
4035 int obj_size = Code::SizeFor(body_size);
4036 ASSERT(IsAligned(static_cast<intptr_t>(obj_size), kCodeAlignment));
4037 MaybeObject* maybe_result;
4038 // Large code objects and code objects which should stay at a fixed address
4039 // are allocated in large object space.
4041 bool force_lo_space = obj_size > code_space()->AreaSize();
4042 if (force_lo_space) {
4043 maybe_result = lo_space_->AllocateRaw(obj_size, EXECUTABLE);
4045 maybe_result = code_space_->AllocateRaw(obj_size);
4047 if (!maybe_result->To<HeapObject>(&result)) return maybe_result;
4049 if (immovable && !force_lo_space &&
4050 // Objects on the first page of each space are never moved.
4051 !code_space_->FirstPage()->Contains(result->address())) {
4052 // Discard the first code allocation, which was on a page where it could be moved.
4054 CreateFillerObjectAt(result->address(), obj_size);
4055 maybe_result = lo_space_->AllocateRaw(obj_size, EXECUTABLE);
4056 if (!maybe_result->To<HeapObject>(&result)) return maybe_result;
4059 // Initialize the object
4060 result->set_map_no_write_barrier(code_map());
4061 Code* code = Code::cast(result);
4062 ASSERT(!isolate_->code_range()->exists() ||
4063 isolate_->code_range()->contains(code->address()));
4064 code->set_instruction_size(desc.instr_size);
4065 code->set_relocation_info(reloc_info);
4066 code->set_flags(flags);
4067 if (code->is_call_stub() || code->is_keyed_call_stub()) {
4068 code->set_check_type(RECEIVER_MAP_CHECK);
4070 code->set_is_crankshafted(crankshafted);
4071 code->set_deoptimization_data(empty_fixed_array(), SKIP_WRITE_BARRIER);
4072 code->InitializeTypeFeedbackInfoNoWriteBarrier(undefined_value());
4073 code->set_handler_table(empty_fixed_array(), SKIP_WRITE_BARRIER);
4074 code->set_gc_metadata(Smi::FromInt(0));
4075 code->set_ic_age(global_ic_age_);
4076 code->set_prologue_offset(kPrologueOffsetNotSet);
4077 if (code->kind() == Code::OPTIMIZED_FUNCTION) {
4078 code->set_marked_for_deoptimization(false);
4080 // Allow self references to created code object by patching the handle to
4081 // point to the newly allocated Code object.
4082 if (!self_reference.is_null()) {
4083 *(self_reference.location()) = code;
4085 // Migrate generated code.
4086 // The generated code can contain Object** values (typically from handles)
4087 // that are dereferenced during the copy to point directly to the actual heap
4088 // objects. These pointers can include references to the code object itself,
4089 // through the self_reference parameter.
4090 code->CopyFrom(desc);
4093 if (FLAG_verify_heap) {
4101 MaybeObject* Heap::CopyCode(Code* code) {
4102 // Allocate an object the same size as the code object.
4103 int obj_size = code->Size();
4104 MaybeObject* maybe_result;
4105 if (obj_size > code_space()->AreaSize()) {
4106 maybe_result = lo_space_->AllocateRaw(obj_size, EXECUTABLE);
4108 maybe_result = code_space_->AllocateRaw(obj_size);
4112 if (!maybe_result->ToObject(&result)) return maybe_result;
4114 // Copy code object.
4115 Address old_addr = code->address();
4116 Address new_addr = reinterpret_cast<HeapObject*>(result)->address();
4117 CopyBlock(new_addr, old_addr, obj_size);
4118 // Relocate the copy.
4119 Code* new_code = Code::cast(result);
4120 ASSERT(!isolate_->code_range()->exists() ||
4121 isolate_->code_range()->contains(code->address()));
4122 new_code->Relocate(new_addr - old_addr);
4127 MaybeObject* Heap::CopyCode(Code* code, Vector<byte> reloc_info) {
4128 // Allocate ByteArray before the Code object, so that we do not risk
4129 // leaving an uninitialized Code object (and breaking the heap).
4130 Object* reloc_info_array;
4131 { MaybeObject* maybe_reloc_info_array =
4132 AllocateByteArray(reloc_info.length(), TENURED);
4133 if (!maybe_reloc_info_array->ToObject(&reloc_info_array)) {
4134 return maybe_reloc_info_array;
4138 int new_body_size = RoundUp(code->instruction_size(), kObjectAlignment);
4140 int new_obj_size = Code::SizeFor(new_body_size);
4142 Address old_addr = code->address();
4144 size_t relocation_offset =
4145 static_cast<size_t>(code->instruction_end() - old_addr);
4147 MaybeObject* maybe_result;
4148 if (new_obj_size > code_space()->AreaSize()) {
4149 maybe_result = lo_space_->AllocateRaw(new_obj_size, EXECUTABLE);
4151 maybe_result = code_space_->AllocateRaw(new_obj_size);
4155 if (!maybe_result->ToObject(&result)) return maybe_result;
4157 // Copy code object.
4158 Address new_addr = reinterpret_cast<HeapObject*>(result)->address();
4160 // Copy header and instructions.
4161 CopyBytes(new_addr, old_addr, relocation_offset);
4163 Code* new_code = Code::cast(result);
4164 new_code->set_relocation_info(ByteArray::cast(reloc_info_array));
4166 // Copy patched rinfo.
4167 CopyBytes(new_code->relocation_start(),
4169 static_cast<size_t>(reloc_info.length()));
4171 // Relocate the copy.
4172 ASSERT(!isolate_->code_range()->exists() ||
4173 isolate_->code_range()->contains(code->address()));
4174 new_code->Relocate(new_addr - old_addr);
4177 if (FLAG_verify_heap) {
4185 MaybeObject* Heap::AllocateWithAllocationSite(Map* map, AllocationSpace space,
4186 Handle<Object> allocation_site_info_payload) {
4187 ASSERT(gc_state_ == NOT_IN_GC);
4188 ASSERT(map->instance_type() != MAP_TYPE);
4189 // If allocation failures are disallowed, we may allocate in a different
4190 // space when new space is full and the object is not a large object.
4191 AllocationSpace retry_space =
4192 (space != NEW_SPACE) ? space : TargetSpaceId(map->instance_type());
4193 int size = map->instance_size() + AllocationSiteInfo::kSize;
4195 MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
4196 if (!maybe_result->ToObject(&result)) return maybe_result;
4197 // No need for write barrier since object is white and map is in old space.
4198 HeapObject::cast(result)->set_map_no_write_barrier(map);
4199 AllocationSiteInfo* alloc_info = reinterpret_cast<AllocationSiteInfo*>(
4200 reinterpret_cast<Address>(result) + map->instance_size());
4201 alloc_info->set_map_no_write_barrier(allocation_site_info_map());
4202 alloc_info->set_payload(*allocation_site_info_payload, SKIP_WRITE_BARRIER);
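// Editorial sketch, not part of the original heap.cc: the allocation above
// reserves one contiguous chunk and lays the AllocationSiteInfo record
// directly behind the object, which is why the requested size is
// instance_size + AllocationSiteInfo::kSize and the record's address is plain
// pointer arithmetic on the object's base address (uintptr_t here assumes
// <stdint.h>).
static uintptr_t SketchAllocationSiteInfoAddress(uintptr_t object_address,
                                                 int instance_size) {
  // The trailing record starts immediately after the object proper.
  return object_address + static_cast<uintptr_t>(instance_size);
}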
4207 MaybeObject* Heap::Allocate(Map* map, AllocationSpace space) {
4208 ASSERT(gc_state_ == NOT_IN_GC);
4209 ASSERT(map->instance_type() != MAP_TYPE);
4210 // If allocation failures are disallowed, we may allocate in a different
4211 // space when new space is full and the object is not a large object.
4212 AllocationSpace retry_space =
4213 (space != NEW_SPACE) ? space : TargetSpaceId(map->instance_type());
4214 int size = map->instance_size();
4216 MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
4217 if (!maybe_result->ToObject(&result)) return maybe_result;
4218 // No need for write barrier since object is white and map is in old space.
4219 HeapObject::cast(result)->set_map_no_write_barrier(map);
4224 void Heap::InitializeFunction(JSFunction* function,
4225 SharedFunctionInfo* shared,
4226 Object* prototype) {
4227 ASSERT(!prototype->IsMap());
4228 function->initialize_properties();
4229 function->initialize_elements();
4230 function->set_shared(shared);
4231 function->set_code(shared->code());
4232 function->set_prototype_or_initial_map(prototype);
4233 function->set_context(undefined_value());
4234 function->set_literals_or_bindings(empty_fixed_array());
4235 function->set_next_function_link(undefined_value());
4239 MaybeObject* Heap::AllocateFunctionPrototype(JSFunction* function) {
4240 // Make sure to use globals from the function's context, since the function
4241 // can be from a different context.
4242 Context* native_context = function->context()->native_context();
4244 if (function->shared()->is_generator()) {
4245 // Generator prototypes can share maps since they don't have "constructor" properties.
4247 new_map = native_context->generator_object_prototype_map();
4249 // Each function prototype gets a fresh map to avoid unwanted sharing of
4250 // maps between prototypes of different constructors.
4251 JSFunction* object_function = native_context->object_function();
4252 ASSERT(object_function->has_initial_map());
4253 MaybeObject* maybe_map = object_function->initial_map()->Copy();
4254 if (!maybe_map->To(&new_map)) return maybe_map;
4258 MaybeObject* maybe_prototype = AllocateJSObjectFromMap(new_map);
4259 if (!maybe_prototype->ToObject(&prototype)) return maybe_prototype;
4261 if (!function->shared()->is_generator()) {
4262 MaybeObject* maybe_failure =
4263 JSObject::cast(prototype)->SetLocalPropertyIgnoreAttributes(
4264 constructor_string(), function, DONT_ENUM);
4265 if (maybe_failure->IsFailure()) return maybe_failure;
4272 MaybeObject* Heap::AllocateFunction(Map* function_map,
4273 SharedFunctionInfo* shared,
4275 PretenureFlag pretenure) {
4276 AllocationSpace space =
4277 (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
4279 { MaybeObject* maybe_result = Allocate(function_map, space);
4280 if (!maybe_result->ToObject(&result)) return maybe_result;
4282 InitializeFunction(JSFunction::cast(result), shared, prototype);
4287 MaybeObject* Heap::AllocateArgumentsObject(Object* callee, int length) {
4288 // To get fast allocation and map sharing for arguments objects we
4289 // allocate them based on an arguments boilerplate.
4291 JSObject* boilerplate;
4292 int arguments_object_size;
4293 bool strict_mode_callee = callee->IsJSFunction() &&
4294 !JSFunction::cast(callee)->shared()->is_classic_mode();
4295 if (strict_mode_callee) {
4297 isolate()->context()->native_context()->
4298 strict_mode_arguments_boilerplate();
4299 arguments_object_size = kArgumentsObjectSizeStrict;
4302 isolate()->context()->native_context()->arguments_boilerplate();
4303 arguments_object_size = kArgumentsObjectSize;
4306 // This calls Copy directly rather than using Heap::AllocateRaw so we
4307 // duplicate the check here.
4308 ASSERT(AllowHeapAllocation::IsAllowed() && gc_state_ == NOT_IN_GC);
4310 // Check that the size of the boilerplate matches our
4311 // expectations. The ArgumentsAccessStub::GenerateNewObject relies
4312 // on the size being a known constant.
4313 ASSERT(arguments_object_size == boilerplate->map()->instance_size());
4315 // Do the allocation.
4317 { MaybeObject* maybe_result =
4318 AllocateRaw(arguments_object_size, NEW_SPACE, OLD_POINTER_SPACE);
4319 if (!maybe_result->ToObject(&result)) return maybe_result;
4322 // Copy the content. The arguments boilerplate doesn't have any
4323 // fields that point to new space, so it's safe to skip the write barrier.
4325 CopyBlock(HeapObject::cast(result)->address(),
4326 boilerplate->address(),
4327 JSObject::kHeaderSize);
4329 // Set the length property.
4330 JSObject::cast(result)->InObjectPropertyAtPut(kArgumentsLengthIndex,
4331 Smi::FromInt(length),
4332 SKIP_WRITE_BARRIER);
4333 // Set the callee property for non-strict mode arguments object only.
4334 if (!strict_mode_callee) {
4335 JSObject::cast(result)->InObjectPropertyAtPut(kArgumentsCalleeIndex,
4339 // Check the state of the object
4340 ASSERT(JSObject::cast(result)->HasFastProperties());
4341 ASSERT(JSObject::cast(result)->HasFastObjectElements());
4347 MaybeObject* Heap::AllocateInitialMap(JSFunction* fun) {
4348 ASSERT(!fun->has_initial_map());
4350 // First create a new map with the size and number of in-object properties
4351 // suggested by the function.
4352 InstanceType instance_type;
4354 int in_object_properties;
4355 if (fun->shared()->is_generator()) {
4356 instance_type = JS_GENERATOR_OBJECT_TYPE;
4357 instance_size = JSGeneratorObject::kSize;
4358 in_object_properties = 0;
4360 instance_type = JS_OBJECT_TYPE;
4361 instance_size = fun->shared()->CalculateInstanceSize();
4362 in_object_properties = fun->shared()->CalculateInObjectProperties();
4365 MaybeObject* maybe_map = AllocateMap(instance_type, instance_size);
4366 if (!maybe_map->To(&map)) return maybe_map;
4368 // Fetch or allocate prototype.
4370 if (fun->has_instance_prototype()) {
4371 prototype = fun->instance_prototype();
4373 MaybeObject* maybe_prototype = AllocateFunctionPrototype(fun);
4374 if (!maybe_prototype->To(&prototype)) return maybe_prototype;
4376 map->set_inobject_properties(in_object_properties);
4377 map->set_unused_property_fields(in_object_properties);
4378 map->set_prototype(prototype);
4379 ASSERT(map->has_fast_object_elements());
4381 if (!fun->shared()->is_generator()) {
4382 fun->shared()->StartInobjectSlackTracking(map);
4389 void Heap::InitializeJSObjectFromMap(JSObject* obj,
4390 FixedArray* properties,
4392 obj->set_properties(properties);
4393 obj->initialize_elements();
4394 // TODO(1240798): Initialize the object's body using valid initial values
4395 // according to the object's initial map. For example, if the map's
4396 // instance type is JS_ARRAY_TYPE, the length field should be initialized
4397 // to a number (e.g. Smi::FromInt(0)) and the elements initialized to a
4398 // fixed array (e.g. Heap::empty_fixed_array()). Currently, the object
4399 // verification code has to cope with (temporarily) invalid objects. See
4400 // for example, JSArray::JSArrayVerify.
4402 // We cannot always fill with one_pointer_filler_map because objects
4403 // created from API functions expect their internal fields to be initialized
4404 // with undefined_value.
4405 // Pre-allocated fields need to be initialized with undefined_value as well
4406 // so that object accesses before the constructor completes (e.g. in the
4407 // debugger) will not cause a crash.
4408 if (map->constructor()->IsJSFunction() &&
4409 JSFunction::cast(map->constructor())->shared()->
4410 IsInobjectSlackTrackingInProgress()) {
4411 // We might want to shrink the object later.
4412 ASSERT(obj->GetInternalFieldCount() == 0);
4413 filler = Heap::one_pointer_filler_map();
4415 filler = Heap::undefined_value();
4417 obj->InitializeBody(map, Heap::undefined_value(), filler);
4421 MaybeObject* Heap::AllocateJSObjectFromMap(Map* map, PretenureFlag pretenure) {
4422 // JSFunctions should be allocated using AllocateFunction to be
4423 // properly initialized.
4424 ASSERT(map->instance_type() != JS_FUNCTION_TYPE);
4426 // Both types of global objects should be allocated using
4427 // AllocateGlobalObject to be properly initialized.
4428 ASSERT(map->instance_type() != JS_GLOBAL_OBJECT_TYPE);
4429 ASSERT(map->instance_type() != JS_BUILTINS_OBJECT_TYPE);
4431 // Allocate the backing storage for the properties.
4433 map->pre_allocated_property_fields() +
4434 map->unused_property_fields() -
4435 map->inobject_properties();
4436 ASSERT(prop_size >= 0);
4438 { MaybeObject* maybe_properties = AllocateFixedArray(prop_size, pretenure);
4439 if (!maybe_properties->ToObject(&properties)) return maybe_properties;
4442 // Allocate the JSObject.
4443 AllocationSpace space =
4444 (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
4445 if (map->instance_size() > Page::kMaxNonCodeHeapObjectSize) space = LO_SPACE;
4447 MaybeObject* maybe_obj = Allocate(map, space);
4448 if (!maybe_obj->To(&obj)) return maybe_obj;
4450 // Initialize the JSObject.
4451 InitializeJSObjectFromMap(JSObject::cast(obj),
4452 FixedArray::cast(properties),
4454 ASSERT(JSObject::cast(obj)->HasFastElements() ||
4455 JSObject::cast(obj)->HasExternalArrayElements());
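// Editorial worked example, not part of the original heap.cc: the
// out-of-object property array only needs the slots that do not fit inside
// the object itself, hence pre_allocated + unused - inobject above. With, say,
// 6 pre-allocated fields, 2 unused fields and 4 in-object slots, the backing
// store gets 6 + 2 - 4 = 4 entries. A hedged mirror of that arithmetic:
static int SketchBackingStoreSize(int pre_allocated_property_fields,
                                  int unused_property_fields,
                                  int inobject_properties) {
  // Callers assert the result is never negative.
  return pre_allocated_property_fields + unused_property_fields -
         inobject_properties;
}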
4460 MaybeObject* Heap::AllocateJSObjectFromMapWithAllocationSite(Map* map,
4461 Handle<Object> allocation_site_info_payload) {
4462 // JSFunctions should be allocated using AllocateFunction to be
4463 // properly initialized.
4464 ASSERT(map->instance_type() != JS_FUNCTION_TYPE);
4466 // Both types of global objects should be allocated using
4467 // AllocateGlobalObject to be properly initialized.
4468 ASSERT(map->instance_type() != JS_GLOBAL_OBJECT_TYPE);
4469 ASSERT(map->instance_type() != JS_BUILTINS_OBJECT_TYPE);
4471 // Allocate the backing storage for the properties.
4473 map->pre_allocated_property_fields() +
4474 map->unused_property_fields() -
4475 map->inobject_properties();
4476 ASSERT(prop_size >= 0);
4478 { MaybeObject* maybe_properties = AllocateFixedArray(prop_size);
4479 if (!maybe_properties->ToObject(&properties)) return maybe_properties;
4482 // Allocate the JSObject.
4483 AllocationSpace space = NEW_SPACE;
4484 if (map->instance_size() > Page::kMaxNonCodeHeapObjectSize) space = LO_SPACE;
4486 MaybeObject* maybe_obj = AllocateWithAllocationSite(map, space,
4487 allocation_site_info_payload);
4488 if (!maybe_obj->To(&obj)) return maybe_obj;
4490 // Initialize the JSObject.
4491 InitializeJSObjectFromMap(JSObject::cast(obj),
4492 FixedArray::cast(properties),
4494 ASSERT(JSObject::cast(obj)->HasFastElements());
4499 MaybeObject* Heap::AllocateJSObject(JSFunction* constructor,
4500 PretenureFlag pretenure) {
4501 // Allocate the initial map if absent.
4502 if (!constructor->has_initial_map()) {
4503 Object* initial_map;
4504 { MaybeObject* maybe_initial_map = AllocateInitialMap(constructor);
4505 if (!maybe_initial_map->ToObject(&initial_map)) return maybe_initial_map;
4507 constructor->set_initial_map(Map::cast(initial_map));
4508 Map::cast(initial_map)->set_constructor(constructor);
4510 // Allocate the object based on the constructor's initial map.
4511 MaybeObject* result = AllocateJSObjectFromMap(
4512 constructor->initial_map(), pretenure);
4514 // Make sure result is NOT a global object if valid.
4515 Object* non_failure;
4516 ASSERT(!result->ToObject(&non_failure) || !non_failure->IsGlobalObject());
4522 MaybeObject* Heap::AllocateJSObjectWithAllocationSite(JSFunction* constructor,
4523 Handle<Object> allocation_site_info_payload) {
4524 // Allocate the initial map if absent.
4525 if (!constructor->has_initial_map()) {
4526 Object* initial_map;
4527 { MaybeObject* maybe_initial_map = AllocateInitialMap(constructor);
4528 if (!maybe_initial_map->ToObject(&initial_map)) return maybe_initial_map;
4530 constructor->set_initial_map(Map::cast(initial_map));
4531 Map::cast(initial_map)->set_constructor(constructor);
4533 // Allocate the object based on the constructor's initial map, or on the elements kind advice found in the allocation site payload.
4535 Map* initial_map = constructor->initial_map();
4537 Cell* cell = Cell::cast(*allocation_site_info_payload);
4538 Smi* smi = Smi::cast(cell->value());
4539 ElementsKind to_kind = static_cast<ElementsKind>(smi->value());
4540 AllocationSiteMode mode = TRACK_ALLOCATION_SITE;
4541 if (to_kind != initial_map->elements_kind()) {
4542 MaybeObject* maybe_new_map = initial_map->AsElementsKind(to_kind);
4543 if (!maybe_new_map->To(&initial_map)) return maybe_new_map;
4544 // Possibly alter the mode, since we found an updated elements kind
4545 // in the type info cell.
4546 mode = AllocationSiteInfo::GetMode(to_kind);
4549 MaybeObject* result;
4550 if (mode == TRACK_ALLOCATION_SITE) {
4551 result = AllocateJSObjectFromMapWithAllocationSite(initial_map,
4552 allocation_site_info_payload);
4554 result = AllocateJSObjectFromMap(initial_map, NOT_TENURED);
4557 // Make sure result is NOT a global object if valid.
4558 Object* non_failure;
4559 ASSERT(!result->ToObject(&non_failure) || !non_failure->IsGlobalObject());
4565 MaybeObject* Heap::AllocateJSGeneratorObject(JSFunction *function) {
4566 ASSERT(function->shared()->is_generator());
4568 if (function->has_initial_map()) {
4569 map = function->initial_map();
4571 // Allocate the initial map if absent.
4572 MaybeObject* maybe_map = AllocateInitialMap(function);
4573 if (!maybe_map->To(&map)) return maybe_map;
4574 function->set_initial_map(map);
4575 map->set_constructor(function);
4577 ASSERT(map->instance_type() == JS_GENERATOR_OBJECT_TYPE);
4578 return AllocateJSObjectFromMap(map);
4582 MaybeObject* Heap::AllocateJSModule(Context* context, ScopeInfo* scope_info) {
4583 // Allocate a fresh map. Modules do not have a prototype.
4585 MaybeObject* maybe_map = AllocateMap(JS_MODULE_TYPE, JSModule::kSize);
4586 if (!maybe_map->To(&map)) return maybe_map;
4587 // Allocate the object based on the map.
4589 MaybeObject* maybe_module = AllocateJSObjectFromMap(map, TENURED);
4590 if (!maybe_module->To(&module)) return maybe_module;
4591 module->set_context(context);
4592 module->set_scope_info(scope_info);
4597 MaybeObject* Heap::AllocateJSArrayAndStorage(
4598 ElementsKind elements_kind,
4601 ArrayStorageAllocationMode mode,
4602 PretenureFlag pretenure) {
4603 MaybeObject* maybe_array = AllocateJSArray(elements_kind, pretenure);
4605 if (!maybe_array->To(&array)) return maybe_array;
4607 // TODO(mvstanton): this body of code duplicates AllocateJSArrayStorage
4608 // for performance reasons.
4609 ASSERT(capacity >= length);
4611 if (capacity == 0) {
4612 array->set_length(Smi::FromInt(0));
4613 array->set_elements(empty_fixed_array());
4617 FixedArrayBase* elms;
4618 MaybeObject* maybe_elms = NULL;
4619 if (IsFastDoubleElementsKind(elements_kind)) {
4620 if (mode == DONT_INITIALIZE_ARRAY_ELEMENTS) {
4621 maybe_elms = AllocateUninitializedFixedDoubleArray(capacity);
4623 ASSERT(mode == INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
4624 maybe_elms = AllocateFixedDoubleArrayWithHoles(capacity);
4627 ASSERT(IsFastSmiOrObjectElementsKind(elements_kind));
4628 if (mode == DONT_INITIALIZE_ARRAY_ELEMENTS) {
4629 maybe_elms = AllocateUninitializedFixedArray(capacity);
4631 ASSERT(mode == INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
4632 maybe_elms = AllocateFixedArrayWithHoles(capacity);
4635 if (!maybe_elms->To(&elms)) return maybe_elms;
4637 array->set_elements(elms);
4638 array->set_length(Smi::FromInt(length));
4643 MaybeObject* Heap::AllocateJSArrayAndStorageWithAllocationSite(
4644 ElementsKind elements_kind,
4647 Handle<Object> allocation_site_payload,
4648 ArrayStorageAllocationMode mode) {
4649 MaybeObject* maybe_array = AllocateJSArrayWithAllocationSite(elements_kind,
4650 allocation_site_payload);
4652 if (!maybe_array->To(&array)) return maybe_array;
4653 return AllocateJSArrayStorage(array, length, capacity, mode);
4657 MaybeObject* Heap::AllocateJSArrayStorage(
4661 ArrayStorageAllocationMode mode) {
4662 ASSERT(capacity >= length);
4664 if (capacity == 0) {
4665 array->set_length(Smi::FromInt(0));
4666 array->set_elements(empty_fixed_array());
4670 FixedArrayBase* elms;
4671 MaybeObject* maybe_elms = NULL;
4672 ElementsKind elements_kind = array->GetElementsKind();
4673 if (IsFastDoubleElementsKind(elements_kind)) {
4674 if (mode == DONT_INITIALIZE_ARRAY_ELEMENTS) {
4675 maybe_elms = AllocateUninitializedFixedDoubleArray(capacity);
4677 ASSERT(mode == INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
4678 maybe_elms = AllocateFixedDoubleArrayWithHoles(capacity);
4681 ASSERT(IsFastSmiOrObjectElementsKind(elements_kind));
4682 if (mode == DONT_INITIALIZE_ARRAY_ELEMENTS) {
4683 maybe_elms = AllocateUninitializedFixedArray(capacity);
4685 ASSERT(mode == INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
4686 maybe_elms = AllocateFixedArrayWithHoles(capacity);
4689 if (!maybe_elms->To(&elms)) return maybe_elms;
4691 array->set_elements(elms);
4692 array->set_length(Smi::FromInt(length));
4697 MaybeObject* Heap::AllocateJSArrayWithElements(
4698 FixedArrayBase* elements,
4699 ElementsKind elements_kind,
4701 PretenureFlag pretenure) {
4702 MaybeObject* maybe_array = AllocateJSArray(elements_kind, pretenure);
4704 if (!maybe_array->To(&array)) return maybe_array;
4706 array->set_elements(elements);
4707 array->set_length(Smi::FromInt(length));
4708 array->ValidateElements();
4713 MaybeObject* Heap::AllocateJSProxy(Object* handler, Object* prototype) {
4715 // TODO(rossberg): Once we optimize proxies, think about a scheme to share
4716 // maps. Will probably depend on the identity of the handler object, too.
4718 MaybeObject* maybe_map_obj = AllocateMap(JS_PROXY_TYPE, JSProxy::kSize);
4719 if (!maybe_map_obj->To<Map>(&map)) return maybe_map_obj;
4720 map->set_prototype(prototype);
4722 // Allocate the proxy object.
4724 MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
4725 if (!maybe_result->To<JSProxy>(&result)) return maybe_result;
4726 result->InitializeBody(map->instance_size(), Smi::FromInt(0));
4727 result->set_handler(handler);
4728 result->set_hash(undefined_value(), SKIP_WRITE_BARRIER);
4733 MaybeObject* Heap::AllocateJSFunctionProxy(Object* handler,
4735 Object* construct_trap,
4736 Object* prototype) {
4738 // TODO(rossberg): Once we optimize proxies, think about a scheme to share
4739 // maps. Will probably depend on the identity of the handler object, too.
4741 MaybeObject* maybe_map_obj =
4742 AllocateMap(JS_FUNCTION_PROXY_TYPE, JSFunctionProxy::kSize);
4743 if (!maybe_map_obj->To<Map>(&map)) return maybe_map_obj;
4744 map->set_prototype(prototype);
4746 // Allocate the proxy object.
4747 JSFunctionProxy* result;
4748 MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
4749 if (!maybe_result->To<JSFunctionProxy>(&result)) return maybe_result;
4750 result->InitializeBody(map->instance_size(), Smi::FromInt(0));
4751 result->set_handler(handler);
4752 result->set_hash(undefined_value(), SKIP_WRITE_BARRIER);
4753 result->set_call_trap(call_trap);
4754 result->set_construct_trap(construct_trap);
4759 MaybeObject* Heap::AllocateGlobalObject(JSFunction* constructor) {
4760 ASSERT(constructor->has_initial_map());
4761 Map* map = constructor->initial_map();
4762 ASSERT(map->is_dictionary_map());
4764 // Make sure no field properties are described in the initial map.
4765 // This guarantees us that normalizing the properties does not
4766 // require us to change property values to PropertyCells.
4767 ASSERT(map->NextFreePropertyIndex() == 0);
4769 // Make sure we don't have a ton of pre-allocated slots in the
4770 // global objects. They will be unused once we normalize the object.
4771 ASSERT(map->unused_property_fields() == 0);
4772 ASSERT(map->inobject_properties() == 0);
4774 // Initial size of the backing store to avoid resizing the storage during
4775 // bootstrapping. The size differs between the JS global object and the builtins object.
4777 int initial_size = map->instance_type() == JS_GLOBAL_OBJECT_TYPE ? 64 : 512;
4779 // Allocate a dictionary object for backing storage.
4780 NameDictionary* dictionary;
4781 MaybeObject* maybe_dictionary =
4782 NameDictionary::Allocate(
4784 map->NumberOfOwnDescriptors() * 2 + initial_size);
4785 if (!maybe_dictionary->To(&dictionary)) return maybe_dictionary;
4787 // The global object might be created from an object template with accessors.
4788 // Fill these accessors into the dictionary.
4789 DescriptorArray* descs = map->instance_descriptors();
4790 for (int i = 0; i < map->NumberOfOwnDescriptors(); i++) {
4791 PropertyDetails details = descs->GetDetails(i);
4792 ASSERT(details.type() == CALLBACKS); // Only accessors are expected.
4793 PropertyDetails d = PropertyDetails(details.attributes(), CALLBACKS, i + 1);
4794 Object* value = descs->GetCallbacksObject(i);
4795 MaybeObject* maybe_value = AllocatePropertyCell(value);
4796 if (!maybe_value->ToObject(&value)) return maybe_value;
4798 MaybeObject* maybe_added = dictionary->Add(descs->GetKey(i), value, d);
4799 if (!maybe_added->To(&dictionary)) return maybe_added;
4802 // Allocate the global object and initialize it with the backing store.
4804 MaybeObject* maybe_global = Allocate(map, OLD_POINTER_SPACE);
4805 if (!maybe_global->To(&global)) return maybe_global;
4807 InitializeJSObjectFromMap(global, dictionary, map);
4809 // Create a new map for the global object.
4811 MaybeObject* maybe_map = map->CopyDropDescriptors();
4812 if (!maybe_map->To(&new_map)) return maybe_map;
4813 new_map->set_dictionary_map(true);
4815 // Set up the global object as a normalized object.
4816 global->set_map(new_map);
4817 global->set_properties(dictionary);
4819 // Make sure result is a global object with properties in dictionary.
4820 ASSERT(global->IsGlobalObject());
4821 ASSERT(!global->HasFastProperties());
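// Editorial sketch, not part of the original heap.cc: the dictionary above is
// sized so that bootstrapping never has to grow it, using two slots per
// existing accessor descriptor plus 64 entries of slack for the JS global
// object and 512 for the builtins object, matching the ternary above.
static int SketchGlobalDictionarySize(int number_of_own_descriptors,
                                      bool is_js_global_object) {
  int initial_size = is_js_global_object ? 64 : 512;
  return number_of_own_descriptors * 2 + initial_size;
}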
4826 MaybeObject* Heap::CopyJSObject(JSObject* source) {
4827 // Never used to copy functions. If functions need to be copied we
4828 // have to be careful to clear the literals array.
4829 SLOW_ASSERT(!source->IsJSFunction());
4832 Map* map = source->map();
4833 int object_size = map->instance_size();
4836 WriteBarrierMode wb_mode = UPDATE_WRITE_BARRIER;
4838 // If we're forced to always allocate, we use the general allocation
4839 // functions which may leave us with an object in old space.
4840 if (always_allocate()) {
4841 { MaybeObject* maybe_clone =
4842 AllocateRaw(object_size, NEW_SPACE, OLD_POINTER_SPACE);
4843 if (!maybe_clone->ToObject(&clone)) return maybe_clone;
4845 Address clone_address = HeapObject::cast(clone)->address();
4846 CopyBlock(clone_address,
4849 // Update write barrier for all fields that lie beyond the header.
4850 RecordWrites(clone_address,
4851 JSObject::kHeaderSize,
4852 (object_size - JSObject::kHeaderSize) / kPointerSize);
4854 wb_mode = SKIP_WRITE_BARRIER;
4856 { MaybeObject* maybe_clone = new_space_.AllocateRaw(object_size);
4857 if (!maybe_clone->ToObject(&clone)) return maybe_clone;
4859 SLOW_ASSERT(InNewSpace(clone));
4860 // Since we know the clone is allocated in new space, we can copy
4861 // the contents without worrying about updating the write barrier.
4862 CopyBlock(HeapObject::cast(clone)->address(),
4868 JSObject::cast(clone)->GetElementsKind() == source->GetElementsKind());
4869 FixedArrayBase* elements = FixedArrayBase::cast(source->elements());
4870 FixedArray* properties = FixedArray::cast(source->properties());
4871 // Update elements if necessary.
4872 if (elements->length() > 0) {
4874 { MaybeObject* maybe_elem;
4875 if (elements->map() == fixed_cow_array_map()) {
4876 maybe_elem = FixedArray::cast(elements);
4877 } else if (source->HasFastDoubleElements()) {
4878 maybe_elem = CopyFixedDoubleArray(FixedDoubleArray::cast(elements));
4880 maybe_elem = CopyFixedArray(FixedArray::cast(elements));
4882 if (!maybe_elem->ToObject(&elem)) return maybe_elem;
4884 JSObject::cast(clone)->set_elements(FixedArrayBase::cast(elem), wb_mode);
4886 // Update properties if necessary.
4887 if (properties->length() > 0) {
4889 { MaybeObject* maybe_prop = CopyFixedArray(properties);
4890 if (!maybe_prop->ToObject(&prop)) return maybe_prop;
4892 JSObject::cast(clone)->set_properties(FixedArray::cast(prop), wb_mode);
4894 // Return the new clone.
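// Editorial sketch, not part of the original heap.cc: the clone path above
// only issues write-barrier work when the copy may have landed outside new
// space; a new-space clone can be filled with CopyBlock and no barriers. In
// the slow path, the slot count handed to RecordWrites is simply the number of
// pointer-sized fields that follow the header, as mirrored here.
static int SketchRecordedSlotCount(int object_size, int header_size,
                                   int pointer_size) {
  return (object_size - header_size) / pointer_size;
}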
4899 MaybeObject* Heap::CopyJSObjectWithAllocationSite(JSObject* source) {
4900 // Never used to copy functions. If functions need to be copied we
4901 // have to be careful to clear the literals array.
4902 SLOW_ASSERT(!source->IsJSFunction());
4905 Map* map = source->map();
4906 int object_size = map->instance_size();
4909 ASSERT(map->CanTrackAllocationSite());
4910 ASSERT(map->instance_type() == JS_ARRAY_TYPE);
4911 WriteBarrierMode wb_mode = UPDATE_WRITE_BARRIER;
4913 // If we're forced to always allocate, we use the general allocation
4914 // functions which may leave us with an object in old space.
4915 int adjusted_object_size = object_size;
4916 if (always_allocate()) {
4917 // We'll only track origin if we are certain to allocate in new space
4918 const int kMinFreeNewSpaceAfterGC = InitialSemiSpaceSize() * 3/4;
4919 if ((object_size + AllocationSiteInfo::kSize) < kMinFreeNewSpaceAfterGC) {
4920 adjusted_object_size += AllocationSiteInfo::kSize;
4923 { MaybeObject* maybe_clone =
4924 AllocateRaw(adjusted_object_size, NEW_SPACE, OLD_POINTER_SPACE);
4925 if (!maybe_clone->ToObject(&clone)) return maybe_clone;
4927 Address clone_address = HeapObject::cast(clone)->address();
4928 CopyBlock(clone_address,
4931 // Update write barrier for all fields that lie beyond the header.
4932 int write_barrier_offset = adjusted_object_size > object_size
4933 ? JSArray::kSize + AllocationSiteInfo::kSize
4934 : JSObject::kHeaderSize;
4935 if (((object_size - write_barrier_offset) / kPointerSize) > 0) {
4936 RecordWrites(clone_address,
4937 write_barrier_offset,
4938 (object_size - write_barrier_offset) / kPointerSize);
4941 // Track allocation site information, if we failed to allocate it inline.
4942 if (InNewSpace(clone) &&
4943 adjusted_object_size == object_size) {
4944 MaybeObject* maybe_alloc_info =
4945 AllocateStruct(ALLOCATION_SITE_INFO_TYPE);
4946 AllocationSiteInfo* alloc_info;
4947 if (maybe_alloc_info->To(&alloc_info)) {
4948 alloc_info->set_map_no_write_barrier(allocation_site_info_map());
4949 alloc_info->set_payload(source, SKIP_WRITE_BARRIER);
4953 wb_mode = SKIP_WRITE_BARRIER;
4954 adjusted_object_size += AllocationSiteInfo::kSize;
4956 { MaybeObject* maybe_clone = new_space_.AllocateRaw(adjusted_object_size);
4957 if (!maybe_clone->ToObject(&clone)) return maybe_clone;
4959 SLOW_ASSERT(InNewSpace(clone));
4960 // Since we know the clone is allocated in new space, we can copy
4961 // the contents without worrying about updating the write barrier.
4962 CopyBlock(HeapObject::cast(clone)->address(),
4967 if (adjusted_object_size > object_size) {
4968 AllocationSiteInfo* alloc_info = reinterpret_cast<AllocationSiteInfo*>(
4969 reinterpret_cast<Address>(clone) + object_size);
4970 alloc_info->set_map_no_write_barrier(allocation_site_info_map());
4971 alloc_info->set_payload(source, SKIP_WRITE_BARRIER);
4975 JSObject::cast(clone)->GetElementsKind() == source->GetElementsKind());
4976 FixedArrayBase* elements = FixedArrayBase::cast(source->elements());
4977 FixedArray* properties = FixedArray::cast(source->properties());
4978 // Update elements if necessary.
4979 if (elements->length() > 0) {
4981 { MaybeObject* maybe_elem;
4982 if (elements->map() == fixed_cow_array_map()) {
4983 maybe_elem = FixedArray::cast(elements);
4984 } else if (source->HasFastDoubleElements()) {
4985 maybe_elem = CopyFixedDoubleArray(FixedDoubleArray::cast(elements));
4987 maybe_elem = CopyFixedArray(FixedArray::cast(elements));
4989 if (!maybe_elem->ToObject(&elem)) return maybe_elem;
4991 JSObject::cast(clone)->set_elements(FixedArrayBase::cast(elem), wb_mode);
4993 // Update properties if necessary.
4994 if (properties->length() > 0) {
4996 { MaybeObject* maybe_prop = CopyFixedArray(properties);
4997 if (!maybe_prop->ToObject(&prop)) return maybe_prop;
4999 JSObject::cast(clone)->set_properties(FixedArray::cast(prop), wb_mode);
5001 // Return the new clone.
5006 MaybeObject* Heap::ReinitializeJSReceiver(
5007 JSReceiver* object, InstanceType type, int size) {
5008 ASSERT(type >= FIRST_JS_OBJECT_TYPE);
5010 // Allocate fresh map.
5011 // TODO(rossberg): Once we optimize proxies, cache these maps.
5013 MaybeObject* maybe = AllocateMap(type, size);
5014 if (!maybe->To<Map>(&map)) return maybe;
5016 // Check that the receiver has at least the size of the fresh object.
5017 int size_difference = object->map()->instance_size() - map->instance_size();
5018 ASSERT(size_difference >= 0);
5020 map->set_prototype(object->map()->prototype());
5022 // Allocate the backing storage for the properties.
5023 int prop_size = map->unused_property_fields() - map->inobject_properties();
5025 maybe = AllocateFixedArray(prop_size, TENURED);
5026 if (!maybe->ToObject(&properties)) return maybe;
5028 // Functions require some allocation, which might fail here.
5029 SharedFunctionInfo* shared = NULL;
5030 if (type == JS_FUNCTION_TYPE) {
5033 InternalizeOneByteString(STATIC_ASCII_VECTOR("<freezing call trap>"));
5034 if (!maybe->To<String>(&name)) return maybe;
5035 maybe = AllocateSharedFunctionInfo(name);
5036 if (!maybe->To<SharedFunctionInfo>(&shared)) return maybe;
5039 // Because of possible retries of this function after failure,
5040 // we must NOT fail after this point, where we have changed the type!
5042 // Reset the map for the object.
5043 object->set_map(map);
5044 JSObject* jsobj = JSObject::cast(object);
5046 // Reinitialize the object from the constructor map.
5047 InitializeJSObjectFromMap(jsobj, FixedArray::cast(properties), map);
5049 // Functions require some minimal initialization.
5050 if (type == JS_FUNCTION_TYPE) {
5051 map->set_function_with_prototype(true);
5052 InitializeFunction(JSFunction::cast(object), shared, the_hole_value());
5053 JSFunction::cast(object)->set_context(
5054 isolate()->context()->native_context());
5057 // Put in filler if the new object is smaller than the old.
5058 if (size_difference > 0) {
5059 CreateFillerObjectAt(
5060 object->address() + map->instance_size(), size_difference);
5067 MaybeObject* Heap::ReinitializeJSGlobalProxy(JSFunction* constructor,
5068 JSGlobalProxy* object) {
5069 ASSERT(constructor->has_initial_map());
5070 Map* map = constructor->initial_map();
5072 // Check that the already allocated object has the same size and type as
5073 // objects allocated using the constructor.
5074 ASSERT(map->instance_size() == object->map()->instance_size());
5075 ASSERT(map->instance_type() == object->map()->instance_type());
5077 // Allocate the backing storage for the properties.
5078 int prop_size = map->unused_property_fields() - map->inobject_properties();
5080 { MaybeObject* maybe_properties = AllocateFixedArray(prop_size, TENURED);
5081 if (!maybe_properties->ToObject(&properties)) return maybe_properties;
5084 // Reset the map for the object.
5085 object->set_map(constructor->initial_map());
5087 // Reinitialize the object from the constructor map.
5088 InitializeJSObjectFromMap(object, FixedArray::cast(properties), map);
5093 MaybeObject* Heap::AllocateStringFromOneByte(Vector<const uint8_t> string,
5094 PretenureFlag pretenure) {
5095 int length = string.length();
5097 return Heap::LookupSingleCharacterStringFromCode(string[0]);
5100 { MaybeObject* maybe_result =
5101 AllocateRawOneByteString(string.length(), pretenure);
5102 if (!maybe_result->ToObject(&result)) return maybe_result;
5105 // Copy the characters into the new object.
5106 CopyChars(SeqOneByteString::cast(result)->GetChars(),
5113 MaybeObject* Heap::AllocateStringFromUtf8Slow(Vector<const char> string,
5114 int non_ascii_start,
5115 PretenureFlag pretenure) {
5116 // Continue counting the number of characters in the UTF-8 string, starting
5117 // from the first non-ascii character or word.
5118 Access<UnicodeCache::Utf8Decoder>
5119 decoder(isolate_->unicode_cache()->utf8_decoder());
5120 decoder->Reset(string.start() + non_ascii_start,
5121 string.length() - non_ascii_start);
5122 int utf16_length = decoder->Utf16Length();
5123 ASSERT(utf16_length > 0);
5127 int chars = non_ascii_start + utf16_length;
5128 MaybeObject* maybe_result = AllocateRawTwoByteString(chars, pretenure);
5129 if (!maybe_result->ToObject(&result)) return maybe_result;
5131 // Convert and copy the characters into the new object.
5132 SeqTwoByteString* twobyte = SeqTwoByteString::cast(result);
5133 // Copy ascii portion.
5134 uint16_t* data = twobyte->GetChars();
5135 if (non_ascii_start != 0) {
5136 const char* ascii_data = string.start();
5137 for (int i = 0; i < non_ascii_start; i++) {
5138 *data++ = *ascii_data++;
5141 // Now write the remainder.
5142 decoder->WriteUtf16(data, utf16_length);
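// Editorial sketch, not part of the original heap.cc: the decoder is asked for
// the UTF-16 length because it differs from the remaining byte count. A
// multi-byte UTF-8 sequence collapses to one UTF-16 code unit unless the code
// point lies above U+FFFF, in which case it expands to a surrogate pair of
// two units. A hedged per-code-point version of that counting rule:
static int SketchUtf16UnitsFor(uint32_t code_point) {
  const uint32_t kMaxBmpCodePoint = 0xFFFF;  // largest single-unit code point
  return code_point > kMaxBmpCodePoint ? 2 : 1;
}
// Example: U+0041 and U+20AC each take one code unit, U+1F600 takes two.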
5147 MaybeObject* Heap::AllocateStringFromTwoByte(Vector<const uc16> string,
5148 PretenureFlag pretenure) {
5149 // Check if the string is an ASCII string.
5151 int length = string.length();
5152 const uc16* start = string.start();
5154 if (String::IsOneByte(start, length)) {
5155 MaybeObject* maybe_result = AllocateRawOneByteString(length, pretenure);
5156 if (!maybe_result->ToObject(&result)) return maybe_result;
5157 CopyChars(SeqOneByteString::cast(result)->GetChars(), start, length);
5158 } else { // It's not a one byte string.
5159 MaybeObject* maybe_result = AllocateRawTwoByteString(length, pretenure);
5160 if (!maybe_result->ToObject(&result)) return maybe_result;
5161 CopyChars(SeqTwoByteString::cast(result)->GetChars(), start, length);
5167 Map* Heap::InternalizedStringMapForString(String* string) {
5168 // If the string is in new space it cannot be used as internalized.
5169 if (InNewSpace(string)) return NULL;
5171 // Find the corresponding internalized string map for strings.
5172 switch (string->map()->instance_type()) {
5173 case STRING_TYPE: return internalized_string_map();
5174 case ASCII_STRING_TYPE: return ascii_internalized_string_map();
5175 case CONS_STRING_TYPE: return cons_internalized_string_map();
5176 case CONS_ASCII_STRING_TYPE: return cons_ascii_internalized_string_map();
5177 case EXTERNAL_STRING_TYPE: return external_internalized_string_map();
5178 case EXTERNAL_ASCII_STRING_TYPE:
5179 return external_ascii_internalized_string_map();
5180 case EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE:
5181 return external_internalized_string_with_one_byte_data_map();
5182 case SHORT_EXTERNAL_STRING_TYPE:
5183 return short_external_internalized_string_map();
5184 case SHORT_EXTERNAL_ASCII_STRING_TYPE:
5185 return short_external_ascii_internalized_string_map();
5186 case SHORT_EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE:
5187 return short_external_internalized_string_with_one_byte_data_map();
5188 default: return NULL; // No match found.
5193 static inline void WriteOneByteData(Vector<const char> vector,
5196 // Only works for ascii.
5197 ASSERT(vector.length() == len);
5198 OS::MemCopy(chars, vector.start(), len);
5201 static inline void WriteTwoByteData(Vector<const char> vector,
5204 const uint8_t* stream = reinterpret_cast<const uint8_t*>(vector.start());
5205 unsigned stream_length = vector.length();
5206 while (stream_length != 0) {
5207 unsigned consumed = 0;
5208 uint32_t c = unibrow::Utf8::ValueOf(stream, stream_length, &consumed);
5209 ASSERT(c != unibrow::Utf8::kBadChar);
5210 ASSERT(consumed <= stream_length);
5211 stream_length -= consumed;
5213 if (c > unibrow::Utf16::kMaxNonSurrogateCharCode) {
5216 *chars++ = unibrow::Utf16::LeadSurrogate(c);
5217 *chars++ = unibrow::Utf16::TrailSurrogate(c);
5224 ASSERT(stream_length == 0);
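// Editorial sketch, not part of the original heap.cc: LeadSurrogate and
// TrailSurrogate above split a supplementary code point into a UTF-16
// surrogate pair. These are the standard formulas, shown standalone rather
// than as V8's exact helpers.
static void SketchEncodeSurrogatePair(uint32_t code_point,
                                      uint16_t* lead, uint16_t* trail) {
  uint32_t v = code_point - 0x10000;                     // 20 payload bits
  *lead = static_cast<uint16_t>(0xD800 + (v >> 10));     // high 10 bits
  *trail = static_cast<uint16_t>(0xDC00 + (v & 0x3FF));  // low 10 bits
}
// Example: U+1F600 encodes as lead 0xD83D, trail 0xDE00.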
5229 static inline void WriteOneByteData(String* s, uint8_t* chars, int len) {
5230 ASSERT(s->length() == len);
5231 String::WriteToFlat(s, chars, 0, len);
5234 static inline void WriteTwoByteData(String* s, uint16_t* chars, int len) {
5235 ASSERT(s->length() == len);
5236 String::WriteToFlat(s, chars, 0, len);
5240 template<bool is_one_byte, typename T>
5241 MaybeObject* Heap::AllocateInternalizedStringImpl(
5242 T t, int chars, uint32_t hash_field) {
5244 // Compute map and object size.
5249 if (chars > SeqOneByteString::kMaxLength) {
5250 return Failure::OutOfMemoryException(0x9);
5252 map = ascii_internalized_string_map();
5253 size = SeqOneByteString::SizeFor(chars);
5255 if (chars > SeqTwoByteString::kMaxLength) {
5256 return Failure::OutOfMemoryException(0xa);
5258 map = internalized_string_map();
5259 size = SeqTwoByteString::SizeFor(chars);
5264 { MaybeObject* maybe_result = (size > Page::kMaxNonCodeHeapObjectSize)
5265 ? lo_space_->AllocateRaw(size, NOT_EXECUTABLE)
5266 : old_data_space_->AllocateRaw(size);
5267 if (!maybe_result->ToObject(&result)) return maybe_result;
5270 reinterpret_cast<HeapObject*>(result)->set_map_no_write_barrier(map);
5271 // Set length and hash fields of the allocated string.
5272 String* answer = String::cast(result);
5273 answer->set_length(chars);
5274 answer->set_hash_field(hash_field);
5276 ASSERT_EQ(size, answer->Size());
5279 WriteOneByteData(t, SeqOneByteString::cast(answer)->GetChars(), chars);
5281 WriteTwoByteData(t, SeqTwoByteString::cast(answer)->GetChars(), chars);
5287 // Need explicit instantiations.
5289 MaybeObject* Heap::AllocateInternalizedStringImpl<true>(String*, int, uint32_t);
5291 MaybeObject* Heap::AllocateInternalizedStringImpl<false>(
5292 String*, int, uint32_t);
5294 MaybeObject* Heap::AllocateInternalizedStringImpl<false>(
5295 Vector<const char>, int, uint32_t);
5298 MaybeObject* Heap::AllocateRawOneByteString(int length,
5299 PretenureFlag pretenure) {
5300 if (length < 0 || length > SeqOneByteString::kMaxLength) {
5301 return Failure::OutOfMemoryException(0xb);
5304 int size = SeqOneByteString::SizeFor(length);
5305 ASSERT(size <= SeqOneByteString::kMaxSize);
5307 AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
5308 AllocationSpace retry_space = OLD_DATA_SPACE;
5310 if (space == NEW_SPACE) {
5311 if (size > kMaxObjectSizeInNewSpace) {
5312 // Allocate in large object space; the retry space will be ignored.
5314 } else if (size > Page::kMaxNonCodeHeapObjectSize) {
5315 // Allocate in new space, retry in large object space.
5316 retry_space = LO_SPACE;
5318 } else if (space == OLD_DATA_SPACE &&
5319 size > Page::kMaxNonCodeHeapObjectSize) {
5323 { MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
5324 if (!maybe_result->ToObject(&result)) return maybe_result;
5327 // Partially initialize the object.
5328 HeapObject::cast(result)->set_map_no_write_barrier(ascii_string_map());
5329 String::cast(result)->set_length(length);
5330 String::cast(result)->set_hash_field(String::kEmptyHashField);
5331 ASSERT_EQ(size, HeapObject::cast(result)->Size());
5337 MaybeObject* Heap::AllocateRawTwoByteString(int length,
5338 PretenureFlag pretenure) {
5339 if (length < 0 || length > SeqTwoByteString::kMaxLength) {
5340 return Failure::OutOfMemoryException(0xc);
5342 int size = SeqTwoByteString::SizeFor(length);
5343 ASSERT(size <= SeqTwoByteString::kMaxSize);
5344 AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
5345 AllocationSpace retry_space = OLD_DATA_SPACE;
5347 if (space == NEW_SPACE) {
5348 if (size > kMaxObjectSizeInNewSpace) {
5349 // Allocate in large object space; the retry space will be ignored.
5351 } else if (size > Page::kMaxNonCodeHeapObjectSize) {
5352 // Allocate in new space, retry in large object space.
5353 retry_space = LO_SPACE;
5355 } else if (space == OLD_DATA_SPACE &&
5356 size > Page::kMaxNonCodeHeapObjectSize) {
5360 { MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
5361 if (!maybe_result->ToObject(&result)) return maybe_result;
5364 // Partially initialize the object.
5365 HeapObject::cast(result)->set_map_no_write_barrier(string_map());
5366 String::cast(result)->set_length(length);
5367 String::cast(result)->set_hash_field(String::kEmptyHashField);
5368 ASSERT_EQ(size, HeapObject::cast(result)->Size());
5373 MaybeObject* Heap::AllocateJSArray(
5374 ElementsKind elements_kind,
5375 PretenureFlag pretenure) {
5376 Context* native_context = isolate()->context()->native_context();
5377 JSFunction* array_function = native_context->array_function();
5378 Map* map = array_function->initial_map();
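// Prefer an initial map that has already been transitioned to the requested
// elements kind, if the isolate caches one.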
5379 Map* transition_map = isolate()->get_initial_js_array_map(elements_kind);
5380 if (transition_map != NULL) map = transition_map;
5381 return AllocateJSObjectFromMap(map, pretenure);
5385 MaybeObject* Heap::AllocateJSArrayWithAllocationSite(
5386 ElementsKind elements_kind,
5387 Handle<Object> allocation_site_info_payload) {
5388 Context* native_context = isolate()->context()->native_context();
5389 JSFunction* array_function = native_context->array_function();
5390 Map* map = array_function->initial_map();
5391 Object* maybe_map_array = native_context->js_array_maps();
5392 if (!maybe_map_array->IsUndefined()) {
5393 Object* maybe_transitioned_map =
5394 FixedArray::cast(maybe_map_array)->get(elements_kind);
5395 if (!maybe_transitioned_map->IsUndefined()) {
5396 map = Map::cast(maybe_transitioned_map);
5399 return AllocateJSObjectFromMapWithAllocationSite(map,
5400 allocation_site_info_payload);
5404 MaybeObject* Heap::AllocateEmptyFixedArray() {
5405 int size = FixedArray::SizeFor(0);
5407 { MaybeObject* maybe_result =
5408 AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE);
5409 if (!maybe_result->ToObject(&result)) return maybe_result;
5411 // Initialize the object.
5412 reinterpret_cast<FixedArray*>(result)->set_map_no_write_barrier(
5414 reinterpret_cast<FixedArray*>(result)->set_length(0);
5418 MaybeObject* Heap::AllocateEmptyExternalArray(ExternalArrayType array_type) {
5419 return AllocateExternalArray(0, array_type, NULL, TENURED);
5423 MaybeObject* Heap::AllocateRawFixedArray(int length) {
5424 if (length < 0 || length > FixedArray::kMaxLength) {
5425 return Failure::OutOfMemoryException(0xd);
5428 // Use the general function if we're forced to always allocate.
5429 if (always_allocate()) return AllocateFixedArray(length, TENURED);
5430 // Allocate the raw data for a fixed array.
5431 int size = FixedArray::SizeFor(length);
5432 return size <= kMaxObjectSizeInNewSpace
5433 ? new_space_.AllocateRaw(size)
5434 : lo_space_->AllocateRaw(size, NOT_EXECUTABLE);
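// Copies |src| into a new fixed array under the given map. In new space the
// body is copied as raw words (no write barrier needed); otherwise each
// element is copied individually with the destination's write barrier mode.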
5438 MaybeObject* Heap::CopyFixedArrayWithMap(FixedArray* src, Map* map) {
5439 int len = src->length();
5441 { MaybeObject* maybe_obj = AllocateRawFixedArray(len);
5442 if (!maybe_obj->ToObject(&obj)) return maybe_obj;
5444 if (InNewSpace(obj)) {
5445 HeapObject* dst = HeapObject::cast(obj);
5446 dst->set_map_no_write_barrier(map);
5447 CopyBlock(dst->address() + kPointerSize,
5448 src->address() + kPointerSize,
5449 FixedArray::SizeFor(len) - kPointerSize);
5452 HeapObject::cast(obj)->set_map_no_write_barrier(map);
5453 FixedArray* result = FixedArray::cast(obj);
5454 result->set_length(len);
5457 DisallowHeapAllocation no_gc;
5458 WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
5459 for (int i = 0; i < len; i++) result->set(i, src->get(i), mode);
5464 MaybeObject* Heap::CopyFixedDoubleArrayWithMap(FixedDoubleArray* src,
5466 int len = src->length();
5468 { MaybeObject* maybe_obj = AllocateRawFixedDoubleArray(len, NOT_TENURED);
5469 if (!maybe_obj->ToObject(&obj)) return maybe_obj;
5471 HeapObject* dst = HeapObject::cast(obj);
5472 dst->set_map_no_write_barrier(map);
5474 dst->address() + FixedDoubleArray::kLengthOffset,
5475 src->address() + FixedDoubleArray::kLengthOffset,
5476 FixedDoubleArray::SizeFor(len) - FixedDoubleArray::kLengthOffset);
5481 MaybeObject* Heap::AllocateFixedArray(int length) {
5482 ASSERT(length >= 0);
5483 if (length == 0) return empty_fixed_array();
5485 { MaybeObject* maybe_result = AllocateRawFixedArray(length);
5486 if (!maybe_result->ToObject(&result)) return maybe_result;
5488 // Initialize header.
5489 FixedArray* array = reinterpret_cast<FixedArray*>(result);
5490 array->set_map_no_write_barrier(fixed_array_map());
5491 array->set_length(length);
5493 ASSERT(!InNewSpace(undefined_value()));
5494 MemsetPointer(array->data_start(), undefined_value(), length);
5499 MaybeObject* Heap::AllocateRawFixedArray(int length, PretenureFlag pretenure) {
5500 if (length < 0 || length > FixedArray::kMaxLength) {
5501 return Failure::OutOfMemoryException(0xe);
5504 AllocationSpace space =
5505 (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
5506 int size = FixedArray::SizeFor(length);
5507 if (space == NEW_SPACE && size > kMaxObjectSizeInNewSpace) {
5508 // Too big for new space.
5510 } else if (space == OLD_POINTER_SPACE &&
5511 size > Page::kMaxNonCodeHeapObjectSize) {
5512 // Too big for old pointer space.
5516 AllocationSpace retry_space =
5517 (size <= Page::kMaxNonCodeHeapObjectSize) ? OLD_POINTER_SPACE : LO_SPACE;
5519 return AllocateRaw(size, space, retry_space);
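// Allocates a fixed array of the given length and initializes every element
// with |filler|. The filler must live outside new space so that the
// initializing stores need no write barrier.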
5523 MUST_USE_RESULT static MaybeObject* AllocateFixedArrayWithFiller(
5526 PretenureFlag pretenure,
5528 ASSERT(length >= 0);
5529 ASSERT(heap->empty_fixed_array()->IsFixedArray());
5530 if (length == 0) return heap->empty_fixed_array();
5532 ASSERT(!heap->InNewSpace(filler));
5534 { MaybeObject* maybe_result = heap->AllocateRawFixedArray(length, pretenure);
5535 if (!maybe_result->ToObject(&result)) return maybe_result;
5538 HeapObject::cast(result)->set_map_no_write_barrier(heap->fixed_array_map());
5539 FixedArray* array = FixedArray::cast(result);
5540 array->set_length(length);
5541 MemsetPointer(array->data_start(), filler, length);
5546 MaybeObject* Heap::AllocateFixedArray(int length, PretenureFlag pretenure) {
5547 return AllocateFixedArrayWithFiller(this,
5554 MaybeObject* Heap::AllocateFixedArrayWithHoles(int length,
5555 PretenureFlag pretenure) {
5556 return AllocateFixedArrayWithFiller(this,
5563 MaybeObject* Heap::AllocateUninitializedFixedArray(int length) {
5564 if (length == 0) return empty_fixed_array();
5567 { MaybeObject* maybe_obj = AllocateRawFixedArray(length);
5568 if (!maybe_obj->ToObject(&obj)) return maybe_obj;
5571 reinterpret_cast<FixedArray*>(obj)->set_map_no_write_barrier(
5573 FixedArray::cast(obj)->set_length(length);
5578 MaybeObject* Heap::AllocateEmptyFixedDoubleArray() {
5579 int size = FixedDoubleArray::SizeFor(0);
5581 { MaybeObject* maybe_result =
5582 AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE);
5583 if (!maybe_result->ToObject(&result)) return maybe_result;
5585 // Initialize the object.
5586 reinterpret_cast<FixedDoubleArray*>(result)->set_map_no_write_barrier(
5587 fixed_double_array_map());
5588 reinterpret_cast<FixedDoubleArray*>(result)->set_length(0);
5593 MaybeObject* Heap::AllocateUninitializedFixedDoubleArray(
5595 PretenureFlag pretenure) {
5596 if (length == 0) return empty_fixed_array();
5598 Object* elements_object;
5599 MaybeObject* maybe_obj = AllocateRawFixedDoubleArray(length, pretenure);
5600 if (!maybe_obj->ToObject(&elements_object)) return maybe_obj;
5601 FixedDoubleArray* elements =
5602 reinterpret_cast<FixedDoubleArray*>(elements_object);
5604 elements->set_map_no_write_barrier(fixed_double_array_map());
5605 elements->set_length(length);
5610 MaybeObject* Heap::AllocateFixedDoubleArrayWithHoles(
5612 PretenureFlag pretenure) {
5613 if (length == 0) return empty_fixed_array();
5615 Object* elements_object;
5616 MaybeObject* maybe_obj = AllocateRawFixedDoubleArray(length, pretenure);
5617 if (!maybe_obj->ToObject(&elements_object)) return maybe_obj;
5618 FixedDoubleArray* elements =
5619 reinterpret_cast<FixedDoubleArray*>(elements_object);
5621 for (int i = 0; i < length; ++i) {
5622 elements->set_the_hole(i);
5625 elements->set_map_no_write_barrier(fixed_double_array_map());
5626 elements->set_length(length);
5631 MaybeObject* Heap::AllocateRawFixedDoubleArray(int length,
5632 PretenureFlag pretenure) {
5633 if (length < 0 || length > FixedDoubleArray::kMaxLength) {
5634 return Failure::OutOfMemoryException(0xf);
5637 AllocationSpace space =
5638 (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
5639 int size = FixedDoubleArray::SizeFor(length);
5641 #ifndef V8_HOST_ARCH_64_BIT
5642 size += kPointerSize;
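// On 32-bit hosts reserve one extra word so that the double payload can be
// aligned to an 8-byte boundary (see EnsureDoubleAligned below).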
5645 if (space == NEW_SPACE && size > kMaxObjectSizeInNewSpace) {
5646 // Too big for new space.
5648 } else if (space == OLD_DATA_SPACE &&
5649 size > Page::kMaxNonCodeHeapObjectSize) {
5650 // Too big for old data space.
5654 AllocationSpace retry_space =
5655 (size <= Page::kMaxNonCodeHeapObjectSize) ? OLD_DATA_SPACE : LO_SPACE;
5658 { MaybeObject* maybe_object = AllocateRaw(size, space, retry_space);
5659 if (!maybe_object->To<HeapObject>(&object)) return maybe_object;
5662 return EnsureDoubleAligned(this, object, size);
5666 MaybeObject* Heap::AllocateHashTable(int length, PretenureFlag pretenure) {
5668 { MaybeObject* maybe_result = AllocateFixedArray(length, pretenure);
5669 if (!maybe_result->ToObject(&result)) return maybe_result;
5671 reinterpret_cast<HeapObject*>(result)->set_map_no_write_barrier(
5673 ASSERT(result->IsHashTable());
5678 MaybeObject* Heap::AllocateSymbol() {
5679 // Statically ensure that it is safe to allocate symbols in paged spaces.
5680 STATIC_ASSERT(Symbol::kSize <= Page::kNonCodeObjectAreaSize);
5683 MaybeObject* maybe =
5684 AllocateRaw(Symbol::kSize, OLD_POINTER_SPACE, OLD_POINTER_SPACE);
5685 if (!maybe->ToObject(&result)) return maybe;
5687 HeapObject::cast(result)->set_map_no_write_barrier(symbol_map());
5689 // Generate a random hash value.
5693 hash = V8::RandomPrivate(isolate()) & Name::kHashBitMask;
5695 } while (hash == 0 && attempts < 30);
5696 if (hash == 0) hash = 1; // never return 0
5698 Symbol::cast(result)->set_hash_field(
5699 Name::kIsNotArrayIndexMask | (hash << Name::kHashShift));
5700 Symbol::cast(result)->set_name(undefined_value());
5702 ASSERT(result->IsSymbol());
5707 MaybeObject* Heap::AllocateNativeContext() {
5709 { MaybeObject* maybe_result =
5710 AllocateFixedArray(Context::NATIVE_CONTEXT_SLOTS);
5711 if (!maybe_result->ToObject(&result)) return maybe_result;
5713 Context* context = reinterpret_cast<Context*>(result);
5714 context->set_map_no_write_barrier(native_context_map());
5715 context->set_js_array_maps(undefined_value());
5716 ASSERT(context->IsNativeContext());
5717 ASSERT(result->IsContext());
5722 MaybeObject* Heap::AllocateGlobalContext(JSFunction* function,
5723 ScopeInfo* scope_info) {
5725 { MaybeObject* maybe_result =
5726 AllocateFixedArray(scope_info->ContextLength(), TENURED);
5727 if (!maybe_result->ToObject(&result)) return maybe_result;
5729 Context* context = reinterpret_cast<Context*>(result);
5730 context->set_map_no_write_barrier(global_context_map());
5731 context->set_closure(function);
5732 context->set_previous(function->context());
5733 context->set_extension(scope_info);
5734 context->set_global_object(function->context()->global_object());
5735 ASSERT(context->IsGlobalContext());
5736 ASSERT(result->IsContext());
5741 MaybeObject* Heap::AllocateModuleContext(ScopeInfo* scope_info) {
5743 { MaybeObject* maybe_result =
5744 AllocateFixedArray(scope_info->ContextLength(), TENURED);
5745 if (!maybe_result->ToObject(&result)) return maybe_result;
5747 Context* context = reinterpret_cast<Context*>(result);
5748 context->set_map_no_write_barrier(module_context_map());
5749 // Instance link will be set later.
5750 context->set_extension(Smi::FromInt(0));
5755 MaybeObject* Heap::AllocateFunctionContext(int length, JSFunction* function) {
5756 ASSERT(length >= Context::MIN_CONTEXT_SLOTS);
5758 { MaybeObject* maybe_result = AllocateFixedArray(length);
5759 if (!maybe_result->ToObject(&result)) return maybe_result;
5761 Context* context = reinterpret_cast<Context*>(result);
5762 context->set_map_no_write_barrier(function_context_map());
5763 context->set_closure(function);
5764 context->set_previous(function->context());
5765 context->set_extension(Smi::FromInt(0));
5766 context->set_global_object(function->context()->global_object());
5771 MaybeObject* Heap::AllocateCatchContext(JSFunction* function,
5774 Object* thrown_object) {
5775 STATIC_ASSERT(Context::MIN_CONTEXT_SLOTS == Context::THROWN_OBJECT_INDEX);
5777 { MaybeObject* maybe_result =
5778 AllocateFixedArray(Context::MIN_CONTEXT_SLOTS + 1);
5779 if (!maybe_result->ToObject(&result)) return maybe_result;
5781 Context* context = reinterpret_cast<Context*>(result);
5782 context->set_map_no_write_barrier(catch_context_map());
5783 context->set_closure(function);
5784 context->set_previous(previous);
5785 context->set_extension(name);
5786 context->set_global_object(previous->global_object());
5787 context->set(Context::THROWN_OBJECT_INDEX, thrown_object);
5792 MaybeObject* Heap::AllocateWithContext(JSFunction* function,
5794 JSObject* extension) {
5796 { MaybeObject* maybe_result = AllocateFixedArray(Context::MIN_CONTEXT_SLOTS);
5797 if (!maybe_result->ToObject(&result)) return maybe_result;
5799 Context* context = reinterpret_cast<Context*>(result);
5800 context->set_map_no_write_barrier(with_context_map());
5801 context->set_closure(function);
5802 context->set_previous(previous);
5803 context->set_extension(extension);
5804 context->set_global_object(previous->global_object());
5809 MaybeObject* Heap::AllocateBlockContext(JSFunction* function,
5811 ScopeInfo* scope_info) {
5813 { MaybeObject* maybe_result =
5814 AllocateFixedArrayWithHoles(scope_info->ContextLength());
5815 if (!maybe_result->ToObject(&result)) return maybe_result;
5817 Context* context = reinterpret_cast<Context*>(result);
5818 context->set_map_no_write_barrier(block_context_map());
5819 context->set_closure(function);
5820 context->set_previous(previous);
5821 context->set_extension(scope_info);
5822 context->set_global_object(previous->global_object());
5827 MaybeObject* Heap::AllocateScopeInfo(int length) {
5828 FixedArray* scope_info;
5829 MaybeObject* maybe_scope_info = AllocateFixedArray(length, TENURED);
5830 if (!maybe_scope_info->To(&scope_info)) return maybe_scope_info;
5831 scope_info->set_map_no_write_barrier(scope_info_map());
5836 MaybeObject* Heap::AllocateExternal(void* value) {
5838 { MaybeObject* maybe_result = AllocateForeign(static_cast<Address>(value));
5839 if (!maybe_result->To(&foreign)) return maybe_result;
5842 { MaybeObject* maybe_result = AllocateJSObjectFromMap(external_map());
5843 if (!maybe_result->To(&external)) return maybe_result;
5845 external->SetInternalField(0, foreign);
5850 MaybeObject* Heap::AllocateStruct(InstanceType type) {
5853 #define MAKE_CASE(NAME, Name, name) \
5854 case NAME##_TYPE: map = name##_map(); break;
5855 STRUCT_LIST(MAKE_CASE)
5859 return Failure::InternalError();
5861 int size = map->instance_size();
5862 AllocationSpace space =
5863 (size > Page::kMaxNonCodeHeapObjectSize) ? LO_SPACE : OLD_POINTER_SPACE;
5865 { MaybeObject* maybe_result = Allocate(map, space);
5866 if (!maybe_result->ToObject(&result)) return maybe_result;
5868 Struct::cast(result)->InitializeBody(size);
5873 bool Heap::IsHeapIterable() {
5874 return (!old_pointer_space()->was_swept_conservatively() &&
5875 !old_data_space()->was_swept_conservatively());
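// Conservatively swept spaces cannot be iterated object by object, so force
// a full GC with kMakeHeapIterableMask before iterating the heap.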
5879 void Heap::EnsureHeapIsIterable() {
5880 ASSERT(AllowHeapAllocation::IsAllowed());
5881 if (!IsHeapIterable()) {
5882 CollectAllGarbage(kMakeHeapIterableMask, "Heap::EnsureHeapIsIterable");
5884 ASSERT(IsHeapIterable());
5888 void Heap::AdvanceIdleIncrementalMarking(intptr_t step_size) {
5889 incremental_marking()->Step(step_size,
5890 IncrementalMarking::NO_GC_VIA_STACK_GUARD);
5892 if (incremental_marking()->IsComplete()) {
5893 bool uncommit = false;
5894 if (gc_count_at_last_idle_gc_ == gc_count_) {
5895 // No GC since the last full GC; the mutator is probably not active.
5896 isolate_->compilation_cache()->Clear();
5899 CollectAllGarbage(kNoGCFlags, "idle notification: finalize incremental");
5900 mark_sweeps_since_idle_round_started_++;
5901 gc_count_at_last_idle_gc_ = gc_count_;
5903 new_space_.Shrink();
5904 UncommitFromSpace();
5910 bool Heap::IdleNotification(int hint) {
5911 // Hints greater than this value indicate that
5912 // the embedder is requesting a lot of GC work.
5913 const int kMaxHint = 1000;
5914 const int kMinHintForIncrementalMarking = 10;
5915 // Minimal hint that allows a full GC to be performed.
5916 const int kMinHintForFullGC = 100;
5917 intptr_t size_factor = Min(Max(hint, 20), kMaxHint) / 4;
5918 // The size factor is in range [5..250]. The numbers here are chosen from
5919 // experiments. If you change them, make sure to test with
5920 // chrome/performance_ui_tests --gtest_filter="GeneralMixMemoryTest.*
5921 intptr_t step_size =
5922 size_factor * IncrementalMarking::kAllocatedThreshold;
5924 if (contexts_disposed_ > 0) {
5925 if (hint >= kMaxHint) {
5926 // The embedder is requesting a lot of GC work after context disposal,
5927 // so we age inline caches so that they don't keep objects from
5928 // the old context alive.
5931 int mark_sweep_time = Min(TimeMarkSweepWouldTakeInMs(), 1000);
5932 if (hint >= mark_sweep_time && !FLAG_expose_gc &&
5933 incremental_marking()->IsStopped()) {
5934 HistogramTimerScope scope(isolate_->counters()->gc_context());
5935 CollectAllGarbage(kReduceMemoryFootprintMask,
5936 "idle notification: contexts disposed");
5938 AdvanceIdleIncrementalMarking(step_size);
5939 contexts_disposed_ = 0;
5941 // After context disposal there is likely a lot of garbage remaining, so reset
5942 // the idle notification counters in order to trigger more incremental GCs
5943 // on subsequent idle notifications.
5948 if (!FLAG_incremental_marking || FLAG_expose_gc || Serializer::enabled()) {
5949 return IdleGlobalGC();
5952 // By doing small chunks of GC work in each IdleNotification, we
5953 // perform a round of incremental GCs and after that wait until
5954 // the mutator creates enough garbage to justify a new round.
5955 // An incremental GC progresses as follows:
5956 // 1. many incremental marking steps,
5957 // 2. one old space mark-sweep-compact,
5958 // 3. many lazy sweep steps.
5959 // Use mark-sweep-compact events to count incremental GCs in a round.
5961 if (incremental_marking()->IsStopped()) {
5962 if (!mark_compact_collector()->AreSweeperThreadsActivated() &&
5963 !IsSweepingComplete() &&
5964 !AdvanceSweepers(static_cast<int>(step_size))) {
5969 if (mark_sweeps_since_idle_round_started_ >= kMaxMarkSweepsInIdleRound) {
5970 if (EnoughGarbageSinceLastIdleRound()) {
5977 int remaining_mark_sweeps = kMaxMarkSweepsInIdleRound -
5978 mark_sweeps_since_idle_round_started_;
5980 if (incremental_marking()->IsStopped()) {
5981 // If there are no more than two GCs left in this idle round and we are
5982 // allowed to do a full GC, then make those GCs full in order to compact
5983 // the code space.
5984 // TODO(ulan): Once we enable code compaction for incremental marking,
5985 // we can get rid of this special case and always start incremental marking.
5986 if (remaining_mark_sweeps <= 2 && hint >= kMinHintForFullGC) {
5987 CollectAllGarbage(kReduceMemoryFootprintMask,
5988 "idle notification: finalize idle round");
5989 mark_sweeps_since_idle_round_started_++;
5990 } else if (hint > kMinHintForIncrementalMarking) {
5991 incremental_marking()->Start();
5994 if (!incremental_marking()->IsStopped() &&
5995 hint > kMinHintForIncrementalMarking) {
5996 AdvanceIdleIncrementalMarking(step_size);
5999 if (mark_sweeps_since_idle_round_started_ >= kMaxMarkSweepsInIdleRound) {
6008 bool Heap::IdleGlobalGC() {
6009 static const int kIdlesBeforeScavenge = 4;
6010 static const int kIdlesBeforeMarkSweep = 7;
6011 static const int kIdlesBeforeMarkCompact = 8;
6012 static const int kMaxIdleCount = kIdlesBeforeMarkCompact + 1;
6013 static const unsigned int kGCsBetweenCleanup = 4;
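// As idle notifications accumulate without intervening GC activity, the
// cleanup below escalates from a new-space scavenge to full collections;
// beyond kIdlesBeforeMarkCompact no further cleanup is attempted.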
6015 if (!last_idle_notification_gc_count_init_) {
6016 last_idle_notification_gc_count_ = gc_count_;
6017 last_idle_notification_gc_count_init_ = true;
6020 bool uncommit = true;
6021 bool finished = false;
6023 // Reset the number of idle notifications received when a number of
6024 // GCs have taken place. This allows another round of cleanup based
6025 // on idle notifications if enough work has been carried out to
6026 // provoke a number of garbage collections.
6027 if (gc_count_ - last_idle_notification_gc_count_ < kGCsBetweenCleanup) {
6028 number_idle_notifications_ =
6029 Min(number_idle_notifications_ + 1, kMaxIdleCount);
6031 number_idle_notifications_ = 0;
6032 last_idle_notification_gc_count_ = gc_count_;
6035 if (number_idle_notifications_ == kIdlesBeforeScavenge) {
6036 CollectGarbage(NEW_SPACE, "idle notification");
6037 new_space_.Shrink();
6038 last_idle_notification_gc_count_ = gc_count_;
6039 } else if (number_idle_notifications_ == kIdlesBeforeMarkSweep) {
6040 // Before doing the mark-sweep collections we clear the
6041 // compilation cache to avoid hanging on to source code and
6042 // generated code for cached functions.
6043 isolate_->compilation_cache()->Clear();
6045 CollectAllGarbage(kReduceMemoryFootprintMask, "idle notification");
6046 new_space_.Shrink();
6047 last_idle_notification_gc_count_ = gc_count_;
6049 } else if (number_idle_notifications_ == kIdlesBeforeMarkCompact) {
6050 CollectAllGarbage(kReduceMemoryFootprintMask, "idle notification");
6051 new_space_.Shrink();
6052 last_idle_notification_gc_count_ = gc_count_;
6053 number_idle_notifications_ = 0;
6055 } else if (number_idle_notifications_ > kIdlesBeforeMarkCompact) {
6056 // If we have received more than kIdlesBeforeMarkCompact idle
6057 // notifications we do not perform any cleanup because we don't
6058 // expect to gain much by doing so.
6062 if (uncommit) UncommitFromSpace();
6070 void Heap::Print() {
6071 if (!HasBeenSetUp()) return;
6072 isolate()->PrintStack(stdout);
6073 AllSpaces spaces(this);
6074 for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
6080 void Heap::ReportCodeStatistics(const char* title) {
6081 PrintF(">>>>>> Code Stats (%s) >>>>>>\n", title);
6082 PagedSpace::ResetCodeStatistics();
6083 // We do not look for code in new space, map space, or old space. If code
6084 // somehow ends up in those spaces, we would miss it here.
6085 code_space_->CollectCodeStatistics();
6086 lo_space_->CollectCodeStatistics();
6087 PagedSpace::ReportCodeStatistics();
6091 // This function expects that NewSpace's allocated objects histogram is
6092 // populated (via a call to CollectStatistics or else as a side effect of a
6093 // just-completed scavenge collection).
6094 void Heap::ReportHeapStatistics(const char* title) {
6096 PrintF(">>>>>> =============== %s (%d) =============== >>>>>>\n",
6098 PrintF("old_generation_allocation_limit_ %" V8_PTR_PREFIX "d\n",
6099 old_generation_allocation_limit_);
6102 PrintF("Number of handles : %d\n", HandleScope::NumberOfHandles(isolate_));
6103 isolate_->global_handles()->PrintStats();
6106 PrintF("Heap statistics : ");
6107 isolate_->memory_allocator()->ReportStatistics();
6108 PrintF("To space : ");
6109 new_space_.ReportStatistics();
6110 PrintF("Old pointer space : ");
6111 old_pointer_space_->ReportStatistics();
6112 PrintF("Old data space : ");
6113 old_data_space_->ReportStatistics();
6114 PrintF("Code space : ");
6115 code_space_->ReportStatistics();
6116 PrintF("Map space : ");
6117 map_space_->ReportStatistics();
6118 PrintF("Cell space : ");
6119 cell_space_->ReportStatistics();
6120 PrintF("PropertyCell space : ");
6121 property_cell_space_->ReportStatistics();
6122 PrintF("Large object space : ");
6123 lo_space_->ReportStatistics();
6124 PrintF(">>>>>> ========================================= >>>>>>\n");
6129 bool Heap::Contains(HeapObject* value) {
6130 return Contains(value->address());
6134 bool Heap::Contains(Address addr) {
6135 if (OS::IsOutsideAllocatedSpace(addr)) return false;
6136 return HasBeenSetUp() &&
6137 (new_space_.ToSpaceContains(addr) ||
6138 old_pointer_space_->Contains(addr) ||
6139 old_data_space_->Contains(addr) ||
6140 code_space_->Contains(addr) ||
6141 map_space_->Contains(addr) ||
6142 cell_space_->Contains(addr) ||
6143 property_cell_space_->Contains(addr) ||
6144 lo_space_->SlowContains(addr));
6148 bool Heap::InSpace(HeapObject* value, AllocationSpace space) {
6149 return InSpace(value->address(), space);
6153 bool Heap::InSpace(Address addr, AllocationSpace space) {
6154 if (OS::IsOutsideAllocatedSpace(addr)) return false;
6155 if (!HasBeenSetUp()) return false;
6159 return new_space_.ToSpaceContains(addr);
6160 case OLD_POINTER_SPACE:
6161 return old_pointer_space_->Contains(addr);
6162 case OLD_DATA_SPACE:
6163 return old_data_space_->Contains(addr);
6165 return code_space_->Contains(addr);
6167 return map_space_->Contains(addr);
6169 return cell_space_->Contains(addr);
6170 case PROPERTY_CELL_SPACE:
6171 return property_cell_space_->Contains(addr);
6173 return lo_space_->SlowContains(addr);
6181 void Heap::Verify() {
6182 CHECK(HasBeenSetUp());
6184 store_buffer()->Verify();
6186 VerifyPointersVisitor visitor;
6187 IterateRoots(&visitor, VISIT_ONLY_STRONG);
6189 new_space_.Verify();
6191 old_pointer_space_->Verify(&visitor);
6192 map_space_->Verify(&visitor);
6194 VerifyPointersVisitor no_dirty_regions_visitor;
6195 old_data_space_->Verify(&no_dirty_regions_visitor);
6196 code_space_->Verify(&no_dirty_regions_visitor);
6197 cell_space_->Verify(&no_dirty_regions_visitor);
6198 property_cell_space_->Verify(&no_dirty_regions_visitor);
6200 lo_space_->Verify();
6205 MaybeObject* Heap::InternalizeUtf8String(Vector<const char> string) {
6206 Object* result = NULL;
6208 { MaybeObject* maybe_new_table =
6209 string_table()->LookupUtf8String(string, &result);
6210 if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
6212 // Can't use set_string_table because StringTable::cast knows that
6213 // StringTable is a singleton and checks for identity.
6214 roots_[kStringTableRootIndex] = new_table;
6215 ASSERT(result != NULL);
6220 MaybeObject* Heap::InternalizeOneByteString(Vector<const uint8_t> string) {
6221 Object* result = NULL;
6223 { MaybeObject* maybe_new_table =
6224 string_table()->LookupOneByteString(string, &result);
6225 if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
6227 // Can't use set_string_table because StringTable::cast knows that
6228 // StringTable is a singleton and checks for identity.
6229 roots_[kStringTableRootIndex] = new_table;
6230 ASSERT(result != NULL);
6235 MaybeObject* Heap::InternalizeOneByteString(Handle<SeqOneByteString> string,
6238 Object* result = NULL;
6240 { MaybeObject* maybe_new_table =
6241 string_table()->LookupSubStringOneByteString(string,
6245 if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
6247 // Can't use set_string_table because StringTable::cast knows that
6248 // StringTable is a singleton and checks for identity.
6249 roots_[kStringTableRootIndex] = new_table;
6250 ASSERT(result != NULL);
6255 MaybeObject* Heap::InternalizeTwoByteString(Vector<const uc16> string) {
6256 Object* result = NULL;
6258 { MaybeObject* maybe_new_table =
6259 string_table()->LookupTwoByteString(string, &result);
6260 if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
6262 // Can't use set_string_table because StringTable::cast knows that
6263 // StringTable is a singleton and checks for identity.
6264 roots_[kStringTableRootIndex] = new_table;
6265 ASSERT(result != NULL);
6270 MaybeObject* Heap::InternalizeString(String* string) {
6271 if (string->IsInternalizedString()) return string;
6272 Object* result = NULL;
6274 { MaybeObject* maybe_new_table =
6275 string_table()->LookupString(string, &result);
6276 if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
6278 // Can't use set_string_table because StringTable::cast knows that
6279 // StringTable is a singleton and checks for identity.
6280 roots_[kStringTableRootIndex] = new_table;
6281 ASSERT(result != NULL);
6286 bool Heap::InternalizeStringIfExists(String* string, String** result) {
6287 if (string->IsInternalizedString()) {
6291 return string_table()->LookupStringIfExists(string, result);
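// Overwrites every word of from-space with a recognizable zap value so that
// stale pointers into the evacuated semispace are easy to spot.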
6295 void Heap::ZapFromSpace() {
6296 NewSpacePageIterator it(new_space_.FromSpaceStart(),
6297 new_space_.FromSpaceEnd());
6298 while (it.has_next()) {
6299 NewSpacePage* page = it.next();
6300 for (Address cursor = page->area_start(), limit = page->area_end();
6302 cursor += kPointerSize) {
6303 Memory::Address_at(cursor) = kFromSpaceZapValue;
6309 void Heap::IterateAndMarkPointersToFromSpace(Address start,
6311 ObjectSlotCallback callback) {
6312 Address slot_address = start;
6314 // We are not collecting slots on new space objects during mutation
6315 // thus we have to scan for pointers to evacuation candidates when we
6316 // promote objects. But we should not record any slots in non-black
6317 // objects: a grey object's slots will be rescanned anyway, and a white
6318 // object might not survive until the end of the collection, so recording
6319 // its slots would violate the invariant.
6320 bool record_slots = false;
6321 if (incremental_marking()->IsCompacting()) {
6322 MarkBit mark_bit = Marking::MarkBitFrom(HeapObject::FromAddress(start));
6323 record_slots = Marking::IsBlack(mark_bit);
6326 while (slot_address < end) {
6327 Object** slot = reinterpret_cast<Object**>(slot_address);
6328 Object* object = *slot;
6329 // If the store buffer becomes overfull we mark pages as being exempt from
6330 // the store buffer. These pages are scanned to find pointers that point
6331 // to the new space. In that case we may hit newly promoted objects and
6332 // fix the pointers before the promotion queue gets to them. Thus the 'if'.
6333 if (object->IsHeapObject()) {
6334 if (Heap::InFromSpace(object)) {
6335 callback(reinterpret_cast<HeapObject**>(slot),
6336 HeapObject::cast(object));
6337 Object* new_object = *slot;
6338 if (InNewSpace(new_object)) {
6339 SLOW_ASSERT(Heap::InToSpace(new_object));
6340 SLOW_ASSERT(new_object->IsHeapObject());
6341 store_buffer_.EnterDirectlyIntoStoreBuffer(
6342 reinterpret_cast<Address>(slot));
6344 SLOW_ASSERT(!MarkCompactCollector::IsOnEvacuationCandidate(new_object));
6345 } else if (record_slots &&
6346 MarkCompactCollector::IsOnEvacuationCandidate(object)) {
6347 mark_compact_collector()->RecordSlot(slot, slot, object);
6350 slot_address += kPointerSize;
6356 typedef bool (*CheckStoreBufferFilter)(Object** addr);
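// Filters for CheckStoreBuffer: the first accepts only addresses that fall
// on the pointer fields of a Map, the second accepts every slot.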
6359 bool IsAMapPointerAddress(Object** addr) {
6360 uintptr_t a = reinterpret_cast<uintptr_t>(addr);
6361 int mod = a % Map::kSize;
6362 return mod >= Map::kPointerFieldsBeginOffset &&
6363 mod < Map::kPointerFieldsEndOffset;
6367 bool EverythingsAPointer(Object** addr) {
6372 static void CheckStoreBuffer(Heap* heap,
6375 Object**** store_buffer_position,
6376 Object*** store_buffer_top,
6377 CheckStoreBufferFilter filter,
6378 Address special_garbage_start,
6379 Address special_garbage_end) {
6380 Map* free_space_map = heap->free_space_map();
6381 for ( ; current < limit; current++) {
6382 Object* o = *current;
6383 Address current_address = reinterpret_cast<Address>(current);
6385 if (o == free_space_map) {
6386 Address current_address = reinterpret_cast<Address>(current);
6387 FreeSpace* free_space =
6388 FreeSpace::cast(HeapObject::FromAddress(current_address));
6389 int skip = free_space->Size();
6390 ASSERT(current_address + skip <= reinterpret_cast<Address>(limit));
6392 current_address += skip - kPointerSize;
6393 current = reinterpret_cast<Object**>(current_address);
6396 // Skip the current linear allocation space between top and limit, which is
6397 // not marked with the free space map but can contain junk.
6398 if (current_address == special_garbage_start &&
6399 special_garbage_end != special_garbage_start) {
6400 current_address = special_garbage_end - kPointerSize;
6401 current = reinterpret_cast<Object**>(current_address);
6404 if (!(*filter)(current)) continue;
6405 ASSERT(current_address < special_garbage_start ||
6406 current_address >= special_garbage_end);
6407 ASSERT(reinterpret_cast<uintptr_t>(o) != kFreeListZapValue);
6408 // We have to check that the pointer does not point into new space
6409 // without trying to cast it to a heap object since the hash field of
6410 // a string can contain values like 1 and 3 which are tagged null
6411 // pointers.
6412 if (!heap->InNewSpace(o)) continue;
6413 while (**store_buffer_position < current &&
6414 *store_buffer_position < store_buffer_top) {
6415 (*store_buffer_position)++;
6417 if (**store_buffer_position != current ||
6418 *store_buffer_position == store_buffer_top) {
6419 Object** obj_start = current;
6420 while (!(*obj_start)->IsMap()) obj_start--;
6427 // Check that the store buffer contains all intergenerational pointers by
6428 // scanning a page and ensuring that all pointers to young space are in the
6429 // store buffer.
6430 void Heap::OldPointerSpaceCheckStoreBuffer() {
6431 OldSpace* space = old_pointer_space();
6432 PageIterator pages(space);
6434 store_buffer()->SortUniq();
6436 while (pages.has_next()) {
6437 Page* page = pages.next();
6438 Object** current = reinterpret_cast<Object**>(page->area_start());
6440 Address end = page->area_end();
6442 Object*** store_buffer_position = store_buffer()->Start();
6443 Object*** store_buffer_top = store_buffer()->Top();
6445 Object** limit = reinterpret_cast<Object**>(end);
6446 CheckStoreBuffer(this,
6449 &store_buffer_position,
6451 &EverythingsAPointer,
6458 void Heap::MapSpaceCheckStoreBuffer() {
6459 MapSpace* space = map_space();
6460 PageIterator pages(space);
6462 store_buffer()->SortUniq();
6464 while (pages.has_next()) {
6465 Page* page = pages.next();
6466 Object** current = reinterpret_cast<Object**>(page->area_start());
6468 Address end = page->area_end();
6470 Object*** store_buffer_position = store_buffer()->Start();
6471 Object*** store_buffer_top = store_buffer()->Top();
6473 Object** limit = reinterpret_cast<Object**>(end);
6474 CheckStoreBuffer(this,
6477 &store_buffer_position,
6479 &IsAMapPointerAddress,
6486 void Heap::LargeObjectSpaceCheckStoreBuffer() {
6487 LargeObjectIterator it(lo_space());
6488 for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
6489 // We only have code, sequential strings, or fixed arrays in large
6490 // object space, and only fixed arrays can possibly contain pointers to
6491 // the young generation.
6492 if (object->IsFixedArray()) {
6493 Object*** store_buffer_position = store_buffer()->Start();
6494 Object*** store_buffer_top = store_buffer()->Top();
6495 Object** current = reinterpret_cast<Object**>(object->address());
6497 reinterpret_cast<Object**>(object->address() + object->Size());
6498 CheckStoreBuffer(this,
6501 &store_buffer_position,
6503 &EverythingsAPointer,
6512 void Heap::IterateRoots(ObjectVisitor* v, VisitMode mode) {
6513 IterateStrongRoots(v, mode);
6514 IterateWeakRoots(v, mode);
6518 void Heap::IterateWeakRoots(ObjectVisitor* v, VisitMode mode) {
6519 v->VisitPointer(reinterpret_cast<Object**>(&roots_[kStringTableRootIndex]));
6520 v->Synchronize(VisitorSynchronization::kStringTable);
6521 if (mode != VISIT_ALL_IN_SCAVENGE &&
6522 mode != VISIT_ALL_IN_SWEEP_NEWSPACE) {
6523 // Scavenge collections have special processing for this.
6524 external_string_table_.Iterate(v);
6525 error_object_list_.Iterate(v);
6527 v->Synchronize(VisitorSynchronization::kExternalStringsTable);
6531 void Heap::IterateStrongRoots(ObjectVisitor* v, VisitMode mode) {
6532 v->VisitPointers(&roots_[0], &roots_[kStrongRootListLength]);
6533 v->Synchronize(VisitorSynchronization::kStrongRootList);
6535 v->VisitPointer(BitCast<Object**>(&hidden_string_));
6536 v->Synchronize(VisitorSynchronization::kInternalizedString);
6538 isolate_->bootstrapper()->Iterate(v);
6539 v->Synchronize(VisitorSynchronization::kBootstrapper);
6540 isolate_->Iterate(v);
6541 v->Synchronize(VisitorSynchronization::kTop);
6542 Relocatable::Iterate(v);
6543 v->Synchronize(VisitorSynchronization::kRelocatable);
6545 #ifdef ENABLE_DEBUGGER_SUPPORT
6546 isolate_->debug()->Iterate(v);
6547 if (isolate_->deoptimizer_data() != NULL) {
6548 isolate_->deoptimizer_data()->Iterate(v);
6551 v->Synchronize(VisitorSynchronization::kDebug);
6552 isolate_->compilation_cache()->Iterate(v);
6553 v->Synchronize(VisitorSynchronization::kCompilationCache);
6555 // Iterate over local handles in handle scopes.
6556 isolate_->handle_scope_implementer()->Iterate(v);
6557 isolate_->IterateDeferredHandles(v);
6558 v->Synchronize(VisitorSynchronization::kHandleScope);
6560 // Iterate over the builtin code objects and code stubs in the
6561 // heap. Note that it is not necessary to iterate over code objects
6562 // on scavenge collections.
6563 if (mode != VISIT_ALL_IN_SCAVENGE) {
6564 isolate_->builtins()->IterateBuiltins(v);
6566 v->Synchronize(VisitorSynchronization::kBuiltins);
6568 // Iterate over global handles.
6570 case VISIT_ONLY_STRONG:
6571 isolate_->global_handles()->IterateStrongRoots(v);
6573 case VISIT_ALL_IN_SCAVENGE:
6574 isolate_->global_handles()->IterateNewSpaceStrongAndDependentRoots(v);
6576 case VISIT_ALL_IN_SWEEP_NEWSPACE:
6578 isolate_->global_handles()->IterateAllRoots(v);
6581 v->Synchronize(VisitorSynchronization::kGlobalHandles);
6583 // Iterate over pointers being held by inactive threads.
6584 isolate_->thread_manager()->Iterate(v);
6585 v->Synchronize(VisitorSynchronization::kThreadManager);
6587 // Iterate over the pointers the Serialization/Deserialization code is
6588 // holding.
6589 // During garbage collection this keeps the partial snapshot cache alive.
6590 // During deserialization of the startup snapshot this creates the partial
6591 // snapshot cache and deserializes the objects it refers to. During
6592 // serialization this does nothing, since the partial snapshot cache is
6593 // empty. However the next thing we do is create the partial snapshot,
6594 // filling up the partial snapshot cache with objects it needs as we go.
6595 SerializerDeserializer::Iterate(v);
6596 // We don't do a v->Synchronize call here, because in debug mode that will
6597 // output a flag to the snapshot. However at this point the serializer and
6598 // deserializer are deliberately a little unsynchronized (see above) so the
6599 // checking of the sync flag in the snapshot would fail.
6603 // TODO(1236194): Since the heap size is configurable on the command line
6604 // and through the API, we should gracefully handle the case that the heap
6605 // size is not big enough to fit all the initial objects.
6606 bool Heap::ConfigureHeap(int max_semispace_size,
6607 intptr_t max_old_gen_size,
6608 intptr_t max_executable_size) {
6609 if (HasBeenSetUp()) return false;
6611 if (FLAG_stress_compaction) {
6612 // This will cause more frequent GCs when stressing.
6613 max_semispace_size_ = Page::kPageSize;
6616 if (max_semispace_size > 0) {
6617 if (max_semispace_size < Page::kPageSize) {
6618 max_semispace_size = Page::kPageSize;
6619 if (FLAG_trace_gc) {
6620 PrintPID("Max semispace size cannot be less than %dkbytes\n",
6621 Page::kPageSize >> 10);
6624 max_semispace_size_ = max_semispace_size;
6627 if (Snapshot::IsEnabled()) {
6628 // If we are using a snapshot we always reserve the default amount
6629 // of memory for each semispace because code in the snapshot has
6630 // write-barrier code that relies on the size and alignment of new
6631 // space. We therefore cannot use a larger max semispace size
6632 // than the default reserved semispace size.
6633 if (max_semispace_size_ > reserved_semispace_size_) {
6634 max_semispace_size_ = reserved_semispace_size_;
6635 if (FLAG_trace_gc) {
6636 PrintPID("Max semispace size cannot be more than %dkbytes\n",
6637 reserved_semispace_size_ >> 10);
6641 // If we are not using snapshots we reserve space for the actual
6642 // max semispace size.
6643 reserved_semispace_size_ = max_semispace_size_;
6646 if (max_old_gen_size > 0) max_old_generation_size_ = max_old_gen_size;
6647 if (max_executable_size > 0) {
6648 max_executable_size_ = RoundUp(max_executable_size, Page::kPageSize);
6651 // The max executable size must be less than or equal to the max old
6652 // generation size.
6653 if (max_executable_size_ > max_old_generation_size_) {
6654 max_executable_size_ = max_old_generation_size_;
6657 // The new space size must be a power of two to support single-bit testing
6658 // for containment.
6659 max_semispace_size_ = RoundUpToPowerOf2(max_semispace_size_);
6660 reserved_semispace_size_ = RoundUpToPowerOf2(reserved_semispace_size_);
6661 initial_semispace_size_ = Min(initial_semispace_size_, max_semispace_size_);
6663 // The external allocation limit should be below 256 MB on all architectures
6664 // to avoid unnecessary low memory notifications, as that is the threshold
6665 // for some embedders.
6666 external_allocation_limit_ = 12 * max_semispace_size_;
6667 ASSERT(external_allocation_limit_ <= 256 * MB);
6669 // The old generation is paged and needs at least one page for each space.
6670 int paged_space_count = LAST_PAGED_SPACE - FIRST_PAGED_SPACE + 1;
6671 max_old_generation_size_ = Max(static_cast<intptr_t>(paged_space_count *
6673 RoundUp(max_old_generation_size_,
6681 bool Heap::ConfigureHeapDefault() {
6682 return ConfigureHeap(static_cast<intptr_t>(FLAG_max_new_space_size / 2) * KB,
6683 static_cast<intptr_t>(FLAG_max_old_space_size) * MB,
6684 static_cast<intptr_t>(FLAG_max_executable_size) * MB);
6688 void Heap::RecordStats(HeapStats* stats, bool take_snapshot) {
6689 *stats->start_marker = HeapStats::kStartMarker;
6690 *stats->end_marker = HeapStats::kEndMarker;
6691 *stats->new_space_size = new_space_.SizeAsInt();
6692 *stats->new_space_capacity = static_cast<int>(new_space_.Capacity());
6693 *stats->old_pointer_space_size = old_pointer_space_->SizeOfObjects();
6694 *stats->old_pointer_space_capacity = old_pointer_space_->Capacity();
6695 *stats->old_data_space_size = old_data_space_->SizeOfObjects();
6696 *stats->old_data_space_capacity = old_data_space_->Capacity();
6697 *stats->code_space_size = code_space_->SizeOfObjects();
6698 *stats->code_space_capacity = code_space_->Capacity();
6699 *stats->map_space_size = map_space_->SizeOfObjects();
6700 *stats->map_space_capacity = map_space_->Capacity();
6701 *stats->cell_space_size = cell_space_->SizeOfObjects();
6702 *stats->cell_space_capacity = cell_space_->Capacity();
6703 *stats->property_cell_space_size = property_cell_space_->SizeOfObjects();
6704 *stats->property_cell_space_capacity = property_cell_space_->Capacity();
6705 *stats->lo_space_size = lo_space_->Size();
6706 isolate_->global_handles()->RecordStats(stats);
6707 *stats->memory_allocator_size = isolate()->memory_allocator()->Size();
6708 *stats->memory_allocator_capacity =
6709 isolate()->memory_allocator()->Size() +
6710 isolate()->memory_allocator()->Available();
6711 *stats->os_error = OS::GetLastError();
6712 isolate()->memory_allocator()->Available();
6713 if (take_snapshot) {
6714 HeapIterator iterator(this);
6715 for (HeapObject* obj = iterator.next();
6717 obj = iterator.next()) {
6718 InstanceType type = obj->map()->instance_type();
6719 ASSERT(0 <= type && type <= LAST_TYPE);
6720 stats->objects_per_type[type]++;
6721 stats->size_per_type[type] += obj->Size();
6727 intptr_t Heap::PromotedSpaceSizeOfObjects() {
6728 return old_pointer_space_->SizeOfObjects()
6729 + old_data_space_->SizeOfObjects()
6730 + code_space_->SizeOfObjects()
6731 + map_space_->SizeOfObjects()
6732 + cell_space_->SizeOfObjects()
6733 + property_cell_space_->SizeOfObjects()
6734 + lo_space_->SizeOfObjects();
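// Reports how much external memory has been registered since the last
// global GC; if the amount has shrunk, zero is returned.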
6738 intptr_t Heap::PromotedExternalMemorySize() {
6739 if (amount_of_external_allocated_memory_
6740 <= amount_of_external_allocated_memory_at_last_global_gc_) return 0;
6741 return amount_of_external_allocated_memory_
6742 - amount_of_external_allocated_memory_at_last_global_gc_;
6746 V8_DECLARE_ONCE(initialize_gc_once);
6748 static void InitializeGCOnce() {
6749 InitializeScavengingVisitorsTables();
6750 NewSpaceScavenger::Initialize();
6751 MarkCompactCollector::Initialize();
6754 bool Heap::SetUp() {
6756 allocation_timeout_ = FLAG_gc_interval;
6759 // Initialize heap spaces and initial maps and objects. Whenever something
6760 // goes wrong, just return false. The caller should check the results and
6761 // call Heap::TearDown() to release allocated memory.
6763 // If the heap is not yet configured (e.g. through the API), configure it.
6764 // Configuration is based on the flags new-space-size (really the semispace
6765 // size) and old-space-size if set or the initial values of semispace_size_
6766 // and old_generation_size_ otherwise.
6768 if (!ConfigureHeapDefault()) return false;
6771 CallOnce(&initialize_gc_once, &InitializeGCOnce);
6773 MarkMapPointersAsEncoded(false);
6775 // Set up memory allocator.
6776 if (!isolate_->memory_allocator()->SetUp(MaxReserved(), MaxExecutableSize()))
6779 // Set up new space.
6780 if (!new_space_.SetUp(reserved_semispace_size_, max_semispace_size_)) {
6784 // Initialize old pointer space.
6785 old_pointer_space_ =
6787 max_old_generation_size_,
6790 if (old_pointer_space_ == NULL) return false;
6791 if (!old_pointer_space_->SetUp()) return false;
6793 // Initialize old data space.
6796 max_old_generation_size_,
6799 if (old_data_space_ == NULL) return false;
6800 if (!old_data_space_->SetUp()) return false;
6802 // Initialize the code space, set its maximum capacity to the old
6803 // generation size. It needs executable memory.
6804 // On 64-bit platform(s), we put all code objects in a 2 GB range of
6805 // virtual address space, so that they can call each other with near calls.
6806 if (code_range_size_ > 0) {
6807 if (!isolate_->code_range()->SetUp(code_range_size_)) {
6813 new OldSpace(this, max_old_generation_size_, CODE_SPACE, EXECUTABLE);
6814 if (code_space_ == NULL) return false;
6815 if (!code_space_->SetUp()) return false;
6817 // Initialize map space.
6818 map_space_ = new MapSpace(this, max_old_generation_size_, MAP_SPACE);
6819 if (map_space_ == NULL) return false;
6820 if (!map_space_->SetUp()) return false;
6822 // Initialize simple cell space.
6823 cell_space_ = new CellSpace(this, max_old_generation_size_, CELL_SPACE);
6824 if (cell_space_ == NULL) return false;
6825 if (!cell_space_->SetUp()) return false;
6827 // Initialize global property cell space.
6828 property_cell_space_ = new PropertyCellSpace(this, max_old_generation_size_,
6829 PROPERTY_CELL_SPACE);
6830 if (property_cell_space_ == NULL) return false;
6831 if (!property_cell_space_->SetUp()) return false;
6833 // The large object space may contain code or data. We set the memory
6834 // to be non-executable here for safety, but this means we need to enable it
6835 // explicitly when allocating large code objects.
6836 lo_space_ = new LargeObjectSpace(this, max_old_generation_size_, LO_SPACE);
6837 if (lo_space_ == NULL) return false;
6838 if (!lo_space_->SetUp()) return false;
6840 // Set up the seed that is used to randomize the string hash function.
6841 ASSERT(hash_seed() == 0);
6842 if (FLAG_randomize_hashes) {
6843 if (FLAG_hash_seed == 0) {
6845 Smi::FromInt(V8::RandomPrivate(isolate()) & 0x3fffffff));
6847 set_hash_seed(Smi::FromInt(FLAG_hash_seed));
6851 LOG(isolate_, IntPtrTEvent("heap-capacity", Capacity()));
6852 LOG(isolate_, IntPtrTEvent("heap-available", Available()));
6854 store_buffer()->SetUp();
6856 if (FLAG_parallel_recompilation) relocation_mutex_ = OS::CreateMutex();
6858 relocation_mutex_locked_by_optimizer_thread_ = false;
6864 bool Heap::CreateHeapObjects() {
6865 // Create initial maps.
6866 if (!CreateInitialMaps()) return false;
6867 if (!CreateApiObjects()) return false;
6869 // Create initial objects
6870 if (!CreateInitialObjects()) return false;
6872 native_contexts_list_ = undefined_value();
6873 array_buffers_list_ = undefined_value();
6878 void Heap::SetStackLimits() {
6879 ASSERT(isolate_ != NULL);
6880 ASSERT(isolate_ == isolate());
6881 // On 64 bit machines, pointers are generally out of range of Smis. We write
6882 // something that looks like an out of range Smi to the GC.
6884 // Set up the special root array entries containing the stack limits.
6885 // These are actually addresses, but the tag makes the GC ignore it.
6886 roots_[kStackLimitRootIndex] =
6887 reinterpret_cast<Object*>(
6888 (isolate_->stack_guard()->jslimit() & ~kSmiTagMask) | kSmiTag);
6889 roots_[kRealStackLimitRootIndex] =
6890 reinterpret_cast<Object*>(
6891 (isolate_->stack_guard()->real_jslimit() & ~kSmiTagMask) | kSmiTag);
6895 void Heap::TearDown() {
6897 if (FLAG_verify_heap) {
6902 if (FLAG_print_cumulative_gc_stat) {
6904 PrintF("gc_count=%d ", gc_count_);
6905 PrintF("mark_sweep_count=%d ", ms_count_);
6906 PrintF("max_gc_pause=%.1f ", get_max_gc_pause());
6907 PrintF("total_gc_time=%.1f ", total_gc_time_ms_);
6908 PrintF("min_in_mutator=%.1f ", get_min_in_mutator());
6909 PrintF("max_alive_after_gc=%" V8_PTR_PREFIX "d ",
6910 get_max_alive_after_gc());
6911 PrintF("total_marking_time=%.1f ", marking_time());
6912 PrintF("total_sweeping_time=%.1f ", sweeping_time());
6916 TearDownArrayBuffers();
6918 isolate_->global_handles()->TearDown();
6920 external_string_table_.TearDown();
6922 error_object_list_.TearDown();
6924 new_space_.TearDown();
6926 if (old_pointer_space_ != NULL) {
6927 old_pointer_space_->TearDown();
6928 delete old_pointer_space_;
6929 old_pointer_space_ = NULL;
6932 if (old_data_space_ != NULL) {
6933 old_data_space_->TearDown();
6934 delete old_data_space_;
6935 old_data_space_ = NULL;
6938 if (code_space_ != NULL) {
6939 code_space_->TearDown();
6944 if (map_space_ != NULL) {
6945 map_space_->TearDown();
6950 if (cell_space_ != NULL) {
6951 cell_space_->TearDown();
6956 if (property_cell_space_ != NULL) {
6957 property_cell_space_->TearDown();
6958 delete property_cell_space_;
6959 property_cell_space_ = NULL;
6962 if (lo_space_ != NULL) {
6963 lo_space_->TearDown();
6968 store_buffer()->TearDown();
6969 incremental_marking()->TearDown();
6971 isolate_->memory_allocator()->TearDown();
6973 delete relocation_mutex_;
6977 void Heap::AddGCPrologueCallback(GCPrologueCallback callback, GCType gc_type) {
6978 ASSERT(callback != NULL);
6979 GCPrologueCallbackPair pair(callback, gc_type);
6980 ASSERT(!gc_prologue_callbacks_.Contains(pair));
6981 return gc_prologue_callbacks_.Add(pair);
6985 void Heap::RemoveGCPrologueCallback(GCPrologueCallback callback) {
6986 ASSERT(callback != NULL);
6987 for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
6988 if (gc_prologue_callbacks_[i].callback == callback) {
6989 gc_prologue_callbacks_.Remove(i);
6997 void Heap::AddGCEpilogueCallback(GCEpilogueCallback callback, GCType gc_type) {
6998 ASSERT(callback != NULL);
6999 GCEpilogueCallbackPair pair(callback, gc_type);
7000 ASSERT(!gc_epilogue_callbacks_.Contains(pair));
7001 return gc_epilogue_callbacks_.Add(pair);
7005 void Heap::RemoveGCEpilogueCallback(GCEpilogueCallback callback) {
7006 ASSERT(callback != NULL);
7007 for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
7008 if (gc_epilogue_callbacks_[i].callback == callback) {
7009 gc_epilogue_callbacks_.Remove(i);
7019 class PrintHandleVisitor: public ObjectVisitor {
7021 void VisitPointers(Object** start, Object** end) {
7022 for (Object** p = start; p < end; p++)
7023 PrintF(" handle %p to %p\n",
7024 reinterpret_cast<void*>(p),
7025 reinterpret_cast<void*>(*p));
7029 void Heap::PrintHandles() {
7030 PrintF("Handles:\n");
7031 PrintHandleVisitor v;
7032 isolate_->handle_scope_implementer()->Iterate(&v);
7038 Space* AllSpaces::next() {
7039 switch (counter_++) {
7041 return heap_->new_space();
7042 case OLD_POINTER_SPACE:
7043 return heap_->old_pointer_space();
7044 case OLD_DATA_SPACE:
7045 return heap_->old_data_space();
7047 return heap_->code_space();
7049 return heap_->map_space();
7051 return heap_->cell_space();
7052 case PROPERTY_CELL_SPACE:
7053 return heap_->property_cell_space();
7055 return heap_->lo_space();
7062 PagedSpace* PagedSpaces::next() {
7063 switch (counter_++) {
7064 case OLD_POINTER_SPACE:
7065 return heap_->old_pointer_space();
7066 case OLD_DATA_SPACE:
7067 return heap_->old_data_space();
7069 return heap_->code_space();
7071 return heap_->map_space();
7073 return heap_->cell_space();
7074 case PROPERTY_CELL_SPACE:
7075 return heap_->property_cell_space();
7083 OldSpace* OldSpaces::next() {
7084 switch (counter_++) {
7085 case OLD_POINTER_SPACE:
7086 return heap_->old_pointer_space();
7087 case OLD_DATA_SPACE:
7088 return heap_->old_data_space();
7090 return heap_->code_space();
7097 SpaceIterator::SpaceIterator(Heap* heap)
7099 current_space_(FIRST_SPACE),
7105 SpaceIterator::SpaceIterator(Heap* heap, HeapObjectCallback size_func)
7107 current_space_(FIRST_SPACE),
7109 size_func_(size_func) {
7113 SpaceIterator::~SpaceIterator() {
7114 // Delete active iterator if any.
7119 bool SpaceIterator::has_next() {
7120 // Iterate until no more spaces.
7121 return current_space_ != LAST_SPACE;
7125 ObjectIterator* SpaceIterator::next() {
7126 if (iterator_ != NULL) {
7129 // Move to the next space
7131 if (current_space_ > LAST_SPACE) {
7136 // Return iterator for the new current space.
7137 return CreateIterator();
// Create an iterator for the space to iterate.
ObjectIterator* SpaceIterator::CreateIterator() {
  ASSERT(iterator_ == NULL);
  switch (current_space_) {
    case NEW_SPACE:
      iterator_ = new SemiSpaceIterator(heap_->new_space(), size_func_);
      break;
    case OLD_POINTER_SPACE:
      iterator_ =
          new HeapObjectIterator(heap_->old_pointer_space(), size_func_);
      break;
    case OLD_DATA_SPACE:
      iterator_ = new HeapObjectIterator(heap_->old_data_space(), size_func_);
      break;
    case CODE_SPACE:
      iterator_ = new HeapObjectIterator(heap_->code_space(), size_func_);
      break;
    case MAP_SPACE:
      iterator_ = new HeapObjectIterator(heap_->map_space(), size_func_);
      break;
    case CELL_SPACE:
      iterator_ = new HeapObjectIterator(heap_->cell_space(), size_func_);
      break;
    case PROPERTY_CELL_SPACE:
      iterator_ = new HeapObjectIterator(heap_->property_cell_space(),
                                         size_func_);
      break;
    case LO_SPACE:
      iterator_ = new LargeObjectIterator(heap_->lo_space(), size_func_);
      break;
  }

  // Return the newly allocated iterator.
  ASSERT(iterator_ != NULL);
  return iterator_;
}


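// HeapObjectsFilter is the interface HeapIterator uses to decide which
// objects to skip. UnreachableObjectsFilter below marks everything that is
// reachable from the roots and then skips every object whose mark bit is
// still clear.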
class HeapObjectsFilter {
 public:
  virtual ~HeapObjectsFilter() {}
  virtual bool SkipObject(HeapObject* object) = 0;
};


class UnreachableObjectsFilter : public HeapObjectsFilter {
 public:
  UnreachableObjectsFilter() {
    MarkReachableObjects();
  }

  ~UnreachableObjectsFilter() {
    Isolate::Current()->heap()->mark_compact_collector()->ClearMarkbits();
  }

  bool SkipObject(HeapObject* object) {
    MarkBit mark_bit = Marking::MarkBitFrom(object);
    return !mark_bit.Get();
  }

 private:
  class MarkingVisitor : public ObjectVisitor {
   public:
    MarkingVisitor() : marking_stack_(10) {}

    void VisitPointers(Object** start, Object** end) {
      for (Object** p = start; p < end; p++) {
        if (!(*p)->IsHeapObject()) continue;
        HeapObject* obj = HeapObject::cast(*p);
        MarkBit mark_bit = Marking::MarkBitFrom(obj);
        if (!mark_bit.Get()) {
          mark_bit.Set();
          marking_stack_.Add(obj);
        }
      }
    }

    void TransitiveClosure() {
      while (!marking_stack_.is_empty()) {
        HeapObject* obj = marking_stack_.RemoveLast();
        obj->Iterate(this);
      }
    }

   private:
    List<HeapObject*> marking_stack_;
  };

  void MarkReachableObjects() {
    Heap* heap = Isolate::Current()->heap();
    MarkingVisitor visitor;
    heap->IterateRoots(&visitor, VISIT_ALL);
    visitor.TransitiveClosure();
  }

  DisallowHeapAllocation no_allocation_;
};


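// HeapIterator walks every live object in every space, optionally skipping
// objects rejected by the filter. A minimal usage sketch (assuming a
// Heap* heap is at hand):
//
//   HeapIterator it(heap);
//   for (HeapObject* obj = it.next(); obj != NULL; obj = it.next()) {
//     // ... inspect obj ...
//   }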
HeapIterator::HeapIterator(Heap* heap)
    : heap_(heap),
      filtering_(HeapIterator::kNoFiltering),
      filter_(NULL) {
  Init();
}


HeapIterator::HeapIterator(Heap* heap,
                           HeapIterator::HeapObjectsFiltering filtering)
    : heap_(heap),
      filtering_(filtering),
      filter_(NULL) {
  Init();
}


HeapIterator::~HeapIterator() {
  Shutdown();
}


void HeapIterator::Init() {
  // Start the iteration.
  space_iterator_ = new SpaceIterator(heap_);
  switch (filtering_) {
    case kFilterUnreachable:
      filter_ = new UnreachableObjectsFilter;
      break;
    default:
      break;
  }
  object_iterator_ = space_iterator_->next();
}


void HeapIterator::Shutdown() {
#ifdef DEBUG
  // Assert that in filtering mode we have iterated through all
  // objects. Otherwise, heap will be left in an inconsistent state.
  if (filtering_ != kNoFiltering) {
    ASSERT(object_iterator_ == NULL);
  }
#endif
  // Make sure the last iterator is deallocated.
  delete space_iterator_;
  space_iterator_ = NULL;
  object_iterator_ = NULL;
  delete filter_;
  filter_ = NULL;
}


HeapObject* HeapIterator::next() {
  if (filter_ == NULL) return NextObject();

  HeapObject* obj = NextObject();
  while (obj != NULL && filter_->SkipObject(obj)) obj = NextObject();
  return obj;
}


HeapObject* HeapIterator::NextObject() {
  // No iterator means we are done.
  if (object_iterator_ == NULL) return NULL;

  if (HeapObject* obj = object_iterator_->next_object()) {
    // If the current iterator has more objects we are fine.
    return obj;
  } else {
    // Go through the spaces looking for one that has objects.
    while (space_iterator_->has_next()) {
      object_iterator_ = space_iterator_->next();
      if (HeapObject* obj = object_iterator_->next_object()) {
        return obj;
      }
    }
  }
  // Done with the last space.
  object_iterator_ = NULL;
  return NULL;
}


void HeapIterator::reset() {
  // Restart the iterator.
  Shutdown();
  Init();
}


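// PathTracer is a debugging aid: it performs a depth-first walk of the object
// graph, temporarily tagging the map word of each visited object, to find and
// print a retaining path from a root to a target object (or to any global
// object).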
Object* const PathTracer::kAnyGlobalObject = reinterpret_cast<Object*>(NULL);


class PathTracer::MarkVisitor: public ObjectVisitor {
 public:
  explicit MarkVisitor(PathTracer* tracer) : tracer_(tracer) {}
  void VisitPointers(Object** start, Object** end) {
    // Scan all HeapObject pointers in [start, end).
    for (Object** p = start; !tracer_->found() && (p < end); p++) {
      if ((*p)->IsHeapObject())
        tracer_->MarkRecursively(p, this);
    }
  }

 private:
  PathTracer* tracer_;
};


class PathTracer::UnmarkVisitor: public ObjectVisitor {
 public:
  explicit UnmarkVisitor(PathTracer* tracer) : tracer_(tracer) {}
  void VisitPointers(Object** start, Object** end) {
    // Scan all HeapObject pointers in [start, end).
    for (Object** p = start; p < end; p++) {
      if ((*p)->IsHeapObject())
        tracer_->UnmarkRecursively(p, this);
    }
  }

 private:
  PathTracer* tracer_;
};


void PathTracer::VisitPointers(Object** start, Object** end) {
  bool done = ((what_to_find_ == FIND_FIRST) && found_target_);
  // Visit all HeapObject pointers in [start, end).
  for (Object** p = start; !done && (p < end); p++) {
    if ((*p)->IsHeapObject()) {
      TracePathFrom(p);
      done = ((what_to_find_ == FIND_FIRST) && found_target_);
    }
  }
}


void PathTracer::Reset() {
  found_target_ = false;
  object_stack_.Clear();
}


void PathTracer::TracePathFrom(Object** root) {
  ASSERT((search_target_ == kAnyGlobalObject) ||
         search_target_->IsHeapObject());
  found_target_in_trace_ = false;
  Reset();

  MarkVisitor mark_visitor(this);
  MarkRecursively(root, &mark_visitor);

  UnmarkVisitor unmark_visitor(this);
  UnmarkRecursively(root, &unmark_visitor);

  ProcessResults();
}


static bool SafeIsNativeContext(HeapObject* obj) {
  return obj->map() == obj->GetHeap()->raw_unchecked_native_context_map();
}


void PathTracer::MarkRecursively(Object** p, MarkVisitor* mark_visitor) {
  if (!(*p)->IsHeapObject()) return;

  HeapObject* obj = HeapObject::cast(*p);

  Object* map = obj->map();

  if (!map->IsHeapObject()) return;  // Visited before.

  if (found_target_in_trace_) return;  // Stop if target found.
  object_stack_.Add(obj);
  if (((search_target_ == kAnyGlobalObject) && obj->IsJSGlobalObject()) ||
      (obj == search_target_)) {
    found_target_in_trace_ = true;
    found_target_ = true;
    return;
  }

  bool is_native_context = SafeIsNativeContext(obj);

  Map* map_p = reinterpret_cast<Map*>(HeapObject::cast(map));

  Address map_addr = map_p->address();

  obj->set_map_no_write_barrier(reinterpret_cast<Map*>(map_addr + kMarkTag));

  // Scan the object body.
  if (is_native_context && (visit_mode_ == VISIT_ONLY_STRONG)) {
    // This is specialized to scan Contexts properly.
    Object** start = reinterpret_cast<Object**>(obj->address() +
                                                Context::kHeaderSize);
    Object** end = reinterpret_cast<Object**>(obj->address() +
        Context::kHeaderSize + Context::FIRST_WEAK_SLOT * kPointerSize);
    mark_visitor->VisitPointers(start, end);
  } else {
    obj->IterateBody(map_p->instance_type(),
                     obj->SizeFromMap(map_p),
                     mark_visitor);
  }

  // Scan the map after the body because the body is a lot more interesting
  // when doing leak detection.
  MarkRecursively(&map, mark_visitor);

  if (!found_target_in_trace_)  // Don't pop if we found the target.
    object_stack_.RemoveLast();
}


void PathTracer::UnmarkRecursively(Object** p, UnmarkVisitor* unmark_visitor) {
  if (!(*p)->IsHeapObject()) return;

  HeapObject* obj = HeapObject::cast(*p);

  Object* map = obj->map();

  if (map->IsHeapObject()) return;  // Unmarked already.

  Address map_addr = reinterpret_cast<Address>(map);

  map_addr -= kMarkTag;

  ASSERT_TAG_ALIGNED(map_addr);

  HeapObject* map_p = HeapObject::FromAddress(map_addr);

  obj->set_map_no_write_barrier(reinterpret_cast<Map*>(map_p));

  UnmarkRecursively(reinterpret_cast<Object**>(&map_p), unmark_visitor);

  obj->IterateBody(Map::cast(map_p)->instance_type(),
                   obj->SizeFromMap(Map::cast(map_p)),
                   unmark_visitor);
}


void PathTracer::ProcessResults() {
  if (found_target_) {
    PrintF("=====================================\n");
    PrintF("====      Path to object        ====\n");
    PrintF("=====================================\n\n");

    ASSERT(!object_stack_.is_empty());
    for (int i = 0; i < object_stack_.length(); i++) {
      if (i > 0) PrintF("\n     |\n     |\n     V\n\n");
      Object* obj = object_stack_[i];
      obj->Print();
    }
    PrintF("=====================================\n");
  }
}


// Triggers a depth-first traversal of reachable objects from one
// given root object and finds a path to a specific heap object and
// prints it.
void Heap::TracePathToObjectFrom(Object* target, Object* root) {
  PathTracer tracer(target, PathTracer::FIND_ALL, VISIT_ALL);
  tracer.VisitPointer(&root);
}


// Triggers a depth-first traversal of reachable objects from roots
// and finds a path to a specific heap object and prints it.
void Heap::TracePathToObject(Object* target) {
  PathTracer tracer(target, PathTracer::FIND_ALL, VISIT_ALL);
  IterateRoots(&tracer, VISIT_ONLY_STRONG);
}


// Triggers a depth-first traversal of reachable objects from roots
// and finds a path to any global object and prints it. Useful for
// determining the source for leaks of global objects.
void Heap::TracePathToGlobal() {
  PathTracer tracer(PathTracer::kAnyGlobalObject,
                    PathTracer::FIND_ALL,
                    VISIT_ALL);
  IterateRoots(&tracer, VISIT_ONLY_STRONG);
}


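// Sums the free-list and wasted bytes of every old space; GCTracer uses this
// to report the hole size before and after a collection.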
static intptr_t CountTotalHolesSize(Heap* heap) {
  intptr_t holes_size = 0;
  OldSpaces spaces(heap);
  for (OldSpace* space = spaces.next();
       space != NULL;
       space = spaces.next()) {
    holes_size += space->Waste() + space->Available();
  }
  return holes_size;
}


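// GCTracer measures a single collection: the constructor samples the pre-GC
// heap state (sizes, incremental-marking progress, time spent in the mutator)
// and the destructor prints one trace line and updates the cumulative
// statistics, depending on the tracing flags.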
GCTracer::GCTracer(Heap* heap,
                   const char* gc_reason,
                   const char* collector_reason)
    : start_time_(0.0),
      start_object_size_(0),
      start_memory_size_(0),
      gc_count_(0),
      full_gc_count_(0),
      allocated_since_last_gc_(0),
      spent_in_mutator_(0),
      promoted_objects_size_(0),
      nodes_died_in_new_space_(0),
      nodes_copied_in_new_space_(0),
      nodes_promoted_(0),
      heap_(heap),
      gc_reason_(gc_reason),
      collector_reason_(collector_reason) {
  if (!FLAG_trace_gc && !FLAG_print_cumulative_gc_stat) return;
  start_time_ = OS::TimeCurrentMillis();
  start_object_size_ = heap_->SizeOfObjects();
  start_memory_size_ = heap_->isolate()->memory_allocator()->Size();

  for (int i = 0; i < Scope::kNumberOfScopes; i++) {
    scopes_[i] = 0;
  }

  in_free_list_or_wasted_before_gc_ = CountTotalHolesSize(heap);

  allocated_since_last_gc_ =
      heap_->SizeOfObjects() - heap_->alive_after_last_gc_;

  if (heap_->last_gc_end_timestamp_ > 0) {
    spent_in_mutator_ = Max(start_time_ - heap_->last_gc_end_timestamp_, 0.0);
  }

  steps_count_ = heap_->incremental_marking()->steps_count();
  steps_took_ = heap_->incremental_marking()->steps_took();
  longest_step_ = heap_->incremental_marking()->longest_step();
  steps_count_since_last_gc_ =
      heap_->incremental_marking()->steps_count_since_last_gc();
  steps_took_since_last_gc_ =
      heap_->incremental_marking()->steps_took_since_last_gc();
}


GCTracer::~GCTracer() {
  // Print one trace line iff a tracing flag is set.
  if (!FLAG_trace_gc && !FLAG_print_cumulative_gc_stat) return;

  bool first_gc = (heap_->last_gc_end_timestamp_ == 0);

  heap_->alive_after_last_gc_ = heap_->SizeOfObjects();
  heap_->last_gc_end_timestamp_ = OS::TimeCurrentMillis();

  double time = heap_->last_gc_end_timestamp_ - start_time_;

  // Update cumulative GC statistics if required.
  if (FLAG_print_cumulative_gc_stat) {
    heap_->total_gc_time_ms_ += time;
    heap_->max_gc_pause_ = Max(heap_->max_gc_pause_, time);
    heap_->max_alive_after_gc_ = Max(heap_->max_alive_after_gc_,
                                     heap_->alive_after_last_gc_);
    if (!first_gc) {
      heap_->min_in_mutator_ = Min(heap_->min_in_mutator_,
                                   spent_in_mutator_);
    }
  } else if (FLAG_trace_gc_verbose) {
    heap_->total_gc_time_ms_ += time;
  }

  if (collector_ == SCAVENGER && FLAG_trace_gc_ignore_scavenger) return;

  heap_->AddMarkingTime(scopes_[Scope::MC_MARK]);

  if (FLAG_print_cumulative_gc_stat && !FLAG_trace_gc) return;
  PrintPID("%8.0f ms: ", heap_->isolate()->time_millis_since_init());

  if (!FLAG_trace_gc_nvp) {
    int external_time = static_cast<int>(scopes_[Scope::EXTERNAL]);

    double end_memory_size_mb =
        static_cast<double>(heap_->isolate()->memory_allocator()->Size()) / MB;

    PrintF("%s %.1f (%.1f) -> %.1f (%.1f) MB, ",
           CollectorString(),
           static_cast<double>(start_object_size_) / MB,
           static_cast<double>(start_memory_size_) / MB,
           SizeOfHeapObjects(),
           end_memory_size_mb);

    if (external_time > 0) PrintF("%d / ", external_time);
    PrintF("%.1f ms", time);
    if (steps_count_ > 0) {
      if (collector_ == SCAVENGER) {
        PrintF(" (+ %.1f ms in %d steps since last GC)",
               steps_took_since_last_gc_,
               steps_count_since_last_gc_);
      } else {
        PrintF(" (+ %.1f ms in %d steps since start of marking, "
                   "biggest step %.1f ms)",
               steps_took_,
               steps_count_,
               longest_step_);
      }
    }

    if (gc_reason_ != NULL) {
      PrintF(" [%s]", gc_reason_);
    }

    if (collector_reason_ != NULL) {
      PrintF(" [%s]", collector_reason_);
    }

    PrintF(".\n");
  } else {
    PrintF("pause=%.1f ", time);
    PrintF("mutator=%.1f ", spent_in_mutator_);
    PrintF("gc=");
    switch (collector_) {
      case SCAVENGER:
        PrintF("s");
        break;
      case MARK_COMPACTOR:
        PrintF("ms");
        break;
      default:
        UNREACHABLE();
    }
    PrintF(" ");

    PrintF("external=%.1f ", scopes_[Scope::EXTERNAL]);
    PrintF("mark=%.1f ", scopes_[Scope::MC_MARK]);
    PrintF("sweep=%.1f ", scopes_[Scope::MC_SWEEP]);
    PrintF("sweepns=%.1f ", scopes_[Scope::MC_SWEEP_NEWSPACE]);
    PrintF("evacuate=%.1f ", scopes_[Scope::MC_EVACUATE_PAGES]);
    PrintF("new_new=%.1f ", scopes_[Scope::MC_UPDATE_NEW_TO_NEW_POINTERS]);
    PrintF("root_new=%.1f ", scopes_[Scope::MC_UPDATE_ROOT_TO_NEW_POINTERS]);
    PrintF("old_new=%.1f ", scopes_[Scope::MC_UPDATE_OLD_TO_NEW_POINTERS]);
    PrintF("compaction_ptrs=%.1f ",
           scopes_[Scope::MC_UPDATE_POINTERS_TO_EVACUATED]);
    PrintF("intracompaction_ptrs=%.1f ",
           scopes_[Scope::MC_UPDATE_POINTERS_BETWEEN_EVACUATED]);
    PrintF("misc_compaction=%.1f ", scopes_[Scope::MC_UPDATE_MISC_POINTERS]);
    PrintF("weakmap_process=%.1f ", scopes_[Scope::MC_WEAKMAP_PROCESS]);
    PrintF("weakmap_clear=%.1f ", scopes_[Scope::MC_WEAKMAP_CLEAR]);

    PrintF("total_size_before=%" V8_PTR_PREFIX "d ", start_object_size_);
    PrintF("total_size_after=%" V8_PTR_PREFIX "d ", heap_->SizeOfObjects());
    PrintF("holes_size_before=%" V8_PTR_PREFIX "d ",
           in_free_list_or_wasted_before_gc_);
    PrintF("holes_size_after=%" V8_PTR_PREFIX "d ", CountTotalHolesSize(heap_));

    PrintF("allocated=%" V8_PTR_PREFIX "d ", allocated_since_last_gc_);
    PrintF("promoted=%" V8_PTR_PREFIX "d ", promoted_objects_size_);
    PrintF("nodes_died_in_new=%d ", nodes_died_in_new_space_);
    PrintF("nodes_copied_in_new=%d ", nodes_copied_in_new_space_);
    PrintF("nodes_promoted=%d ", nodes_promoted_);

    if (collector_ == SCAVENGER) {
      PrintF("stepscount=%d ", steps_count_since_last_gc_);
      PrintF("stepstook=%.1f ", steps_took_since_last_gc_);
    } else {
      PrintF("stepscount=%d ", steps_count_);
      PrintF("stepstook=%.1f ", steps_took_);
      PrintF("longeststep=%.1f ", longest_step_);
    }

    PrintF("\n");
  }

  heap_->PrintShortHeapStatistics();
}


const char* GCTracer::CollectorString() {
  switch (collector_) {
    case SCAVENGER:
      return "Scavenge";
    case MARK_COMPACTOR:
      return "Mark-sweep";
  }
  return "Unknown GC";
}


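// KeyedLookupCache caches (map, property name) -> field offset lookups.
// Hash mixes the map pointer with the name's hash; Lookup and Update probe
// kEntriesPerBucket consecutive slots starting at the hashed bucket.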
int KeyedLookupCache::Hash(Map* map, Name* name) {
  // Uses only lower 32 bits if pointers are larger.
  uintptr_t addr_hash =
      static_cast<uint32_t>(reinterpret_cast<uintptr_t>(map)) >> kMapHashShift;
  return static_cast<uint32_t>((addr_hash ^ name->Hash()) & kCapacityMask);
}


int KeyedLookupCache::Lookup(Map* map, Name* name) {
  int index = (Hash(map, name) & kHashMask);
  for (int i = 0; i < kEntriesPerBucket; i++) {
    Key& key = keys_[index + i];
    if ((key.map == map) && key.name->Equals(name)) {
      return field_offsets_[index + i];
    }
  }
  return kNotFound;
}


void KeyedLookupCache::Update(Map* map, Name* name, int field_offset) {
  if (!name->IsUniqueName()) {
    String* internalized_string;
    if (!HEAP->InternalizeStringIfExists(
            String::cast(name), &internalized_string)) {
      return;
    }
    name = internalized_string;
  }
  // This cache is cleared only between mark compact passes, so we expect the
  // cache to only contain old space names.
  ASSERT(!HEAP->InNewSpace(name));

  int index = (Hash(map, name) & kHashMask);
  // After a GC there will be free slots, so we use them in order (this may
  // help to get the most frequently used one in position 0).
  for (int i = 0; i < kEntriesPerBucket; i++) {
    Key& key = keys_[index];
    Object* free_entry_indicator = NULL;
    if (key.map == free_entry_indicator) {
      key.map = map;
      key.name = name;
      field_offsets_[index + i] = field_offset;
      return;
    }
  }
  // No free entry found in this bucket, so we move them all down one and
  // put the new entry at position zero.
  for (int i = kEntriesPerBucket - 1; i > 0; i--) {
    Key& key = keys_[index + i];
    Key& key2 = keys_[index + i - 1];
    key = key2;
    field_offsets_[index + i] = field_offsets_[index + i - 1];
  }

  // Write the new first entry.
  Key& key = keys_[index];
  key.map = map;
  key.name = name;
  field_offsets_[index] = field_offset;
}


void KeyedLookupCache::Clear() {
  for (int index = 0; index < kLength; index++) keys_[index].map = NULL;
}


void DescriptorLookupCache::Clear() {
  for (int index = 0; index < kLength; index++) keys_[index].source = NULL;
}


void Heap::GarbageCollectionGreedyCheck() {
  ASSERT(FLAG_gc_greedy);
  if (isolate_->bootstrapper()->IsActive()) return;
  if (disallow_allocation_failure()) return;
  CollectGarbage(NEW_SPACE);
}


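// Each SubCache entry is seeded with an input bit pattern that the FPU never
// produces for a valid argument, so no slot can yield a spurious hit before
// it has been written.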
TranscendentalCache::SubCache::SubCache(Type t)
  : type_(t),
    isolate_(Isolate::Current()) {
  uint32_t in0 = 0xffffffffu;  // Bit-pattern for a NaN that isn't
  uint32_t in1 = 0xffffffffu;  // generated by the FPU.
  for (int i = 0; i < kCacheSize; i++) {
    elements_[i].in[0] = in0;
    elements_[i].in[1] = in1;
    elements_[i].output = NULL;
  }
}


void TranscendentalCache::Clear() {
  for (int i = 0; i < kNumberOfCaches; i++) {
    if (caches_[i] != NULL) {
      delete caches_[i];
      caches_[i] = NULL;
    }
  }
}


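// Compacts both string lists: holes left by dead external strings are
// dropped, and new-space entries that have been promoted are moved to the
// old-space list.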
void ExternalStringTable::CleanUp() {
  int last = 0;
  for (int i = 0; i < new_space_strings_.length(); ++i) {
    if (new_space_strings_[i] == heap_->the_hole_value()) {
      continue;
    }
    if (heap_->InNewSpace(new_space_strings_[i])) {
      new_space_strings_[last++] = new_space_strings_[i];
    } else {
      old_space_strings_.Add(new_space_strings_[i]);
    }
  }
  new_space_strings_.Rewind(last);
  new_space_strings_.Trim();

  last = 0;
  for (int i = 0; i < old_space_strings_.length(); ++i) {
    if (old_space_strings_[i] == heap_->the_hole_value()) {
      continue;
    }
    ASSERT(!heap_->InNewSpace(old_space_strings_[i]));
    old_space_strings_[last++] = old_space_strings_[i];
  }
  old_space_strings_.Rewind(last);
  old_space_strings_.Trim();
#ifdef VERIFY_HEAP
  if (FLAG_verify_heap) {
    Verify();
  }
#endif
}


void ExternalStringTable::TearDown() {
  new_space_strings_.Free();
  old_space_strings_.Free();
}


// Update all references.
void ErrorObjectList::UpdateReferences() {
  for (int i = 0; i < list_.length(); i++) {
    HeapObject* object = HeapObject::cast(list_[i]);
    MapWord first_word = object->map_word();
    if (first_word.IsForwardingAddress()) {
      list_[i] = first_word.ToForwardingAddress();
    }
  }
}


// Unforwarded objects in new space are dead and removed from the list.
void ErrorObjectList::UpdateReferencesInNewSpace(Heap* heap) {
  if (list_.is_empty()) return;
  if (!nested_) {
    int write_index = 0;
    for (int i = 0; i < list_.length(); i++) {
      MapWord first_word = HeapObject::cast(list_[i])->map_word();
      if (first_word.IsForwardingAddress()) {
        list_[write_index++] = first_word.ToForwardingAddress();
      }
    }
    list_.Rewind(write_index);
  } else {
    // If a GC is triggered during DeferredFormatStackTrace, we do not move
    // objects in the list, just remove dead ones, so as not to confuse the
    // loop in DeferredFormatStackTrace.
    for (int i = 0; i < list_.length(); i++) {
      MapWord first_word = HeapObject::cast(list_[i])->map_word();
      list_[i] = first_word.IsForwardingAddress()
                     ? first_word.ToForwardingAddress()
                     : heap->the_hole_value();
    }
  }
}


void ErrorObjectList::DeferredFormatStackTrace(Isolate* isolate) {
  // If formatting the stack trace causes a GC, this method will be
  // recursively called. In that case, skip the recursive call, since
  // the loop modifies the list while iterating over it.
  if (nested_ || list_.is_empty() || isolate->has_pending_exception()) return;
  nested_ = true;
  HandleScope scope(isolate);
  Handle<String> stack_key = isolate->factory()->stack_string();
  int write_index = 0;
  int budget = kBudgetPerGC;
  for (int i = 0; i < list_.length(); i++) {
    Object* object = list_[i];
    JSFunction* getter_fun;

    { DisallowHeapAllocation no_gc;
      // Skip possible holes in the list.
      if (object->IsTheHole()) continue;
      if (isolate->heap()->InNewSpace(object) || budget == 0) {
        list_[write_index++] = object;
        continue;
      }

      // Check whether the stack property is backed by the original getter.
      LookupResult lookup(isolate);
      JSObject::cast(object)->LocalLookupRealNamedProperty(*stack_key, &lookup);
      if (!lookup.IsFound() || lookup.type() != CALLBACKS) continue;
      Object* callback = lookup.GetCallbackObject();
      if (!callback->IsAccessorPair()) continue;
      Object* getter_obj = AccessorPair::cast(callback)->getter();
      if (!getter_obj->IsJSFunction()) continue;
      getter_fun = JSFunction::cast(getter_obj);
      String* key = isolate->heap()->hidden_stack_trace_string();
      Object* value = getter_fun->GetHiddenProperty(key);
      if (key != value) continue;
    }

    budget--;
    HandleScope scope(isolate);
    bool has_exception = false;
#ifdef DEBUG
    Handle<Map> map(HeapObject::cast(object)->map(), isolate);
#endif
    Handle<Object> object_handle(object, isolate);
    Handle<Object> getter_handle(getter_fun, isolate);
    Execution::Call(getter_handle, object_handle, 0, NULL, &has_exception);
    ASSERT(*map == HeapObject::cast(*object_handle)->map());
    if (has_exception) {
      // Hit an exception (most likely a stack overflow).
      // Wrap up this pass and retry after another GC.
      isolate->clear_pending_exception();
      // We use the handle since calling the getter might have caused a GC.
      list_[write_index++] = *object_handle;
      budget = 0;
    }
  }
  list_.Rewind(write_index);
  nested_ = false;
}


void ErrorObjectList::RemoveUnmarked(Heap* heap) {
  for (int i = 0; i < list_.length(); i++) {
    HeapObject* object = HeapObject::cast(list_[i]);
    if (!Marking::MarkBitFrom(object).Get()) {
      list_[i] = heap->the_hole_value();
    }
  }
}


void ErrorObjectList::TearDown() {
  list_.Free();
}


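// Unmapping of memory chunks is deferred: chunks are queued here and only
// released in FreeQueuedChunks(), after the store buffer has been compacted
// and filtered so that it no longer refers to them.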
void Heap::QueueMemoryChunkForFree(MemoryChunk* chunk) {
  chunk->set_next_chunk(chunks_queued_for_free_);
  chunks_queued_for_free_ = chunk;
}


void Heap::FreeQueuedChunks() {
  if (chunks_queued_for_free_ == NULL) return;
  MemoryChunk* next;
  MemoryChunk* chunk;
  for (chunk = chunks_queued_for_free_; chunk != NULL; chunk = next) {
    next = chunk->next_chunk();
    chunk->SetFlag(MemoryChunk::ABOUT_TO_BE_FREED);

    if (chunk->owner()->identity() == LO_SPACE) {
      // StoreBuffer::Filter relies on MemoryChunk::FromAnyPointerAddress.
      // If FromAnyPointerAddress encounters a slot that belongs to a large
      // chunk queued for deletion it will fail to find the chunk because
      // it tries to perform a search in the list of pages owned by the large
      // object space, and queued chunks were detached from that list.
      // To work around this we split the large chunk into normal kPageSize
      // aligned pieces and initialize the size, owner and flags field of
      // every piece. If FromAnyPointerAddress encounters a slot that belongs
      // to one of these smaller pieces it will treat it as a slot on a
      // normal Page.
      Address chunk_end = chunk->address() + chunk->size();
      MemoryChunk* inner = MemoryChunk::FromAddress(
          chunk->address() + Page::kPageSize);
      MemoryChunk* inner_last = MemoryChunk::FromAddress(chunk_end - 1);
      while (inner <= inner_last) {
        // Size of a large chunk is always a multiple of
        // OS::AllocateAlignment() so there is always
        // enough space for a fake MemoryChunk header.
        Address area_end = Min(inner->address() + Page::kPageSize, chunk_end);
        // Guard against overflow.
        if (area_end < inner->address()) area_end = chunk_end;
        inner->SetArea(inner->address(), area_end);
        inner->set_size(Page::kPageSize);
        inner->set_owner(lo_space());
        inner->SetFlag(MemoryChunk::ABOUT_TO_BE_FREED);
        inner = MemoryChunk::FromAddress(
            inner->address() + Page::kPageSize);
      }
    }
  }
  isolate_->heap()->store_buffer()->Compact();
  isolate_->heap()->store_buffer()->Filter(MemoryChunk::ABOUT_TO_BE_FREED);
  for (chunk = chunks_queued_for_free_; chunk != NULL; chunk = next) {
    next = chunk->next_chunk();
    isolate_->memory_allocator()->Free(chunk);
  }
  chunks_queued_for_free_ = NULL;
}


void Heap::RememberUnmappedPage(Address page, bool compacted) {
  uintptr_t p = reinterpret_cast<uintptr_t>(page);
  // Tag the page pointer to make it findable in the dump file.
  if (compacted) {
    p ^= 0xc1ead & (Page::kPageSize - 1);  // Cleared.
  } else {
    p ^= 0x1d1ed & (Page::kPageSize - 1);  // I died.
  }
  remembered_unmapped_pages_[remembered_unmapped_pages_index_] =
      reinterpret_cast<Address>(p);
  remembered_unmapped_pages_index_++;
  remembered_unmapped_pages_index_ %= kRememberedUnmappedPages;
}


void Heap::ClearObjectStats(bool clear_last_time_stats) {
  memset(object_counts_, 0, sizeof(object_counts_));
  memset(object_sizes_, 0, sizeof(object_sizes_));
  if (clear_last_time_stats) {
    memset(object_counts_last_time_, 0, sizeof(object_counts_last_time_));
    memset(object_sizes_last_time_, 0, sizeof(object_sizes_last_time_));
  }
}


static LazyMutex checkpoint_object_stats_mutex = LAZY_MUTEX_INITIALIZER;


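// Pushes the per-instance-type counts and sizes gathered since the last
// checkpoint into the isolate's counters as deltas against the previous
// snapshot, then records the current values as the new snapshot.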
void Heap::CheckpointObjectStats() {
  ScopedLock lock(checkpoint_object_stats_mutex.Pointer());
  Counters* counters = isolate()->counters();
#define ADJUST_LAST_TIME_OBJECT_COUNT(name)               \
  counters->count_of_##name()->Increment(                 \
      static_cast<int>(object_counts_[name]));            \
  counters->count_of_##name()->Decrement(                 \
      static_cast<int>(object_counts_last_time_[name]));  \
  counters->size_of_##name()->Increment(                  \
      static_cast<int>(object_sizes_[name]));             \
  counters->size_of_##name()->Decrement(                  \
      static_cast<int>(object_sizes_last_time_[name]));
  INSTANCE_TYPE_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
#undef ADJUST_LAST_TIME_OBJECT_COUNT
  int index;
#define ADJUST_LAST_TIME_OBJECT_COUNT(name)               \
  index = FIRST_CODE_KIND_SUB_TYPE + Code::name;          \
  counters->count_of_CODE_TYPE_##name()->Increment(       \
      static_cast<int>(object_counts_[index]));           \
  counters->count_of_CODE_TYPE_##name()->Decrement(       \
      static_cast<int>(object_counts_last_time_[index])); \
  counters->size_of_CODE_TYPE_##name()->Increment(        \
      static_cast<int>(object_sizes_[index]));            \
  counters->size_of_CODE_TYPE_##name()->Decrement(        \
      static_cast<int>(object_sizes_last_time_[index]));
  CODE_KIND_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
#undef ADJUST_LAST_TIME_OBJECT_COUNT
#define ADJUST_LAST_TIME_OBJECT_COUNT(name)               \
  index = FIRST_FIXED_ARRAY_SUB_TYPE + name;              \
  counters->count_of_FIXED_ARRAY_##name()->Increment(     \
      static_cast<int>(object_counts_[index]));           \
  counters->count_of_FIXED_ARRAY_##name()->Decrement(     \
      static_cast<int>(object_counts_last_time_[index])); \
  counters->size_of_FIXED_ARRAY_##name()->Increment(      \
      static_cast<int>(object_sizes_[index]));            \
  counters->size_of_FIXED_ARRAY_##name()->Decrement(      \
      static_cast<int>(object_sizes_last_time_[index]));
  FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
#undef ADJUST_LAST_TIME_OBJECT_COUNT

  OS::MemCopy(object_counts_last_time_, object_counts_, sizeof(object_counts_));
  OS::MemCopy(object_sizes_last_time_, object_sizes_, sizeof(object_sizes_));
  ClearObjectStats();
}


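// RelocationLock serializes object relocation against the optimizing
// compiler thread while parallel recompilation is enabled.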
Heap::RelocationLock::RelocationLock(Heap* heap) : heap_(heap) {
  if (FLAG_parallel_recompilation) {
    heap_->relocation_mutex_->Lock();
#ifdef DEBUG
    heap_->relocation_mutex_locked_by_optimizer_thread_ =
        heap_->isolate()->optimizing_compiler_thread()->IsOptimizerThread();
#endif  // DEBUG
  }
}


} }  // namespace v8::internal