// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#include "accessors.h"
#include "bootstrapper.h"
#include "compilation-cache.h"
#include "deoptimizer.h"
#include "global-handles.h"
#include "heap-profiler.h"
#include "incremental-marking.h"
#include "mark-compact.h"
#include "objects-visiting.h"
#include "objects-visiting-inl.h"
#include "runtime-profiler.h"
#include "scopeinfo.h"
#include "store-buffer.h"
#include "v8threads.h"
#include "vm-state-inl.h"
#if V8_TARGET_ARCH_ARM && !V8_INTERPRETED_REGEXP
#include "regexp-macro-assembler.h"
#include "arm/regexp-macro-assembler-arm.h"
#endif
#if V8_TARGET_ARCH_MIPS && !V8_INTERPRETED_REGEXP
#include "regexp-macro-assembler.h"
#include "mips/regexp-macro-assembler-mips.h"
#endif

namespace v8 {
namespace internal {

Heap::Heap()
    : isolate_(NULL),
// semispace_size_ should be a power of 2 and old_generation_size_ should be
// a multiple of Page::kPageSize.
#if defined(V8_TARGET_ARCH_X64)
#define LUMP_OF_MEMORY (2 * MB)
      code_range_size_(512*MB),
#else
#define LUMP_OF_MEMORY MB
      code_range_size_(0),
#endif
#if defined(ANDROID)
      reserved_semispace_size_(4 * Max(LUMP_OF_MEMORY, Page::kPageSize)),
      max_semispace_size_(4 * Max(LUMP_OF_MEMORY, Page::kPageSize)),
      initial_semispace_size_(Page::kPageSize),
      max_old_generation_size_(192*MB),
      max_executable_size_(max_old_generation_size_),
#else
      reserved_semispace_size_(8 * Max(LUMP_OF_MEMORY, Page::kPageSize)),
      max_semispace_size_(8 * Max(LUMP_OF_MEMORY, Page::kPageSize)),
      initial_semispace_size_(Page::kPageSize),
      max_old_generation_size_(700ul * LUMP_OF_MEMORY),
      max_executable_size_(256l * LUMP_OF_MEMORY),
#endif
// Variables set based on semispace_size_ and old_generation_size_ in
// ConfigureHeap (survived_since_last_expansion_, external_allocation_limit_)
// Will be 4 * reserved_semispace_size_ to ensure that young
// generation can be aligned to its size.
      survived_since_last_expansion_(0),
      always_allocate_scope_depth_(0),
      linear_allocation_scope_depth_(0),
      contexts_disposed_(0),
      flush_monomorphic_ics_(false),
      scan_on_scavenge_pages_(0),
      old_pointer_space_(NULL),
      old_data_space_(NULL),
      gc_state_(NOT_IN_GC),
      gc_post_processing_depth_(0),
      remembered_unmapped_pages_index_(0),
      unflattened_strings_length_(0),
      allocation_allowed_(true),
      allocation_timeout_(0),
      disallow_allocation_failure_(false),
      new_space_high_promotion_mode_active_(false),
      old_gen_promotion_limit_(kMinimumPromotionLimit),
      old_gen_allocation_limit_(kMinimumAllocationLimit),
      old_gen_limit_factor_(1),
      size_of_old_gen_at_last_old_space_gc_(0),
      external_allocation_limit_(0),
      amount_of_external_allocated_memory_(0),
      amount_of_external_allocated_memory_at_last_global_gc_(0),
      old_gen_exhausted_(false),
      store_buffer_rebuilder_(store_buffer()),
      hidden_string_(NULL),
      global_gc_prologue_callback_(NULL),
      global_gc_epilogue_callback_(NULL),
      gc_safe_size_of_old_object_(NULL),
      total_regexp_code_generated_(0),
      young_survivors_after_last_gc_(0),
      high_survival_rate_period_length_(0),
      low_survival_rate_period_length_(0),
      previous_survival_rate_trend_(Heap::STABLE),
      survival_rate_trend_(Heap::STABLE),
      total_gc_time_ms_(0.0),
      max_alive_after_gc_(0),
      min_in_mutator_(kMaxInt),
      alive_after_last_gc_(0),
      last_gc_end_timestamp_(0.0),
      incremental_marking_(this),
      number_idle_notifications_(0),
      last_idle_notification_gc_count_(0),
      last_idle_notification_gc_count_init_(false),
      mark_sweeps_since_idle_round_started_(0),
      ms_count_at_last_idle_notification_(0),
      gc_count_at_last_idle_gc_(0),
      scavenges_since_last_idle_round_(kIdleScavengeThreshold),
      gcs_since_last_deopt_(0),
      no_weak_embedded_maps_verification_scope_depth_(0),
      promotion_queue_(this),
      chunks_queued_for_free_(NULL),
      relocation_mutex_(NULL) {
  // Allow build-time customization of the max semispace size. Building
  // V8 with snapshots and a non-default max semispace size is much
  // easier if you can define it as part of the build environment.
#if defined(V8_MAX_SEMISPACE_SIZE)
  max_semispace_size_ = reserved_semispace_size_ = V8_MAX_SEMISPACE_SIZE;
#endif

  intptr_t max_virtual = OS::MaxVirtualMemory();

  if (max_virtual > 0) {
    if (code_range_size_ > 0) {
      // Reserve no more than 1/8 of the memory for the code range.
      code_range_size_ = Min(code_range_size_, max_virtual >> 3);
    }
  }

  memset(roots_, 0, sizeof(roots_[0]) * kRootListLength);
  native_contexts_list_ = NULL;
  mark_compact_collector_.heap_ = this;
  external_string_table_.heap_ = this;
  // Put a dummy entry in the remembered pages so we can find the list in the
  // minidump even if there are no real unmapped pages.
  RememberUnmappedPage(NULL, false);

  ClearObjectStats(true);
}

intptr_t Heap::Capacity() {
  if (!HasBeenSetUp()) return 0;

  return new_space_.Capacity() +
      old_pointer_space_->Capacity() +
      old_data_space_->Capacity() +
      code_space_->Capacity() +
      map_space_->Capacity() +
      cell_space_->Capacity();
}

intptr_t Heap::CommittedMemory() {
  if (!HasBeenSetUp()) return 0;

  return new_space_.CommittedMemory() +
      old_pointer_space_->CommittedMemory() +
      old_data_space_->CommittedMemory() +
      code_space_->CommittedMemory() +
      map_space_->CommittedMemory() +
      cell_space_->CommittedMemory() +
      lo_space_->Size();
}

size_t Heap::CommittedPhysicalMemory() {
  if (!HasBeenSetUp()) return 0;

  return new_space_.CommittedPhysicalMemory() +
      old_pointer_space_->CommittedPhysicalMemory() +
      old_data_space_->CommittedPhysicalMemory() +
      code_space_->CommittedPhysicalMemory() +
      map_space_->CommittedPhysicalMemory() +
      cell_space_->CommittedPhysicalMemory() +
      lo_space_->CommittedPhysicalMemory();
}

intptr_t Heap::CommittedMemoryExecutable() {
  if (!HasBeenSetUp()) return 0;

  return isolate()->memory_allocator()->SizeExecutable();
}

intptr_t Heap::Available() {
  if (!HasBeenSetUp()) return 0;

  return new_space_.Available() +
      old_pointer_space_->Available() +
      old_data_space_->Available() +
      code_space_->Available() +
      map_space_->Available() +
      cell_space_->Available();
}

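// Note the asymmetry between the accounting functions above: Capacity() and
// Available() sum only the new space and the five paged spaces, while
// CommittedMemory() and CommittedPhysicalMemory() also include the large
// object space, whose pages are sized per object and so have no fixed
// capacity.
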
bool Heap::HasBeenSetUp() {
  return old_pointer_space_ != NULL &&
         old_data_space_ != NULL &&
         code_space_ != NULL &&
         map_space_ != NULL &&
         cell_space_ != NULL &&
         lo_space_ != NULL;
}

int Heap::GcSafeSizeOfOldObject(HeapObject* object) {
  if (IntrusiveMarking::IsMarked(object)) {
    return IntrusiveMarking::SizeOfMarkedObject(object);
  }
  return object->SizeFromMap(object->map());
}

GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space,
                                              const char** reason) {
  // Is global GC requested?
  if (space != NEW_SPACE) {
    isolate_->counters()->gc_compactor_caused_by_request()->Increment();
    *reason = "GC in old space requested";
    return MARK_COMPACTOR;
  }

  if (FLAG_gc_global || (FLAG_stress_compaction && (gc_count_ & 1) != 0)) {
    *reason = "GC in old space forced by flags";
    return MARK_COMPACTOR;
  }

  // Is enough data promoted to justify a global GC?
  if (OldGenerationPromotionLimitReached()) {
    isolate_->counters()->gc_compactor_caused_by_promoted_data()->Increment();
    *reason = "promotion limit reached";
    return MARK_COMPACTOR;
  }

  // Have allocation in OLD and LO failed?
  if (old_gen_exhausted_) {
    isolate_->counters()->
        gc_compactor_caused_by_oldspace_exhaustion()->Increment();
    *reason = "old generations exhausted";
    return MARK_COMPACTOR;
  }

  // Is there enough space left in OLD to guarantee that a scavenge can
  // succeed?
  //
  // Note that MemoryAllocator->MaxAvailable() undercounts the memory available
  // for object promotion. It counts only the bytes that the memory
  // allocator has not yet allocated from the OS and assigned to any space,
  // and does not count available bytes already in the old space or code
  // space. Undercounting is safe---we may get an unrequested full GC when
  // a scavenge would have succeeded.
  if (isolate_->memory_allocator()->MaxAvailable() <= new_space_.Size()) {
    isolate_->counters()->
        gc_compactor_caused_by_oldspace_exhaustion()->Increment();
    *reason = "scavenge might not succeed";
    return MARK_COMPACTOR;
  }

  // Default
  *reason = NULL;
  return SCAVENGER;
}

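// Taken together, the checks above mean SelectGarbageCollector prefers the
// cheap SCAVENGER and escalates to MARK_COMPACTOR only for an explicit
// old-space request, a forcing flag, a reached promotion limit, exhausted
// old generations, or too little headroom to guarantee that promotion
// during a scavenge could succeed.
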
// TODO(1238405): Combine the infrastructure for --heap-stats and
// --log-gc to avoid the complicated preprocessor and flag testing.
void Heap::ReportStatisticsBeforeGC() {
  // Heap::ReportHeapStatistics will also log NewSpace statistics when
  // --log-gc is set. The following logic is used to avoid double
  // logging.
#ifdef DEBUG
  if (FLAG_heap_stats || FLAG_log_gc) new_space_.CollectStatistics();
  if (FLAG_heap_stats) {
    ReportHeapStatistics("Before GC");
  } else if (FLAG_log_gc) {
    new_space_.ReportStatistics();
  }
  if (FLAG_heap_stats || FLAG_log_gc) new_space_.ClearHistograms();
#else
  if (FLAG_log_gc) {
    new_space_.CollectStatistics();
    new_space_.ReportStatistics();
    new_space_.ClearHistograms();
  }
#endif  // DEBUG
}

void Heap::PrintShortHeapStatistics() {
  if (!FLAG_trace_gc_verbose) return;
  PrintPID("Memory allocator,   used: %6" V8_PTR_PREFIX "d KB"
               ", available: %6" V8_PTR_PREFIX "d KB\n",
           isolate_->memory_allocator()->Size() / KB,
           isolate_->memory_allocator()->Available() / KB);
  PrintPID("New space,          used: %6" V8_PTR_PREFIX "d KB"
               ", available: %6" V8_PTR_PREFIX "d KB"
               ", committed: %6" V8_PTR_PREFIX "d KB\n",
           new_space_.Size() / KB,
           new_space_.Available() / KB,
           new_space_.CommittedMemory() / KB);
  PrintPID("Old pointers,       used: %6" V8_PTR_PREFIX "d KB"
               ", available: %6" V8_PTR_PREFIX "d KB"
               ", committed: %6" V8_PTR_PREFIX "d KB\n",
           old_pointer_space_->SizeOfObjects() / KB,
           old_pointer_space_->Available() / KB,
           old_pointer_space_->CommittedMemory() / KB);
  PrintPID("Old data space,     used: %6" V8_PTR_PREFIX "d KB"
               ", available: %6" V8_PTR_PREFIX "d KB"
               ", committed: %6" V8_PTR_PREFIX "d KB\n",
           old_data_space_->SizeOfObjects() / KB,
           old_data_space_->Available() / KB,
           old_data_space_->CommittedMemory() / KB);
  PrintPID("Code space,         used: %6" V8_PTR_PREFIX "d KB"
               ", available: %6" V8_PTR_PREFIX "d KB"
               ", committed: %6" V8_PTR_PREFIX "d KB\n",
           code_space_->SizeOfObjects() / KB,
           code_space_->Available() / KB,
           code_space_->CommittedMemory() / KB);
  PrintPID("Map space,          used: %6" V8_PTR_PREFIX "d KB"
               ", available: %6" V8_PTR_PREFIX "d KB"
               ", committed: %6" V8_PTR_PREFIX "d KB\n",
           map_space_->SizeOfObjects() / KB,
           map_space_->Available() / KB,
           map_space_->CommittedMemory() / KB);
  PrintPID("Cell space,         used: %6" V8_PTR_PREFIX "d KB"
               ", available: %6" V8_PTR_PREFIX "d KB"
               ", committed: %6" V8_PTR_PREFIX "d KB\n",
           cell_space_->SizeOfObjects() / KB,
           cell_space_->Available() / KB,
           cell_space_->CommittedMemory() / KB);
  PrintPID("Large object space, used: %6" V8_PTR_PREFIX "d KB"
               ", available: %6" V8_PTR_PREFIX "d KB"
               ", committed: %6" V8_PTR_PREFIX "d KB\n",
           lo_space_->SizeOfObjects() / KB,
           lo_space_->Available() / KB,
           lo_space_->CommittedMemory() / KB);
  PrintPID("All spaces,         used: %6" V8_PTR_PREFIX "d KB"
               ", available: %6" V8_PTR_PREFIX "d KB"
               ", committed: %6" V8_PTR_PREFIX "d KB\n",
           this->SizeOfObjects() / KB,
           this->Available() / KB,
           this->CommittedMemory() / KB);
  PrintPID("Total time spent in GC  : %.1f ms\n", total_gc_time_ms_);
}

// TODO(1238405): Combine the infrastructure for --heap-stats and
// --log-gc to avoid the complicated preprocessor and flag testing.
void Heap::ReportStatisticsAfterGC() {
  // Similar to the before GC, we use some complicated logic to ensure that
  // NewSpace statistics are logged exactly once when --log-gc is turned on.
#ifdef DEBUG
  if (FLAG_heap_stats) {
    new_space_.CollectStatistics();
    ReportHeapStatistics("After GC");
  } else if (FLAG_log_gc) {
    new_space_.ReportStatistics();
  }
#else
  if (FLAG_log_gc) new_space_.ReportStatistics();
#endif  // DEBUG
}

void Heap::GarbageCollectionPrologue() {
  isolate_->transcendental_cache()->Clear();
  ClearJSFunctionResultCaches();
  gc_count_++;
  unflattened_strings_length_ = 0;

  if (FLAG_flush_code && FLAG_flush_code_incrementally) {
    mark_compact_collector()->EnableCodeFlushing(true);
  }

#ifdef VERIFY_HEAP
  if (FLAG_verify_heap) {
    Verify();
  }
#endif

#ifdef DEBUG
  ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
  allow_allocation(false);

  if (FLAG_gc_verbose) Print();

  ReportStatisticsBeforeGC();
#endif  // DEBUG

  store_buffer()->GCPrologue();
}

intptr_t Heap::SizeOfObjects() {
  intptr_t total = 0;
  AllSpaces spaces(this);
  for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
    total += space->SizeOfObjects();
  }
  return total;
}

void Heap::RepairFreeListsAfterBoot() {
  PagedSpaces spaces(this);
  for (PagedSpace* space = spaces.next();
       space != NULL;
       space = spaces.next()) {
    space->RepairFreeListsAfterBoot();
  }
}

void Heap::GarbageCollectionEpilogue() {
  store_buffer()->GCEpilogue();

  // In release mode, we only zap the from space under heap verification.
  if (Heap::ShouldZapGarbage()) {
    ZapFromSpace();
  }

#ifdef VERIFY_HEAP
  if (FLAG_verify_heap) {
    Verify();
  }
#endif

#ifdef DEBUG
  allow_allocation(true);
  if (FLAG_print_global_handles) isolate_->global_handles()->Print();
  if (FLAG_print_handles) PrintHandles();
  if (FLAG_gc_verbose) Print();
  if (FLAG_code_stats) ReportCodeStatistics("After GC");
#endif

  if (FLAG_deopt_every_n_garbage_collections > 0) {
    if (++gcs_since_last_deopt_ == FLAG_deopt_every_n_garbage_collections) {
      Deoptimizer::DeoptimizeAll(isolate());
      gcs_since_last_deopt_ = 0;
    }
  }

  isolate_->counters()->alive_after_last_gc()->Set(
      static_cast<int>(SizeOfObjects()));

  isolate_->counters()->string_table_capacity()->Set(
      string_table()->Capacity());
  isolate_->counters()->number_of_symbols()->Set(
      string_table()->NumberOfElements());

  if (CommittedMemory() > 0) {
    isolate_->counters()->external_fragmentation_total()->AddSample(
        static_cast<int>(100 - (SizeOfObjects() * 100.0) / CommittedMemory()));

    isolate_->counters()->heap_fraction_map_space()->AddSample(
        static_cast<int>(
            (map_space()->CommittedMemory() * 100.0) / CommittedMemory()));
    isolate_->counters()->heap_fraction_cell_space()->AddSample(
        static_cast<int>(
            (cell_space()->CommittedMemory() * 100.0) / CommittedMemory()));

    isolate_->counters()->heap_sample_total_committed()->AddSample(
        static_cast<int>(CommittedMemory() / KB));
    isolate_->counters()->heap_sample_total_used()->AddSample(
        static_cast<int>(SizeOfObjects() / KB));
    isolate_->counters()->heap_sample_map_space_committed()->AddSample(
        static_cast<int>(map_space()->CommittedMemory() / KB));
    isolate_->counters()->heap_sample_cell_space_committed()->AddSample(
        static_cast<int>(cell_space()->CommittedMemory() / KB));
  }

#define UPDATE_COUNTERS_FOR_SPACE(space)                                       \
  isolate_->counters()->space##_bytes_available()->Set(                        \
      static_cast<int>(space()->Available()));                                 \
  isolate_->counters()->space##_bytes_committed()->Set(                        \
      static_cast<int>(space()->CommittedMemory()));                           \
  isolate_->counters()->space##_bytes_used()->Set(                             \
      static_cast<int>(space()->SizeOfObjects()));
#define UPDATE_FRAGMENTATION_FOR_SPACE(space)                                  \
  if (space()->CommittedMemory() > 0) {                                        \
    isolate_->counters()->external_fragmentation_##space()->AddSample(         \
        static_cast<int>(100 -                                                 \
            (space()->SizeOfObjects() * 100.0) / space()->CommittedMemory())); \
  }
#define UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(space)                     \
  UPDATE_COUNTERS_FOR_SPACE(space)                                             \
  UPDATE_FRAGMENTATION_FOR_SPACE(space)

  UPDATE_COUNTERS_FOR_SPACE(new_space)
  UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(old_pointer_space)
  UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(old_data_space)
  UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(code_space)
  UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(map_space)
  UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(cell_space)
  UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(lo_space)
#undef UPDATE_COUNTERS_FOR_SPACE
#undef UPDATE_FRAGMENTATION_FOR_SPACE
#undef UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE

#if defined(DEBUG)
  ReportStatisticsAfterGC();
#endif  // DEBUG
#ifdef ENABLE_DEBUGGER_SUPPORT
  isolate_->debug()->AfterGarbageCollection();
#endif  // ENABLE_DEBUGGER_SUPPORT

  error_object_list_.DeferredFormatStackTrace(isolate());
}

void Heap::CollectAllGarbage(int flags, const char* gc_reason) {
  // Since we are ignoring the return value, the exact choice of space does
  // not matter, so long as we do not specify NEW_SPACE, which would not
  // cause a full GC.
  mark_compact_collector_.SetFlags(flags);
  CollectGarbage(OLD_POINTER_SPACE, gc_reason);
  mark_compact_collector_.SetFlags(kNoGCFlags);
}

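// A typical call site passes kNoGCFlags plus a human-readable reason that
// shows up in --trace-gc output; illustrative only:
//
//   heap->CollectAllGarbage(Heap::kNoGCFlags, "low-memory notification");
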
void Heap::CollectAllAvailableGarbage(const char* gc_reason) {
  // Since we are ignoring the return value, the exact choice of space does
  // not matter, so long as we do not specify NEW_SPACE, which would not
  // cause a full GC.
  // Major GC would invoke weak handle callbacks on weakly reachable
  // handles, but won't collect weakly reachable objects until next
  // major GC. Therefore if we collect aggressively and a weak handle callback
  // has been invoked, we rerun major GC to release objects which become
  // garbage.
  // Note: as weak callbacks can execute arbitrary code, we cannot
  // hope that eventually there will be no weak callback invocations.
  // Therefore stop recollecting after several attempts.
  mark_compact_collector()->SetFlags(kMakeHeapIterableMask |
                                     kReduceMemoryFootprintMask);
  isolate_->compilation_cache()->Clear();
  const int kMaxNumberOfAttempts = 7;
  for (int attempt = 0; attempt < kMaxNumberOfAttempts; attempt++) {
    if (!CollectGarbage(OLD_POINTER_SPACE, MARK_COMPACTOR, gc_reason, NULL)) {
      break;
    }
  }
  mark_compact_collector()->SetFlags(kNoGCFlags);
  incremental_marking()->UncommitMarkingDeque();
}

bool Heap::CollectGarbage(AllocationSpace space,
                          GarbageCollector collector,
                          const char* gc_reason,
                          const char* collector_reason) {
  // The VM is in the GC state until exiting this function.
  VMState<GC> state(isolate_);

#ifdef DEBUG
  // Reset the allocation timeout to the GC interval, but make sure to
  // allow at least a few allocations after a collection. The reason
  // for this is that we have a lot of allocation sequences and we
  // assume that a garbage collection will allow the subsequent
  // allocation attempts to go through.
  allocation_timeout_ = Max(6, FLAG_gc_interval);
#endif

  if (collector == SCAVENGER && !incremental_marking()->IsStopped()) {
    if (FLAG_trace_incremental_marking) {
      PrintF("[IncrementalMarking] Scavenge during marking.\n");
    }
  }

  if (collector == MARK_COMPACTOR &&
      !mark_compact_collector()->abort_incremental_marking() &&
      !incremental_marking()->IsStopped() &&
      !incremental_marking()->should_hurry() &&
      FLAG_incremental_marking_steps) {
    // Make progress in incremental marking.
    const intptr_t kStepSizeWhenDelayedByScavenge = 1 * MB;
    incremental_marking()->Step(kStepSizeWhenDelayedByScavenge,
                                IncrementalMarking::NO_GC_VIA_STACK_GUARD);
    if (!incremental_marking()->IsComplete()) {
      if (FLAG_trace_incremental_marking) {
        PrintF("[IncrementalMarking] Delaying MarkSweep.\n");
      }
      collector = SCAVENGER;
      collector_reason = "incremental marking delaying mark-sweep";
    }
  }

  bool next_gc_likely_to_collect_more = false;

  { GCTracer tracer(this, gc_reason, collector_reason);
    GarbageCollectionPrologue();
    // The GC count was incremented in the prologue. Tell the tracer about
    // it.
    tracer.set_gc_count(gc_count_);

    // Tell the tracer which collector we've selected.
    tracer.set_collector(collector);

    {
      HistogramTimerScope histogram_timer_scope(
          (collector == SCAVENGER) ? isolate_->counters()->gc_scavenger()
                                   : isolate_->counters()->gc_compactor());
      next_gc_likely_to_collect_more =
          PerformGarbageCollection(collector, &tracer);
    }

    GarbageCollectionEpilogue();
  }

  // Start incremental marking for the next cycle. The heap snapshot
  // generator needs incremental marking to stay off after it aborted.
  if (!mark_compact_collector()->abort_incremental_marking() &&
      incremental_marking()->IsStopped() &&
      incremental_marking()->WorthActivating() &&
      NextGCIsLikelyToBeFull()) {
    incremental_marking()->Start();
  }

  return next_gc_likely_to_collect_more;
}

void Heap::PerformScavenge() {
  GCTracer tracer(this, NULL, NULL);
  if (incremental_marking()->IsStopped()) {
    PerformGarbageCollection(SCAVENGER, &tracer);
  } else {
    PerformGarbageCollection(MARK_COMPACTOR, &tracer);
  }
}

void Heap::MoveElements(FixedArray* array,
                        int dst_index,
                        int src_index,
                        int len) {
  if (len == 0) return;

  ASSERT(array->map() != HEAP->fixed_cow_array_map());
  Object** dst_objects = array->data_start() + dst_index;
  OS::MemMove(dst_objects,
              array->data_start() + src_index,
              len * kPointerSize);
  if (!InNewSpace(array)) {
    for (int i = 0; i < len; i++) {
      // TODO(hpayer): check store buffer for entries
      if (InNewSpace(dst_objects[i])) {
        RecordWrite(array->address(), array->OffsetOfElementAt(dst_index + i));
      }
    }
  }
  incremental_marking()->RecordWrites(array);
}

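// MoveElements copies with OS::MemMove, which bypasses the write barrier;
// hence the explicit RecordWrite calls above for any old-to-new pointers,
// and the RecordWrites notification so the incremental marker does not lose
// track of the moved values.
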
#ifdef VERIFY_HEAP
// Helper class for verifying the string table.
class StringTableVerifier : public ObjectVisitor {
 public:
  void VisitPointers(Object** start, Object** end) {
    // Visit all HeapObject pointers in [start, end).
    for (Object** p = start; p < end; p++) {
      if ((*p)->IsHeapObject()) {
        // Check that the string is actually internalized.
        CHECK((*p)->IsTheHole() || (*p)->IsUndefined() ||
              (*p)->IsInternalizedString());
      }
    }
  }
};


static void VerifyStringTable() {
  StringTableVerifier verifier;
  HEAP->string_table()->IterateElements(&verifier);
}
#endif  // VERIFY_HEAP

static bool AbortIncrementalMarkingAndCollectGarbage(
    Heap* heap,
    AllocationSpace space,
    const char* gc_reason = NULL) {
  heap->mark_compact_collector()->SetFlags(Heap::kAbortIncrementalMarkingMask);
  bool result = heap->CollectGarbage(space, gc_reason);
  heap->mark_compact_collector()->SetFlags(Heap::kNoGCFlags);
  return result;
}

void Heap::ReserveSpace(
    int *sizes,
    Address *locations_out) {
  bool gc_performed = true;
  int counter = 0;
  static const int kThreshold = 20;
  while (gc_performed && counter++ < kThreshold) {
    gc_performed = false;
    ASSERT(NEW_SPACE == FIRST_PAGED_SPACE - 1);
    for (int space = NEW_SPACE; space <= LAST_PAGED_SPACE; space++) {
      if (sizes[space] != 0) {
        MaybeObject* allocation;
        if (space == NEW_SPACE) {
          allocation = new_space()->AllocateRaw(sizes[space]);
        } else {
          allocation = paged_space(space)->AllocateRaw(sizes[space]);
        }
        FreeListNode* node;
        if (!allocation->To<FreeListNode>(&node)) {
          if (space == NEW_SPACE) {
            Heap::CollectGarbage(NEW_SPACE,
                                 "failed to reserve space in the new space");
          } else {
            AbortIncrementalMarkingAndCollectGarbage(
                this,
                static_cast<AllocationSpace>(space),
                "failed to reserve space in paged space");
          }
          gc_performed = true;
          break;
        } else {
          // Mark with a free list node, in case we have a GC before
          // reserving the spots.
          node->set_size(this, sizes[space]);
          locations_out[space] = node->address();
        }
      }
    }
  }

  if (gc_performed) {
    // Failed to reserve the space after several attempts.
    V8::FatalProcessOutOfMemory("Heap::ReserveSpace");
  }
}

void Heap::EnsureFromSpaceIsCommitted() {
  if (new_space_.CommitFromSpaceIfNeeded()) return;

  // Committing memory to from space failed.
  // Memory is exhausted and we will die.
  V8::FatalProcessOutOfMemory("Committing semi space failed.");
}

void Heap::ClearJSFunctionResultCaches() {
  if (isolate_->bootstrapper()->IsActive()) return;

  Object* context = native_contexts_list_;
  while (!context->IsUndefined()) {
    // Get the caches for this context. GC can happen when the context
    // is not fully initialized, so the caches can be undefined.
    Object* caches_or_undefined =
        Context::cast(context)->get(Context::JSFUNCTION_RESULT_CACHES_INDEX);
    if (!caches_or_undefined->IsUndefined()) {
      FixedArray* caches = FixedArray::cast(caches_or_undefined);
      // Clear the caches:
      int length = caches->length();
      for (int i = 0; i < length; i++) {
        JSFunctionResultCache::cast(caches->get(i))->Clear();
      }
    }
    // Get the next context:
    context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
  }
}

void Heap::ClearNormalizedMapCaches() {
  if (isolate_->bootstrapper()->IsActive() &&
      !incremental_marking()->IsMarking()) {
    return;
  }

  Object* context = native_contexts_list_;
  while (!context->IsUndefined()) {
    // GC can happen when the context is not fully initialized,
    // so the cache can be undefined.
    Object* cache =
        Context::cast(context)->get(Context::NORMALIZED_MAP_CACHE_INDEX);
    if (!cache->IsUndefined()) {
      NormalizedMapCache::cast(cache)->Clear();
    }
    context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
  }
}

void Heap::UpdateSurvivalRateTrend(int start_new_space_size) {
  double survival_rate =
      (static_cast<double>(young_survivors_after_last_gc_) * 100) /
      start_new_space_size;

  if (survival_rate > kYoungSurvivalRateHighThreshold) {
    high_survival_rate_period_length_++;
  } else {
    high_survival_rate_period_length_ = 0;
  }

  if (survival_rate < kYoungSurvivalRateLowThreshold) {
    low_survival_rate_period_length_++;
  } else {
    low_survival_rate_period_length_ = 0;
  }

  double survival_rate_diff = survival_rate_ - survival_rate;

  if (survival_rate_diff > kYoungSurvivalRateAllowedDeviation) {
    set_survival_rate_trend(DECREASING);
  } else if (survival_rate_diff < -kYoungSurvivalRateAllowedDeviation) {
    set_survival_rate_trend(INCREASING);
  } else {
    set_survival_rate_trend(STABLE);
  }

  survival_rate_ = survival_rate;
}

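// Worked example: if 1 MB out of a 4 MB new space survived the last
// scavenge, survival_rate is (1 * 100) / 4 = 25. Whether 25 counts as high
// or low depends on kYoungSurvivalRateHighThreshold and
// kYoungSurvivalRateLowThreshold (defined with the Heap class), and the
// trend compares the new rate against the previous one, with
// kYoungSurvivalRateAllowedDeviation as slack.
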
bool Heap::PerformGarbageCollection(GarbageCollector collector,
                                    GCTracer* tracer) {
  bool next_gc_likely_to_collect_more = false;

  if (collector != SCAVENGER) {
    PROFILE(isolate_, CodeMovingGCEvent());
  }

#ifdef VERIFY_HEAP
  if (FLAG_verify_heap) {
    VerifyStringTable();
  }
#endif

  GCType gc_type =
      collector == MARK_COMPACTOR ? kGCTypeMarkSweepCompact : kGCTypeScavenge;

  {
    GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
    VMState<EXTERNAL> state(isolate_);
    CallGCPrologueCallbacks(gc_type, kNoGCCallbackFlags);
  }

  EnsureFromSpaceIsCommitted();

  int start_new_space_size = Heap::new_space()->SizeAsInt();

  if (IsHighSurvivalRate()) {
    // We speed up the incremental marker if it is running so that it
    // does not fall behind the rate of promotion, which would cause a
    // constantly growing old space.
    incremental_marking()->NotifyOfHighPromotionRate();
  }

  if (collector == MARK_COMPACTOR) {
    // Perform mark-sweep with optional compaction.
    MarkCompact(tracer);

    bool high_survival_rate_during_scavenges = IsHighSurvivalRate() &&
        IsStableOrIncreasingSurvivalTrend();

    UpdateSurvivalRateTrend(start_new_space_size);

    size_of_old_gen_at_last_old_space_gc_ = PromotedSpaceSizeOfObjects();

    if (high_survival_rate_during_scavenges &&
        IsStableOrIncreasingSurvivalTrend()) {
      // Stable high survival rates of young objects both during partial and
      // full collection indicate that the mutator is either building or
      // modifying a structure with a long lifetime.
      // In this case we aggressively raise old generation memory limits to
      // postpone subsequent mark-sweep collection and thus trade memory
      // space for the mutation speed.
      old_gen_limit_factor_ = 2;
    } else {
      old_gen_limit_factor_ = 1;
    }

    old_gen_promotion_limit_ =
        OldGenPromotionLimit(size_of_old_gen_at_last_old_space_gc_);
    old_gen_allocation_limit_ =
        OldGenAllocationLimit(size_of_old_gen_at_last_old_space_gc_);

    old_gen_exhausted_ = false;
  } else {
    Scavenge();

    UpdateSurvivalRateTrend(start_new_space_size);
  }

  if (!new_space_high_promotion_mode_active_ &&
      new_space_.Capacity() == new_space_.MaximumCapacity() &&
      IsStableOrIncreasingSurvivalTrend() &&
      IsHighSurvivalRate()) {
    // Stable high survival rates even though the young generation is at
    // maximum capacity indicate that most objects will be promoted.
    // To decrease scavenger pauses and final mark-sweep pauses, we
    // have to limit maximal capacity of the young generation.
    new_space_high_promotion_mode_active_ = true;
    if (FLAG_trace_gc) {
      PrintPID("Limited new space size due to high promotion rate: %d MB\n",
               new_space_.InitialCapacity() / MB);
    }
    // Support for global pre-tenuring uses the high promotion mode as a
    // heuristic indicator of whether to pretenure or not; we trigger
    // deoptimization here to take advantage of pre-tenuring as soon as
    // possible.
    if (FLAG_pretenure_literals) {
      isolate_->stack_guard()->FullDeopt();
    }
  } else if (new_space_high_promotion_mode_active_ &&
             IsStableOrDecreasingSurvivalTrend() &&
             IsLowSurvivalRate()) {
    // Decreasing low survival rates might indicate that the above high
    // promotion mode is over and we should allow the young generation
    // to grow again.
    new_space_high_promotion_mode_active_ = false;
    if (FLAG_trace_gc) {
      PrintPID("Unlimited new space size due to low promotion rate: %d MB\n",
               new_space_.MaximumCapacity() / MB);
    }
    // Trigger deoptimization here to turn off pre-tenuring as soon as
    // possible.
    if (FLAG_pretenure_literals) {
      isolate_->stack_guard()->FullDeopt();
    }
  }

  if (new_space_high_promotion_mode_active_ &&
      new_space_.Capacity() > new_space_.InitialCapacity()) {
    new_space_.Shrink();
  }

  isolate_->counters()->objs_since_last_young()->Set(0);

  // Callbacks that fire after this point might trigger nested GCs and
  // restart incremental marking, so the assertion can't be moved down.
  ASSERT(collector == SCAVENGER || incremental_marking()->IsStopped());

  gc_post_processing_depth_++;
  { DisableAssertNoAllocation allow_allocation;
    GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
    next_gc_likely_to_collect_more =
        isolate_->global_handles()->PostGarbageCollectionProcessing(
            collector, tracer);
  }
  gc_post_processing_depth_--;

  // Update relocatables.
  Relocatable::PostGarbageCollectionProcessing();

  if (collector == MARK_COMPACTOR) {
    // Register the amount of external allocated memory.
    amount_of_external_allocated_memory_at_last_global_gc_ =
        amount_of_external_allocated_memory_;
  }

  {
    GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
    VMState<EXTERNAL> state(isolate_);
    CallGCEpilogueCallbacks(gc_type);
  }

#ifdef VERIFY_HEAP
  if (FLAG_verify_heap) {
    VerifyStringTable();
  }
#endif

  return next_gc_likely_to_collect_more;
}

void Heap::CallGCPrologueCallbacks(GCType gc_type, GCCallbackFlags flags) {
  if (gc_type == kGCTypeMarkSweepCompact && global_gc_prologue_callback_) {
    global_gc_prologue_callback_();
  }
  for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
    if (gc_type & gc_prologue_callbacks_[i].gc_type) {
      gc_prologue_callbacks_[i].callback(gc_type, flags);
    }
  }
}

void Heap::CallGCEpilogueCallbacks(GCType gc_type) {
  for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
    if (gc_type & gc_epilogue_callbacks_[i].gc_type) {
      gc_epilogue_callbacks_[i].callback(gc_type, kNoGCCallbackFlags);
    }
  }
  if (gc_type == kGCTypeMarkSweepCompact && global_gc_epilogue_callback_) {
    global_gc_epilogue_callback_();
  }
}

void Heap::MarkCompact(GCTracer* tracer) {
  gc_state_ = MARK_COMPACT;
  LOG(isolate_, ResourceEvent("markcompact", "begin"));

  mark_compact_collector_.Prepare(tracer);

  ms_count_++;
  tracer->set_full_gc_count(ms_count_);

  MarkCompactPrologue();

  mark_compact_collector_.CollectGarbage();

  LOG(isolate_, ResourceEvent("markcompact", "end"));

  gc_state_ = NOT_IN_GC;

  isolate_->counters()->objs_since_last_full()->Set(0);

  contexts_disposed_ = 0;

  flush_monomorphic_ics_ = false;
}

void Heap::MarkCompactPrologue() {
  // At any old GC clear the keyed lookup cache to enable collection of unused
  // maps.
  isolate_->keyed_lookup_cache()->Clear();
  isolate_->context_slot_cache()->Clear();
  isolate_->descriptor_lookup_cache()->Clear();
  RegExpResultsCache::Clear(string_split_cache());
  RegExpResultsCache::Clear(regexp_multiple_cache());

  isolate_->compilation_cache()->MarkCompactPrologue();

  CompletelyClearInstanceofCache();

  FlushNumberStringCache();
  if (FLAG_cleanup_code_caches_at_gc) {
    polymorphic_code_cache()->set_cache(undefined_value());
  }

  ClearNormalizedMapCaches();
}

Object* Heap::FindCodeObject(Address a) {
  return isolate()->inner_pointer_to_code_cache()->
      GcSafeFindCodeForInnerPointer(a);
}

// Helper class for copying HeapObjects
class ScavengeVisitor: public ObjectVisitor {
 public:
  explicit ScavengeVisitor(Heap* heap) : heap_(heap) {}

  void VisitPointer(Object** p) { ScavengePointer(p); }

  void VisitPointers(Object** start, Object** end) {
    // Copy all HeapObject pointers in [start, end)
    for (Object** p = start; p < end; p++) ScavengePointer(p);
  }

 private:
  void ScavengePointer(Object** p) {
    Object* object = *p;
    if (!heap_->InNewSpace(object)) return;
    Heap::ScavengeObject(reinterpret_cast<HeapObject**>(p),
                         reinterpret_cast<HeapObject*>(object));
  }

  Heap* heap_;
};

#ifdef VERIFY_HEAP
// Visitor class to verify pointers in code or data space do not point into
// new space.
class VerifyNonPointerSpacePointersVisitor: public ObjectVisitor {
 public:
  void VisitPointers(Object** start, Object** end) {
    for (Object** current = start; current < end; current++) {
      if ((*current)->IsHeapObject()) {
        CHECK(!HEAP->InNewSpace(HeapObject::cast(*current)));
      }
    }
  }
};


static void VerifyNonPointerSpacePointers() {
  // Verify that there are no pointers to new space in spaces where we
  // do not expect them.
  VerifyNonPointerSpacePointersVisitor v;
  HeapObjectIterator code_it(HEAP->code_space());
  for (HeapObject* object = code_it.Next();
       object != NULL; object = code_it.Next())
    object->Iterate(&v);

  // The old data space was normally swept conservatively so that the iterator
  // doesn't work, so we normally skip the next bit.
  if (!HEAP->old_data_space()->was_swept_conservatively()) {
    HeapObjectIterator data_it(HEAP->old_data_space());
    for (HeapObject* object = data_it.Next();
         object != NULL; object = data_it.Next())
      object->Iterate(&v);
  }
}
#endif  // VERIFY_HEAP

void Heap::CheckNewSpaceExpansionCriteria() {
  if (new_space_.Capacity() < new_space_.MaximumCapacity() &&
      survived_since_last_expansion_ > new_space_.Capacity() &&
      !new_space_high_promotion_mode_active_) {
    // Grow the size of new space if there is room to grow, enough data
    // has survived scavenge since the last expansion and we are not in
    // high promotion mode.
    new_space_.Grow();
    survived_since_last_expansion_ = 0;
  }
}

static bool IsUnscavengedHeapObject(Heap* heap, Object** p) {
  return heap->InNewSpace(*p) &&
         !HeapObject::cast(*p)->map_word().IsForwardingAddress();
}

void Heap::ScavengeStoreBufferCallback(
    Heap* heap,
    MemoryChunk* page,
    StoreBufferEvent event) {
  heap->store_buffer_rebuilder_.Callback(page, event);
}

void StoreBufferRebuilder::Callback(MemoryChunk* page, StoreBufferEvent event) {
  if (event == kStoreBufferStartScanningPagesEvent) {
    start_of_current_page_ = NULL;
    current_page_ = NULL;
  } else if (event == kStoreBufferScanningPageEvent) {
    if (current_page_ != NULL) {
      // If this page already overflowed the store buffer during this iteration.
      if (current_page_->scan_on_scavenge()) {
        // Then we should wipe out the entries that have been added for it.
        store_buffer_->SetTop(start_of_current_page_);
      } else if (store_buffer_->Top() - start_of_current_page_ >=
                 (store_buffer_->Limit() - store_buffer_->Top()) >> 2) {
        // Did we find too many pointers in the previous page? The heuristic is
        // that no page can take more than 1/5 the remaining slots in the store
        // buffer.
        current_page_->set_scan_on_scavenge(true);
        store_buffer_->SetTop(start_of_current_page_);
      } else {
        // In this case the page we scanned took a reasonable number of slots in
        // the store buffer. It has now been rehabilitated and is no longer
        // marked scan_on_scavenge.
        ASSERT(!current_page_->scan_on_scavenge());
      }
    }
    start_of_current_page_ = store_buffer_->Top();
    current_page_ = page;
  } else if (event == kStoreBufferFullEvent) {
    // The current page overflowed the store buffer again. Wipe out its entries
    // in the store buffer and mark it scan-on-scavenge again. This may happen
    // several times while scanning.
    if (current_page_ == NULL) {
      // Store Buffer overflowed while scanning promoted objects. These are not
      // in any particular page, though they are likely to be clustered by the
      // allocation routines.
      store_buffer_->EnsureSpace(StoreBuffer::kStoreBufferSize);
    } else {
      // Store Buffer overflowed while scanning a particular old space page for
      // pointers to new space.
      ASSERT(current_page_ == page);
      ASSERT(page != NULL);
      current_page_->set_scan_on_scavenge(true);
      ASSERT(start_of_current_page_ != store_buffer_->Top());
      store_buffer_->SetTop(start_of_current_page_);
    }
  } else {
    UNREACHABLE();
  }
}

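// The ">> 2" above encodes the 1/5 heuristic from the comment: if the page
// just scanned contributed x entries and (Limit - Top) slots remain, it is
// retired to scan-on-scavenge once x >= (Limit - Top) / 4, which is the
// same as x being at least one fifth of x + (Limit - Top), the slots that
// were free before the page was scanned.
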
void PromotionQueue::Initialize() {
  // Assumes that a NewSpacePage exactly fits a number of promotion queue
  // entries (where each is a pair of intptr_t). This allows us to simplify
  // the test for when to switch pages.
  ASSERT((Page::kPageSize - MemoryChunk::kBodyOffset) % (2 * kPointerSize)
         == 0);
  limit_ = reinterpret_cast<intptr_t*>(heap_->new_space()->ToSpaceStart());
  front_ = rear_ =
      reinterpret_cast<intptr_t*>(heap_->new_space()->ToSpaceEnd());
  emergency_stack_ = NULL;
}

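// Each queue entry is an (object, size) pair, i.e. 2 * kPointerSize bytes,
// so the ASSERT above guarantees that a page boundary can never split an
// entry. The queue lives at the high end of to space and grows downward:
// front_ and rear_ start at ToSpaceEnd() while limit_ marks ToSpaceStart().
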
void PromotionQueue::RelocateQueueHead() {
  ASSERT(emergency_stack_ == NULL);

  Page* p = Page::FromAllocationTop(reinterpret_cast<Address>(rear_));
  intptr_t* head_start = rear_;
  intptr_t* head_end =
      Min(front_, reinterpret_cast<intptr_t*>(p->area_end()));

  int entries_count =
      static_cast<int>(head_end - head_start) / kEntrySizeInWords;

  emergency_stack_ = new List<Entry>(2 * entries_count);

  while (head_start != head_end) {
    int size = static_cast<int>(*(head_start++));
    HeapObject* obj = reinterpret_cast<HeapObject*>(*(head_start++));
    emergency_stack_->Add(Entry(obj, size));
  }
  rear_ = head_end;
}

class ScavengeWeakObjectRetainer : public WeakObjectRetainer {
 public:
  explicit ScavengeWeakObjectRetainer(Heap* heap) : heap_(heap) { }

  virtual Object* RetainAs(Object* object) {
    if (!heap_->InFromSpace(object)) {
      return object;
    }

    MapWord map_word = HeapObject::cast(object)->map_word();
    if (map_word.IsForwardingAddress()) {
      return map_word.ToForwardingAddress();
    }
    return NULL;
  }

 private:
  Heap* heap_;
};

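// By the time weak references are processed, every from-space object that
// was reached during the scavenge has had a forwarding pointer written into
// its map word by MigrateObject (below). An object without a forwarding
// address was never reached, so returning NULL here drops it from the list.
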
void Heap::Scavenge() {
  RelocationLock relocation_lock(this);

#ifdef VERIFY_HEAP
  if (FLAG_verify_heap) VerifyNonPointerSpacePointers();
#endif

  gc_state_ = SCAVENGE;

  // Implements Cheney's copying algorithm.
  LOG(isolate_, ResourceEvent("scavenge", "begin"));

  // Clear descriptor cache.
  isolate_->descriptor_lookup_cache()->Clear();

  // Used for updating survived_since_last_expansion_ at function end.
  intptr_t survived_watermark = PromotedSpaceSizeOfObjects();

  CheckNewSpaceExpansionCriteria();

  SelectScavengingVisitorsTable();

  incremental_marking()->PrepareForScavenge();

  paged_space(OLD_DATA_SPACE)->EnsureSweeperProgress(new_space_.Size());
  paged_space(OLD_POINTER_SPACE)->EnsureSweeperProgress(new_space_.Size());

  // Flip the semispaces. After flipping, to space is empty, from space has
  // live objects.
  new_space_.Flip();
  new_space_.ResetAllocationInfo();

  // We need to sweep newly copied objects which can be either in the
  // to space or promoted to the old generation. For to-space
  // objects, we treat the bottom of the to space as a queue. Newly
  // copied and unswept objects lie between a 'front' mark and the
  // allocation pointer.
  //
  // Promoted objects can go into various old-generation spaces, and
  // can be allocated internally in the spaces (from the free list).
  // We treat the top of the to space as a queue of addresses of
  // promoted objects. The addresses of newly promoted and unswept
  // objects lie between a 'front' mark and a 'rear' mark that is
  // updated as a side effect of promoting an object.
  //
  // There is guaranteed to be enough room at the top of the to space
  // for the addresses of promoted objects: every object promoted
  // frees up its size in bytes from the top of the new space, and
  // objects are at least one pointer in size.
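  //
  // Informally, to space therefore looks roughly like this during the
  // scavenge:
  //
  //   ToSpaceStart()                                          ToSpaceEnd()
  //   | copied, unswept objects |    free    | promotion queue entries |
  //   ^ new_space_front         ^ top()      ^ rear_          front_ ^
  //
  // DoScavenge consumes the left-hand queue, promotion_queue()->remove()
  // the right-hand one.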
  Address new_space_front = new_space_.ToSpaceStart();
  promotion_queue_.Initialize();

#ifdef DEBUG
  store_buffer()->Clean();
#endif

  ScavengeVisitor scavenge_visitor(this);
  // Copy roots.
  IterateRoots(&scavenge_visitor, VISIT_ALL_IN_SCAVENGE);

  // Copy objects reachable from the old generation.
  {
    StoreBufferRebuildScope scope(this,
                                  store_buffer(),
                                  &ScavengeStoreBufferCallback);
    store_buffer()->IteratePointersToNewSpace(&ScavengeObject);
  }

  // Copy objects reachable from cells by scavenging cell values directly.
  HeapObjectIterator cell_iterator(cell_space_);
  for (HeapObject* heap_object = cell_iterator.Next();
       heap_object != NULL;
       heap_object = cell_iterator.Next()) {
    if (heap_object->IsJSGlobalPropertyCell()) {
      JSGlobalPropertyCell* cell = JSGlobalPropertyCell::cast(heap_object);
      Address value_address = cell->ValueAddress();
      scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(value_address));
    }
  }

  // Copy objects reachable from the code flushing candidates list.
  MarkCompactCollector* collector = mark_compact_collector();
  if (collector->is_code_flushing_enabled()) {
    collector->code_flusher()->IteratePointersToFromSpace(&scavenge_visitor);
  }

  // Scavenge object reachable from the native contexts list directly.
  scavenge_visitor.VisitPointer(BitCast<Object**>(&native_contexts_list_));

  new_space_front = DoScavenge(&scavenge_visitor, new_space_front);

  while (isolate()->global_handles()->IterateObjectGroups(
      &scavenge_visitor, &IsUnscavengedHeapObject)) {
    new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
  }
  isolate()->global_handles()->RemoveObjectGroups();
  isolate()->global_handles()->RemoveImplicitRefGroups();

  isolate_->global_handles()->IdentifyNewSpaceWeakIndependentHandles(
      &IsUnscavengedHeapObject);
  isolate_->global_handles()->IterateNewSpaceWeakIndependentRoots(
      &scavenge_visitor);
  new_space_front = DoScavenge(&scavenge_visitor, new_space_front);

  UpdateNewSpaceReferencesInExternalStringTable(
      &UpdateNewSpaceReferenceInExternalStringTableEntry);

  error_object_list_.UpdateReferencesInNewSpace(this);

  promotion_queue_.Destroy();

  if (!FLAG_watch_ic_patching) {
    isolate()->runtime_profiler()->UpdateSamplesAfterScavenge();
  }
  incremental_marking()->UpdateMarkingDequeAfterScavenge();

  ScavengeWeakObjectRetainer weak_object_retainer(this);
  ProcessWeakReferences(&weak_object_retainer);

  ASSERT(new_space_front == new_space_.top());

  // Set age mark.
  new_space_.set_age_mark(new_space_.top());

  new_space_.LowerInlineAllocationLimit(
      new_space_.inline_allocation_limit_step());

  // Update how much has survived scavenge.
  IncrementYoungSurvivorsCounter(static_cast<int>(
      (PromotedSpaceSizeOfObjects() - survived_watermark) + new_space_.Size()));

  LOG(isolate_, ResourceEvent("scavenge", "end"));

  gc_state_ = NOT_IN_GC;

  scavenges_since_last_idle_round_++;
}

String* Heap::UpdateNewSpaceReferenceInExternalStringTableEntry(Heap* heap,
                                                                Object** p) {
  MapWord first_word = HeapObject::cast(*p)->map_word();

  if (!first_word.IsForwardingAddress()) {
    // Unreachable external string can be finalized.
    heap->FinalizeExternalString(String::cast(*p));
    return NULL;
  }

  // String is still reachable.
  return String::cast(first_word.ToForwardingAddress());
}

void Heap::UpdateNewSpaceReferencesInExternalStringTable(
    ExternalStringTableUpdaterCallback updater_func) {
#ifdef VERIFY_HEAP
  if (FLAG_verify_heap) {
    external_string_table_.Verify();
  }
#endif

  if (external_string_table_.new_space_strings_.is_empty()) return;

  Object** start = &external_string_table_.new_space_strings_[0];
  Object** end = start + external_string_table_.new_space_strings_.length();
  Object** last = start;

  for (Object** p = start; p < end; ++p) {
    ASSERT(InFromSpace(*p));
    String* target = updater_func(this, p);

    if (target == NULL) continue;

    ASSERT(target->IsExternalString());

    if (InNewSpace(target)) {
      // String is still in new space. Update the table entry.
      *last = target;
      ++last;
    } else {
      // String got promoted. Move it to the old string list.
      external_string_table_.AddOldString(target);
    }
  }

  ASSERT(last <= end);
  external_string_table_.ShrinkNewStrings(static_cast<int>(last - start));
}

void Heap::UpdateReferencesInExternalStringTable(
    ExternalStringTableUpdaterCallback updater_func) {
  // Update old space string references.
  if (external_string_table_.old_space_strings_.length() > 0) {
    Object** start = &external_string_table_.old_space_strings_[0];
    Object** end = start + external_string_table_.old_space_strings_.length();
    for (Object** p = start; p < end; ++p) *p = updater_func(this, p);
  }

  UpdateNewSpaceReferencesInExternalStringTable(updater_func);
}

static Object* ProcessFunctionWeakReferences(Heap* heap,
                                             Object* function,
                                             WeakObjectRetainer* retainer,
                                             bool record_slots) {
  Object* undefined = heap->undefined_value();
  Object* head = undefined;
  JSFunction* tail = NULL;
  Object* candidate = function;
  while (candidate != undefined) {
    // Check whether to keep the candidate in the list.
    JSFunction* candidate_function = reinterpret_cast<JSFunction*>(candidate);
    Object* retain = retainer->RetainAs(candidate);
    if (retain != NULL) {
      if (head == undefined) {
        // First element in the list.
        head = retain;
      } else {
        // Subsequent elements in the list.
        ASSERT(tail != NULL);
        tail->set_next_function_link(retain);
        if (record_slots) {
          Object** next_function =
              HeapObject::RawField(tail, JSFunction::kNextFunctionLinkOffset);
          heap->mark_compact_collector()->RecordSlot(
              next_function, next_function, retain);
        }
      }
      // Retained function is new tail.
      candidate_function = reinterpret_cast<JSFunction*>(retain);
      tail = candidate_function;

      ASSERT(retain->IsUndefined() || retain->IsJSFunction());

      if (retain == undefined) break;
    }

    // Move to next element in the list.
    candidate = candidate_function->next_function_link();
  }

  // Terminate the list if there are one or more elements.
  if (tail != NULL) {
    tail->set_next_function_link(undefined);
  }

  return head;
}

void Heap::ProcessWeakReferences(WeakObjectRetainer* retainer) {
  Object* undefined = undefined_value();
  Object* head = undefined;
  Context* tail = NULL;
  Object* candidate = native_contexts_list_;

  // We don't record weak slots during marking or scavenges.
  // Instead we do it once when we complete a mark-compact cycle.
  // Note that the write barrier has no effect if we are already in the middle
  // of a compacting mark-sweep cycle and we have to record slots manually.
  bool record_slots =
      gc_state() == MARK_COMPACT &&
      mark_compact_collector()->is_compacting();

  while (candidate != undefined) {
    // Check whether to keep the candidate in the list.
    Context* candidate_context = reinterpret_cast<Context*>(candidate);
    Object* retain = retainer->RetainAs(candidate);
    if (retain != NULL) {
      if (head == undefined) {
        // First element in the list.
        head = retain;
      } else {
        // Subsequent elements in the list.
        ASSERT(tail != NULL);
        tail->set_unchecked(this,
                            Context::NEXT_CONTEXT_LINK,
                            retain,
                            UPDATE_WRITE_BARRIER);

        if (record_slots) {
          Object** next_context =
              HeapObject::RawField(
                  tail, FixedArray::SizeFor(Context::NEXT_CONTEXT_LINK));
          mark_compact_collector()->RecordSlot(
              next_context, next_context, retain);
        }
      }
      // Retained context is new tail.
      candidate_context = reinterpret_cast<Context*>(retain);
      tail = candidate_context;

      if (retain == undefined) break;

      // Process the weak list of optimized functions for the context.
      Object* function_list_head =
          ProcessFunctionWeakReferences(
              this,
              candidate_context->get(Context::OPTIMIZED_FUNCTIONS_LIST),
              retainer,
              record_slots);
      candidate_context->set_unchecked(this,
                                       Context::OPTIMIZED_FUNCTIONS_LIST,
                                       function_list_head,
                                       UPDATE_WRITE_BARRIER);
      if (record_slots) {
        Object** optimized_functions =
            HeapObject::RawField(
                tail, FixedArray::SizeFor(Context::OPTIMIZED_FUNCTIONS_LIST));
        mark_compact_collector()->RecordSlot(
            optimized_functions, optimized_functions, function_list_head);
      }
    }

    // Move to next element in the list.
    candidate = candidate_context->get(Context::NEXT_CONTEXT_LINK);
  }

  // Terminate the list if there are one or more elements.
  if (tail != NULL) {
    tail->set_unchecked(this,
                        Context::NEXT_CONTEXT_LINK,
                        Heap::undefined_value(),
                        UPDATE_WRITE_BARRIER);
  }

  // Update the head of the list of contexts.
  native_contexts_list_ = head;
}

void Heap::VisitExternalResources(v8::ExternalResourceVisitor* visitor) {
  AssertNoAllocation no_allocation;

  // Both the external string table and the string table may contain
  // external strings, but neither lists them exhaustively, nor is the
  // intersection set empty. Therefore we iterate over the external string
  // table first, ignoring internalized strings, and then over the
  // internalized string table.

  class ExternalStringTableVisitorAdapter : public ObjectVisitor {
   public:
    explicit ExternalStringTableVisitorAdapter(
        v8::ExternalResourceVisitor* visitor) : visitor_(visitor) {}
    virtual void VisitPointers(Object** start, Object** end) {
      for (Object** p = start; p < end; p++) {
        // Visit non-internalized external strings,
        // since internalized strings are listed in the string table.
        if (!(*p)->IsInternalizedString()) {
          ASSERT((*p)->IsExternalString());
          visitor_->VisitExternalString(Utils::ToLocal(
              Handle<String>(String::cast(*p))));
        }
      }
    }
   private:
    v8::ExternalResourceVisitor* visitor_;
  } external_string_table_visitor(visitor);

  external_string_table_.Iterate(&external_string_table_visitor);

  class StringTableVisitorAdapter : public ObjectVisitor {
   public:
    explicit StringTableVisitorAdapter(
        v8::ExternalResourceVisitor* visitor) : visitor_(visitor) {}
    virtual void VisitPointers(Object** start, Object** end) {
      for (Object** p = start; p < end; p++) {
        if ((*p)->IsExternalString()) {
          ASSERT((*p)->IsInternalizedString());
          visitor_->VisitExternalString(Utils::ToLocal(
              Handle<String>(String::cast(*p))));
        }
      }
    }
   private:
    v8::ExternalResourceVisitor* visitor_;
  } string_table_visitor(visitor);

  string_table()->IterateElements(&string_table_visitor);
}

class NewSpaceScavenger : public StaticNewSpaceVisitor<NewSpaceScavenger> {
 public:
  static inline void VisitPointer(Heap* heap, Object** p) {
    Object* object = *p;
    if (!heap->InNewSpace(object)) return;
    Heap::ScavengeObject(reinterpret_cast<HeapObject**>(p),
                         reinterpret_cast<HeapObject*>(object));
  }
};

Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
                         Address new_space_front) {
  do {
    SemiSpace::AssertValidRange(new_space_front, new_space_.top());
    // The addresses new_space_front and new_space_.top() define a
    // queue of unprocessed copied objects. Process them until the
    // queue is empty.
    while (new_space_front != new_space_.top()) {
      if (!NewSpacePage::IsAtEnd(new_space_front)) {
        HeapObject* object = HeapObject::FromAddress(new_space_front);
        new_space_front +=
            NewSpaceScavenger::IterateBody(object->map(), object);
      } else {
        new_space_front =
            NewSpacePage::FromLimit(new_space_front)->next_page()->area_start();
      }
    }

    // Promote and process all the to-be-promoted objects.
    {
      StoreBufferRebuildScope scope(this,
                                    store_buffer(),
                                    &ScavengeStoreBufferCallback);
      while (!promotion_queue()->is_empty()) {
        HeapObject* target;
        int size;
        promotion_queue()->remove(&target, &size);

        // Promoted object might be already partially visited
        // during old space pointer iteration. Thus we search specifically
        // for pointers to from semispace instead of looking for pointers
        // to new space.
        ASSERT(!target->IsMap());
        IterateAndMarkPointersToFromSpace(target->address(),
                                          target->address() + size,
                                          &ScavengeObject);
      }
    }

    // Take another spin if there are now unswept objects in new space
    // (there are currently no more unswept promoted objects).
  } while (new_space_front != new_space_.top());

  return new_space_front;
}

STATIC_ASSERT((FixedDoubleArray::kHeaderSize & kDoubleAlignmentMask) == 0);


INLINE(static HeapObject* EnsureDoubleAligned(Heap* heap,
                                              HeapObject* object,
                                              int size));

static HeapObject* EnsureDoubleAligned(Heap* heap,
                                       HeapObject* object,
                                       int size) {
  if ((OffsetFrom(object->address()) & kDoubleAlignmentMask) != 0) {
    heap->CreateFillerObjectAt(object->address(), kPointerSize);
    return HeapObject::FromAddress(object->address() + kPointerSize);
  } else {
    heap->CreateFillerObjectAt(object->address() + size - kPointerSize,
                               kPointerSize);
    return object;
  }
}

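// Illustrative example: kDoubleAlignmentMask is kDoubleAlignment - 1, so
// with 8-byte doubles and 4-byte pointers an object landing at ...0x4 gets
// a one-word filler at its start and is returned shifted to ...0x8, while
// an object already at ...0x8 keeps its address and the spare word becomes
// a filler at its end. Either way the caller allocated one extra
// pointer-sized word to make room (see EvacuateObject below).
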
enum LoggingAndProfiling {
  LOGGING_AND_PROFILING_ENABLED,
  LOGGING_AND_PROFILING_DISABLED
};


enum MarksHandling { TRANSFER_MARKS, IGNORE_MARKS };

template<MarksHandling marks_handling,
         LoggingAndProfiling logging_and_profiling_mode>
class ScavengingVisitor : public StaticVisitorBase {
 public:
  static void Initialize() {
    table_.Register(kVisitSeqOneByteString, &EvacuateSeqOneByteString);
    table_.Register(kVisitSeqTwoByteString, &EvacuateSeqTwoByteString);
    table_.Register(kVisitShortcutCandidate, &EvacuateShortcutCandidate);
    table_.Register(kVisitByteArray, &EvacuateByteArray);
    table_.Register(kVisitFixedArray, &EvacuateFixedArray);
    table_.Register(kVisitFixedDoubleArray, &EvacuateFixedDoubleArray);

    table_.Register(kVisitNativeContext,
                    &ObjectEvacuationStrategy<POINTER_OBJECT>::
                        template VisitSpecialized<Context::kSize>);

    table_.Register(kVisitConsString,
                    &ObjectEvacuationStrategy<POINTER_OBJECT>::
                        template VisitSpecialized<ConsString::kSize>);

    table_.Register(kVisitSlicedString,
                    &ObjectEvacuationStrategy<POINTER_OBJECT>::
                        template VisitSpecialized<SlicedString::kSize>);

    table_.Register(kVisitSymbol,
                    &ObjectEvacuationStrategy<POINTER_OBJECT>::
                        template VisitSpecialized<Symbol::kSize>);

    table_.Register(kVisitSharedFunctionInfo,
                    &ObjectEvacuationStrategy<POINTER_OBJECT>::
                        template VisitSpecialized<SharedFunctionInfo::kSize>);

    table_.Register(kVisitJSWeakMap,
                    &ObjectEvacuationStrategy<POINTER_OBJECT>::Visit);

    table_.Register(kVisitJSRegExp,
                    &ObjectEvacuationStrategy<POINTER_OBJECT>::Visit);

    if (marks_handling == IGNORE_MARKS) {
      table_.Register(kVisitJSFunction,
                      &ObjectEvacuationStrategy<POINTER_OBJECT>::
                          template VisitSpecialized<JSFunction::kSize>);
    } else {
      table_.Register(kVisitJSFunction, &EvacuateJSFunction);
    }

    table_.RegisterSpecializations<ObjectEvacuationStrategy<DATA_OBJECT>,
                                   kVisitDataObject,
                                   kVisitDataObjectGeneric>();

    table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>,
                                   kVisitJSObject,
                                   kVisitJSObjectGeneric>();

    table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>,
                                   kVisitStruct,
                                   kVisitStructGeneric>();
  }

  static VisitorDispatchTable<ScavengingCallback>* GetTable() {
    return &table_;
  }

 private:
  enum ObjectContents { DATA_OBJECT, POINTER_OBJECT };
  enum SizeRestriction { SMALL, UNKNOWN_SIZE };

  static void RecordCopiedObject(Heap* heap, HeapObject* obj) {
    bool should_record = false;
#ifdef DEBUG
    should_record = FLAG_heap_stats;
#endif
    should_record = should_record || FLAG_log_gc;
    if (should_record) {
      if (heap->new_space()->Contains(obj)) {
        heap->new_space()->RecordAllocation(obj);
      } else {
        heap->new_space()->RecordPromotion(obj);
      }
    }
  }

1861 // Helper function used by EvacuateObject to copy a source object to an
1862 // allocated target object and update the forwarding pointer in the
1863 // source object.
1864 INLINE(static void MigrateObject(Heap* heap,
1868 // Copy the content of source to target.
1869 heap->CopyBlock(target->address(), source->address(), size);
1871 // Set the forwarding address.
1872 source->set_map_word(MapWord::FromForwardingAddress(target));
1874 if (logging_and_profiling_mode == LOGGING_AND_PROFILING_ENABLED) {
1875 // Update NewSpace stats if necessary.
1876 RecordCopiedObject(heap, target);
1877 HEAP_PROFILE(heap, ObjectMoveEvent(source->address(), target->address()));
1878 Isolate* isolate = heap->isolate();
1879 if (isolate->logger()->is_logging_code_events() ||
1880 isolate->cpu_profiler()->is_profiling()) {
1881 if (target->IsSharedFunctionInfo()) {
1882 PROFILE(isolate, SharedFunctionInfoMoveEvent(
1883 source->address(), target->address()));
1888 if (marks_handling == TRANSFER_MARKS) {
1889 if (Marking::TransferColor(source, target)) {
1890 MemoryChunk::IncrementLiveBytesFromGC(target->address(), size);
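// EvacuateObject moves a live object out of from-space. An object that has
// already survived one scavenge (Heap::ShouldBePromoted) is promoted into
// old space: oversized objects go to the large object space, data-only
// objects to old data space and objects with pointer fields to old pointer
// space, the latter being queued on the promotion queue for later
// rescanning. Everything else is copied within new space; that allocation
// is assumed to succeed (note the unchecked ToObjectUnchecked below),
// since to-space is large enough to hold all of from-space's survivors.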
1896 template<ObjectContents object_contents,
1897 SizeRestriction size_restriction,
1899 static inline void EvacuateObject(Map* map,
1903 SLOW_ASSERT((size_restriction != SMALL) ||
1904 (object_size <= Page::kMaxNonCodeHeapObjectSize));
1905 SLOW_ASSERT(object->Size() == object_size);
1907 int allocation_size = object_size;
1908 if (alignment != kObjectAlignment) {
1909 ASSERT(alignment == kDoubleAlignment);
1910 allocation_size += kPointerSize;
1913 Heap* heap = map->GetHeap();
1914 if (heap->ShouldBePromoted(object->address(), object_size)) {
1915 MaybeObject* maybe_result;
1917 if ((size_restriction != SMALL) &&
1918 (allocation_size > Page::kMaxNonCodeHeapObjectSize)) {
1919 maybe_result = heap->lo_space()->AllocateRaw(allocation_size,
1922 if (object_contents == DATA_OBJECT) {
1923 maybe_result = heap->old_data_space()->AllocateRaw(allocation_size);
1926 heap->old_pointer_space()->AllocateRaw(allocation_size);
1930 Object* result = NULL; // Initialization to please compiler.
1931 if (maybe_result->ToObject(&result)) {
1932 HeapObject* target = HeapObject::cast(result);
1934 if (alignment != kObjectAlignment) {
1935 target = EnsureDoubleAligned(heap, target, allocation_size);
1938 // Order is important: slot might be inside of the target if target
1939 // was allocated over a dead object and slot comes from the store buffer.
1942 MigrateObject(heap, object, target, object_size);
1944 if (object_contents == POINTER_OBJECT) {
1945 if (map->instance_type() == JS_FUNCTION_TYPE) {
1946 heap->promotion_queue()->insert(
1947 target, JSFunction::kNonWeakFieldsEndOffset);
1949 heap->promotion_queue()->insert(target, object_size);
1953 heap->tracer()->increment_promoted_objects_size(object_size);
1957 MaybeObject* allocation = heap->new_space()->AllocateRaw(allocation_size);
1958 heap->promotion_queue()->SetNewLimit(heap->new_space()->top());
1959 Object* result = allocation->ToObjectUnchecked();
1960 HeapObject* target = HeapObject::cast(result);
1962 if (alignment != kObjectAlignment) {
1963 target = EnsureDoubleAligned(heap, target, allocation_size);
1966 // Order is important: slot might be inside of the target if target
1967 // was allocated over a dead object and slot comes from the store buffer.
1970 MigrateObject(heap, object, target, object_size);
1975 static inline void EvacuateJSFunction(Map* map,
1977 HeapObject* object) {
1978 ObjectEvacuationStrategy<POINTER_OBJECT>::
1979 template VisitSpecialized<JSFunction::kSize>(map, slot, object);
1981 HeapObject* target = *slot;
1982 MarkBit mark_bit = Marking::MarkBitFrom(target);
1983 if (Marking::IsBlack(mark_bit)) {
1984 // This object is black and it might not be rescanned by the marker.
1985 // We should explicitly record the code entry slot for compaction because
1986 // promotion queue processing (IterateAndMarkPointersToFromSpace) will
1987 // miss it, as it is not HeapObject-tagged.
1988 Address code_entry_slot =
1989 target->address() + JSFunction::kCodeEntryOffset;
1990 Code* code = Code::cast(Code::GetObjectFromEntryAddress(code_entry_slot));
1991 map->GetHeap()->mark_compact_collector()->
1992 RecordCodeEntrySlot(code_entry_slot, code);
1997 static inline void EvacuateFixedArray(Map* map,
1999 HeapObject* object) {
2000 int object_size = FixedArray::BodyDescriptor::SizeOf(map, object);
2001 EvacuateObject<POINTER_OBJECT, UNKNOWN_SIZE, kObjectAlignment>(map,
2008 static inline void EvacuateFixedDoubleArray(Map* map,
2010 HeapObject* object) {
2011 int length = reinterpret_cast<FixedDoubleArray*>(object)->length();
2012 int object_size = FixedDoubleArray::SizeFor(length);
2013 EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE, kDoubleAlignment>(
2021 static inline void EvacuateByteArray(Map* map,
2023 HeapObject* object) {
2024 int object_size = reinterpret_cast<ByteArray*>(object)->ByteArraySize();
2025 EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE, kObjectAlignment>(
2026 map, slot, object, object_size);
2030 static inline void EvacuateSeqOneByteString(Map* map,
2032 HeapObject* object) {
2033 int object_size = SeqOneByteString::cast(object)->
2034 SeqOneByteStringSize(map->instance_type());
2035 EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE, kObjectAlignment>(
2036 map, slot, object, object_size);
2040 static inline void EvacuateSeqTwoByteString(Map* map,
2042 HeapObject* object) {
2043 int object_size = SeqTwoByteString::cast(object)->
2044 SeqTwoByteStringSize(map->instance_type());
2045 EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE, kObjectAlignment>(
2046 map, slot, object, object_size);
2050 static inline bool IsShortcutCandidate(int type) {
2051 return ((type & kShortcutTypeMask) == kShortcutTypeTag);
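// A shortcut candidate is a cons string whose second component is the
// empty string. When marks need not be transferred, the scavenger
// short-circuits such a string: instead of copying the cons cell it
// forwards both the slot and the old object directly to the (scavenged)
// first component, removing a level of indirection.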
2054 static inline void EvacuateShortcutCandidate(Map* map,
2056 HeapObject* object) {
2057 ASSERT(IsShortcutCandidate(map->instance_type()));
2059 Heap* heap = map->GetHeap();
2061 if (marks_handling == IGNORE_MARKS &&
2062 ConsString::cast(object)->unchecked_second() ==
2063 heap->empty_string()) {
2065 HeapObject::cast(ConsString::cast(object)->unchecked_first());
2069 if (!heap->InNewSpace(first)) {
2070 object->set_map_word(MapWord::FromForwardingAddress(first));
2074 MapWord first_word = first->map_word();
2075 if (first_word.IsForwardingAddress()) {
2076 HeapObject* target = first_word.ToForwardingAddress();
2079 object->set_map_word(MapWord::FromForwardingAddress(target));
2083 heap->DoScavengeObject(first->map(), slot, first);
2084 object->set_map_word(MapWord::FromForwardingAddress(*slot));
2088 int object_size = ConsString::kSize;
2089 EvacuateObject<POINTER_OBJECT, SMALL, kObjectAlignment>(
2090 map, slot, object, object_size);
2093 template<ObjectContents object_contents>
2094 class ObjectEvacuationStrategy {
2096 template<int object_size>
2097 static inline void VisitSpecialized(Map* map,
2099 HeapObject* object) {
2100 EvacuateObject<object_contents, SMALL, kObjectAlignment>(
2101 map, slot, object, object_size);
2104 static inline void Visit(Map* map,
2106 HeapObject* object) {
2107 int object_size = map->instance_size();
2108 EvacuateObject<object_contents, SMALL, kObjectAlignment>(
2109 map, slot, object, object_size);
2113 static VisitorDispatchTable<ScavengingCallback> table_;
2117 template<MarksHandling marks_handling,
2118 LoggingAndProfiling logging_and_profiling_mode>
2119 VisitorDispatchTable<ScavengingCallback>
2120 ScavengingVisitor<marks_handling, logging_and_profiling_mode>::table_;
2123 static void InitializeScavengingVisitorsTables() {
2124 ScavengingVisitor<TRANSFER_MARKS,
2125 LOGGING_AND_PROFILING_DISABLED>::Initialize();
2126 ScavengingVisitor<IGNORE_MARKS, LOGGING_AND_PROFILING_DISABLED>::Initialize();
2127 ScavengingVisitor<TRANSFER_MARKS,
2128 LOGGING_AND_PROFILING_ENABLED>::Initialize();
2129 ScavengingVisitor<IGNORE_MARKS, LOGGING_AND_PROFILING_ENABLED>::Initialize();
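// Called at the start of a scavenge to copy one of the four statically
// initialized dispatch tables into scavenging_visitors_table_, depending
// on whether incremental marking is active and whether any logger or
// profiler needs to observe object moves.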
2133 void Heap::SelectScavengingVisitorsTable() {
2134 bool logging_and_profiling =
2135 isolate()->logger()->is_logging() ||
2136 isolate()->cpu_profiler()->is_profiling() ||
2137 (isolate()->heap_profiler() != NULL &&
2138 isolate()->heap_profiler()->is_profiling());
2140 if (!incremental_marking()->IsMarking()) {
2141 if (!logging_and_profiling) {
2142 scavenging_visitors_table_.CopyFrom(
2143 ScavengingVisitor<IGNORE_MARKS,
2144 LOGGING_AND_PROFILING_DISABLED>::GetTable());
2146 scavenging_visitors_table_.CopyFrom(
2147 ScavengingVisitor<IGNORE_MARKS,
2148 LOGGING_AND_PROFILING_ENABLED>::GetTable());
2151 if (!logging_and_profiling) {
2152 scavenging_visitors_table_.CopyFrom(
2153 ScavengingVisitor<TRANSFER_MARKS,
2154 LOGGING_AND_PROFILING_DISABLED>::GetTable());
2156 scavenging_visitors_table_.CopyFrom(
2157 ScavengingVisitor<TRANSFER_MARKS,
2158 LOGGING_AND_PROFILING_ENABLED>::GetTable());
2161 if (incremental_marking()->IsCompacting()) {
2162 // When compacting, forbid short-circuiting of cons strings.
2163 // Scavenging code relies on the fact that a new space object
2164 // cannot be evacuated into an evacuation candidate, but
2165 // short-circuiting violates this assumption.
2166 scavenging_visitors_table_.Register(
2167 StaticVisitorBase::kVisitShortcutCandidate,
2168 scavenging_visitors_table_.GetVisitorById(
2169 StaticVisitorBase::kVisitConsString));
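// Slow path of ScavengeObject: reached for a from-space object whose map
// word does not yet hold a forwarding address. Dispatches on the object's
// visitor id through the currently selected scavenging table.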
2175 void Heap::ScavengeObjectSlow(HeapObject** p, HeapObject* object) {
2176 SLOW_ASSERT(HEAP->InFromSpace(object));
2177 MapWord first_word = object->map_word();
2178 SLOW_ASSERT(!first_word.IsForwardingAddress());
2179 Map* map = first_word.ToMap();
2180 map->GetHeap()->DoScavengeObject(map, p, object);
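// During bootstrapping the meta map (the map of all maps) does not exist
// yet, so the very first maps are built in two steps: AllocatePartialMap
// initializes only the fields the GC needs to treat the result as a valid
// map, and CreateInitialMaps later patches in the remaining fields (code
// cache, descriptors, back pointer, prototype) once the objects they refer
// to exist.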
2184 MaybeObject* Heap::AllocatePartialMap(InstanceType instance_type,
2185 int instance_size) {
2187 MaybeObject* maybe_result = AllocateRawMap();
2188 if (!maybe_result->ToObject(&result)) return maybe_result;
2190 // Map::cast cannot be used due to uninitialized map field.
2191 reinterpret_cast<Map*>(result)->set_map(raw_unchecked_meta_map());
2192 reinterpret_cast<Map*>(result)->set_instance_type(instance_type);
2193 reinterpret_cast<Map*>(result)->set_instance_size(instance_size);
2194 reinterpret_cast<Map*>(result)->set_visitor_id(
2195 StaticVisitorBase::GetVisitorId(instance_type, instance_size));
2196 reinterpret_cast<Map*>(result)->set_inobject_properties(0);
2197 reinterpret_cast<Map*>(result)->set_pre_allocated_property_fields(0);
2198 reinterpret_cast<Map*>(result)->set_unused_property_fields(0);
2199 reinterpret_cast<Map*>(result)->set_bit_field(0);
2200 reinterpret_cast<Map*>(result)->set_bit_field2(0);
2201 int bit_field3 = Map::EnumLengthBits::encode(Map::kInvalidEnumCache) |
2202 Map::OwnsDescriptors::encode(true);
2203 reinterpret_cast<Map*>(result)->set_bit_field3(bit_field3);
2208 MaybeObject* Heap::AllocateMap(InstanceType instance_type,
2210 ElementsKind elements_kind) {
2212 MaybeObject* maybe_result = AllocateRawMap();
2213 if (!maybe_result->To(&result)) return maybe_result;
2215 Map* map = reinterpret_cast<Map*>(result);
2216 map->set_map_no_write_barrier(meta_map());
2217 map->set_instance_type(instance_type);
2218 map->set_visitor_id(
2219 StaticVisitorBase::GetVisitorId(instance_type, instance_size));
2220 map->set_prototype(null_value(), SKIP_WRITE_BARRIER);
2221 map->set_constructor(null_value(), SKIP_WRITE_BARRIER);
2222 map->set_instance_size(instance_size);
2223 map->set_inobject_properties(0);
2224 map->set_pre_allocated_property_fields(0);
2225 map->set_code_cache(empty_fixed_array(), SKIP_WRITE_BARRIER);
2226 map->set_dependent_code(DependentCode::cast(empty_fixed_array()),
2227 SKIP_WRITE_BARRIER);
2228 map->init_back_pointer(undefined_value());
2229 map->set_unused_property_fields(0);
2230 map->set_instance_descriptors(empty_descriptor_array());
2231 map->set_bit_field(0);
2232 map->set_bit_field2(1 << Map::kIsExtensible);
2233 int bit_field3 = Map::EnumLengthBits::encode(Map::kInvalidEnumCache) |
2234 Map::OwnsDescriptors::encode(true);
2235 map->set_bit_field3(bit_field3);
2236 map->set_elements_kind(elements_kind);
2242 MaybeObject* Heap::AllocateCodeCache() {
2243 CodeCache* code_cache;
2244 { MaybeObject* maybe_code_cache = AllocateStruct(CODE_CACHE_TYPE);
2245 if (!maybe_code_cache->To(&code_cache)) return maybe_code_cache;
2247 code_cache->set_default_cache(empty_fixed_array(), SKIP_WRITE_BARRIER);
2248 code_cache->set_normal_type_cache(undefined_value(), SKIP_WRITE_BARRIER);
2253 MaybeObject* Heap::AllocatePolymorphicCodeCache() {
2254 return AllocateStruct(POLYMORPHIC_CODE_CACHE_TYPE);
2258 MaybeObject* Heap::AllocateAccessorPair() {
2259 AccessorPair* accessors;
2260 { MaybeObject* maybe_accessors = AllocateStruct(ACCESSOR_PAIR_TYPE);
2261 if (!maybe_accessors->To(&accessors)) return maybe_accessors;
2263 accessors->set_getter(the_hole_value(), SKIP_WRITE_BARRIER);
2264 accessors->set_setter(the_hole_value(), SKIP_WRITE_BARRIER);
2269 MaybeObject* Heap::AllocateTypeFeedbackInfo() {
2270 TypeFeedbackInfo* info;
2271 { MaybeObject* maybe_info = AllocateStruct(TYPE_FEEDBACK_INFO_TYPE);
2272 if (!maybe_info->To(&info)) return maybe_info;
2274 info->initialize_storage();
2275 info->set_type_feedback_cells(TypeFeedbackCells::cast(empty_fixed_array()),
2276 SKIP_WRITE_BARRIER);
2281 MaybeObject* Heap::AllocateAliasedArgumentsEntry(int aliased_context_slot) {
2282 AliasedArgumentsEntry* entry;
2283 { MaybeObject* maybe_entry = AllocateStruct(ALIASED_ARGUMENTS_ENTRY_TYPE);
2284 if (!maybe_entry->To(&entry)) return maybe_entry;
2286 entry->set_aliased_context_slot(aliased_context_slot);
2291 const Heap::StringTypeTable Heap::string_type_table[] = {
2292 #define STRING_TYPE_ELEMENT(type, size, name, camel_name) \
2293 {type, size, k##camel_name##MapRootIndex},
2294 STRING_TYPE_LIST(STRING_TYPE_ELEMENT)
2295 #undef STRING_TYPE_ELEMENT
2299 const Heap::ConstantStringTable Heap::constant_string_table[] = {
2300 #define CONSTANT_STRING_ELEMENT(name, contents) \
2301 {contents, k##name##RootIndex},
2302 INTERNALIZED_STRING_LIST(CONSTANT_STRING_ELEMENT)
2303 #undef CONSTANT_STRING_ELEMENT
2307 const Heap::StructTable Heap::struct_table[] = {
2308 #define STRUCT_TABLE_ELEMENT(NAME, Name, name) \
2309 { NAME##_TYPE, Name::kSize, k##Name##MapRootIndex },
2310 STRUCT_LIST(STRUCT_TABLE_ELEMENT)
2311 #undef STRUCT_TABLE_ELEMENT
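// Creates the root set of maps and a few core objects. The ordering is
// delicate: partial maps for maps, fixed arrays and oddballs come first,
// then the empty fixed array and the null/undefined oddballs, after which
// the partial maps can be completed and all remaining maps can be
// allocated with the fully general AllocateMap.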
2315 bool Heap::CreateInitialMaps() {
2317 { MaybeObject* maybe_obj = AllocatePartialMap(MAP_TYPE, Map::kSize);
2318 if (!maybe_obj->ToObject(&obj)) return false;
2320 // Map::cast cannot be used due to uninitialized map field.
2321 Map* new_meta_map = reinterpret_cast<Map*>(obj);
2322 set_meta_map(new_meta_map);
2323 new_meta_map->set_map(new_meta_map);
2325 { MaybeObject* maybe_obj =
2326 AllocatePartialMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2327 if (!maybe_obj->ToObject(&obj)) return false;
2329 set_fixed_array_map(Map::cast(obj));
2331 { MaybeObject* maybe_obj = AllocatePartialMap(ODDBALL_TYPE, Oddball::kSize);
2332 if (!maybe_obj->ToObject(&obj)) return false;
2334 set_oddball_map(Map::cast(obj));
2336 // Allocate the empty array.
2337 { MaybeObject* maybe_obj = AllocateEmptyFixedArray();
2338 if (!maybe_obj->ToObject(&obj)) return false;
2340 set_empty_fixed_array(FixedArray::cast(obj));
2342 { MaybeObject* maybe_obj = Allocate(oddball_map(), OLD_POINTER_SPACE);
2343 if (!maybe_obj->ToObject(&obj)) return false;
2345 set_null_value(Oddball::cast(obj));
2346 Oddball::cast(obj)->set_kind(Oddball::kNull);
2348 { MaybeObject* maybe_obj = Allocate(oddball_map(), OLD_POINTER_SPACE);
2349 if (!maybe_obj->ToObject(&obj)) return false;
2351 set_undefined_value(Oddball::cast(obj));
2352 Oddball::cast(obj)->set_kind(Oddball::kUndefined);
2353 ASSERT(!InNewSpace(undefined_value()));
2355 // Allocate the empty descriptor array.
2356 { MaybeObject* maybe_obj = AllocateEmptyFixedArray();
2357 if (!maybe_obj->ToObject(&obj)) return false;
2359 set_empty_descriptor_array(DescriptorArray::cast(obj));
2361 // Fix the instance_descriptors for the existing maps.
2362 meta_map()->set_code_cache(empty_fixed_array());
2363 meta_map()->set_dependent_code(DependentCode::cast(empty_fixed_array()));
2364 meta_map()->init_back_pointer(undefined_value());
2365 meta_map()->set_instance_descriptors(empty_descriptor_array());
2367 fixed_array_map()->set_code_cache(empty_fixed_array());
2368 fixed_array_map()->set_dependent_code(
2369 DependentCode::cast(empty_fixed_array()));
2370 fixed_array_map()->init_back_pointer(undefined_value());
2371 fixed_array_map()->set_instance_descriptors(empty_descriptor_array());
2373 oddball_map()->set_code_cache(empty_fixed_array());
2374 oddball_map()->set_dependent_code(DependentCode::cast(empty_fixed_array()));
2375 oddball_map()->init_back_pointer(undefined_value());
2376 oddball_map()->set_instance_descriptors(empty_descriptor_array());
2378 // Fix prototype object for existing maps.
2379 meta_map()->set_prototype(null_value());
2380 meta_map()->set_constructor(null_value());
2382 fixed_array_map()->set_prototype(null_value());
2383 fixed_array_map()->set_constructor(null_value());
2385 oddball_map()->set_prototype(null_value());
2386 oddball_map()->set_constructor(null_value());
2388 { MaybeObject* maybe_obj =
2389 AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2390 if (!maybe_obj->ToObject(&obj)) return false;
2392 set_fixed_cow_array_map(Map::cast(obj));
2393 ASSERT(fixed_array_map() != fixed_cow_array_map());
2395 { MaybeObject* maybe_obj =
2396 AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2397 if (!maybe_obj->ToObject(&obj)) return false;
2399 set_scope_info_map(Map::cast(obj));
2401 { MaybeObject* maybe_obj = AllocateMap(HEAP_NUMBER_TYPE, HeapNumber::kSize);
2402 if (!maybe_obj->ToObject(&obj)) return false;
2404 set_heap_number_map(Map::cast(obj));
2406 { MaybeObject* maybe_obj = AllocateMap(SYMBOL_TYPE, Symbol::kSize);
2407 if (!maybe_obj->ToObject(&obj)) return false;
2409 set_symbol_map(Map::cast(obj));
2411 { MaybeObject* maybe_obj = AllocateMap(FOREIGN_TYPE, Foreign::kSize);
2412 if (!maybe_obj->ToObject(&obj)) return false;
2414 set_foreign_map(Map::cast(obj));
2416 for (unsigned i = 0; i < ARRAY_SIZE(string_type_table); i++) {
2417 const StringTypeTable& entry = string_type_table[i];
2418 { MaybeObject* maybe_obj = AllocateMap(entry.type, entry.size);
2419 if (!maybe_obj->ToObject(&obj)) return false;
2421 roots_[entry.index] = Map::cast(obj);
2424 { MaybeObject* maybe_obj = AllocateMap(STRING_TYPE, kVariableSizeSentinel);
2425 if (!maybe_obj->ToObject(&obj)) return false;
2427 set_undetectable_string_map(Map::cast(obj));
2428 Map::cast(obj)->set_is_undetectable();
2430 { MaybeObject* maybe_obj =
2431 AllocateMap(ASCII_STRING_TYPE, kVariableSizeSentinel);
2432 if (!maybe_obj->ToObject(&obj)) return false;
2434 set_undetectable_ascii_string_map(Map::cast(obj));
2435 Map::cast(obj)->set_is_undetectable();
2437 { MaybeObject* maybe_obj =
2438 AllocateMap(FIXED_DOUBLE_ARRAY_TYPE, kVariableSizeSentinel);
2439 if (!maybe_obj->ToObject(&obj)) return false;
2441 set_fixed_double_array_map(Map::cast(obj));
2443 { MaybeObject* maybe_obj =
2444 AllocateMap(BYTE_ARRAY_TYPE, kVariableSizeSentinel);
2445 if (!maybe_obj->ToObject(&obj)) return false;
2447 set_byte_array_map(Map::cast(obj));
2449 { MaybeObject* maybe_obj =
2450 AllocateMap(FREE_SPACE_TYPE, kVariableSizeSentinel);
2451 if (!maybe_obj->ToObject(&obj)) return false;
2453 set_free_space_map(Map::cast(obj));
2455 { MaybeObject* maybe_obj = AllocateByteArray(0, TENURED);
2456 if (!maybe_obj->ToObject(&obj)) return false;
2458 set_empty_byte_array(ByteArray::cast(obj));
2460 { MaybeObject* maybe_obj =
2461 AllocateMap(EXTERNAL_PIXEL_ARRAY_TYPE, ExternalArray::kAlignedSize);
2462 if (!maybe_obj->ToObject(&obj)) return false;
2464 set_external_pixel_array_map(Map::cast(obj));
2466 { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_BYTE_ARRAY_TYPE,
2467 ExternalArray::kAlignedSize);
2468 if (!maybe_obj->ToObject(&obj)) return false;
2470 set_external_byte_array_map(Map::cast(obj));
2472 { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE,
2473 ExternalArray::kAlignedSize);
2474 if (!maybe_obj->ToObject(&obj)) return false;
2476 set_external_unsigned_byte_array_map(Map::cast(obj));
2478 { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_SHORT_ARRAY_TYPE,
2479 ExternalArray::kAlignedSize);
2480 if (!maybe_obj->ToObject(&obj)) return false;
2482 set_external_short_array_map(Map::cast(obj));
2484 { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE,
2485 ExternalArray::kAlignedSize);
2486 if (!maybe_obj->ToObject(&obj)) return false;
2488 set_external_unsigned_short_array_map(Map::cast(obj));
2490 { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_INT_ARRAY_TYPE,
2491 ExternalArray::kAlignedSize);
2492 if (!maybe_obj->ToObject(&obj)) return false;
2494 set_external_int_array_map(Map::cast(obj));
2496 { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_UNSIGNED_INT_ARRAY_TYPE,
2497 ExternalArray::kAlignedSize);
2498 if (!maybe_obj->ToObject(&obj)) return false;
2500 set_external_unsigned_int_array_map(Map::cast(obj));
2502 { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_FLOAT_ARRAY_TYPE,
2503 ExternalArray::kAlignedSize);
2504 if (!maybe_obj->ToObject(&obj)) return false;
2506 set_external_float_array_map(Map::cast(obj));
2508 { MaybeObject* maybe_obj =
2509 AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2510 if (!maybe_obj->ToObject(&obj)) return false;
2512 set_non_strict_arguments_elements_map(Map::cast(obj));
2514 { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_DOUBLE_ARRAY_TYPE,
2515 ExternalArray::kAlignedSize);
2516 if (!maybe_obj->ToObject(&obj)) return false;
2518 set_external_double_array_map(Map::cast(obj));
2520 { MaybeObject* maybe_obj = AllocateMap(CODE_TYPE, kVariableSizeSentinel);
2521 if (!maybe_obj->ToObject(&obj)) return false;
2523 set_code_map(Map::cast(obj));
2525 { MaybeObject* maybe_obj = AllocateMap(JS_GLOBAL_PROPERTY_CELL_TYPE,
2526 JSGlobalPropertyCell::kSize);
2527 if (!maybe_obj->ToObject(&obj)) return false;
2529 set_global_property_cell_map(Map::cast(obj));
2531 { MaybeObject* maybe_obj = AllocateMap(FILLER_TYPE, kPointerSize);
2532 if (!maybe_obj->ToObject(&obj)) return false;
2534 set_one_pointer_filler_map(Map::cast(obj));
2536 { MaybeObject* maybe_obj = AllocateMap(FILLER_TYPE, 2 * kPointerSize);
2537 if (!maybe_obj->ToObject(&obj)) return false;
2539 set_two_pointer_filler_map(Map::cast(obj));
2541 for (unsigned i = 0; i < ARRAY_SIZE(struct_table); i++) {
2542 const StructTable& entry = struct_table[i];
2543 { MaybeObject* maybe_obj = AllocateMap(entry.type, entry.size);
2544 if (!maybe_obj->ToObject(&obj)) return false;
2546 roots_[entry.index] = Map::cast(obj);
2549 { MaybeObject* maybe_obj =
2550 AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2551 if (!maybe_obj->ToObject(&obj)) return false;
2553 set_hash_table_map(Map::cast(obj));
2555 { MaybeObject* maybe_obj =
2556 AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2557 if (!maybe_obj->ToObject(&obj)) return false;
2559 set_function_context_map(Map::cast(obj));
2561 { MaybeObject* maybe_obj =
2562 AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2563 if (!maybe_obj->ToObject(&obj)) return false;
2565 set_catch_context_map(Map::cast(obj));
2567 { MaybeObject* maybe_obj =
2568 AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2569 if (!maybe_obj->ToObject(&obj)) return false;
2571 set_with_context_map(Map::cast(obj));
2573 { MaybeObject* maybe_obj =
2574 AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2575 if (!maybe_obj->ToObject(&obj)) return false;
2577 set_block_context_map(Map::cast(obj));
2579 { MaybeObject* maybe_obj =
2580 AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2581 if (!maybe_obj->ToObject(&obj)) return false;
2583 set_module_context_map(Map::cast(obj));
2585 { MaybeObject* maybe_obj =
2586 AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2587 if (!maybe_obj->ToObject(&obj)) return false;
2589 set_global_context_map(Map::cast(obj));
2591 { MaybeObject* maybe_obj =
2592 AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2593 if (!maybe_obj->ToObject(&obj)) return false;
2595 Map* native_context_map = Map::cast(obj);
2596 native_context_map->set_dictionary_map(true);
2597 native_context_map->set_visitor_id(StaticVisitorBase::kVisitNativeContext);
2598 set_native_context_map(native_context_map);
2600 { MaybeObject* maybe_obj = AllocateMap(SHARED_FUNCTION_INFO_TYPE,
2601 SharedFunctionInfo::kAlignedSize);
2602 if (!maybe_obj->ToObject(&obj)) return false;
2604 set_shared_function_info_map(Map::cast(obj));
2606 { MaybeObject* maybe_obj = AllocateMap(JS_MESSAGE_OBJECT_TYPE,
2607 JSMessageObject::kSize);
2608 if (!maybe_obj->ToObject(&obj)) return false;
2610 set_message_object_map(Map::cast(obj));
2613 { MaybeObject* maybe_obj =
2614 AllocateMap(JS_OBJECT_TYPE, JSObject::kHeaderSize + kPointerSize);
2615 if (!maybe_obj->To(&external_map)) return false;
2617 external_map->set_is_extensible(false);
2618 set_external_map(external_map);
2620 ASSERT(!InNewSpace(empty_fixed_array()));
2625 MaybeObject* Heap::AllocateHeapNumber(double value, PretenureFlag pretenure) {
2626 // Statically ensure that it is safe to allocate heap numbers in paged spaces.
2628 STATIC_ASSERT(HeapNumber::kSize <= Page::kNonCodeObjectAreaSize);
2629 AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
2632 { MaybeObject* maybe_result =
2633 AllocateRaw(HeapNumber::kSize, space, OLD_DATA_SPACE);
2634 if (!maybe_result->ToObject(&result)) return maybe_result;
2637 HeapObject::cast(result)->set_map_no_write_barrier(heap_number_map());
2638 HeapNumber::cast(result)->set_value(value);
2643 MaybeObject* Heap::AllocateHeapNumber(double value) {
2644 // Use the general version if we're forced to always allocate.
2645 if (always_allocate()) return AllocateHeapNumber(value, TENURED);
2647 // This version of AllocateHeapNumber is optimized for
2648 // allocation in new space.
2649 STATIC_ASSERT(HeapNumber::kSize <= Page::kMaxNonCodeHeapObjectSize);
2650 ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
2652 { MaybeObject* maybe_result = new_space_.AllocateRaw(HeapNumber::kSize);
2653 if (!maybe_result->ToObject(&result)) return maybe_result;
2655 HeapObject::cast(result)->set_map_no_write_barrier(heap_number_map());
2656 HeapNumber::cast(result)->set_value(value);
2661 MaybeObject* Heap::AllocateJSGlobalPropertyCell(Object* value) {
2663 { MaybeObject* maybe_result = AllocateRawCell();
2664 if (!maybe_result->ToObject(&result)) return maybe_result;
2666 HeapObject::cast(result)->set_map_no_write_barrier(
2667 global_property_cell_map());
2668 JSGlobalPropertyCell::cast(result)->set_value(value);
2673 MaybeObject* Heap::CreateOddball(const char* to_string,
2677 { MaybeObject* maybe_result = Allocate(oddball_map(), OLD_POINTER_SPACE);
2678 if (!maybe_result->ToObject(&result)) return maybe_result;
2680 return Oddball::cast(result)->Initialize(to_string, to_number, kind);
2684 bool Heap::CreateApiObjects() {
2687 { MaybeObject* maybe_obj = AllocateMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
2688 if (!maybe_obj->ToObject(&obj)) return false;
2690 // Don't use Smi-only elements optimizations for objects with the neander
2691 // map. There are too many cases where element values are set directly,
2692 // with no bottleneck at which to trap the Smi-only -> fast elements
2693 // transition, and there appears to be no benefit from optimizing this case.
2694 Map* new_neander_map = Map::cast(obj);
2695 new_neander_map->set_elements_kind(TERMINAL_FAST_ELEMENTS_KIND);
2696 set_neander_map(new_neander_map);
2698 { MaybeObject* maybe_obj = AllocateJSObjectFromMap(neander_map());
2699 if (!maybe_obj->ToObject(&obj)) return false;
2702 { MaybeObject* maybe_elements = AllocateFixedArray(2);
2703 if (!maybe_elements->ToObject(&elements)) return false;
2705 FixedArray::cast(elements)->set(0, Smi::FromInt(0));
2706 JSObject::cast(obj)->set_elements(FixedArray::cast(elements));
2707 set_message_listeners(JSObject::cast(obj));
2713 void Heap::CreateJSEntryStub() {
2715 set_js_entry_code(*stub.GetCode(isolate()));
2719 void Heap::CreateJSConstructEntryStub() {
2720 JSConstructEntryStub stub;
2721 set_js_construct_entry_code(*stub.GetCode(isolate()));
2725 void Heap::CreateFixedStubs() {
2726 // Here we create roots for fixed stubs. They are needed at GC
2727 // for cooking and uncooking (check out frames.cc).
2728 // This eliminates the need for a dictionary lookup in the
2729 // stub cache for these stubs.
2730 HandleScope scope(isolate());
2731 // gcc-4.4 has a problem generating correct code for the following snippet:
2732 // { JSEntryStub stub;
2733 // js_entry_code_ = *stub.GetCode();
2735 // { JSConstructEntryStub stub;
2736 // js_construct_entry_code_ = *stub.GetCode();
2738 // To work around the problem, we use separate functions that are not inlined.
2739 Heap::CreateJSEntryStub();
2740 Heap::CreateJSConstructEntryStub();
2742 // Create stubs that should be there, so we don't unexpectedly have to
2743 // create them if we need them during the creation of another stub.
2744 // Stub creation mixes raw pointers and handles in an unsafe manner so
2745 // we cannot create stubs while we are creating stubs.
2746 CodeStub::GenerateStubsAheadOfTime(isolate());
2750 bool Heap::CreateInitialObjects() {
2753 // The -0 value must be set before NumberFromDouble works.
2754 { MaybeObject* maybe_obj = AllocateHeapNumber(-0.0, TENURED);
2755 if (!maybe_obj->ToObject(&obj)) return false;
2757 set_minus_zero_value(HeapNumber::cast(obj));
2758 ASSERT(std::signbit(minus_zero_value()->Number()) != 0);
2760 { MaybeObject* maybe_obj = AllocateHeapNumber(OS::nan_value(), TENURED);
2761 if (!maybe_obj->ToObject(&obj)) return false;
2763 set_nan_value(HeapNumber::cast(obj));
2765 { MaybeObject* maybe_obj = AllocateHeapNumber(V8_INFINITY, TENURED);
2766 if (!maybe_obj->ToObject(&obj)) return false;
2768 set_infinity_value(HeapNumber::cast(obj));
2770 // The hole has not been created yet, but we want to put something
2771 // predictable in the gaps in the string table, so let's make that Smi zero.
2772 set_the_hole_value(reinterpret_cast<Oddball*>(Smi::FromInt(0)));
2774 // Allocate initial string table.
2775 { MaybeObject* maybe_obj =
2776 StringTable::Allocate(this, kInitialStringTableSize);
2777 if (!maybe_obj->ToObject(&obj)) return false;
2779 // Don't use set_string_table() due to asserts.
2780 roots_[kStringTableRootIndex] = obj;
2782 // Finish initializing oddballs after creating the string table.
2783 { MaybeObject* maybe_obj =
2784 undefined_value()->Initialize("undefined",
2786 Oddball::kUndefined);
2787 if (!maybe_obj->ToObject(&obj)) return false;
2790 // Initialize the null_value.
2791 { MaybeObject* maybe_obj =
2792 null_value()->Initialize("null", Smi::FromInt(0), Oddball::kNull);
2793 if (!maybe_obj->ToObject(&obj)) return false;
2796 { MaybeObject* maybe_obj = CreateOddball("true",
2799 if (!maybe_obj->ToObject(&obj)) return false;
2801 set_true_value(Oddball::cast(obj));
2803 { MaybeObject* maybe_obj = CreateOddball("false",
2806 if (!maybe_obj->ToObject(&obj)) return false;
2808 set_false_value(Oddball::cast(obj));
2810 { MaybeObject* maybe_obj = CreateOddball("hole",
2813 if (!maybe_obj->ToObject(&obj)) return false;
2815 set_the_hole_value(Oddball::cast(obj));
2817 { MaybeObject* maybe_obj = CreateOddball("arguments_marker",
2819 Oddball::kArgumentMarker);
2820 if (!maybe_obj->ToObject(&obj)) return false;
2822 set_arguments_marker(Oddball::cast(obj));
2824 { MaybeObject* maybe_obj = CreateOddball("no_interceptor_result_sentinel",
2827 if (!maybe_obj->ToObject(&obj)) return false;
2829 set_no_interceptor_result_sentinel(obj);
2831 { MaybeObject* maybe_obj = CreateOddball("termination_exception",
2834 if (!maybe_obj->ToObject(&obj)) return false;
2836 set_termination_exception(obj);
2838 for (unsigned i = 0; i < ARRAY_SIZE(constant_string_table); i++) {
2839 { MaybeObject* maybe_obj =
2840 InternalizeUtf8String(constant_string_table[i].contents);
2841 if (!maybe_obj->ToObject(&obj)) return false;
2843 roots_[constant_string_table[i].index] = String::cast(obj);
2846 // Allocate the hidden string which is used to identify the hidden properties
2847 // in JSObjects. The hash code has a special value so that it will not match
2848 // the empty string when searching for the property. It cannot be part of the
2849 // loop above because it needs to be allocated manually with the special
2850 // hash code in place. The hash code for the hidden_string is zero to ensure
2851 // that it will always be at the first entry in property descriptors.
2852 { MaybeObject* maybe_obj = AllocateOneByteInternalizedString(
2853 OneByteVector("", 0), String::kEmptyStringHash);
2854 if (!maybe_obj->ToObject(&obj)) return false;
2856 hidden_string_ = String::cast(obj);
2858 // Allocate the code_stubs dictionary. The initial size is set to avoid
2859 // expanding the dictionary during bootstrapping.
2860 { MaybeObject* maybe_obj = UnseededNumberDictionary::Allocate(this, 128);
2861 if (!maybe_obj->ToObject(&obj)) return false;
2863 set_code_stubs(UnseededNumberDictionary::cast(obj));
2866 // Allocate the non_monomorphic_cache used in stub-cache.cc. The initial size
2867 // is set to avoid expanding the dictionary during bootstrapping.
2868 { MaybeObject* maybe_obj = UnseededNumberDictionary::Allocate(this, 64);
2869 if (!maybe_obj->ToObject(&obj)) return false;
2871 set_non_monomorphic_cache(UnseededNumberDictionary::cast(obj));
2873 { MaybeObject* maybe_obj = AllocatePolymorphicCodeCache();
2874 if (!maybe_obj->ToObject(&obj)) return false;
2876 set_polymorphic_code_cache(PolymorphicCodeCache::cast(obj));
2878 set_instanceof_cache_function(Smi::FromInt(0));
2879 set_instanceof_cache_map(Smi::FromInt(0));
2880 set_instanceof_cache_answer(Smi::FromInt(0));
2884 // Allocate the dictionary of intrinsic function names.
2885 { MaybeObject* maybe_obj =
2886 NameDictionary::Allocate(this, Runtime::kNumFunctions);
2887 if (!maybe_obj->ToObject(&obj)) return false;
2889 { MaybeObject* maybe_obj = Runtime::InitializeIntrinsicFunctionNames(this,
2891 if (!maybe_obj->ToObject(&obj)) return false;
2893 set_intrinsic_function_names(NameDictionary::cast(obj));
2895 { MaybeObject* maybe_obj = AllocateInitialNumberStringCache();
2896 if (!maybe_obj->ToObject(&obj)) return false;
2898 set_number_string_cache(FixedArray::cast(obj));
2900 // Allocate cache for single character one byte strings.
2901 { MaybeObject* maybe_obj =
2902 AllocateFixedArray(String::kMaxOneByteCharCode + 1, TENURED);
2903 if (!maybe_obj->ToObject(&obj)) return false;
2905 set_single_character_string_cache(FixedArray::cast(obj));
2907 // Allocate cache for string split.
2908 { MaybeObject* maybe_obj = AllocateFixedArray(
2909 RegExpResultsCache::kRegExpResultsCacheSize, TENURED);
2910 if (!maybe_obj->ToObject(&obj)) return false;
2912 set_string_split_cache(FixedArray::cast(obj));
2914 { MaybeObject* maybe_obj = AllocateFixedArray(
2915 RegExpResultsCache::kRegExpResultsCacheSize, TENURED);
2916 if (!maybe_obj->ToObject(&obj)) return false;
2918 set_regexp_multiple_cache(FixedArray::cast(obj));
2920 // Allocate cache for external strings pointing to native source code.
2921 { MaybeObject* maybe_obj = AllocateFixedArray(Natives::GetBuiltinsCount());
2922 if (!maybe_obj->ToObject(&obj)) return false;
2924 set_natives_source_cache(FixedArray::cast(obj));
2926 // Allocate object to hold object observation state.
2927 { MaybeObject* maybe_obj = AllocateMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
2928 if (!maybe_obj->ToObject(&obj)) return false;
2930 { MaybeObject* maybe_obj = AllocateJSObjectFromMap(Map::cast(obj));
2931 if (!maybe_obj->ToObject(&obj)) return false;
2933 set_observation_state(JSObject::cast(obj));
2935 // Handling of script id generation is in FACTORY->NewScript.
2936 set_last_script_id(undefined_value());
2938 // Initialize keyed lookup cache.
2939 isolate_->keyed_lookup_cache()->Clear();
2941 // Initialize context slot cache.
2942 isolate_->context_slot_cache()->Clear();
2944 // Initialize descriptor cache.
2945 isolate_->descriptor_lookup_cache()->Clear();
2947 // Initialize compilation cache.
2948 isolate_->compilation_cache()->Clear();
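// Most roots are immutable once the heap has been set up, which is
// presumably what allows generated code and the serializer to treat them
// as constants. The list below enumerates the exceptions: roots such as
// caches and stack limits that the VM legitimately overwrites at run time.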
2954 bool Heap::RootCanBeWrittenAfterInitialization(Heap::RootListIndex root_index) {
2955 RootListIndex writable_roots[] = {
2956 kStoreBufferTopRootIndex,
2957 kStackLimitRootIndex,
2958 kNumberStringCacheRootIndex,
2959 kInstanceofCacheFunctionRootIndex,
2960 kInstanceofCacheMapRootIndex,
2961 kInstanceofCacheAnswerRootIndex,
2962 kCodeStubsRootIndex,
2963 kNonMonomorphicCacheRootIndex,
2964 kPolymorphicCodeCacheRootIndex,
2965 kLastScriptIdRootIndex,
2966 kEmptyScriptRootIndex,
2967 kRealStackLimitRootIndex,
2968 kArgumentsAdaptorDeoptPCOffsetRootIndex,
2969 kConstructStubDeoptPCOffsetRootIndex,
2970 kGetterStubDeoptPCOffsetRootIndex,
2971 kSetterStubDeoptPCOffsetRootIndex,
2972 kStringTableRootIndex,
2975 for (unsigned int i = 0; i < ARRAY_SIZE(writable_roots); i++) {
2976 if (root_index == writable_roots[i])
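// The regexp results caches are fixed arrays used as hash tables with
// two-way probing. Each logical entry spans kArrayEntriesPerCacheEntry
// consecutive slots holding the subject string, the pattern (a string for
// String.split results, a fixed array for global regexp match indices) and
// the cached result array. Lookup probes the entry derived from the
// subject's hash and, on a miss, one further entry
// kArrayEntriesPerCacheEntry slots along, wrapping at the cache size.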
2983 Object* RegExpResultsCache::Lookup(Heap* heap,
2985 Object* key_pattern,
2986 ResultsCacheType type) {
2988 if (!key_string->IsInternalizedString()) return Smi::FromInt(0);
2989 if (type == STRING_SPLIT_SUBSTRINGS) {
2990 ASSERT(key_pattern->IsString());
2991 if (!key_pattern->IsInternalizedString()) return Smi::FromInt(0);
2992 cache = heap->string_split_cache();
2994 ASSERT(type == REGEXP_MULTIPLE_INDICES);
2995 ASSERT(key_pattern->IsFixedArray());
2996 cache = heap->regexp_multiple_cache();
2999 uint32_t hash = key_string->Hash();
3000 uint32_t index = ((hash & (kRegExpResultsCacheSize - 1)) &
3001 ~(kArrayEntriesPerCacheEntry - 1));
3002 if (cache->get(index + kStringOffset) == key_string &&
3003 cache->get(index + kPatternOffset) == key_pattern) {
3004 return cache->get(index + kArrayOffset);
3007 ((index + kArrayEntriesPerCacheEntry) & (kRegExpResultsCacheSize - 1));
3008 if (cache->get(index + kStringOffset) == key_string &&
3009 cache->get(index + kPatternOffset) == key_pattern) {
3010 return cache->get(index + kArrayOffset);
3012 return Smi::FromInt(0);
3016 void RegExpResultsCache::Enter(Heap* heap,
3018 Object* key_pattern,
3019 FixedArray* value_array,
3020 ResultsCacheType type) {
3022 if (!key_string->IsInternalizedString()) return;
3023 if (type == STRING_SPLIT_SUBSTRINGS) {
3024 ASSERT(key_pattern->IsString());
3025 if (!key_pattern->IsInternalizedString()) return;
3026 cache = heap->string_split_cache();
3028 ASSERT(type == REGEXP_MULTIPLE_INDICES);
3029 ASSERT(key_pattern->IsFixedArray());
3030 cache = heap->regexp_multiple_cache();
3033 uint32_t hash = key_string->Hash();
3034 uint32_t index = ((hash & (kRegExpResultsCacheSize - 1)) &
3035 ~(kArrayEntriesPerCacheEntry - 1));
3036 if (cache->get(index + kStringOffset) == Smi::FromInt(0)) {
3037 cache->set(index + kStringOffset, key_string);
3038 cache->set(index + kPatternOffset, key_pattern);
3039 cache->set(index + kArrayOffset, value_array);
3042 ((index + kArrayEntriesPerCacheEntry) & (kRegExpResultsCacheSize - 1));
3043 if (cache->get(index2 + kStringOffset) == Smi::FromInt(0)) {
3044 cache->set(index2 + kStringOffset, key_string);
3045 cache->set(index2 + kPatternOffset, key_pattern);
3046 cache->set(index2 + kArrayOffset, value_array);
3048 cache->set(index2 + kStringOffset, Smi::FromInt(0));
3049 cache->set(index2 + kPatternOffset, Smi::FromInt(0));
3050 cache->set(index2 + kArrayOffset, Smi::FromInt(0));
3051 cache->set(index + kStringOffset, key_string);
3052 cache->set(index + kPatternOffset, key_pattern);
3053 cache->set(index + kArrayOffset, value_array);
3056 // If the array is a reasonably short list of substrings, convert it into a
3057 // list of internalized strings.
3058 if (type == STRING_SPLIT_SUBSTRINGS && value_array->length() < 100) {
3059 for (int i = 0; i < value_array->length(); i++) {
3060 String* str = String::cast(value_array->get(i));
3061 Object* internalized_str;
3062 MaybeObject* maybe_string = heap->InternalizeString(str);
3063 if (maybe_string->ToObject(&internalized_str)) {
3064 value_array->set(i, internalized_str);
3068 // Convert backing store to a copy-on-write array.
3069 value_array->set_map_no_write_barrier(heap->fixed_cow_array_map());
3073 void RegExpResultsCache::Clear(FixedArray* cache) {
3074 for (int i = 0; i < kRegExpResultsCacheSize; i++) {
3075 cache->set(i, Smi::FromInt(0));
3080 MaybeObject* Heap::AllocateInitialNumberStringCache() {
3081 MaybeObject* maybe_obj =
3082 AllocateFixedArray(kInitialNumberStringCacheSize * 2, TENURED);
3087 int Heap::FullSizeNumberStringCacheLength() {
3088 // Compute the size of the number string cache based on the max newspace size.
3089 // The number string cache has a minimum size based on twice the initial cache
3090 // size to ensure that it is bigger after being made 'full size'.
3091 int number_string_cache_size = max_semispace_size_ / 512;
3092 number_string_cache_size = Max(kInitialNumberStringCacheSize * 2,
3093 Min(0x4000, number_string_cache_size));
3094 // There is a string and a number per entry, so the length is twice the
// number of entries.
3096 return number_string_cache_size * 2;
3100 void Heap::AllocateFullSizeNumberStringCache() {
3101 // The idea is to have a small number string cache in the snapshot to keep
3102 // boot-time memory usage down. If the cache is already being expanded
3103 // while the snapshot is created, that plan has failed.
3104 ASSERT(!Serializer::enabled() || FLAG_extra_code != NULL);
3105 MaybeObject* maybe_obj =
3106 AllocateFixedArray(FullSizeNumberStringCacheLength(), TENURED);
3108 if (maybe_obj->ToObject(&new_cache)) {
3109 // We don't bother to repopulate the cache with entries from the old cache.
3110 // It will be repopulated soon enough with new strings.
3111 set_number_string_cache(FixedArray::cast(new_cache));
3113 // If allocation fails then we just return without doing anything. It is only
3114 // a cache, so best effort is OK here.
3118 void Heap::FlushNumberStringCache() {
3119 // Flush the number to string cache.
3120 int len = number_string_cache()->length();
3121 for (int i = 0; i < len; i++) {
3122 number_string_cache()->set_undefined(this, i);
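// The number string cache is a fixed array of (number, string) pairs. The
// index is derived from the number: a Smi hashes to its own value, while a
// double hashes to the xor of the upper and lower 32 bits of its IEEE
// representation; the hash is then masked down to the table size. The
// cache starts small to keep the snapshot lean and is grown to full size
// on the first collision.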
3127 static inline int double_get_hash(double d) {
3128 DoubleRepresentation rep(d);
3129 return static_cast<int>(rep.bits) ^ static_cast<int>(rep.bits >> 32);
3133 static inline int smi_get_hash(Smi* smi) {
3134 return smi->value();
3138 Object* Heap::GetNumberStringCache(Object* number) {
3140 int mask = (number_string_cache()->length() >> 1) - 1;
3141 if (number->IsSmi()) {
3142 hash = smi_get_hash(Smi::cast(number)) & mask;
3144 hash = double_get_hash(number->Number()) & mask;
3146 Object* key = number_string_cache()->get(hash * 2);
3147 if (key == number) {
3148 return String::cast(number_string_cache()->get(hash * 2 + 1));
3149 } else if (key->IsHeapNumber() &&
3150 number->IsHeapNumber() &&
3151 key->Number() == number->Number()) {
3152 return String::cast(number_string_cache()->get(hash * 2 + 1));
3154 return undefined_value();
3158 void Heap::SetNumberStringCache(Object* number, String* string) {
3160 int mask = (number_string_cache()->length() >> 1) - 1;
3161 if (number->IsSmi()) {
3162 hash = smi_get_hash(Smi::cast(number)) & mask;
3164 hash = double_get_hash(number->Number()) & mask;
3166 if (number_string_cache()->get(hash * 2) != undefined_value() &&
3167 number_string_cache()->length() != FullSizeNumberStringCacheLength()) {
3168 // The first time we have a hash collision, we move to the full sized
3169 // number string cache.
3170 AllocateFullSizeNumberStringCache();
3173 number_string_cache()->set(hash * 2, number);
3174 number_string_cache()->set(hash * 2 + 1, string);
3178 MaybeObject* Heap::NumberToString(Object* number,
3179 bool check_number_string_cache) {
3180 isolate_->counters()->number_to_string_runtime()->Increment();
3181 if (check_number_string_cache) {
3182 Object* cached = GetNumberStringCache(number);
3183 if (cached != undefined_value()) {
3189 Vector<char> buffer(arr, ARRAY_SIZE(arr));
3191 if (number->IsSmi()) {
3192 int num = Smi::cast(number)->value();
3193 str = IntToCString(num, buffer);
3195 double num = HeapNumber::cast(number)->value();
3196 str = DoubleToCString(num, buffer);
3200 MaybeObject* maybe_js_string = AllocateStringFromOneByte(CStrVector(str));
3201 if (maybe_js_string->ToObject(&js_string)) {
3202 SetNumberStringCache(number, String::cast(js_string));
3204 return maybe_js_string;
3208 MaybeObject* Heap::Uint32ToString(uint32_t value,
3209 bool check_number_string_cache) {
3211 MaybeObject* maybe = NumberFromUint32(value);
3212 if (!maybe->To<Object>(&number)) return maybe;
3213 return NumberToString(number, check_number_string_cache);
3217 Map* Heap::MapForExternalArrayType(ExternalArrayType array_type) {
3218 return Map::cast(roots_[RootIndexForExternalArrayType(array_type)]);
3222 Heap::RootListIndex Heap::RootIndexForExternalArrayType(
3223 ExternalArrayType array_type) {
3224 switch (array_type) {
3225 case kExternalByteArray:
3226 return kExternalByteArrayMapRootIndex;
3227 case kExternalUnsignedByteArray:
3228 return kExternalUnsignedByteArrayMapRootIndex;
3229 case kExternalShortArray:
3230 return kExternalShortArrayMapRootIndex;
3231 case kExternalUnsignedShortArray:
3232 return kExternalUnsignedShortArrayMapRootIndex;
3233 case kExternalIntArray:
3234 return kExternalIntArrayMapRootIndex;
3235 case kExternalUnsignedIntArray:
3236 return kExternalUnsignedIntArrayMapRootIndex;
3237 case kExternalFloatArray:
3238 return kExternalFloatArrayMapRootIndex;
3239 case kExternalDoubleArray:
3240 return kExternalDoubleArrayMapRootIndex;
3241 case kExternalPixelArray:
3242 return kExternalPixelArrayMapRootIndex;
3245 return kUndefinedValueRootIndex;
3250 MaybeObject* Heap::NumberFromDouble(double value, PretenureFlag pretenure) {
3251 // We need to distinguish the minus zero value and this cannot be
3252 // done after conversion to int. Doing this by comparing bit
3253 // patterns is faster than using fpclassify() et al.
3254 static const DoubleRepresentation minus_zero(-0.0);
3256 DoubleRepresentation rep(value);
3257 if (rep.bits == minus_zero.bits) {
3258 return AllocateHeapNumber(-0.0, pretenure);
3261 int int_value = FastD2I(value);
3262 if (value == int_value && Smi::IsValid(int_value)) {
3263 return Smi::FromInt(int_value);
3266 // Materialize the value in the heap.
3267 return AllocateHeapNumber(value, pretenure);
3271 MaybeObject* Heap::AllocateForeign(Address address, PretenureFlag pretenure) {
3272 // Statically ensure that it is safe to allocate foreigns in paged spaces.
3273 STATIC_ASSERT(Foreign::kSize <= Page::kMaxNonCodeHeapObjectSize);
3274 AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
3276 MaybeObject* maybe_result = Allocate(foreign_map(), space);
3277 if (!maybe_result->To(&result)) return maybe_result;
3278 result->set_foreign_address(address);
3283 MaybeObject* Heap::AllocateSharedFunctionInfo(Object* name) {
3284 SharedFunctionInfo* share;
3285 MaybeObject* maybe = Allocate(shared_function_info_map(), OLD_POINTER_SPACE);
3286 if (!maybe->To<SharedFunctionInfo>(&share)) return maybe;
3288 // Set pointer fields.
3289 share->set_name(name);
3290 Code* illegal = isolate_->builtins()->builtin(Builtins::kIllegal);
3291 share->set_code(illegal);
3292 share->ClearOptimizedCodeMap();
3293 share->set_scope_info(ScopeInfo::Empty(isolate_));
3294 Code* construct_stub =
3295 isolate_->builtins()->builtin(Builtins::kJSConstructStubGeneric);
3296 share->set_construct_stub(construct_stub);
3297 share->set_instance_class_name(Object_string());
3298 share->set_function_data(undefined_value(), SKIP_WRITE_BARRIER);
3299 share->set_script(undefined_value(), SKIP_WRITE_BARRIER);
3300 share->set_debug_info(undefined_value(), SKIP_WRITE_BARRIER);
3301 share->set_inferred_name(empty_string(), SKIP_WRITE_BARRIER);
3302 share->set_initial_map(undefined_value(), SKIP_WRITE_BARRIER);
3303 share->set_this_property_assignments(undefined_value(), SKIP_WRITE_BARRIER);
3304 share->set_ast_node_count(0);
3305 share->set_stress_deopt_counter(FLAG_deopt_every_n_times);
3306 share->set_counters(0);
3308 // Set integer fields (smi or int, depending on the architecture).
3309 share->set_length(0);
3310 share->set_formal_parameter_count(0);
3311 share->set_expected_nof_properties(0);
3312 share->set_num_literals(0);
3313 share->set_start_position_and_type(0);
3314 share->set_end_position(0);
3315 share->set_function_token_position(0);
3316 // All compiler hints default to false or 0.
3317 share->set_compiler_hints(0);
3318 share->set_this_property_assignments_count(0);
3319 share->set_opt_count(0);
3325 MaybeObject* Heap::AllocateJSMessageObject(String* type,
3330 Object* stack_trace,
3331 Object* stack_frames) {
3333 { MaybeObject* maybe_result = Allocate(message_object_map(), NEW_SPACE);
3334 if (!maybe_result->ToObject(&result)) return maybe_result;
3336 JSMessageObject* message = JSMessageObject::cast(result);
3337 message->set_properties(Heap::empty_fixed_array(), SKIP_WRITE_BARRIER);
3338 message->initialize_elements();
3339 message->set_elements(Heap::empty_fixed_array(), SKIP_WRITE_BARRIER);
3340 message->set_type(type);
3341 message->set_arguments(arguments);
3342 message->set_start_position(start_position);
3343 message->set_end_position(end_position);
3344 message->set_script(script);
3345 message->set_stack_trace(stack_trace);
3346 message->set_stack_frames(stack_frames);
3352 // Returns true for a character in a range. Both limits are inclusive.
3353 static inline bool Between(uint32_t character, uint32_t from, uint32_t to) {
3354 // This makes use of the unsigned wraparound.
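// E.g. for from = '0': a character below '0' underflows to a huge unsigned
// value, so this single comparison also rejects characters smaller than
// the lower limit.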
3355 return character - from <= to - from;
3359 MUST_USE_RESULT static inline MaybeObject* MakeOrFindTwoCharacterString(
3364 // Numeric strings have a different hash algorithm not known by
3365 // LookupTwoCharsStringIfExists, so we skip this step for such strings.
3366 if ((!Between(c1, '0', '9') || !Between(c2, '0', '9')) &&
3367 heap->string_table()->LookupTwoCharsStringIfExists(c1, c2, &result)) {
3369 // Now that we know the length is 2, we might as well make use of that
3370 // fact when building the new string.
3371 } else if (static_cast<unsigned>(c1 | c2) <= String::kMaxOneByteCharCodeU) {
3373 ASSERT(IsPowerOf2(String::kMaxOneByteCharCodeU + 1)); // because of this.
3375 { MaybeObject* maybe_result = heap->AllocateRawOneByteString(2);
3376 if (!maybe_result->ToObject(&result)) return maybe_result;
3378 uint8_t* dest = SeqOneByteString::cast(result)->GetChars();
3379 dest[0] = static_cast<uint8_t>(c1);
3380 dest[1] = static_cast<uint8_t>(c2);
3384 { MaybeObject* maybe_result = heap->AllocateRawTwoByteString(2);
3385 if (!maybe_result->ToObject(&result)) return maybe_result;
3387 uc16* dest = SeqTwoByteString::cast(result)->GetChars();
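// String concatenation is O(1): rather than copying, a ConsString is
// allocated that merely points at the two halves. Very short results
// (below ConsString::kMinLength) are flattened eagerly because the cons
// cell would cost more than the copy, and two-character results go through
// MakeOrFindTwoCharacterString above so existing string table entries are
// reused.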
3395 MaybeObject* Heap::AllocateConsString(String* first, String* second) {
3396 int first_length = first->length();
3397 if (first_length == 0) {
3401 int second_length = second->length();
3402 if (second_length == 0) {
3406 int length = first_length + second_length;
3408 // Optimization for 2-byte strings often used as keys in a decompression
3409 // dictionary. Check whether we already have the string in the string
3410 // table to prevent creation of many unnecessary strings.
3412 uint16_t c1 = first->Get(0);
3413 uint16_t c2 = second->Get(0);
3414 return MakeOrFindTwoCharacterString(this, c1, c2);
3417 bool first_is_one_byte = first->IsOneByteRepresentation();
3418 bool second_is_one_byte = second->IsOneByteRepresentation();
3419 bool is_one_byte = first_is_one_byte && second_is_one_byte;
3420 // Make sure that an out of memory exception is thrown if the length
3421 // of the new cons string is too large.
3422 if (length > String::kMaxLength || length < 0) {
3423 isolate()->context()->mark_out_of_memory();
3424 return Failure::OutOfMemoryException(0x4);
3427 bool is_one_byte_data_in_two_byte_string = false;
3429 // At least one of the strings uses two-byte representation so we
3430 // can't use the fast case code for short ASCII strings below, but
3431 // we can try to save memory if all chars actually fit in ASCII.
3432 is_one_byte_data_in_two_byte_string =
3433 first->HasOnlyOneByteChars() && second->HasOnlyOneByteChars();
3434 if (is_one_byte_data_in_two_byte_string) {
3435 isolate_->counters()->string_add_runtime_ext_to_ascii()->Increment();
3439 // If the resulting string is small make a flat string.
3440 if (length < ConsString::kMinLength) {
3441 // Note that neither of the two inputs can be a slice because:
3442 STATIC_ASSERT(ConsString::kMinLength <= SlicedString::kMinLength);
3443 ASSERT(first->IsFlat());
3444 ASSERT(second->IsFlat());
3447 { MaybeObject* maybe_result = AllocateRawOneByteString(length);
3448 if (!maybe_result->ToObject(&result)) return maybe_result;
3450 // Copy the characters into the new object.
3451 uint8_t* dest = SeqOneByteString::cast(result)->GetChars();
3454 if (first->IsExternalString()) {
3455 src = ExternalAsciiString::cast(first)->GetChars();
3457 src = SeqOneByteString::cast(first)->GetChars();
3459 for (int i = 0; i < first_length; i++) *dest++ = src[i];
3460 // Copy second part.
3461 if (second->IsExternalString()) {
3462 src = ExternalAsciiString::cast(second)->GetChars();
3464 src = SeqOneByteString::cast(second)->GetChars();
3466 for (int i = 0; i < second_length; i++) *dest++ = src[i];
3469 if (is_one_byte_data_in_two_byte_string) {
3471 { MaybeObject* maybe_result = AllocateRawOneByteString(length);
3472 if (!maybe_result->ToObject(&result)) return maybe_result;
3474 // Copy the characters into the new object.
3475 uint8_t* dest = SeqOneByteString::cast(result)->GetChars();
3476 String::WriteToFlat(first, dest, 0, first_length);
3477 String::WriteToFlat(second, dest + first_length, 0, second_length);
3478 isolate_->counters()->string_add_runtime_ext_to_ascii()->Increment();
3483 { MaybeObject* maybe_result = AllocateRawTwoByteString(length);
3484 if (!maybe_result->ToObject(&result)) return maybe_result;
3486 // Copy the characters into the new object.
3487 uc16* dest = SeqTwoByteString::cast(result)->GetChars();
3488 String::WriteToFlat(first, dest, 0, first_length);
3489 String::WriteToFlat(second, dest + first_length, 0, second_length);
3494 Map* map = (is_one_byte || is_one_byte_data_in_two_byte_string) ?
3495 cons_ascii_string_map() : cons_string_map();
3498 { MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
3499 if (!maybe_result->ToObject(&result)) return maybe_result;
3502 AssertNoAllocation no_gc;
3503 ConsString* cons_string = ConsString::cast(result);
3504 WriteBarrierMode mode = cons_string->GetWriteBarrierMode(no_gc);
3505 cons_string->set_length(length);
3506 cons_string->set_hash_field(String::kEmptyHashField);
3507 cons_string->set_first(first, mode);
3508 cons_string->set_second(second, mode);
3513 MaybeObject* Heap::AllocateSubString(String* buffer,
3516 PretenureFlag pretenure) {
3517 int length = end - start;
3519 return empty_string();
3520 } else if (length == 1) {
3521 return LookupSingleCharacterStringFromCode(buffer->Get(start));
3522 } else if (length == 2) {
3523 // Optimization for 2-byte strings often used as keys in a decompression
3524 // dictionary. Check whether we already have the string in the string
3525 // table to prevent creation of many unnecessary strings.
3526 uint16_t c1 = buffer->Get(start);
3527 uint16_t c2 = buffer->Get(start + 1);
3528 return MakeOrFindTwoCharacterString(this, c1, c2);
3531 // Make an attempt to flatten the buffer to reduce access time.
3532 buffer = buffer->TryFlattenGetString();
3534 if (!FLAG_string_slices ||
3535 !buffer->IsFlat() ||
3536 length < SlicedString::kMinLength ||
3537 pretenure == TENURED) {
3539 // WriteToFlat takes care of the case when an indirect string has a
3540 // different encoding from its underlying string. These encodings may
3541 // differ because of externalization.
3542 bool is_one_byte = buffer->IsOneByteRepresentation();
3543 { MaybeObject* maybe_result = is_one_byte
3544 ? AllocateRawOneByteString(length, pretenure)
3545 : AllocateRawTwoByteString(length, pretenure);
3546 if (!maybe_result->ToObject(&result)) return maybe_result;
3548 String* string_result = String::cast(result);
3549 // Copy the characters into the new object.
3551 ASSERT(string_result->IsOneByteRepresentation());
3552 uint8_t* dest = SeqOneByteString::cast(string_result)->GetChars();
3553 String::WriteToFlat(buffer, dest, start, end);
3555 ASSERT(string_result->IsTwoByteRepresentation());
3556 uc16* dest = SeqTwoByteString::cast(string_result)->GetChars();
3557 String::WriteToFlat(buffer, dest, start, end);
3562 ASSERT(buffer->IsFlat());
3564 if (FLAG_verify_heap) {
3565 buffer->StringVerify();
  // When slicing an indirect string we use its encoding for a newly created
  // slice and don't check the encoding of the underlying string.  This is safe
  // even if the encodings are different because of externalization.  If an
  // indirect ASCII string is pointing to a two-byte string, the two-byte char
  // codes of the underlying string must still fit into ASCII (because
  // externalization must not change char codes).
  { Map* map = buffer->IsOneByteRepresentation()
                 ? sliced_ascii_string_map()
                 : sliced_string_map();
    MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }

  AssertNoAllocation no_gc;
  SlicedString* sliced_string = SlicedString::cast(result);
  sliced_string->set_length(length);
  sliced_string->set_hash_field(String::kEmptyHashField);
  if (buffer->IsConsString()) {
    ConsString* cons = ConsString::cast(buffer);
    ASSERT(cons->second()->length() == 0);
    sliced_string->set_parent(cons->first());
    sliced_string->set_offset(start);
  } else if (buffer->IsSlicedString()) {
    // Prevent nesting sliced strings.
    SlicedString* parent_slice = SlicedString::cast(buffer);
    sliced_string->set_parent(parent_slice->parent());
    sliced_string->set_offset(start + parent_slice->offset());
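    // (For example, a slice [2, 7) of a slice that itself starts at offset 3
    // becomes a slice at offset 5 of the root string, so parent chains never
    // grow beyond depth one.)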
  } else {
    sliced_string->set_parent(buffer);
    sliced_string->set_offset(start);
  }
  ASSERT(sliced_string->parent()->IsSeqString() ||
         sliced_string->parent()->IsExternalString());
  return result;
}


MaybeObject* Heap::AllocateExternalStringFromAscii(
    const ExternalAsciiString::Resource* resource) {
  size_t length = resource->length();
  if (length > static_cast<size_t>(String::kMaxLength)) {
    isolate()->context()->mark_out_of_memory();
    return Failure::OutOfMemoryException(0x5);
  }

  Map* map = external_ascii_string_map();
  Object* result;
  { MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }

  ExternalAsciiString* external_string = ExternalAsciiString::cast(result);
  external_string->set_length(static_cast<int>(length));
  external_string->set_hash_field(String::kEmptyHashField);
  external_string->set_resource(resource);

  return result;
}


MaybeObject* Heap::AllocateExternalStringFromTwoByte(
    const ExternalTwoByteString::Resource* resource) {
  size_t length = resource->length();
  if (length > static_cast<size_t>(String::kMaxLength)) {
    isolate()->context()->mark_out_of_memory();
    return Failure::OutOfMemoryException(0x6);
  }

  // For small strings we check whether the resource contains only
  // one-byte characters.  If yes, we use a different string map.
  static const size_t kOneByteCheckLengthLimit = 32;
  bool is_one_byte = length <= kOneByteCheckLengthLimit &&
      String::IsOneByte(resource->data(), static_cast<int>(length));
  Map* map = is_one_byte ?
      external_string_with_one_byte_data_map() : external_string_map();
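  // (The 32-character cap is a heuristic: for short strings the one-byte scan
  // is cheap relative to the allocation itself, while long strings would pay
  // a full O(n) pass with diminishing odds of qualifying.)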
  Object* result;
  { MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }

  ExternalTwoByteString* external_string = ExternalTwoByteString::cast(result);
  external_string->set_length(static_cast<int>(length));
  external_string->set_hash_field(String::kEmptyHashField);
  external_string->set_resource(resource);

  return result;
}


MaybeObject* Heap::LookupSingleCharacterStringFromCode(uint16_t code) {
  if (code <= String::kMaxOneByteCharCode) {
    Object* value = single_character_string_cache()->get(code);
    if (value != undefined_value()) return value;

    uint8_t buffer[1];
    buffer[0] = static_cast<uint8_t>(code);
    Object* result;
    MaybeObject* maybe_result =
        InternalizeOneByteString(Vector<const uint8_t>(buffer, 1));

    if (!maybe_result->ToObject(&result)) return maybe_result;
    single_character_string_cache()->set(code, result);
    return result;
  }

  Object* result;
  { MaybeObject* maybe_result = AllocateRawTwoByteString(1);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }
  String* answer = String::cast(result);
  answer->Set(0, code);
  return answer;
}


MaybeObject* Heap::AllocateByteArray(int length, PretenureFlag pretenure) {
  if (length < 0 || length > ByteArray::kMaxLength) {
    return Failure::OutOfMemoryException(0x7);
  }
  if (pretenure == NOT_TENURED) {
    return AllocateByteArray(length);
  }
  int size = ByteArray::SizeFor(length);
  Object* result;
  { MaybeObject* maybe_result = (size <= Page::kMaxNonCodeHeapObjectSize)
                   ? old_data_space_->AllocateRaw(size)
                   : lo_space_->AllocateRaw(size, NOT_EXECUTABLE);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }

  reinterpret_cast<ByteArray*>(result)->set_map_no_write_barrier(
      byte_array_map());
  reinterpret_cast<ByteArray*>(result)->set_length(length);
  return result;
}


MaybeObject* Heap::AllocateByteArray(int length) {
  if (length < 0 || length > ByteArray::kMaxLength) {
    return Failure::OutOfMemoryException(0x8);
  }
  int size = ByteArray::SizeFor(length);
  AllocationSpace space =
      (size > Page::kMaxNonCodeHeapObjectSize) ? LO_SPACE : NEW_SPACE;
  Object* result;
  { MaybeObject* maybe_result = AllocateRaw(size, space, OLD_DATA_SPACE);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }

  reinterpret_cast<ByteArray*>(result)->set_map_no_write_barrier(
      byte_array_map());
  reinterpret_cast<ByteArray*>(result)->set_length(length);
  return result;
}


void Heap::CreateFillerObjectAt(Address addr, int size) {
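  // Fillers keep the heap parseable: heap iterators must be able to walk any
  // address range as a sequence of valid objects, so freed gaps are stamped
  // with one of three filler maps depending on their size.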
  if (size == 0) return;
  HeapObject* filler = HeapObject::FromAddress(addr);
  if (size == kPointerSize) {
    filler->set_map_no_write_barrier(one_pointer_filler_map());
  } else if (size == 2 * kPointerSize) {
    filler->set_map_no_write_barrier(two_pointer_filler_map());
  } else {
    filler->set_map_no_write_barrier(free_space_map());
    FreeSpace::cast(filler)->set_size(size);
  }
}


MaybeObject* Heap::AllocateExternalArray(int length,
                                         ExternalArrayType array_type,
                                         void* external_pointer,
                                         PretenureFlag pretenure) {
  AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
  Object* result;
  { MaybeObject* maybe_result = AllocateRaw(ExternalArray::kAlignedSize,
                                            space,
                                            OLD_DATA_SPACE);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }

  reinterpret_cast<ExternalArray*>(result)->set_map_no_write_barrier(
      MapForExternalArrayType(array_type));
  reinterpret_cast<ExternalArray*>(result)->set_length(length);
  reinterpret_cast<ExternalArray*>(result)->set_external_pointer(
      external_pointer);

  return result;
}


MaybeObject* Heap::CreateCode(const CodeDesc& desc,
                              Code::Flags flags,
                              Handle<Object> self_reference,
                              bool immovable,
                              bool crankshafted) {
  // Allocate the ByteArray before the Code object, so that we do not risk
  // leaving an uninitialized Code object (and breaking the heap).
  ByteArray* reloc_info;
  MaybeObject* maybe_reloc_info = AllocateByteArray(desc.reloc_size, TENURED);
  if (!maybe_reloc_info->To(&reloc_info)) return maybe_reloc_info;

  // Compute size.
  int body_size = RoundUp(desc.instr_size, kObjectAlignment);
  int obj_size = Code::SizeFor(body_size);
  ASSERT(IsAligned(static_cast<intptr_t>(obj_size), kCodeAlignment));
  MaybeObject* maybe_result;
  // Large code objects and code objects which should stay at a fixed address
  // are allocated in large object space.
  HeapObject* result;
  bool force_lo_space = obj_size > code_space()->AreaSize();
  if (force_lo_space) {
    maybe_result = lo_space_->AllocateRaw(obj_size, EXECUTABLE);
  } else {
    maybe_result = code_space_->AllocateRaw(obj_size);
  }
  if (!maybe_result->To<HeapObject>(&result)) return maybe_result;

  if (immovable && !force_lo_space &&
      // Objects on the first page of each space are never moved.
      !code_space_->FirstPage()->Contains(result->address())) {
    // Discard the first code allocation, which was on a page where it could
    // be moved.
    CreateFillerObjectAt(result->address(), obj_size);
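    // Retry in large object space; large objects are never relocated by the
    // collector, so the resulting code object is effectively immovable.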
    maybe_result = lo_space_->AllocateRaw(obj_size, EXECUTABLE);
    if (!maybe_result->To<HeapObject>(&result)) return maybe_result;
  }

  // Initialize the object.
  result->set_map_no_write_barrier(code_map());
  Code* code = Code::cast(result);
  ASSERT(!isolate_->code_range()->exists() ||
         isolate_->code_range()->contains(code->address()));
  code->set_instruction_size(desc.instr_size);
  code->set_relocation_info(reloc_info);
  code->set_flags(flags);
  if (code->is_call_stub() || code->is_keyed_call_stub()) {
    code->set_check_type(RECEIVER_MAP_CHECK);
  }
  code->set_is_crankshafted(crankshafted);
  code->set_deoptimization_data(empty_fixed_array(), SKIP_WRITE_BARRIER);
  code->InitializeTypeFeedbackInfoNoWriteBarrier(undefined_value());
  code->set_handler_table(empty_fixed_array(), SKIP_WRITE_BARRIER);
  code->set_gc_metadata(Smi::FromInt(0));
  code->set_ic_age(global_ic_age_);
  code->set_prologue_offset(kPrologueOffsetNotSet);
  if (code->kind() == Code::OPTIMIZED_FUNCTION) {
    code->set_marked_for_deoptimization(false);
  }
  // Allow self references to the created code object by patching the handle
  // to point to the newly allocated Code object.
  if (!self_reference.is_null()) {
    *(self_reference.location()) = code;
  }
  // Migrate generated code.
  // The generated code can contain Object** values (typically from handles)
  // that are dereferenced during the copy to point directly to the actual heap
  // objects.  These pointers can include references to the code object itself,
  // through the self_reference parameter.
  code->CopyFrom(desc);

#ifdef VERIFY_HEAP
  if (FLAG_verify_heap) {
    code->Verify();
  }
#endif
  return code;
}


MaybeObject* Heap::CopyCode(Code* code) {
  // Allocate an object the same size as the code object.
  int obj_size = code->Size();
  MaybeObject* maybe_result;
  if (obj_size > code_space()->AreaSize()) {
    maybe_result = lo_space_->AllocateRaw(obj_size, EXECUTABLE);
  } else {
    maybe_result = code_space_->AllocateRaw(obj_size);
  }

  Object* result;
  if (!maybe_result->ToObject(&result)) return maybe_result;

  // Copy the code object.
  Address old_addr = code->address();
  Address new_addr = reinterpret_cast<HeapObject*>(result)->address();
  CopyBlock(new_addr, old_addr, obj_size);
  // Relocate the copy.
  Code* new_code = Code::cast(result);
  ASSERT(!isolate_->code_range()->exists() ||
         isolate_->code_range()->contains(code->address()));
  new_code->Relocate(new_addr - old_addr);
  return new_code;
}


MaybeObject* Heap::CopyCode(Code* code, Vector<byte> reloc_info) {
  // Allocate the ByteArray before the Code object, so that we do not risk
  // leaving an uninitialized Code object (and breaking the heap).
  Object* reloc_info_array;
  { MaybeObject* maybe_reloc_info_array =
        AllocateByteArray(reloc_info.length(), TENURED);
    if (!maybe_reloc_info_array->ToObject(&reloc_info_array)) {
      return maybe_reloc_info_array;
    }
  }

  int new_body_size = RoundUp(code->instruction_size(), kObjectAlignment);

  int new_obj_size = Code::SizeFor(new_body_size);

  Address old_addr = code->address();

  size_t relocation_offset =
      static_cast<size_t>(code->instruction_end() - old_addr);

  MaybeObject* maybe_result;
  if (new_obj_size > code_space()->AreaSize()) {
    maybe_result = lo_space_->AllocateRaw(new_obj_size, EXECUTABLE);
  } else {
    maybe_result = code_space_->AllocateRaw(new_obj_size);
  }

  Object* result;
  if (!maybe_result->ToObject(&result)) return maybe_result;

  // Copy the code object.
  Address new_addr = reinterpret_cast<HeapObject*>(result)->address();

  // Copy header and instructions.
  CopyBytes(new_addr, old_addr, relocation_offset);

  Code* new_code = Code::cast(result);
  new_code->set_relocation_info(ByteArray::cast(reloc_info_array));

  // Copy the patched rinfo.
  CopyBytes(new_code->relocation_start(),
            reloc_info.start(),
            static_cast<size_t>(reloc_info.length()));

  // Relocate the copy.
  ASSERT(!isolate_->code_range()->exists() ||
         isolate_->code_range()->contains(code->address()));
  new_code->Relocate(new_addr - old_addr);

#ifdef VERIFY_HEAP
  if (FLAG_verify_heap) {
    code->Verify();
  }
#endif
  return new_code;
}


MaybeObject* Heap::AllocateWithAllocationSite(Map* map, AllocationSpace space,
    Handle<Object> allocation_site_info_payload) {
  ASSERT(gc_state_ == NOT_IN_GC);
  ASSERT(map->instance_type() != MAP_TYPE);
  // If allocation failures are disallowed, we may allocate in a different
  // space when new space is full and the object is not a large object.
  AllocationSpace retry_space =
      (space != NEW_SPACE) ? space : TargetSpaceId(map->instance_type());
  int size = map->instance_size() + AllocationSiteInfo::kSize;
  Object* result;
  MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
  if (!maybe_result->ToObject(&result)) return maybe_result;
  // No need for a write barrier since the object is white and the map is in
  // old space.
  HeapObject::cast(result)->set_map_no_write_barrier(map);
  AllocationSiteInfo* alloc_info = reinterpret_cast<AllocationSiteInfo*>(
      reinterpret_cast<Address>(result) + map->instance_size());
  alloc_info->set_map_no_write_barrier(allocation_site_info_map());
  alloc_info->set_payload(*allocation_site_info_payload, SKIP_WRITE_BARRIER);
  return result;
}


MaybeObject* Heap::Allocate(Map* map, AllocationSpace space) {
  ASSERT(gc_state_ == NOT_IN_GC);
  ASSERT(map->instance_type() != MAP_TYPE);
  // If allocation failures are disallowed, we may allocate in a different
  // space when new space is full and the object is not a large object.
  AllocationSpace retry_space =
      (space != NEW_SPACE) ? space : TargetSpaceId(map->instance_type());
  int size = map->instance_size();
  Object* result;
  MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
  if (!maybe_result->ToObject(&result)) return maybe_result;
  // No need for a write barrier since the object is white and the map is in
  // old space.
  HeapObject::cast(result)->set_map_no_write_barrier(map);
  return result;
}


void Heap::InitializeFunction(JSFunction* function,
                              SharedFunctionInfo* shared,
                              Object* prototype) {
  ASSERT(!prototype->IsMap());
  function->initialize_properties();
  function->initialize_elements();
  function->set_shared(shared);
  function->set_code(shared->code());
  function->set_prototype_or_initial_map(prototype);
  function->set_context(undefined_value());
  function->set_literals_or_bindings(empty_fixed_array());
  function->set_next_function_link(undefined_value());
}


MaybeObject* Heap::AllocateFunctionPrototype(JSFunction* function) {
  // Make sure to use globals from the function's context, since the function
  // can be from a different context.
  Context* native_context = function->context()->native_context();
  bool needs_constructor_property;
  Map* new_map;
  if (function->shared()->is_generator()) {
    // Generator prototypes can share maps since they don't have "constructor"
    // properties.
    new_map = native_context->generator_object_prototype_map();
    needs_constructor_property = false;
  } else {
    // Each function prototype gets a fresh map to avoid unwanted sharing of
    // maps between prototypes of different constructors.
    JSFunction* object_function = native_context->object_function();
    ASSERT(object_function->has_initial_map());
    MaybeObject* maybe_map = object_function->initial_map()->Copy();
    if (!maybe_map->To(&new_map)) return maybe_map;
    needs_constructor_property = true;
  }

  Object* prototype;
  MaybeObject* maybe_prototype = AllocateJSObjectFromMap(new_map);
  if (!maybe_prototype->ToObject(&prototype)) return maybe_prototype;

  if (needs_constructor_property) {
    MaybeObject* maybe_failure =
        JSObject::cast(prototype)->SetLocalPropertyIgnoreAttributes(
            constructor_string(), function, DONT_ENUM);
    if (maybe_failure->IsFailure()) return maybe_failure;
  }

  return prototype;
}


MaybeObject* Heap::AllocateFunction(Map* function_map,
                                    SharedFunctionInfo* shared,
                                    Object* prototype,
                                    PretenureFlag pretenure) {
  AllocationSpace space =
      (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
  Object* result;
  { MaybeObject* maybe_result = Allocate(function_map, space);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }
  InitializeFunction(JSFunction::cast(result), shared, prototype);
  return result;
}


MaybeObject* Heap::AllocateArgumentsObject(Object* callee, int length) {
  // To get fast allocation and map sharing for arguments objects we
  // allocate them based on an arguments boilerplate.

  JSObject* boilerplate;
  int arguments_object_size;
  bool strict_mode_callee = callee->IsJSFunction() &&
      !JSFunction::cast(callee)->shared()->is_classic_mode();
  if (strict_mode_callee) {
    boilerplate =
        isolate()->context()->native_context()->
            strict_mode_arguments_boilerplate();
    arguments_object_size = kArgumentsObjectSizeStrict;
  } else {
    boilerplate =
        isolate()->context()->native_context()->arguments_boilerplate();
    arguments_object_size = kArgumentsObjectSize;
  }

  // This calls Copy directly rather than using Heap::AllocateRaw, so we
  // duplicate the check here.
  ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);

  // Check that the size of the boilerplate matches our
  // expectations.  The ArgumentsAccessStub::GenerateNewObject relies
  // on the size being a known constant.
  ASSERT(arguments_object_size == boilerplate->map()->instance_size());

  // Do the allocation.
  Object* result;
  { MaybeObject* maybe_result =
        AllocateRaw(arguments_object_size, NEW_SPACE, OLD_POINTER_SPACE);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }

  // Copy the content.  The arguments boilerplate doesn't have any
  // fields that point to new space, so it's safe to skip the write
  // barrier here.
  CopyBlock(HeapObject::cast(result)->address(),
            boilerplate->address(),
            JSObject::kHeaderSize);

  // Set the length property.
  JSObject::cast(result)->InObjectPropertyAtPut(kArgumentsLengthIndex,
                                                Smi::FromInt(length),
                                                SKIP_WRITE_BARRIER);
  // Set the callee property for non-strict mode arguments objects only.
  if (!strict_mode_callee) {
    JSObject::cast(result)->InObjectPropertyAtPut(kArgumentsCalleeIndex,
                                                  callee);
  }

  // Check the state of the object.
  ASSERT(JSObject::cast(result)->HasFastProperties());
  ASSERT(JSObject::cast(result)->HasFastObjectElements());

  return result;
}
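

// Returns true if the sorted descriptor array contains two descriptors with
// the same key. Sorting makes duplicate names adjacent, so a single linear
// pass suffices (e.g. a constructor that assigns this.x twice produces two
// descriptors both keyed "x").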
static bool HasDuplicates(DescriptorArray* descriptors) {
  int count = descriptors->number_of_descriptors();
  if (count > 1) {
    Name* prev_key = descriptors->GetKey(0);
    for (int i = 1; i != count; i++) {
      Name* current_key = descriptors->GetKey(i);
      if (prev_key == current_key) return true;
      prev_key = current_key;
    }
  }
  return false;
}


MaybeObject* Heap::AllocateInitialMap(JSFunction* fun) {
  ASSERT(!fun->has_initial_map());

  // First create a new map with the size and number of in-object properties
  // suggested by the function.
  InstanceType instance_type;
  int instance_size;
  int in_object_properties;
  if (fun->shared()->is_generator()) {
    instance_type = JS_GENERATOR_OBJECT_TYPE;
    instance_size = JSGeneratorObject::kSize;
    in_object_properties = 0;
  } else {
    instance_type = JS_OBJECT_TYPE;
    instance_size = fun->shared()->CalculateInstanceSize();
    in_object_properties = fun->shared()->CalculateInObjectProperties();
  }
  Map* map;
  MaybeObject* maybe_map = AllocateMap(instance_type, instance_size);
  if (!maybe_map->To(&map)) return maybe_map;

  // Fetch or allocate the prototype.
  Object* prototype;
  if (fun->has_instance_prototype()) {
    prototype = fun->instance_prototype();
  } else {
    MaybeObject* maybe_prototype = AllocateFunctionPrototype(fun);
    if (!maybe_prototype->To(&prototype)) return maybe_prototype;
  }
  map->set_inobject_properties(in_object_properties);
  map->set_unused_property_fields(in_object_properties);
  map->set_prototype(prototype);
  ASSERT(map->has_fast_object_elements());

  // If the function has only simple this property assignments, add
  // field descriptors for these to the initial map as the object
  // cannot be constructed without having these properties.  Guard by
  // the inline_new flag so we only change the map if we generate a
  // specialized construct stub.
  ASSERT(in_object_properties <= Map::kMaxPreAllocatedPropertyFields);
  if (instance_type == JS_OBJECT_TYPE &&
      fun->shared()->CanGenerateInlineConstructor(prototype)) {
    int count = fun->shared()->this_property_assignments_count();
    if (count > in_object_properties) {
      // The inline constructor can only handle in-object properties.
      fun->shared()->ForbidInlineConstructor();
    } else {
      DescriptorArray* descriptors;
      MaybeObject* maybe_descriptors = DescriptorArray::Allocate(count);
      if (!maybe_descriptors->To(&descriptors)) return maybe_descriptors;

      DescriptorArray::WhitenessWitness witness(descriptors);
      for (int i = 0; i < count; i++) {
        String* name = fun->shared()->GetThisPropertyAssignmentName(i);
        ASSERT(name->IsInternalizedString());
        FieldDescriptor field(name, i, NONE, i + 1);
        descriptors->Set(i, &field, witness);
      }
      descriptors->Sort();

      // The descriptors may contain duplicates because the compiler does not
      // guarantee the uniqueness of property names (it would have required
      // quadratic time).  Once the descriptors are sorted we can check for
      // duplicates in linear time.
      if (HasDuplicates(descriptors)) {
        fun->shared()->ForbidInlineConstructor();
      } else {
        map->InitializeDescriptors(descriptors);
        map->set_pre_allocated_property_fields(count);
        map->set_unused_property_fields(in_object_properties - count);
      }
    }
  }

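  // Start in-object slack tracking below: the instance size chosen above is
  // only a prediction, and unused in-object slots can be reclaimed (shrinking
  // instances of this map) once enough objects have been constructed.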
  if (instance_type == JS_OBJECT_TYPE) {
    fun->shared()->StartInobjectSlackTracking(map);
  }

  return map;
}


void Heap::InitializeJSObjectFromMap(JSObject* obj,
                                     FixedArray* properties,
                                     Map* map) {
  obj->set_properties(properties);
  obj->initialize_elements();
  // TODO(1240798): Initialize the object's body using valid initial values
  // according to the object's initial map.  For example, if the map's
  // instance type is JS_ARRAY_TYPE, the length field should be initialized
  // to a number (e.g. Smi::FromInt(0)) and the elements initialized to a
  // fixed array (e.g. Heap::empty_fixed_array()).  Currently, the object
  // verification code has to cope with (temporarily) invalid objects.  See,
  // for example, JSArray::JSArrayVerify().
  Object* filler;
  // We cannot always fill with one_pointer_filler_map because objects
  // created from API functions expect their internal fields to be initialized
  // with undefined_value.
  // Pre-allocated fields need to be initialized with undefined_value as well
  // so that object accesses before the constructor completes (e.g. in the
  // debugger) will not cause a crash.
  if (map->constructor()->IsJSFunction() &&
      JSFunction::cast(map->constructor())->shared()->
          IsInobjectSlackTrackingInProgress()) {
    // We might want to shrink the object later.
    ASSERT(obj->GetInternalFieldCount() == 0);
    filler = Heap::one_pointer_filler_map();
  } else {
    filler = Heap::undefined_value();
  }
  obj->InitializeBody(map, Heap::undefined_value(), filler);
}


MaybeObject* Heap::AllocateJSObjectFromMap(Map* map, PretenureFlag pretenure) {
  // JSFunctions should be allocated using AllocateFunction to be
  // properly initialized.
  ASSERT(map->instance_type() != JS_FUNCTION_TYPE);

  // Both types of global objects should be allocated using
  // AllocateGlobalObject to be properly initialized.
  ASSERT(map->instance_type() != JS_GLOBAL_OBJECT_TYPE);
  ASSERT(map->instance_type() != JS_BUILTINS_OBJECT_TYPE);

  // Allocate the backing storage for the properties.
  int prop_size =
      map->pre_allocated_property_fields() +
      map->unused_property_fields() -
      map->inobject_properties();
  ASSERT(prop_size >= 0);
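  // (prop_size is the predicted number of properties, pre-allocated plus
  // unused, minus what fits in-object: e.g. 2 pre-allocated + 3 unused fields
  // with 4 in-object slots leaves 1 backing-store slot.)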
  Object* properties;
  { MaybeObject* maybe_properties = AllocateFixedArray(prop_size, pretenure);
    if (!maybe_properties->ToObject(&properties)) return maybe_properties;
  }

  // Allocate the JSObject.
  AllocationSpace space =
      (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
  if (map->instance_size() > Page::kMaxNonCodeHeapObjectSize) space = LO_SPACE;
  Object* obj;
  MaybeObject* maybe_obj = Allocate(map, space);
  if (!maybe_obj->To(&obj)) return maybe_obj;

  // Initialize the JSObject.
  InitializeJSObjectFromMap(JSObject::cast(obj),
                            FixedArray::cast(properties),
                            map);
  ASSERT(JSObject::cast(obj)->HasFastElements());
  return obj;
}


MaybeObject* Heap::AllocateJSObjectFromMapWithAllocationSite(Map* map,
    Handle<Object> allocation_site_info_payload) {
  // JSFunctions should be allocated using AllocateFunction to be
  // properly initialized.
  ASSERT(map->instance_type() != JS_FUNCTION_TYPE);

  // Both types of global objects should be allocated using
  // AllocateGlobalObject to be properly initialized.
  ASSERT(map->instance_type() != JS_GLOBAL_OBJECT_TYPE);
  ASSERT(map->instance_type() != JS_BUILTINS_OBJECT_TYPE);

  // Allocate the backing storage for the properties.
  int prop_size =
      map->pre_allocated_property_fields() +
      map->unused_property_fields() -
      map->inobject_properties();
  ASSERT(prop_size >= 0);
  Object* properties;
  { MaybeObject* maybe_properties = AllocateFixedArray(prop_size);
    if (!maybe_properties->ToObject(&properties)) return maybe_properties;
  }

  // Allocate the JSObject.
  AllocationSpace space = NEW_SPACE;
  if (map->instance_size() > Page::kMaxNonCodeHeapObjectSize) space = LO_SPACE;
  Object* obj;
  MaybeObject* maybe_obj = AllocateWithAllocationSite(map, space,
      allocation_site_info_payload);
  if (!maybe_obj->To(&obj)) return maybe_obj;

  // Initialize the JSObject.
  InitializeJSObjectFromMap(JSObject::cast(obj),
                            FixedArray::cast(properties),
                            map);
  ASSERT(JSObject::cast(obj)->HasFastElements());
  return obj;
}


MaybeObject* Heap::AllocateJSObject(JSFunction* constructor,
                                    PretenureFlag pretenure) {
  // Allocate the initial map if absent.
  if (!constructor->has_initial_map()) {
    Object* initial_map;
    { MaybeObject* maybe_initial_map = AllocateInitialMap(constructor);
      if (!maybe_initial_map->ToObject(&initial_map)) return maybe_initial_map;
    }
    constructor->set_initial_map(Map::cast(initial_map));
    Map::cast(initial_map)->set_constructor(constructor);
  }
  // Allocate the object based on the constructor's initial map.
  MaybeObject* result = AllocateJSObjectFromMap(
      constructor->initial_map(), pretenure);
#ifdef DEBUG
  // Make sure the result is NOT a global object if valid.
  Object* non_failure;
  ASSERT(!result->ToObject(&non_failure) || !non_failure->IsGlobalObject());
#endif
  return result;
}


MaybeObject* Heap::AllocateJSObjectWithAllocationSite(JSFunction* constructor,
    Handle<Object> allocation_site_info_payload) {
  // Allocate the initial map if absent.
  if (!constructor->has_initial_map()) {
    Object* initial_map;
    { MaybeObject* maybe_initial_map = AllocateInitialMap(constructor);
      if (!maybe_initial_map->ToObject(&initial_map)) return maybe_initial_map;
    }
    constructor->set_initial_map(Map::cast(initial_map));
    Map::cast(initial_map)->set_constructor(constructor);
  }
  // Allocate the object based on the constructor's initial map, or the
  // payload advice.
  Map* initial_map = constructor->initial_map();

  JSGlobalPropertyCell* cell = JSGlobalPropertyCell::cast(
      *allocation_site_info_payload);
  Smi* smi = Smi::cast(cell->value());
  ElementsKind to_kind = static_cast<ElementsKind>(smi->value());
  AllocationSiteMode mode = TRACK_ALLOCATION_SITE;
  if (to_kind != initial_map->elements_kind()) {
    MaybeObject* maybe_new_map = constructor->GetElementsTransitionMap(
        isolate(), to_kind);
    if (!maybe_new_map->To(&initial_map)) return maybe_new_map;
    // Possibly alter the mode, since we found an updated elements kind
    // in the type info cell.
    mode = AllocationSiteInfo::GetMode(to_kind);
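    // (Roughly: GetMode keeps tracking only for element kinds that can still
    // usefully transition further; once the site has reached a more general
    // kind there is little left to record.)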
  }

  MaybeObject* result;
  if (mode == TRACK_ALLOCATION_SITE) {
    result = AllocateJSObjectFromMapWithAllocationSite(initial_map,
        allocation_site_info_payload);
  } else {
    result = AllocateJSObjectFromMap(initial_map, NOT_TENURED);
  }
#ifdef DEBUG
  // Make sure the result is NOT a global object if valid.
  Object* non_failure;
  ASSERT(!result->ToObject(&non_failure) || !non_failure->IsGlobalObject());
#endif
  return result;
}


MaybeObject* Heap::AllocateJSGeneratorObject(JSFunction *function) {
  ASSERT(function->shared()->is_generator());
  Map *map;
  if (function->has_initial_map()) {
    map = function->initial_map();
  } else {
    // Allocate the initial map if absent.
    MaybeObject* maybe_map = AllocateInitialMap(function);
    if (!maybe_map->To(&map)) return maybe_map;
    function->set_initial_map(map);
    map->set_constructor(function);
  }
  ASSERT(map->instance_type() == JS_GENERATOR_OBJECT_TYPE);
  return AllocateJSObjectFromMap(map);
}


MaybeObject* Heap::AllocateJSModule(Context* context, ScopeInfo* scope_info) {
  // Allocate a fresh map. Modules do not have a prototype.
  Map* map;
  MaybeObject* maybe_map = AllocateMap(JS_MODULE_TYPE, JSModule::kSize);
  if (!maybe_map->To(&map)) return maybe_map;
  // Allocate the object based on the map.
  JSModule* module;
  MaybeObject* maybe_module = AllocateJSObjectFromMap(map, TENURED);
  if (!maybe_module->To(&module)) return maybe_module;
  module->set_context(context);
  module->set_scope_info(scope_info);
  return module;
}


MaybeObject* Heap::AllocateJSArrayAndStorage(
    ElementsKind elements_kind,
    int length,
    int capacity,
    ArrayStorageAllocationMode mode,
    PretenureFlag pretenure) {
  MaybeObject* maybe_array = AllocateJSArray(elements_kind, pretenure);
  JSArray* array;
  if (!maybe_array->To(&array)) return maybe_array;

  // TODO(mvstanton): this body of code is duplicated in AllocateJSArrayStorage
  // for performance reasons.
  ASSERT(capacity >= length);

  if (capacity == 0) {
    array->set_length(Smi::FromInt(0));
    array->set_elements(empty_fixed_array());
    return array;
  }

  FixedArrayBase* elms;
  MaybeObject* maybe_elms = NULL;
  if (IsFastDoubleElementsKind(elements_kind)) {
    if (mode == DONT_INITIALIZE_ARRAY_ELEMENTS) {
      maybe_elms = AllocateUninitializedFixedDoubleArray(capacity);
    } else {
      ASSERT(mode == INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
      maybe_elms = AllocateFixedDoubleArrayWithHoles(capacity);
    }
  } else {
    ASSERT(IsFastSmiOrObjectElementsKind(elements_kind));
    if (mode == DONT_INITIALIZE_ARRAY_ELEMENTS) {
      maybe_elms = AllocateUninitializedFixedArray(capacity);
    } else {
      ASSERT(mode == INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
      maybe_elms = AllocateFixedArrayWithHoles(capacity);
    }
  }
  if (!maybe_elms->To(&elms)) return maybe_elms;

  array->set_elements(elms);
  array->set_length(Smi::FromInt(length));
  return array;
}


MaybeObject* Heap::AllocateJSArrayAndStorageWithAllocationSite(
    ElementsKind elements_kind,
    int length,
    int capacity,
    Handle<Object> allocation_site_payload,
    ArrayStorageAllocationMode mode) {
  MaybeObject* maybe_array = AllocateJSArrayWithAllocationSite(elements_kind,
      allocation_site_payload);
  JSArray* array;
  if (!maybe_array->To(&array)) return maybe_array;
  return AllocateJSArrayStorage(array, length, capacity, mode);
}


MaybeObject* Heap::AllocateJSArrayStorage(
    JSArray* array,
    int length,
    int capacity,
    ArrayStorageAllocationMode mode) {
  ASSERT(capacity >= length);

  if (capacity == 0) {
    array->set_length(Smi::FromInt(0));
    array->set_elements(empty_fixed_array());
    return array;
  }

  FixedArrayBase* elms;
  MaybeObject* maybe_elms = NULL;
  ElementsKind elements_kind = array->GetElementsKind();
  if (IsFastDoubleElementsKind(elements_kind)) {
    if (mode == DONT_INITIALIZE_ARRAY_ELEMENTS) {
      maybe_elms = AllocateUninitializedFixedDoubleArray(capacity);
    } else {
      ASSERT(mode == INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
      maybe_elms = AllocateFixedDoubleArrayWithHoles(capacity);
    }
  } else {
    ASSERT(IsFastSmiOrObjectElementsKind(elements_kind));
    if (mode == DONT_INITIALIZE_ARRAY_ELEMENTS) {
      maybe_elms = AllocateUninitializedFixedArray(capacity);
    } else {
      ASSERT(mode == INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
      maybe_elms = AllocateFixedArrayWithHoles(capacity);
    }
  }
  if (!maybe_elms->To(&elms)) return maybe_elms;

  array->set_elements(elms);
  array->set_length(Smi::FromInt(length));
  return array;
}


MaybeObject* Heap::AllocateJSArrayWithElements(
    FixedArrayBase* elements,
    ElementsKind elements_kind,
    int length,
    PretenureFlag pretenure) {
  MaybeObject* maybe_array = AllocateJSArray(elements_kind, pretenure);
  JSArray* array;
  if (!maybe_array->To(&array)) return maybe_array;

  array->set_elements(elements);
  array->set_length(Smi::FromInt(length));
  array->ValidateElements();
  return array;
}


MaybeObject* Heap::AllocateJSProxy(Object* handler, Object* prototype) {
  // Allocate the map.
  // TODO(rossberg): Once we optimize proxies, think about a scheme to share
  // maps.  Will probably depend on the identity of the handler object, too.
  Map* map;
  MaybeObject* maybe_map_obj = AllocateMap(JS_PROXY_TYPE, JSProxy::kSize);
  if (!maybe_map_obj->To<Map>(&map)) return maybe_map_obj;
  map->set_prototype(prototype);

  // Allocate the proxy object.
  JSProxy* result;
  MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
  if (!maybe_result->To<JSProxy>(&result)) return maybe_result;
  result->InitializeBody(map->instance_size(), Smi::FromInt(0));
  result->set_handler(handler);
  result->set_hash(undefined_value(), SKIP_WRITE_BARRIER);
  return result;
}


MaybeObject* Heap::AllocateJSFunctionProxy(Object* handler,
                                           Object* call_trap,
                                           Object* construct_trap,
                                           Object* prototype) {
  // Allocate the map.
  // TODO(rossberg): Once we optimize proxies, think about a scheme to share
  // maps.  Will probably depend on the identity of the handler object, too.
  Map* map;
  MaybeObject* maybe_map_obj =
      AllocateMap(JS_FUNCTION_PROXY_TYPE, JSFunctionProxy::kSize);
  if (!maybe_map_obj->To<Map>(&map)) return maybe_map_obj;
  map->set_prototype(prototype);

  // Allocate the proxy object.
  JSFunctionProxy* result;
  MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
  if (!maybe_result->To<JSFunctionProxy>(&result)) return maybe_result;
  result->InitializeBody(map->instance_size(), Smi::FromInt(0));
  result->set_handler(handler);
  result->set_hash(undefined_value(), SKIP_WRITE_BARRIER);
  result->set_call_trap(call_trap);
  result->set_construct_trap(construct_trap);
  return result;
}


MaybeObject* Heap::AllocateGlobalObject(JSFunction* constructor) {
  ASSERT(constructor->has_initial_map());
  Map* map = constructor->initial_map();
  ASSERT(map->is_dictionary_map());

  // Make sure no field properties are described in the initial map.
  // This guarantees us that normalizing the properties does not
  // require us to change property values to JSGlobalPropertyCells.
  ASSERT(map->NextFreePropertyIndex() == 0);

  // Make sure we don't have a ton of pre-allocated slots in the
  // global objects.  They will be unused once we normalize the object.
  ASSERT(map->unused_property_fields() == 0);
  ASSERT(map->inobject_properties() == 0);

  // Initial size of the backing store to avoid resize of the storage during
  // bootstrapping.  The size differs between the JS global object and the
  // builtins object.
  int initial_size = map->instance_type() == JS_GLOBAL_OBJECT_TYPE ? 64 : 512;
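  // (The builtins object gets the larger initial store, presumably because
  // bootstrapping installs far more properties on it than on the JS global
  // object.)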

  // Allocate a dictionary object for backing storage.
  NameDictionary* dictionary;
  MaybeObject* maybe_dictionary =
      NameDictionary::Allocate(
          this,
          map->NumberOfOwnDescriptors() * 2 + initial_size);
  if (!maybe_dictionary->To(&dictionary)) return maybe_dictionary;

  // The global object might be created from an object template with accessors.
  // Fill these accessors into the dictionary.
  DescriptorArray* descs = map->instance_descriptors();
  for (int i = 0; i < descs->number_of_descriptors(); i++) {
    PropertyDetails details = descs->GetDetails(i);
    ASSERT(details.type() == CALLBACKS);  // Only accessors are expected.
    PropertyDetails d = PropertyDetails(details.attributes(),
                                        CALLBACKS,
                                        details.descriptor_index());
    Object* value = descs->GetCallbacksObject(i);
    MaybeObject* maybe_value = AllocateJSGlobalPropertyCell(value);
    if (!maybe_value->ToObject(&value)) return maybe_value;

    MaybeObject* maybe_added = dictionary->Add(descs->GetKey(i), value, d);
    if (!maybe_added->To(&dictionary)) return maybe_added;
  }

  // Allocate the global object and initialize it with the backing store.
  JSObject* global;
  MaybeObject* maybe_global = Allocate(map, OLD_POINTER_SPACE);
  if (!maybe_global->To(&global)) return maybe_global;

  InitializeJSObjectFromMap(global, dictionary, map);

  // Create a new map for the global object.
  Map* new_map;
  MaybeObject* maybe_map = map->CopyDropDescriptors();
  if (!maybe_map->To(&new_map)) return maybe_map;
  new_map->set_dictionary_map(true);

  // Set up the global object as a normalized object.
  global->set_map(new_map);
  global->set_properties(dictionary);

  // Make sure the result is a global object with properties in dictionary.
  ASSERT(global->IsGlobalObject());
  ASSERT(!global->HasFastProperties());
  return global;
}


MaybeObject* Heap::CopyJSObject(JSObject* source) {
  // Never used to copy functions.  If functions need to be copied we
  // have to be careful to clear the literals array.
  SLOW_ASSERT(!source->IsJSFunction());

  // Make the clone.
  Map* map = source->map();
  int object_size = map->instance_size();
  Object* clone;

  WriteBarrierMode wb_mode = UPDATE_WRITE_BARRIER;

  // If we're forced to always allocate, we use the general allocation
  // functions which may leave us with an object in old space.
  if (always_allocate()) {
    { MaybeObject* maybe_clone =
          AllocateRaw(object_size, NEW_SPACE, OLD_POINTER_SPACE);
      if (!maybe_clone->ToObject(&clone)) return maybe_clone;
    }
    Address clone_address = HeapObject::cast(clone)->address();
    CopyBlock(clone_address,
              source->address(),
              object_size);
    // Update the write barrier for all fields that lie beyond the header.
    RecordWrites(clone_address,
                 JSObject::kHeaderSize,
                 (object_size - JSObject::kHeaderSize) / kPointerSize);
  } else {
    wb_mode = SKIP_WRITE_BARRIER;

    { MaybeObject* maybe_clone = new_space_.AllocateRaw(object_size);
      if (!maybe_clone->ToObject(&clone)) return maybe_clone;
    }
    SLOW_ASSERT(InNewSpace(clone));
    // Since we know the clone is allocated in new space, we can copy
    // the contents without worrying about updating the write barrier.
    CopyBlock(HeapObject::cast(clone)->address(),
              source->address(),
              object_size);
  }

  SLOW_ASSERT(
      JSObject::cast(clone)->GetElementsKind() == source->GetElementsKind());
  FixedArrayBase* elements = FixedArrayBase::cast(source->elements());
  FixedArray* properties = FixedArray::cast(source->properties());
  // Update elements if necessary.
  if (elements->length() > 0) {
    Object* elem;
    { MaybeObject* maybe_elem;
      if (elements->map() == fixed_cow_array_map()) {
        maybe_elem = FixedArray::cast(elements);
      } else if (source->HasFastDoubleElements()) {
        maybe_elem = CopyFixedDoubleArray(FixedDoubleArray::cast(elements));
      } else {
        maybe_elem = CopyFixedArray(FixedArray::cast(elements));
      }
      if (!maybe_elem->ToObject(&elem)) return maybe_elem;
    }
    JSObject::cast(clone)->set_elements(FixedArrayBase::cast(elem), wb_mode);
  }
  // Update properties if necessary.
  if (properties->length() > 0) {
    Object* prop;
    { MaybeObject* maybe_prop = CopyFixedArray(properties);
      if (!maybe_prop->ToObject(&prop)) return maybe_prop;
    }
    JSObject::cast(clone)->set_properties(FixedArray::cast(prop), wb_mode);
  }
  // Return the new clone.
  return clone;
}


MaybeObject* Heap::CopyJSObjectWithAllocationSite(JSObject* source) {
  // Never used to copy functions.  If functions need to be copied we
  // have to be careful to clear the literals array.
  SLOW_ASSERT(!source->IsJSFunction());

  // Make the clone.
  Map* map = source->map();
  int object_size = map->instance_size();
  Object* clone;

  ASSERT(map->CanTrackAllocationSite());
  ASSERT(map->instance_type() == JS_ARRAY_TYPE);
  WriteBarrierMode wb_mode = UPDATE_WRITE_BARRIER;

  // If we're forced to always allocate, we use the general allocation
  // functions which may leave us with an object in old space.
  int adjusted_object_size = object_size;
  if (always_allocate()) {
    // We'll only track the origin if we are certain to allocate in new space.
    const int kMinFreeNewSpaceAfterGC = InitialSemiSpaceSize() * 3/4;
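    // (Roughly three quarters of the initial semispace: if the object plus
    // its AllocationSiteInfo fits under what a scavenge is expected to leave
    // free, the raw allocation below is effectively certain to land in new
    // space.)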
    if ((object_size + AllocationSiteInfo::kSize) < kMinFreeNewSpaceAfterGC) {
      adjusted_object_size += AllocationSiteInfo::kSize;
    }

    { MaybeObject* maybe_clone =
          AllocateRaw(adjusted_object_size, NEW_SPACE, OLD_POINTER_SPACE);
      if (!maybe_clone->ToObject(&clone)) return maybe_clone;
    }
    Address clone_address = HeapObject::cast(clone)->address();
    CopyBlock(clone_address,
              source->address(),
              object_size);
    // Update the write barrier for all fields that lie beyond the header.
    int write_barrier_offset = adjusted_object_size > object_size
        ? JSArray::kSize + AllocationSiteInfo::kSize
        : JSObject::kHeaderSize;
    if (((object_size - write_barrier_offset) / kPointerSize) > 0) {
      RecordWrites(clone_address,
                   write_barrier_offset,
                   (object_size - write_barrier_offset) / kPointerSize);
    }

    // Track allocation site information, if we failed to allocate it inline.
    if (InNewSpace(clone) &&
        adjusted_object_size == object_size) {
      MaybeObject* maybe_alloc_info =
          AllocateStruct(ALLOCATION_SITE_INFO_TYPE);
      AllocationSiteInfo* alloc_info;
      if (maybe_alloc_info->To(&alloc_info)) {
        alloc_info->set_map_no_write_barrier(allocation_site_info_map());
        alloc_info->set_payload(source, SKIP_WRITE_BARRIER);
      }
    }
  } else {
    wb_mode = SKIP_WRITE_BARRIER;
    adjusted_object_size += AllocationSiteInfo::kSize;

    { MaybeObject* maybe_clone = new_space_.AllocateRaw(adjusted_object_size);
      if (!maybe_clone->ToObject(&clone)) return maybe_clone;
    }
    SLOW_ASSERT(InNewSpace(clone));
    // Since we know the clone is allocated in new space, we can copy
    // the contents without worrying about updating the write barrier.
    CopyBlock(HeapObject::cast(clone)->address(),
              source->address(),
              object_size);
  }

  if (adjusted_object_size > object_size) {
    AllocationSiteInfo* alloc_info = reinterpret_cast<AllocationSiteInfo*>(
        reinterpret_cast<Address>(clone) + object_size);
    alloc_info->set_map_no_write_barrier(allocation_site_info_map());
    alloc_info->set_payload(source, SKIP_WRITE_BARRIER);
  }

  SLOW_ASSERT(
      JSObject::cast(clone)->GetElementsKind() == source->GetElementsKind());
  FixedArrayBase* elements = FixedArrayBase::cast(source->elements());
  FixedArray* properties = FixedArray::cast(source->properties());
  // Update elements if necessary.
  if (elements->length() > 0) {
    Object* elem;
    { MaybeObject* maybe_elem;
      if (elements->map() == fixed_cow_array_map()) {
        maybe_elem = FixedArray::cast(elements);
      } else if (source->HasFastDoubleElements()) {
        maybe_elem = CopyFixedDoubleArray(FixedDoubleArray::cast(elements));
      } else {
        maybe_elem = CopyFixedArray(FixedArray::cast(elements));
      }
      if (!maybe_elem->ToObject(&elem)) return maybe_elem;
    }
    JSObject::cast(clone)->set_elements(FixedArrayBase::cast(elem), wb_mode);
  }
  // Update properties if necessary.
  if (properties->length() > 0) {
    Object* prop;
    { MaybeObject* maybe_prop = CopyFixedArray(properties);
      if (!maybe_prop->ToObject(&prop)) return maybe_prop;
    }
    JSObject::cast(clone)->set_properties(FixedArray::cast(prop), wb_mode);
  }
  // Return the new clone.
  return clone;
}


MaybeObject* Heap::ReinitializeJSReceiver(
    JSReceiver* object, InstanceType type, int size) {
  ASSERT(type >= FIRST_JS_OBJECT_TYPE);

  // Allocate a fresh map.
  // TODO(rossberg): Once we optimize proxies, cache these maps.
  Map* map;
  MaybeObject* maybe = AllocateMap(type, size);
  if (!maybe->To<Map>(&map)) return maybe;

  // Check that the receiver has at least the size of the fresh object.
  int size_difference = object->map()->instance_size() - map->instance_size();
  ASSERT(size_difference >= 0);

  map->set_prototype(object->map()->prototype());

  // Allocate the backing storage for the properties.
  int prop_size = map->unused_property_fields() - map->inobject_properties();
  Object* properties;
  maybe = AllocateFixedArray(prop_size, TENURED);
  if (!maybe->ToObject(&properties)) return maybe;

  // Functions require some allocation, which might fail here.
  SharedFunctionInfo* shared = NULL;
  if (type == JS_FUNCTION_TYPE) {
    String* name;
    maybe =
        InternalizeOneByteString(STATIC_ASCII_VECTOR("<freezing call trap>"));
    if (!maybe->To<String>(&name)) return maybe;
    maybe = AllocateSharedFunctionInfo(name);
    if (!maybe->To<SharedFunctionInfo>(&shared)) return maybe;
  }

  // Because of possible retries of this function after failure,
  // we must NOT fail after this point, where we have changed the type!

  // Reset the map for the object.
  object->set_map(map);
  JSObject* jsobj = JSObject::cast(object);

  // Reinitialize the object from the constructor map.
  InitializeJSObjectFromMap(jsobj, FixedArray::cast(properties), map);

  // Functions require some minimal initialization.
  if (type == JS_FUNCTION_TYPE) {
    map->set_function_with_prototype(true);
    InitializeFunction(JSFunction::cast(object), shared, the_hole_value());
    JSFunction::cast(object)->set_context(
        isolate()->context()->native_context());
  }

  // Put in a filler if the new object is smaller than the old.
  if (size_difference > 0) {
    CreateFillerObjectAt(
        object->address() + map->instance_size(), size_difference);
  }

  return object;
}


MaybeObject* Heap::ReinitializeJSGlobalProxy(JSFunction* constructor,
                                             JSGlobalProxy* object) {
  ASSERT(constructor->has_initial_map());
  Map* map = constructor->initial_map();

  // Check that the already allocated object has the same size and type as
  // objects allocated using the constructor.
  ASSERT(map->instance_size() == object->map()->instance_size());
  ASSERT(map->instance_type() == object->map()->instance_type());

  // Allocate the backing storage for the properties.
  int prop_size = map->unused_property_fields() - map->inobject_properties();
  Object* properties;
  { MaybeObject* maybe_properties = AllocateFixedArray(prop_size, TENURED);
    if (!maybe_properties->ToObject(&properties)) return maybe_properties;
  }

  // Reset the map for the object.
  object->set_map(constructor->initial_map());

  // Reinitialize the object from the constructor map.
  InitializeJSObjectFromMap(object, FixedArray::cast(properties), map);
  return object;
}


MaybeObject* Heap::AllocateStringFromOneByte(Vector<const uint8_t> string,
                                             PretenureFlag pretenure) {
  int length = string.length();
  if (length == 1) {
    return Heap::LookupSingleCharacterStringFromCode(string[0]);
  }
  Object* result;
  { MaybeObject* maybe_result =
        AllocateRawOneByteString(string.length(), pretenure);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }

  // Copy the characters into the new object.
  CopyChars(SeqOneByteString::cast(result)->GetChars(),
            string.start(),
            length);
  return result;
}


MaybeObject* Heap::AllocateStringFromUtf8Slow(Vector<const char> string,
                                              int non_ascii_start,
                                              PretenureFlag pretenure) {
  // Continue counting the number of characters in the UTF-8 string, starting
  // from the first non-ascii character or word.
  Access<UnicodeCache::Utf8Decoder>
      decoder(isolate_->unicode_cache()->utf8_decoder());
  decoder->Reset(string.start() + non_ascii_start,
                 string.length() - non_ascii_start);
  int utf16_length = decoder->Utf16Length();
  ASSERT(utf16_length > 0);
  // Allocate the string.
  Object* result;
  {
    int chars = non_ascii_start + utf16_length;
    MaybeObject* maybe_result = AllocateRawTwoByteString(chars, pretenure);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }
  // Convert and copy the characters into the new object.
  SeqTwoByteString* twobyte = SeqTwoByteString::cast(result);
  // Copy the ascii portion.
  uint16_t* data = twobyte->GetChars();
  if (non_ascii_start != 0) {
    const char* ascii_data = string.start();
    for (int i = 0; i < non_ascii_start; i++) {
      *data++ = *ascii_data++;
    }
  }
  // Now write the remainder.
  decoder->WriteUtf16(data, utf16_length);
  return result;
}


MaybeObject* Heap::AllocateStringFromTwoByte(Vector<const uc16> string,
                                             PretenureFlag pretenure) {
  // Check if the string is an ASCII string.
  Object* result;
  int length = string.length();
  const uc16* start = string.start();

  if (String::IsOneByte(start, length)) {
    MaybeObject* maybe_result = AllocateRawOneByteString(length, pretenure);
    if (!maybe_result->ToObject(&result)) return maybe_result;
    CopyChars(SeqOneByteString::cast(result)->GetChars(), start, length);
  } else {  // It's not a one-byte string.
    MaybeObject* maybe_result = AllocateRawTwoByteString(length, pretenure);
    if (!maybe_result->ToObject(&result)) return maybe_result;
    CopyChars(SeqTwoByteString::cast(result)->GetChars(), start, length);
  }
  return result;
}


Map* Heap::InternalizedStringMapForString(String* string) {
  // If the string is in new space it cannot be used as internalized.
  if (InNewSpace(string)) return NULL;

  // Find the corresponding internalized string map for strings.
  switch (string->map()->instance_type()) {
    case STRING_TYPE: return internalized_string_map();
    case ASCII_STRING_TYPE: return ascii_internalized_string_map();
    case CONS_STRING_TYPE: return cons_internalized_string_map();
    case CONS_ASCII_STRING_TYPE: return cons_ascii_internalized_string_map();
    case EXTERNAL_STRING_TYPE: return external_internalized_string_map();
    case EXTERNAL_ASCII_STRING_TYPE:
      return external_ascii_internalized_string_map();
    case EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE:
      return external_internalized_string_with_one_byte_data_map();
    case SHORT_EXTERNAL_STRING_TYPE:
      return short_external_internalized_string_map();
    case SHORT_EXTERNAL_ASCII_STRING_TYPE:
      return short_external_ascii_internalized_string_map();
    case SHORT_EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE:
      return short_external_internalized_string_with_one_byte_data_map();
    default: return NULL;  // No match found.
  }
}


static inline void WriteOneByteData(Vector<const char> vector,
                                    uint8_t* chars,
                                    int len) {
  // Only works for ascii.
  ASSERT(vector.length() == len);
  OS::MemCopy(chars, vector.start(), len);
}


static inline void WriteTwoByteData(Vector<const char> vector,
                                    uint16_t* chars,
                                    int len) {
  const uint8_t* stream = reinterpret_cast<const uint8_t*>(vector.start());
  unsigned stream_length = vector.length();
  while (stream_length != 0) {
    unsigned consumed = 0;
    uint32_t c = unibrow::Utf8::ValueOf(stream, stream_length, &consumed);
    ASSERT(c != unibrow::Utf8::kBadChar);
    ASSERT(consumed <= stream_length);
    stream_length -= consumed;
    stream += consumed;
    if (c > unibrow::Utf16::kMaxNonSurrogateCharCode) {
      len -= 2;
      if (len < 0) break;
      *chars++ = unibrow::Utf16::LeadSurrogate(c);
      *chars++ = unibrow::Utf16::TrailSurrogate(c);
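      // (For example, U+1F600 is written as the surrogate pair 0xD83D,
      // 0xDE00.)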
    } else {
      len -= 1;
      if (len < 0) break;
      *chars++ = c;
    }
  }
  ASSERT(stream_length == 0);
  ASSERT(len == 0);
}


static inline void WriteOneByteData(String* s, uint8_t* chars, int len) {
  ASSERT(s->length() == len);
  String::WriteToFlat(s, chars, 0, len);
}


static inline void WriteTwoByteData(String* s, uint16_t* chars, int len) {
  ASSERT(s->length() == len);
  String::WriteToFlat(s, chars, 0, len);
}


template<bool is_one_byte, typename T>
MaybeObject* Heap::AllocateInternalizedStringImpl(
    T t, int chars, uint32_t hash_field) {
  ASSERT(chars >= 0);
  // Compute the map and object size.
  int size;
  Map* map;

  if (is_one_byte) {
    if (chars > SeqOneByteString::kMaxLength) {
      return Failure::OutOfMemoryException(0x9);
    }
    map = ascii_internalized_string_map();
    size = SeqOneByteString::SizeFor(chars);
  } else {
    if (chars > SeqTwoByteString::kMaxLength) {
      return Failure::OutOfMemoryException(0xa);
    }
    map = internalized_string_map();
    size = SeqTwoByteString::SizeFor(chars);
  }

  // Allocate the string.
  Object* result;
  { MaybeObject* maybe_result = (size > Page::kMaxNonCodeHeapObjectSize)
                   ? lo_space_->AllocateRaw(size, NOT_EXECUTABLE)
                   : old_data_space_->AllocateRaw(size);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }

  reinterpret_cast<HeapObject*>(result)->set_map_no_write_barrier(map);
  // Set the length and hash fields of the allocated string.
  String* answer = String::cast(result);
  answer->set_length(chars);
  answer->set_hash_field(hash_field);

  ASSERT_EQ(size, answer->Size());

  if (is_one_byte) {
    WriteOneByteData(t, SeqOneByteString::cast(answer)->GetChars(), chars);
  } else {
    WriteTwoByteData(t, SeqTwoByteString::cast(answer)->GetChars(), chars);
  }
  return answer;
}


// Need explicit instantiations.
template
MaybeObject* Heap::AllocateInternalizedStringImpl<true>(String*, int, uint32_t);
template
MaybeObject* Heap::AllocateInternalizedStringImpl<false>(
    String*, int, uint32_t);
template
MaybeObject* Heap::AllocateInternalizedStringImpl<false>(
    Vector<const char>, int, uint32_t);


MaybeObject* Heap::AllocateRawOneByteString(int length,
                                            PretenureFlag pretenure) {
  if (length < 0 || length > SeqOneByteString::kMaxLength) {
    return Failure::OutOfMemoryException(0xb);
  }

  int size = SeqOneByteString::SizeFor(length);
  ASSERT(size <= SeqOneByteString::kMaxSize);

  AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
  AllocationSpace retry_space = OLD_DATA_SPACE;
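
  // Pick the space by object size: objects too large for a regular page go
  // straight to large-object space, while borderline new-space requests
  // retry in large-object space if the first attempt fails.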
  if (space == NEW_SPACE) {
    if (size > kMaxObjectSizeInNewSpace) {
      // Allocate in large object space, retry space will be ignored.
      space = LO_SPACE;
    } else if (size > Page::kMaxNonCodeHeapObjectSize) {
      // Allocate in new space, retry in large object space.
      retry_space = LO_SPACE;
    }
  } else if (space == OLD_DATA_SPACE &&
             size > Page::kMaxNonCodeHeapObjectSize) {
    space = LO_SPACE;
  }
  Object* result;
  { MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }

  // Partially initialize the object.
  HeapObject::cast(result)->set_map_no_write_barrier(ascii_string_map());
  String::cast(result)->set_length(length);
  String::cast(result)->set_hash_field(String::kEmptyHashField);
  ASSERT_EQ(size, HeapObject::cast(result)->Size());

  return result;
}


MaybeObject* Heap::AllocateRawTwoByteString(int length,
                                            PretenureFlag pretenure) {
  if (length < 0 || length > SeqTwoByteString::kMaxLength) {
    return Failure::OutOfMemoryException(0xc);
  }
  int size = SeqTwoByteString::SizeFor(length);
  ASSERT(size <= SeqTwoByteString::kMaxSize);
  AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
  AllocationSpace retry_space = OLD_DATA_SPACE;

  if (space == NEW_SPACE) {
    if (size > kMaxObjectSizeInNewSpace) {
      // Allocate in large object space, retry space will be ignored.
      space = LO_SPACE;
    } else if (size > Page::kMaxNonCodeHeapObjectSize) {
      // Allocate in new space, retry in large object space.
      retry_space = LO_SPACE;
    }
  } else if (space == OLD_DATA_SPACE &&
             size > Page::kMaxNonCodeHeapObjectSize) {
    space = LO_SPACE;
  }
  Object* result;
  { MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }

  // Partially initialize the object.
  HeapObject::cast(result)->set_map_no_write_barrier(string_map());
  String::cast(result)->set_length(length);
  String::cast(result)->set_hash_field(String::kEmptyHashField);
  ASSERT_EQ(size, HeapObject::cast(result)->Size());
  return result;
}


MaybeObject* Heap::AllocateJSArray(
    ElementsKind elements_kind,
    PretenureFlag pretenure) {
  Context* native_context = isolate()->context()->native_context();
  JSFunction* array_function = native_context->array_function();
  Map* map = array_function->initial_map();
  Object* maybe_map_array = native_context->js_array_maps();
  if (!maybe_map_array->IsUndefined()) {
    Object* maybe_transitioned_map =
        FixedArray::cast(maybe_map_array)->get(elements_kind);
    if (!maybe_transitioned_map->IsUndefined()) {
      map = Map::cast(maybe_transitioned_map);
    }
  }

  return AllocateJSObjectFromMap(map, pretenure);
}


MaybeObject* Heap::AllocateJSArrayWithAllocationSite(
    ElementsKind elements_kind,
    Handle<Object> allocation_site_info_payload) {
  Context* native_context = isolate()->context()->native_context();
  JSFunction* array_function = native_context->array_function();
  Map* map = array_function->initial_map();
  Object* maybe_map_array = native_context->js_array_maps();
  if (!maybe_map_array->IsUndefined()) {
    Object* maybe_transitioned_map =
        FixedArray::cast(maybe_map_array)->get(elements_kind);
    if (!maybe_transitioned_map->IsUndefined()) {
      map = Map::cast(maybe_transitioned_map);
    }
  }
  return AllocateJSObjectFromMapWithAllocationSite(map,
      allocation_site_info_payload);
}


MaybeObject* Heap::AllocateEmptyFixedArray() {
  int size = FixedArray::SizeFor(0);
  Object* result;
  { MaybeObject* maybe_result =
        AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }
  // Initialize the object.
  reinterpret_cast<FixedArray*>(result)->set_map_no_write_barrier(
      fixed_array_map());
  reinterpret_cast<FixedArray*>(result)->set_length(0);
  return result;
}
5226 MaybeObject* Heap::AllocateRawFixedArray(int length) {
5227 if (length < 0 || length > FixedArray::kMaxLength) {
5228 return Failure::OutOfMemoryException(0xd);
5231 // Use the general function if we're forced to always allocate.
5232 if (always_allocate()) return AllocateFixedArray(length, TENURED);
5233 // Allocate the raw data for a fixed array.
5234 int size = FixedArray::SizeFor(length);
5235 return size <= kMaxObjectSizeInNewSpace
5236 ? new_space_.AllocateRaw(size)
5237 : lo_space_->AllocateRaw(size, NOT_EXECUTABLE);
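
// Size arithmetic for illustration: FixedArray::SizeFor(length) is the
// two-word header plus length pointer-size slots, so with 4-byte pointers a
// 100-element array takes 8 + 100 * 4 == 408 bytes and stays in new space;
// only when the size exceeds kMaxObjectSizeInNewSpace does the allocation
// go straight to large object space.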


MaybeObject* Heap::CopyFixedArrayWithMap(FixedArray* src, Map* map) {
  int len = src->length();
  Object* obj;
  { MaybeObject* maybe_obj = AllocateRawFixedArray(len);
    if (!maybe_obj->ToObject(&obj)) return maybe_obj;
  }
  if (InNewSpace(obj)) {
    HeapObject* dst = HeapObject::cast(obj);
    dst->set_map_no_write_barrier(map);
    CopyBlock(dst->address() + kPointerSize,
              src->address() + kPointerSize,
              FixedArray::SizeFor(len) - kPointerSize);
    return obj;
  }
  HeapObject::cast(obj)->set_map_no_write_barrier(map);
  FixedArray* result = FixedArray::cast(obj);
  result->set_length(len);

  // Copy the content.
  AssertNoAllocation no_gc;
  WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
  for (int i = 0; i < len; i++) result->set(i, src->get(i), mode);
  return result;
}


MaybeObject* Heap::CopyFixedDoubleArrayWithMap(FixedDoubleArray* src,
                                               Map* map) {
  int len = src->length();
  Object* obj;
  { MaybeObject* maybe_obj = AllocateRawFixedDoubleArray(len, NOT_TENURED);
    if (!maybe_obj->ToObject(&obj)) return maybe_obj;
  }
  HeapObject* dst = HeapObject::cast(obj);
  dst->set_map_no_write_barrier(map);
  CopyBlock(
      dst->address() + FixedDoubleArray::kLengthOffset,
      src->address() + FixedDoubleArray::kLengthOffset,
      FixedDoubleArray::SizeFor(len) - FixedDoubleArray::kLengthOffset);
  return obj;
}


MaybeObject* Heap::AllocateFixedArray(int length) {
  ASSERT(length >= 0);
  if (length == 0) return empty_fixed_array();
  Object* result;
  { MaybeObject* maybe_result = AllocateRawFixedArray(length);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }
  // Initialize header.
  FixedArray* array = reinterpret_cast<FixedArray*>(result);
  array->set_map_no_write_barrier(fixed_array_map());
  array->set_length(length);
  // Initialize body.
  ASSERT(!InNewSpace(undefined_value()));
  MemsetPointer(array->data_start(), undefined_value(), length);
  return result;
}


MaybeObject* Heap::AllocateRawFixedArray(int length, PretenureFlag pretenure) {
  if (length < 0 || length > FixedArray::kMaxLength) {
    return Failure::OutOfMemoryException(0xe);
  }

  AllocationSpace space =
      (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
  int size = FixedArray::SizeFor(length);
  if (space == NEW_SPACE && size > kMaxObjectSizeInNewSpace) {
    // Too big for new space.
    space = LO_SPACE;
  } else if (space == OLD_POINTER_SPACE &&
             size > Page::kMaxNonCodeHeapObjectSize) {
    // Too big for old pointer space.
    space = LO_SPACE;
  }

  AllocationSpace retry_space =
      (size <= Page::kMaxNonCodeHeapObjectSize) ? OLD_POINTER_SPACE : LO_SPACE;

  return AllocateRaw(size, space, retry_space);
}
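
// Note on the space/retry_space protocol (a summary of the pattern used by
// the allocators in this file, not new behavior): AllocateRaw tries 'space'
// first; the retry space is the fallback used when a new-space allocation
// fails before resorting to a full GC, and it is large object space
// whenever the object cannot fit on a regular page.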


MUST_USE_RESULT static MaybeObject* AllocateFixedArrayWithFiller(
    Heap* heap,
    int length,
    PretenureFlag pretenure,
    Object* filler) {
  ASSERT(length >= 0);
  ASSERT(heap->empty_fixed_array()->IsFixedArray());
  if (length == 0) return heap->empty_fixed_array();

  ASSERT(!heap->InNewSpace(filler));
  Object* result;
  { MaybeObject* maybe_result = heap->AllocateRawFixedArray(length, pretenure);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }

  HeapObject::cast(result)->set_map_no_write_barrier(heap->fixed_array_map());
  FixedArray* array = FixedArray::cast(result);
  array->set_length(length);
  MemsetPointer(array->data_start(), filler, length);
  return array;
}
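
// The two wrappers below differ only in the filler they pass:
// AllocateFixedArray fills with undefined_value(), while
// AllocateFixedArrayWithHoles fills with the_hole_value().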


MaybeObject* Heap::AllocateFixedArray(int length, PretenureFlag pretenure) {
  return AllocateFixedArrayWithFiller(this,
                                      length,
                                      pretenure,
                                      undefined_value());
}


MaybeObject* Heap::AllocateFixedArrayWithHoles(int length,
                                               PretenureFlag pretenure) {
  return AllocateFixedArrayWithFiller(this,
                                      length,
                                      pretenure,
                                      the_hole_value());
}


MaybeObject* Heap::AllocateUninitializedFixedArray(int length) {
  if (length == 0) return empty_fixed_array();

  Object* obj;
  { MaybeObject* maybe_obj = AllocateRawFixedArray(length);
    if (!maybe_obj->ToObject(&obj)) return maybe_obj;
  }

  reinterpret_cast<FixedArray*>(obj)->set_map_no_write_barrier(
      fixed_array_map());
  FixedArray::cast(obj)->set_length(length);
  return obj;
}


MaybeObject* Heap::AllocateEmptyFixedDoubleArray() {
  int size = FixedDoubleArray::SizeFor(0);
  Object* result;
  { MaybeObject* maybe_result =
        AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }
  // Initialize the object.
  reinterpret_cast<FixedDoubleArray*>(result)->set_map_no_write_barrier(
      fixed_double_array_map());
  reinterpret_cast<FixedDoubleArray*>(result)->set_length(0);
  return result;
}


MaybeObject* Heap::AllocateUninitializedFixedDoubleArray(
    int length,
    PretenureFlag pretenure) {
  if (length == 0) return empty_fixed_array();

  Object* elements_object;
  MaybeObject* maybe_obj = AllocateRawFixedDoubleArray(length, pretenure);
  if (!maybe_obj->ToObject(&elements_object)) return maybe_obj;
  FixedDoubleArray* elements =
      reinterpret_cast<FixedDoubleArray*>(elements_object);

  elements->set_map_no_write_barrier(fixed_double_array_map());
  elements->set_length(length);
  return elements;
}


MaybeObject* Heap::AllocateFixedDoubleArrayWithHoles(
    int length,
    PretenureFlag pretenure) {
  if (length == 0) return empty_fixed_array();

  Object* elements_object;
  MaybeObject* maybe_obj = AllocateRawFixedDoubleArray(length, pretenure);
  if (!maybe_obj->ToObject(&elements_object)) return maybe_obj;
  FixedDoubleArray* elements =
      reinterpret_cast<FixedDoubleArray*>(elements_object);

  for (int i = 0; i < length; ++i) {
    elements->set_the_hole(i);
  }

  elements->set_map_no_write_barrier(fixed_double_array_map());
  elements->set_length(length);
  return elements;
}


MaybeObject* Heap::AllocateRawFixedDoubleArray(int length,
                                               PretenureFlag pretenure) {
  if (length < 0 || length > FixedDoubleArray::kMaxLength) {
    return Failure::OutOfMemoryException(0xf);
  }

  AllocationSpace space =
      (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
  int size = FixedDoubleArray::SizeFor(length);

#ifndef V8_HOST_ARCH_64_BIT
  size += kPointerSize;
#endif

  if (space == NEW_SPACE && size > kMaxObjectSizeInNewSpace) {
    // Too big for new space.
    space = LO_SPACE;
  } else if (space == OLD_DATA_SPACE &&
             size > Page::kMaxNonCodeHeapObjectSize) {
    // Too big for old data space.
    space = LO_SPACE;
  }

  AllocationSpace retry_space =
      (size <= Page::kMaxNonCodeHeapObjectSize) ? OLD_DATA_SPACE : LO_SPACE;

  HeapObject* object;
  { MaybeObject* maybe_object = AllocateRaw(size, space, retry_space);
    if (!maybe_object->To<HeapObject>(&object)) return maybe_object;
  }

  return EnsureDoubleAligned(this, object, size);
}
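
// On 32-bit hosts the extra kPointerSize of slack requested above gives
// EnsureDoubleAligned room to shift the payload onto an 8-byte boundary;
// 64-bit allocations are already double-aligned.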


MaybeObject* Heap::AllocateHashTable(int length, PretenureFlag pretenure) {
  Object* result;
  { MaybeObject* maybe_result = AllocateFixedArray(length, pretenure);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }
  reinterpret_cast<HeapObject*>(result)->set_map_no_write_barrier(
      hash_table_map());
  ASSERT(result->IsHashTable());
  return result;
}


MaybeObject* Heap::AllocateSymbol() {
  // Statically ensure that it is safe to allocate symbols in paged spaces.
  STATIC_ASSERT(Symbol::kSize <= Page::kNonCodeObjectAreaSize);

  Object* result;
  MaybeObject* maybe =
      AllocateRaw(Symbol::kSize, OLD_POINTER_SPACE, OLD_POINTER_SPACE);
  if (!maybe->ToObject(&result)) return maybe;

  HeapObject::cast(result)->set_map_no_write_barrier(symbol_map());

  // Generate a random hash value.
  int hash;
  int attempts = 0;
  do {
    hash = V8::RandomPrivate(isolate()) & Name::kHashBitMask;
    attempts++;
  } while (hash == 0 && attempts < 30);
  if (hash == 0) hash = 1;  // never return 0

  Symbol::cast(result)->set_hash_field(
      Name::kIsNotArrayIndexMask | (hash << Name::kHashShift));
  Symbol::cast(result)->set_name(undefined_value());

  ASSERT(result->IsSymbol());
  return result;
}
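
// The stored hash field keeps the hash pre-shifted by Name::kHashShift and
// sets Name::kIsNotArrayIndexMask, so a symbol can never be confused with a
// cached array index; the loop above retries so that the reserved value 0
// ("hash not computed") is avoided, with a final fixup just in case.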


MaybeObject* Heap::AllocateNativeContext() {
  Object* result;
  { MaybeObject* maybe_result =
        AllocateFixedArray(Context::NATIVE_CONTEXT_SLOTS);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }
  Context* context = reinterpret_cast<Context*>(result);
  context->set_map_no_write_barrier(native_context_map());
  context->set_js_array_maps(undefined_value());
  ASSERT(context->IsNativeContext());
  ASSERT(result->IsContext());
  return result;
}


MaybeObject* Heap::AllocateGlobalContext(JSFunction* function,
                                         ScopeInfo* scope_info) {
  Object* result;
  { MaybeObject* maybe_result =
        AllocateFixedArray(scope_info->ContextLength(), TENURED);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }
  Context* context = reinterpret_cast<Context*>(result);
  context->set_map_no_write_barrier(global_context_map());
  context->set_closure(function);
  context->set_previous(function->context());
  context->set_extension(scope_info);
  context->set_global_object(function->context()->global_object());
  ASSERT(context->IsGlobalContext());
  ASSERT(result->IsContext());
  return context;
}


MaybeObject* Heap::AllocateModuleContext(ScopeInfo* scope_info) {
  Object* result;
  { MaybeObject* maybe_result =
        AllocateFixedArray(scope_info->ContextLength(), TENURED);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }
  Context* context = reinterpret_cast<Context*>(result);
  context->set_map_no_write_barrier(module_context_map());
  // Instance link will be set later.
  context->set_extension(Smi::FromInt(0));
  return context;
}


MaybeObject* Heap::AllocateFunctionContext(int length, JSFunction* function) {
  ASSERT(length >= Context::MIN_CONTEXT_SLOTS);
  Object* result;
  { MaybeObject* maybe_result = AllocateFixedArray(length);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }
  Context* context = reinterpret_cast<Context*>(result);
  context->set_map_no_write_barrier(function_context_map());
  context->set_closure(function);
  context->set_previous(function->context());
  context->set_extension(Smi::FromInt(0));
  context->set_global_object(function->context()->global_object());
  return context;
}


MaybeObject* Heap::AllocateCatchContext(JSFunction* function,
                                        Context* previous,
                                        String* name,
                                        Object* thrown_object) {
  STATIC_ASSERT(Context::MIN_CONTEXT_SLOTS == Context::THROWN_OBJECT_INDEX);
  Object* result;
  { MaybeObject* maybe_result =
        AllocateFixedArray(Context::MIN_CONTEXT_SLOTS + 1);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }
  Context* context = reinterpret_cast<Context*>(result);
  context->set_map_no_write_barrier(catch_context_map());
  context->set_closure(function);
  context->set_previous(previous);
  context->set_extension(name);
  context->set_global_object(previous->global_object());
  context->set(Context::THROWN_OBJECT_INDEX, thrown_object);
  return context;
}


MaybeObject* Heap::AllocateWithContext(JSFunction* function,
                                       Context* previous,
                                       JSObject* extension) {
  Object* result;
  { MaybeObject* maybe_result = AllocateFixedArray(Context::MIN_CONTEXT_SLOTS);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }
  Context* context = reinterpret_cast<Context*>(result);
  context->set_map_no_write_barrier(with_context_map());
  context->set_closure(function);
  context->set_previous(previous);
  context->set_extension(extension);
  context->set_global_object(previous->global_object());
  return context;
}


MaybeObject* Heap::AllocateBlockContext(JSFunction* function,
                                        Context* previous,
                                        ScopeInfo* scope_info) {
  Object* result;
  { MaybeObject* maybe_result =
        AllocateFixedArrayWithHoles(scope_info->ContextLength());
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }
  Context* context = reinterpret_cast<Context*>(result);
  context->set_map_no_write_barrier(block_context_map());
  context->set_closure(function);
  context->set_previous(previous);
  context->set_extension(scope_info);
  context->set_global_object(previous->global_object());
  return context;
}


MaybeObject* Heap::AllocateScopeInfo(int length) {
  FixedArray* scope_info;
  MaybeObject* maybe_scope_info = AllocateFixedArray(length, TENURED);
  if (!maybe_scope_info->To(&scope_info)) return maybe_scope_info;
  scope_info->set_map_no_write_barrier(scope_info_map());
  return scope_info;
}


MaybeObject* Heap::AllocateExternal(void* value) {
  Foreign* foreign;
  { MaybeObject* maybe_result = AllocateForeign(static_cast<Address>(value));
    if (!maybe_result->To(&foreign)) return maybe_result;
  }
  JSObject* external;
  { MaybeObject* maybe_result = AllocateJSObjectFromMap(external_map());
    if (!maybe_result->To(&external)) return maybe_result;
  }
  external->SetInternalField(0, foreign);
  return external;
}


MaybeObject* Heap::AllocateStruct(InstanceType type) {
  Map* map;
  switch (type) {
#define MAKE_CASE(NAME, Name, name) \
    case NAME##_TYPE: map = name##_map(); break;
STRUCT_LIST(MAKE_CASE)
#undef MAKE_CASE
    default:
      UNREACHABLE();
      return Failure::InternalError();
  }
  int size = map->instance_size();
  AllocationSpace space =
      (size > Page::kMaxNonCodeHeapObjectSize) ? LO_SPACE : OLD_POINTER_SPACE;
  Object* result;
  { MaybeObject* maybe_result = Allocate(map, space);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }
  Struct::cast(result)->InitializeBody(size);
  return result;
}


bool Heap::IsHeapIterable() {
  return (!old_pointer_space()->was_swept_conservatively() &&
          !old_data_space()->was_swept_conservatively());
}


void Heap::EnsureHeapIsIterable() {
  ASSERT(IsAllocationAllowed());
  if (!IsHeapIterable()) {
    CollectAllGarbage(kMakeHeapIterableMask, "Heap::EnsureHeapIsIterable");
  }
  ASSERT(IsHeapIterable());
}


void Heap::AdvanceIdleIncrementalMarking(intptr_t step_size) {
  incremental_marking()->Step(step_size,
                              IncrementalMarking::NO_GC_VIA_STACK_GUARD);

  if (incremental_marking()->IsComplete()) {
    bool uncommit = false;
    if (gc_count_at_last_idle_gc_ == gc_count_) {
      // No GC since the last full GC, the mutator is probably not active.
      isolate_->compilation_cache()->Clear();
      uncommit = true;
    }
    CollectAllGarbage(kNoGCFlags, "idle notification: finalize incremental");
    gc_count_at_last_idle_gc_ = gc_count_;
    if (uncommit) {
      new_space_.Shrink();
      UncommitFromSpace();
    }
  }
}


bool Heap::IdleNotification(int hint) {
  // Hints greater than this value indicate that
  // the embedder is requesting a lot of GC work.
  const int kMaxHint = 1000;
  // Minimal hint that allows to do full GC.
  const int kMinHintForFullGC = 100;
  intptr_t size_factor = Min(Max(hint, 20), kMaxHint) / 4;
  // The size factor is in range [5..250]. The numbers here are chosen from
  // experiments. If you change them, make sure to test with
  // chrome/performance_ui_tests --gtest_filter="GeneralMixMemoryTest.*".
  intptr_t step_size =
      size_factor * IncrementalMarking::kAllocatedThreshold;
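
  // Worked example: a hint of 100 gives Min(Max(100, 20), 1000) / 4 == 25,
  // so step_size is 25 * IncrementalMarking::kAllocatedThreshold; clamping
  // the hint to [20..1000] is exactly what bounds the factor to [5..250].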

  if (contexts_disposed_ > 0) {
    if (hint >= kMaxHint) {
      // The embedder is requesting a lot of GC work after context disposal,
      // we age inline caches so that they don't keep objects from
      // the old context alive.
      AgeInlineCaches();
    }
    int mark_sweep_time = Min(TimeMarkSweepWouldTakeInMs(), 1000);
    if (hint >= mark_sweep_time && !FLAG_expose_gc &&
        incremental_marking()->IsStopped()) {
      HistogramTimerScope scope(isolate_->counters()->gc_context());
      CollectAllGarbage(kReduceMemoryFootprintMask,
                        "idle notification: contexts disposed");
    } else {
      AdvanceIdleIncrementalMarking(step_size);
      contexts_disposed_ = 0;
    }
    // After context disposal there is likely a lot of garbage remaining, reset
    // the idle notification counters in order to trigger more incremental GCs
    // on subsequent idle notifications.
    StartIdleRound();
    return false;
  }

  if (!FLAG_incremental_marking || FLAG_expose_gc || Serializer::enabled()) {
    return IdleGlobalGC();
  }

  // By doing small chunks of GC work in each IdleNotification,
  // perform a round of incremental GCs and after that wait until
  // the mutator creates enough garbage to justify a new round.
  // An incremental GC progresses as follows:
  // 1. many incremental marking steps,
  // 2. one old space mark-sweep-compact,
  // 3. many lazy sweep steps.
  // Use mark-sweep-compact events to count incremental GCs in a round.

  if (incremental_marking()->IsStopped()) {
    if (!mark_compact_collector()->AreSweeperThreadsActivated() &&
        !IsSweepingComplete() &&
        !AdvanceSweepers(static_cast<int>(step_size))) {
      return false;
    }
  }

  if (mark_sweeps_since_idle_round_started_ >= kMaxMarkSweepsInIdleRound) {
    if (EnoughGarbageSinceLastIdleRound()) {
      StartIdleRound();
    } else {
      return true;
    }
  }

  int new_mark_sweeps = ms_count_ - ms_count_at_last_idle_notification_;
  mark_sweeps_since_idle_round_started_ += new_mark_sweeps;
  ms_count_at_last_idle_notification_ = ms_count_;

  int remaining_mark_sweeps = kMaxMarkSweepsInIdleRound -
                              mark_sweeps_since_idle_round_started_;

  if (remaining_mark_sweeps <= 0) {
    FinishIdleRound();
    return true;
  }

  if (incremental_marking()->IsStopped()) {
    // If there are no more than two GCs left in this idle round and we are
    // allowed to do a full GC, then make those GCs full in order to compact
    // the code space.
    // TODO(ulan): Once we enable code compaction for incremental marking,
    // we can get rid of this special case and always start incremental marking.
    if (remaining_mark_sweeps <= 2 && hint >= kMinHintForFullGC) {
      CollectAllGarbage(kReduceMemoryFootprintMask,
                        "idle notification: finalize idle round");
    } else {
      incremental_marking()->Start();
    }
  }
  if (!incremental_marking()->IsStopped()) {
    AdvanceIdleIncrementalMarking(step_size);
  }
  return false;
}


bool Heap::IdleGlobalGC() {
  static const int kIdlesBeforeScavenge = 4;
  static const int kIdlesBeforeMarkSweep = 7;
  static const int kIdlesBeforeMarkCompact = 8;
  static const int kMaxIdleCount = kIdlesBeforeMarkCompact + 1;
  static const unsigned int kGCsBetweenCleanup = 4;

  if (!last_idle_notification_gc_count_init_) {
    last_idle_notification_gc_count_ = gc_count_;
    last_idle_notification_gc_count_init_ = true;
  }

  bool uncommit = true;
  bool finished = false;

  // Reset the number of idle notifications received when a number of
  // GCs have taken place. This allows another round of cleanup based
  // on idle notifications if enough work has been carried out to
  // provoke a number of garbage collections.
  if (gc_count_ - last_idle_notification_gc_count_ < kGCsBetweenCleanup) {
    number_idle_notifications_ =
        Min(number_idle_notifications_ + 1, kMaxIdleCount);
  } else {
    number_idle_notifications_ = 0;
    last_idle_notification_gc_count_ = gc_count_;
  }

  if (number_idle_notifications_ == kIdlesBeforeScavenge) {
    CollectGarbage(NEW_SPACE, "idle notification");
    new_space_.Shrink();
    last_idle_notification_gc_count_ = gc_count_;
  } else if (number_idle_notifications_ == kIdlesBeforeMarkSweep) {
    // Before doing the mark-sweep collections we clear the
    // compilation cache to avoid hanging on to source code and
    // generated code for cached functions.
    isolate_->compilation_cache()->Clear();

    CollectAllGarbage(kReduceMemoryFootprintMask, "idle notification");
    new_space_.Shrink();
    last_idle_notification_gc_count_ = gc_count_;

  } else if (number_idle_notifications_ == kIdlesBeforeMarkCompact) {
    CollectAllGarbage(kReduceMemoryFootprintMask, "idle notification");
    new_space_.Shrink();
    last_idle_notification_gc_count_ = gc_count_;
    number_idle_notifications_ = 0;
    finished = true;
  } else if (number_idle_notifications_ > kIdlesBeforeMarkCompact) {
    // If we have received more than kIdlesBeforeMarkCompact idle
    // notifications we do not perform any cleanup because we don't
    // expect to gain much by doing so.
    finished = true;
  }

  if (uncommit) UncommitFromSpace();

  return finished;
}
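
// In effect, repeated idle notifications escalate: the 4th triggers a
// new-space scavenge, the 7th a memory-reducing mark-sweep (after clearing
// the compilation cache), and the 8th a final collection that resets the
// counter; anything beyond that is deliberately a no-op.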


#ifdef DEBUG

void Heap::Print() {
  if (!HasBeenSetUp()) return;
  isolate()->PrintStack();
  AllSpaces spaces(this);
  for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
    space->Print();
  }
}


void Heap::ReportCodeStatistics(const char* title) {
  PrintF(">>>>>> Code Stats (%s) >>>>>>\n", title);
  PagedSpace::ResetCodeStatistics();
  // We do not look for code in new space, map space, or old space. If code
  // somehow ends up in those spaces, we would miss it here.
  code_space_->CollectCodeStatistics();
  lo_space_->CollectCodeStatistics();
  PagedSpace::ReportCodeStatistics();
}


// This function expects that NewSpace's allocated objects histogram is
// populated (via a call to CollectStatistics or else as a side effect of a
// just-completed scavenge collection).
void Heap::ReportHeapStatistics(const char* title) {
  USE(title);
  PrintF(">>>>>> =============== %s (%d) =============== >>>>>>\n",
         title, gc_count_);
  PrintF("old_gen_promotion_limit_ %" V8_PTR_PREFIX "d\n",
         old_gen_promotion_limit_);
  PrintF("old_gen_allocation_limit_ %" V8_PTR_PREFIX "d\n",
         old_gen_allocation_limit_);
  PrintF("old_gen_limit_factor_ %d\n", old_gen_limit_factor_);

  PrintF("\n");
  PrintF("Number of handles : %d\n", HandleScope::NumberOfHandles(isolate_));
  isolate_->global_handles()->PrintStats();
  PrintF("\n");

  PrintF("Heap statistics : ");
  isolate_->memory_allocator()->ReportStatistics();
  PrintF("To space : ");
  new_space_.ReportStatistics();
  PrintF("Old pointer space : ");
  old_pointer_space_->ReportStatistics();
  PrintF("Old data space : ");
  old_data_space_->ReportStatistics();
  PrintF("Code space : ");
  code_space_->ReportStatistics();
  PrintF("Map space : ");
  map_space_->ReportStatistics();
  PrintF("Cell space : ");
  cell_space_->ReportStatistics();
  PrintF("Large object space : ");
  lo_space_->ReportStatistics();
  PrintF(">>>>>> ========================================= >>>>>>\n");
}

#endif  // DEBUG


bool Heap::Contains(HeapObject* value) {
  return Contains(value->address());
}


bool Heap::Contains(Address addr) {
  if (OS::IsOutsideAllocatedSpace(addr)) return false;
  return HasBeenSetUp() &&
      (new_space_.ToSpaceContains(addr) ||
       old_pointer_space_->Contains(addr) ||
       old_data_space_->Contains(addr) ||
       code_space_->Contains(addr) ||
       map_space_->Contains(addr) ||
       cell_space_->Contains(addr) ||
       lo_space_->SlowContains(addr));
}


bool Heap::InSpace(HeapObject* value, AllocationSpace space) {
  return InSpace(value->address(), space);
}


bool Heap::InSpace(Address addr, AllocationSpace space) {
  if (OS::IsOutsideAllocatedSpace(addr)) return false;
  if (!HasBeenSetUp()) return false;

  switch (space) {
    case NEW_SPACE:
      return new_space_.ToSpaceContains(addr);
    case OLD_POINTER_SPACE:
      return old_pointer_space_->Contains(addr);
    case OLD_DATA_SPACE:
      return old_data_space_->Contains(addr);
    case CODE_SPACE:
      return code_space_->Contains(addr);
    case MAP_SPACE:
      return map_space_->Contains(addr);
    case CELL_SPACE:
      return cell_space_->Contains(addr);
    case LO_SPACE:
      return lo_space_->SlowContains(addr);
  }

  return false;
}


#ifdef VERIFY_HEAP

void Heap::Verify() {
  CHECK(HasBeenSetUp());

  store_buffer()->Verify();

  VerifyPointersVisitor visitor;
  IterateRoots(&visitor, VISIT_ONLY_STRONG);

  new_space_.Verify();

  old_pointer_space_->Verify(&visitor);
  map_space_->Verify(&visitor);

  VerifyPointersVisitor no_dirty_regions_visitor;
  old_data_space_->Verify(&no_dirty_regions_visitor);
  code_space_->Verify(&no_dirty_regions_visitor);
  cell_space_->Verify(&no_dirty_regions_visitor);

  lo_space_->Verify();
}

#endif  // VERIFY_HEAP


MaybeObject* Heap::InternalizeUtf8String(Vector<const char> string) {
  Object* result = NULL;
  Object* new_table;
  { MaybeObject* maybe_new_table =
        string_table()->LookupUtf8String(string, &result);
    if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
  }
  // Can't use set_string_table because StringTable::cast knows that
  // StringTable is a singleton and checks for identity.
  roots_[kStringTableRootIndex] = new_table;
  ASSERT(result != NULL);
  return result;
}


MaybeObject* Heap::InternalizeOneByteString(Vector<const uint8_t> string) {
  Object* result = NULL;
  Object* new_table;
  { MaybeObject* maybe_new_table =
        string_table()->LookupOneByteString(string, &result);
    if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
  }
  // Can't use set_string_table because StringTable::cast knows that
  // StringTable is a singleton and checks for identity.
  roots_[kStringTableRootIndex] = new_table;
  ASSERT(result != NULL);
  return result;
}


MaybeObject* Heap::InternalizeOneByteString(Handle<SeqOneByteString> string,
                                            int from,
                                            int length) {
  Object* result = NULL;
  Object* new_table;
  { MaybeObject* maybe_new_table =
        string_table()->LookupSubStringOneByteString(string,
                                                     from,
                                                     length,
                                                     &result);
    if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
  }
  // Can't use set_string_table because StringTable::cast knows that
  // StringTable is a singleton and checks for identity.
  roots_[kStringTableRootIndex] = new_table;
  ASSERT(result != NULL);
  return result;
}


MaybeObject* Heap::InternalizeTwoByteString(Vector<const uc16> string) {
  Object* result = NULL;
  Object* new_table;
  { MaybeObject* maybe_new_table =
        string_table()->LookupTwoByteString(string, &result);
    if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
  }
  // Can't use set_string_table because StringTable::cast knows that
  // StringTable is a singleton and checks for identity.
  roots_[kStringTableRootIndex] = new_table;
  ASSERT(result != NULL);
  return result;
}


MaybeObject* Heap::InternalizeString(String* string) {
  if (string->IsInternalizedString()) return string;
  Object* result = NULL;
  Object* new_table;
  { MaybeObject* maybe_new_table =
        string_table()->LookupString(string, &result);
    if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
  }
  // Can't use set_string_table because StringTable::cast knows that
  // StringTable is a singleton and checks for identity.
  roots_[kStringTableRootIndex] = new_table;
  ASSERT(result != NULL);
  return result;
}


bool Heap::InternalizeStringIfExists(String* string, String** result) {
  if (string->IsInternalizedString()) {
    *result = string;
    return true;
  }
  return string_table()->LookupStringIfExists(string, result);
}


void Heap::ZapFromSpace() {
  NewSpacePageIterator it(new_space_.FromSpaceStart(),
                          new_space_.FromSpaceEnd());
  while (it.has_next()) {
    NewSpacePage* page = it.next();
    for (Address cursor = page->area_start(), limit = page->area_end();
         cursor < limit;
         cursor += kPointerSize) {
      Memory::Address_at(cursor) = kFromSpaceZapValue;
    }
  }
}
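
// Overwriting all of from-space with kFromSpaceZapValue means a stale
// pointer into from-space shows up as this recognizable pattern in crash
// dumps rather than as plausible-looking object data.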


void Heap::IterateAndMarkPointersToFromSpace(Address start,
                                             Address end,
                                             ObjectSlotCallback callback) {
  Address slot_address = start;

  // We are not collecting slots on new space objects during mutation
  // thus we have to scan for pointers to evacuation candidates when we
  // promote objects. But we should not record any slots in non-black
  // objects. Grey objects' slots would be rescanned. White objects might
  // not survive until the end of the collection, so it would be a
  // violation of the invariant to record their slots.
  bool record_slots = false;
  if (incremental_marking()->IsCompacting()) {
    MarkBit mark_bit = Marking::MarkBitFrom(HeapObject::FromAddress(start));
    record_slots = Marking::IsBlack(mark_bit);
  }

  while (slot_address < end) {
    Object** slot = reinterpret_cast<Object**>(slot_address);
    Object* object = *slot;
    // If the store buffer becomes overfull we mark pages as being exempt from
    // the store buffer. These pages are scanned to find pointers that point
    // to the new space. In that case we may hit newly promoted objects and
    // fix the pointers before the promotion queue gets to them. Thus the 'if'.
    if (object->IsHeapObject()) {
      if (Heap::InFromSpace(object)) {
        callback(reinterpret_cast<HeapObject**>(slot),
                 HeapObject::cast(object));
        Object* new_object = *slot;
        if (InNewSpace(new_object)) {
          SLOW_ASSERT(Heap::InToSpace(new_object));
          SLOW_ASSERT(new_object->IsHeapObject());
          store_buffer_.EnterDirectlyIntoStoreBuffer(
              reinterpret_cast<Address>(slot));
        }
        SLOW_ASSERT(!MarkCompactCollector::IsOnEvacuationCandidate(new_object));
      } else if (record_slots &&
                 MarkCompactCollector::IsOnEvacuationCandidate(object)) {
        mark_compact_collector()->RecordSlot(slot, slot, object);
      }
    }
    slot_address += kPointerSize;
  }
}


#ifdef VERIFY_HEAP

typedef bool (*CheckStoreBufferFilter)(Object** addr);


bool IsAMapPointerAddress(Object** addr) {
  uintptr_t a = reinterpret_cast<uintptr_t>(addr);
  int mod = a % Map::kSize;
  return mod >= Map::kPointerFieldsBeginOffset &&
      mod < Map::kPointerFieldsEndOffset;
}


bool EverythingsAPointer(Object** addr) {
  return true;
}


static void CheckStoreBuffer(Heap* heap,
                             Object** current,
                             Object** limit,
                             Object**** store_buffer_position,
                             Object*** store_buffer_top,
                             CheckStoreBufferFilter filter,
                             Address special_garbage_start,
                             Address special_garbage_end) {
  Map* free_space_map = heap->free_space_map();
  for ( ; current < limit; current++) {
    Object* o = *current;
    Address current_address = reinterpret_cast<Address>(current);
    // Skip free space.
    if (o == free_space_map) {
      FreeSpace* free_space =
          FreeSpace::cast(HeapObject::FromAddress(current_address));
      int skip = free_space->Size();
      ASSERT(current_address + skip <= reinterpret_cast<Address>(limit));
      ASSERT(skip > 0);
      current_address += skip - kPointerSize;
      current = reinterpret_cast<Object**>(current_address);
      continue;
    }
    // Skip the current linear allocation space between top and limit which is
    // unmarked with the free space map, but can contain junk.
    if (current_address == special_garbage_start &&
        special_garbage_end != special_garbage_start) {
      current_address = special_garbage_end - kPointerSize;
      current = reinterpret_cast<Object**>(current_address);
      continue;
    }
    if (!(*filter)(current)) continue;
    ASSERT(current_address < special_garbage_start ||
           current_address >= special_garbage_end);
    ASSERT(reinterpret_cast<uintptr_t>(o) != kFreeListZapValue);
    // We have to check that the pointer does not point into new space
    // without trying to cast it to a heap object since the hash field of
    // a string can contain values like 1 and 3 which are tagged null
    // pointers.
    if (!heap->InNewSpace(o)) continue;
    while (**store_buffer_position < current &&
           *store_buffer_position < store_buffer_top) {
      (*store_buffer_position)++;
    }
    if (**store_buffer_position != current ||
        *store_buffer_position == store_buffer_top) {
      Object** obj_start = current;
      while (!(*obj_start)->IsMap()) obj_start--;
      UNREACHABLE();
    }
  }
}


// Check that the store buffer contains all intergenerational pointers by
// scanning a page and ensuring that all pointers to young space are in the
// store buffer.
void Heap::OldPointerSpaceCheckStoreBuffer() {
  OldSpace* space = old_pointer_space();
  PageIterator pages(space);

  store_buffer()->SortUniq();

  while (pages.has_next()) {
    Page* page = pages.next();
    Object** current = reinterpret_cast<Object**>(page->area_start());

    Address end = page->area_end();

    Object*** store_buffer_position = store_buffer()->Start();
    Object*** store_buffer_top = store_buffer()->Top();

    Object** limit = reinterpret_cast<Object**>(end);
    CheckStoreBuffer(this,
                     current,
                     limit,
                     &store_buffer_position,
                     store_buffer_top,
                     &EverythingsAPointer,
                     space->top(),
                     space->limit());
  }
}


void Heap::MapSpaceCheckStoreBuffer() {
  MapSpace* space = map_space();
  PageIterator pages(space);

  store_buffer()->SortUniq();

  while (pages.has_next()) {
    Page* page = pages.next();
    Object** current = reinterpret_cast<Object**>(page->area_start());

    Address end = page->area_end();

    Object*** store_buffer_position = store_buffer()->Start();
    Object*** store_buffer_top = store_buffer()->Top();

    Object** limit = reinterpret_cast<Object**>(end);
    CheckStoreBuffer(this,
                     current,
                     limit,
                     &store_buffer_position,
                     store_buffer_top,
                     &IsAMapPointerAddress,
                     space->top(),
                     space->limit());
  }
}


void Heap::LargeObjectSpaceCheckStoreBuffer() {
  LargeObjectIterator it(lo_space());
  for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
    // We only have code, sequential strings, or fixed arrays in large
    // object space, and only fixed arrays can possibly contain pointers to
    // the young generation.
    if (object->IsFixedArray()) {
      Object*** store_buffer_position = store_buffer()->Start();
      Object*** store_buffer_top = store_buffer()->Top();
      Object** current = reinterpret_cast<Object**>(object->address());
      Object** limit =
          reinterpret_cast<Object**>(object->address() + object->Size());
      CheckStoreBuffer(this,
                       current,
                       limit,
                       &store_buffer_position,
                       store_buffer_top,
                       &EverythingsAPointer,
                       NULL,
                       NULL);
    }
  }
}

#endif  // VERIFY_HEAP


void Heap::IterateRoots(ObjectVisitor* v, VisitMode mode) {
  IterateStrongRoots(v, mode);
  IterateWeakRoots(v, mode);
}


void Heap::IterateWeakRoots(ObjectVisitor* v, VisitMode mode) {
  v->VisitPointer(reinterpret_cast<Object**>(&roots_[kStringTableRootIndex]));
  v->Synchronize(VisitorSynchronization::kStringTable);
  if (mode != VISIT_ALL_IN_SCAVENGE &&
      mode != VISIT_ALL_IN_SWEEP_NEWSPACE) {
    // Scavenge collections have special processing for this.
    external_string_table_.Iterate(v);
    error_object_list_.Iterate(v);
  }
  v->Synchronize(VisitorSynchronization::kExternalStringsTable);
}


void Heap::IterateStrongRoots(ObjectVisitor* v, VisitMode mode) {
  v->VisitPointers(&roots_[0], &roots_[kStrongRootListLength]);
  v->Synchronize(VisitorSynchronization::kStrongRootList);

  v->VisitPointer(BitCast<Object**>(&hidden_string_));
  v->Synchronize(VisitorSynchronization::kInternalizedString);

  isolate_->bootstrapper()->Iterate(v);
  v->Synchronize(VisitorSynchronization::kBootstrapper);
  isolate_->Iterate(v);
  v->Synchronize(VisitorSynchronization::kTop);
  Relocatable::Iterate(v);
  v->Synchronize(VisitorSynchronization::kRelocatable);

#ifdef ENABLE_DEBUGGER_SUPPORT
  isolate_->debug()->Iterate(v);
  if (isolate_->deoptimizer_data() != NULL) {
    isolate_->deoptimizer_data()->Iterate(v);
  }
#endif
  v->Synchronize(VisitorSynchronization::kDebug);
  isolate_->compilation_cache()->Iterate(v);
  v->Synchronize(VisitorSynchronization::kCompilationCache);

  // Iterate over local handles in handle scopes.
  isolate_->handle_scope_implementer()->Iterate(v);
  isolate_->IterateDeferredHandles(v);
  v->Synchronize(VisitorSynchronization::kHandleScope);

  // Iterate over the builtin code objects and code stubs in the
  // heap. Note that it is not necessary to iterate over code objects
  // on scavenge collections.
  if (mode != VISIT_ALL_IN_SCAVENGE) {
    isolate_->builtins()->IterateBuiltins(v);
  }
  v->Synchronize(VisitorSynchronization::kBuiltins);

  // Iterate over global handles.
  switch (mode) {
    case VISIT_ONLY_STRONG:
      isolate_->global_handles()->IterateStrongRoots(v);
      break;
    case VISIT_ALL_IN_SCAVENGE:
      isolate_->global_handles()->IterateNewSpaceStrongAndDependentRoots(v);
      break;
    case VISIT_ALL_IN_SWEEP_NEWSPACE:
    case VISIT_ALL:
      isolate_->global_handles()->IterateAllRoots(v);
      break;
  }
  v->Synchronize(VisitorSynchronization::kGlobalHandles);

  // Iterate over pointers being held by inactive threads.
  isolate_->thread_manager()->Iterate(v);
  v->Synchronize(VisitorSynchronization::kThreadManager);

  // Iterate over the pointers the Serialization/Deserialization code is
  // holding.
  // During garbage collection this keeps the partial snapshot cache alive.
  // During deserialization of the startup snapshot this creates the partial
  // snapshot cache and deserializes the objects it refers to. During
  // serialization this does nothing, since the partial snapshot cache is
  // empty. However the next thing we do is create the partial snapshot,
  // filling up the partial snapshot cache with objects it needs as we go.
  SerializerDeserializer::Iterate(v);
  // We don't do a v->Synchronize call here, because in debug mode that will
  // output a flag to the snapshot. However at this point the serializer and
  // deserializer are deliberately a little unsynchronized (see above) so the
  // checking of the sync flag in the snapshot would fail.
}


// TODO(1236194): Since the heap size is configurable on the command line
// and through the API, we should gracefully handle the case that the heap
// size is not big enough to fit all the initial objects.
bool Heap::ConfigureHeap(int max_semispace_size,
                         intptr_t max_old_gen_size,
                         intptr_t max_executable_size) {
  if (HasBeenSetUp()) return false;

  if (FLAG_stress_compaction) {
    // This will cause more frequent GCs when stressing.
    max_semispace_size_ = Page::kPageSize;
  }

  if (max_semispace_size > 0) {
    if (max_semispace_size < Page::kPageSize) {
      max_semispace_size = Page::kPageSize;
      if (FLAG_trace_gc) {
        PrintPID("Max semispace size cannot be less than %dkbytes\n",
                 Page::kPageSize >> 10);
      }
    }
    max_semispace_size_ = max_semispace_size;
  }

  if (Snapshot::IsEnabled()) {
    // If we are using a snapshot we always reserve the default amount
    // of memory for each semispace because code in the snapshot has
    // write-barrier code that relies on the size and alignment of new
    // space. We therefore cannot use a larger max semispace size
    // than the default reserved semispace size.
    if (max_semispace_size_ > reserved_semispace_size_) {
      max_semispace_size_ = reserved_semispace_size_;
      if (FLAG_trace_gc) {
        PrintPID("Max semispace size cannot be more than %dkbytes\n",
                 reserved_semispace_size_ >> 10);
      }
    }
  } else {
    // If we are not using snapshots we reserve space for the actual
    // max semispace size.
    reserved_semispace_size_ = max_semispace_size_;
  }

  if (max_old_gen_size > 0) max_old_generation_size_ = max_old_gen_size;
  if (max_executable_size > 0) {
    max_executable_size_ = RoundUp(max_executable_size, Page::kPageSize);
  }

  // The max executable size must be less than or equal to the max old
  // generation size.
  if (max_executable_size_ > max_old_generation_size_) {
    max_executable_size_ = max_old_generation_size_;
  }

  // The new space size must be a power of two to support single-bit testing
  // for containment.
  max_semispace_size_ = RoundUpToPowerOf2(max_semispace_size_);
  reserved_semispace_size_ = RoundUpToPowerOf2(reserved_semispace_size_);
  initial_semispace_size_ = Min(initial_semispace_size_, max_semispace_size_);
  external_allocation_limit_ = 16 * max_semispace_size_;

  // The old generation is paged and needs at least one page for each space.
  int paged_space_count = LAST_PAGED_SPACE - FIRST_PAGED_SPACE + 1;
  max_old_generation_size_ = Max(static_cast<intptr_t>(paged_space_count *
                                                       Page::kPageSize),
                                 RoundUp(max_old_generation_size_,
                                         Page::kPageSize));

  configured_ = true;
  return true;
}
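
// Worked example (illustrative numbers): with an 8 MB max semispace the
// power-of-two rounding is a no-op and external_allocation_limit_ becomes
// 16 * 8 MB == 128 MB; the old generation request is rounded up to a
// Page::kPageSize multiple, but never below one page per paged space.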


bool Heap::ConfigureHeapDefault() {
  return ConfigureHeap(static_cast<intptr_t>(FLAG_max_new_space_size / 2) * KB,
                       static_cast<intptr_t>(FLAG_max_old_space_size) * MB,
                       static_cast<intptr_t>(FLAG_max_executable_size) * MB);
}


void Heap::RecordStats(HeapStats* stats, bool take_snapshot) {
  *stats->start_marker = HeapStats::kStartMarker;
  *stats->end_marker = HeapStats::kEndMarker;
  *stats->new_space_size = new_space_.SizeAsInt();
  *stats->new_space_capacity = static_cast<int>(new_space_.Capacity());
  *stats->old_pointer_space_size = old_pointer_space_->SizeOfObjects();
  *stats->old_pointer_space_capacity = old_pointer_space_->Capacity();
  *stats->old_data_space_size = old_data_space_->SizeOfObjects();
  *stats->old_data_space_capacity = old_data_space_->Capacity();
  *stats->code_space_size = code_space_->SizeOfObjects();
  *stats->code_space_capacity = code_space_->Capacity();
  *stats->map_space_size = map_space_->SizeOfObjects();
  *stats->map_space_capacity = map_space_->Capacity();
  *stats->cell_space_size = cell_space_->SizeOfObjects();
  *stats->cell_space_capacity = cell_space_->Capacity();
  *stats->lo_space_size = lo_space_->Size();
  isolate_->global_handles()->RecordStats(stats);
  *stats->memory_allocator_size = isolate()->memory_allocator()->Size();
  *stats->memory_allocator_capacity =
      isolate()->memory_allocator()->Size() +
      isolate()->memory_allocator()->Available();
  *stats->os_error = OS::GetLastError();
  if (take_snapshot) {
    HeapIterator iterator(this);
    for (HeapObject* obj = iterator.next();
         obj != NULL;
         obj = iterator.next()) {
      InstanceType type = obj->map()->instance_type();
      ASSERT(0 <= type && type <= LAST_TYPE);
      stats->objects_per_type[type]++;
      stats->size_per_type[type] += obj->Size();
    }
  }
}


intptr_t Heap::PromotedSpaceSizeOfObjects() {
  return old_pointer_space_->SizeOfObjects()
      + old_data_space_->SizeOfObjects()
      + code_space_->SizeOfObjects()
      + map_space_->SizeOfObjects()
      + cell_space_->SizeOfObjects()
      + lo_space_->SizeOfObjects();
}


intptr_t Heap::PromotedExternalMemorySize() {
  if (amount_of_external_allocated_memory_
      <= amount_of_external_allocated_memory_at_last_global_gc_) return 0;
  return amount_of_external_allocated_memory_
      - amount_of_external_allocated_memory_at_last_global_gc_;
}


V8_DECLARE_ONCE(initialize_gc_once);

static void InitializeGCOnce() {
  InitializeScavengingVisitorsTables();
  NewSpaceScavenger::Initialize();
  MarkCompactCollector::Initialize();
}


bool Heap::SetUp() {
#ifdef DEBUG
  allocation_timeout_ = FLAG_gc_interval;
#endif

  // Initialize heap spaces and initial maps and objects. Whenever something
  // goes wrong, just return false. The caller should check the results and
  // call Heap::TearDown() to release allocated memory.
  //
  // If the heap is not yet configured (e.g. through the API), configure it.
  // Configuration is based on the flags new-space-size (really the semispace
  // size) and old-space-size if set or the initial values of semispace_size_
  // and old_generation_size_ otherwise.
  if (!configured_) {
    if (!ConfigureHeapDefault()) return false;
  }

  CallOnce(&initialize_gc_once, &InitializeGCOnce);

  MarkMapPointersAsEncoded(false);

  // Set up memory allocator.
  if (!isolate_->memory_allocator()->SetUp(MaxReserved(), MaxExecutableSize()))
      return false;

  // Set up new space.
  if (!new_space_.SetUp(reserved_semispace_size_, max_semispace_size_)) {
    return false;
  }

  // Initialize old pointer space.
  old_pointer_space_ =
      new OldSpace(this,
                   max_old_generation_size_,
                   OLD_POINTER_SPACE,
                   NOT_EXECUTABLE);
  if (old_pointer_space_ == NULL) return false;
  if (!old_pointer_space_->SetUp()) return false;

  // Initialize old data space.
  old_data_space_ =
      new OldSpace(this,
                   max_old_generation_size_,
                   OLD_DATA_SPACE,
                   NOT_EXECUTABLE);
  if (old_data_space_ == NULL) return false;
  if (!old_data_space_->SetUp()) return false;

  // Initialize the code space, set its maximum capacity to the old
  // generation size. It needs executable memory.
  // On 64-bit platform(s), we put all code objects in a 2 GB range of
  // virtual address space, so that they can call each other with near calls.
  if (code_range_size_ > 0) {
    if (!isolate_->code_range()->SetUp(code_range_size_)) {
      return false;
    }
  }

  code_space_ =
      new OldSpace(this, max_old_generation_size_, CODE_SPACE, EXECUTABLE);
  if (code_space_ == NULL) return false;
  if (!code_space_->SetUp()) return false;

  // Initialize map space.
  map_space_ = new MapSpace(this, max_old_generation_size_, MAP_SPACE);
  if (map_space_ == NULL) return false;
  if (!map_space_->SetUp()) return false;

  // Initialize global property cell space.
  cell_space_ = new CellSpace(this, max_old_generation_size_, CELL_SPACE);
  if (cell_space_ == NULL) return false;
  if (!cell_space_->SetUp()) return false;

  // The large object code space may contain code or data. We set the memory
  // to be non-executable here for safety, but this means we need to enable it
  // explicitly when allocating large code objects.
  lo_space_ = new LargeObjectSpace(this, max_old_generation_size_, LO_SPACE);
  if (lo_space_ == NULL) return false;
  if (!lo_space_->SetUp()) return false;

  // Set up the seed that is used to randomize the string hash function.
  ASSERT(hash_seed() == 0);
  if (FLAG_randomize_hashes) {
    if (FLAG_hash_seed == 0) {
      set_hash_seed(
          Smi::FromInt(V8::RandomPrivate(isolate()) & 0x3fffffff));
    } else {
      set_hash_seed(Smi::FromInt(FLAG_hash_seed));
    }
  }

  LOG(isolate_, IntPtrTEvent("heap-capacity", Capacity()));
  LOG(isolate_, IntPtrTEvent("heap-available", Available()));

  store_buffer()->SetUp();

  if (FLAG_parallel_recompilation) relocation_mutex_ = OS::CreateMutex();
#ifdef DEBUG
  relocation_mutex_locked_by_optimizer_thread_ = false;
#endif  // DEBUG

  return true;
}


bool Heap::CreateHeapObjects() {
  // Create initial maps.
  if (!CreateInitialMaps()) return false;
  if (!CreateApiObjects()) return false;

  // Create initial objects
  if (!CreateInitialObjects()) return false;

  native_contexts_list_ = undefined_value();
  return true;
}


void Heap::SetStackLimits() {
  ASSERT(isolate_ != NULL);
  ASSERT(isolate_ == isolate());
  // On 64 bit machines, pointers are generally out of range of Smis. We write
  // something that looks like an out of range Smi to the GC.

  // Set up the special root array entries containing the stack limits.
  // These are actually addresses, but the tag makes the GC ignore it.
  roots_[kStackLimitRootIndex] =
      reinterpret_cast<Object*>(
          (isolate_->stack_guard()->jslimit() & ~kSmiTagMask) | kSmiTag);
  roots_[kRealStackLimitRootIndex] =
      reinterpret_cast<Object*>(
          (isolate_->stack_guard()->real_jslimit() & ~kSmiTagMask) | kSmiTag);
}
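
// Illustration (hypothetical address, 32-bit tagging where kSmiTag == 0 and
// kSmiTagMask == 1): (0x12345678 & ~1) | 0 clears the low bit, so the stored
// limit parses as a smi and the GC skips it even though it is an address.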


void Heap::TearDown() {
#ifdef VERIFY_HEAP
  if (FLAG_verify_heap) {
    Verify();
  }
#endif

  if (FLAG_print_cumulative_gc_stat) {
    PrintF("\n");
    PrintF("gc_count=%d ", gc_count_);
    PrintF("mark_sweep_count=%d ", ms_count_);
    PrintF("max_gc_pause=%.1f ", get_max_gc_pause());
    PrintF("total_gc_time=%.1f ", total_gc_time_ms_);
    PrintF("min_in_mutator=%.1f ", get_min_in_mutator());
    PrintF("max_alive_after_gc=%" V8_PTR_PREFIX "d ",
           get_max_alive_after_gc());
    PrintF("total_marking_time=%.1f ", marking_time());
    PrintF("total_sweeping_time=%.1f ", sweeping_time());
    PrintF("\n\n");
  }

  isolate_->global_handles()->TearDown();

  external_string_table_.TearDown();

  error_object_list_.TearDown();

  new_space_.TearDown();

  if (old_pointer_space_ != NULL) {
    old_pointer_space_->TearDown();
    delete old_pointer_space_;
    old_pointer_space_ = NULL;
  }

  if (old_data_space_ != NULL) {
    old_data_space_->TearDown();
    delete old_data_space_;
    old_data_space_ = NULL;
  }

  if (code_space_ != NULL) {
    code_space_->TearDown();
    delete code_space_;
    code_space_ = NULL;
  }

  if (map_space_ != NULL) {
    map_space_->TearDown();
    delete map_space_;
    map_space_ = NULL;
  }

  if (cell_space_ != NULL) {
    cell_space_->TearDown();
    delete cell_space_;
    cell_space_ = NULL;
  }

  if (lo_space_ != NULL) {
    lo_space_->TearDown();
    delete lo_space_;
    lo_space_ = NULL;
  }

  store_buffer()->TearDown();
  incremental_marking()->TearDown();

  isolate_->memory_allocator()->TearDown();

  delete relocation_mutex_;
}


void Heap::AddGCPrologueCallback(GCPrologueCallback callback, GCType gc_type) {
  ASSERT(callback != NULL);
  GCPrologueCallbackPair pair(callback, gc_type);
  ASSERT(!gc_prologue_callbacks_.Contains(pair));
  return gc_prologue_callbacks_.Add(pair);
}


void Heap::RemoveGCPrologueCallback(GCPrologueCallback callback) {
  ASSERT(callback != NULL);
  for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
    if (gc_prologue_callbacks_[i].callback == callback) {
      gc_prologue_callbacks_.Remove(i);
      return;
    }
  }
  UNREACHABLE();
}


void Heap::AddGCEpilogueCallback(GCEpilogueCallback callback, GCType gc_type) {
  ASSERT(callback != NULL);
  GCEpilogueCallbackPair pair(callback, gc_type);
  ASSERT(!gc_epilogue_callbacks_.Contains(pair));
  return gc_epilogue_callbacks_.Add(pair);
}


void Heap::RemoveGCEpilogueCallback(GCEpilogueCallback callback) {
  ASSERT(callback != NULL);
  for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
    if (gc_epilogue_callbacks_[i].callback == callback) {
      gc_epilogue_callbacks_.Remove(i);
      return;
    }
  }
  UNREACHABLE();
}


#ifdef DEBUG

class PrintHandleVisitor: public ObjectVisitor {
 public:
  void VisitPointers(Object** start, Object** end) {
    for (Object** p = start; p < end; p++)
      PrintF("  handle %p to %p\n",
             reinterpret_cast<void*>(p),
             reinterpret_cast<void*>(*p));
  }
};

void Heap::PrintHandles() {
  PrintF("Handles:\n");
  PrintHandleVisitor v;
  isolate_->handle_scope_implementer()->Iterate(&v);
}

#endif


Space* AllSpaces::next() {
  switch (counter_++) {
    case NEW_SPACE:
      return heap_->new_space();
    case OLD_POINTER_SPACE:
      return heap_->old_pointer_space();
    case OLD_DATA_SPACE:
      return heap_->old_data_space();
    case CODE_SPACE:
      return heap_->code_space();
    case MAP_SPACE:
      return heap_->map_space();
    case CELL_SPACE:
      return heap_->cell_space();
    case LO_SPACE:
      return heap_->lo_space();
    default:
      return NULL;
  }
}


PagedSpace* PagedSpaces::next() {
  switch (counter_++) {
    case OLD_POINTER_SPACE:
      return heap_->old_pointer_space();
    case OLD_DATA_SPACE:
      return heap_->old_data_space();
    case CODE_SPACE:
      return heap_->code_space();
    case MAP_SPACE:
      return heap_->map_space();
    case CELL_SPACE:
      return heap_->cell_space();
    default:
      return NULL;
  }
}


OldSpace* OldSpaces::next() {
  switch (counter_++) {
    case OLD_POINTER_SPACE:
      return heap_->old_pointer_space();
    case OLD_DATA_SPACE:
      return heap_->old_data_space();
    case CODE_SPACE:
      return heap_->code_space();
    default:
      return NULL;
  }
}


SpaceIterator::SpaceIterator(Heap* heap)
    : heap_(heap),
      current_space_(FIRST_SPACE),
      iterator_(NULL),
      size_func_(NULL) {
}


SpaceIterator::SpaceIterator(Heap* heap, HeapObjectCallback size_func)
    : heap_(heap),
      current_space_(FIRST_SPACE),
      iterator_(NULL),
      size_func_(size_func) {
}


SpaceIterator::~SpaceIterator() {
  // Delete active iterator if any.
  delete iterator_;
}


bool SpaceIterator::has_next() {
  // Iterate until no more spaces.
  return current_space_ != LAST_SPACE;
}


ObjectIterator* SpaceIterator::next() {
  if (iterator_ != NULL) {
    delete iterator_;
    iterator_ = NULL;
    // Move to the next space
    current_space_++;
    if (current_space_ > LAST_SPACE) {
      return NULL;
    }
  }

  // Return iterator for the new current space.
  return CreateIterator();
}


// Create an iterator for the space to iterate.
ObjectIterator* SpaceIterator::CreateIterator() {
  ASSERT(iterator_ == NULL);

  switch (current_space_) {
    case NEW_SPACE:
      iterator_ = new SemiSpaceIterator(heap_->new_space(), size_func_);
      break;
    case OLD_POINTER_SPACE:
      iterator_ =
          new HeapObjectIterator(heap_->old_pointer_space(), size_func_);
      break;
    case OLD_DATA_SPACE:
      iterator_ = new HeapObjectIterator(heap_->old_data_space(), size_func_);
      break;
    case CODE_SPACE:
      iterator_ = new HeapObjectIterator(heap_->code_space(), size_func_);
      break;
    case MAP_SPACE:
      iterator_ = new HeapObjectIterator(heap_->map_space(), size_func_);
      break;
    case CELL_SPACE:
      iterator_ = new HeapObjectIterator(heap_->cell_space(), size_func_);
      break;
    case LO_SPACE:
      iterator_ = new LargeObjectIterator(heap_->lo_space(), size_func_);
      break;
  }

  // Return the newly allocated iterator;
  ASSERT(iterator_ != NULL);
  return iterator_;
}


class HeapObjectsFilter {
 public:
  virtual ~HeapObjectsFilter() {}
  virtual bool SkipObject(HeapObject* object) = 0;
};


class UnreachableObjectsFilter : public HeapObjectsFilter {
 public:
  UnreachableObjectsFilter() {
    MarkReachableObjects();
  }

  ~UnreachableObjectsFilter() {
    Isolate::Current()->heap()->mark_compact_collector()->ClearMarkbits();
  }

  bool SkipObject(HeapObject* object) {
    MarkBit mark_bit = Marking::MarkBitFrom(object);
    return !mark_bit.Get();
  }

 private:
  class MarkingVisitor : public ObjectVisitor {
   public:
    MarkingVisitor() : marking_stack_(10) {}

    void VisitPointers(Object** start, Object** end) {
      for (Object** p = start; p < end; p++) {
        if (!(*p)->IsHeapObject()) continue;
        HeapObject* obj = HeapObject::cast(*p);
        MarkBit mark_bit = Marking::MarkBitFrom(obj);
        if (!mark_bit.Get()) {
          mark_bit.Set();
          marking_stack_.Add(obj);
        }
      }
    }

    void TransitiveClosure() {
      while (!marking_stack_.is_empty()) {
        HeapObject* obj = marking_stack_.RemoveLast();
        obj->Iterate(this);
      }
    }

   private:
    List<HeapObject*> marking_stack_;
  };

  void MarkReachableObjects() {
    Heap* heap = Isolate::Current()->heap();
    MarkingVisitor visitor;
    heap->IterateRoots(&visitor, VISIT_ALL);
    visitor.TransitiveClosure();
  }

  AssertNoAllocation no_alloc;
};


HeapIterator::HeapIterator(Heap* heap)
    : heap_(heap),
      filtering_(HeapIterator::kNoFiltering),
      filter_(NULL) {
  Init();
}


HeapIterator::HeapIterator(Heap* heap,
                           HeapIterator::HeapObjectsFiltering filtering)
    : heap_(heap),
      filtering_(filtering),
      filter_(NULL) {
  Init();
}


HeapIterator::~HeapIterator() {
  Shutdown();
}


void HeapIterator::Init() {
  // Start the iteration.
  space_iterator_ = new SpaceIterator(heap_);
  switch (filtering_) {
    case kFilterUnreachable:
      filter_ = new UnreachableObjectsFilter;
      break;
    default:
      break;
  }
  object_iterator_ = space_iterator_->next();
}


void HeapIterator::Shutdown() {
#ifdef DEBUG
  // Assert that in filtering mode we have iterated through all
  // objects. Otherwise, heap will be left in an inconsistent state.
  if (filtering_ != kNoFiltering) {
    ASSERT(object_iterator_ == NULL);
  }
#endif
  // Make sure the last iterator is deallocated.
  delete space_iterator_;
  space_iterator_ = NULL;
  object_iterator_ = NULL;
  delete filter_;
  filter_ = NULL;
}


HeapObject* HeapIterator::next() {
  if (filter_ == NULL) return NextObject();

  HeapObject* obj = NextObject();
  while (obj != NULL && filter_->SkipObject(obj)) obj = NextObject();
  return obj;
}


HeapObject* HeapIterator::NextObject() {
  // No iterator means we are done.
  if (object_iterator_ == NULL) return NULL;

  if (HeapObject* obj = object_iterator_->next_object()) {
    // If the current iterator has more objects we are fine.
    return obj;
  } else {
    // Go through the spaces looking for one that has objects.
    while (space_iterator_->has_next()) {
      object_iterator_ = space_iterator_->next();
      if (HeapObject* obj = object_iterator_->next_object()) {
        return obj;
      }
    }
  }
  // Done with the last space.
  object_iterator_ = NULL;
  return NULL;
}


void HeapIterator::reset() {
  // Restart the iterator.
  Shutdown();
  Init();
}


#ifdef DEBUG

Object* const PathTracer::kAnyGlobalObject = reinterpret_cast<Object*>(NULL);


class PathTracer::MarkVisitor: public ObjectVisitor {
 public:
  explicit MarkVisitor(PathTracer* tracer) : tracer_(tracer) {}
  void VisitPointers(Object** start, Object** end) {
    // Scan all HeapObject pointers in [start, end)
    for (Object** p = start; !tracer_->found() && (p < end); p++) {
      if ((*p)->IsHeapObject())
        tracer_->MarkRecursively(p, this);
    }
  }

 private:
  PathTracer* tracer_;
};


class PathTracer::UnmarkVisitor: public ObjectVisitor {
 public:
  explicit UnmarkVisitor(PathTracer* tracer) : tracer_(tracer) {}
  void VisitPointers(Object** start, Object** end) {
    // Scan all HeapObject pointers in [start, end)
    for (Object** p = start; p < end; p++) {
      if ((*p)->IsHeapObject())
        tracer_->UnmarkRecursively(p, this);
    }
  }

 private:
  PathTracer* tracer_;
};


void PathTracer::VisitPointers(Object** start, Object** end) {
  bool done = ((what_to_find_ == FIND_FIRST) && found_target_);
  // Visit all HeapObject pointers in [start, end)
  for (Object** p = start; !done && (p < end); p++) {
    if ((*p)->IsHeapObject()) {
      TracePathFrom(p);
      done = ((what_to_find_ == FIND_FIRST) && found_target_);
    }
  }
}


void PathTracer::Reset() {
  found_target_ = false;
  object_stack_.Clear();
}


void PathTracer::TracePathFrom(Object** root) {
  ASSERT((search_target_ == kAnyGlobalObject) ||
         search_target_->IsHeapObject());
  found_target_in_trace_ = false;
  Reset();

  MarkVisitor mark_visitor(this);
  MarkRecursively(root, &mark_visitor);

  UnmarkVisitor unmark_visitor(this);
  UnmarkRecursively(root, &unmark_visitor);

  ProcessResults();
}


static bool SafeIsNativeContext(HeapObject* obj) {
  return obj->map() == obj->GetHeap()->raw_unchecked_native_context_map();
}


void PathTracer::MarkRecursively(Object** p, MarkVisitor* mark_visitor) {
  if (!(*p)->IsHeapObject()) return;

  HeapObject* obj = HeapObject::cast(*p);

  Object* map = obj->map();

  if (!map->IsHeapObject()) return;  // visited before

  if (found_target_in_trace_) return;  // stop if target found
  object_stack_.Add(obj);
  if (((search_target_ == kAnyGlobalObject) && obj->IsJSGlobalObject()) ||
      (obj == search_target_)) {
    found_target_in_trace_ = true;
    found_target_ = true;
    return;
  }

  bool is_native_context = SafeIsNativeContext(obj);

  // not visited yet
  Map* map_p = reinterpret_cast<Map*>(HeapObject::cast(map));

  Address map_addr = map_p->address();

  obj->set_map_no_write_barrier(reinterpret_cast<Map*>(map_addr + kMarkTag));

  // Scan the object body.
  if (is_native_context && (visit_mode_ == VISIT_ONLY_STRONG)) {
    // This is specialized to scan Context's properly.
    Object** start = reinterpret_cast<Object**>(obj->address() +
                                                Context::kHeaderSize);
    Object** end = reinterpret_cast<Object**>(obj->address() +
        Context::kHeaderSize + Context::FIRST_WEAK_SLOT * kPointerSize);
    mark_visitor->VisitPointers(start, end);
  } else {
    obj->IterateBody(map_p->instance_type(),
                     obj->SizeFromMap(map_p),
                     mark_visitor);
  }

  // Scan the map after the body because the body is a lot more interesting
  // when doing leak detection.
  MarkRecursively(&map, mark_visitor);

  if (!found_target_in_trace_)  // don't pop if found the target
    object_stack_.RemoveLast();
}
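
// Marking here works by adding kMarkTag to the object's map pointer, so for
// a visited object map()->IsHeapObject() is false; UnmarkRecursively below
// subtracts the tag again, restoring the original map pointers once the
// trace completes.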
void PathTracer::UnmarkRecursively(Object** p, UnmarkVisitor* unmark_visitor) {
  if (!(*p)->IsHeapObject()) return;

  HeapObject* obj = HeapObject::cast(*p);

  Object* map = obj->map();

  if (map->IsHeapObject()) return;  // Unmarked already.

  Address map_addr = reinterpret_cast<Address>(map);

  map_addr -= kMarkTag;

  ASSERT_TAG_ALIGNED(map_addr);

  HeapObject* map_p = HeapObject::FromAddress(map_addr);

  obj->set_map_no_write_barrier(reinterpret_cast<Map*>(map_p));

  UnmarkRecursively(reinterpret_cast<Object**>(&map_p), unmark_visitor);

  obj->IterateBody(Map::cast(map_p)->instance_type(),
                   obj->SizeFromMap(Map::cast(map_p)),
                   unmark_visitor);
}


void PathTracer::ProcessResults() {
  if (found_target_) {
    PrintF("=====================================\n");
    PrintF("====        Path to object       ====\n");
    PrintF("=====================================\n\n");

    ASSERT(!object_stack_.is_empty());
    for (int i = 0; i < object_stack_.length(); i++) {
      if (i > 0) PrintF("\n     |\n     |\n     V\n\n");
      Object* obj = object_stack_[i];
      obj->Print();
    }
    PrintF("=====================================\n");
  }
}


// Triggers a depth-first traversal of reachable objects from one
// given root object and finds a path to a specific heap object and
// prints it.
void Heap::TracePathToObjectFrom(Object* target, Object* root) {
  PathTracer tracer(target, PathTracer::FIND_ALL, VISIT_ALL);
  tracer.VisitPointer(&root);
}


// Triggers a depth-first traversal of reachable objects from roots
// and finds a path to a specific heap object and prints it.
void Heap::TracePathToObject(Object* target) {
  PathTracer tracer(target, PathTracer::FIND_ALL, VISIT_ALL);
  IterateRoots(&tracer, VISIT_ONLY_STRONG);
}


// Triggers a depth-first traversal of reachable objects from roots
// and finds a path to any global object and prints it. Useful for
// determining the source for leaks of global objects.
void Heap::TracePathToGlobal() {
  PathTracer tracer(PathTracer::kAnyGlobalObject,
                    PathTracer::FIND_ALL,
                    VISIT_ALL);
  IterateRoots(&tracer, VISIT_ONLY_STRONG);
}


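// Illustrative usage (editorial, not part of the original source): these
// helpers are debugging aids, typically invoked by hand from a debugger or
// from temporary instrumentation in a debug build, e.g.:
//
//   Heap* heap = isolate->heap();
//   heap->TracePathToGlobal();                 // Why is a global alive?
//   heap->TracePathToObject(suspect);          // Path from the roots.
//   heap->TracePathToObjectFrom(suspect, some_root);
//
// Each call prints the chain of retaining objects found by PathTracer.

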
static intptr_t CountTotalHolesSize(Heap* heap) {
  intptr_t holes_size = 0;
  OldSpaces spaces(heap);
  for (OldSpace* space = spaces.next();
       space != NULL;
       space = spaces.next()) {
    holes_size += space->Waste() + space->Available();
  }
  return holes_size;
}


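// Note (editorial): a "hole" here is memory an old space owns but no live
// object occupies: Waste() counts fragments too small to keep on the free
// list, Available() counts free-list bytes. The sum feeds the
// holes_size_before/holes_size_after fields of the --trace-gc-nvp output
// produced by GCTracer below.

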
GCTracer::GCTracer(Heap* heap,
                   const char* gc_reason,
                   const char* collector_reason)
    : start_time_(0.0),
      start_object_size_(0),
      start_memory_size_(0),
      gc_count_(0),
      full_gc_count_(0),
      allocated_since_last_gc_(0),
      spent_in_mutator_(0),
      promoted_objects_size_(0),
      nodes_died_in_new_space_(0),
      nodes_copied_in_new_space_(0),
      nodes_promoted_(0),
      heap_(heap),
      gc_reason_(gc_reason),
      collector_reason_(collector_reason) {
  if (!FLAG_trace_gc && !FLAG_print_cumulative_gc_stat) return;
  start_time_ = OS::TimeCurrentMillis();
  start_object_size_ = heap_->SizeOfObjects();
  start_memory_size_ = heap_->isolate()->memory_allocator()->Size();

  for (int i = 0; i < Scope::kNumberOfScopes; i++) {
    scopes_[i] = 0;
  }

  in_free_list_or_wasted_before_gc_ = CountTotalHolesSize(heap);

  allocated_since_last_gc_ =
      heap_->SizeOfObjects() - heap_->alive_after_last_gc_;

  if (heap_->last_gc_end_timestamp_ > 0) {
    spent_in_mutator_ = Max(start_time_ - heap_->last_gc_end_timestamp_, 0.0);
  }

  steps_count_ = heap_->incremental_marking()->steps_count();
  steps_took_ = heap_->incremental_marking()->steps_took();
  longest_step_ = heap_->incremental_marking()->longest_step();
  steps_count_since_last_gc_ =
      heap_->incremental_marking()->steps_count_since_last_gc();
  steps_took_since_last_gc_ =
      heap_->incremental_marking()->steps_took_since_last_gc();
}


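// Note (editorial): GCTracer is an RAII helper. A hypothetical sketch of how
// a collector entry point uses it (names simplified, not the exact call
// site):
//
//   void Heap::PerformGarbageCollection(...) {
//     GCTracer tracer(this, gc_reason, collector_reason);
//     ...collect...
//   }  // ~GCTracer() prints the --trace-gc line and updates statistics.

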
GCTracer::~GCTracer() {
  // Printf ONE line iff flag is set.
  if (!FLAG_trace_gc && !FLAG_print_cumulative_gc_stat) return;

  bool first_gc = (heap_->last_gc_end_timestamp_ == 0);

  heap_->alive_after_last_gc_ = heap_->SizeOfObjects();
  heap_->last_gc_end_timestamp_ = OS::TimeCurrentMillis();

  double time = heap_->last_gc_end_timestamp_ - start_time_;

  // Update cumulative GC statistics if required.
  if (FLAG_print_cumulative_gc_stat) {
    heap_->total_gc_time_ms_ += time;
    heap_->max_gc_pause_ = Max(heap_->max_gc_pause_, time);
    heap_->max_alive_after_gc_ = Max(heap_->max_alive_after_gc_,
                                     heap_->alive_after_last_gc_);
    if (!first_gc) {
      heap_->min_in_mutator_ = Min(heap_->min_in_mutator_,
                                   spent_in_mutator_);
    }
  } else if (FLAG_trace_gc_verbose) {
    heap_->total_gc_time_ms_ += time;
  }

  if (collector_ == SCAVENGER && FLAG_trace_gc_ignore_scavenger) return;

  heap_->AddMarkingTime(scopes_[Scope::MC_MARK]);

  if (FLAG_print_cumulative_gc_stat && !FLAG_trace_gc) return;
  PrintPID("%8.0f ms: ", heap_->isolate()->time_millis_since_init());

  if (!FLAG_trace_gc_nvp) {
    int external_time = static_cast<int>(scopes_[Scope::EXTERNAL]);

    double end_memory_size_mb =
        static_cast<double>(heap_->isolate()->memory_allocator()->Size()) / MB;

    PrintF("%s %.1f (%.1f) -> %.1f (%.1f) MB, ",
           CollectorString(),
           static_cast<double>(start_object_size_) / MB,
           static_cast<double>(start_memory_size_) / MB,
           SizeOfHeapObjects(),
           end_memory_size_mb);

    if (external_time > 0) PrintF("%d / ", external_time);
    PrintF("%.1f ms", time);
    if (steps_count_ > 0) {
      if (collector_ == SCAVENGER) {
        PrintF(" (+ %.1f ms in %d steps since last GC)",
               steps_took_since_last_gc_,
               steps_count_since_last_gc_);
      } else {
        PrintF(" (+ %.1f ms in %d steps since start of marking, "
               "biggest step %.1f ms)",
               steps_took_,
               steps_count_,
               longest_step_);
      }
    }

    if (gc_reason_ != NULL) {
      PrintF(" [%s]", gc_reason_);
    }

    if (collector_reason_ != NULL) {
      PrintF(" [%s]", collector_reason_);
    }

    PrintF(".\n");
  } else {
    PrintF("pause=%.1f ", time);
    PrintF("mutator=%.1f ", spent_in_mutator_);
    PrintF("gc=");
    switch (collector_) {
      case SCAVENGER:
        PrintF("s");
        break;
      case MARK_COMPACTOR:
        PrintF("ms");
        break;
      default:
        UNREACHABLE();
    }
    PrintF(" ");

    PrintF("external=%.1f ", scopes_[Scope::EXTERNAL]);
    PrintF("mark=%.1f ", scopes_[Scope::MC_MARK]);
    PrintF("sweep=%.1f ", scopes_[Scope::MC_SWEEP]);
    PrintF("sweepns=%.1f ", scopes_[Scope::MC_SWEEP_NEWSPACE]);
    PrintF("evacuate=%.1f ", scopes_[Scope::MC_EVACUATE_PAGES]);
    PrintF("new_new=%.1f ", scopes_[Scope::MC_UPDATE_NEW_TO_NEW_POINTERS]);
    PrintF("root_new=%.1f ", scopes_[Scope::MC_UPDATE_ROOT_TO_NEW_POINTERS]);
    PrintF("old_new=%.1f ", scopes_[Scope::MC_UPDATE_OLD_TO_NEW_POINTERS]);
    PrintF("compaction_ptrs=%.1f ",
           scopes_[Scope::MC_UPDATE_POINTERS_TO_EVACUATED]);
    PrintF("intracompaction_ptrs=%.1f ",
           scopes_[Scope::MC_UPDATE_POINTERS_BETWEEN_EVACUATED]);
    PrintF("misc_compaction=%.1f ", scopes_[Scope::MC_UPDATE_MISC_POINTERS]);

    PrintF("total_size_before=%" V8_PTR_PREFIX "d ", start_object_size_);
    PrintF("total_size_after=%" V8_PTR_PREFIX "d ", heap_->SizeOfObjects());
    PrintF("holes_size_before=%" V8_PTR_PREFIX "d ",
           in_free_list_or_wasted_before_gc_);
    PrintF("holes_size_after=%" V8_PTR_PREFIX "d ", CountTotalHolesSize(heap_));

    PrintF("allocated=%" V8_PTR_PREFIX "d ", allocated_since_last_gc_);
    PrintF("promoted=%" V8_PTR_PREFIX "d ", promoted_objects_size_);
    PrintF("nodes_died_in_new=%d ", nodes_died_in_new_space_);
    PrintF("nodes_copied_in_new=%d ", nodes_copied_in_new_space_);
    PrintF("nodes_promoted=%d ", nodes_promoted_);

    if (collector_ == SCAVENGER) {
      PrintF("stepscount=%d ", steps_count_since_last_gc_);
      PrintF("stepstook=%.1f ", steps_took_since_last_gc_);
    } else {
      PrintF("stepscount=%d ", steps_count_);
      PrintF("stepstook=%.1f ", steps_took_);
      PrintF("longeststep=%.1f ", longest_step_);
    }

    PrintF("\n");
  }

  heap_->PrintShortHeapStatistics();
}


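// Illustrative output (editorial; values invented for the example). With
// --trace-gc the short form prints one line per GC, roughly:
//
//       42 ms: Scavenge 3.2 (6.0) -> 2.1 (6.0) MB, 1.3 ms [allocation failure].
//
// With --trace-gc-nvp the same information is emitted as name=value pairs
// (pause=, mutator=, mark=, sweep=, ...), which is easier to post-process
// with scripts.

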
const char* GCTracer::CollectorString() {
  switch (collector_) {
    case SCAVENGER:
      return "Scavenge";
    case MARK_COMPACTOR:
      return "Mark-sweep";
  }
  return "Unknown GC";
}


int KeyedLookupCache::Hash(Map* map, Name* name) {
  // Uses only lower 32 bits if pointers are larger.
  uintptr_t addr_hash =
      static_cast<uint32_t>(reinterpret_cast<uintptr_t>(map)) >> kMapHashShift;
  return static_cast<uint32_t>((addr_hash ^ name->Hash()) & kCapacityMask);
}


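// Note (editorial): the low bits of a map pointer carry little entropy, so
// the pointer is shifted right by kMapHashShift before being XORed with the
// name's hash; masking with kCapacityMask keeps the result inside the cache.
// Lookup() below additionally masks with kHashMask, which aligns the index
// to the start of a kEntriesPerBucket-sized bucket.

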
int KeyedLookupCache::Lookup(Map* map, Name* name) {
  int index = (Hash(map, name) & kHashMask);
  for (int i = 0; i < kEntriesPerBucket; i++) {
    Key& key = keys_[index + i];
    if ((key.map == map) && key.name->Equals(name)) {
      return field_offsets_[index + i];
    }
  }
  return kNotFound;
}


void KeyedLookupCache::Update(Map* map, Name* name, int field_offset) {
  if (!name->IsUniqueName()) {
    String* internalized_string;
    if (!HEAP->InternalizeStringIfExists(
            String::cast(name), &internalized_string)) {
      return;
    }
    name = internalized_string;
  }
  // This cache is cleared only between mark compact passes, so we expect the
  // cache to only contain old space names.
  ASSERT(!HEAP->InNewSpace(name));

  int index = (Hash(map, name) & kHashMask);
  // After a GC there will be free slots, so we use them in order (this may
  // help to get the most frequently used one in position 0).
  for (int i = 0; i < kEntriesPerBucket; i++) {
    Key& key = keys_[index + i];
    Object* free_entry_indicator = NULL;
    if (key.map == free_entry_indicator) {
      key.map = map;
      key.name = name;
      field_offsets_[index + i] = field_offset;
      return;
    }
  }

  // No free entry found in this bucket, so we move them all down one and
  // put the new entry at position zero.
  for (int i = kEntriesPerBucket - 1; i > 0; i--) {
    Key& key = keys_[index + i];
    Key& key2 = keys_[index + i - 1];
    key = key2;
    field_offsets_[index + i] = field_offsets_[index + i - 1];
  }

  // Write the new first entry.
  Key& key = keys_[index];
  key.map = map;
  key.name = name;
  field_offsets_[index] = field_offset;
}


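// Note (editorial): each bucket behaves like a small LRU set. Update() first
// reuses a free slot (a NULL map marks one); failing that, it shifts every
// entry down one position and installs the new entry at slot 0, so the most
// recently inserted mapping is probed first by Lookup().

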
void KeyedLookupCache::Clear() {
  for (int index = 0; index < kLength; index++) keys_[index].map = NULL;
}


void DescriptorLookupCache::Clear() {
  for (int index = 0; index < kLength; index++) keys_[index].source = NULL;
}


#ifdef DEBUG
void Heap::GarbageCollectionGreedyCheck() {
  ASSERT(FLAG_gc_greedy);
  if (isolate_->bootstrapper()->IsActive()) return;
  if (disallow_allocation_failure()) return;
  CollectGarbage(NEW_SPACE);
}
#endif


TranscendentalCache::SubCache::SubCache(Type t)
    : type_(t),
      isolate_(Isolate::Current()) {
  uint32_t in0 = 0xffffffffu;  // Bit-pattern for a NaN that isn't
  uint32_t in1 = 0xffffffffu;  // generated by the FPU.
  for (int i = 0; i < kCacheSize; i++) {
    elements_[i].in[0] = in0;
    elements_[i].in[1] = in1;
    elements_[i].output = NULL;
  }
}


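// Note (editorial): each cache entry stores the two 32-bit halves of the
// double-precision input. 0xffffffff in both halves encodes a NaN payload
// that the FPU never produces, so a freshly-initialized entry can never
// collide with a real lookup key and no separate "empty" flag is needed.

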
void TranscendentalCache::Clear() {
  for (int i = 0; i < kNumberOfCaches; i++) {
    if (caches_[i] != NULL) {
      delete caches_[i];
      caches_[i] = NULL;
    }
  }
}


void ExternalStringTable::CleanUp() {
  int last = 0;
  for (int i = 0; i < new_space_strings_.length(); ++i) {
    if (new_space_strings_[i] == heap_->the_hole_value()) {
      continue;
    }
    if (heap_->InNewSpace(new_space_strings_[i])) {
      new_space_strings_[last++] = new_space_strings_[i];
    } else {
      old_space_strings_.Add(new_space_strings_[i]);
    }
  }
  new_space_strings_.Rewind(last);
  new_space_strings_.Trim();

  last = 0;
  for (int i = 0; i < old_space_strings_.length(); ++i) {
    if (old_space_strings_[i] == heap_->the_hole_value()) {
      continue;
    }
    ASSERT(!heap_->InNewSpace(old_space_strings_[i]));
    old_space_strings_[last++] = old_space_strings_[i];
  }
  old_space_strings_.Rewind(last);
  old_space_strings_.Trim();
#ifdef VERIFY_HEAP
  if (FLAG_verify_heap) {
    Verify();
  }
#endif
}


void ExternalStringTable::TearDown() {
  new_space_strings_.Free();
  old_space_strings_.Free();
}


// Update all references.
void ErrorObjectList::UpdateReferences() {
  for (int i = 0; i < list_.length(); i++) {
    HeapObject* object = HeapObject::cast(list_[i]);
    MapWord first_word = object->map_word();
    if (first_word.IsForwardingAddress()) {
      list_[i] = first_word.ToForwardingAddress();
    }
  }
}


// Unforwarded objects in new space are dead and removed from the list.
void ErrorObjectList::UpdateReferencesInNewSpace(Heap* heap) {
  if (list_.is_empty()) return;
  if (!nested_) {
    int write_index = 0;
    for (int i = 0; i < list_.length(); i++) {
      MapWord first_word = HeapObject::cast(list_[i])->map_word();
      if (first_word.IsForwardingAddress()) {
        list_[write_index++] = first_word.ToForwardingAddress();
      }
    }
    list_.Rewind(write_index);
  } else {
    // If a GC is triggered during DeferredFormatStackTrace, we do not move
    // objects in the list, just remove dead ones, so as not to confuse the
    // loop in DeferredFormatStackTrace.
    for (int i = 0; i < list_.length(); i++) {
      MapWord first_word = HeapObject::cast(list_[i])->map_word();
      list_[i] = first_word.IsForwardingAddress()
                     ? first_word.ToForwardingAddress()
                     : heap->the_hole_value();
    }
  }
}


void ErrorObjectList::DeferredFormatStackTrace(Isolate* isolate) {
  // If formatting the stack trace causes a GC, this method will be
  // recursively called. In that case, skip the recursive call, since
  // the loop modifies the list while iterating over it.
  if (nested_ || list_.is_empty() || isolate->has_pending_exception()) return;
  nested_ = true;
  HandleScope scope(isolate);
  Handle<String> stack_key = isolate->factory()->stack_string();
  int write_index = 0;
  int budget = kBudgetPerGC;
  for (int i = 0; i < list_.length(); i++) {
    Object* object = list_[i];
    JSFunction* getter_fun;

    { AssertNoAllocation assert;
      // Skip possible holes in the list.
      if (object->IsTheHole()) continue;
      if (isolate->heap()->InNewSpace(object) || budget == 0) {
        list_[write_index++] = object;
        continue;
      }

      // Check whether the stack property is backed by the original getter.
      LookupResult lookup(isolate);
      JSObject::cast(object)->LocalLookupRealNamedProperty(*stack_key, &lookup);
      if (!lookup.IsFound() || lookup.type() != CALLBACKS) continue;
      Object* callback = lookup.GetCallbackObject();
      if (!callback->IsAccessorPair()) continue;
      Object* getter_obj = AccessorPair::cast(callback)->getter();
      if (!getter_obj->IsJSFunction()) continue;
      getter_fun = JSFunction::cast(getter_obj);
      String* key = isolate->heap()->hidden_stack_trace_string();
      Object* value = getter_fun->GetHiddenProperty(key);
      if (key != value) continue;
    }

    budget--;
    HandleScope scope(isolate);
    bool has_exception = false;
#ifdef DEBUG
    Handle<Map> map(HeapObject::cast(object)->map(), isolate);
#endif
    Handle<Object> object_handle(object, isolate);
    Handle<Object> getter_handle(getter_fun, isolate);
    Execution::Call(getter_handle, object_handle, 0, NULL, &has_exception);
    ASSERT(*map == HeapObject::cast(*object_handle)->map());
    if (has_exception) {
      // Hit an exception (most likely a stack overflow).
      // Wrap up this pass and retry after another GC.
      isolate->clear_pending_exception();
      // We use the handle since calling the getter might have caused a GC.
      list_[write_index++] = *object_handle;
      budget = 0;
    }
  }
  list_.Rewind(write_index);
  list_.Trim();
  nested_ = false;
}


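// Note (editorial): kBudgetPerGC bounds how many stack traces are formatted
// per GC, and nested_ guards against reentrancy: invoking the stack getter
// runs JavaScript, which may itself trigger a GC and land back in this code.

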
void ErrorObjectList::RemoveUnmarked(Heap* heap) {
  for (int i = 0; i < list_.length(); i++) {
    HeapObject* object = HeapObject::cast(list_[i]);
    if (!Marking::MarkBitFrom(object).Get()) {
      list_[i] = heap->the_hole_value();
    }
  }
}


void ErrorObjectList::TearDown() {
  list_.Free();
}


void Heap::QueueMemoryChunkForFree(MemoryChunk* chunk) {
  chunk->set_next_chunk(chunks_queued_for_free_);
  chunks_queued_for_free_ = chunk;
}


void Heap::FreeQueuedChunks() {
  if (chunks_queued_for_free_ == NULL) return;
  MemoryChunk* next;
  MemoryChunk* chunk;
  for (chunk = chunks_queued_for_free_; chunk != NULL; chunk = next) {
    next = chunk->next_chunk();
    chunk->SetFlag(MemoryChunk::ABOUT_TO_BE_FREED);

    if (chunk->owner()->identity() == LO_SPACE) {
      // StoreBuffer::Filter relies on MemoryChunk::FromAnyPointerAddress.
      // If FromAnyPointerAddress encounters a slot that belongs to a large
      // chunk queued for deletion it will fail to find the chunk, because
      // it tries to perform a search in the list of pages owned by the
      // large object space, and queued chunks were detached from that list.
      // To work around this we split the large chunk into normal kPageSize
      // aligned pieces and initialize the size, owner and flags field of
      // every piece. If FromAnyPointerAddress encounters a slot that belongs
      // to one of these smaller pieces it will treat it as a slot on a
      // normal Page.
      Address chunk_end = chunk->address() + chunk->size();
      MemoryChunk* inner = MemoryChunk::FromAddress(
          chunk->address() + Page::kPageSize);
      MemoryChunk* inner_last = MemoryChunk::FromAddress(chunk_end - 1);
      while (inner <= inner_last) {
        // Size of a large chunk is always a multiple of
        // OS::AllocateAlignment() so there is always
        // enough space for a fake MemoryChunk header.
        Address area_end = Min(inner->address() + Page::kPageSize, chunk_end);
        // Guard against overflow.
        if (area_end < inner->address()) area_end = chunk_end;
        inner->SetArea(inner->address(), area_end);
        inner->set_size(Page::kPageSize);
        inner->set_owner(lo_space());
        inner->SetFlag(MemoryChunk::ABOUT_TO_BE_FREED);
        inner = MemoryChunk::FromAddress(
            inner->address() + Page::kPageSize);
      }
    }
  }
  isolate_->heap()->store_buffer()->Compact();
  isolate_->heap()->store_buffer()->Filter(MemoryChunk::ABOUT_TO_BE_FREED);
  for (chunk = chunks_queued_for_free_; chunk != NULL; chunk = next) {
    next = chunk->next_chunk();
    isolate_->memory_allocator()->Free(chunk);
  }
  chunks_queued_for_free_ = NULL;
}


void Heap::RememberUnmappedPage(Address page, bool compacted) {
  uintptr_t p = reinterpret_cast<uintptr_t>(page);
  // Tag the page pointer to make it findable in the dump file.
  if (compacted) {
    p ^= 0xc1ead & (Page::kPageSize - 1);  // Cleared.
  } else {
    p ^= 0x1d1ed & (Page::kPageSize - 1);  // I died.
  }
  remembered_unmapped_pages_[remembered_unmapped_pages_index_] =
      reinterpret_cast<Address>(p);
  remembered_unmapped_pages_index_++;
  remembered_unmapped_pages_index_ %= kRememberedUnmappedPages;
}


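// Note (editorial): pages are page-size aligned, so the low bits of a page
// address are normally zero. XORing a recognizable pattern into those bits
// ("c1ead" for pages freed after compaction, "1d1ed" otherwise) makes
// recently unmapped pages easy to spot in a crash dump while still allowing
// the original address to be recovered by masking off the low bits.

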
void Heap::ClearObjectStats(bool clear_last_time_stats) {
  memset(object_counts_, 0, sizeof(object_counts_));
  memset(object_sizes_, 0, sizeof(object_sizes_));
  if (clear_last_time_stats) {
    memset(object_counts_last_time_, 0, sizeof(object_counts_last_time_));
    memset(object_sizes_last_time_, 0, sizeof(object_sizes_last_time_));
  }
}


static LazyMutex checkpoint_object_stats_mutex = LAZY_MUTEX_INITIALIZER;


void Heap::CheckpointObjectStats() {
  ScopedLock lock(checkpoint_object_stats_mutex.Pointer());
  Counters* counters = isolate()->counters();
#define ADJUST_LAST_TIME_OBJECT_COUNT(name)               \
  counters->count_of_##name()->Increment(                 \
      static_cast<int>(object_counts_[name]));            \
  counters->count_of_##name()->Decrement(                 \
      static_cast<int>(object_counts_last_time_[name]));  \
  counters->size_of_##name()->Increment(                  \
      static_cast<int>(object_sizes_[name]));             \
  counters->size_of_##name()->Decrement(                  \
      static_cast<int>(object_sizes_last_time_[name]));
  INSTANCE_TYPE_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
#undef ADJUST_LAST_TIME_OBJECT_COUNT
  int index;
#define ADJUST_LAST_TIME_OBJECT_COUNT(name)               \
  index = FIRST_CODE_KIND_SUB_TYPE + Code::name;          \
  counters->count_of_CODE_TYPE_##name()->Increment(       \
      static_cast<int>(object_counts_[index]));           \
  counters->count_of_CODE_TYPE_##name()->Decrement(       \
      static_cast<int>(object_counts_last_time_[index])); \
  counters->size_of_CODE_TYPE_##name()->Increment(        \
      static_cast<int>(object_sizes_[index]));            \
  counters->size_of_CODE_TYPE_##name()->Decrement(        \
      static_cast<int>(object_sizes_last_time_[index]));
  CODE_KIND_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
#undef ADJUST_LAST_TIME_OBJECT_COUNT
#define ADJUST_LAST_TIME_OBJECT_COUNT(name)                 \
  index = FIRST_FIXED_ARRAY_SUB_TYPE + name;                \
  counters->count_of_FIXED_ARRAY_##name()->Increment(       \
      static_cast<int>(object_counts_[index]));             \
  counters->count_of_FIXED_ARRAY_##name()->Decrement(       \
      static_cast<int>(object_counts_last_time_[index]));   \
  counters->size_of_FIXED_ARRAY_##name()->Increment(        \
      static_cast<int>(object_sizes_[index]));              \
  counters->size_of_FIXED_ARRAY_##name()->Decrement(        \
      static_cast<int>(object_sizes_last_time_[index]));
  FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
#undef ADJUST_LAST_TIME_OBJECT_COUNT

  OS::MemCopy(object_counts_last_time_, object_counts_,
              sizeof(object_counts_));
  OS::MemCopy(object_sizes_last_time_, object_sizes_, sizeof(object_sizes_));
  ClearObjectStats();
}


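// Note (editorial): the Increment(current)/Decrement(previous) pairs above
// apply the *delta* since the last checkpoint to each external counter, so
// the counters track live totals without ever being reset; the _last_time_
// arrays then become the baseline for the next GC cycle.

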
Heap::RelocationLock::RelocationLock(Heap* heap) : heap_(heap) {
  if (FLAG_parallel_recompilation) {
    heap_->relocation_mutex_->Lock();
#ifdef DEBUG
    heap_->relocation_mutex_locked_by_optimizer_thread_ =
        heap_->isolate()->optimizing_compiler_thread()->IsOptimizerThread();
#endif  // DEBUG
  }
}


} }  // namespace v8::internal