1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission.
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 #include "accessors.h"
32 #include "bootstrapper.h"
34 #include "compilation-cache.h"
36 #include "deoptimizer.h"
37 #include "global-handles.h"
38 #include "heap-profiler.h"
39 #include "incremental-marking.h"
40 #include "mark-compact.h"
42 #include "objects-visiting.h"
43 #include "objects-visiting-inl.h"
45 #include "runtime-profiler.h"
46 #include "scopeinfo.h"
48 #include "store-buffer.h"
49 #include "v8threads.h"
51 #include "vm-state-inl.h"
52 #if V8_TARGET_ARCH_ARM && !V8_INTERPRETED_REGEXP
53 #include "regexp-macro-assembler.h"
54 #include "arm/regexp-macro-assembler-arm.h"
56 #if V8_TARGET_ARCH_MIPS && !V8_INTERPRETED_REGEXP
57 #include "regexp-macro-assembler.h"
58 #include "mips/regexp-macro-assembler-mips.h"
67 // semispace_size_ should be a power of 2 and old_generation_size_ should be
68 // a multiple of Page::kPageSize.
69 #if defined(V8_TARGET_ARCH_X64)
70 #define LUMP_OF_MEMORY (2 * MB)
71 code_range_size_(512*MB),
73 #define LUMP_OF_MEMORY MB
77 reserved_semispace_size_(4 * Max(LUMP_OF_MEMORY, Page::kPageSize)),
78 max_semispace_size_(4 * Max(LUMP_OF_MEMORY, Page::kPageSize)),
79 initial_semispace_size_(Page::kPageSize),
80 max_old_generation_size_(192*MB),
81 max_executable_size_(max_old_generation_size_),
83 reserved_semispace_size_(8 * Max(LUMP_OF_MEMORY, Page::kPageSize)),
84 max_semispace_size_(8 * Max(LUMP_OF_MEMORY, Page::kPageSize)),
85 initial_semispace_size_(Page::kPageSize),
86 max_old_generation_size_(700ul * LUMP_OF_MEMORY),
87 max_executable_size_(256l * LUMP_OF_MEMORY),
90 // Variables set based on semispace_size_ and old_generation_size_ in
91 // ConfigureHeap (survived_since_last_expansion_, external_allocation_limit_)
92 // Will be 4 * reserved_semispace_size_ to ensure that young
93 // generation can be aligned to its size.
94 survived_since_last_expansion_(0),
96 always_allocate_scope_depth_(0),
97 linear_allocation_scope_depth_(0),
98 contexts_disposed_(0),
100 flush_monomorphic_ics_(false),
101 scan_on_scavenge_pages_(0),
103 old_pointer_space_(NULL),
104 old_data_space_(NULL),
109 gc_state_(NOT_IN_GC),
110 gc_post_processing_depth_(0),
113 remembered_unmapped_pages_index_(0),
114 unflattened_strings_length_(0),
116 allocation_allowed_(true),
117 allocation_timeout_(0),
118 disallow_allocation_failure_(false),
120 new_space_high_promotion_mode_active_(false),
121 old_gen_promotion_limit_(kMinimumPromotionLimit),
122 old_gen_allocation_limit_(kMinimumAllocationLimit),
123 old_gen_limit_factor_(1),
124 size_of_old_gen_at_last_old_space_gc_(0),
125 external_allocation_limit_(0),
126 amount_of_external_allocated_memory_(0),
127 amount_of_external_allocated_memory_at_last_global_gc_(0),
128 old_gen_exhausted_(false),
129 store_buffer_rebuilder_(store_buffer()),
130 hidden_string_(NULL),
131 global_gc_prologue_callback_(NULL),
132 global_gc_epilogue_callback_(NULL),
133 gc_safe_size_of_old_object_(NULL),
134 total_regexp_code_generated_(0),
136 young_survivors_after_last_gc_(0),
137 high_survival_rate_period_length_(0),
138 low_survival_rate_period_length_(0),
140 previous_survival_rate_trend_(Heap::STABLE),
141 survival_rate_trend_(Heap::STABLE),
143 total_gc_time_ms_(0.0),
144 max_alive_after_gc_(0),
145 min_in_mutator_(kMaxInt),
146 alive_after_last_gc_(0),
147 last_gc_end_timestamp_(0.0),
152 incremental_marking_(this),
153 number_idle_notifications_(0),
154 last_idle_notification_gc_count_(0),
155 last_idle_notification_gc_count_init_(false),
156 mark_sweeps_since_idle_round_started_(0),
157 ms_count_at_last_idle_notification_(0),
158 gc_count_at_last_idle_gc_(0),
159 scavenges_since_last_idle_round_(kIdleScavengeThreshold),
160 gcs_since_last_deopt_(0),
162 no_weak_embedded_maps_verification_scope_depth_(0),
164 promotion_queue_(this),
166 chunks_queued_for_free_(NULL),
167 relocation_mutex_(NULL) {
168 // Allow build-time customization of the max semispace size. Building
169 // V8 with snapshots and a non-default max semispace size is much
170 // easier if you can define it as part of the build environment.
171 #if defined(V8_MAX_SEMISPACE_SIZE)
172 max_semispace_size_ = reserved_semispace_size_ = V8_MAX_SEMISPACE_SIZE;
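// Illustrative note (assumption, not taken from this file): the override is
// given in bytes and feeds both assignments above, so a build that wants,
// say, 8 MB semispaces could pass something like
//   -DV8_MAX_SEMISPACE_SIZE=(8 * 1024 * 1024)
// on the compiler command line when producing snapshots.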
175 intptr_t max_virtual = OS::MaxVirtualMemory();
177 if (max_virtual > 0) {
178 if (code_range_size_ > 0) {
179 // Reserve no more than 1/8 of the memory for the code range.
180 code_range_size_ = Min(code_range_size_, max_virtual >> 3);
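// Worked example (illustrative numbers only): with 2 GB of addressable
// virtual memory, max_virtual >> 3 is 256 MB, so the 512 MB default code
// range on x64 would be clamped to 256 MB here.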
184 memset(roots_, 0, sizeof(roots_[0]) * kRootListLength);
185 native_contexts_list_ = NULL;
186 mark_compact_collector_.heap_ = this;
187 external_string_table_.heap_ = this;
188 // Put a dummy entry in the remembered pages so we can find the list in the
189 // minidump even if there are no real unmapped pages.
190 RememberUnmappedPage(NULL, false);
192 ClearObjectStats(true);
196 intptr_t Heap::Capacity() {
197 if (!HasBeenSetUp()) return 0;
199 return new_space_.Capacity() +
200 old_pointer_space_->Capacity() +
201 old_data_space_->Capacity() +
202 code_space_->Capacity() +
203 map_space_->Capacity() +
204 cell_space_->Capacity();
208 intptr_t Heap::CommittedMemory() {
209 if (!HasBeenSetUp()) return 0;
211 return new_space_.CommittedMemory() +
212 old_pointer_space_->CommittedMemory() +
213 old_data_space_->CommittedMemory() +
214 code_space_->CommittedMemory() +
215 map_space_->CommittedMemory() +
216 cell_space_->CommittedMemory() +
221 size_t Heap::CommittedPhysicalMemory() {
222 if (!HasBeenSetUp()) return 0;
224 return new_space_.CommittedPhysicalMemory() +
225 old_pointer_space_->CommittedPhysicalMemory() +
226 old_data_space_->CommittedPhysicalMemory() +
227 code_space_->CommittedPhysicalMemory() +
228 map_space_->CommittedPhysicalMemory() +
229 cell_space_->CommittedPhysicalMemory() +
230 lo_space_->CommittedPhysicalMemory();
234 intptr_t Heap::CommittedMemoryExecutable() {
235 if (!HasBeenSetUp()) return 0;
237 return isolate()->memory_allocator()->SizeExecutable();
241 intptr_t Heap::Available() {
242 if (!HasBeenSetUp()) return 0;
244 return new_space_.Available() +
245 old_pointer_space_->Available() +
246 old_data_space_->Available() +
247 code_space_->Available() +
248 map_space_->Available() +
249 cell_space_->Available();
253 bool Heap::HasBeenSetUp() {
254 return old_pointer_space_ != NULL &&
255 old_data_space_ != NULL &&
256 code_space_ != NULL &&
257 map_space_ != NULL &&
258 cell_space_ != NULL &&
263 int Heap::GcSafeSizeOfOldObject(HeapObject* object) {
264 if (IntrusiveMarking::IsMarked(object)) {
265 return IntrusiveMarking::SizeOfMarkedObject(object);
267 return object->SizeFromMap(object->map());
271 GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space,
272 const char** reason) {
273 // Is global GC requested?
274 if (space != NEW_SPACE) {
275 isolate_->counters()->gc_compactor_caused_by_request()->Increment();
276 *reason = "GC in old space requested";
277 return MARK_COMPACTOR;
280 if (FLAG_gc_global || (FLAG_stress_compaction && (gc_count_ & 1) != 0)) {
281 *reason = "GC in old space forced by flags";
282 return MARK_COMPACTOR;
285 // Is enough data promoted to justify a global GC?
286 if (OldGenerationPromotionLimitReached()) {
287 isolate_->counters()->gc_compactor_caused_by_promoted_data()->Increment();
288 *reason = "promotion limit reached";
289 return MARK_COMPACTOR;
292 // Have allocation in OLD and LO failed?
293 if (old_gen_exhausted_) {
294 isolate_->counters()->
295 gc_compactor_caused_by_oldspace_exhaustion()->Increment();
296 *reason = "old generations exhausted";
297 return MARK_COMPACTOR;
300 // Is there enough space left in OLD to guarantee that a scavenge can
303 // Note that MemoryAllocator->MaxAvailable() undercounts the memory available
304 // for object promotion. It counts only the bytes that the memory
305 // allocator has not yet allocated from the OS and assigned to any space,
306 // and does not count available bytes already in the old space or code
307 // space. Undercounting is safe---we may get an unrequested full GC when
308 // a scavenge would have succeeded.
309 if (isolate_->memory_allocator()->MaxAvailable() <= new_space_.Size()) {
310 isolate_->counters()->
311 gc_compactor_caused_by_oldspace_exhaustion()->Increment();
312 *reason = "scavenge might not succeed";
313 return MARK_COMPACTOR;
322 // TODO(1238405): Combine the infrastructure for --heap-stats and
323 // --log-gc to avoid the complicated preprocessor and flag testing.
324 void Heap::ReportStatisticsBeforeGC() {
325 // Heap::ReportHeapStatistics will also log NewSpace statistics when
326 // --log-gc is set. The following logic is used to avoid
329 if (FLAG_heap_stats || FLAG_log_gc) new_space_.CollectStatistics();
330 if (FLAG_heap_stats) {
331 ReportHeapStatistics("Before GC");
332 } else if (FLAG_log_gc) {
333 new_space_.ReportStatistics();
335 if (FLAG_heap_stats || FLAG_log_gc) new_space_.ClearHistograms();
338 new_space_.CollectStatistics();
339 new_space_.ReportStatistics();
340 new_space_.ClearHistograms();
346 void Heap::PrintShortHeapStatistics() {
347 if (!FLAG_trace_gc_verbose) return;
348 PrintPID("Memory allocator, used: %6" V8_PTR_PREFIX "d KB"
349 ", available: %6" V8_PTR_PREFIX "d KB\n",
350 isolate_->memory_allocator()->Size() / KB,
351 isolate_->memory_allocator()->Available() / KB);
352 PrintPID("New space, used: %6" V8_PTR_PREFIX "d KB"
353 ", available: %6" V8_PTR_PREFIX "d KB"
354 ", committed: %6" V8_PTR_PREFIX "d KB\n",
355 new_space_.Size() / KB,
356 new_space_.Available() / KB,
357 new_space_.CommittedMemory() / KB);
358 PrintPID("Old pointers, used: %6" V8_PTR_PREFIX "d KB"
359 ", available: %6" V8_PTR_PREFIX "d KB"
360 ", committed: %6" V8_PTR_PREFIX "d KB\n",
361 old_pointer_space_->SizeOfObjects() / KB,
362 old_pointer_space_->Available() / KB,
363 old_pointer_space_->CommittedMemory() / KB);
364 PrintPID("Old data space, used: %6" V8_PTR_PREFIX "d KB"
365 ", available: %6" V8_PTR_PREFIX "d KB"
366 ", committed: %6" V8_PTR_PREFIX "d KB\n",
367 old_data_space_->SizeOfObjects() / KB,
368 old_data_space_->Available() / KB,
369 old_data_space_->CommittedMemory() / KB);
370 PrintPID("Code space, used: %6" V8_PTR_PREFIX "d KB"
371 ", available: %6" V8_PTR_PREFIX "d KB"
372 ", committed: %6" V8_PTR_PREFIX "d KB\n",
373 code_space_->SizeOfObjects() / KB,
374 code_space_->Available() / KB,
375 code_space_->CommittedMemory() / KB);
376 PrintPID("Map space, used: %6" V8_PTR_PREFIX "d KB"
377 ", available: %6" V8_PTR_PREFIX "d KB"
378 ", committed: %6" V8_PTR_PREFIX "d KB\n",
379 map_space_->SizeOfObjects() / KB,
380 map_space_->Available() / KB,
381 map_space_->CommittedMemory() / KB);
382 PrintPID("Cell space, used: %6" V8_PTR_PREFIX "d KB"
383 ", available: %6" V8_PTR_PREFIX "d KB"
384 ", committed: %6" V8_PTR_PREFIX "d KB\n",
385 cell_space_->SizeOfObjects() / KB,
386 cell_space_->Available() / KB,
387 cell_space_->CommittedMemory() / KB);
388 PrintPID("Large object space, used: %6" V8_PTR_PREFIX "d KB"
389 ", available: %6" V8_PTR_PREFIX "d KB"
390 ", committed: %6" V8_PTR_PREFIX "d KB\n",
391 lo_space_->SizeOfObjects() / KB,
392 lo_space_->Available() / KB,
393 lo_space_->CommittedMemory() / KB);
394 PrintPID("All spaces, used: %6" V8_PTR_PREFIX "d KB"
395 ", available: %6" V8_PTR_PREFIX "d KB"
396 ", committed: %6" V8_PTR_PREFIX "d KB\n",
397 this->SizeOfObjects() / KB,
398 this->Available() / KB,
399 this->CommittedMemory() / KB);
400 PrintPID("Total time spent in GC : %.1f ms\n", total_gc_time_ms_);
404 // TODO(1238405): Combine the infrastructure for --heap-stats and
405 // --log-gc to avoid the complicated preprocessor and flag testing.
406 void Heap::ReportStatisticsAfterGC() {
407 // As with the report before GC, we use some complicated logic to ensure that
408 // NewSpace statistics are logged exactly once when --log-gc is turned on.
410 if (FLAG_heap_stats) {
411 new_space_.CollectStatistics();
412 ReportHeapStatistics("After GC");
413 } else if (FLAG_log_gc) {
414 new_space_.ReportStatistics();
417 if (FLAG_log_gc) new_space_.ReportStatistics();
422 void Heap::GarbageCollectionPrologue() {
423 isolate_->transcendental_cache()->Clear();
424 ClearJSFunctionResultCaches();
426 unflattened_strings_length_ = 0;
428 if (FLAG_flush_code && FLAG_flush_code_incrementally) {
429 mark_compact_collector()->EnableCodeFlushing(true);
433 if (FLAG_verify_heap) {
439 ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
440 allow_allocation(false);
442 if (FLAG_gc_verbose) Print();
444 ReportStatisticsBeforeGC();
447 store_buffer()->GCPrologue();
451 intptr_t Heap::SizeOfObjects() {
453 AllSpaces spaces(this);
454 for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
455 total += space->SizeOfObjects();
461 void Heap::RepairFreeListsAfterBoot() {
462 PagedSpaces spaces(this);
463 for (PagedSpace* space = spaces.next();
465 space = spaces.next()) {
466 space->RepairFreeListsAfterBoot();
471 void Heap::GarbageCollectionEpilogue() {
472 store_buffer()->GCEpilogue();
474 // In release mode, we only zap the from space under heap verification.
475 if (Heap::ShouldZapGarbage()) {
480 if (FLAG_verify_heap) {
486 allow_allocation(true);
487 if (FLAG_print_global_handles) isolate_->global_handles()->Print();
488 if (FLAG_print_handles) PrintHandles();
489 if (FLAG_gc_verbose) Print();
490 if (FLAG_code_stats) ReportCodeStatistics("After GC");
492 if (FLAG_deopt_every_n_garbage_collections > 0) {
493 if (++gcs_since_last_deopt_ == FLAG_deopt_every_n_garbage_collections) {
494 Deoptimizer::DeoptimizeAll(isolate());
495 gcs_since_last_deopt_ = 0;
499 isolate_->counters()->alive_after_last_gc()->Set(
500 static_cast<int>(SizeOfObjects()));
502 isolate_->counters()->string_table_capacity()->Set(
503 string_table()->Capacity());
504 isolate_->counters()->number_of_symbols()->Set(
505 string_table()->NumberOfElements());
507 if (CommittedMemory() > 0) {
508 isolate_->counters()->external_fragmentation_total()->AddSample(
509 static_cast<int>(100 - (SizeOfObjects() * 100.0) / CommittedMemory()));
511 isolate_->counters()->heap_fraction_map_space()->AddSample(
513 (map_space()->CommittedMemory() * 100.0) / CommittedMemory()));
514 isolate_->counters()->heap_fraction_cell_space()->AddSample(
516 (cell_space()->CommittedMemory() * 100.0) / CommittedMemory()));
518 isolate_->counters()->heap_sample_total_committed()->AddSample(
519 static_cast<int>(CommittedMemory() / KB));
520 isolate_->counters()->heap_sample_total_used()->AddSample(
521 static_cast<int>(SizeOfObjects() / KB));
522 isolate_->counters()->heap_sample_map_space_committed()->AddSample(
523 static_cast<int>(map_space()->CommittedMemory() / KB));
524 isolate_->counters()->heap_sample_cell_space_committed()->AddSample(
525 static_cast<int>(cell_space()->CommittedMemory() / KB));
528 #define UPDATE_COUNTERS_FOR_SPACE(space) \
529 isolate_->counters()->space##_bytes_available()->Set( \
530 static_cast<int>(space()->Available())); \
531 isolate_->counters()->space##_bytes_committed()->Set( \
532 static_cast<int>(space()->CommittedMemory())); \
533 isolate_->counters()->space##_bytes_used()->Set( \
534 static_cast<int>(space()->SizeOfObjects()));
535 #define UPDATE_FRAGMENTATION_FOR_SPACE(space) \
536 if (space()->CommittedMemory() > 0) { \
537 isolate_->counters()->external_fragmentation_##space()->AddSample( \
538 static_cast<int>(100 - \
539 (space()->SizeOfObjects() * 100.0) / space()->CommittedMemory())); \
541 #define UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(space) \
542 UPDATE_COUNTERS_FOR_SPACE(space) \
543 UPDATE_FRAGMENTATION_FOR_SPACE(space)
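// The external_fragmentation_* samples record the percentage of committed
// memory in a space that is not occupied by live objects. Worked example
// (illustrative numbers only): 10 MB committed with 7 MB of objects yields
// 100 - (7 * 100.0) / 10 = 30.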
545 UPDATE_COUNTERS_FOR_SPACE(new_space)
546 UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(old_pointer_space)
547 UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(old_data_space)
548 UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(code_space)
549 UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(map_space)
550 UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(cell_space)
551 UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(lo_space)
552 #undef UPDATE_COUNTERS_FOR_SPACE
553 #undef UPDATE_FRAGMENTATION_FOR_SPACE
554 #undef UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE
557 ReportStatisticsAfterGC();
559 #ifdef ENABLE_DEBUGGER_SUPPORT
560 isolate_->debug()->AfterGarbageCollection();
561 #endif // ENABLE_DEBUGGER_SUPPORT
563 error_object_list_.DeferredFormatStackTrace(isolate());
567 void Heap::CollectAllGarbage(int flags, const char* gc_reason) {
568 // Since we are ignoring the return value, the exact choice of space does
569 // not matter, so long as we do not specify NEW_SPACE, which would not
571 mark_compact_collector_.SetFlags(flags);
572 CollectGarbage(OLD_POINTER_SPACE, gc_reason);
573 mark_compact_collector_.SetFlags(kNoGCFlags);
577 void Heap::CollectAllAvailableGarbage(const char* gc_reason) {
578 // Since we are ignoring the return value, the exact choice of space does
579 // not matter, so long as we do not specify NEW_SPACE, which would not
581 // Major GC would invoke weak handle callbacks on weakly reachable
582 // handles, but won't collect weakly reachable objects until next
583 // major GC. Therefore if we collect aggressively and weak handle callback
584 // has been invoked, we rerun major GC to release objects which become
586 // Note: as weak callbacks can execute arbitrary code, we cannot
587 // hope that eventually there will be no weak callbacks invocations.
588 // Therefore stop recollecting after several attempts.
589 mark_compact_collector()->SetFlags(kMakeHeapIterableMask |
590 kReduceMemoryFootprintMask);
591 isolate_->compilation_cache()->Clear();
592 const int kMaxNumberOfAttempts = 7;
593 for (int attempt = 0; attempt < kMaxNumberOfAttempts; attempt++) {
594 if (!CollectGarbage(OLD_POINTER_SPACE, MARK_COMPACTOR, gc_reason, NULL)) {
598 mark_compact_collector()->SetFlags(kNoGCFlags);
601 incremental_marking()->UncommitMarkingDeque();
605 bool Heap::CollectGarbage(AllocationSpace space,
606 GarbageCollector collector,
607 const char* gc_reason,
608 const char* collector_reason) {
609 // The VM is in the GC state until exiting this function.
610 VMState<GC> state(isolate_);
613 // Reset the allocation timeout to the GC interval, but make sure to
614 // allow at least a few allocations after a collection. The reason
615 // for this is that we have a lot of allocation sequences and we
616 // assume that a garbage collection will allow the subsequent
617 // allocation attempts to go through.
618 allocation_timeout_ = Max(6, FLAG_gc_interval);
621 if (collector == SCAVENGER && !incremental_marking()->IsStopped()) {
622 if (FLAG_trace_incremental_marking) {
623 PrintF("[IncrementalMarking] Scavenge during marking.\n");
627 if (collector == MARK_COMPACTOR &&
628 !mark_compact_collector()->abort_incremental_marking() &&
629 !incremental_marking()->IsStopped() &&
630 !incremental_marking()->should_hurry() &&
631 FLAG_incremental_marking_steps) {
632 // Make progress in incremental marking.
633 const intptr_t kStepSizeWhenDelayedByScavenge = 1 * MB;
634 incremental_marking()->Step(kStepSizeWhenDelayedByScavenge,
635 IncrementalMarking::NO_GC_VIA_STACK_GUARD);
636 if (!incremental_marking()->IsComplete()) {
637 if (FLAG_trace_incremental_marking) {
638 PrintF("[IncrementalMarking] Delaying MarkSweep.\n");
640 collector = SCAVENGER;
641 collector_reason = "incremental marking delaying mark-sweep";
645 bool next_gc_likely_to_collect_more = false;
647 { GCTracer tracer(this, gc_reason, collector_reason);
648 GarbageCollectionPrologue();
649 // The GC count was incremented in the prologue. Tell the tracer about
651 tracer.set_gc_count(gc_count_);
653 // Tell the tracer which collector we've selected.
654 tracer.set_collector(collector);
657 HistogramTimerScope histogram_timer_scope(
658 (collector == SCAVENGER) ? isolate_->counters()->gc_scavenger()
659 : isolate_->counters()->gc_compactor());
660 next_gc_likely_to_collect_more =
661 PerformGarbageCollection(collector, &tracer);
664 GarbageCollectionEpilogue();
667 // Start incremental marking for the next cycle. The heap snapshot
668 // generator needs incremental marking to stay off after it aborted.
669 if (!mark_compact_collector()->abort_incremental_marking() &&
670 incremental_marking()->IsStopped() &&
671 incremental_marking()->WorthActivating() &&
672 NextGCIsLikelyToBeFull()) {
673 incremental_marking()->Start();
676 return next_gc_likely_to_collect_more;
680 void Heap::PerformScavenge() {
681 GCTracer tracer(this, NULL, NULL);
682 if (incremental_marking()->IsStopped()) {
683 PerformGarbageCollection(SCAVENGER, &tracer);
685 PerformGarbageCollection(MARK_COMPACTOR, &tracer);
690 void Heap::MoveElements(FixedArray* array,
694 if (len == 0) return;
696 ASSERT(array->map() != HEAP->fixed_cow_array_map());
697 Object** dst_objects = array->data_start() + dst_index;
698 OS::MemMove(dst_objects,
699 array->data_start() + src_index,
701 if (!InNewSpace(array)) {
702 for (int i = 0; i < len; i++) {
703 // TODO(hpayer): check store buffer for entries
704 if (InNewSpace(dst_objects[i])) {
705 RecordWrite(array->address(), array->OffsetOfElementAt(dst_index + i));
709 incremental_marking()->RecordWrites(array);
714 // Helper class for verifying the string table.
715 class StringTableVerifier : public ObjectVisitor {
717 void VisitPointers(Object** start, Object** end) {
718 // Visit all HeapObject pointers in [start, end).
719 for (Object** p = start; p < end; p++) {
720 if ((*p)->IsHeapObject()) {
721 // Check that the string is actually internalized.
722 CHECK((*p)->IsTheHole() || (*p)->IsUndefined() ||
723 (*p)->IsInternalizedString());
730 static void VerifyStringTable() {
731 StringTableVerifier verifier;
732 HEAP->string_table()->IterateElements(&verifier);
734 #endif // VERIFY_HEAP
737 static bool AbortIncrementalMarkingAndCollectGarbage(
739 AllocationSpace space,
740 const char* gc_reason = NULL) {
741 heap->mark_compact_collector()->SetFlags(Heap::kAbortIncrementalMarkingMask);
742 bool result = heap->CollectGarbage(space, gc_reason);
743 heap->mark_compact_collector()->SetFlags(Heap::kNoGCFlags);
748 void Heap::ReserveSpace(
750 Address *locations_out) {
751 bool gc_performed = true;
753 static const int kThreshold = 20;
754 while (gc_performed && counter++ < kThreshold) {
755 gc_performed = false;
756 ASSERT(NEW_SPACE == FIRST_PAGED_SPACE - 1);
757 for (int space = NEW_SPACE; space <= LAST_PAGED_SPACE; space++) {
758 if (sizes[space] != 0) {
759 MaybeObject* allocation;
760 if (space == NEW_SPACE) {
761 allocation = new_space()->AllocateRaw(sizes[space]);
763 allocation = paged_space(space)->AllocateRaw(sizes[space]);
766 if (!allocation->To<FreeListNode>(&node)) {
767 if (space == NEW_SPACE) {
768 Heap::CollectGarbage(NEW_SPACE,
769 "failed to reserve space in the new space");
771 AbortIncrementalMarkingAndCollectGarbage(
773 static_cast<AllocationSpace>(space),
774 "failed to reserve space in paged space");
779 // Mark with a free list node, in case we have a GC before
781 node->set_size(this, sizes[space]);
782 locations_out[space] = node->address();
789 // Failed to reserve the space after several attempts.
790 V8::FatalProcessOutOfMemory("Heap::ReserveSpace");
795 void Heap::EnsureFromSpaceIsCommitted() {
796 if (new_space_.CommitFromSpaceIfNeeded()) return;
798 // Committing memory to from space failed.
799 // Memory is exhausted and we will die.
800 V8::FatalProcessOutOfMemory("Committing semi space failed.");
804 void Heap::ClearJSFunctionResultCaches() {
805 if (isolate_->bootstrapper()->IsActive()) return;
807 Object* context = native_contexts_list_;
808 while (!context->IsUndefined()) {
809 // Get the caches for this context. GC can happen when the context
810 // is not fully initialized, so the caches can be undefined.
811 Object* caches_or_undefined =
812 Context::cast(context)->get(Context::JSFUNCTION_RESULT_CACHES_INDEX);
813 if (!caches_or_undefined->IsUndefined()) {
814 FixedArray* caches = FixedArray::cast(caches_or_undefined);
816 int length = caches->length();
817 for (int i = 0; i < length; i++) {
818 JSFunctionResultCache::cast(caches->get(i))->Clear();
821 // Get the next context:
822 context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
827 void Heap::ClearNormalizedMapCaches() {
828 if (isolate_->bootstrapper()->IsActive() &&
829 !incremental_marking()->IsMarking()) {
833 Object* context = native_contexts_list_;
834 while (!context->IsUndefined()) {
835 // GC can happen when the context is not fully initialized,
836 // so the cache can be undefined.
838 Context::cast(context)->get(Context::NORMALIZED_MAP_CACHE_INDEX);
839 if (!cache->IsUndefined()) {
840 NormalizedMapCache::cast(cache)->Clear();
842 context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
847 void Heap::UpdateSurvivalRateTrend(int start_new_space_size) {
848 double survival_rate =
849 (static_cast<double>(young_survivors_after_last_gc_) * 100) /
850 start_new_space_size;
852 if (survival_rate > kYoungSurvivalRateHighThreshold) {
853 high_survival_rate_period_length_++;
855 high_survival_rate_period_length_ = 0;
858 if (survival_rate < kYoungSurvivalRateLowThreshold) {
859 low_survival_rate_period_length_++;
861 low_survival_rate_period_length_ = 0;
864 double survival_rate_diff = survival_rate_ - survival_rate;
866 if (survival_rate_diff > kYoungSurvivalRateAllowedDeviation) {
867 set_survival_rate_trend(DECREASING);
868 } else if (survival_rate_diff < -kYoungSurvivalRateAllowedDeviation) {
869 set_survival_rate_trend(INCREASING);
871 set_survival_rate_trend(STABLE);
874 survival_rate_ = survival_rate;
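// Illustrative sketch (not part of V8): the trend classification above,
// written as a standalone helper with the allowed deviation passed in,
// since the concrete threshold constants are defined elsewhere.
//
//   enum Trend { DECREASING, STABLE, INCREASING };
//   static Trend ClassifyTrend(double previous_rate, double current_rate,
//                              double allowed_deviation) {
//     double diff = previous_rate - current_rate;
//     if (diff > allowed_deviation) return DECREASING;
//     if (diff < -allowed_deviation) return INCREASING;
//     return STABLE;
//   }
//
// For example, a drop from a 40% to a 20% survival rate gives diff = 20 and
// reports DECREASING for any small allowed deviation.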
877 bool Heap::PerformGarbageCollection(GarbageCollector collector,
879 bool next_gc_likely_to_collect_more = false;
881 if (collector != SCAVENGER) {
882 PROFILE(isolate_, CodeMovingGCEvent());
886 if (FLAG_verify_heap) {
892 collector == MARK_COMPACTOR ? kGCTypeMarkSweepCompact : kGCTypeScavenge;
895 GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
896 VMState<EXTERNAL> state(isolate_);
897 CallGCPrologueCallbacks(gc_type, kNoGCCallbackFlags);
900 EnsureFromSpaceIsCommitted();
902 int start_new_space_size = Heap::new_space()->SizeAsInt();
904 if (IsHighSurvivalRate()) {
905 // We speed up the incremental marker if it is running so that it
906 // does not fall behind the rate of promotion, which would cause a
907 // constantly growing old space.
908 incremental_marking()->NotifyOfHighPromotionRate();
911 if (collector == MARK_COMPACTOR) {
912 // Perform mark-sweep with optional compaction.
915 bool high_survival_rate_during_scavenges = IsHighSurvivalRate() &&
916 IsStableOrIncreasingSurvivalTrend();
918 UpdateSurvivalRateTrend(start_new_space_size);
920 size_of_old_gen_at_last_old_space_gc_ = PromotedSpaceSizeOfObjects();
922 if (high_survival_rate_during_scavenges &&
923 IsStableOrIncreasingSurvivalTrend()) {
924 // Stable high survival rates of young objects both during partial and
925 // full collection indicate that mutator is either building or modifying
926 // a structure with a long lifetime.
927 // In this case we aggressively raise old generation memory limits to
928 // postpone subsequent mark-sweep collection and thus trade memory
929 // space for the mutation speed.
930 old_gen_limit_factor_ = 2;
932 old_gen_limit_factor_ = 1;
935 old_gen_promotion_limit_ =
936 OldGenPromotionLimit(size_of_old_gen_at_last_old_space_gc_);
937 old_gen_allocation_limit_ =
938 OldGenAllocationLimit(size_of_old_gen_at_last_old_space_gc_);
940 old_gen_exhausted_ = false;
946 UpdateSurvivalRateTrend(start_new_space_size);
949 if (!new_space_high_promotion_mode_active_ &&
950 new_space_.Capacity() == new_space_.MaximumCapacity() &&
951 IsStableOrIncreasingSurvivalTrend() &&
952 IsHighSurvivalRate()) {
953 // Stable high survival rates even though young generation is at
954 // maximum capacity indicates that most objects will be promoted.
955 // To decrease scavenger pauses and final mark-sweep pauses, we
956 // have to limit maximal capacity of the young generation.
957 new_space_high_promotion_mode_active_ = true;
959 PrintPID("Limited new space size due to high promotion rate: %d MB\n",
960 new_space_.InitialCapacity() / MB);
962 // Support for global pre-tenuring uses the high promotion mode as a
963 // heuristic indicator of whether to pretenure or not, we trigger
964 // deoptimization here to take advantage of pre-tenuring as soon as
966 if (FLAG_pretenure_literals) {
967 isolate_->stack_guard()->FullDeopt();
969 } else if (new_space_high_promotion_mode_active_ &&
970 IsStableOrDecreasingSurvivalTrend() &&
971 IsLowSurvivalRate()) {
972 // Decreasing low survival rates might indicate that the above high
973 // promotion mode is over and we should allow the young generation
975 new_space_high_promotion_mode_active_ = false;
977 PrintPID("Unlimited new space size due to low promotion rate: %d MB\n",
978 new_space_.MaximumCapacity() / MB);
980 // Trigger deoptimization here to turn off pre-tenuring as soon as
982 if (FLAG_pretenure_literals) {
983 isolate_->stack_guard()->FullDeopt();
987 if (new_space_high_promotion_mode_active_ &&
988 new_space_.Capacity() > new_space_.InitialCapacity()) {
992 isolate_->counters()->objs_since_last_young()->Set(0);
994 // Callbacks that fire after this point might trigger nested GCs and
995 // restart incremental marking, the assertion can't be moved down.
996 ASSERT(collector == SCAVENGER || incremental_marking()->IsStopped());
998 gc_post_processing_depth_++;
999 { DisableAssertNoAllocation allow_allocation;
1000 GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
1001 next_gc_likely_to_collect_more =
1002 isolate_->global_handles()->PostGarbageCollectionProcessing(
1005 gc_post_processing_depth_--;
1007 // Update relocatables.
1008 Relocatable::PostGarbageCollectionProcessing();
1010 if (collector == MARK_COMPACTOR) {
1011 // Register the amount of external allocated memory.
1012 amount_of_external_allocated_memory_at_last_global_gc_ =
1013 amount_of_external_allocated_memory_;
1017 GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
1018 VMState<EXTERNAL> state(isolate_);
1019 CallGCEpilogueCallbacks(gc_type);
1023 if (FLAG_verify_heap) {
1024 VerifyStringTable();
1028 return next_gc_likely_to_collect_more;
1032 void Heap::CallGCPrologueCallbacks(GCType gc_type, GCCallbackFlags flags) {
1033 if (gc_type == kGCTypeMarkSweepCompact && global_gc_prologue_callback_) {
1034 global_gc_prologue_callback_();
1036 for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
1037 if (gc_type & gc_prologue_callbacks_[i].gc_type) {
1038 gc_prologue_callbacks_[i].callback(gc_type, flags);
1044 void Heap::CallGCEpilogueCallbacks(GCType gc_type) {
1045 for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
1046 if (gc_type & gc_epilogue_callbacks_[i].gc_type) {
1047 gc_epilogue_callbacks_[i].callback(gc_type, kNoGCCallbackFlags);
1050 if (gc_type == kGCTypeMarkSweepCompact && global_gc_epilogue_callback_) {
1051 global_gc_epilogue_callback_();
1056 void Heap::MarkCompact(GCTracer* tracer) {
1057 gc_state_ = MARK_COMPACT;
1058 LOG(isolate_, ResourceEvent("markcompact", "begin"));
1060 mark_compact_collector_.Prepare(tracer);
1063 tracer->set_full_gc_count(ms_count_);
1065 MarkCompactPrologue();
1067 mark_compact_collector_.CollectGarbage();
1069 LOG(isolate_, ResourceEvent("markcompact", "end"));
1071 gc_state_ = NOT_IN_GC;
1073 isolate_->counters()->objs_since_last_full()->Set(0);
1075 contexts_disposed_ = 0;
1077 flush_monomorphic_ics_ = false;
1081 void Heap::MarkCompactPrologue() {
1082 // At any old GC clear the keyed lookup cache to enable collection of unused
1084 isolate_->keyed_lookup_cache()->Clear();
1085 isolate_->context_slot_cache()->Clear();
1086 isolate_->descriptor_lookup_cache()->Clear();
1087 RegExpResultsCache::Clear(string_split_cache());
1088 RegExpResultsCache::Clear(regexp_multiple_cache());
1090 isolate_->compilation_cache()->MarkCompactPrologue();
1092 CompletelyClearInstanceofCache();
1094 FlushNumberStringCache();
1095 if (FLAG_cleanup_code_caches_at_gc) {
1096 polymorphic_code_cache()->set_cache(undefined_value());
1099 ClearNormalizedMapCaches();
1103 Object* Heap::FindCodeObject(Address a) {
1104 return isolate()->inner_pointer_to_code_cache()->
1105 GcSafeFindCodeForInnerPointer(a);
1109 // Helper class for copying HeapObjects
1110 class ScavengeVisitor: public ObjectVisitor {
1112 explicit ScavengeVisitor(Heap* heap) : heap_(heap) {}
1114 void VisitPointer(Object** p) { ScavengePointer(p); }
1116 void VisitPointers(Object** start, Object** end) {
1117 // Copy all HeapObject pointers in [start, end)
1118 for (Object** p = start; p < end; p++) ScavengePointer(p);
1122 void ScavengePointer(Object** p) {
1123 Object* object = *p;
1124 if (!heap_->InNewSpace(object)) return;
1125 Heap::ScavengeObject(reinterpret_cast<HeapObject**>(p),
1126 reinterpret_cast<HeapObject*>(object));
1134 // Visitor class to verify pointers in code or data space do not point into
1136 class VerifyNonPointerSpacePointersVisitor: public ObjectVisitor {
1138 void VisitPointers(Object** start, Object**end) {
1139 for (Object** current = start; current < end; current++) {
1140 if ((*current)->IsHeapObject()) {
1141 CHECK(!HEAP->InNewSpace(HeapObject::cast(*current)));
1148 static void VerifyNonPointerSpacePointers() {
1149 // Verify that there are no pointers to new space in spaces where we
1150 // do not expect them.
1151 VerifyNonPointerSpacePointersVisitor v;
1152 HeapObjectIterator code_it(HEAP->code_space());
1153 for (HeapObject* object = code_it.Next();
1154 object != NULL; object = code_it.Next())
1155 object->Iterate(&v);
1157 // The old data space is normally swept conservatively, so the iterator
1158 // does not work there and we normally skip the next bit.
1159 if (!HEAP->old_data_space()->was_swept_conservatively()) {
1160 HeapObjectIterator data_it(HEAP->old_data_space());
1161 for (HeapObject* object = data_it.Next();
1162 object != NULL; object = data_it.Next())
1163 object->Iterate(&v);
1166 #endif // VERIFY_HEAP
1169 void Heap::CheckNewSpaceExpansionCriteria() {
1170 if (new_space_.Capacity() < new_space_.MaximumCapacity() &&
1171 survived_since_last_expansion_ > new_space_.Capacity() &&
1172 !new_space_high_promotion_mode_active_) {
1173 // Grow the size of new space if there is room to grow, enough data
1174 // has survived scavenge since the last expansion and we are not in
1175 // high promotion mode.
1177 survived_since_last_expansion_ = 0;
1182 static bool IsUnscavengedHeapObject(Heap* heap, Object** p) {
1183 return heap->InNewSpace(*p) &&
1184 !HeapObject::cast(*p)->map_word().IsForwardingAddress();
1188 void Heap::ScavengeStoreBufferCallback(
1191 StoreBufferEvent event) {
1192 heap->store_buffer_rebuilder_.Callback(page, event);
1196 void StoreBufferRebuilder::Callback(MemoryChunk* page, StoreBufferEvent event) {
1197 if (event == kStoreBufferStartScanningPagesEvent) {
1198 start_of_current_page_ = NULL;
1199 current_page_ = NULL;
1200 } else if (event == kStoreBufferScanningPageEvent) {
1201 if (current_page_ != NULL) {
1202 // If this page already overflowed the store buffer during this iteration.
1203 if (current_page_->scan_on_scavenge()) {
1204 // Then we should wipe out the entries that have been added for it.
1205 store_buffer_->SetTop(start_of_current_page_);
1206 } else if (store_buffer_->Top() - start_of_current_page_ >=
1207 (store_buffer_->Limit() - store_buffer_->Top()) >> 2) {
1208 // Did we find too many pointers in the previous page? The heuristic is
1209 // that no page can take more than 1/5 of the remaining slots in the store
1211 current_page_->set_scan_on_scavenge(true);
1212 store_buffer_->SetTop(start_of_current_page_);
1214 // In this case the page we scanned took a reasonable number of slots in
1215 // the store buffer. It has now been rehabilitated and is no longer
1216 // marked scan_on_scavenge.
1217 ASSERT(!current_page_->scan_on_scavenge());
1220 start_of_current_page_ = store_buffer_->Top();
1221 current_page_ = page;
1222 } else if (event == kStoreBufferFullEvent) {
1223 // The current page overflowed the store buffer again. Wipe out its entries
1224 // in the store buffer and mark it scan-on-scavenge again. This may happen
1225 // several times while scanning.
1226 if (current_page_ == NULL) {
1227 // Store Buffer overflowed while scanning promoted objects. These are not
1228 // in any particular page, though they are likely to be clustered by the
1229 // allocation routines.
1230 store_buffer_->EnsureSpace(StoreBuffer::kStoreBufferSize);
1232 // Store Buffer overflowed while scanning a particular old space page for
1233 // pointers to new space.
1234 ASSERT(current_page_ == page);
1235 ASSERT(page != NULL);
1236 current_page_->set_scan_on_scavenge(true);
1237 ASSERT(start_of_current_page_ != store_buffer_->Top());
1238 store_buffer_->SetTop(start_of_current_page_);
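// Illustrative sketch (not part of V8): the "1/5 of the remaining slots"
// heuristic used for kStoreBufferScanningPageEvent above, as a standalone
// predicate over hypothetical pointer arguments.
//
//   static bool PageTookTooManySlots(intptr_t* page_start_top,
//                                    intptr_t* top,
//                                    intptr_t* limit) {
//     // Slots added while scanning the page, compared with a quarter of
//     // the slots that are still free now; taking at least that many means
//     // the page claimed >= 1/5 of the slots remaining when it started.
//     return (top - page_start_top) >= ((limit - top) >> 2);
//   }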
1246 void PromotionQueue::Initialize() {
1247 // Assumes that a NewSpacePage exactly fits a number of promotion queue
1248 // entries (where each is a pair of intptr_t). This allows us to simplify
1249 // the test for when to switch pages.
1250 ASSERT((Page::kPageSize - MemoryChunk::kBodyOffset) % (2 * kPointerSize)
1252 limit_ = reinterpret_cast<intptr_t*>(heap_->new_space()->ToSpaceStart());
1254 reinterpret_cast<intptr_t*>(heap_->new_space()->ToSpaceEnd());
1255 emergency_stack_ = NULL;
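// Worked example for the divisibility assumption above (illustrative
// numbers only): with 1 MB pages, 8-byte pointers and a body offset that is
// itself a multiple of 16 bytes, (Page::kPageSize - MemoryChunk::kBodyOffset)
// is a multiple of 2 * kPointerSize, so a page always holds a whole number
// of (size, object) entry pairs and an entry never straddles a page
// boundary.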
1260 void PromotionQueue::RelocateQueueHead() {
1261 ASSERT(emergency_stack_ == NULL);
1263 Page* p = Page::FromAllocationTop(reinterpret_cast<Address>(rear_));
1264 intptr_t* head_start = rear_;
1265 intptr_t* head_end =
1266 Min(front_, reinterpret_cast<intptr_t*>(p->area_end()));
1269 static_cast<int>(head_end - head_start) / kEntrySizeInWords;
1271 emergency_stack_ = new List<Entry>(2 * entries_count);
1273 while (head_start != head_end) {
1274 int size = static_cast<int>(*(head_start++));
1275 HeapObject* obj = reinterpret_cast<HeapObject*>(*(head_start++));
1276 emergency_stack_->Add(Entry(obj, size));
1282 class ScavengeWeakObjectRetainer : public WeakObjectRetainer {
1284 explicit ScavengeWeakObjectRetainer(Heap* heap) : heap_(heap) { }
1286 virtual Object* RetainAs(Object* object) {
1287 if (!heap_->InFromSpace(object)) {
1291 MapWord map_word = HeapObject::cast(object)->map_word();
1292 if (map_word.IsForwardingAddress()) {
1293 return map_word.ToForwardingAddress();
1303 void Heap::Scavenge() {
1304 RelocationLock relocation_lock(this);
1307 if (FLAG_verify_heap) VerifyNonPointerSpacePointers();
1310 gc_state_ = SCAVENGE;
1312 // Implements Cheney's copying algorithm
1313 LOG(isolate_, ResourceEvent("scavenge", "begin"));
1315 // Clear descriptor cache.
1316 isolate_->descriptor_lookup_cache()->Clear();
1318 // Used for updating survived_since_last_expansion_ at function end.
1319 intptr_t survived_watermark = PromotedSpaceSizeOfObjects();
1321 CheckNewSpaceExpansionCriteria();
1323 SelectScavengingVisitorsTable();
1325 incremental_marking()->PrepareForScavenge();
1327 paged_space(OLD_DATA_SPACE)->EnsureSweeperProgress(new_space_.Size());
1328 paged_space(OLD_POINTER_SPACE)->EnsureSweeperProgress(new_space_.Size());
1330 // Flip the semispaces. After flipping, to space is empty, from space has
1333 new_space_.ResetAllocationInfo();
1335 // We need to sweep newly copied objects which can be either in the
1336 // to space or promoted to the old generation. For to-space
1337 // objects, we treat the bottom of the to space as a queue. Newly
1338 // copied and unswept objects lie between a 'front' mark and the
1339 // allocation pointer.
1341 // Promoted objects can go into various old-generation spaces, and
1342 // can be allocated internally in the spaces (from the free list).
1343 // We treat the top of the to space as a queue of addresses of
1344 // promoted objects. The addresses of newly promoted and unswept
1345 // objects lie between a 'front' mark and a 'rear' mark that is
1346 // updated as a side effect of promoting an object.
1348 // There is guaranteed to be enough room at the top of the to space
1349 // for the addresses of promoted objects: every object promoted
1350 // frees up its size in bytes from the top of the new space, and
1351 // objects are at least one pointer in size.
1352 Address new_space_front = new_space_.ToSpaceStart();
1353 promotion_queue_.Initialize();
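// Rough picture of the to space while scavenging (illustrative sketch, not
// to scale):
//
//   ToSpaceStart()                                          ToSpaceEnd()
//   | processed copies | unprocessed copies |   free   | promotion queue |
//                      ^new_space_front     ^allocation top
//
// Unprocessed copies are consumed by advancing new_space_front towards the
// allocation top, while the promotion queue of (size, object) entries grows
// downward from the top of the to space.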
1356 store_buffer()->Clean();
1359 ScavengeVisitor scavenge_visitor(this);
1361 IterateRoots(&scavenge_visitor, VISIT_ALL_IN_SCAVENGE);
1363 // Copy objects reachable from the old generation.
1365 StoreBufferRebuildScope scope(this,
1367 &ScavengeStoreBufferCallback);
1368 store_buffer()->IteratePointersToNewSpace(&ScavengeObject);
1371 // Copy objects reachable from cells by scavenging cell values directly.
1372 HeapObjectIterator cell_iterator(cell_space_);
1373 for (HeapObject* heap_object = cell_iterator.Next();
1374 heap_object != NULL;
1375 heap_object = cell_iterator.Next()) {
1376 if (heap_object->IsJSGlobalPropertyCell()) {
1377 JSGlobalPropertyCell* cell = JSGlobalPropertyCell::cast(heap_object);
1378 Address value_address = cell->ValueAddress();
1379 scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(value_address));
1383 // Copy objects reachable from the code flushing candidates list.
1384 MarkCompactCollector* collector = mark_compact_collector();
1385 if (collector->is_code_flushing_enabled()) {
1386 collector->code_flusher()->IteratePointersToFromSpace(&scavenge_visitor);
1389 // Scavenge object reachable from the native contexts list directly.
1390 scavenge_visitor.VisitPointer(BitCast<Object**>(&native_contexts_list_));
1392 new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
1394 while (isolate()->global_handles()->IterateObjectGroups(
1395 &scavenge_visitor, &IsUnscavengedHeapObject)) {
1396 new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
1398 isolate()->global_handles()->RemoveObjectGroups();
1399 isolate()->global_handles()->RemoveImplicitRefGroups();
1401 isolate_->global_handles()->IdentifyNewSpaceWeakIndependentHandles(
1402 &IsUnscavengedHeapObject);
1403 isolate_->global_handles()->IterateNewSpaceWeakIndependentRoots(
1405 new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
1407 UpdateNewSpaceReferencesInExternalStringTable(
1408 &UpdateNewSpaceReferenceInExternalStringTableEntry);
1410 error_object_list_.UpdateReferencesInNewSpace(this);
1412 promotion_queue_.Destroy();
1414 if (!FLAG_watch_ic_patching) {
1415 isolate()->runtime_profiler()->UpdateSamplesAfterScavenge();
1417 incremental_marking()->UpdateMarkingDequeAfterScavenge();
1419 ScavengeWeakObjectRetainer weak_object_retainer(this);
1420 ProcessWeakReferences(&weak_object_retainer);
1422 ASSERT(new_space_front == new_space_.top());
1425 new_space_.set_age_mark(new_space_.top());
1427 new_space_.LowerInlineAllocationLimit(
1428 new_space_.inline_allocation_limit_step());
1430 // Update how much has survived scavenge.
1431 IncrementYoungSurvivorsCounter(static_cast<int>(
1432 (PromotedSpaceSizeOfObjects() - survived_watermark) + new_space_.Size()));
1434 LOG(isolate_, ResourceEvent("scavenge", "end"));
1436 gc_state_ = NOT_IN_GC;
1438 scavenges_since_last_idle_round_++;
1442 String* Heap::UpdateNewSpaceReferenceInExternalStringTableEntry(Heap* heap,
1444 MapWord first_word = HeapObject::cast(*p)->map_word();
1446 if (!first_word.IsForwardingAddress()) {
1447 // Unreachable external string can be finalized.
1448 heap->FinalizeExternalString(String::cast(*p));
1452 // String is still reachable.
1453 return String::cast(first_word.ToForwardingAddress());
1457 void Heap::UpdateNewSpaceReferencesInExternalStringTable(
1458 ExternalStringTableUpdaterCallback updater_func) {
1460 if (FLAG_verify_heap) {
1461 external_string_table_.Verify();
1465 if (external_string_table_.new_space_strings_.is_empty()) return;
1467 Object** start = &external_string_table_.new_space_strings_[0];
1468 Object** end = start + external_string_table_.new_space_strings_.length();
1469 Object** last = start;
1471 for (Object** p = start; p < end; ++p) {
1472 ASSERT(InFromSpace(*p));
1473 String* target = updater_func(this, p);
1475 if (target == NULL) continue;
1477 ASSERT(target->IsExternalString());
1479 if (InNewSpace(target)) {
1480 // String is still in new space. Update the table entry.
1484 // String got promoted. Move it to the old string list.
1485 external_string_table_.AddOldString(target);
1489 ASSERT(last <= end);
1490 external_string_table_.ShrinkNewStrings(static_cast<int>(last - start));
1494 void Heap::UpdateReferencesInExternalStringTable(
1495 ExternalStringTableUpdaterCallback updater_func) {
1497 // Update old space string references.
1498 if (external_string_table_.old_space_strings_.length() > 0) {
1499 Object** start = &external_string_table_.old_space_strings_[0];
1500 Object** end = start + external_string_table_.old_space_strings_.length();
1501 for (Object** p = start; p < end; ++p) *p = updater_func(this, p);
1504 UpdateNewSpaceReferencesInExternalStringTable(updater_func);
1508 static Object* ProcessFunctionWeakReferences(Heap* heap,
1510 WeakObjectRetainer* retainer,
1511 bool record_slots) {
1512 Object* undefined = heap->undefined_value();
1513 Object* head = undefined;
1514 JSFunction* tail = NULL;
1515 Object* candidate = function;
1516 while (candidate != undefined) {
1517 // Check whether to keep the candidate in the list.
1518 JSFunction* candidate_function = reinterpret_cast<JSFunction*>(candidate);
1519 Object* retain = retainer->RetainAs(candidate);
1520 if (retain != NULL) {
1521 if (head == undefined) {
1522 // First element in the list.
1525 // Subsequent elements in the list.
1526 ASSERT(tail != NULL);
1527 tail->set_next_function_link(retain);
1529 Object** next_function =
1530 HeapObject::RawField(tail, JSFunction::kNextFunctionLinkOffset);
1531 heap->mark_compact_collector()->RecordSlot(
1532 next_function, next_function, retain);
1535 // Retained function is new tail.
1536 candidate_function = reinterpret_cast<JSFunction*>(retain);
1537 tail = candidate_function;
1539 ASSERT(retain->IsUndefined() || retain->IsJSFunction());
1541 if (retain == undefined) break;
1544 // Move to next element in the list.
1545 candidate = candidate_function->next_function_link();
1548 // Terminate the list if there is one or more elements.
1550 tail->set_next_function_link(undefined);
1557 void Heap::ProcessWeakReferences(WeakObjectRetainer* retainer) {
1558 Object* undefined = undefined_value();
1559 Object* head = undefined;
1560 Context* tail = NULL;
1561 Object* candidate = native_contexts_list_;
1563 // We don't record weak slots during marking or scavenges.
1564 // Instead we do it once when we complete mark-compact cycle.
1565 // Note that write barrier has no effect if we are already in the middle of
1566 // compacting mark-sweep cycle and we have to record slots manually.
1568 gc_state() == MARK_COMPACT &&
1569 mark_compact_collector()->is_compacting();
1571 while (candidate != undefined) {
1572 // Check whether to keep the candidate in the list.
1573 Context* candidate_context = reinterpret_cast<Context*>(candidate);
1574 Object* retain = retainer->RetainAs(candidate);
1575 if (retain != NULL) {
1576 if (head == undefined) {
1577 // First element in the list.
1580 // Subsequent elements in the list.
1581 ASSERT(tail != NULL);
1582 tail->set_unchecked(this,
1583 Context::NEXT_CONTEXT_LINK,
1585 UPDATE_WRITE_BARRIER);
1588 Object** next_context =
1589 HeapObject::RawField(
1590 tail, FixedArray::SizeFor(Context::NEXT_CONTEXT_LINK));
1591 mark_compact_collector()->RecordSlot(
1592 next_context, next_context, retain);
1595 // Retained context is new tail.
1596 candidate_context = reinterpret_cast<Context*>(retain);
1597 tail = candidate_context;
1599 if (retain == undefined) break;
1601 // Process the weak list of optimized functions for the context.
1602 Object* function_list_head =
1603 ProcessFunctionWeakReferences(
1605 candidate_context->get(Context::OPTIMIZED_FUNCTIONS_LIST),
1608 candidate_context->set_unchecked(this,
1609 Context::OPTIMIZED_FUNCTIONS_LIST,
1611 UPDATE_WRITE_BARRIER);
1613 Object** optimized_functions =
1614 HeapObject::RawField(
1615 tail, FixedArray::SizeFor(Context::OPTIMIZED_FUNCTIONS_LIST));
1616 mark_compact_collector()->RecordSlot(
1617 optimized_functions, optimized_functions, function_list_head);
1621 // Move to next element in the list.
1622 candidate = candidate_context->get(Context::NEXT_CONTEXT_LINK);
1625 // Terminate the list if there is one or more elements.
1627 tail->set_unchecked(this,
1628 Context::NEXT_CONTEXT_LINK,
1629 Heap::undefined_value(),
1630 UPDATE_WRITE_BARRIER);
1633 // Update the head of the list of contexts.
1634 native_contexts_list_ = head;
1638 void Heap::VisitExternalResources(v8::ExternalResourceVisitor* visitor) {
1639 AssertNoAllocation no_allocation;
1641 // Both the external string table and the string table may contain
1642 // external strings, but neither lists them exhaustively, nor is the
1643 // intersection set empty. Therefore we iterate over the external string
1644 // table first, ignoring internalized strings, and then over the
1645 // internalized string table.
1647 class ExternalStringTableVisitorAdapter : public ObjectVisitor {
1649 explicit ExternalStringTableVisitorAdapter(
1650 v8::ExternalResourceVisitor* visitor) : visitor_(visitor) {}
1651 virtual void VisitPointers(Object** start, Object** end) {
1652 for (Object** p = start; p < end; p++) {
1653 // Visit non-internalized external strings,
1654 // since internalized strings are listed in the string table.
1655 if (!(*p)->IsInternalizedString()) {
1656 ASSERT((*p)->IsExternalString());
1657 visitor_->VisitExternalString(Utils::ToLocal(
1658 Handle<String>(String::cast(*p))));
1663 v8::ExternalResourceVisitor* visitor_;
1664 } external_string_table_visitor(visitor);
1666 external_string_table_.Iterate(&external_string_table_visitor);
1668 class StringTableVisitorAdapter : public ObjectVisitor {
1670 explicit StringTableVisitorAdapter(
1671 v8::ExternalResourceVisitor* visitor) : visitor_(visitor) {}
1672 virtual void VisitPointers(Object** start, Object** end) {
1673 for (Object** p = start; p < end; p++) {
1674 if ((*p)->IsExternalString()) {
1675 ASSERT((*p)->IsInternalizedString());
1676 visitor_->VisitExternalString(Utils::ToLocal(
1677 Handle<String>(String::cast(*p))));
1682 v8::ExternalResourceVisitor* visitor_;
1683 } string_table_visitor(visitor);
1685 string_table()->IterateElements(&string_table_visitor);
1689 class NewSpaceScavenger : public StaticNewSpaceVisitor<NewSpaceScavenger> {
1691 static inline void VisitPointer(Heap* heap, Object** p) {
1692 Object* object = *p;
1693 if (!heap->InNewSpace(object)) return;
1694 Heap::ScavengeObject(reinterpret_cast<HeapObject**>(p),
1695 reinterpret_cast<HeapObject*>(object));
1700 Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
1701 Address new_space_front) {
1703 SemiSpace::AssertValidRange(new_space_front, new_space_.top());
1704 // The addresses new_space_front and new_space_.top() define a
1705 // queue of unprocessed copied objects. Process them until the
1707 while (new_space_front != new_space_.top()) {
1708 if (!NewSpacePage::IsAtEnd(new_space_front)) {
1709 HeapObject* object = HeapObject::FromAddress(new_space_front);
1711 NewSpaceScavenger::IterateBody(object->map(), object);
1714 NewSpacePage::FromLimit(new_space_front)->next_page()->area_start();
1718 // Promote and process all the to-be-promoted objects.
1720 StoreBufferRebuildScope scope(this,
1722 &ScavengeStoreBufferCallback);
1723 while (!promotion_queue()->is_empty()) {
1726 promotion_queue()->remove(&target, &size);
1728 // Promoted object might be already partially visited
1729 // during old space pointer iteration. Thus we search specifically
1730 // for pointers to from semispace instead of looking for pointers
1732 ASSERT(!target->IsMap());
1733 IterateAndMarkPointersToFromSpace(target->address(),
1734 target->address() + size,
1739 // Take another spin if there are now unswept objects in new space
1740 // (there are currently no more unswept promoted objects).
1741 } while (new_space_front != new_space_.top());
1743 return new_space_front;
1747 STATIC_ASSERT((FixedDoubleArray::kHeaderSize & kDoubleAlignmentMask) == 0);
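// Worked example for EnsureDoubleAligned below (assuming a 32-bit build
// where kPointerSize == 4 and kDoubleAlignment == 8): an allocation whose
// address ends in ...4 gets a one-word filler at that address and the
// object itself starts kPointerSize later at ...8, while an allocation
// already ending in ...0 or ...8 keeps its address and the spare word is
// turned into a filler at the end of the allocation.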
1750 INLINE(static HeapObject* EnsureDoubleAligned(Heap* heap,
1754 static HeapObject* EnsureDoubleAligned(Heap* heap,
1757 if ((OffsetFrom(object->address()) & kDoubleAlignmentMask) != 0) {
1758 heap->CreateFillerObjectAt(object->address(), kPointerSize);
1759 return HeapObject::FromAddress(object->address() + kPointerSize);
1761 heap->CreateFillerObjectAt(object->address() + size - kPointerSize,
1768 enum LoggingAndProfiling {
1769 LOGGING_AND_PROFILING_ENABLED,
1770 LOGGING_AND_PROFILING_DISABLED
1774 enum MarksHandling { TRANSFER_MARKS, IGNORE_MARKS };
1777 template<MarksHandling marks_handling,
1778 LoggingAndProfiling logging_and_profiling_mode>
1779 class ScavengingVisitor : public StaticVisitorBase {
1781 static void Initialize() {
1782 table_.Register(kVisitSeqOneByteString, &EvacuateSeqOneByteString);
1783 table_.Register(kVisitSeqTwoByteString, &EvacuateSeqTwoByteString);
1784 table_.Register(kVisitShortcutCandidate, &EvacuateShortcutCandidate);
1785 table_.Register(kVisitByteArray, &EvacuateByteArray);
1786 table_.Register(kVisitFixedArray, &EvacuateFixedArray);
1787 table_.Register(kVisitFixedDoubleArray, &EvacuateFixedDoubleArray);
1789 table_.Register(kVisitNativeContext,
1790 &ObjectEvacuationStrategy<POINTER_OBJECT>::
1791 template VisitSpecialized<Context::kSize>);
1793 table_.Register(kVisitConsString,
1794 &ObjectEvacuationStrategy<POINTER_OBJECT>::
1795 template VisitSpecialized<ConsString::kSize>);
1797 table_.Register(kVisitSlicedString,
1798 &ObjectEvacuationStrategy<POINTER_OBJECT>::
1799 template VisitSpecialized<SlicedString::kSize>);
1801 table_.Register(kVisitSymbol,
1802 &ObjectEvacuationStrategy<POINTER_OBJECT>::
1803 template VisitSpecialized<Symbol::kSize>);
1805 table_.Register(kVisitSharedFunctionInfo,
1806 &ObjectEvacuationStrategy<POINTER_OBJECT>::
1807 template VisitSpecialized<SharedFunctionInfo::kSize>);
1809 table_.Register(kVisitJSWeakMap,
1810 &ObjectEvacuationStrategy<POINTER_OBJECT>::
1813 table_.Register(kVisitJSRegExp,
1814 &ObjectEvacuationStrategy<POINTER_OBJECT>::
1817 if (marks_handling == IGNORE_MARKS) {
1818 table_.Register(kVisitJSFunction,
1819 &ObjectEvacuationStrategy<POINTER_OBJECT>::
1820 template VisitSpecialized<JSFunction::kSize>);
1822 table_.Register(kVisitJSFunction, &EvacuateJSFunction);
1825 table_.RegisterSpecializations<ObjectEvacuationStrategy<DATA_OBJECT>,
1827 kVisitDataObjectGeneric>();
1829 table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>,
1831 kVisitJSObjectGeneric>();
1833 table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>,
1835 kVisitStructGeneric>();
1838 static VisitorDispatchTable<ScavengingCallback>* GetTable() {
1843 enum ObjectContents { DATA_OBJECT, POINTER_OBJECT };
1844 enum SizeRestriction { SMALL, UNKNOWN_SIZE };
1846 static void RecordCopiedObject(Heap* heap, HeapObject* obj) {
1847 bool should_record = false;
1849 should_record = FLAG_heap_stats;
1851 should_record = should_record || FLAG_log_gc;
1852 if (should_record) {
1853 if (heap->new_space()->Contains(obj)) {
1854 heap->new_space()->RecordAllocation(obj);
1856 heap->new_space()->RecordPromotion(obj);
1861 // Helper function used by the evacuation code to copy a source object to a
1862 // newly allocated target object and to record a forwarding address in the
1863 // map word of the source object.
1864 INLINE(static void MigrateObject(Heap* heap,
1868 // Copy the content of source to target.
1869 heap->CopyBlock(target->address(), source->address(), size);
1871 // Set the forwarding address.
1872 source->set_map_word(MapWord::FromForwardingAddress(target));
1874 if (logging_and_profiling_mode == LOGGING_AND_PROFILING_ENABLED) {
1875 // Update NewSpace stats if necessary.
1876 RecordCopiedObject(heap, target);
1877 HEAP_PROFILE(heap, ObjectMoveEvent(source->address(), target->address()));
1878 Isolate* isolate = heap->isolate();
1879 if (isolate->logger()->is_logging_code_events() ||
1880 isolate->cpu_profiler()->is_profiling()) {
1881 if (target->IsSharedFunctionInfo()) {
1882 PROFILE(isolate, SharedFunctionInfoMoveEvent(
1883 source->address(), target->address()));
1888 if (marks_handling == TRANSFER_MARKS) {
1889 if (Marking::TransferColor(source, target)) {
1890 MemoryChunk::IncrementLiveBytesFromGC(target->address(), size);
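// A minimal sketch of the forwarding scheme used by MigrateObject above,
// with hypothetical local types rather than real V8 classes: the payload is
// copied, then the first word of the old copy is overwritten with the
// address of the new copy so later visitors can follow it.
struct SketchHeapCell {
  void* first_word;  // Normally the map; becomes the forwarding address.
  int body;          // Stands in for the rest of the object body.
};

static inline void SketchMigrate(SketchHeapCell* source,
                                 SketchHeapCell* target) {
  // Copy the content of source to target.
  target->first_word = source->first_word;
  target->body = source->body;
  // Leave a forwarding address behind in the old copy.
  source->first_word = target;
}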
1896 template<ObjectContents object_contents,
1897 SizeRestriction size_restriction,
1899 static inline void EvacuateObject(Map* map,
1903 SLOW_ASSERT((size_restriction != SMALL) ||
1904 (object_size <= Page::kMaxNonCodeHeapObjectSize));
1905 SLOW_ASSERT(object->Size() == object_size);
1907 int allocation_size = object_size;
1908 if (alignment != kObjectAlignment) {
1909 ASSERT(alignment == kDoubleAlignment);
1910 allocation_size += kPointerSize;
1913 Heap* heap = map->GetHeap();
1914 if (heap->ShouldBePromoted(object->address(), object_size)) {
1915 MaybeObject* maybe_result;
1917 if ((size_restriction != SMALL) &&
1918 (allocation_size > Page::kMaxNonCodeHeapObjectSize)) {
1919 maybe_result = heap->lo_space()->AllocateRaw(allocation_size,
1922 if (object_contents == DATA_OBJECT) {
1923 maybe_result = heap->old_data_space()->AllocateRaw(allocation_size);
1926 heap->old_pointer_space()->AllocateRaw(allocation_size);
1930 Object* result = NULL; // Initialization to please compiler.
1931 if (maybe_result->ToObject(&result)) {
1932 HeapObject* target = HeapObject::cast(result);
1934 if (alignment != kObjectAlignment) {
1935 target = EnsureDoubleAligned(heap, target, allocation_size);
1938 // Order is important: slot might be inside of the target if target
1939 // was allocated over a dead object and slot comes from the store buffer.
1942 MigrateObject(heap, object, target, object_size);
1944 if (object_contents == POINTER_OBJECT) {
1945 if (map->instance_type() == JS_FUNCTION_TYPE) {
1946 heap->promotion_queue()->insert(
1947 target, JSFunction::kNonWeakFieldsEndOffset);
1949 heap->promotion_queue()->insert(target, object_size);
1953 heap->tracer()->increment_promoted_objects_size(object_size);
1957 MaybeObject* allocation = heap->new_space()->AllocateRaw(allocation_size);
1958 heap->promotion_queue()->SetNewLimit(heap->new_space()->top());
1959 Object* result = allocation->ToObjectUnchecked();
1960 HeapObject* target = HeapObject::cast(result);
1962 if (alignment != kObjectAlignment) {
1963 target = EnsureDoubleAligned(heap, target, allocation_size);
1966 // Order is important: slot might be inside of the target if target
1967 // was allocated over a dead object and slot comes from the store buffer.
1970 MigrateObject(heap, object, target, object_size);
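// A minimal sketch of the space selection made by EvacuateObject above; the
// enum and helper names are hypothetical and local to this sketch. Objects
// that should not be promoted stay in new space, oversized promoted objects
// go to the large-object space, data-only objects to old data space, and all
// other promoted objects to old pointer space.
enum SketchTargetSpace {
  SKETCH_NEW_SPACE,
  SKETCH_OLD_DATA_SPACE,
  SKETCH_OLD_POINTER_SPACE,
  SKETCH_LO_SPACE
};

static inline SketchTargetSpace SketchSelectSpace(bool should_promote,
                                                  bool is_data_object,
                                                  bool exceeds_page_limit) {
  if (!should_promote) return SKETCH_NEW_SPACE;
  if (exceeds_page_limit) return SKETCH_LO_SPACE;
  return is_data_object ? SKETCH_OLD_DATA_SPACE : SKETCH_OLD_POINTER_SPACE;
}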
1975 static inline void EvacuateJSFunction(Map* map,
1977 HeapObject* object) {
1978 ObjectEvacuationStrategy<POINTER_OBJECT>::
1979 template VisitSpecialized<JSFunction::kSize>(map, slot, object);
1981 HeapObject* target = *slot;
1982 MarkBit mark_bit = Marking::MarkBitFrom(target);
1983 if (Marking::IsBlack(mark_bit)) {
1984 // This object is black and it might not be rescanned by the marker.
1985 // We should explicitly record code entry slot for compaction because
1986 // promotion queue processing (IterateAndMarkPointersToFromSpace) will
1987 // miss it as it is not HeapObject-tagged.
1988 Address code_entry_slot =
1989 target->address() + JSFunction::kCodeEntryOffset;
1990 Code* code = Code::cast(Code::GetObjectFromEntryAddress(code_entry_slot));
1991 map->GetHeap()->mark_compact_collector()->
1992 RecordCodeEntrySlot(code_entry_slot, code);
1997 static inline void EvacuateFixedArray(Map* map,
1999 HeapObject* object) {
2000 int object_size = FixedArray::BodyDescriptor::SizeOf(map, object);
2001 EvacuateObject<POINTER_OBJECT, UNKNOWN_SIZE, kObjectAlignment>(map,
2008 static inline void EvacuateFixedDoubleArray(Map* map,
2010 HeapObject* object) {
2011 int length = reinterpret_cast<FixedDoubleArray*>(object)->length();
2012 int object_size = FixedDoubleArray::SizeFor(length);
2013 EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE, kDoubleAlignment>(
2021 static inline void EvacuateByteArray(Map* map,
2023 HeapObject* object) {
2024 int object_size = reinterpret_cast<ByteArray*>(object)->ByteArraySize();
2025 EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE, kObjectAlignment>(
2026 map, slot, object, object_size);
2030 static inline void EvacuateSeqOneByteString(Map* map,
2032 HeapObject* object) {
2033 int object_size = SeqOneByteString::cast(object)->
2034 SeqOneByteStringSize(map->instance_type());
2035 EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE, kObjectAlignment>(
2036 map, slot, object, object_size);
2040 static inline void EvacuateSeqTwoByteString(Map* map,
2042 HeapObject* object) {
2043 int object_size = SeqTwoByteString::cast(object)->
2044 SeqTwoByteStringSize(map->instance_type());
2045 EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE, kObjectAlignment>(
2046 map, slot, object, object_size);
2050 static inline bool IsShortcutCandidate(int type) {
2051 return ((type & kShortcutTypeMask) == kShortcutTypeTag);
2054 static inline void EvacuateShortcutCandidate(Map* map,
2056 HeapObject* object) {
2057 ASSERT(IsShortcutCandidate(map->instance_type()));
2059 Heap* heap = map->GetHeap();
2061 if (marks_handling == IGNORE_MARKS &&
2062 ConsString::cast(object)->unchecked_second() ==
2063 heap->empty_string()) {
2065 HeapObject::cast(ConsString::cast(object)->unchecked_first());
2069 if (!heap->InNewSpace(first)) {
2070 object->set_map_word(MapWord::FromForwardingAddress(first));
2074 MapWord first_word = first->map_word();
2075 if (first_word.IsForwardingAddress()) {
2076 HeapObject* target = first_word.ToForwardingAddress();
2079 object->set_map_word(MapWord::FromForwardingAddress(target));
2083 heap->DoScavengeObject(first->map(), slot, first);
2084 object->set_map_word(MapWord::FromForwardingAddress(*slot));
2088 int object_size = ConsString::kSize;
2089 EvacuateObject<POINTER_OBJECT, SMALL, kObjectAlignment>(
2090 map, slot, object, object_size);
2093 template<ObjectContents object_contents>
2094 class ObjectEvacuationStrategy {
2096 template<int object_size>
2097 static inline void VisitSpecialized(Map* map,
2099 HeapObject* object) {
2100 EvacuateObject<object_contents, SMALL, kObjectAlignment>(
2101 map, slot, object, object_size);
2104 static inline void Visit(Map* map,
2106 HeapObject* object) {
2107 int object_size = map->instance_size();
2108 EvacuateObject<object_contents, SMALL, kObjectAlignment>(
2109 map, slot, object, object_size);
2113 static VisitorDispatchTable<ScavengingCallback> table_;
2117 template<MarksHandling marks_handling,
2118 LoggingAndProfiling logging_and_profiling_mode>
2119 VisitorDispatchTable<ScavengingCallback>
2120 ScavengingVisitor<marks_handling, logging_and_profiling_mode>::table_;
2123 static void InitializeScavengingVisitorsTables() {
2124 ScavengingVisitor<TRANSFER_MARKS,
2125 LOGGING_AND_PROFILING_DISABLED>::Initialize();
2126 ScavengingVisitor<IGNORE_MARKS, LOGGING_AND_PROFILING_DISABLED>::Initialize();
2127 ScavengingVisitor<TRANSFER_MARKS,
2128 LOGGING_AND_PROFILING_ENABLED>::Initialize();
2129 ScavengingVisitor<IGNORE_MARKS, LOGGING_AND_PROFILING_ENABLED>::Initialize();
2133 void Heap::SelectScavengingVisitorsTable() {
2134 bool logging_and_profiling =
2135 isolate()->logger()->is_logging() ||
2136 isolate()->cpu_profiler()->is_profiling() ||
2137 (isolate()->heap_profiler() != NULL &&
2138 isolate()->heap_profiler()->is_profiling());
2140 if (!incremental_marking()->IsMarking()) {
2141 if (!logging_and_profiling) {
2142 scavenging_visitors_table_.CopyFrom(
2143 ScavengingVisitor<IGNORE_MARKS,
2144 LOGGING_AND_PROFILING_DISABLED>::GetTable());
2146 scavenging_visitors_table_.CopyFrom(
2147 ScavengingVisitor<IGNORE_MARKS,
2148 LOGGING_AND_PROFILING_ENABLED>::GetTable());
2151 if (!logging_and_profiling) {
2152 scavenging_visitors_table_.CopyFrom(
2153 ScavengingVisitor<TRANSFER_MARKS,
2154 LOGGING_AND_PROFILING_DISABLED>::GetTable());
2156 scavenging_visitors_table_.CopyFrom(
2157 ScavengingVisitor<TRANSFER_MARKS,
2158 LOGGING_AND_PROFILING_ENABLED>::GetTable());
2161 if (incremental_marking()->IsCompacting()) {
2162 // When compacting, forbid short-circuiting of cons strings.
2163 // The scavenging code relies on the fact that new space objects
2164 // can't be evacuated into an evacuation candidate, but
2165 // short-circuiting violates this assumption.
2166 scavenging_visitors_table_.Register(
2167 StaticVisitorBase::kVisitShortcutCandidate,
2168 scavenging_visitors_table_.GetVisitorById(
2169 StaticVisitorBase::kVisitConsString));
2175 void Heap::ScavengeObjectSlow(HeapObject** p, HeapObject* object) {
2176 SLOW_ASSERT(HEAP->InFromSpace(object));
2177 MapWord first_word = object->map_word();
2178 SLOW_ASSERT(!first_word.IsForwardingAddress());
2179 Map* map = first_word.ToMap();
2180 map->GetHeap()->DoScavengeObject(map, p, object);
2184 MaybeObject* Heap::AllocatePartialMap(InstanceType instance_type,
2185 int instance_size) {
2187 MaybeObject* maybe_result = AllocateRawMap();
2188 if (!maybe_result->ToObject(&result)) return maybe_result;
2190 // Map::cast cannot be used due to uninitialized map field.
2191 reinterpret_cast<Map*>(result)->set_map(raw_unchecked_meta_map());
2192 reinterpret_cast<Map*>(result)->set_instance_type(instance_type);
2193 reinterpret_cast<Map*>(result)->set_instance_size(instance_size);
2194 reinterpret_cast<Map*>(result)->set_visitor_id(
2195 StaticVisitorBase::GetVisitorId(instance_type, instance_size));
2196 reinterpret_cast<Map*>(result)->set_inobject_properties(0);
2197 reinterpret_cast<Map*>(result)->set_pre_allocated_property_fields(0);
2198 reinterpret_cast<Map*>(result)->set_unused_property_fields(0);
2199 reinterpret_cast<Map*>(result)->set_bit_field(0);
2200 reinterpret_cast<Map*>(result)->set_bit_field2(0);
2201 int bit_field3 = Map::EnumLengthBits::encode(Map::kInvalidEnumCache) |
2202 Map::OwnsDescriptors::encode(true);
2203 reinterpret_cast<Map*>(result)->set_bit_field3(bit_field3);
2208 MaybeObject* Heap::AllocateMap(InstanceType instance_type,
2210 ElementsKind elements_kind) {
2212 MaybeObject* maybe_result = AllocateRawMap();
2213 if (!maybe_result->To(&result)) return maybe_result;
2215 Map* map = reinterpret_cast<Map*>(result);
2216 map->set_map_no_write_barrier(meta_map());
2217 map->set_instance_type(instance_type);
2218 map->set_visitor_id(
2219 StaticVisitorBase::GetVisitorId(instance_type, instance_size));
2220 map->set_prototype(null_value(), SKIP_WRITE_BARRIER);
2221 map->set_constructor(null_value(), SKIP_WRITE_BARRIER);
2222 map->set_instance_size(instance_size);
2223 map->set_inobject_properties(0);
2224 map->set_pre_allocated_property_fields(0);
2225 map->set_code_cache(empty_fixed_array(), SKIP_WRITE_BARRIER);
2226 map->set_dependent_code(DependentCode::cast(empty_fixed_array()),
2227 SKIP_WRITE_BARRIER);
2228 map->init_back_pointer(undefined_value());
2229 map->set_unused_property_fields(0);
2230 map->set_instance_descriptors(empty_descriptor_array());
2231 map->set_bit_field(0);
2232 map->set_bit_field2(1 << Map::kIsExtensible);
2233 int bit_field3 = Map::EnumLengthBits::encode(Map::kInvalidEnumCache) |
2234 Map::OwnsDescriptors::encode(true);
2235 map->set_bit_field3(bit_field3);
2236 map->set_elements_kind(elements_kind);
2242 MaybeObject* Heap::AllocateCodeCache() {
2243 CodeCache* code_cache;
2244 { MaybeObject* maybe_code_cache = AllocateStruct(CODE_CACHE_TYPE);
2245 if (!maybe_code_cache->To(&code_cache)) return maybe_code_cache;
2247 code_cache->set_default_cache(empty_fixed_array(), SKIP_WRITE_BARRIER);
2248 code_cache->set_normal_type_cache(undefined_value(), SKIP_WRITE_BARRIER);
2253 MaybeObject* Heap::AllocatePolymorphicCodeCache() {
2254 return AllocateStruct(POLYMORPHIC_CODE_CACHE_TYPE);
2258 MaybeObject* Heap::AllocateAccessorPair() {
2259 AccessorPair* accessors;
2260 { MaybeObject* maybe_accessors = AllocateStruct(ACCESSOR_PAIR_TYPE);
2261 if (!maybe_accessors->To(&accessors)) return maybe_accessors;
2263 accessors->set_getter(the_hole_value(), SKIP_WRITE_BARRIER);
2264 accessors->set_setter(the_hole_value(), SKIP_WRITE_BARRIER);
2269 MaybeObject* Heap::AllocateTypeFeedbackInfo() {
2270 TypeFeedbackInfo* info;
2271 { MaybeObject* maybe_info = AllocateStruct(TYPE_FEEDBACK_INFO_TYPE);
2272 if (!maybe_info->To(&info)) return maybe_info;
2274 info->initialize_storage();
2275 info->set_type_feedback_cells(TypeFeedbackCells::cast(empty_fixed_array()),
2276 SKIP_WRITE_BARRIER);
2281 MaybeObject* Heap::AllocateAliasedArgumentsEntry(int aliased_context_slot) {
2282 AliasedArgumentsEntry* entry;
2283 { MaybeObject* maybe_entry = AllocateStruct(ALIASED_ARGUMENTS_ENTRY_TYPE);
2284 if (!maybe_entry->To(&entry)) return maybe_entry;
2286 entry->set_aliased_context_slot(aliased_context_slot);
2291 const Heap::StringTypeTable Heap::string_type_table[] = {
2292 #define STRING_TYPE_ELEMENT(type, size, name, camel_name) \
2293 {type, size, k##camel_name##MapRootIndex},
2294 STRING_TYPE_LIST(STRING_TYPE_ELEMENT)
2295 #undef STRING_TYPE_ELEMENT
2299 const Heap::ConstantStringTable Heap::constant_string_table[] = {
2300 #define CONSTANT_STRING_ELEMENT(name, contents) \
2301 {contents, k##name##RootIndex},
2302 INTERNALIZED_STRING_LIST(CONSTANT_STRING_ELEMENT)
2303 #undef CONSTANT_STRING_ELEMENT
2307 const Heap::StructTable Heap::struct_table[] = {
2308 #define STRUCT_TABLE_ELEMENT(NAME, Name, name) \
2309 { NAME##_TYPE, Name::kSize, k##Name##MapRootIndex },
2310 STRUCT_LIST(STRUCT_TABLE_ELEMENT)
2311 #undef STRUCT_TABLE_ELEMENT
2315 bool Heap::CreateInitialMaps() {
2317 { MaybeObject* maybe_obj = AllocatePartialMap(MAP_TYPE, Map::kSize);
2318 if (!maybe_obj->ToObject(&obj)) return false;
2320 // Map::cast cannot be used due to uninitialized map field.
2321 Map* new_meta_map = reinterpret_cast<Map*>(obj);
2322 set_meta_map(new_meta_map);
2323 new_meta_map->set_map(new_meta_map);
2325 { MaybeObject* maybe_obj =
2326 AllocatePartialMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2327 if (!maybe_obj->ToObject(&obj)) return false;
2329 set_fixed_array_map(Map::cast(obj));
2331 { MaybeObject* maybe_obj = AllocatePartialMap(ODDBALL_TYPE, Oddball::kSize);
2332 if (!maybe_obj->ToObject(&obj)) return false;
2334 set_oddball_map(Map::cast(obj));
2336 // Allocate the empty array.
2337 { MaybeObject* maybe_obj = AllocateEmptyFixedArray();
2338 if (!maybe_obj->ToObject(&obj)) return false;
2340 set_empty_fixed_array(FixedArray::cast(obj));
2342 { MaybeObject* maybe_obj = Allocate(oddball_map(), OLD_POINTER_SPACE);
2343 if (!maybe_obj->ToObject(&obj)) return false;
2345 set_null_value(Oddball::cast(obj));
2346 Oddball::cast(obj)->set_kind(Oddball::kNull);
2348 { MaybeObject* maybe_obj = Allocate(oddball_map(), OLD_POINTER_SPACE);
2349 if (!maybe_obj->ToObject(&obj)) return false;
2351 set_undefined_value(Oddball::cast(obj));
2352 Oddball::cast(obj)->set_kind(Oddball::kUndefined);
2353 ASSERT(!InNewSpace(undefined_value()));
2355 // Allocate the empty descriptor array.
2356 { MaybeObject* maybe_obj = AllocateEmptyFixedArray();
2357 if (!maybe_obj->ToObject(&obj)) return false;
2359 set_empty_descriptor_array(DescriptorArray::cast(obj));
2361 // Fix the instance_descriptors for the existing maps.
2362 meta_map()->set_code_cache(empty_fixed_array());
2363 meta_map()->set_dependent_code(DependentCode::cast(empty_fixed_array()));
2364 meta_map()->init_back_pointer(undefined_value());
2365 meta_map()->set_instance_descriptors(empty_descriptor_array());
2367 fixed_array_map()->set_code_cache(empty_fixed_array());
2368 fixed_array_map()->set_dependent_code(
2369 DependentCode::cast(empty_fixed_array()));
2370 fixed_array_map()->init_back_pointer(undefined_value());
2371 fixed_array_map()->set_instance_descriptors(empty_descriptor_array());
2373 oddball_map()->set_code_cache(empty_fixed_array());
2374 oddball_map()->set_dependent_code(DependentCode::cast(empty_fixed_array()));
2375 oddball_map()->init_back_pointer(undefined_value());
2376 oddball_map()->set_instance_descriptors(empty_descriptor_array());
2378 // Fix prototype object for existing maps.
2379 meta_map()->set_prototype(null_value());
2380 meta_map()->set_constructor(null_value());
2382 fixed_array_map()->set_prototype(null_value());
2383 fixed_array_map()->set_constructor(null_value());
2385 oddball_map()->set_prototype(null_value());
2386 oddball_map()->set_constructor(null_value());
2388 { MaybeObject* maybe_obj =
2389 AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2390 if (!maybe_obj->ToObject(&obj)) return false;
2392 set_fixed_cow_array_map(Map::cast(obj));
2393 ASSERT(fixed_array_map() != fixed_cow_array_map());
2395 { MaybeObject* maybe_obj =
2396 AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2397 if (!maybe_obj->ToObject(&obj)) return false;
2399 set_scope_info_map(Map::cast(obj));
2401 { MaybeObject* maybe_obj = AllocateMap(HEAP_NUMBER_TYPE, HeapNumber::kSize);
2402 if (!maybe_obj->ToObject(&obj)) return false;
2404 set_heap_number_map(Map::cast(obj));
2406 { MaybeObject* maybe_obj = AllocateMap(SYMBOL_TYPE, Symbol::kSize);
2407 if (!maybe_obj->ToObject(&obj)) return false;
2409 set_symbol_map(Map::cast(obj));
2411 { MaybeObject* maybe_obj = AllocateMap(FOREIGN_TYPE, Foreign::kSize);
2412 if (!maybe_obj->ToObject(&obj)) return false;
2414 set_foreign_map(Map::cast(obj));
2416 for (unsigned i = 0; i < ARRAY_SIZE(string_type_table); i++) {
2417 const StringTypeTable& entry = string_type_table[i];
2418 { MaybeObject* maybe_obj = AllocateMap(entry.type, entry.size);
2419 if (!maybe_obj->ToObject(&obj)) return false;
2421 roots_[entry.index] = Map::cast(obj);
2424 { MaybeObject* maybe_obj = AllocateMap(STRING_TYPE, kVariableSizeSentinel);
2425 if (!maybe_obj->ToObject(&obj)) return false;
2427 set_undetectable_string_map(Map::cast(obj));
2428 Map::cast(obj)->set_is_undetectable();
2430 { MaybeObject* maybe_obj =
2431 AllocateMap(ASCII_STRING_TYPE, kVariableSizeSentinel);
2432 if (!maybe_obj->ToObject(&obj)) return false;
2434 set_undetectable_ascii_string_map(Map::cast(obj));
2435 Map::cast(obj)->set_is_undetectable();
2437 { MaybeObject* maybe_obj =
2438 AllocateMap(FIXED_DOUBLE_ARRAY_TYPE, kVariableSizeSentinel);
2439 if (!maybe_obj->ToObject(&obj)) return false;
2441 set_fixed_double_array_map(Map::cast(obj));
2443 { MaybeObject* maybe_obj =
2444 AllocateMap(BYTE_ARRAY_TYPE, kVariableSizeSentinel);
2445 if (!maybe_obj->ToObject(&obj)) return false;
2447 set_byte_array_map(Map::cast(obj));
2449 { MaybeObject* maybe_obj =
2450 AllocateMap(FREE_SPACE_TYPE, kVariableSizeSentinel);
2451 if (!maybe_obj->ToObject(&obj)) return false;
2453 set_free_space_map(Map::cast(obj));
2455 { MaybeObject* maybe_obj = AllocateByteArray(0, TENURED);
2456 if (!maybe_obj->ToObject(&obj)) return false;
2458 set_empty_byte_array(ByteArray::cast(obj));
2460 { MaybeObject* maybe_obj =
2461 AllocateMap(EXTERNAL_PIXEL_ARRAY_TYPE, ExternalArray::kAlignedSize);
2462 if (!maybe_obj->ToObject(&obj)) return false;
2464 set_external_pixel_array_map(Map::cast(obj));
2466 { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_BYTE_ARRAY_TYPE,
2467 ExternalArray::kAlignedSize);
2468 if (!maybe_obj->ToObject(&obj)) return false;
2470 set_external_byte_array_map(Map::cast(obj));
2472 { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE,
2473 ExternalArray::kAlignedSize);
2474 if (!maybe_obj->ToObject(&obj)) return false;
2476 set_external_unsigned_byte_array_map(Map::cast(obj));
2478 { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_SHORT_ARRAY_TYPE,
2479 ExternalArray::kAlignedSize);
2480 if (!maybe_obj->ToObject(&obj)) return false;
2482 set_external_short_array_map(Map::cast(obj));
2484 { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE,
2485 ExternalArray::kAlignedSize);
2486 if (!maybe_obj->ToObject(&obj)) return false;
2488 set_external_unsigned_short_array_map(Map::cast(obj));
2490 { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_INT_ARRAY_TYPE,
2491 ExternalArray::kAlignedSize);
2492 if (!maybe_obj->ToObject(&obj)) return false;
2494 set_external_int_array_map(Map::cast(obj));
2496 { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_UNSIGNED_INT_ARRAY_TYPE,
2497 ExternalArray::kAlignedSize);
2498 if (!maybe_obj->ToObject(&obj)) return false;
2500 set_external_unsigned_int_array_map(Map::cast(obj));
2502 { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_FLOAT_ARRAY_TYPE,
2503 ExternalArray::kAlignedSize);
2504 if (!maybe_obj->ToObject(&obj)) return false;
2506 set_external_float_array_map(Map::cast(obj));
2508 { MaybeObject* maybe_obj =
2509 AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2510 if (!maybe_obj->ToObject(&obj)) return false;
2512 set_non_strict_arguments_elements_map(Map::cast(obj));
2514 { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_DOUBLE_ARRAY_TYPE,
2515 ExternalArray::kAlignedSize);
2516 if (!maybe_obj->ToObject(&obj)) return false;
2518 set_external_double_array_map(Map::cast(obj));
2520 { MaybeObject* maybe_obj = AllocateMap(CODE_TYPE, kVariableSizeSentinel);
2521 if (!maybe_obj->ToObject(&obj)) return false;
2523 set_code_map(Map::cast(obj));
2525 { MaybeObject* maybe_obj = AllocateMap(JS_GLOBAL_PROPERTY_CELL_TYPE,
2526 JSGlobalPropertyCell::kSize);
2527 if (!maybe_obj->ToObject(&obj)) return false;
2529 set_global_property_cell_map(Map::cast(obj));
2531 { MaybeObject* maybe_obj = AllocateMap(FILLER_TYPE, kPointerSize);
2532 if (!maybe_obj->ToObject(&obj)) return false;
2534 set_one_pointer_filler_map(Map::cast(obj));
2536 { MaybeObject* maybe_obj = AllocateMap(FILLER_TYPE, 2 * kPointerSize);
2537 if (!maybe_obj->ToObject(&obj)) return false;
2539 set_two_pointer_filler_map(Map::cast(obj));
2541 for (unsigned i = 0; i < ARRAY_SIZE(struct_table); i++) {
2542 const StructTable& entry = struct_table[i];
2543 { MaybeObject* maybe_obj = AllocateMap(entry.type, entry.size);
2544 if (!maybe_obj->ToObject(&obj)) return false;
2546 roots_[entry.index] = Map::cast(obj);
2549 { MaybeObject* maybe_obj =
2550 AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2551 if (!maybe_obj->ToObject(&obj)) return false;
2553 set_hash_table_map(Map::cast(obj));
2555 { MaybeObject* maybe_obj =
2556 AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2557 if (!maybe_obj->ToObject(&obj)) return false;
2559 set_function_context_map(Map::cast(obj));
2561 { MaybeObject* maybe_obj =
2562 AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2563 if (!maybe_obj->ToObject(&obj)) return false;
2565 set_catch_context_map(Map::cast(obj));
2567 { MaybeObject* maybe_obj =
2568 AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2569 if (!maybe_obj->ToObject(&obj)) return false;
2571 set_with_context_map(Map::cast(obj));
2573 { MaybeObject* maybe_obj =
2574 AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2575 if (!maybe_obj->ToObject(&obj)) return false;
2577 set_block_context_map(Map::cast(obj));
2579 { MaybeObject* maybe_obj =
2580 AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2581 if (!maybe_obj->ToObject(&obj)) return false;
2583 set_module_context_map(Map::cast(obj));
2585 { MaybeObject* maybe_obj =
2586 AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2587 if (!maybe_obj->ToObject(&obj)) return false;
2589 set_global_context_map(Map::cast(obj));
2591 { MaybeObject* maybe_obj =
2592 AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2593 if (!maybe_obj->ToObject(&obj)) return false;
2595 Map* native_context_map = Map::cast(obj);
2596 native_context_map->set_dictionary_map(true);
2597 native_context_map->set_visitor_id(StaticVisitorBase::kVisitNativeContext);
2598 set_native_context_map(native_context_map);
2600 { MaybeObject* maybe_obj = AllocateMap(SHARED_FUNCTION_INFO_TYPE,
2601 SharedFunctionInfo::kAlignedSize);
2602 if (!maybe_obj->ToObject(&obj)) return false;
2604 set_shared_function_info_map(Map::cast(obj));
2606 { MaybeObject* maybe_obj = AllocateMap(JS_MESSAGE_OBJECT_TYPE,
2607 JSMessageObject::kSize);
2608 if (!maybe_obj->ToObject(&obj)) return false;
2610 set_message_object_map(Map::cast(obj));
2613 { MaybeObject* maybe_obj =
2614 AllocateMap(JS_OBJECT_TYPE, JSObject::kHeaderSize + kPointerSize);
2615 if (!maybe_obj->To(&external_map)) return false;
2617 external_map->set_is_extensible(false);
2618 set_external_map(external_map);
2620 ASSERT(!InNewSpace(empty_fixed_array()));
2625 MaybeObject* Heap::AllocateHeapNumber(double value, PretenureFlag pretenure) {
2626 // Statically ensure that it is safe to allocate heap numbers in paged
2628 STATIC_ASSERT(HeapNumber::kSize <= Page::kNonCodeObjectAreaSize);
2629 AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
2632 { MaybeObject* maybe_result =
2633 AllocateRaw(HeapNumber::kSize, space, OLD_DATA_SPACE);
2634 if (!maybe_result->ToObject(&result)) return maybe_result;
2637 HeapObject::cast(result)->set_map_no_write_barrier(heap_number_map());
2638 HeapNumber::cast(result)->set_value(value);
2643 MaybeObject* Heap::AllocateHeapNumber(double value) {
2644 // Use general version, if we're forced to always allocate.
2645 if (always_allocate()) return AllocateHeapNumber(value, TENURED);
2647 // This version of AllocateHeapNumber is optimized for
2648 // allocation in new space.
2649 STATIC_ASSERT(HeapNumber::kSize <= Page::kMaxNonCodeHeapObjectSize);
2650 ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
2652 { MaybeObject* maybe_result = new_space_.AllocateRaw(HeapNumber::kSize);
2653 if (!maybe_result->ToObject(&result)) return maybe_result;
2655 HeapObject::cast(result)->set_map_no_write_barrier(heap_number_map());
2656 HeapNumber::cast(result)->set_value(value);
2661 MaybeObject* Heap::AllocateJSGlobalPropertyCell(Object* value) {
2663 { MaybeObject* maybe_result = AllocateRawCell();
2664 if (!maybe_result->ToObject(&result)) return maybe_result;
2666 HeapObject::cast(result)->set_map_no_write_barrier(
2667 global_property_cell_map());
2668 JSGlobalPropertyCell::cast(result)->set_value(value);
2673 MaybeObject* Heap::CreateOddball(const char* to_string,
2677 { MaybeObject* maybe_result = Allocate(oddball_map(), OLD_POINTER_SPACE);
2678 if (!maybe_result->ToObject(&result)) return maybe_result;
2680 return Oddball::cast(result)->Initialize(to_string, to_number, kind);
2684 bool Heap::CreateApiObjects() {
2687 { MaybeObject* maybe_obj = AllocateMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
2688 if (!maybe_obj->ToObject(&obj)) return false;
2690 // Don't use Smi-only elements optimizations for objects with the neander
2691 // map. There are too many places where element values are set directly,
2692 // with no bottleneck that could trap the Smi-only -> fast elements
2693 // transition, and there appears to be no benefit in optimizing this case.
2694 Map* new_neander_map = Map::cast(obj);
2695 new_neander_map->set_elements_kind(TERMINAL_FAST_ELEMENTS_KIND);
2696 set_neander_map(new_neander_map);
2698 { MaybeObject* maybe_obj = AllocateJSObjectFromMap(neander_map());
2699 if (!maybe_obj->ToObject(&obj)) return false;
2702 { MaybeObject* maybe_elements = AllocateFixedArray(2);
2703 if (!maybe_elements->ToObject(&elements)) return false;
2705 FixedArray::cast(elements)->set(0, Smi::FromInt(0));
2706 JSObject::cast(obj)->set_elements(FixedArray::cast(elements));
2707 set_message_listeners(JSObject::cast(obj));
2713 void Heap::CreateJSEntryStub() {
2715 set_js_entry_code(*stub.GetCode(isolate()));
2719 void Heap::CreateJSConstructEntryStub() {
2720 JSConstructEntryStub stub;
2721 set_js_construct_entry_code(*stub.GetCode(isolate()));
2725 void Heap::CreateFixedStubs() {
2726 // Here we create roots for fixed stubs. They are needed at GC
2727 // for cooking and uncooking (check out frames.cc).
2728 // This eliminates the need for doing a dictionary lookup in the
2729 // stub cache for these stubs.
2730 HandleScope scope(isolate());
2731 // gcc-4.4 has problems generating correct code for the following snippet:
2732 // { JSEntryStub stub;
2733 // js_entry_code_ = *stub.GetCode();
2735 // { JSConstructEntryStub stub;
2736 // js_construct_entry_code_ = *stub.GetCode();
2738 // To work around the problem, make separate functions without inlining.
2739 Heap::CreateJSEntryStub();
2740 Heap::CreateJSConstructEntryStub();
2742 // Create stubs that should be there, so we don't unexpectedly have to
2743 // create them if we need them during the creation of another stub.
2744 // Stub creation mixes raw pointers and handles in an unsafe manner so
2745 // we cannot create stubs while we are creating stubs.
2746 CodeStub::GenerateStubsAheadOfTime(isolate());
2750 bool Heap::CreateInitialObjects() {
2753 // The -0 value must be set before NumberFromDouble works.
2754 { MaybeObject* maybe_obj = AllocateHeapNumber(-0.0, TENURED);
2755 if (!maybe_obj->ToObject(&obj)) return false;
2757 set_minus_zero_value(HeapNumber::cast(obj));
2758 ASSERT(std::signbit(minus_zero_value()->Number()) != 0);
2760 { MaybeObject* maybe_obj = AllocateHeapNumber(OS::nan_value(), TENURED);
2761 if (!maybe_obj->ToObject(&obj)) return false;
2763 set_nan_value(HeapNumber::cast(obj));
2765 { MaybeObject* maybe_obj = AllocateHeapNumber(V8_INFINITY, TENURED);
2766 if (!maybe_obj->ToObject(&obj)) return false;
2768 set_infinity_value(HeapNumber::cast(obj));
2770 // The hole has not been created yet, but we want to put something
2771 // predictable in the gaps in the string table, so let's make that Smi zero.
2772 set_the_hole_value(reinterpret_cast<Oddball*>(Smi::FromInt(0)));
2774 // Allocate initial string table.
2775 { MaybeObject* maybe_obj =
2776 StringTable::Allocate(this, kInitialStringTableSize);
2777 if (!maybe_obj->ToObject(&obj)) return false;
2779 // Don't use set_string_table() due to asserts.
2780 roots_[kStringTableRootIndex] = obj;
2782 // Finish initializing oddballs after creating the string table.
2783 { MaybeObject* maybe_obj =
2784 undefined_value()->Initialize("undefined",
2786 Oddball::kUndefined);
2787 if (!maybe_obj->ToObject(&obj)) return false;
2790 // Initialize the null_value.
2791 { MaybeObject* maybe_obj =
2792 null_value()->Initialize("null", Smi::FromInt(0), Oddball::kNull);
2793 if (!maybe_obj->ToObject(&obj)) return false;
2796 { MaybeObject* maybe_obj = CreateOddball("true",
2799 if (!maybe_obj->ToObject(&obj)) return false;
2801 set_true_value(Oddball::cast(obj));
2803 { MaybeObject* maybe_obj = CreateOddball("false",
2806 if (!maybe_obj->ToObject(&obj)) return false;
2808 set_false_value(Oddball::cast(obj));
2810 { MaybeObject* maybe_obj = CreateOddball("hole",
2813 if (!maybe_obj->ToObject(&obj)) return false;
2815 set_the_hole_value(Oddball::cast(obj));
2817 { MaybeObject* maybe_obj = CreateOddball("arguments_marker",
2819 Oddball::kArgumentMarker);
2820 if (!maybe_obj->ToObject(&obj)) return false;
2822 set_arguments_marker(Oddball::cast(obj));
2824 { MaybeObject* maybe_obj = CreateOddball("no_interceptor_result_sentinel",
2827 if (!maybe_obj->ToObject(&obj)) return false;
2829 set_no_interceptor_result_sentinel(obj);
2831 { MaybeObject* maybe_obj = CreateOddball("termination_exception",
2834 if (!maybe_obj->ToObject(&obj)) return false;
2836 set_termination_exception(obj);
2838 for (unsigned i = 0; i < ARRAY_SIZE(constant_string_table); i++) {
2839 { MaybeObject* maybe_obj =
2840 InternalizeUtf8String(constant_string_table[i].contents);
2841 if (!maybe_obj->ToObject(&obj)) return false;
2843 roots_[constant_string_table[i].index] = String::cast(obj);
2846 // Allocate the hidden string which is used to identify the hidden properties
2847 // in JSObjects. The hash code has a special value so that it will not match
2848 // the empty string when searching for the property. It cannot be part of the
2849 // loop above because it needs to be allocated manually with the special
2850 // hash code in place. The hash code for the hidden_string is zero to ensure
2851 // that it will always be at the first entry in property descriptors.
2852 { MaybeObject* maybe_obj = AllocateOneByteInternalizedString(
2853 OneByteVector("", 0), String::kEmptyStringHash);
2854 if (!maybe_obj->ToObject(&obj)) return false;
2856 hidden_string_ = String::cast(obj);
2858 // Allocate the code_stubs dictionary. The initial size is set to avoid
2859 // expanding the dictionary during bootstrapping.
2860 { MaybeObject* maybe_obj = UnseededNumberDictionary::Allocate(this, 128);
2861 if (!maybe_obj->ToObject(&obj)) return false;
2863 set_code_stubs(UnseededNumberDictionary::cast(obj));
2866 // Allocate the non_monomorphic_cache used in stub-cache.cc. The initial size
2867 // is set to avoid expanding the dictionary during bootstrapping.
2868 { MaybeObject* maybe_obj = UnseededNumberDictionary::Allocate(this, 64);
2869 if (!maybe_obj->ToObject(&obj)) return false;
2871 set_non_monomorphic_cache(UnseededNumberDictionary::cast(obj));
2873 { MaybeObject* maybe_obj = AllocatePolymorphicCodeCache();
2874 if (!maybe_obj->ToObject(&obj)) return false;
2876 set_polymorphic_code_cache(PolymorphicCodeCache::cast(obj));
2878 set_instanceof_cache_function(Smi::FromInt(0));
2879 set_instanceof_cache_map(Smi::FromInt(0));
2880 set_instanceof_cache_answer(Smi::FromInt(0));
2884 // Allocate the dictionary of intrinsic function names.
2885 { MaybeObject* maybe_obj =
2886 NameDictionary::Allocate(this, Runtime::kNumFunctions);
2887 if (!maybe_obj->ToObject(&obj)) return false;
2889 { MaybeObject* maybe_obj = Runtime::InitializeIntrinsicFunctionNames(this,
2891 if (!maybe_obj->ToObject(&obj)) return false;
2893 set_intrinsic_function_names(NameDictionary::cast(obj));
2895 { MaybeObject* maybe_obj = AllocateInitialNumberStringCache();
2896 if (!maybe_obj->ToObject(&obj)) return false;
2898 set_number_string_cache(FixedArray::cast(obj));
2900 // Allocate cache for single character one byte strings.
2901 { MaybeObject* maybe_obj =
2902 AllocateFixedArray(String::kMaxOneByteCharCode + 1, TENURED);
2903 if (!maybe_obj->ToObject(&obj)) return false;
2905 set_single_character_string_cache(FixedArray::cast(obj));
2907 // Allocate cache for string split.
2908 { MaybeObject* maybe_obj = AllocateFixedArray(
2909 RegExpResultsCache::kRegExpResultsCacheSize, TENURED);
2910 if (!maybe_obj->ToObject(&obj)) return false;
2912 set_string_split_cache(FixedArray::cast(obj));
2914 { MaybeObject* maybe_obj = AllocateFixedArray(
2915 RegExpResultsCache::kRegExpResultsCacheSize, TENURED);
2916 if (!maybe_obj->ToObject(&obj)) return false;
2918 set_regexp_multiple_cache(FixedArray::cast(obj));
2920 // Allocate cache for external strings pointing to native source code.
2921 { MaybeObject* maybe_obj = AllocateFixedArray(Natives::GetBuiltinsCount());
2922 if (!maybe_obj->ToObject(&obj)) return false;
2924 set_natives_source_cache(FixedArray::cast(obj));
2926 // Allocate object to hold object observation state.
2927 { MaybeObject* maybe_obj = AllocateMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
2928 if (!maybe_obj->ToObject(&obj)) return false;
2930 { MaybeObject* maybe_obj = AllocateJSObjectFromMap(Map::cast(obj));
2931 if (!maybe_obj->ToObject(&obj)) return false;
2933 set_observation_state(JSObject::cast(obj));
2935 // Handling of script id generation is in FACTORY->NewScript.
2936 set_last_script_id(undefined_value());
2938 // Initialize keyed lookup cache.
2939 isolate_->keyed_lookup_cache()->Clear();
2941 // Initialize context slot cache.
2942 isolate_->context_slot_cache()->Clear();
2944 // Initialize descriptor cache.
2945 isolate_->descriptor_lookup_cache()->Clear();
2947 // Initialize compilation cache.
2948 isolate_->compilation_cache()->Clear();
2954 bool Heap::RootCanBeWrittenAfterInitialization(Heap::RootListIndex root_index) {
2955 RootListIndex writable_roots[] = {
2956 kStoreBufferTopRootIndex,
2957 kStackLimitRootIndex,
2958 kNumberStringCacheRootIndex,
2959 kInstanceofCacheFunctionRootIndex,
2960 kInstanceofCacheMapRootIndex,
2961 kInstanceofCacheAnswerRootIndex,
2962 kCodeStubsRootIndex,
2963 kNonMonomorphicCacheRootIndex,
2964 kPolymorphicCodeCacheRootIndex,
2965 kLastScriptIdRootIndex,
2966 kEmptyScriptRootIndex,
2967 kRealStackLimitRootIndex,
2968 kArgumentsAdaptorDeoptPCOffsetRootIndex,
2969 kConstructStubDeoptPCOffsetRootIndex,
2970 kGetterStubDeoptPCOffsetRootIndex,
2971 kSetterStubDeoptPCOffsetRootIndex,
2972 kStringTableRootIndex,
2975 for (unsigned int i = 0; i < ARRAY_SIZE(writable_roots); i++) {
2976 if (root_index == writable_roots[i])
2983 Object* RegExpResultsCache::Lookup(Heap* heap,
2985 Object* key_pattern,
2986 ResultsCacheType type) {
2988 if (!key_string->IsInternalizedString()) return Smi::FromInt(0);
2989 if (type == STRING_SPLIT_SUBSTRINGS) {
2990 ASSERT(key_pattern->IsString());
2991 if (!key_pattern->IsInternalizedString()) return Smi::FromInt(0);
2992 cache = heap->string_split_cache();
2994 ASSERT(type == REGEXP_MULTIPLE_INDICES);
2995 ASSERT(key_pattern->IsFixedArray());
2996 cache = heap->regexp_multiple_cache();
2999 uint32_t hash = key_string->Hash();
3000 uint32_t index = ((hash & (kRegExpResultsCacheSize - 1)) &
3001 ~(kArrayEntriesPerCacheEntry - 1));
3002 if (cache->get(index + kStringOffset) == key_string &&
3003 cache->get(index + kPatternOffset) == key_pattern) {
3004 return cache->get(index + kArrayOffset);
3007 ((index + kArrayEntriesPerCacheEntry) & (kRegExpResultsCacheSize - 1));
3008 if (cache->get(index + kStringOffset) == key_string &&
3009 cache->get(index + kPatternOffset) == key_pattern) {
3010 return cache->get(index + kArrayOffset);
3012 return Smi::FromInt(0);
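// A minimal sketch of the index computation used by the lookup above,
// assuming the cache size and the entry size are both powers of two (the
// real constants are kRegExpResultsCacheSize and kArrayEntriesPerCacheEntry).
// Each logical entry occupies entry_size consecutive slots; the primary index
// is derived from the string hash, and a single secondary probe is made one
// entry further on, wrapping around the table. Helper names are local to
// this sketch.
static inline uint32_t SketchPrimaryCacheIndex(uint32_t hash,
                                               uint32_t table_size,
                                               uint32_t entry_size) {
  return (hash & (table_size - 1)) & ~(entry_size - 1);
}

static inline uint32_t SketchSecondaryCacheIndex(uint32_t primary,
                                                 uint32_t table_size,
                                                 uint32_t entry_size) {
  return (primary + entry_size) & (table_size - 1);
}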
3016 void RegExpResultsCache::Enter(Heap* heap,
3018 Object* key_pattern,
3019 FixedArray* value_array,
3020 ResultsCacheType type) {
3022 if (!key_string->IsInternalizedString()) return;
3023 if (type == STRING_SPLIT_SUBSTRINGS) {
3024 ASSERT(key_pattern->IsString());
3025 if (!key_pattern->IsInternalizedString()) return;
3026 cache = heap->string_split_cache();
3028 ASSERT(type == REGEXP_MULTIPLE_INDICES);
3029 ASSERT(key_pattern->IsFixedArray());
3030 cache = heap->regexp_multiple_cache();
3033 uint32_t hash = key_string->Hash();
3034 uint32_t index = ((hash & (kRegExpResultsCacheSize - 1)) &
3035 ~(kArrayEntriesPerCacheEntry - 1));
3036 if (cache->get(index + kStringOffset) == Smi::FromInt(0)) {
3037 cache->set(index + kStringOffset, key_string);
3038 cache->set(index + kPatternOffset, key_pattern);
3039 cache->set(index + kArrayOffset, value_array);
3042 ((index + kArrayEntriesPerCacheEntry) & (kRegExpResultsCacheSize - 1));
3043 if (cache->get(index2 + kStringOffset) == Smi::FromInt(0)) {
3044 cache->set(index2 + kStringOffset, key_string);
3045 cache->set(index2 + kPatternOffset, key_pattern);
3046 cache->set(index2 + kArrayOffset, value_array);
3048 cache->set(index2 + kStringOffset, Smi::FromInt(0));
3049 cache->set(index2 + kPatternOffset, Smi::FromInt(0));
3050 cache->set(index2 + kArrayOffset, Smi::FromInt(0));
3051 cache->set(index + kStringOffset, key_string);
3052 cache->set(index + kPatternOffset, key_pattern);
3053 cache->set(index + kArrayOffset, value_array);
3056 // If the array is a reasonably short list of substrings, convert it into a
3057 // list of internalized strings.
3058 if (type == STRING_SPLIT_SUBSTRINGS && value_array->length() < 100) {
3059 for (int i = 0; i < value_array->length(); i++) {
3060 String* str = String::cast(value_array->get(i));
3061 Object* internalized_str;
3062 MaybeObject* maybe_string = heap->InternalizeString(str);
3063 if (maybe_string->ToObject(&internalized_str)) {
3064 value_array->set(i, internalized_str);
3068 // Convert backing store to a copy-on-write array.
3069 value_array->set_map_no_write_barrier(heap->fixed_cow_array_map());
3073 void RegExpResultsCache::Clear(FixedArray* cache) {
3074 for (int i = 0; i < kRegExpResultsCacheSize; i++) {
3075 cache->set(i, Smi::FromInt(0));
3080 MaybeObject* Heap::AllocateInitialNumberStringCache() {
3081 MaybeObject* maybe_obj =
3082 AllocateFixedArray(kInitialNumberStringCacheSize * 2, TENURED);
3087 int Heap::FullSizeNumberStringCacheLength() {
3088 // Compute the size of the number string cache based on the max new space size.
3089 // The number string cache has a minimum size based on twice the initial cache
3090 // size to ensure that it is bigger after being made 'full size'.
3091 int number_string_cache_size = max_semispace_size_ / 512;
3092 number_string_cache_size = Max(kInitialNumberStringCacheSize * 2,
3093 Min(0x4000, number_string_cache_size));
3094 // There is a string and a number per entry so the length is twice the number
3096 return number_string_cache_size * 2;
3100 void Heap::AllocateFullSizeNumberStringCache() {
3101 // The idea is to have a small number string cache in the snapshot to keep
3102 // boot-time memory usage down. If we already have to expand the number
3103 // string cache while creating the snapshot, that plan has not worked out.
3104 ASSERT(!Serializer::enabled() || FLAG_extra_code != NULL);
3105 MaybeObject* maybe_obj =
3106 AllocateFixedArray(FullSizeNumberStringCacheLength(), TENURED);
3108 if (maybe_obj->ToObject(&new_cache)) {
3109 // We don't bother to repopulate the cache with entries from the old cache.
3110 // It will be repopulated soon enough with new strings.
3111 set_number_string_cache(FixedArray::cast(new_cache));
3113 // If allocation fails then we just return without doing anything. It is only
3114 // a cache, so best effort is OK here.
3118 void Heap::FlushNumberStringCache() {
3119 // Flush the number to string cache.
3120 int len = number_string_cache()->length();
3121 for (int i = 0; i < len; i++) {
3122 number_string_cache()->set_undefined(this, i);
3127 static inline int double_get_hash(double d) {
3128 DoubleRepresentation rep(d);
3129 return static_cast<int>(rep.bits) ^ static_cast<int>(rep.bits >> 32);
3133 static inline int smi_get_hash(Smi* smi) {
3134 return smi->value();
3138 Object* Heap::GetNumberStringCache(Object* number) {
3140 int mask = (number_string_cache()->length() >> 1) - 1;
3141 if (number->IsSmi()) {
3142 hash = smi_get_hash(Smi::cast(number)) & mask;
3144 hash = double_get_hash(number->Number()) & mask;
3146 Object* key = number_string_cache()->get(hash * 2);
3147 if (key == number) {
3148 return String::cast(number_string_cache()->get(hash * 2 + 1));
3149 } else if (key->IsHeapNumber() &&
3150 number->IsHeapNumber() &&
3151 key->Number() == number->Number()) {
3152 return String::cast(number_string_cache()->get(hash * 2 + 1));
3154 return undefined_value();
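// A minimal sketch of the cache layout assumed by GetNumberStringCache and
// SetNumberStringCache: one flat array of key/value pairs, so entry i keeps
// the number at slot 2 * i and its string at slot 2 * i + 1. The helper name
// is local to this sketch; it assumes the cache length is twice a power of
// two, as the allocation sites above arrange.
static inline int SketchNumberStringCacheKeySlot(int hash, int cache_length) {
  int mask = (cache_length >> 1) - 1;  // Number of entries minus one.
  int entry = hash & mask;             // Direct-mapped, no probing.
  return entry * 2;                    // Key slot; the value lives at +1.
}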
3158 void Heap::SetNumberStringCache(Object* number, String* string) {
3160 int mask = (number_string_cache()->length() >> 1) - 1;
3161 if (number->IsSmi()) {
3162 hash = smi_get_hash(Smi::cast(number)) & mask;
3164 hash = double_get_hash(number->Number()) & mask;
3166 if (number_string_cache()->get(hash * 2) != undefined_value() &&
3167 number_string_cache()->length() != FullSizeNumberStringCacheLength()) {
3168 // The first time we have a hash collision, we move to the full sized
3169 // number string cache.
3170 AllocateFullSizeNumberStringCache();
3173 number_string_cache()->set(hash * 2, number);
3174 number_string_cache()->set(hash * 2 + 1, string);
3178 MaybeObject* Heap::NumberToString(Object* number,
3179 bool check_number_string_cache,
3180 PretenureFlag pretenure) {
3181 isolate_->counters()->number_to_string_runtime()->Increment();
3182 if (check_number_string_cache) {
3183 Object* cached = GetNumberStringCache(number);
3184 if (cached != undefined_value()) {
3190 Vector<char> buffer(arr, ARRAY_SIZE(arr));
3192 if (number->IsSmi()) {
3193 int num = Smi::cast(number)->value();
3194 str = IntToCString(num, buffer);
3196 double num = HeapNumber::cast(number)->value();
3197 str = DoubleToCString(num, buffer);
3201 MaybeObject* maybe_js_string =
3202 AllocateStringFromOneByte(CStrVector(str), pretenure);
3203 if (maybe_js_string->ToObject(&js_string)) {
3204 SetNumberStringCache(number, String::cast(js_string));
3206 return maybe_js_string;
3210 MaybeObject* Heap::Uint32ToString(uint32_t value,
3211 bool check_number_string_cache) {
3213 MaybeObject* maybe = NumberFromUint32(value);
3214 if (!maybe->To<Object>(&number)) return maybe;
3215 return NumberToString(number, check_number_string_cache);
3219 Map* Heap::MapForExternalArrayType(ExternalArrayType array_type) {
3220 return Map::cast(roots_[RootIndexForExternalArrayType(array_type)]);
3224 Heap::RootListIndex Heap::RootIndexForExternalArrayType(
3225 ExternalArrayType array_type) {
3226 switch (array_type) {
3227 case kExternalByteArray:
3228 return kExternalByteArrayMapRootIndex;
3229 case kExternalUnsignedByteArray:
3230 return kExternalUnsignedByteArrayMapRootIndex;
3231 case kExternalShortArray:
3232 return kExternalShortArrayMapRootIndex;
3233 case kExternalUnsignedShortArray:
3234 return kExternalUnsignedShortArrayMapRootIndex;
3235 case kExternalIntArray:
3236 return kExternalIntArrayMapRootIndex;
3237 case kExternalUnsignedIntArray:
3238 return kExternalUnsignedIntArrayMapRootIndex;
3239 case kExternalFloatArray:
3240 return kExternalFloatArrayMapRootIndex;
3241 case kExternalDoubleArray:
3242 return kExternalDoubleArrayMapRootIndex;
3243 case kExternalPixelArray:
3244 return kExternalPixelArrayMapRootIndex;
3247 return kUndefinedValueRootIndex;
3252 MaybeObject* Heap::NumberFromDouble(double value, PretenureFlag pretenure) {
3253 // We need to distinguish the minus zero value and this cannot be
3254 // done after conversion to int. Doing this by comparing bit
3255 // patterns is faster than using fpclassify() et al.
3256 static const DoubleRepresentation minus_zero(-0.0);
3258 DoubleRepresentation rep(value);
3259 if (rep.bits == minus_zero.bits) {
3260 return AllocateHeapNumber(-0.0, pretenure);
3263 int int_value = FastD2I(value);
3264 if (value == int_value && Smi::IsValid(int_value)) {
3265 return Smi::FromInt(int_value);
3268 // Materialize the value in the heap.
3269 return AllocateHeapNumber(value, pretenure);
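// A minimal sketch of the bit-pattern comparison above: -0.0 == 0.0 is true
// under floating-point comparison, so the code inspects the raw bits
// instead. The union below is a local stand-in for DoubleRepresentation.
static inline bool SketchIsMinusZero(double value) {
  union Bits { double number; uint64_t bits; };
  Bits rep, minus_zero;
  rep.number = value;
  minus_zero.number = -0.0;
  return rep.bits == minus_zero.bits;
}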
3273 MaybeObject* Heap::AllocateForeign(Address address, PretenureFlag pretenure) {
3274 // Statically ensure that it is safe to allocate foreigns in paged spaces.
3275 STATIC_ASSERT(Foreign::kSize <= Page::kMaxNonCodeHeapObjectSize);
3276 AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
3278 MaybeObject* maybe_result = Allocate(foreign_map(), space);
3279 if (!maybe_result->To(&result)) return maybe_result;
3280 result->set_foreign_address(address);
3285 MaybeObject* Heap::AllocateSharedFunctionInfo(Object* name) {
3286 SharedFunctionInfo* share;
3287 MaybeObject* maybe = Allocate(shared_function_info_map(), OLD_POINTER_SPACE);
3288 if (!maybe->To<SharedFunctionInfo>(&share)) return maybe;
3290 // Set pointer fields.
3291 share->set_name(name);
3292 Code* illegal = isolate_->builtins()->builtin(Builtins::kIllegal);
3293 share->set_code(illegal);
3294 share->ClearOptimizedCodeMap();
3295 share->set_scope_info(ScopeInfo::Empty(isolate_));
3296 Code* construct_stub =
3297 isolate_->builtins()->builtin(Builtins::kJSConstructStubGeneric);
3298 share->set_construct_stub(construct_stub);
3299 share->set_instance_class_name(Object_string());
3300 share->set_function_data(undefined_value(), SKIP_WRITE_BARRIER);
3301 share->set_script(undefined_value(), SKIP_WRITE_BARRIER);
3302 share->set_debug_info(undefined_value(), SKIP_WRITE_BARRIER);
3303 share->set_inferred_name(empty_string(), SKIP_WRITE_BARRIER);
3304 share->set_initial_map(undefined_value(), SKIP_WRITE_BARRIER);
3305 share->set_this_property_assignments(undefined_value(), SKIP_WRITE_BARRIER);
3306 share->set_ast_node_count(0);
3307 share->set_stress_deopt_counter(FLAG_deopt_every_n_times);
3308 share->set_counters(0);
3310 // Set integer fields (smi or int, depending on the architecture).
3311 share->set_length(0);
3312 share->set_formal_parameter_count(0);
3313 share->set_expected_nof_properties(0);
3314 share->set_num_literals(0);
3315 share->set_start_position_and_type(0);
3316 share->set_end_position(0);
3317 share->set_function_token_position(0);
3318 // All compiler hints default to false or 0.
3319 share->set_compiler_hints(0);
3320 share->set_this_property_assignments_count(0);
3321 share->set_opt_count(0);
3327 MaybeObject* Heap::AllocateJSMessageObject(String* type,
3332 Object* stack_trace,
3333 Object* stack_frames) {
3335 { MaybeObject* maybe_result = Allocate(message_object_map(), NEW_SPACE);
3336 if (!maybe_result->ToObject(&result)) return maybe_result;
3338 JSMessageObject* message = JSMessageObject::cast(result);
3339 message->set_properties(Heap::empty_fixed_array(), SKIP_WRITE_BARRIER);
3340 message->initialize_elements();
3341 message->set_elements(Heap::empty_fixed_array(), SKIP_WRITE_BARRIER);
3342 message->set_type(type);
3343 message->set_arguments(arguments);
3344 message->set_start_position(start_position);
3345 message->set_end_position(end_position);
3346 message->set_script(script);
3347 message->set_stack_trace(stack_trace);
3348 message->set_stack_frames(stack_frames);
3354 // Returns true for a character in a range. Both limits are inclusive.
3355 static inline bool Between(uint32_t character, uint32_t from, uint32_t to) {
3356 // This makes use of the unsigned wraparound.
3357 return character - from <= to - from;
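// A minimal sketch of how the wraparound in Between above folds a two-sided
// range check into one comparison: values below 'from' wrap around to very
// large unsigned numbers and therefore fail the single comparison. The
// helper name is local to this sketch.
static inline bool SketchIsAsciiDigit(uint32_t character) {
  return character - '0' <= static_cast<uint32_t>('9' - '0');
}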
3361 MUST_USE_RESULT static inline MaybeObject* MakeOrFindTwoCharacterString(
3366 // Numeric strings have a different hash algorithm not known by
3367 // LookupTwoCharsStringIfExists, so we skip this step for such strings.
3368 if ((!Between(c1, '0', '9') || !Between(c2, '0', '9')) &&
3369 heap->string_table()->LookupTwoCharsStringIfExists(c1, c2, &result)) {
3371 // Now we know the length is 2, we might as well make use of that fact
3372 // when building the new string.
3373 } else if (static_cast<unsigned>(c1 | c2) <= String::kMaxOneByteCharCodeU) {
3375 ASSERT(IsPowerOf2(String::kMaxOneByteCharCodeU + 1)); // because of this.
3377 { MaybeObject* maybe_result = heap->AllocateRawOneByteString(2);
3378 if (!maybe_result->ToObject(&result)) return maybe_result;
3380 uint8_t* dest = SeqOneByteString::cast(result)->GetChars();
3381 dest[0] = static_cast<uint8_t>(c1);
3382 dest[1] = static_cast<uint8_t>(c2);
3386 { MaybeObject* maybe_result = heap->AllocateRawTwoByteString(2);
3387 if (!maybe_result->ToObject(&result)) return maybe_result;
3389 uc16* dest = SeqTwoByteString::cast(result)->GetChars();
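// A minimal sketch of the mask test in MakeOrFindTwoCharacterString above:
// because String::kMaxOneByteCharCodeU + 1 is a power of two (see the ASSERT
// there), the limit is an all-ones bit mask, so OR-ing both char codes
// together stays within the limit exactly when each code does individually.
// The helper name is local to this sketch.
static inline bool SketchBothFitInOneByteString(unsigned c1,
                                                unsigned c2,
                                                unsigned one_byte_limit) {
  return (c1 | c2) <= one_byte_limit;
}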
3397 MaybeObject* Heap::AllocateConsString(String* first, String* second) {
3398 int first_length = first->length();
3399 if (first_length == 0) {
3403 int second_length = second->length();
3404 if (second_length == 0) {
3408 int length = first_length + second_length;
3410 // Optimization for 2-byte strings often used as keys in a decompression
3411 // dictionary. Check whether we already have the string in the string
3412 // table to prevent creation of many unnecessary strings.
3414 uint16_t c1 = first->Get(0);
3415 uint16_t c2 = second->Get(0);
3416 return MakeOrFindTwoCharacterString(this, c1, c2);
3419 bool first_is_one_byte = first->IsOneByteRepresentation();
3420 bool second_is_one_byte = second->IsOneByteRepresentation();
3421 bool is_one_byte = first_is_one_byte && second_is_one_byte;
3422 // Make sure that an out of memory exception is thrown if the length
3423 // of the new cons string is too large.
3424 if (length > String::kMaxLength || length < 0) {
3425 isolate()->context()->mark_out_of_memory();
3426 return Failure::OutOfMemoryException(0x4);
3429 bool is_one_byte_data_in_two_byte_string = false;
3431 // At least one of the strings uses two-byte representation so we
3432 // can't use the fast case code for short ASCII strings below, but
3433 // we can try to save memory if all chars actually fit in ASCII.
3434 is_one_byte_data_in_two_byte_string =
3435 first->HasOnlyOneByteChars() && second->HasOnlyOneByteChars();
3436 if (is_one_byte_data_in_two_byte_string) {
3437 isolate_->counters()->string_add_runtime_ext_to_ascii()->Increment();
3441 // If the resulting string is small make a flat string.
3442 if (length < ConsString::kMinLength) {
3443 // Note that neither of the two inputs can be a slice because:
3444 STATIC_ASSERT(ConsString::kMinLength <= SlicedString::kMinLength);
3445 ASSERT(first->IsFlat());
3446 ASSERT(second->IsFlat());
3449 { MaybeObject* maybe_result = AllocateRawOneByteString(length);
3450 if (!maybe_result->ToObject(&result)) return maybe_result;
3452 // Copy the characters into the new object.
3453 uint8_t* dest = SeqOneByteString::cast(result)->GetChars();
3456 if (first->IsExternalString()) {
3457 src = ExternalAsciiString::cast(first)->GetChars();
3459 src = SeqOneByteString::cast(first)->GetChars();
3461 for (int i = 0; i < first_length; i++) *dest++ = src[i];
3462 // Copy second part.
3463 if (second->IsExternalString()) {
3464 src = ExternalAsciiString::cast(second)->GetChars();
3466 src = SeqOneByteString::cast(second)->GetChars();
3468 for (int i = 0; i < second_length; i++) *dest++ = src[i];
3471 if (is_one_byte_data_in_two_byte_string) {
3473 { MaybeObject* maybe_result = AllocateRawOneByteString(length);
3474 if (!maybe_result->ToObject(&result)) return maybe_result;
3476 // Copy the characters into the new object.
3477 uint8_t* dest = SeqOneByteString::cast(result)->GetChars();
3478 String::WriteToFlat(first, dest, 0, first_length);
3479 String::WriteToFlat(second, dest + first_length, 0, second_length);
3480 isolate_->counters()->string_add_runtime_ext_to_ascii()->Increment();
3485 { MaybeObject* maybe_result = AllocateRawTwoByteString(length);
3486 if (!maybe_result->ToObject(&result)) return maybe_result;
3488 // Copy the characters into the new object.
3489 uc16* dest = SeqTwoByteString::cast(result)->GetChars();
3490 String::WriteToFlat(first, dest, 0, first_length);
3491 String::WriteToFlat(second, dest + first_length, 0, second_length);
3496 Map* map = (is_one_byte || is_one_byte_data_in_two_byte_string) ?
3497 cons_ascii_string_map() : cons_string_map();
3500 { MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
3501 if (!maybe_result->ToObject(&result)) return maybe_result;
3504 AssertNoAllocation no_gc;
3505 ConsString* cons_string = ConsString::cast(result);
3506 WriteBarrierMode mode = cons_string->GetWriteBarrierMode(no_gc);
3507 cons_string->set_length(length);
3508 cons_string->set_hash_field(String::kEmptyHashField);
3509 cons_string->set_first(first, mode);
3510 cons_string->set_second(second, mode);
3515 MaybeObject* Heap::AllocateSubString(String* buffer,
3518 PretenureFlag pretenure) {
3519 int length = end - start;
3521 return empty_string();
3522 } else if (length == 1) {
3523 return LookupSingleCharacterStringFromCode(buffer->Get(start));
3524 } else if (length == 2) {
3525 // Optimization for 2-byte strings often used as keys in a decompression
3526 // dictionary. Check whether we already have the string in the string
3527 // table to prevent creation of many unnecessary strings.
3528 uint16_t c1 = buffer->Get(start);
3529 uint16_t c2 = buffer->Get(start + 1);
3530 return MakeOrFindTwoCharacterString(this, c1, c2);
3533 // Make an attempt to flatten the buffer to reduce access time.
3534 buffer = buffer->TryFlattenGetString();
3536 if (!FLAG_string_slices ||
3537 !buffer->IsFlat() ||
3538 length < SlicedString::kMinLength ||
3539 pretenure == TENURED) {
3541 // WriteToFlat takes care of the case when an indirect string has a
3542 // different encoding from its underlying string. These encodings may
3543 // differ because of externalization.
3544 bool is_one_byte = buffer->IsOneByteRepresentation();
3545 { MaybeObject* maybe_result = is_one_byte
3546 ? AllocateRawOneByteString(length, pretenure)
3547 : AllocateRawTwoByteString(length, pretenure);
3548 if (!maybe_result->ToObject(&result)) return maybe_result;
3550 String* string_result = String::cast(result);
3551 // Copy the characters into the new object.
3553 ASSERT(string_result->IsOneByteRepresentation());
3554 uint8_t* dest = SeqOneByteString::cast(string_result)->GetChars();
3555 String::WriteToFlat(buffer, dest, start, end);
3557 ASSERT(string_result->IsTwoByteRepresentation());
3558 uc16* dest = SeqTwoByteString::cast(string_result)->GetChars();
3559 String::WriteToFlat(buffer, dest, start, end);
3564 ASSERT(buffer->IsFlat());
3566 if (FLAG_verify_heap) {
3567 buffer->StringVerify();
3572 // When slicing an indirect string we use its encoding for a newly created
3573 // slice and don't check the encoding of the underlying string. This is safe
3574 // even if the encodings are different because of externalization. If an
3575 // indirect ASCII string is pointing to a two-byte string, the two-byte char
3576 // codes of the underlying string must still fit into ASCII (because
3577 // externalization must not change char codes).
3578 { Map* map = buffer->IsOneByteRepresentation()
3579 ? sliced_ascii_string_map()
3580 : sliced_string_map();
3581 MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
3582 if (!maybe_result->ToObject(&result)) return maybe_result;
3585 AssertNoAllocation no_gc;
3586 SlicedString* sliced_string = SlicedString::cast(result);
3587 sliced_string->set_length(length);
3588 sliced_string->set_hash_field(String::kEmptyHashField);
3589 if (buffer->IsConsString()) {
3590 ConsString* cons = ConsString::cast(buffer);
3591 ASSERT(cons->second()->length() == 0);
3592 sliced_string->set_parent(cons->first());
3593 sliced_string->set_offset(start);
3594 } else if (buffer->IsSlicedString()) {
3595 // Prevent nesting sliced strings.
3596 SlicedString* parent_slice = SlicedString::cast(buffer);
3597 sliced_string->set_parent(parent_slice->parent());
3598 sliced_string->set_offset(start + parent_slice->offset());
3600 sliced_string->set_parent(buffer);
3601 sliced_string->set_offset(start);
3603 ASSERT(sliced_string->parent()->IsSeqString() ||
3604 sliced_string->parent()->IsExternalString());
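// Editor's sketch (not part of the original source): how the branches above
// keep slices exactly one level deep.  Suppose 'buffer' is itself a
// SlicedString with parent 'p' and offset 10, and we slice it at start == 5:
//
//   SlicedString* parent_slice = SlicedString::cast(buffer);
//   sliced_string->set_parent(parent_slice->parent());     // points at 'p'
//   sliced_string->set_offset(5 + parent_slice->offset());  // offset 15 in 'p'
//
// The new slice therefore refers directly to the flat string 'p' at offset 15,
// which is what the trailing ASSERT (parent is a SeqString or ExternalString)
// checks.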
3609 MaybeObject* Heap::AllocateExternalStringFromAscii(
3610 const ExternalAsciiString::Resource* resource) {
3611 size_t length = resource->length();
3612 if (length > static_cast<size_t>(String::kMaxLength)) {
3613 isolate()->context()->mark_out_of_memory();
3614 return Failure::OutOfMemoryException(0x5);
3617 Map* map = external_ascii_string_map();
3619 { MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
3620 if (!maybe_result->ToObject(&result)) return maybe_result;
3623 ExternalAsciiString* external_string = ExternalAsciiString::cast(result);
3624 external_string->set_length(static_cast<int>(length));
3625 external_string->set_hash_field(String::kEmptyHashField);
3626 external_string->set_resource(resource);
3632 MaybeObject* Heap::AllocateExternalStringFromTwoByte(
3633 const ExternalTwoByteString::Resource* resource) {
3634 size_t length = resource->length();
3635 if (length > static_cast<size_t>(String::kMaxLength)) {
3636 isolate()->context()->mark_out_of_memory();
3637 return Failure::OutOfMemoryException(0x6);
3640 // For small strings we check whether the resource contains only
3641 // one-byte characters. If so, we use a different string map.
3642 static const size_t kOneByteCheckLengthLimit = 32;
3643 bool is_one_byte = length <= kOneByteCheckLengthLimit &&
3644 String::IsOneByte(resource->data(), static_cast<int>(length));
3645 Map* map = is_one_byte ?
3646 external_string_with_one_byte_data_map() : external_string_map();
3648 { MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
3649 if (!maybe_result->ToObject(&result)) return maybe_result;
3652 ExternalTwoByteString* external_string = ExternalTwoByteString::cast(result);
3653 external_string->set_length(static_cast<int>(length));
3654 external_string->set_hash_field(String::kEmptyHashField);
3655 external_string->set_resource(resource);
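// Editor's sketch (not part of the original source): the map-selection rule
// used above, written out on its own.  'resource' is assumed to be an
// ExternalTwoByteString::Resource, as in the function above.
//
//   size_t length = resource->length();
//   bool is_one_byte = length <= 32 /* kOneByteCheckLengthLimit */ &&
//       String::IsOneByte(resource->data(), static_cast<int>(length));
//   Map* map = is_one_byte ? external_string_with_one_byte_data_map()
//                          : external_string_map();
//
// Longer strings skip the scan and always get the plain two-byte external map;
// the limit only bounds the cost of the IsOneByte check.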
3661 MaybeObject* Heap::LookupSingleCharacterStringFromCode(uint16_t code) {
3662 if (code <= String::kMaxOneByteCharCode) {
3663 Object* value = single_character_string_cache()->get(code);
3664 if (value != undefined_value()) return value;
3667 buffer[0] = static_cast<uint8_t>(code);
3669 MaybeObject* maybe_result =
3670 InternalizeOneByteString(Vector<const uint8_t>(buffer, 1));
3672 if (!maybe_result->ToObject(&result)) return maybe_result;
3673 single_character_string_cache()->set(code, result);
3678 { MaybeObject* maybe_result = AllocateRawTwoByteString(1);
3679 if (!maybe_result->ToObject(&result)) return maybe_result;
3681 String* answer = String::cast(result);
3682 answer->Set(0, code);
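// Editor's sketch (not part of the original source): typical use of the
// single-character cache above.  The first lookup for a code unit interns a
// one-character string and stores it; later lookups return the same object.
//
//   Object* s;
//   MaybeObject* maybe_s = LookupSingleCharacterStringFromCode('a');
//   if (!maybe_s->ToObject(&s)) return maybe_s;  // retry on allocation failure
//   // A second call with 'a' now hits single_character_string_cache()
//   // and returns the identical String*.
//
// Code units above String::kMaxOneByteCharCode bypass the cache and get a
// fresh two-byte string, as in the tail of the function above.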
3687 MaybeObject* Heap::AllocateByteArray(int length, PretenureFlag pretenure) {
3688 if (length < 0 || length > ByteArray::kMaxLength) {
3689 return Failure::OutOfMemoryException(0x7);
3691 if (pretenure == NOT_TENURED) {
3692 return AllocateByteArray(length);
3694 int size = ByteArray::SizeFor(length);
3696 { MaybeObject* maybe_result = (size <= Page::kMaxNonCodeHeapObjectSize)
3697 ? old_data_space_->AllocateRaw(size)
3698 : lo_space_->AllocateRaw(size, NOT_EXECUTABLE);
3699 if (!maybe_result->ToObject(&result)) return maybe_result;
3702 reinterpret_cast<ByteArray*>(result)->set_map_no_write_barrier(
3704 reinterpret_cast<ByteArray*>(result)->set_length(length);
3709 MaybeObject* Heap::AllocateByteArray(int length) {
3710 if (length < 0 || length > ByteArray::kMaxLength) {
3711 return Failure::OutOfMemoryException(0x8);
3713 int size = ByteArray::SizeFor(length);
3714 AllocationSpace space =
3715 (size > Page::kMaxNonCodeHeapObjectSize) ? LO_SPACE : NEW_SPACE;
3717 { MaybeObject* maybe_result = AllocateRaw(size, space, OLD_DATA_SPACE);
3718 if (!maybe_result->ToObject(&result)) return maybe_result;
3721 reinterpret_cast<ByteArray*>(result)->set_map_no_write_barrier(
3723 reinterpret_cast<ByteArray*>(result)->set_length(length);
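// Editor's sketch (not part of the original source): the usual caller-side
// pattern for the allocators above, here for a small tenured ByteArray.
//
//   Object* obj;
//   { MaybeObject* maybe_obj = AllocateByteArray(16, TENURED);
//     if (!maybe_obj->ToObject(&obj)) return maybe_obj;
//   }
//   ByteArray* bytes = ByteArray::cast(obj);
//
// Note the space choice above: tenured byte arrays go to old data space unless
// ByteArray::SizeFor(length) exceeds Page::kMaxNonCodeHeapObjectSize, in which
// case they are placed directly in large object space.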
3728 void Heap::CreateFillerObjectAt(Address addr, int size) {
3729 if (size == 0) return;
3730 HeapObject* filler = HeapObject::FromAddress(addr);
3731 if (size == kPointerSize) {
3732 filler->set_map_no_write_barrier(one_pointer_filler_map());
3733 } else if (size == 2 * kPointerSize) {
3734 filler->set_map_no_write_barrier(two_pointer_filler_map());
3736 filler->set_map_no_write_barrier(free_space_map());
3737 FreeSpace::cast(filler)->set_size(size);
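// Editor's sketch (not part of the original source): a common use of
// CreateFillerObjectAt is to keep the heap iterable after shrinking an object
// in place.  Assuming 'object' has just been trimmed from old_size to
// new_size bytes:
//
//   int delta = old_size - new_size;
//   CreateFillerObjectAt(object->address() + new_size, delta);
//
// Depending on delta this writes a one-pointer filler, a two-pointer filler,
// or a FreeSpace object, exactly as the size switch above does.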
3742 MaybeObject* Heap::AllocateExternalArray(int length,
3743 ExternalArrayType array_type,
3744 void* external_pointer,
3745 PretenureFlag pretenure) {
3746 AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
3748 { MaybeObject* maybe_result = AllocateRaw(ExternalArray::kAlignedSize,
3751 if (!maybe_result->ToObject(&result)) return maybe_result;
3754 reinterpret_cast<ExternalArray*>(result)->set_map_no_write_barrier(
3755 MapForExternalArrayType(array_type));
3756 reinterpret_cast<ExternalArray*>(result)->set_length(length);
3757 reinterpret_cast<ExternalArray*>(result)->set_external_pointer(
3764 MaybeObject* Heap::CreateCode(const CodeDesc& desc,
3765 Code::Flags flags,
3766 Handle<Object> self_reference,
3767 bool immovable,
3768 bool crankshafted) {
3769 // Allocate ByteArray before the Code object, so that we do not risk
3770 // leaving an uninitialized Code object (and breaking the heap).
3771 ByteArray* reloc_info;
3772 MaybeObject* maybe_reloc_info = AllocateByteArray(desc.reloc_size, TENURED);
3773 if (!maybe_reloc_info->To(&reloc_info)) return maybe_reloc_info;
3776 int body_size = RoundUp(desc.instr_size, kObjectAlignment);
3777 int obj_size = Code::SizeFor(body_size);
3778 ASSERT(IsAligned(static_cast<intptr_t>(obj_size), kCodeAlignment));
3779 MaybeObject* maybe_result;
3780 // Large code objects and code objects which should stay at a fixed address
3781 // are allocated in large object space.
3783 bool force_lo_space = obj_size > code_space()->AreaSize();
3784 if (force_lo_space) {
3785 maybe_result = lo_space_->AllocateRaw(obj_size, EXECUTABLE);
3787 maybe_result = code_space_->AllocateRaw(obj_size);
3789 if (!maybe_result->To<HeapObject>(&result)) return maybe_result;
3791 if (immovable && !force_lo_space &&
3792 // Objects on the first page of each space are never moved.
3793 !code_space_->FirstPage()->Contains(result->address())) {
3794 // Discard the first code allocation, which was on a page where it could be moved.
3796 CreateFillerObjectAt(result->address(), obj_size);
3797 maybe_result = lo_space_->AllocateRaw(obj_size, EXECUTABLE);
3798 if (!maybe_result->To<HeapObject>(&result)) return maybe_result;
3801 // Initialize the object
3802 result->set_map_no_write_barrier(code_map());
3803 Code* code = Code::cast(result);
3804 ASSERT(!isolate_->code_range()->exists() ||
3805 isolate_->code_range()->contains(code->address()));
3806 code->set_instruction_size(desc.instr_size);
3807 code->set_relocation_info(reloc_info);
3808 code->set_flags(flags);
3809 if (code->is_call_stub() || code->is_keyed_call_stub()) {
3810 code->set_check_type(RECEIVER_MAP_CHECK);
3812 code->set_is_crankshafted(crankshafted);
3813 code->set_deoptimization_data(empty_fixed_array(), SKIP_WRITE_BARRIER);
3814 code->InitializeTypeFeedbackInfoNoWriteBarrier(undefined_value());
3815 code->set_handler_table(empty_fixed_array(), SKIP_WRITE_BARRIER);
3816 code->set_gc_metadata(Smi::FromInt(0));
3817 code->set_ic_age(global_ic_age_);
3818 code->set_prologue_offset(kPrologueOffsetNotSet);
3819 if (code->kind() == Code::OPTIMIZED_FUNCTION) {
3820 code->set_marked_for_deoptimization(false);
3822 // Allow self-references to the created code object by patching the handle to
3823 // point to the newly allocated Code object.
3824 if (!self_reference.is_null()) {
3825 *(self_reference.location()) = code;
3827 // Migrate generated code.
3828 // The generated code can contain Object** values (typically from handles)
3829 // that are dereferenced during the copy to point directly to the actual heap
3830 // objects. These pointers can include references to the code object itself,
3831 // through the self_reference parameter.
3832 code->CopyFrom(desc);
3835 if (FLAG_verify_heap) {
3843 MaybeObject* Heap::CopyCode(Code* code) {
3844 // Allocate an object the same size as the code object.
3845 int obj_size = code->Size();
3846 MaybeObject* maybe_result;
3847 if (obj_size > code_space()->AreaSize()) {
3848 maybe_result = lo_space_->AllocateRaw(obj_size, EXECUTABLE);
3850 maybe_result = code_space_->AllocateRaw(obj_size);
3854 if (!maybe_result->ToObject(&result)) return maybe_result;
3856 // Copy code object.
3857 Address old_addr = code->address();
3858 Address new_addr = reinterpret_cast<HeapObject*>(result)->address();
3859 CopyBlock(new_addr, old_addr, obj_size);
3860 // Relocate the copy.
3861 Code* new_code = Code::cast(result);
3862 ASSERT(!isolate_->code_range()->exists() ||
3863 isolate_->code_range()->contains(code->address()));
3864 new_code->Relocate(new_addr - old_addr);
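// Editor's sketch (not part of the original source): both CreateCode and
// CopyCode pick the target space for code objects with the same rule.
//
//   MaybeObject* maybe_result;
//   if (obj_size > code_space()->AreaSize()) {
//     maybe_result = lo_space_->AllocateRaw(obj_size, EXECUTABLE);
//   } else {
//     maybe_result = code_space_->AllocateRaw(obj_size);
//   }
//
// Only objects that fit on a regular code-space page go there; anything larger
// must live in large object space, allocated as EXECUTABLE memory.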
3869 MaybeObject* Heap::CopyCode(Code* code, Vector<byte> reloc_info) {
3870 // Allocate ByteArray before the Code object, so that we do not risk
3871 // leaving an uninitialized Code object (and breaking the heap).
3872 Object* reloc_info_array;
3873 { MaybeObject* maybe_reloc_info_array =
3874 AllocateByteArray(reloc_info.length(), TENURED);
3875 if (!maybe_reloc_info_array->ToObject(&reloc_info_array)) {
3876 return maybe_reloc_info_array;
3880 int new_body_size = RoundUp(code->instruction_size(), kObjectAlignment);
3882 int new_obj_size = Code::SizeFor(new_body_size);
3884 Address old_addr = code->address();
3886 size_t relocation_offset =
3887 static_cast<size_t>(code->instruction_end() - old_addr);
3889 MaybeObject* maybe_result;
3890 if (new_obj_size > code_space()->AreaSize()) {
3891 maybe_result = lo_space_->AllocateRaw(new_obj_size, EXECUTABLE);
3893 maybe_result = code_space_->AllocateRaw(new_obj_size);
3897 if (!maybe_result->ToObject(&result)) return maybe_result;
3899 // Copy code object.
3900 Address new_addr = reinterpret_cast<HeapObject*>(result)->address();
3902 // Copy header and instructions.
3903 CopyBytes(new_addr, old_addr, relocation_offset);
3905 Code* new_code = Code::cast(result);
3906 new_code->set_relocation_info(ByteArray::cast(reloc_info_array));
3908 // Copy patched rinfo.
3909 CopyBytes(new_code->relocation_start(),
3911 static_cast<size_t>(reloc_info.length()));
3913 // Relocate the copy.
3914 ASSERT(!isolate_->code_range()->exists() ||
3915 isolate_->code_range()->contains(code->address()));
3916 new_code->Relocate(new_addr - old_addr);
3919 if (FLAG_verify_heap) {
3927 MaybeObject* Heap::AllocateWithAllocationSite(Map* map, AllocationSpace space,
3928 Handle<Object> allocation_site_info_payload) {
3929 ASSERT(gc_state_ == NOT_IN_GC);
3930 ASSERT(map->instance_type() != MAP_TYPE);
3931 // If allocation failures are disallowed, we may allocate in a different
3932 // space when new space is full and the object is not a large object.
3933 AllocationSpace retry_space =
3934 (space != NEW_SPACE) ? space : TargetSpaceId(map->instance_type());
3935 int size = map->instance_size() + AllocationSiteInfo::kSize;
3937 MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
3938 if (!maybe_result->ToObject(&result)) return maybe_result;
3939 // No need for write barrier since object is white and map is in old space.
3940 HeapObject::cast(result)->set_map_no_write_barrier(map);
3941 AllocationSiteInfo* alloc_info = reinterpret_cast<AllocationSiteInfo*>(
3942 reinterpret_cast<Address>(result) + map->instance_size());
3943 alloc_info->set_map_no_write_barrier(allocation_site_info_map());
3944 alloc_info->set_payload(*allocation_site_info_payload, SKIP_WRITE_BARRIER);
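// Editor's sketch (not part of the original source): layout of an allocation
// made by AllocateWithAllocationSite for a map with instance_size() == S.
//
//   [object body ............ S bytes]   map set via set_map_no_write_barrier
//   [AllocationSiteInfo .. kSize bytes]  map:     allocation_site_info_map()
//                                        payload: *allocation_site_info_payload
//
// The total request above is therefore S + AllocationSiteInfo::kSize, and the
// site info is later located by looking immediately past the object body.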
3949 MaybeObject* Heap::Allocate(Map* map, AllocationSpace space) {
3950 ASSERT(gc_state_ == NOT_IN_GC);
3951 ASSERT(map->instance_type() != MAP_TYPE);
3952 // If allocation failures are disallowed, we may allocate in a different
3953 // space when new space is full and the object is not a large object.
3954 AllocationSpace retry_space =
3955 (space != NEW_SPACE) ? space : TargetSpaceId(map->instance_type());
3956 int size = map->instance_size();
3958 MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
3959 if (!maybe_result->ToObject(&result)) return maybe_result;
3960 // No need for write barrier since object is white and map is in old space.
3961 HeapObject::cast(result)->set_map_no_write_barrier(map);
3966 void Heap::InitializeFunction(JSFunction* function,
3967 SharedFunctionInfo* shared,
3968 Object* prototype) {
3969 ASSERT(!prototype->IsMap());
3970 function->initialize_properties();
3971 function->initialize_elements();
3972 function->set_shared(shared);
3973 function->set_code(shared->code());
3974 function->set_prototype_or_initial_map(prototype);
3975 function->set_context(undefined_value());
3976 function->set_literals_or_bindings(empty_fixed_array());
3977 function->set_next_function_link(undefined_value());
3981 MaybeObject* Heap::AllocateFunctionPrototype(JSFunction* function) {
3982 // Make sure to use globals from the function's context, since the function
3983 // can be from a different context.
3984 Context* native_context = function->context()->native_context();
3985 bool needs_constructor_property;
3987 if (function->shared()->is_generator()) {
3988 // Generator prototypes can share maps since they don't have "constructor" properties.
3990 new_map = native_context->generator_object_prototype_map();
3991 needs_constructor_property = false;
3993 // Each function prototype gets a fresh map to avoid unwanted sharing of
3994 // maps between prototypes of different constructors.
3995 JSFunction* object_function = native_context->object_function();
3996 ASSERT(object_function->has_initial_map());
3997 MaybeObject* maybe_map = object_function->initial_map()->Copy();
3998 if (!maybe_map->To(&new_map)) return maybe_map;
3999 needs_constructor_property = true;
4003 MaybeObject* maybe_prototype = AllocateJSObjectFromMap(new_map);
4004 if (!maybe_prototype->ToObject(&prototype)) return maybe_prototype;
4006 if (needs_constructor_property) {
4007 MaybeObject* maybe_failure =
4008 JSObject::cast(prototype)->SetLocalPropertyIgnoreAttributes(
4009 constructor_string(), function, DONT_ENUM);
4010 if (maybe_failure->IsFailure()) return maybe_failure;
4017 MaybeObject* Heap::AllocateFunction(Map* function_map,
4018 SharedFunctionInfo* shared,
4019 Object* prototype,
4020 PretenureFlag pretenure) {
4021 AllocationSpace space =
4022 (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
4024 { MaybeObject* maybe_result = Allocate(function_map, space);
4025 if (!maybe_result->ToObject(&result)) return maybe_result;
4027 InitializeFunction(JSFunction::cast(result), shared, prototype);
4032 MaybeObject* Heap::AllocateArgumentsObject(Object* callee, int length) {
4033 // To get fast allocation and map sharing for arguments objects we
4034 // allocate them based on an arguments boilerplate.
4036 JSObject* boilerplate;
4037 int arguments_object_size;
4038 bool strict_mode_callee = callee->IsJSFunction() &&
4039 !JSFunction::cast(callee)->shared()->is_classic_mode();
4040 if (strict_mode_callee) {
4042 isolate()->context()->native_context()->
4043 strict_mode_arguments_boilerplate();
4044 arguments_object_size = kArgumentsObjectSizeStrict;
4047 isolate()->context()->native_context()->arguments_boilerplate();
4048 arguments_object_size = kArgumentsObjectSize;
4051 // This calls Copy directly rather than using Heap::AllocateRaw so we
4052 // duplicate the check here.
4053 ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
4055 // Check that the size of the boilerplate matches our
4056 // expectations. The ArgumentsAccessStub::GenerateNewObject relies
4057 // on the size being a known constant.
4058 ASSERT(arguments_object_size == boilerplate->map()->instance_size());
4060 // Do the allocation.
4062 { MaybeObject* maybe_result =
4063 AllocateRaw(arguments_object_size, NEW_SPACE, OLD_POINTER_SPACE);
4064 if (!maybe_result->ToObject(&result)) return maybe_result;
4067 // Copy the content. The arguments boilerplate doesn't have any
4068 // fields that point to new space, so it is safe to skip the write barrier.
4070 CopyBlock(HeapObject::cast(result)->address(),
4071 boilerplate->address(),
4072 JSObject::kHeaderSize);
4074 // Set the length property.
4075 JSObject::cast(result)->InObjectPropertyAtPut(kArgumentsLengthIndex,
4076 Smi::FromInt(length),
4077 SKIP_WRITE_BARRIER);
4078 // Set the callee property for non-strict mode arguments object only.
4079 if (!strict_mode_callee) {
4080 JSObject::cast(result)->InObjectPropertyAtPut(kArgumentsCalleeIndex,
4084 // Check the state of the object
4085 ASSERT(JSObject::cast(result)->HasFastProperties());
4086 ASSERT(JSObject::cast(result)->HasFastObjectElements());
4092 static bool HasDuplicates(DescriptorArray* descriptors) {
4093 int count = descriptors->number_of_descriptors();
4095 Name* prev_key = descriptors->GetKey(0);
4096 for (int i = 1; i != count; i++) {
4097 Name* current_key = descriptors->GetKey(i);
4098 if (prev_key == current_key) return true;
4099 prev_key = current_key;
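// Editor's sketch (not part of the original source): HasDuplicates only needs
// to compare neighbours because its caller sorts the descriptors first (see
// descriptors->Sort() in AllocateInitialMap below).  For example, with sorted
// keys
//
//   ["a", "b", "b", "c"]
//
// the i == 2 iteration sees prev_key == current_key ("b") and returns true,
// turning an O(n^2) pairwise scan into a single linear pass.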
4106 MaybeObject* Heap::AllocateInitialMap(JSFunction* fun) {
4107 ASSERT(!fun->has_initial_map());
4109 // First create a new map with the size and number of in-object properties
4110 // suggested by the function.
4111 InstanceType instance_type;
4113 int in_object_properties;
4114 if (fun->shared()->is_generator()) {
4115 instance_type = JS_GENERATOR_OBJECT_TYPE;
4116 instance_size = JSGeneratorObject::kSize;
4117 in_object_properties = 0;
4119 instance_type = JS_OBJECT_TYPE;
4120 instance_size = fun->shared()->CalculateInstanceSize();
4121 in_object_properties = fun->shared()->CalculateInObjectProperties();
4124 MaybeObject* maybe_map = AllocateMap(instance_type, instance_size);
4125 if (!maybe_map->To(&map)) return maybe_map;
4127 // Fetch or allocate prototype.
4129 if (fun->has_instance_prototype()) {
4130 prototype = fun->instance_prototype();
4132 MaybeObject* maybe_prototype = AllocateFunctionPrototype(fun);
4133 if (!maybe_prototype->To(&prototype)) return maybe_prototype;
4135 map->set_inobject_properties(in_object_properties);
4136 map->set_unused_property_fields(in_object_properties);
4137 map->set_prototype(prototype);
4138 ASSERT(map->has_fast_object_elements());
4140 // If the function has only simple this-property assignments, add
4141 // field descriptors for these to the initial map as the object
4142 // cannot be constructed without having these properties. Guard by
4143 // the inline_new flag so we only change the map if we generate a
4144 // specialized construct stub.
4145 ASSERT(in_object_properties <= Map::kMaxPreAllocatedPropertyFields);
4146 if (instance_type == JS_OBJECT_TYPE &&
4147 fun->shared()->CanGenerateInlineConstructor(prototype)) {
4148 int count = fun->shared()->this_property_assignments_count();
4149 if (count > in_object_properties) {
4150 // Inline constructor can only handle inobject properties.
4151 fun->shared()->ForbidInlineConstructor();
4153 DescriptorArray* descriptors;
4154 MaybeObject* maybe_descriptors = DescriptorArray::Allocate(count);
4155 if (!maybe_descriptors->To(&descriptors)) return maybe_descriptors;
4157 DescriptorArray::WhitenessWitness witness(descriptors);
4158 for (int i = 0; i < count; i++) {
4159 String* name = fun->shared()->GetThisPropertyAssignmentName(i);
4160 ASSERT(name->IsInternalizedString());
4161 // TODO(verwaest): Since we cannot update the boilerplate's map yet,
4162 // initialize to the worst case.
4163 FieldDescriptor field(name, i, NONE, Representation::Tagged());
4164 descriptors->Set(i, &field, witness);
4166 descriptors->Sort();
4168 // The descriptors may contain duplicates because the compiler does not
4169 // guarantee the uniqueness of property names (it would have required
4170 // quadratic time). Once the descriptors are sorted we can check for
4171 // duplicates in linear time.
4172 if (HasDuplicates(descriptors)) {
4173 fun->shared()->ForbidInlineConstructor();
4175 map->InitializeDescriptors(descriptors);
4176 map->set_pre_allocated_property_fields(count);
4177 map->set_unused_property_fields(in_object_properties - count);
4182 if (instance_type == JS_OBJECT_TYPE) {
4183 fun->shared()->StartInobjectSlackTracking(map);
4190 void Heap::InitializeJSObjectFromMap(JSObject* obj,
4191 FixedArray* properties,
4192 Map* map) {
4193 obj->set_properties(properties);
4194 obj->initialize_elements();
4195 // TODO(1240798): Initialize the object's body using valid initial values
4196 // according to the object's initial map. For example, if the map's
4197 // instance type is JS_ARRAY_TYPE, the length field should be initialized
4198 // to a number (e.g. Smi::FromInt(0)) and the elements initialized to a
4199 // fixed array (e.g. Heap::empty_fixed_array()). Currently, the object
4200 // verification code has to cope with (temporarily) invalid objects. See
4201 // for example, JSArray::JSArrayVerify.
4203 // We cannot always fill with one_pointer_filler_map because objects
4204 // created from API functions expect their internal fields to be initialized
4205 // with undefined_value.
4206 // Pre-allocated fields need to be initialized with undefined_value as well
4207 // so that object accesses before the constructor completes (e.g. in the
4208 // debugger) will not cause a crash.
4209 if (map->constructor()->IsJSFunction() &&
4210 JSFunction::cast(map->constructor())->shared()->
4211 IsInobjectSlackTrackingInProgress()) {
4212 // We might want to shrink the object later.
4213 ASSERT(obj->GetInternalFieldCount() == 0);
4214 filler = Heap::one_pointer_filler_map();
4216 filler = Heap::undefined_value();
4218 obj->InitializeBody(map, Heap::undefined_value(), filler);
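// Editor's sketch (not part of the original source): the filler choice above,
// condensed.  'map' and 'obj' are the parameters of the function above.
//
//   Object* filler =
//       (map->constructor()->IsJSFunction() &&
//        JSFunction::cast(map->constructor())->shared()->
//            IsInobjectSlackTrackingInProgress())
//           ? Heap::one_pointer_filler_map()   // object may be shrunk later
//           : Heap::undefined_value();         // fields must read as undefined
//   obj->InitializeBody(map, Heap::undefined_value(), filler);
//
// Pre-allocated fields are always initialized with undefined_value so that
// accesses before the constructor finishes (e.g. from the debugger) are safe.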
4222 MaybeObject* Heap::AllocateJSObjectFromMap(Map* map, PretenureFlag pretenure) {
4223 // JSFunctions should be allocated using AllocateFunction to be
4224 // properly initialized.
4225 ASSERT(map->instance_type() != JS_FUNCTION_TYPE);
4227 // Both types of global objects should be allocated using
4228 // AllocateGlobalObject to be properly initialized.
4229 ASSERT(map->instance_type() != JS_GLOBAL_OBJECT_TYPE);
4230 ASSERT(map->instance_type() != JS_BUILTINS_OBJECT_TYPE);
4232 // Allocate the backing storage for the properties.
4234 map->pre_allocated_property_fields() +
4235 map->unused_property_fields() -
4236 map->inobject_properties();
4237 ASSERT(prop_size >= 0);
4239 { MaybeObject* maybe_properties = AllocateFixedArray(prop_size, pretenure);
4240 if (!maybe_properties->ToObject(&properties)) return maybe_properties;
4243 // Allocate the JSObject.
4244 AllocationSpace space =
4245 (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
4246 if (map->instance_size() > Page::kMaxNonCodeHeapObjectSize) space = LO_SPACE;
4248 MaybeObject* maybe_obj = Allocate(map, space);
4249 if (!maybe_obj->To(&obj)) return maybe_obj;
4251 // Initialize the JSObject.
4252 InitializeJSObjectFromMap(JSObject::cast(obj),
4253 FixedArray::cast(properties),
4255 ASSERT(JSObject::cast(obj)->HasFastElements());
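// Editor's note (not part of the original source): a worked example of the
// property-backing-store size computed above.  With hypothetical counts
// pre_allocated_property_fields() == 2, unused_property_fields() == 5 and
// inobject_properties() == 4:
//
//   prop_size = 2 + 5 - 4 = 3
//
// i.e. three out-of-object property slots are reserved, since the remaining
// expected properties fit into the object itself.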
4260 MaybeObject* Heap::AllocateJSObjectFromMapWithAllocationSite(Map* map,
4261 Handle<Object> allocation_site_info_payload) {
4262 // JSFunctions should be allocated using AllocateFunction to be
4263 // properly initialized.
4264 ASSERT(map->instance_type() != JS_FUNCTION_TYPE);
4266 // Both types of global objects should be allocated using
4267 // AllocateGlobalObject to be properly initialized.
4268 ASSERT(map->instance_type() != JS_GLOBAL_OBJECT_TYPE);
4269 ASSERT(map->instance_type() != JS_BUILTINS_OBJECT_TYPE);
4271 // Allocate the backing storage for the properties.
4273 map->pre_allocated_property_fields() +
4274 map->unused_property_fields() -
4275 map->inobject_properties();
4276 ASSERT(prop_size >= 0);
4278 { MaybeObject* maybe_properties = AllocateFixedArray(prop_size);
4279 if (!maybe_properties->ToObject(&properties)) return maybe_properties;
4282 // Allocate the JSObject.
4283 AllocationSpace space = NEW_SPACE;
4284 if (map->instance_size() > Page::kMaxNonCodeHeapObjectSize) space = LO_SPACE;
4286 MaybeObject* maybe_obj = AllocateWithAllocationSite(map, space,
4287 allocation_site_info_payload);
4288 if (!maybe_obj->To(&obj)) return maybe_obj;
4290 // Initialize the JSObject.
4291 InitializeJSObjectFromMap(JSObject::cast(obj),
4292 FixedArray::cast(properties),
4294 ASSERT(JSObject::cast(obj)->HasFastElements());
4299 MaybeObject* Heap::AllocateJSObject(JSFunction* constructor,
4300 PretenureFlag pretenure) {
4301 // Allocate the initial map if absent.
4302 if (!constructor->has_initial_map()) {
4303 Object* initial_map;
4304 { MaybeObject* maybe_initial_map = AllocateInitialMap(constructor);
4305 if (!maybe_initial_map->ToObject(&initial_map)) return maybe_initial_map;
4307 constructor->set_initial_map(Map::cast(initial_map));
4308 Map::cast(initial_map)->set_constructor(constructor);
4310 // Allocate the object based on the constructor's initial map.
4311 MaybeObject* result = AllocateJSObjectFromMap(
4312 constructor->initial_map(), pretenure);
4314 // Make sure result is NOT a global object if valid.
4315 Object* non_failure;
4316 ASSERT(!result->ToObject(&non_failure) || !non_failure->IsGlobalObject());
4322 MaybeObject* Heap::AllocateJSObjectWithAllocationSite(JSFunction* constructor,
4323 Handle<Object> allocation_site_info_payload) {
4324 // Allocate the initial map if absent.
4325 if (!constructor->has_initial_map()) {
4326 Object* initial_map;
4327 { MaybeObject* maybe_initial_map = AllocateInitialMap(constructor);
4328 if (!maybe_initial_map->ToObject(&initial_map)) return maybe_initial_map;
4330 constructor->set_initial_map(Map::cast(initial_map));
4331 Map::cast(initial_map)->set_constructor(constructor);
4333 // Allocate the object based on the constructor's initial map, or on the advice carried in the allocation-site payload.
4335 Map* initial_map = constructor->initial_map();
4337 JSGlobalPropertyCell* cell = JSGlobalPropertyCell::cast(
4338 *allocation_site_info_payload);
4339 Smi* smi = Smi::cast(cell->value());
4340 ElementsKind to_kind = static_cast<ElementsKind>(smi->value());
4341 AllocationSiteMode mode = TRACK_ALLOCATION_SITE;
4342 if (to_kind != initial_map->elements_kind()) {
4343 MaybeObject* maybe_new_map = initial_map->AsElementsKind(to_kind);
4344 if (!maybe_new_map->To(&initial_map)) return maybe_new_map;
4345 // Possibly alter the mode, since we found an updated elements kind
4346 // in the type info cell.
4347 mode = AllocationSiteInfo::GetMode(to_kind);
4350 MaybeObject* result;
4351 if (mode == TRACK_ALLOCATION_SITE) {
4352 result = AllocateJSObjectFromMapWithAllocationSite(initial_map,
4353 allocation_site_info_payload);
4355 result = AllocateJSObjectFromMap(initial_map, NOT_TENURED);
4358 // Make sure result is NOT a global object if valid.
4359 Object* non_failure;
4360 ASSERT(!result->ToObject(&non_failure) || !non_failure->IsGlobalObject());
4366 MaybeObject* Heap::AllocateJSGeneratorObject(JSFunction *function) {
4367 ASSERT(function->shared()->is_generator());
4369 if (function->has_initial_map()) {
4370 map = function->initial_map();
4372 // Allocate the initial map if absent.
4373 MaybeObject* maybe_map = AllocateInitialMap(function);
4374 if (!maybe_map->To(&map)) return maybe_map;
4375 function->set_initial_map(map);
4376 map->set_constructor(function);
4378 ASSERT(map->instance_type() == JS_GENERATOR_OBJECT_TYPE);
4379 return AllocateJSObjectFromMap(map);
4383 MaybeObject* Heap::AllocateJSModule(Context* context, ScopeInfo* scope_info) {
4384 // Allocate a fresh map. Modules do not have a prototype.
4386 MaybeObject* maybe_map = AllocateMap(JS_MODULE_TYPE, JSModule::kSize);
4387 if (!maybe_map->To(&map)) return maybe_map;
4388 // Allocate the object based on the map.
4390 MaybeObject* maybe_module = AllocateJSObjectFromMap(map, TENURED);
4391 if (!maybe_module->To(&module)) return maybe_module;
4392 module->set_context(context);
4393 module->set_scope_info(scope_info);
4398 MaybeObject* Heap::AllocateJSArrayAndStorage(
4399 ElementsKind elements_kind,
4400 int length,
4401 int capacity,
4402 ArrayStorageAllocationMode mode,
4403 PretenureFlag pretenure) {
4404 MaybeObject* maybe_array = AllocateJSArray(elements_kind, pretenure);
4406 if (!maybe_array->To(&array)) return maybe_array;
4408 // TODO(mvstanton): this body of code duplicates AllocateJSArrayStorage
4409 // for performance reasons.
4410 ASSERT(capacity >= length);
4412 if (capacity == 0) {
4413 array->set_length(Smi::FromInt(0));
4414 array->set_elements(empty_fixed_array());
4418 FixedArrayBase* elms;
4419 MaybeObject* maybe_elms = NULL;
4420 if (IsFastDoubleElementsKind(elements_kind)) {
4421 if (mode == DONT_INITIALIZE_ARRAY_ELEMENTS) {
4422 maybe_elms = AllocateUninitializedFixedDoubleArray(capacity);
4424 ASSERT(mode == INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
4425 maybe_elms = AllocateFixedDoubleArrayWithHoles(capacity);
4428 ASSERT(IsFastSmiOrObjectElementsKind(elements_kind));
4429 if (mode == DONT_INITIALIZE_ARRAY_ELEMENTS) {
4430 maybe_elms = AllocateUninitializedFixedArray(capacity);
4432 ASSERT(mode == INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
4433 maybe_elms = AllocateFixedArrayWithHoles(capacity);
4436 if (!maybe_elms->To(&elms)) return maybe_elms;
4438 array->set_elements(elms);
4439 array->set_length(Smi::FromInt(length));
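// Editor's sketch (not part of the original source): a typical call into the
// allocator above -- an empty, holey Smi array with room for 16 elements.
//
//   JSArray* array;
//   MaybeObject* maybe_array = AllocateJSArrayAndStorage(
//       FAST_HOLEY_SMI_ELEMENTS, 0, 16,
//       INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE, NOT_TENURED);
//   if (!maybe_array->To(&array)) return maybe_array;
//
// With DONT_INITIALIZE_ARRAY_ELEMENTS the backing store is left uninitialized,
// which is generally only safe if the caller fills every element before the
// next GC can observe the array.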
4444 MaybeObject* Heap::AllocateJSArrayAndStorageWithAllocationSite(
4445 ElementsKind elements_kind,
4448 Handle<Object> allocation_site_payload,
4449 ArrayStorageAllocationMode mode) {
4450 MaybeObject* maybe_array = AllocateJSArrayWithAllocationSite(elements_kind,
4451 allocation_site_payload);
4453 if (!maybe_array->To(&array)) return maybe_array;
4454 return AllocateJSArrayStorage(array, length, capacity, mode);
4458 MaybeObject* Heap::AllocateJSArrayStorage(
4459 JSArray* array,
4460 int length,
4461 int capacity,
4462 ArrayStorageAllocationMode mode) {
4463 ASSERT(capacity >= length);
4465 if (capacity == 0) {
4466 array->set_length(Smi::FromInt(0));
4467 array->set_elements(empty_fixed_array());
4471 FixedArrayBase* elms;
4472 MaybeObject* maybe_elms = NULL;
4473 ElementsKind elements_kind = array->GetElementsKind();
4474 if (IsFastDoubleElementsKind(elements_kind)) {
4475 if (mode == DONT_INITIALIZE_ARRAY_ELEMENTS) {
4476 maybe_elms = AllocateUninitializedFixedDoubleArray(capacity);
4478 ASSERT(mode == INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
4479 maybe_elms = AllocateFixedDoubleArrayWithHoles(capacity);
4482 ASSERT(IsFastSmiOrObjectElementsKind(elements_kind));
4483 if (mode == DONT_INITIALIZE_ARRAY_ELEMENTS) {
4484 maybe_elms = AllocateUninitializedFixedArray(capacity);
4486 ASSERT(mode == INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
4487 maybe_elms = AllocateFixedArrayWithHoles(capacity);
4490 if (!maybe_elms->To(&elms)) return maybe_elms;
4492 array->set_elements(elms);
4493 array->set_length(Smi::FromInt(length));
4498 MaybeObject* Heap::AllocateJSArrayWithElements(
4499 FixedArrayBase* elements,
4500 ElementsKind elements_kind,
4502 PretenureFlag pretenure) {
4503 MaybeObject* maybe_array = AllocateJSArray(elements_kind, pretenure);
4505 if (!maybe_array->To(&array)) return maybe_array;
4507 array->set_elements(elements);
4508 array->set_length(Smi::FromInt(length));
4509 array->ValidateElements();
4514 MaybeObject* Heap::AllocateJSProxy(Object* handler, Object* prototype) {
4516 // TODO(rossberg): Once we optimize proxies, think about a scheme to share
4517 // maps. Will probably depend on the identity of the handler object, too.
4519 MaybeObject* maybe_map_obj = AllocateMap(JS_PROXY_TYPE, JSProxy::kSize);
4520 if (!maybe_map_obj->To<Map>(&map)) return maybe_map_obj;
4521 map->set_prototype(prototype);
4523 // Allocate the proxy object.
4525 MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
4526 if (!maybe_result->To<JSProxy>(&result)) return maybe_result;
4527 result->InitializeBody(map->instance_size(), Smi::FromInt(0));
4528 result->set_handler(handler);
4529 result->set_hash(undefined_value(), SKIP_WRITE_BARRIER);
4534 MaybeObject* Heap::AllocateJSFunctionProxy(Object* handler,
4535 Object* call_trap,
4536 Object* construct_trap,
4537 Object* prototype) {
4539 // TODO(rossberg): Once we optimize proxies, think about a scheme to share
4540 // maps. Will probably depend on the identity of the handler object, too.
4542 MaybeObject* maybe_map_obj =
4543 AllocateMap(JS_FUNCTION_PROXY_TYPE, JSFunctionProxy::kSize);
4544 if (!maybe_map_obj->To<Map>(&map)) return maybe_map_obj;
4545 map->set_prototype(prototype);
4547 // Allocate the proxy object.
4548 JSFunctionProxy* result;
4549 MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
4550 if (!maybe_result->To<JSFunctionProxy>(&result)) return maybe_result;
4551 result->InitializeBody(map->instance_size(), Smi::FromInt(0));
4552 result->set_handler(handler);
4553 result->set_hash(undefined_value(), SKIP_WRITE_BARRIER);
4554 result->set_call_trap(call_trap);
4555 result->set_construct_trap(construct_trap);
4560 MaybeObject* Heap::AllocateGlobalObject(JSFunction* constructor) {
4561 ASSERT(constructor->has_initial_map());
4562 Map* map = constructor->initial_map();
4563 ASSERT(map->is_dictionary_map());
4565 // Make sure no field properties are described in the initial map.
4566 // This guarantees us that normalizing the properties does not
4567 // require us to change property values to JSGlobalPropertyCells.
4568 ASSERT(map->NextFreePropertyIndex() == 0);
4570 // Make sure we don't have a ton of pre-allocated slots in the
4571 // global objects. They will be unused once we normalize the object.
4572 ASSERT(map->unused_property_fields() == 0);
4573 ASSERT(map->inobject_properties() == 0);
4575 // Initial size of the backing store to avoid resize of the storage during
4576 // bootstrapping. The size differs between the JS global object and the builtins object.
4578 int initial_size = map->instance_type() == JS_GLOBAL_OBJECT_TYPE ? 64 : 512;
4580 // Allocate a dictionary object for backing storage.
4581 NameDictionary* dictionary;
4582 MaybeObject* maybe_dictionary =
4583 NameDictionary::Allocate(
4585 map->NumberOfOwnDescriptors() * 2 + initial_size);
4586 if (!maybe_dictionary->To(&dictionary)) return maybe_dictionary;
4588 // The global object might be created from an object template with accessors.
4589 // Fill these accessors into the dictionary.
4590 DescriptorArray* descs = map->instance_descriptors();
4591 for (int i = 0; i < map->NumberOfOwnDescriptors(); i++) {
4592 PropertyDetails details = descs->GetDetails(i);
4593 ASSERT(details.type() == CALLBACKS); // Only accessors are expected.
4594 PropertyDetails d = PropertyDetails(details.attributes(), CALLBACKS, i + 1);
4595 Object* value = descs->GetCallbacksObject(i);
4596 MaybeObject* maybe_value = AllocateJSGlobalPropertyCell(value);
4597 if (!maybe_value->ToObject(&value)) return maybe_value;
4599 MaybeObject* maybe_added = dictionary->Add(descs->GetKey(i), value, d);
4600 if (!maybe_added->To(&dictionary)) return maybe_added;
4603 // Allocate the global object and initialize it with the backing store.
4605 MaybeObject* maybe_global = Allocate(map, OLD_POINTER_SPACE);
4606 if (!maybe_global->To(&global)) return maybe_global;
4608 InitializeJSObjectFromMap(global, dictionary, map);
4610 // Create a new map for the global object.
4612 MaybeObject* maybe_map = map->CopyDropDescriptors();
4613 if (!maybe_map->To(&new_map)) return maybe_map;
4614 new_map->set_dictionary_map(true);
4616 // Set up the global object as a normalized object.
4617 global->set_map(new_map);
4618 global->set_properties(dictionary);
4620 // Make sure result is a global object with properties in dictionary.
4621 ASSERT(global->IsGlobalObject());
4622 ASSERT(!global->HasFastProperties());
4627 MaybeObject* Heap::CopyJSObject(JSObject* source) {
4628 // Never used to copy functions. If functions need to be copied we
4629 // have to be careful to clear the literals array.
4630 SLOW_ASSERT(!source->IsJSFunction());
4633 Map* map = source->map();
4634 int object_size = map->instance_size();
4637 WriteBarrierMode wb_mode = UPDATE_WRITE_BARRIER;
4639 // If we're forced to always allocate, we use the general allocation
4640 // functions which may leave us with an object in old space.
4641 if (always_allocate()) {
4642 { MaybeObject* maybe_clone =
4643 AllocateRaw(object_size, NEW_SPACE, OLD_POINTER_SPACE);
4644 if (!maybe_clone->ToObject(&clone)) return maybe_clone;
4646 Address clone_address = HeapObject::cast(clone)->address();
4647 CopyBlock(clone_address,
4650 // Update write barrier for all fields that lie beyond the header.
4651 RecordWrites(clone_address,
4652 JSObject::kHeaderSize,
4653 (object_size - JSObject::kHeaderSize) / kPointerSize);
4655 wb_mode = SKIP_WRITE_BARRIER;
4657 { MaybeObject* maybe_clone = new_space_.AllocateRaw(object_size);
4658 if (!maybe_clone->ToObject(&clone)) return maybe_clone;
4660 SLOW_ASSERT(InNewSpace(clone));
4661 // Since we know the clone is allocated in new space, we can copy
4662 // the contents without worrying about updating the write barrier.
4663 CopyBlock(HeapObject::cast(clone)->address(),
4669 JSObject::cast(clone)->GetElementsKind() == source->GetElementsKind());
4670 FixedArrayBase* elements = FixedArrayBase::cast(source->elements());
4671 FixedArray* properties = FixedArray::cast(source->properties());
4672 // Update elements if necessary.
4673 if (elements->length() > 0) {
4675 { MaybeObject* maybe_elem;
4676 if (elements->map() == fixed_cow_array_map()) {
4677 maybe_elem = FixedArray::cast(elements);
4678 } else if (source->HasFastDoubleElements()) {
4679 maybe_elem = CopyFixedDoubleArray(FixedDoubleArray::cast(elements));
4681 maybe_elem = CopyFixedArray(FixedArray::cast(elements));
4683 if (!maybe_elem->ToObject(&elem)) return maybe_elem;
4685 JSObject::cast(clone)->set_elements(FixedArrayBase::cast(elem), wb_mode);
4687 // Update properties if necessary.
4688 if (properties->length() > 0) {
4690 { MaybeObject* maybe_prop = CopyFixedArray(properties);
4691 if (!maybe_prop->ToObject(&prop)) return maybe_prop;
4693 JSObject::cast(clone)->set_properties(FixedArray::cast(prop), wb_mode);
4695 // Return the new clone.
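// Editor's sketch (not part of the original source): the write-barrier mode
// chosen above, condensed.  A clone forced into old space must record writes
// for its out-of-header fields; a new-space clone can skip them entirely.
//
//   if (always_allocate()) {                    // clone may land in old space
//     RecordWrites(clone_address, JSObject::kHeaderSize,
//                  (object_size - JSObject::kHeaderSize) / kPointerSize);
//     // wb_mode stays UPDATE_WRITE_BARRIER
//   } else {                                    // clone is in new space
//     wb_mode = SKIP_WRITE_BARRIER;
//   }
//
// The same wb_mode is then reused for the copied elements and properties.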
4700 MaybeObject* Heap::CopyJSObjectWithAllocationSite(JSObject* source) {
4701 // Never used to copy functions. If functions need to be copied we
4702 // have to be careful to clear the literals array.
4703 SLOW_ASSERT(!source->IsJSFunction());
4706 Map* map = source->map();
4707 int object_size = map->instance_size();
4710 ASSERT(map->CanTrackAllocationSite());
4711 ASSERT(map->instance_type() == JS_ARRAY_TYPE);
4712 WriteBarrierMode wb_mode = UPDATE_WRITE_BARRIER;
4714 // If we're forced to always allocate, we use the general allocation
4715 // functions which may leave us with an object in old space.
4716 int adjusted_object_size = object_size;
4717 if (always_allocate()) {
4718 // We'll only track the origin if we are certain to allocate in new space.
4719 const int kMinFreeNewSpaceAfterGC = InitialSemiSpaceSize() * 3/4;
4720 if ((object_size + AllocationSiteInfo::kSize) < kMinFreeNewSpaceAfterGC) {
4721 adjusted_object_size += AllocationSiteInfo::kSize;
4724 { MaybeObject* maybe_clone =
4725 AllocateRaw(adjusted_object_size, NEW_SPACE, OLD_POINTER_SPACE);
4726 if (!maybe_clone->ToObject(&clone)) return maybe_clone;
4728 Address clone_address = HeapObject::cast(clone)->address();
4729 CopyBlock(clone_address,
4732 // Update write barrier for all fields that lie beyond the header.
4733 int write_barrier_offset = adjusted_object_size > object_size
4734 ? JSArray::kSize + AllocationSiteInfo::kSize
4735 : JSObject::kHeaderSize;
4736 if (((object_size - write_barrier_offset) / kPointerSize) > 0) {
4737 RecordWrites(clone_address,
4738 write_barrier_offset,
4739 (object_size - write_barrier_offset) / kPointerSize);
4742 // Track allocation site information, if we failed to allocate it inline.
4743 if (InNewSpace(clone) &&
4744 adjusted_object_size == object_size) {
4745 MaybeObject* maybe_alloc_info =
4746 AllocateStruct(ALLOCATION_SITE_INFO_TYPE);
4747 AllocationSiteInfo* alloc_info;
4748 if (maybe_alloc_info->To(&alloc_info)) {
4749 alloc_info->set_map_no_write_barrier(allocation_site_info_map());
4750 alloc_info->set_payload(source, SKIP_WRITE_BARRIER);
4754 wb_mode = SKIP_WRITE_BARRIER;
4755 adjusted_object_size += AllocationSiteInfo::kSize;
4757 { MaybeObject* maybe_clone = new_space_.AllocateRaw(adjusted_object_size);
4758 if (!maybe_clone->ToObject(&clone)) return maybe_clone;
4760 SLOW_ASSERT(InNewSpace(clone));
4761 // Since we know the clone is allocated in new space, we can copy
4762 // the contents without worrying about updating the write barrier.
4763 CopyBlock(HeapObject::cast(clone)->address(),
4768 if (adjusted_object_size > object_size) {
4769 AllocationSiteInfo* alloc_info = reinterpret_cast<AllocationSiteInfo*>(
4770 reinterpret_cast<Address>(clone) + object_size);
4771 alloc_info->set_map_no_write_barrier(allocation_site_info_map());
4772 alloc_info->set_payload(source, SKIP_WRITE_BARRIER);
4776 JSObject::cast(clone)->GetElementsKind() == source->GetElementsKind());
4777 FixedArrayBase* elements = FixedArrayBase::cast(source->elements());
4778 FixedArray* properties = FixedArray::cast(source->properties());
4779 // Update elements if necessary.
4780 if (elements->length() > 0) {
4782 { MaybeObject* maybe_elem;
4783 if (elements->map() == fixed_cow_array_map()) {
4784 maybe_elem = FixedArray::cast(elements);
4785 } else if (source->HasFastDoubleElements()) {
4786 maybe_elem = CopyFixedDoubleArray(FixedDoubleArray::cast(elements));
4788 maybe_elem = CopyFixedArray(FixedArray::cast(elements));
4790 if (!maybe_elem->ToObject(&elem)) return maybe_elem;
4792 JSObject::cast(clone)->set_elements(FixedArrayBase::cast(elem), wb_mode);
4794 // Update properties if necessary.
4795 if (properties->length() > 0) {
4797 { MaybeObject* maybe_prop = CopyFixedArray(properties);
4798 if (!maybe_prop->ToObject(&prop)) return maybe_prop;
4800 JSObject::cast(clone)->set_properties(FixedArray::cast(prop), wb_mode);
4802 // Return the new clone.
4807 MaybeObject* Heap::ReinitializeJSReceiver(
4808 JSReceiver* object, InstanceType type, int size) {
4809 ASSERT(type >= FIRST_JS_OBJECT_TYPE);
4811 // Allocate fresh map.
4812 // TODO(rossberg): Once we optimize proxies, cache these maps.
4814 MaybeObject* maybe = AllocateMap(type, size);
4815 if (!maybe->To<Map>(&map)) return maybe;
4817 // Check that the receiver has at least the size of the fresh object.
4818 int size_difference = object->map()->instance_size() - map->instance_size();
4819 ASSERT(size_difference >= 0);
4821 map->set_prototype(object->map()->prototype());
4823 // Allocate the backing storage for the properties.
4824 int prop_size = map->unused_property_fields() - map->inobject_properties();
4826 maybe = AllocateFixedArray(prop_size, TENURED);
4827 if (!maybe->ToObject(&properties)) return maybe;
4829 // Functions require some allocation, which might fail here.
4830 SharedFunctionInfo* shared = NULL;
4831 if (type == JS_FUNCTION_TYPE) {
4834 InternalizeOneByteString(STATIC_ASCII_VECTOR("<freezing call trap>"));
4835 if (!maybe->To<String>(&name)) return maybe;
4836 maybe = AllocateSharedFunctionInfo(name);
4837 if (!maybe->To<SharedFunctionInfo>(&shared)) return maybe;
4840 // Because of possible retries of this function after failure,
4841 // we must NOT fail after this point, where we have changed the type!
4843 // Reset the map for the object.
4844 object->set_map(map);
4845 JSObject* jsobj = JSObject::cast(object);
4847 // Reinitialize the object from the constructor map.
4848 InitializeJSObjectFromMap(jsobj, FixedArray::cast(properties), map);
4850 // Functions require some minimal initialization.
4851 if (type == JS_FUNCTION_TYPE) {
4852 map->set_function_with_prototype(true);
4853 InitializeFunction(JSFunction::cast(object), shared, the_hole_value());
4854 JSFunction::cast(object)->set_context(
4855 isolate()->context()->native_context());
4858 // Put in filler if the new object is smaller than the old.
4859 if (size_difference > 0) {
4860 CreateFillerObjectAt(
4861 object->address() + map->instance_size(), size_difference);
4868 MaybeObject* Heap::ReinitializeJSGlobalProxy(JSFunction* constructor,
4869 JSGlobalProxy* object) {
4870 ASSERT(constructor->has_initial_map());
4871 Map* map = constructor->initial_map();
4873 // Check that the already allocated object has the same size and type as
4874 // objects allocated using the constructor.
4875 ASSERT(map->instance_size() == object->map()->instance_size());
4876 ASSERT(map->instance_type() == object->map()->instance_type());
4878 // Allocate the backing storage for the properties.
4879 int prop_size = map->unused_property_fields() - map->inobject_properties();
4881 { MaybeObject* maybe_properties = AllocateFixedArray(prop_size, TENURED);
4882 if (!maybe_properties->ToObject(&properties)) return maybe_properties;
4885 // Reset the map for the object.
4886 object->set_map(constructor->initial_map());
4888 // Reinitialize the object from the constructor map.
4889 InitializeJSObjectFromMap(object, FixedArray::cast(properties), map);
4894 MaybeObject* Heap::AllocateStringFromOneByte(Vector<const uint8_t> string,
4895 PretenureFlag pretenure) {
4896 int length = string.length();
4898 return Heap::LookupSingleCharacterStringFromCode(string[0]);
4901 { MaybeObject* maybe_result =
4902 AllocateRawOneByteString(string.length(), pretenure);
4903 if (!maybe_result->ToObject(&result)) return maybe_result;
4906 // Copy the characters into the new object.
4907 CopyChars(SeqOneByteString::cast(result)->GetChars(),
4914 MaybeObject* Heap::AllocateStringFromUtf8Slow(Vector<const char> string,
4915 int non_ascii_start,
4916 PretenureFlag pretenure) {
4917 // Continue counting the number of characters in the UTF-8 string, starting
4918 // from the first non-ascii character or word.
4919 Access<UnicodeCache::Utf8Decoder>
4920 decoder(isolate_->unicode_cache()->utf8_decoder());
4921 decoder->Reset(string.start() + non_ascii_start,
4922 string.length() - non_ascii_start);
4923 int utf16_length = decoder->Utf16Length();
4924 ASSERT(utf16_length > 0);
4928 int chars = non_ascii_start + utf16_length;
4929 MaybeObject* maybe_result = AllocateRawTwoByteString(chars, pretenure);
4930 if (!maybe_result->ToObject(&result)) return maybe_result;
4932 // Convert and copy the characters into the new object.
4933 SeqTwoByteString* twobyte = SeqTwoByteString::cast(result);
4934 // Copy ascii portion.
4935 uint16_t* data = twobyte->GetChars();
4936 if (non_ascii_start != 0) {
4937 const char* ascii_data = string.start();
4938 for (int i = 0; i < non_ascii_start; i++) {
4939 *data++ = *ascii_data++;
4942 // Now write the remainder.
4943 decoder->WriteUtf16(data, utf16_length);
4948 MaybeObject* Heap::AllocateStringFromTwoByte(Vector<const uc16> string,
4949 PretenureFlag pretenure) {
4950 // Check if the string is an ASCII string.
4952 int length = string.length();
4953 const uc16* start = string.start();
4955 if (String::IsOneByte(start, length)) {
4956 MaybeObject* maybe_result = AllocateRawOneByteString(length, pretenure);
4957 if (!maybe_result->ToObject(&result)) return maybe_result;
4958 CopyChars(SeqOneByteString::cast(result)->GetChars(), start, length);
4959 } else { // It's not a one byte string.
4960 MaybeObject* maybe_result = AllocateRawTwoByteString(length, pretenure);
4961 if (!maybe_result->ToObject(&result)) return maybe_result;
4962 CopyChars(SeqTwoByteString::cast(result)->GetChars(), start, length);
4968 Map* Heap::InternalizedStringMapForString(String* string) {
4969 // If the string is in new space it cannot be used as internalized.
4970 if (InNewSpace(string)) return NULL;
4972 // Find the corresponding internalized string map for strings.
4973 switch (string->map()->instance_type()) {
4974 case STRING_TYPE: return internalized_string_map();
4975 case ASCII_STRING_TYPE: return ascii_internalized_string_map();
4976 case CONS_STRING_TYPE: return cons_internalized_string_map();
4977 case CONS_ASCII_STRING_TYPE: return cons_ascii_internalized_string_map();
4978 case EXTERNAL_STRING_TYPE: return external_internalized_string_map();
4979 case EXTERNAL_ASCII_STRING_TYPE:
4980 return external_ascii_internalized_string_map();
4981 case EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE:
4982 return external_internalized_string_with_one_byte_data_map();
4983 case SHORT_EXTERNAL_STRING_TYPE:
4984 return short_external_internalized_string_map();
4985 case SHORT_EXTERNAL_ASCII_STRING_TYPE:
4986 return short_external_ascii_internalized_string_map();
4987 case SHORT_EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE:
4988 return short_external_internalized_string_with_one_byte_data_map();
4989 default: return NULL; // No match found.
4994 static inline void WriteOneByteData(Vector<const char> vector,
4997 // Only works for ascii.
4998 ASSERT(vector.length() == len);
4999 OS::MemCopy(chars, vector.start(), len);
5002 static inline void WriteTwoByteData(Vector<const char> vector,
5005 const uint8_t* stream = reinterpret_cast<const uint8_t*>(vector.start());
5006 unsigned stream_length = vector.length();
5007 while (stream_length != 0) {
5008 unsigned consumed = 0;
5009 uint32_t c = unibrow::Utf8::ValueOf(stream, stream_length, &consumed);
5010 ASSERT(c != unibrow::Utf8::kBadChar);
5011 ASSERT(consumed <= stream_length);
5012 stream_length -= consumed;
5014 if (c > unibrow::Utf16::kMaxNonSurrogateCharCode) {
5017 *chars++ = unibrow::Utf16::LeadSurrogate(c);
5018 *chars++ = unibrow::Utf16::TrailSurrogate(c);
5025 ASSERT(stream_length == 0);
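// Editor's note (not part of the original source): a worked example of the
// surrogate split above.  For UTF-8 input encoding U+1F600 the decoder yields
// c == 0x1F600 > kMaxNonSurrogateCharCode, so two code units are written:
//
//   LeadSurrogate(0x1F600)  == 0xD800 + ((0x1F600 - 0x10000) >> 10)  == 0xD83D
//   TrailSurrogate(0x1F600) == 0xDC00 + ((0x1F600 - 0x10000) & 0x3FF) == 0xDE00
//
// which is why such characters consume two entries of the two-byte output.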
5030 static inline void WriteOneByteData(String* s, uint8_t* chars, int len) {
5031 ASSERT(s->length() == len);
5032 String::WriteToFlat(s, chars, 0, len);
5035 static inline void WriteTwoByteData(String* s, uint16_t* chars, int len) {
5036 ASSERT(s->length() == len);
5037 String::WriteToFlat(s, chars, 0, len);
5041 template<bool is_one_byte, typename T>
5042 MaybeObject* Heap::AllocateInternalizedStringImpl(
5043 T t, int chars, uint32_t hash_field) {
5045 // Compute map and object size.
5050 if (chars > SeqOneByteString::kMaxLength) {
5051 return Failure::OutOfMemoryException(0x9);
5053 map = ascii_internalized_string_map();
5054 size = SeqOneByteString::SizeFor(chars);
5056 if (chars > SeqTwoByteString::kMaxLength) {
5057 return Failure::OutOfMemoryException(0xa);
5059 map = internalized_string_map();
5060 size = SeqTwoByteString::SizeFor(chars);
5065 { MaybeObject* maybe_result = (size > Page::kMaxNonCodeHeapObjectSize)
5066 ? lo_space_->AllocateRaw(size, NOT_EXECUTABLE)
5067 : old_data_space_->AllocateRaw(size);
5068 if (!maybe_result->ToObject(&result)) return maybe_result;
5071 reinterpret_cast<HeapObject*>(result)->set_map_no_write_barrier(map);
5072 // Set length and hash fields of the allocated string.
5073 String* answer = String::cast(result);
5074 answer->set_length(chars);
5075 answer->set_hash_field(hash_field);
5077 ASSERT_EQ(size, answer->Size());
5080 WriteOneByteData(t, SeqOneByteString::cast(answer)->GetChars(), chars);
5082 WriteTwoByteData(t, SeqTwoByteString::cast(answer)->GetChars(), chars);
5088 // Need explicit instantiations.
5090 MaybeObject* Heap::AllocateInternalizedStringImpl<true>(String*, int, uint32_t);
5092 MaybeObject* Heap::AllocateInternalizedStringImpl<false>(
5093 String*, int, uint32_t);
5095 MaybeObject* Heap::AllocateInternalizedStringImpl<false>(
5096 Vector<const char>, int, uint32_t);
5099 MaybeObject* Heap::AllocateRawOneByteString(int length,
5100 PretenureFlag pretenure) {
5101 if (length < 0 || length > SeqOneByteString::kMaxLength) {
5102 return Failure::OutOfMemoryException(0xb);
5105 int size = SeqOneByteString::SizeFor(length);
5106 ASSERT(size <= SeqOneByteString::kMaxSize);
5108 AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
5109 AllocationSpace retry_space = OLD_DATA_SPACE;
5111 if (space == NEW_SPACE) {
5112 if (size > kMaxObjectSizeInNewSpace) {
5113 // Allocate in large object space; the retry space will be ignored.
5114 space = LO_SPACE;
5115 } else if (size > Page::kMaxNonCodeHeapObjectSize) {
5116 // Allocate in new space, retry in large object space.
5117 retry_space = LO_SPACE;
5119 } else if (space == OLD_DATA_SPACE &&
5120 size > Page::kMaxNonCodeHeapObjectSize) {
5124 { MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
5125 if (!maybe_result->ToObject(&result)) return maybe_result;
5128 // Partially initialize the object.
5129 HeapObject::cast(result)->set_map_no_write_barrier(ascii_string_map());
5130 String::cast(result)->set_length(length);
5131 String::cast(result)->set_hash_field(String::kEmptyHashField);
5132 ASSERT_EQ(size, HeapObject::cast(result)->Size());
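// Editor's note (not part of the original source): the space/retry-space
// choice above, summarized for a sequential string of 'size' bytes:
//
//   TENURED,     size <= Page::kMaxNonCodeHeapObjectSize
//       -> OLD_DATA_SPACE, retry OLD_DATA_SPACE
//   NOT_TENURED, size <= Page::kMaxNonCodeHeapObjectSize
//       -> NEW_SPACE, retry OLD_DATA_SPACE
//   size fits in new space but not on a regular page
//       -> NEW_SPACE, retry LO_SPACE
//   size > kMaxObjectSizeInNewSpace (or too big for old data pages)
//       -> LO_SPACE
//
// AllocateRawTwoByteString below follows exactly the same pattern.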
5138 MaybeObject* Heap::AllocateRawTwoByteString(int length,
5139 PretenureFlag pretenure) {
5140 if (length < 0 || length > SeqTwoByteString::kMaxLength) {
5141 return Failure::OutOfMemoryException(0xc);
5143 int size = SeqTwoByteString::SizeFor(length);
5144 ASSERT(size <= SeqTwoByteString::kMaxSize);
5145 AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
5146 AllocationSpace retry_space = OLD_DATA_SPACE;
5148 if (space == NEW_SPACE) {
5149 if (size > kMaxObjectSizeInNewSpace) {
5150 // Allocate in large object space; the retry space will be ignored.
5151 space = LO_SPACE;
5152 } else if (size > Page::kMaxNonCodeHeapObjectSize) {
5153 // Allocate in new space, retry in large object space.
5154 retry_space = LO_SPACE;
5156 } else if (space == OLD_DATA_SPACE &&
5157 size > Page::kMaxNonCodeHeapObjectSize) {
5161 { MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
5162 if (!maybe_result->ToObject(&result)) return maybe_result;
5165 // Partially initialize the object.
5166 HeapObject::cast(result)->set_map_no_write_barrier(string_map());
5167 String::cast(result)->set_length(length);
5168 String::cast(result)->set_hash_field(String::kEmptyHashField);
5169 ASSERT_EQ(size, HeapObject::cast(result)->Size());
5174 MaybeObject* Heap::AllocateJSArray(
5175 ElementsKind elements_kind,
5176 PretenureFlag pretenure) {
5177 Context* native_context = isolate()->context()->native_context();
5178 JSFunction* array_function = native_context->array_function();
5179 Map* map = array_function->initial_map();
5180 Object* maybe_map_array = native_context->js_array_maps();
5181 if (!maybe_map_array->IsUndefined()) {
5182 Object* maybe_transitioned_map =
5183 FixedArray::cast(maybe_map_array)->get(elements_kind);
5184 if (!maybe_transitioned_map->IsUndefined()) {
5185 map = Map::cast(maybe_transitioned_map);
5189 return AllocateJSObjectFromMap(map, pretenure);
5193 MaybeObject* Heap::AllocateJSArrayWithAllocationSite(
5194 ElementsKind elements_kind,
5195 Handle<Object> allocation_site_info_payload) {
5196 Context* native_context = isolate()->context()->native_context();
5197 JSFunction* array_function = native_context->array_function();
5198 Map* map = array_function->initial_map();
5199 Object* maybe_map_array = native_context->js_array_maps();
5200 if (!maybe_map_array->IsUndefined()) {
5201 Object* maybe_transitioned_map =
5202 FixedArray::cast(maybe_map_array)->get(elements_kind);
5203 if (!maybe_transitioned_map->IsUndefined()) {
5204 map = Map::cast(maybe_transitioned_map);
5207 return AllocateJSObjectFromMapWithAllocationSite(map,
5208 allocation_site_info_payload);
5212 MaybeObject* Heap::AllocateEmptyFixedArray() {
5213 int size = FixedArray::SizeFor(0);
5215 { MaybeObject* maybe_result =
5216 AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE);
5217 if (!maybe_result->ToObject(&result)) return maybe_result;
5219 // Initialize the object.
5220 reinterpret_cast<FixedArray*>(result)->set_map_no_write_barrier(
5222 reinterpret_cast<FixedArray*>(result)->set_length(0);
5227 MaybeObject* Heap::AllocateRawFixedArray(int length) {
5228 if (length < 0 || length > FixedArray::kMaxLength) {
5229 return Failure::OutOfMemoryException(0xd);
5232 // Use the general function if we're forced to always allocate.
5233 if (always_allocate()) return AllocateFixedArray(length, TENURED);
5234 // Allocate the raw data for a fixed array.
5235 int size = FixedArray::SizeFor(length);
5236 return size <= kMaxObjectSizeInNewSpace
5237 ? new_space_.AllocateRaw(size)
5238 : lo_space_->AllocateRaw(size, NOT_EXECUTABLE);
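  // In other words: raw fixed arrays that fit in new space are bump-pointer
  // allocated there, while anything larger goes straight to the large object
  // space as non-executable memory.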
5242 MaybeObject* Heap::CopyFixedArrayWithMap(FixedArray* src, Map* map) {
5243 int len = src->length();
5245 { MaybeObject* maybe_obj = AllocateRawFixedArray(len);
5246 if (!maybe_obj->ToObject(&obj)) return maybe_obj;
5248 if (InNewSpace(obj)) {
5249 HeapObject* dst = HeapObject::cast(obj);
5250 dst->set_map_no_write_barrier(map);
5251 CopyBlock(dst->address() + kPointerSize,
5252 src->address() + kPointerSize,
5253 FixedArray::SizeFor(len) - kPointerSize);
5256 HeapObject::cast(obj)->set_map_no_write_barrier(map);
5257 FixedArray* result = FixedArray::cast(obj);
5258 result->set_length(len);
5261 AssertNoAllocation no_gc;
5262 WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
5263 for (int i = 0; i < len; i++) result->set(i, src->get(i), mode);
5268 MaybeObject* Heap::CopyFixedDoubleArrayWithMap(FixedDoubleArray* src,
5270 int len = src->length();
5272 { MaybeObject* maybe_obj = AllocateRawFixedDoubleArray(len, NOT_TENURED);
5273 if (!maybe_obj->ToObject(&obj)) return maybe_obj;
5275 HeapObject* dst = HeapObject::cast(obj);
5276 dst->set_map_no_write_barrier(map);
5278 dst->address() + FixedDoubleArray::kLengthOffset,
5279 src->address() + FixedDoubleArray::kLengthOffset,
5280 FixedDoubleArray::SizeFor(len) - FixedDoubleArray::kLengthOffset);
5285 MaybeObject* Heap::AllocateFixedArray(int length) {
5286 ASSERT(length >= 0);
5287 if (length == 0) return empty_fixed_array();
5289 { MaybeObject* maybe_result = AllocateRawFixedArray(length);
5290 if (!maybe_result->ToObject(&result)) return maybe_result;
5292 // Initialize header.
5293 FixedArray* array = reinterpret_cast<FixedArray*>(result);
5294 array->set_map_no_write_barrier(fixed_array_map());
5295 array->set_length(length);
5297 ASSERT(!InNewSpace(undefined_value()));
5298 MemsetPointer(array->data_start(), undefined_value(), length);
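  // Filling the body with undefined_value() needs no write barrier: the
  // ASSERT above guarantees that the undefined value is not in new space,
  // so no old-to-new pointers are created here.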
5303 MaybeObject* Heap::AllocateRawFixedArray(int length, PretenureFlag pretenure) {
5304 if (length < 0 || length > FixedArray::kMaxLength) {
5305 return Failure::OutOfMemoryException(0xe);
5308 AllocationSpace space =
5309 (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
5310 int size = FixedArray::SizeFor(length);
5311 if (space == NEW_SPACE && size > kMaxObjectSizeInNewSpace) {
5312 // Too big for new space.
5314 } else if (space == OLD_POINTER_SPACE &&
5315 size > Page::kMaxNonCodeHeapObjectSize) {
5316 // Too big for old pointer space.
5320 AllocationSpace retry_space =
5321 (size <= Page::kMaxNonCodeHeapObjectSize) ? OLD_POINTER_SPACE : LO_SPACE;
5323 return AllocateRaw(size, space, retry_space);
5327 MUST_USE_RESULT static MaybeObject* AllocateFixedArrayWithFiller(
5330 PretenureFlag pretenure,
5332 ASSERT(length >= 0);
5333 ASSERT(heap->empty_fixed_array()->IsFixedArray());
5334 if (length == 0) return heap->empty_fixed_array();
5336 ASSERT(!heap->InNewSpace(filler));
5338 { MaybeObject* maybe_result = heap->AllocateRawFixedArray(length, pretenure);
5339 if (!maybe_result->ToObject(&result)) return maybe_result;
5342 HeapObject::cast(result)->set_map_no_write_barrier(heap->fixed_array_map());
5343 FixedArray* array = FixedArray::cast(result);
5344 array->set_length(length);
5345 MemsetPointer(array->data_start(), filler, length);
5350 MaybeObject* Heap::AllocateFixedArray(int length, PretenureFlag pretenure) {
5351 return AllocateFixedArrayWithFiller(this,
5358 MaybeObject* Heap::AllocateFixedArrayWithHoles(int length,
5359 PretenureFlag pretenure) {
5360 return AllocateFixedArrayWithFiller(this,
5367 MaybeObject* Heap::AllocateUninitializedFixedArray(int length) {
5368 if (length == 0) return empty_fixed_array();
5371 { MaybeObject* maybe_obj = AllocateRawFixedArray(length);
5372 if (!maybe_obj->ToObject(&obj)) return maybe_obj;
5375 reinterpret_cast<FixedArray*>(obj)->set_map_no_write_barrier(
5377 FixedArray::cast(obj)->set_length(length);
5382 MaybeObject* Heap::AllocateEmptyFixedDoubleArray() {
5383 int size = FixedDoubleArray::SizeFor(0);
5385 { MaybeObject* maybe_result =
5386 AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE);
5387 if (!maybe_result->ToObject(&result)) return maybe_result;
5389 // Initialize the object.
5390 reinterpret_cast<FixedDoubleArray*>(result)->set_map_no_write_barrier(
5391 fixed_double_array_map());
5392 reinterpret_cast<FixedDoubleArray*>(result)->set_length(0);
5397 MaybeObject* Heap::AllocateUninitializedFixedDoubleArray(
5399 PretenureFlag pretenure) {
5400 if (length == 0) return empty_fixed_array();
5402 Object* elements_object;
5403 MaybeObject* maybe_obj = AllocateRawFixedDoubleArray(length, pretenure);
5404 if (!maybe_obj->ToObject(&elements_object)) return maybe_obj;
5405 FixedDoubleArray* elements =
5406 reinterpret_cast<FixedDoubleArray*>(elements_object);
5408 elements->set_map_no_write_barrier(fixed_double_array_map());
5409 elements->set_length(length);
5414 MaybeObject* Heap::AllocateFixedDoubleArrayWithHoles(
5416 PretenureFlag pretenure) {
5417 if (length == 0) return empty_fixed_array();
5419 Object* elements_object;
5420 MaybeObject* maybe_obj = AllocateRawFixedDoubleArray(length, pretenure);
5421 if (!maybe_obj->ToObject(&elements_object)) return maybe_obj;
5422 FixedDoubleArray* elements =
5423 reinterpret_cast<FixedDoubleArray*>(elements_object);
5425 for (int i = 0; i < length; ++i) {
5426 elements->set_the_hole(i);
5429 elements->set_map_no_write_barrier(fixed_double_array_map());
5430 elements->set_length(length);
5435 MaybeObject* Heap::AllocateRawFixedDoubleArray(int length,
5436 PretenureFlag pretenure) {
5437 if (length < 0 || length > FixedDoubleArray::kMaxLength) {
5438 return Failure::OutOfMemoryException(0xf);
5441 AllocationSpace space =
5442 (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
5443 int size = FixedDoubleArray::SizeFor(length);
5445 #ifndef V8_HOST_ARCH_64_BIT
5446 size += kPointerSize;
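  // On 32-bit hosts allocations are only pointer-size aligned, so request
  // one extra word here to give EnsureDoubleAligned (called below) room to
  // realign the object to an 8-byte boundary for its double fields.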
5449 if (space == NEW_SPACE && size > kMaxObjectSizeInNewSpace) {
5450 // Too big for new space.
5452 } else if (space == OLD_DATA_SPACE &&
5453 size > Page::kMaxNonCodeHeapObjectSize) {
5454 // Too big for old data space.
5458 AllocationSpace retry_space =
5459 (size <= Page::kMaxNonCodeHeapObjectSize) ? OLD_DATA_SPACE : LO_SPACE;
5462 { MaybeObject* maybe_object = AllocateRaw(size, space, retry_space);
5463 if (!maybe_object->To<HeapObject>(&object)) return maybe_object;
5466 return EnsureDoubleAligned(this, object, size);
5470 MaybeObject* Heap::AllocateHashTable(int length, PretenureFlag pretenure) {
5472 { MaybeObject* maybe_result = AllocateFixedArray(length, pretenure);
5473 if (!maybe_result->ToObject(&result)) return maybe_result;
5475 reinterpret_cast<HeapObject*>(result)->set_map_no_write_barrier(
5477 ASSERT(result->IsHashTable());
5482 MaybeObject* Heap::AllocateSymbol() {
5483 // Statically ensure that it is safe to allocate symbols in paged spaces.
5484 STATIC_ASSERT(Symbol::kSize <= Page::kNonCodeObjectAreaSize);
5487 MaybeObject* maybe =
5488 AllocateRaw(Symbol::kSize, OLD_POINTER_SPACE, OLD_POINTER_SPACE);
5489 if (!maybe->ToObject(&result)) return maybe;
5491 HeapObject::cast(result)->set_map_no_write_barrier(symbol_map());
5493 // Generate a random hash value.
5497 hash = V8::RandomPrivate(isolate()) & Name::kHashBitMask;
5499 } while (hash == 0 && attempts < 30);
5500 if (hash == 0) hash = 1; // never return 0
5502 Symbol::cast(result)->set_hash_field(
5503 Name::kIsNotArrayIndexMask | (hash << Name::kHashShift));
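  // Presumably the kIsNotArrayIndexMask bit guarantees that the randomly
  // generated hash field can never be interpreted as a cached array index.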
5504 Symbol::cast(result)->set_name(undefined_value());
5506 ASSERT(result->IsSymbol());
5511 MaybeObject* Heap::AllocateNativeContext() {
5513 { MaybeObject* maybe_result =
5514 AllocateFixedArray(Context::NATIVE_CONTEXT_SLOTS);
5515 if (!maybe_result->ToObject(&result)) return maybe_result;
5517 Context* context = reinterpret_cast<Context*>(result);
5518 context->set_map_no_write_barrier(native_context_map());
5519 context->set_js_array_maps(undefined_value());
5520 ASSERT(context->IsNativeContext());
5521 ASSERT(result->IsContext());
5526 MaybeObject* Heap::AllocateGlobalContext(JSFunction* function,
5527 ScopeInfo* scope_info) {
5529 { MaybeObject* maybe_result =
5530 AllocateFixedArray(scope_info->ContextLength(), TENURED);
5531 if (!maybe_result->ToObject(&result)) return maybe_result;
5533 Context* context = reinterpret_cast<Context*>(result);
5534 context->set_map_no_write_barrier(global_context_map());
5535 context->set_closure(function);
5536 context->set_previous(function->context());
5537 context->set_extension(scope_info);
5538 context->set_global_object(function->context()->global_object());
5539 ASSERT(context->IsGlobalContext());
5540 ASSERT(result->IsContext());
5545 MaybeObject* Heap::AllocateModuleContext(ScopeInfo* scope_info) {
5547 { MaybeObject* maybe_result =
5548 AllocateFixedArray(scope_info->ContextLength(), TENURED);
5549 if (!maybe_result->ToObject(&result)) return maybe_result;
5551 Context* context = reinterpret_cast<Context*>(result);
5552 context->set_map_no_write_barrier(module_context_map());
5553 // Instance link will be set later.
5554 context->set_extension(Smi::FromInt(0));
5559 MaybeObject* Heap::AllocateFunctionContext(int length, JSFunction* function) {
5560 ASSERT(length >= Context::MIN_CONTEXT_SLOTS);
5562 { MaybeObject* maybe_result = AllocateFixedArray(length);
5563 if (!maybe_result->ToObject(&result)) return maybe_result;
5565 Context* context = reinterpret_cast<Context*>(result);
5566 context->set_map_no_write_barrier(function_context_map());
5567 context->set_closure(function);
5568 context->set_previous(function->context());
5569 context->set_extension(Smi::FromInt(0));
5570 context->set_global_object(function->context()->global_object());
5575 MaybeObject* Heap::AllocateCatchContext(JSFunction* function,
5578 Object* thrown_object) {
5579 STATIC_ASSERT(Context::MIN_CONTEXT_SLOTS == Context::THROWN_OBJECT_INDEX);
5581 { MaybeObject* maybe_result =
5582 AllocateFixedArray(Context::MIN_CONTEXT_SLOTS + 1);
5583 if (!maybe_result->ToObject(&result)) return maybe_result;
5585 Context* context = reinterpret_cast<Context*>(result);
5586 context->set_map_no_write_barrier(catch_context_map());
5587 context->set_closure(function);
5588 context->set_previous(previous);
5589 context->set_extension(name);
5590 context->set_global_object(previous->global_object());
5591 context->set(Context::THROWN_OBJECT_INDEX, thrown_object);
5596 MaybeObject* Heap::AllocateWithContext(JSFunction* function,
5598 JSObject* extension) {
5600 { MaybeObject* maybe_result = AllocateFixedArray(Context::MIN_CONTEXT_SLOTS);
5601 if (!maybe_result->ToObject(&result)) return maybe_result;
5603 Context* context = reinterpret_cast<Context*>(result);
5604 context->set_map_no_write_barrier(with_context_map());
5605 context->set_closure(function);
5606 context->set_previous(previous);
5607 context->set_extension(extension);
5608 context->set_global_object(previous->global_object());
5613 MaybeObject* Heap::AllocateBlockContext(JSFunction* function,
5615 ScopeInfo* scope_info) {
5617 { MaybeObject* maybe_result =
5618 AllocateFixedArrayWithHoles(scope_info->ContextLength());
5619 if (!maybe_result->ToObject(&result)) return maybe_result;
5621 Context* context = reinterpret_cast<Context*>(result);
5622 context->set_map_no_write_barrier(block_context_map());
5623 context->set_closure(function);
5624 context->set_previous(previous);
5625 context->set_extension(scope_info);
5626 context->set_global_object(previous->global_object());
5631 MaybeObject* Heap::AllocateScopeInfo(int length) {
5632 FixedArray* scope_info;
5633 MaybeObject* maybe_scope_info = AllocateFixedArray(length, TENURED);
5634 if (!maybe_scope_info->To(&scope_info)) return maybe_scope_info;
5635 scope_info->set_map_no_write_barrier(scope_info_map());
5640 MaybeObject* Heap::AllocateExternal(void* value) {
5642 { MaybeObject* maybe_result = AllocateForeign(static_cast<Address>(value));
5643 if (!maybe_result->To(&foreign)) return maybe_result;
5646 { MaybeObject* maybe_result = AllocateJSObjectFromMap(external_map());
5647 if (!maybe_result->To(&external)) return maybe_result;
5649 external->SetInternalField(0, foreign);
5654 MaybeObject* Heap::AllocateStruct(InstanceType type) {
5657 #define MAKE_CASE(NAME, Name, name) \
5658 case NAME##_TYPE: map = name##_map(); break;
5659 STRUCT_LIST(MAKE_CASE)
5663 return Failure::InternalError();
5665 int size = map->instance_size();
5666 AllocationSpace space =
5667 (size > Page::kMaxNonCodeHeapObjectSize) ? LO_SPACE : OLD_POINTER_SPACE;
5669 { MaybeObject* maybe_result = Allocate(map, space);
5670 if (!maybe_result->ToObject(&result)) return maybe_result;
5672 Struct::cast(result)->InitializeBody(size);
5677 bool Heap::IsHeapIterable() {
5678 return (!old_pointer_space()->was_swept_conservatively() &&
5679 !old_data_space()->was_swept_conservatively());
5683 void Heap::EnsureHeapIsIterable() {
5684 ASSERT(IsAllocationAllowed());
5685 if (!IsHeapIterable()) {
5686 CollectAllGarbage(kMakeHeapIterableMask, "Heap::EnsureHeapIsIterable");
5688 ASSERT(IsHeapIterable());
5692 void Heap::AdvanceIdleIncrementalMarking(intptr_t step_size) {
5693 incremental_marking()->Step(step_size,
5694 IncrementalMarking::NO_GC_VIA_STACK_GUARD);
5696 if (incremental_marking()->IsComplete()) {
5697 bool uncommit = false;
5698 if (gc_count_at_last_idle_gc_ == gc_count_) {
5699       // No GC since the last full GC; the mutator is probably not active.
5700 isolate_->compilation_cache()->Clear();
5703 CollectAllGarbage(kNoGCFlags, "idle notification: finalize incremental");
5704 gc_count_at_last_idle_gc_ = gc_count_;
5706 new_space_.Shrink();
5707 UncommitFromSpace();
5713 bool Heap::IdleNotification(int hint) {
5714 // Hints greater than this value indicate that
5715 // the embedder is requesting a lot of GC work.
5716 const int kMaxHint = 1000;
5717   // Minimal hint that allows a full GC to be performed.
5718 const int kMinHintForFullGC = 100;
5719 intptr_t size_factor = Min(Max(hint, 20), kMaxHint) / 4;
5720 // The size factor is in range [5..250]. The numbers here are chosen from
5721   // experiments. If you change them, make sure to test with
5722   // chrome/performance_ui_tests --gtest_filter="GeneralMixMemoryTest.*"
5723 intptr_t step_size =
5724 size_factor * IncrementalMarking::kAllocatedThreshold;
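  // For example, hint == 100 gives size_factor == 25 and thus
  // step_size == 25 * IncrementalMarking::kAllocatedThreshold; hints at or
  // below 20 clamp to size_factor == 5, hints at or above 1000 to 250.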
5726 if (contexts_disposed_ > 0) {
5727 if (hint >= kMaxHint) {
5728 // The embedder is requesting a lot of GC work after context disposal,
5729       // so we age inline caches so that they don't keep objects from
5730 // the old context alive.
5733 int mark_sweep_time = Min(TimeMarkSweepWouldTakeInMs(), 1000);
5734 if (hint >= mark_sweep_time && !FLAG_expose_gc &&
5735 incremental_marking()->IsStopped()) {
5736 HistogramTimerScope scope(isolate_->counters()->gc_context());
5737 CollectAllGarbage(kReduceMemoryFootprintMask,
5738 "idle notification: contexts disposed");
5740 AdvanceIdleIncrementalMarking(step_size);
5741 contexts_disposed_ = 0;
5743     // After context disposal there is likely a lot of garbage remaining, so reset
5744 // the idle notification counters in order to trigger more incremental GCs
5745 // on subsequent idle notifications.
5750 if (!FLAG_incremental_marking || FLAG_expose_gc || Serializer::enabled()) {
5751 return IdleGlobalGC();
5754 // By doing small chunks of GC work in each IdleNotification,
5755   // we perform a round of incremental GCs and after that wait until
5756 // the mutator creates enough garbage to justify a new round.
5757 // An incremental GC progresses as follows:
5758 // 1. many incremental marking steps,
5759 // 2. one old space mark-sweep-compact,
5760 // 3. many lazy sweep steps.
5761 // Use mark-sweep-compact events to count incremental GCs in a round.
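  // The bookkeeping below implements this: mark-sweeps observed since the
  // last notification are added to mark_sweeps_since_idle_round_started_,
  // and the round is considered finished once kMaxMarkSweepsInIdleRound of
  // them have happened.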
5763 if (incremental_marking()->IsStopped()) {
5764 if (!mark_compact_collector()->AreSweeperThreadsActivated() &&
5765 !IsSweepingComplete() &&
5766 !AdvanceSweepers(static_cast<int>(step_size))) {
5771 if (mark_sweeps_since_idle_round_started_ >= kMaxMarkSweepsInIdleRound) {
5772 if (EnoughGarbageSinceLastIdleRound()) {
5779 int new_mark_sweeps = ms_count_ - ms_count_at_last_idle_notification_;
5780 mark_sweeps_since_idle_round_started_ += new_mark_sweeps;
5781 ms_count_at_last_idle_notification_ = ms_count_;
5783 int remaining_mark_sweeps = kMaxMarkSweepsInIdleRound -
5784 mark_sweeps_since_idle_round_started_;
5786 if (remaining_mark_sweeps <= 0) {
5791 if (incremental_marking()->IsStopped()) {
5792 // If there are no more than two GCs left in this idle round and we are
5793 // allowed to do a full GC, then make those GCs full in order to compact
5795 // TODO(ulan): Once we enable code compaction for incremental marking,
5796 // we can get rid of this special case and always start incremental marking.
5797 if (remaining_mark_sweeps <= 2 && hint >= kMinHintForFullGC) {
5798 CollectAllGarbage(kReduceMemoryFootprintMask,
5799 "idle notification: finalize idle round");
5801 incremental_marking()->Start();
5804 if (!incremental_marking()->IsStopped()) {
5805 AdvanceIdleIncrementalMarking(step_size);
5811 bool Heap::IdleGlobalGC() {
5812 static const int kIdlesBeforeScavenge = 4;
5813 static const int kIdlesBeforeMarkSweep = 7;
5814 static const int kIdlesBeforeMarkCompact = 8;
5815 static const int kMaxIdleCount = kIdlesBeforeMarkCompact + 1;
5816 static const unsigned int kGCsBetweenCleanup = 4;
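  // With these constants the escalation is: the 4th consecutive idle
  // notification triggers a new-space scavenge, the 7th clears the
  // compilation cache and performs a full collection, the 8th performs one
  // more full collection and resets the counter, and further notifications
  // are ignored (see below).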
5818 if (!last_idle_notification_gc_count_init_) {
5819 last_idle_notification_gc_count_ = gc_count_;
5820 last_idle_notification_gc_count_init_ = true;
5823 bool uncommit = true;
5824 bool finished = false;
5826 // Reset the number of idle notifications received when a number of
5827 // GCs have taken place. This allows another round of cleanup based
5828 // on idle notifications if enough work has been carried out to
5829 // provoke a number of garbage collections.
5830 if (gc_count_ - last_idle_notification_gc_count_ < kGCsBetweenCleanup) {
5831 number_idle_notifications_ =
5832 Min(number_idle_notifications_ + 1, kMaxIdleCount);
5834 number_idle_notifications_ = 0;
5835 last_idle_notification_gc_count_ = gc_count_;
5838 if (number_idle_notifications_ == kIdlesBeforeScavenge) {
5839 CollectGarbage(NEW_SPACE, "idle notification");
5840 new_space_.Shrink();
5841 last_idle_notification_gc_count_ = gc_count_;
5842 } else if (number_idle_notifications_ == kIdlesBeforeMarkSweep) {
5843 // Before doing the mark-sweep collections we clear the
5844 // compilation cache to avoid hanging on to source code and
5845 // generated code for cached functions.
5846 isolate_->compilation_cache()->Clear();
5848 CollectAllGarbage(kReduceMemoryFootprintMask, "idle notification");
5849 new_space_.Shrink();
5850 last_idle_notification_gc_count_ = gc_count_;
5852 } else if (number_idle_notifications_ == kIdlesBeforeMarkCompact) {
5853 CollectAllGarbage(kReduceMemoryFootprintMask, "idle notification");
5854 new_space_.Shrink();
5855 last_idle_notification_gc_count_ = gc_count_;
5856 number_idle_notifications_ = 0;
5858 } else if (number_idle_notifications_ > kIdlesBeforeMarkCompact) {
5859 // If we have received more than kIdlesBeforeMarkCompact idle
5860     // notifications, we do not perform any cleanup because we don't
5861 // expect to gain much by doing so.
5865 if (uncommit) UncommitFromSpace();
5873 void Heap::Print() {
5874 if (!HasBeenSetUp()) return;
5875 isolate()->PrintStack();
5876 AllSpaces spaces(this);
5877 for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
5883 void Heap::ReportCodeStatistics(const char* title) {
5884 PrintF(">>>>>> Code Stats (%s) >>>>>>\n", title);
5885 PagedSpace::ResetCodeStatistics();
5886 // We do not look for code in new space, map space, or old space. If code
5887 // somehow ends up in those spaces, we would miss it here.
5888 code_space_->CollectCodeStatistics();
5889 lo_space_->CollectCodeStatistics();
5890 PagedSpace::ReportCodeStatistics();
5894 // This function expects that NewSpace's allocated objects histogram is
5895 // populated (via a call to CollectStatistics or else as a side effect of a
5896 // just-completed scavenge collection).
5897 void Heap::ReportHeapStatistics(const char* title) {
5899 PrintF(">>>>>> =============== %s (%d) =============== >>>>>>\n",
5901 PrintF("old_gen_promotion_limit_ %" V8_PTR_PREFIX "d\n",
5902 old_gen_promotion_limit_);
5903 PrintF("old_gen_allocation_limit_ %" V8_PTR_PREFIX "d\n",
5904 old_gen_allocation_limit_);
5905 PrintF("old_gen_limit_factor_ %d\n", old_gen_limit_factor_);
5908 PrintF("Number of handles : %d\n", HandleScope::NumberOfHandles(isolate_));
5909 isolate_->global_handles()->PrintStats();
5912 PrintF("Heap statistics : ");
5913 isolate_->memory_allocator()->ReportStatistics();
5914 PrintF("To space : ");
5915 new_space_.ReportStatistics();
5916 PrintF("Old pointer space : ");
5917 old_pointer_space_->ReportStatistics();
5918 PrintF("Old data space : ");
5919 old_data_space_->ReportStatistics();
5920 PrintF("Code space : ");
5921 code_space_->ReportStatistics();
5922 PrintF("Map space : ");
5923 map_space_->ReportStatistics();
5924 PrintF("Cell space : ");
5925 cell_space_->ReportStatistics();
5926 PrintF("Large object space : ");
5927 lo_space_->ReportStatistics();
5928 PrintF(">>>>>> ========================================= >>>>>>\n");
5933 bool Heap::Contains(HeapObject* value) {
5934 return Contains(value->address());
5938 bool Heap::Contains(Address addr) {
5939 if (OS::IsOutsideAllocatedSpace(addr)) return false;
5940 return HasBeenSetUp() &&
5941 (new_space_.ToSpaceContains(addr) ||
5942 old_pointer_space_->Contains(addr) ||
5943 old_data_space_->Contains(addr) ||
5944 code_space_->Contains(addr) ||
5945 map_space_->Contains(addr) ||
5946 cell_space_->Contains(addr) ||
5947 lo_space_->SlowContains(addr));
5951 bool Heap::InSpace(HeapObject* value, AllocationSpace space) {
5952 return InSpace(value->address(), space);
5956 bool Heap::InSpace(Address addr, AllocationSpace space) {
5957 if (OS::IsOutsideAllocatedSpace(addr)) return false;
5958 if (!HasBeenSetUp()) return false;
5962 return new_space_.ToSpaceContains(addr);
5963 case OLD_POINTER_SPACE:
5964 return old_pointer_space_->Contains(addr);
5965 case OLD_DATA_SPACE:
5966 return old_data_space_->Contains(addr);
5968 return code_space_->Contains(addr);
5970 return map_space_->Contains(addr);
5972 return cell_space_->Contains(addr);
5974 return lo_space_->SlowContains(addr);
5982 void Heap::Verify() {
5983 CHECK(HasBeenSetUp());
5985 store_buffer()->Verify();
5987 VerifyPointersVisitor visitor;
5988 IterateRoots(&visitor, VISIT_ONLY_STRONG);
5990 new_space_.Verify();
5992 old_pointer_space_->Verify(&visitor);
5993 map_space_->Verify(&visitor);
5995 VerifyPointersVisitor no_dirty_regions_visitor;
5996 old_data_space_->Verify(&no_dirty_regions_visitor);
5997 code_space_->Verify(&no_dirty_regions_visitor);
5998 cell_space_->Verify(&no_dirty_regions_visitor);
6000 lo_space_->Verify();
6005 MaybeObject* Heap::InternalizeUtf8String(Vector<const char> string) {
6006 Object* result = NULL;
6008 { MaybeObject* maybe_new_table =
6009 string_table()->LookupUtf8String(string, &result);
6010 if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
6012 // Can't use set_string_table because StringTable::cast knows that
6013 // StringTable is a singleton and checks for identity.
6014 roots_[kStringTableRootIndex] = new_table;
6015 ASSERT(result != NULL);
6020 MaybeObject* Heap::InternalizeOneByteString(Vector<const uint8_t> string) {
6021 Object* result = NULL;
6023 { MaybeObject* maybe_new_table =
6024 string_table()->LookupOneByteString(string, &result);
6025 if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
6027 // Can't use set_string_table because StringTable::cast knows that
6028 // StringTable is a singleton and checks for identity.
6029 roots_[kStringTableRootIndex] = new_table;
6030 ASSERT(result != NULL);
6035 MaybeObject* Heap::InternalizeOneByteString(Handle<SeqOneByteString> string,
6038 Object* result = NULL;
6040 { MaybeObject* maybe_new_table =
6041 string_table()->LookupSubStringOneByteString(string,
6045 if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
6047 // Can't use set_string_table because StringTable::cast knows that
6048 // StringTable is a singleton and checks for identity.
6049 roots_[kStringTableRootIndex] = new_table;
6050 ASSERT(result != NULL);
6055 MaybeObject* Heap::InternalizeTwoByteString(Vector<const uc16> string) {
6056 Object* result = NULL;
6058 { MaybeObject* maybe_new_table =
6059 string_table()->LookupTwoByteString(string, &result);
6060 if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
6062 // Can't use set_string_table because StringTable::cast knows that
6063 // StringTable is a singleton and checks for identity.
6064 roots_[kStringTableRootIndex] = new_table;
6065 ASSERT(result != NULL);
6070 MaybeObject* Heap::InternalizeString(String* string) {
6071 if (string->IsInternalizedString()) return string;
6072 Object* result = NULL;
6074 { MaybeObject* maybe_new_table =
6075 string_table()->LookupString(string, &result);
6076 if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
6078 // Can't use set_string_table because StringTable::cast knows that
6079 // StringTable is a singleton and checks for identity.
6080 roots_[kStringTableRootIndex] = new_table;
6081 ASSERT(result != NULL);
6086 bool Heap::InternalizeStringIfExists(String* string, String** result) {
6087 if (string->IsInternalizedString()) {
6091 return string_table()->LookupStringIfExists(string, result);
6095 void Heap::ZapFromSpace() {
6096 NewSpacePageIterator it(new_space_.FromSpaceStart(),
6097 new_space_.FromSpaceEnd());
6098 while (it.has_next()) {
6099 NewSpacePage* page = it.next();
6100 for (Address cursor = page->area_start(), limit = page->area_end();
6102 cursor += kPointerSize) {
6103 Memory::Address_at(cursor) = kFromSpaceZapValue;
6109 void Heap::IterateAndMarkPointersToFromSpace(Address start,
6111 ObjectSlotCallback callback) {
6112 Address slot_address = start;
6114   // We do not collect slots on new space objects during mutation, so we
6115   // have to scan for pointers to evacuation candidates when we promote
6116   // objects. But we should not record any slots on non-black objects:
6117   // a grey object's slots would be rescanned anyway, and a white object
6118   // might not survive until the end of the collection, so recording its
6119   // slots would violate the invariant.
6120 bool record_slots = false;
6121 if (incremental_marking()->IsCompacting()) {
6122 MarkBit mark_bit = Marking::MarkBitFrom(HeapObject::FromAddress(start));
6123 record_slots = Marking::IsBlack(mark_bit);
6126 while (slot_address < end) {
6127 Object** slot = reinterpret_cast<Object**>(slot_address);
6128 Object* object = *slot;
6129 // If the store buffer becomes overfull we mark pages as being exempt from
6130 // the store buffer. These pages are scanned to find pointers that point
6131 // to the new space. In that case we may hit newly promoted objects and
6132 // fix the pointers before the promotion queue gets to them. Thus the 'if'.
6133 if (object->IsHeapObject()) {
6134 if (Heap::InFromSpace(object)) {
6135 callback(reinterpret_cast<HeapObject**>(slot),
6136 HeapObject::cast(object));
6137 Object* new_object = *slot;
6138 if (InNewSpace(new_object)) {
6139 SLOW_ASSERT(Heap::InToSpace(new_object));
6140 SLOW_ASSERT(new_object->IsHeapObject());
6141 store_buffer_.EnterDirectlyIntoStoreBuffer(
6142 reinterpret_cast<Address>(slot));
6144 SLOW_ASSERT(!MarkCompactCollector::IsOnEvacuationCandidate(new_object));
6145 } else if (record_slots &&
6146 MarkCompactCollector::IsOnEvacuationCandidate(object)) {
6147 mark_compact_collector()->RecordSlot(slot, slot, object);
6150 slot_address += kPointerSize;
6156 typedef bool (*CheckStoreBufferFilter)(Object** addr);
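// The map filter below appears to rely on maps being packed back-to-back in
// the map space, so that (address % Map::kSize) recovers the offset of a
// slot within its Map and can be compared against the pointer-field range.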
6159 bool IsAMapPointerAddress(Object** addr) {
6160 uintptr_t a = reinterpret_cast<uintptr_t>(addr);
6161 int mod = a % Map::kSize;
6162 return mod >= Map::kPointerFieldsBeginOffset &&
6163 mod < Map::kPointerFieldsEndOffset;
6167 bool EverythingsAPointer(Object** addr) {
6172 static void CheckStoreBuffer(Heap* heap,
6175 Object**** store_buffer_position,
6176 Object*** store_buffer_top,
6177 CheckStoreBufferFilter filter,
6178 Address special_garbage_start,
6179 Address special_garbage_end) {
6180 Map* free_space_map = heap->free_space_map();
6181 for ( ; current < limit; current++) {
6182 Object* o = *current;
6183 Address current_address = reinterpret_cast<Address>(current);
6185 if (o == free_space_map) {
6186 Address current_address = reinterpret_cast<Address>(current);
6187 FreeSpace* free_space =
6188 FreeSpace::cast(HeapObject::FromAddress(current_address));
6189 int skip = free_space->Size();
6190 ASSERT(current_address + skip <= reinterpret_cast<Address>(limit));
6192 current_address += skip - kPointerSize;
6193 current = reinterpret_cast<Object**>(current_address);
6196 // Skip the current linear allocation space between top and limit which is
6197 // unmarked with the free space map, but can contain junk.
6198 if (current_address == special_garbage_start &&
6199 special_garbage_end != special_garbage_start) {
6200 current_address = special_garbage_end - kPointerSize;
6201 current = reinterpret_cast<Object**>(current_address);
6204 if (!(*filter)(current)) continue;
6205 ASSERT(current_address < special_garbage_start ||
6206 current_address >= special_garbage_end);
6207 ASSERT(reinterpret_cast<uintptr_t>(o) != kFreeListZapValue);
6208 // We have to check that the pointer does not point into new space
6209 // without trying to cast it to a heap object since the hash field of
6210   // a string can contain values like 1 and 3 which are tagged null
6211   // pointers.
6212 if (!heap->InNewSpace(o)) continue;
6213 while (**store_buffer_position < current &&
6214 *store_buffer_position < store_buffer_top) {
6215 (*store_buffer_position)++;
6217 if (**store_buffer_position != current ||
6218 *store_buffer_position == store_buffer_top) {
6219 Object** obj_start = current;
6220 while (!(*obj_start)->IsMap()) obj_start--;
6227 // Check that the store buffer contains all intergenerational pointers by
6228 // scanning a page and ensuring that all pointers to young space are in the
6229 // store buffer.
6230 void Heap::OldPointerSpaceCheckStoreBuffer() {
6231 OldSpace* space = old_pointer_space();
6232 PageIterator pages(space);
6234 store_buffer()->SortUniq();
6236 while (pages.has_next()) {
6237 Page* page = pages.next();
6238 Object** current = reinterpret_cast<Object**>(page->area_start());
6240 Address end = page->area_end();
6242 Object*** store_buffer_position = store_buffer()->Start();
6243 Object*** store_buffer_top = store_buffer()->Top();
6245 Object** limit = reinterpret_cast<Object**>(end);
6246 CheckStoreBuffer(this,
6249 &store_buffer_position,
6251 &EverythingsAPointer,
6258 void Heap::MapSpaceCheckStoreBuffer() {
6259 MapSpace* space = map_space();
6260 PageIterator pages(space);
6262 store_buffer()->SortUniq();
6264 while (pages.has_next()) {
6265 Page* page = pages.next();
6266 Object** current = reinterpret_cast<Object**>(page->area_start());
6268 Address end = page->area_end();
6270 Object*** store_buffer_position = store_buffer()->Start();
6271 Object*** store_buffer_top = store_buffer()->Top();
6273 Object** limit = reinterpret_cast<Object**>(end);
6274 CheckStoreBuffer(this,
6277 &store_buffer_position,
6279 &IsAMapPointerAddress,
6286 void Heap::LargeObjectSpaceCheckStoreBuffer() {
6287 LargeObjectIterator it(lo_space());
6288 for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
6289 // We only have code, sequential strings, or fixed arrays in large
6290 // object space, and only fixed arrays can possibly contain pointers to
6291 // the young generation.
6292 if (object->IsFixedArray()) {
6293 Object*** store_buffer_position = store_buffer()->Start();
6294 Object*** store_buffer_top = store_buffer()->Top();
6295 Object** current = reinterpret_cast<Object**>(object->address());
6297 reinterpret_cast<Object**>(object->address() + object->Size());
6298 CheckStoreBuffer(this,
6301 &store_buffer_position,
6303 &EverythingsAPointer,
6312 void Heap::IterateRoots(ObjectVisitor* v, VisitMode mode) {
6313 IterateStrongRoots(v, mode);
6314 IterateWeakRoots(v, mode);
6318 void Heap::IterateWeakRoots(ObjectVisitor* v, VisitMode mode) {
6319 v->VisitPointer(reinterpret_cast<Object**>(&roots_[kStringTableRootIndex]));
6320 v->Synchronize(VisitorSynchronization::kStringTable);
6321 if (mode != VISIT_ALL_IN_SCAVENGE &&
6322 mode != VISIT_ALL_IN_SWEEP_NEWSPACE) {
6323 // Scavenge collections have special processing for this.
6324 external_string_table_.Iterate(v);
6325 error_object_list_.Iterate(v);
6327 v->Synchronize(VisitorSynchronization::kExternalStringsTable);
6331 void Heap::IterateStrongRoots(ObjectVisitor* v, VisitMode mode) {
6332 v->VisitPointers(&roots_[0], &roots_[kStrongRootListLength]);
6333 v->Synchronize(VisitorSynchronization::kStrongRootList);
6335 v->VisitPointer(BitCast<Object**>(&hidden_string_));
6336 v->Synchronize(VisitorSynchronization::kInternalizedString);
6338 isolate_->bootstrapper()->Iterate(v);
6339 v->Synchronize(VisitorSynchronization::kBootstrapper);
6340 isolate_->Iterate(v);
6341 v->Synchronize(VisitorSynchronization::kTop);
6342 Relocatable::Iterate(v);
6343 v->Synchronize(VisitorSynchronization::kRelocatable);
6345 #ifdef ENABLE_DEBUGGER_SUPPORT
6346 isolate_->debug()->Iterate(v);
6347 if (isolate_->deoptimizer_data() != NULL) {
6348 isolate_->deoptimizer_data()->Iterate(v);
6351 v->Synchronize(VisitorSynchronization::kDebug);
6352 isolate_->compilation_cache()->Iterate(v);
6353 v->Synchronize(VisitorSynchronization::kCompilationCache);
6355 // Iterate over local handles in handle scopes.
6356 isolate_->handle_scope_implementer()->Iterate(v);
6357 isolate_->IterateDeferredHandles(v);
6358 v->Synchronize(VisitorSynchronization::kHandleScope);
6360 // Iterate over the builtin code objects and code stubs in the
6361 // heap. Note that it is not necessary to iterate over code objects
6362 // on scavenge collections.
6363 if (mode != VISIT_ALL_IN_SCAVENGE) {
6364 isolate_->builtins()->IterateBuiltins(v);
6366 v->Synchronize(VisitorSynchronization::kBuiltins);
6368 // Iterate over global handles.
6370 case VISIT_ONLY_STRONG:
6371 isolate_->global_handles()->IterateStrongRoots(v);
6373 case VISIT_ALL_IN_SCAVENGE:
6374 isolate_->global_handles()->IterateNewSpaceStrongAndDependentRoots(v);
6376 case VISIT_ALL_IN_SWEEP_NEWSPACE:
6378 isolate_->global_handles()->IterateAllRoots(v);
6381 v->Synchronize(VisitorSynchronization::kGlobalHandles);
6383 // Iterate over pointers being held by inactive threads.
6384 isolate_->thread_manager()->Iterate(v);
6385 v->Synchronize(VisitorSynchronization::kThreadManager);
6387   // Iterate over the pointers the Serialization/Deserialization code is
6388   // holding.
6389 // During garbage collection this keeps the partial snapshot cache alive.
6390 // During deserialization of the startup snapshot this creates the partial
6391 // snapshot cache and deserializes the objects it refers to. During
6392 // serialization this does nothing, since the partial snapshot cache is
6393 // empty. However the next thing we do is create the partial snapshot,
6394 // filling up the partial snapshot cache with objects it needs as we go.
6395 SerializerDeserializer::Iterate(v);
6396 // We don't do a v->Synchronize call here, because in debug mode that will
6397 // output a flag to the snapshot. However at this point the serializer and
6398 // deserializer are deliberately a little unsynchronized (see above) so the
6399 // checking of the sync flag in the snapshot would fail.
6403 // TODO(1236194): Since the heap size is configurable on the command line
6404 // and through the API, we should gracefully handle the case that the heap
6405 // size is not big enough to fit all the initial objects.
6406 bool Heap::ConfigureHeap(int max_semispace_size,
6407 intptr_t max_old_gen_size,
6408 intptr_t max_executable_size) {
6409 if (HasBeenSetUp()) return false;
6411 if (FLAG_stress_compaction) {
6412 // This will cause more frequent GCs when stressing.
6413 max_semispace_size_ = Page::kPageSize;
6416 if (max_semispace_size > 0) {
6417 if (max_semispace_size < Page::kPageSize) {
6418 max_semispace_size = Page::kPageSize;
6419 if (FLAG_trace_gc) {
6420 PrintPID("Max semispace size cannot be less than %dkbytes\n",
6421 Page::kPageSize >> 10);
6424 max_semispace_size_ = max_semispace_size;
6427 if (Snapshot::IsEnabled()) {
6428 // If we are using a snapshot we always reserve the default amount
6429 // of memory for each semispace because code in the snapshot has
6430 // write-barrier code that relies on the size and alignment of new
6431 // space. We therefore cannot use a larger max semispace size
6432 // than the default reserved semispace size.
6433 if (max_semispace_size_ > reserved_semispace_size_) {
6434 max_semispace_size_ = reserved_semispace_size_;
6435 if (FLAG_trace_gc) {
6436 PrintPID("Max semispace size cannot be more than %dkbytes\n",
6437 reserved_semispace_size_ >> 10);
6441 // If we are not using snapshots we reserve space for the actual
6442 // max semispace size.
6443 reserved_semispace_size_ = max_semispace_size_;
6446 if (max_old_gen_size > 0) max_old_generation_size_ = max_old_gen_size;
6447 if (max_executable_size > 0) {
6448 max_executable_size_ = RoundUp(max_executable_size, Page::kPageSize);
6451   // The max executable size must be less than or equal to the max old
6452   // generation size.
6453 if (max_executable_size_ > max_old_generation_size_) {
6454 max_executable_size_ = max_old_generation_size_;
6457   // The new space size must be a power of two to support single-bit testing
6458   // for containment.
6459 max_semispace_size_ = RoundUpToPowerOf2(max_semispace_size_);
6460 reserved_semispace_size_ = RoundUpToPowerOf2(reserved_semispace_size_);
6461 initial_semispace_size_ = Min(initial_semispace_size_, max_semispace_size_);
6462 external_allocation_limit_ = 16 * max_semispace_size_;
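  // RoundUpToPowerOf2 means, for example, that a requested 3 MB semispace
  // becomes 4 MB; the external allocation limit then follows the rounded
  // semispace size at a fixed 16x multiple.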
6464 // The old generation is paged and needs at least one page for each space.
6465 int paged_space_count = LAST_PAGED_SPACE - FIRST_PAGED_SPACE + 1;
6466 max_old_generation_size_ = Max(static_cast<intptr_t>(paged_space_count *
6468 RoundUp(max_old_generation_size_,
6476 bool Heap::ConfigureHeapDefault() {
6477 return ConfigureHeap(static_cast<intptr_t>(FLAG_max_new_space_size / 2) * KB,
6478 static_cast<intptr_t>(FLAG_max_old_space_size) * MB,
6479 static_cast<intptr_t>(FLAG_max_executable_size) * MB);
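// Presumably --max_new_space_size is given in kbytes and names the total new
// space (hence the division by two per semispace), while the old-space and
// executable limits are given in Mbytes; e.g. --max_old_space_size=512
// requests a 512 MB old generation.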
6483 void Heap::RecordStats(HeapStats* stats, bool take_snapshot) {
6484 *stats->start_marker = HeapStats::kStartMarker;
6485 *stats->end_marker = HeapStats::kEndMarker;
6486 *stats->new_space_size = new_space_.SizeAsInt();
6487 *stats->new_space_capacity = static_cast<int>(new_space_.Capacity());
6488 *stats->old_pointer_space_size = old_pointer_space_->SizeOfObjects();
6489 *stats->old_pointer_space_capacity = old_pointer_space_->Capacity();
6490 *stats->old_data_space_size = old_data_space_->SizeOfObjects();
6491 *stats->old_data_space_capacity = old_data_space_->Capacity();
6492 *stats->code_space_size = code_space_->SizeOfObjects();
6493 *stats->code_space_capacity = code_space_->Capacity();
6494 *stats->map_space_size = map_space_->SizeOfObjects();
6495 *stats->map_space_capacity = map_space_->Capacity();
6496 *stats->cell_space_size = cell_space_->SizeOfObjects();
6497 *stats->cell_space_capacity = cell_space_->Capacity();
6498 *stats->lo_space_size = lo_space_->Size();
6499 isolate_->global_handles()->RecordStats(stats);
6500 *stats->memory_allocator_size = isolate()->memory_allocator()->Size();
6501 *stats->memory_allocator_capacity =
6502 isolate()->memory_allocator()->Size() +
6503 isolate()->memory_allocator()->Available();
6504 *stats->os_error = OS::GetLastError();
6505 isolate()->memory_allocator()->Available();
6506 if (take_snapshot) {
6507 HeapIterator iterator(this);
6508 for (HeapObject* obj = iterator.next();
6510 obj = iterator.next()) {
6511 InstanceType type = obj->map()->instance_type();
6512 ASSERT(0 <= type && type <= LAST_TYPE);
6513 stats->objects_per_type[type]++;
6514 stats->size_per_type[type] += obj->Size();
6520 intptr_t Heap::PromotedSpaceSizeOfObjects() {
6521 return old_pointer_space_->SizeOfObjects()
6522 + old_data_space_->SizeOfObjects()
6523 + code_space_->SizeOfObjects()
6524 + map_space_->SizeOfObjects()
6525 + cell_space_->SizeOfObjects()
6526 + lo_space_->SizeOfObjects();
6530 intptr_t Heap::PromotedExternalMemorySize() {
6531 if (amount_of_external_allocated_memory_
6532 <= amount_of_external_allocated_memory_at_last_global_gc_) return 0;
6533 return amount_of_external_allocated_memory_
6534 - amount_of_external_allocated_memory_at_last_global_gc_;
6538 V8_DECLARE_ONCE(initialize_gc_once);
6540 static void InitializeGCOnce() {
6541 InitializeScavengingVisitorsTables();
6542 NewSpaceScavenger::Initialize();
6543 MarkCompactCollector::Initialize();
6546 bool Heap::SetUp() {
6548 allocation_timeout_ = FLAG_gc_interval;
6551 // Initialize heap spaces and initial maps and objects. Whenever something
6552 // goes wrong, just return false. The caller should check the results and
6553 // call Heap::TearDown() to release allocated memory.
6555 // If the heap is not yet configured (e.g. through the API), configure it.
6556 // Configuration is based on the flags new-space-size (really the semispace
6557 // size) and old-space-size if set or the initial values of semispace_size_
6558 // and old_generation_size_ otherwise.
6560 if (!ConfigureHeapDefault()) return false;
6563 CallOnce(&initialize_gc_once, &InitializeGCOnce);
6565 MarkMapPointersAsEncoded(false);
6567 // Set up memory allocator.
6568 if (!isolate_->memory_allocator()->SetUp(MaxReserved(), MaxExecutableSize()))
6571 // Set up new space.
6572 if (!new_space_.SetUp(reserved_semispace_size_, max_semispace_size_)) {
6576 // Initialize old pointer space.
6577 old_pointer_space_ =
6579 max_old_generation_size_,
6582 if (old_pointer_space_ == NULL) return false;
6583 if (!old_pointer_space_->SetUp()) return false;
6585 // Initialize old data space.
6588 max_old_generation_size_,
6591 if (old_data_space_ == NULL) return false;
6592 if (!old_data_space_->SetUp()) return false;
6594 // Initialize the code space, set its maximum capacity to the old
6595 // generation size. It needs executable memory.
6596 // On 64-bit platform(s), we put all code objects in a 2 GB range of
6597 // virtual address space, so that they can call each other with near calls.
6598 if (code_range_size_ > 0) {
6599 if (!isolate_->code_range()->SetUp(code_range_size_)) {
6605 new OldSpace(this, max_old_generation_size_, CODE_SPACE, EXECUTABLE);
6606 if (code_space_ == NULL) return false;
6607 if (!code_space_->SetUp()) return false;
6609 // Initialize map space.
6610 map_space_ = new MapSpace(this, max_old_generation_size_, MAP_SPACE);
6611 if (map_space_ == NULL) return false;
6612 if (!map_space_->SetUp()) return false;
6614 // Initialize global property cell space.
6615 cell_space_ = new CellSpace(this, max_old_generation_size_, CELL_SPACE);
6616 if (cell_space_ == NULL) return false;
6617 if (!cell_space_->SetUp()) return false;
6619   // The large object space may contain code or data. We set the memory
6620 // to be non-executable here for safety, but this means we need to enable it
6621 // explicitly when allocating large code objects.
6622 lo_space_ = new LargeObjectSpace(this, max_old_generation_size_, LO_SPACE);
6623 if (lo_space_ == NULL) return false;
6624 if (!lo_space_->SetUp()) return false;
6626 // Set up the seed that is used to randomize the string hash function.
6627 ASSERT(hash_seed() == 0);
6628 if (FLAG_randomize_hashes) {
6629 if (FLAG_hash_seed == 0) {
6631 Smi::FromInt(V8::RandomPrivate(isolate()) & 0x3fffffff));
6633 set_hash_seed(Smi::FromInt(FLAG_hash_seed));
6637 LOG(isolate_, IntPtrTEvent("heap-capacity", Capacity()));
6638 LOG(isolate_, IntPtrTEvent("heap-available", Available()));
6640 store_buffer()->SetUp();
6642 if (FLAG_parallel_recompilation) relocation_mutex_ = OS::CreateMutex();
6644 relocation_mutex_locked_by_optimizer_thread_ = false;
6650 bool Heap::CreateHeapObjects() {
6651 // Create initial maps.
6652 if (!CreateInitialMaps()) return false;
6653 if (!CreateApiObjects()) return false;
6655 // Create initial objects
6656 if (!CreateInitialObjects()) return false;
6658 native_contexts_list_ = undefined_value();
6663 void Heap::SetStackLimits() {
6664 ASSERT(isolate_ != NULL);
6665 ASSERT(isolate_ == isolate());
6666 // On 64 bit machines, pointers are generally out of range of Smis. We write
6667 // something that looks like an out of range Smi to the GC.
6669 // Set up the special root array entries containing the stack limits.
6670 // These are actually addresses, but the tag makes the GC ignore it.
6671 roots_[kStackLimitRootIndex] =
6672 reinterpret_cast<Object*>(
6673 (isolate_->stack_guard()->jslimit() & ~kSmiTagMask) | kSmiTag);
6674 roots_[kRealStackLimitRootIndex] =
6675 reinterpret_cast<Object*>(
6676 (isolate_->stack_guard()->real_jslimit() & ~kSmiTagMask) | kSmiTag);
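  // With kSmiTag == 0 and kSmiTagMask == 1 this simply clears the least
  // significant bit of each limit, so the stored address passes the Smi
  // check and is ignored by the GC.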
6680 void Heap::TearDown() {
6682 if (FLAG_verify_heap) {
6687 if (FLAG_print_cumulative_gc_stat) {
6689 PrintF("gc_count=%d ", gc_count_);
6690 PrintF("mark_sweep_count=%d ", ms_count_);
6691 PrintF("max_gc_pause=%.1f ", get_max_gc_pause());
6692 PrintF("total_gc_time=%.1f ", total_gc_time_ms_);
6693 PrintF("min_in_mutator=%.1f ", get_min_in_mutator());
6694 PrintF("max_alive_after_gc=%" V8_PTR_PREFIX "d ",
6695 get_max_alive_after_gc());
6696 PrintF("total_marking_time=%.1f ", marking_time());
6697 PrintF("total_sweeping_time=%.1f ", sweeping_time());
6701 isolate_->global_handles()->TearDown();
6703 external_string_table_.TearDown();
6705 error_object_list_.TearDown();
6707 new_space_.TearDown();
6709 if (old_pointer_space_ != NULL) {
6710 old_pointer_space_->TearDown();
6711 delete old_pointer_space_;
6712 old_pointer_space_ = NULL;
6715 if (old_data_space_ != NULL) {
6716 old_data_space_->TearDown();
6717 delete old_data_space_;
6718 old_data_space_ = NULL;
6721 if (code_space_ != NULL) {
6722 code_space_->TearDown();
6727 if (map_space_ != NULL) {
6728 map_space_->TearDown();
6733 if (cell_space_ != NULL) {
6734 cell_space_->TearDown();
6739 if (lo_space_ != NULL) {
6740 lo_space_->TearDown();
6745 store_buffer()->TearDown();
6746 incremental_marking()->TearDown();
6748 isolate_->memory_allocator()->TearDown();
6750 delete relocation_mutex_;
6754 void Heap::AddGCPrologueCallback(GCPrologueCallback callback, GCType gc_type) {
6755 ASSERT(callback != NULL);
6756 GCPrologueCallbackPair pair(callback, gc_type);
6757 ASSERT(!gc_prologue_callbacks_.Contains(pair));
6758 return gc_prologue_callbacks_.Add(pair);
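  // A minimal usage sketch (MyPrologueCallback is a hypothetical client
  // function; the kGCTypeMarkSweepCompact constant is assumed to come from
  // the public GCType enum):
  //   heap->AddGCPrologueCallback(&MyPrologueCallback, kGCTypeMarkSweepCompact);
  // The same function pointer has to be passed to RemoveGCPrologueCallback
  // below to unregister it.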
6762 void Heap::RemoveGCPrologueCallback(GCPrologueCallback callback) {
6763 ASSERT(callback != NULL);
6764 for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
6765 if (gc_prologue_callbacks_[i].callback == callback) {
6766 gc_prologue_callbacks_.Remove(i);
6774 void Heap::AddGCEpilogueCallback(GCEpilogueCallback callback, GCType gc_type) {
6775 ASSERT(callback != NULL);
6776 GCEpilogueCallbackPair pair(callback, gc_type);
6777 ASSERT(!gc_epilogue_callbacks_.Contains(pair));
6778 return gc_epilogue_callbacks_.Add(pair);
6782 void Heap::RemoveGCEpilogueCallback(GCEpilogueCallback callback) {
6783 ASSERT(callback != NULL);
6784 for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
6785 if (gc_epilogue_callbacks_[i].callback == callback) {
6786 gc_epilogue_callbacks_.Remove(i);
6796 class PrintHandleVisitor: public ObjectVisitor {
6798 void VisitPointers(Object** start, Object** end) {
6799 for (Object** p = start; p < end; p++)
6800 PrintF(" handle %p to %p\n",
6801 reinterpret_cast<void*>(p),
6802 reinterpret_cast<void*>(*p));
6806 void Heap::PrintHandles() {
6807 PrintF("Handles:\n");
6808 PrintHandleVisitor v;
6809 isolate_->handle_scope_implementer()->Iterate(&v);
6815 Space* AllSpaces::next() {
6816 switch (counter_++) {
6818 return heap_->new_space();
6819 case OLD_POINTER_SPACE:
6820 return heap_->old_pointer_space();
6821 case OLD_DATA_SPACE:
6822 return heap_->old_data_space();
6824 return heap_->code_space();
6826 return heap_->map_space();
6828 return heap_->cell_space();
6830 return heap_->lo_space();
6837 PagedSpace* PagedSpaces::next() {
6838 switch (counter_++) {
6839 case OLD_POINTER_SPACE:
6840 return heap_->old_pointer_space();
6841 case OLD_DATA_SPACE:
6842 return heap_->old_data_space();
6844 return heap_->code_space();
6846 return heap_->map_space();
6848 return heap_->cell_space();
6856 OldSpace* OldSpaces::next() {
6857 switch (counter_++) {
6858 case OLD_POINTER_SPACE:
6859 return heap_->old_pointer_space();
6860 case OLD_DATA_SPACE:
6861 return heap_->old_data_space();
6863 return heap_->code_space();
6870 SpaceIterator::SpaceIterator(Heap* heap)
6872 current_space_(FIRST_SPACE),
6878 SpaceIterator::SpaceIterator(Heap* heap, HeapObjectCallback size_func)
6880 current_space_(FIRST_SPACE),
6882 size_func_(size_func) {
6886 SpaceIterator::~SpaceIterator() {
6887 // Delete active iterator if any.
6892 bool SpaceIterator::has_next() {
6893 // Iterate until no more spaces.
6894 return current_space_ != LAST_SPACE;
6898 ObjectIterator* SpaceIterator::next() {
6899 if (iterator_ != NULL) {
6902 // Move to the next space
6904 if (current_space_ > LAST_SPACE) {
6909 // Return iterator for the new current space.
6910 return CreateIterator();
6914 // Create an iterator for the space to iterate.
6915 ObjectIterator* SpaceIterator::CreateIterator() {
6916 ASSERT(iterator_ == NULL);
6918 switch (current_space_) {
6920 iterator_ = new SemiSpaceIterator(heap_->new_space(), size_func_);
6922 case OLD_POINTER_SPACE:
6924 new HeapObjectIterator(heap_->old_pointer_space(), size_func_);
6926 case OLD_DATA_SPACE:
6927 iterator_ = new HeapObjectIterator(heap_->old_data_space(), size_func_);
6930 iterator_ = new HeapObjectIterator(heap_->code_space(), size_func_);
6933 iterator_ = new HeapObjectIterator(heap_->map_space(), size_func_);
6936 iterator_ = new HeapObjectIterator(heap_->cell_space(), size_func_);
6939 iterator_ = new LargeObjectIterator(heap_->lo_space(), size_func_);
6943   // Return the newly allocated iterator.
6944 ASSERT(iterator_ != NULL);
6949 class HeapObjectsFilter {
6951 virtual ~HeapObjectsFilter() {}
6952 virtual bool SkipObject(HeapObject* object) = 0;
6956 class UnreachableObjectsFilter : public HeapObjectsFilter {
6958 UnreachableObjectsFilter() {
6959 MarkReachableObjects();
6962 ~UnreachableObjectsFilter() {
6963 Isolate::Current()->heap()->mark_compact_collector()->ClearMarkbits();
6966 bool SkipObject(HeapObject* object) {
6967 MarkBit mark_bit = Marking::MarkBitFrom(object);
6968 return !mark_bit.Get();
6972 class MarkingVisitor : public ObjectVisitor {
6974 MarkingVisitor() : marking_stack_(10) {}
6976 void VisitPointers(Object** start, Object** end) {
6977 for (Object** p = start; p < end; p++) {
6978 if (!(*p)->IsHeapObject()) continue;
6979 HeapObject* obj = HeapObject::cast(*p);
6980 MarkBit mark_bit = Marking::MarkBitFrom(obj);
6981 if (!mark_bit.Get()) {
6983 marking_stack_.Add(obj);
6988 void TransitiveClosure() {
6989 while (!marking_stack_.is_empty()) {
6990 HeapObject* obj = marking_stack_.RemoveLast();
6996 List<HeapObject*> marking_stack_;
6999 void MarkReachableObjects() {
7000 Heap* heap = Isolate::Current()->heap();
7001 MarkingVisitor visitor;
7002 heap->IterateRoots(&visitor, VISIT_ALL);
7003 visitor.TransitiveClosure();
7006 AssertNoAllocation no_alloc;
7010 HeapIterator::HeapIterator(Heap* heap)
7012 filtering_(HeapIterator::kNoFiltering),
7018 HeapIterator::HeapIterator(Heap* heap,
7019 HeapIterator::HeapObjectsFiltering filtering)
7021 filtering_(filtering),
7027 HeapIterator::~HeapIterator() {
7032 void HeapIterator::Init() {
7033 // Start the iteration.
7034 space_iterator_ = new SpaceIterator(heap_);
7035 switch (filtering_) {
7036 case kFilterUnreachable:
7037 filter_ = new UnreachableObjectsFilter;
7042 object_iterator_ = space_iterator_->next();
7046 void HeapIterator::Shutdown() {
7048 // Assert that in filtering mode we have iterated through all
7049   // objects. Otherwise, the heap will be left in an inconsistent state.
7050 if (filtering_ != kNoFiltering) {
7051 ASSERT(object_iterator_ == NULL);
7054 // Make sure the last iterator is deallocated.
7055 delete space_iterator_;
7056 space_iterator_ = NULL;
7057 object_iterator_ = NULL;
7063 HeapObject* HeapIterator::next() {
7064 if (filter_ == NULL) return NextObject();
7066 HeapObject* obj = NextObject();
7067 while (obj != NULL && filter_->SkipObject(obj)) obj = NextObject();
7072 HeapObject* HeapIterator::NextObject() {
7073 // No iterator means we are done.
7074 if (object_iterator_ == NULL) return NULL;
7076 if (HeapObject* obj = object_iterator_->next_object()) {
7077 // If the current iterator has more objects we are fine.
7080   // Go through the spaces looking for one that has objects.
7081 while (space_iterator_->has_next()) {
7082 object_iterator_ = space_iterator_->next();
7083 if (HeapObject* obj = object_iterator_->next_object()) {
7088 // Done with the last space.
7089 object_iterator_ = NULL;
7094 void HeapIterator::reset() {
7095 // Restart the iterator.
7103 Object* const PathTracer::kAnyGlobalObject = reinterpret_cast<Object*>(NULL);
7105 class PathTracer::MarkVisitor: public ObjectVisitor {
7107 explicit MarkVisitor(PathTracer* tracer) : tracer_(tracer) {}
7108 void VisitPointers(Object** start, Object** end) {
7109 // Scan all HeapObject pointers in [start, end)
7110 for (Object** p = start; !tracer_->found() && (p < end); p++) {
7111 if ((*p)->IsHeapObject())
7112 tracer_->MarkRecursively(p, this);
7117 PathTracer* tracer_;
7121 class PathTracer::UnmarkVisitor: public ObjectVisitor {
7123 explicit UnmarkVisitor(PathTracer* tracer) : tracer_(tracer) {}
7124 void VisitPointers(Object** start, Object** end) {
7125 // Scan all HeapObject pointers in [start, end)
7126 for (Object** p = start; p < end; p++) {
7127 if ((*p)->IsHeapObject())
7128 tracer_->UnmarkRecursively(p, this);
7133 PathTracer* tracer_;
7137 void PathTracer::VisitPointers(Object** start, Object** end) {
7138 bool done = ((what_to_find_ == FIND_FIRST) && found_target_);
7139 // Visit all HeapObject pointers in [start, end)
7140 for (Object** p = start; !done && (p < end); p++) {
7141 if ((*p)->IsHeapObject()) {
7143 done = ((what_to_find_ == FIND_FIRST) && found_target_);
7149 void PathTracer::Reset() {
7150 found_target_ = false;
7151 object_stack_.Clear();
7155 void PathTracer::TracePathFrom(Object** root) {
7156 ASSERT((search_target_ == kAnyGlobalObject) ||
7157 search_target_->IsHeapObject());
7158 found_target_in_trace_ = false;
7161 MarkVisitor mark_visitor(this);
7162 MarkRecursively(root, &mark_visitor);
7164 UnmarkVisitor unmark_visitor(this);
7165 UnmarkRecursively(root, &unmark_visitor);
7171 static bool SafeIsNativeContext(HeapObject* obj) {
7172 return obj->map() == obj->GetHeap()->raw_unchecked_native_context_map();
7176 void PathTracer::MarkRecursively(Object** p, MarkVisitor* mark_visitor) {
7177 if (!(*p)->IsHeapObject()) return;
7179 HeapObject* obj = HeapObject::cast(*p);
7181 Object* map = obj->map();
7183 if (!map->IsHeapObject()) return; // visited before
7185 if (found_target_in_trace_) return; // stop if target found
7186 object_stack_.Add(obj);
7187 if (((search_target_ == kAnyGlobalObject) && obj->IsJSGlobalObject()) ||
7188 (obj == search_target_)) {
7189 found_target_in_trace_ = true;
7190 found_target_ = true;
7194 bool is_native_context = SafeIsNativeContext(obj);
7197 Map* map_p = reinterpret_cast<Map*>(HeapObject::cast(map));
7199 Address map_addr = map_p->address();
7201 obj->set_map_no_write_barrier(reinterpret_cast<Map*>(map_addr + kMarkTag));
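  // Marking here means offsetting the map word by kMarkTag: the
  // map->IsHeapObject() check above then treats the object as already
  // visited, and UnmarkRecursively below subtracts kMarkTag to restore the
  // original map.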
7203 // Scan the object body.
7204 if (is_native_context && (visit_mode_ == VISIT_ONLY_STRONG)) {
7205 // This is specialized to scan Context's properly.
7206 Object** start = reinterpret_cast<Object**>(obj->address() +
7207 Context::kHeaderSize);
7208 Object** end = reinterpret_cast<Object**>(obj->address() +
7209 Context::kHeaderSize + Context::FIRST_WEAK_SLOT * kPointerSize);
7210 mark_visitor->VisitPointers(start, end);
7212 obj->IterateBody(map_p->instance_type(),
7213 obj->SizeFromMap(map_p),
7217 // Scan the map after the body because the body is a lot more interesting
7218 // when doing leak detection.
7219 MarkRecursively(&map, mark_visitor);
7221 if (!found_target_in_trace_) // don't pop if found the target
7222 object_stack_.RemoveLast();
void PathTracer::UnmarkRecursively(Object** p, UnmarkVisitor* unmark_visitor) {
  if (!(*p)->IsHeapObject()) return;

  HeapObject* obj = HeapObject::cast(*p);
  Object* map = obj->map();

  if (map->IsHeapObject()) return;  // unmarked already

  // Remove the mark tag to recover the original map pointer.
  Address map_addr = reinterpret_cast<Address>(map);
  map_addr -= kMarkTag;

  ASSERT_TAG_ALIGNED(map_addr);

  HeapObject* map_p = HeapObject::FromAddress(map_addr);
  obj->set_map_no_write_barrier(reinterpret_cast<Map*>(map_p));

  UnmarkRecursively(reinterpret_cast<Object**>(&map_p), unmark_visitor);

  obj->IterateBody(Map::cast(map_p)->instance_type(),
                   obj->SizeFromMap(Map::cast(map_p)),
                   unmark_visitor);
}


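// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the original V8 sources): MarkRecursively
// and UnmarkRecursively above temporarily mark an object by offsetting its
// map pointer with kMarkTag and removing the offset again afterwards; a map
// slot that no longer holds a valid HeapObject pointer means "visited".  A
// minimal standalone version of the low-bit variant of this trick, assuming
// <stdint.h> is available; kSketchMarkTag, TagAsVisited, ClearVisited and
// IsVisited are hypothetical names.
// ---------------------------------------------------------------------------
static const uintptr_t kSketchMarkTag = 1;

static inline void* TagAsVisited(void* p) {
  // Heap pointers are at least word aligned, so the low bit is free.
  return reinterpret_cast<void*>(
      reinterpret_cast<uintptr_t>(p) | kSketchMarkTag);
}

static inline void* ClearVisited(void* p) {
  return reinterpret_cast<void*>(
      reinterpret_cast<uintptr_t>(p) & ~kSketchMarkTag);
}

static inline bool IsVisited(void* p) {
  return (reinterpret_cast<uintptr_t>(p) & kSketchMarkTag) != 0;
}

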
void PathTracer::ProcessResults() {
  if (found_target_) {
    PrintF("=====================================\n");
    PrintF("====        Path to object       ====\n");
    PrintF("=====================================\n\n");

    ASSERT(!object_stack_.is_empty());
    for (int i = 0; i < object_stack_.length(); i++) {
      if (i > 0) PrintF("\n     |\n     |\n     V\n\n");
      Object* obj = object_stack_[i];
      obj->Print();
    }
    PrintF("=====================================\n");
  }
}


// Triggers a depth-first traversal of reachable objects from one
// given root object and finds a path to a specific heap object and
// prints it.
void Heap::TracePathToObjectFrom(Object* target, Object* root) {
  PathTracer tracer(target, PathTracer::FIND_ALL, VISIT_ALL);
  tracer.VisitPointer(&root);
}


// Triggers a depth-first traversal of reachable objects from roots
// and finds a path to a specific heap object and prints it.
void Heap::TracePathToObject(Object* target) {
  PathTracer tracer(target, PathTracer::FIND_ALL, VISIT_ALL);
  IterateRoots(&tracer, VISIT_ONLY_STRONG);
}


// Triggers a depth-first traversal of reachable objects from roots
// and finds a path to any global object and prints it. Useful for
// determining the source for leaks of global objects.
void Heap::TracePathToGlobal() {
  PathTracer tracer(PathTracer::kAnyGlobalObject,
                    PathTracer::FIND_ALL,
                    VISIT_ALL);
  IterateRoots(&tracer, VISIT_ONLY_STRONG);
}


static intptr_t CountTotalHolesSize(Heap* heap) {
  intptr_t holes_size = 0;
  OldSpaces spaces(heap);
  for (OldSpace* space = spaces.next();
       space != NULL;
       space = spaces.next()) {
    holes_size += space->Waste() + space->Available();
  }
  return holes_size;
}


GCTracer::GCTracer(Heap* heap,
                   const char* gc_reason,
                   const char* collector_reason)
    : start_time_(0.0),
      start_object_size_(0),
      start_memory_size_(0),
      gc_count_(0),
      full_gc_count_(0),
      allocated_since_last_gc_(0),
      spent_in_mutator_(0),
      promoted_objects_size_(0),
      nodes_died_in_new_space_(0),
      nodes_copied_in_new_space_(0),
      nodes_promoted_(0),
      heap_(heap),
      gc_reason_(gc_reason),
      collector_reason_(collector_reason) {
  if (!FLAG_trace_gc && !FLAG_print_cumulative_gc_stat) return;
  start_time_ = OS::TimeCurrentMillis();
  start_object_size_ = heap_->SizeOfObjects();
  start_memory_size_ = heap_->isolate()->memory_allocator()->Size();

  for (int i = 0; i < Scope::kNumberOfScopes; i++) {
    scopes_[i] = 0;
  }

  in_free_list_or_wasted_before_gc_ = CountTotalHolesSize(heap);

  allocated_since_last_gc_ =
      heap_->SizeOfObjects() - heap_->alive_after_last_gc_;

  if (heap_->last_gc_end_timestamp_ > 0) {
    spent_in_mutator_ = Max(start_time_ - heap_->last_gc_end_timestamp_, 0.0);
  }

  steps_count_ = heap_->incremental_marking()->steps_count();
  steps_took_ = heap_->incremental_marking()->steps_took();
  longest_step_ = heap_->incremental_marking()->longest_step();
  steps_count_since_last_gc_ =
      heap_->incremental_marking()->steps_count_since_last_gc();
  steps_took_since_last_gc_ =
      heap_->incremental_marking()->steps_took_since_last_gc();
}


GCTracer::~GCTracer() {
  // Printf ONE line iff flag is set.
  if (!FLAG_trace_gc && !FLAG_print_cumulative_gc_stat) return;

  bool first_gc = (heap_->last_gc_end_timestamp_ == 0);

  heap_->alive_after_last_gc_ = heap_->SizeOfObjects();
  heap_->last_gc_end_timestamp_ = OS::TimeCurrentMillis();

  double time = heap_->last_gc_end_timestamp_ - start_time_;

  // Update cumulative GC statistics if required.
  if (FLAG_print_cumulative_gc_stat) {
    heap_->total_gc_time_ms_ += time;
    heap_->max_gc_pause_ = Max(heap_->max_gc_pause_, time);
    heap_->max_alive_after_gc_ = Max(heap_->max_alive_after_gc_,
                                     heap_->alive_after_last_gc_);
    if (!first_gc) {
      heap_->min_in_mutator_ = Min(heap_->min_in_mutator_,
                                   spent_in_mutator_);
    }
  } else if (FLAG_trace_gc_verbose) {
    heap_->total_gc_time_ms_ += time;
  }

  if (collector_ == SCAVENGER && FLAG_trace_gc_ignore_scavenger) return;

  heap_->AddMarkingTime(scopes_[Scope::MC_MARK]);

  if (FLAG_print_cumulative_gc_stat && !FLAG_trace_gc) return;
  PrintPID("%8.0f ms: ", heap_->isolate()->time_millis_since_init());

  if (!FLAG_trace_gc_nvp) {
    int external_time = static_cast<int>(scopes_[Scope::EXTERNAL]);

    double end_memory_size_mb =
        static_cast<double>(heap_->isolate()->memory_allocator()->Size()) / MB;

    PrintF("%s %.1f (%.1f) -> %.1f (%.1f) MB, ",
           CollectorString(),
           static_cast<double>(start_object_size_) / MB,
           static_cast<double>(start_memory_size_) / MB,
           SizeOfHeapObjects(),
           end_memory_size_mb);

    if (external_time > 0) PrintF("%d / ", external_time);
    PrintF("%.1f ms", time);
    if (steps_count_ > 0) {
      if (collector_ == SCAVENGER) {
        PrintF(" (+ %.1f ms in %d steps since last GC)",
               steps_took_since_last_gc_,
               steps_count_since_last_gc_);
      } else {
        PrintF(" (+ %.1f ms in %d steps since start of marking, "
               "biggest step %.1f ms)",
               steps_took_,
               steps_count_,
               longest_step_);
      }
    }

    if (gc_reason_ != NULL) {
      PrintF(" [%s]", gc_reason_);
    }

    if (collector_reason_ != NULL) {
      PrintF(" [%s]", collector_reason_);
    }

    PrintF(".\n");
  } else {
    PrintF("pause=%.1f ", time);
    PrintF("mutator=%.1f ", spent_in_mutator_);
    PrintF("gc=");
    switch (collector_) {
      case SCAVENGER:
        PrintF("s");
        break;
      case MARK_COMPACTOR:
        PrintF("ms");
        break;
      default:
        UNREACHABLE();
    }
    PrintF(" ");

    PrintF("external=%.1f ", scopes_[Scope::EXTERNAL]);
    PrintF("mark=%.1f ", scopes_[Scope::MC_MARK]);
    PrintF("sweep=%.1f ", scopes_[Scope::MC_SWEEP]);
    PrintF("sweepns=%.1f ", scopes_[Scope::MC_SWEEP_NEWSPACE]);
    PrintF("evacuate=%.1f ", scopes_[Scope::MC_EVACUATE_PAGES]);
    PrintF("new_new=%.1f ", scopes_[Scope::MC_UPDATE_NEW_TO_NEW_POINTERS]);
    PrintF("root_new=%.1f ", scopes_[Scope::MC_UPDATE_ROOT_TO_NEW_POINTERS]);
    PrintF("old_new=%.1f ", scopes_[Scope::MC_UPDATE_OLD_TO_NEW_POINTERS]);
    PrintF("compaction_ptrs=%.1f ",
           scopes_[Scope::MC_UPDATE_POINTERS_TO_EVACUATED]);
    PrintF("intracompaction_ptrs=%.1f ",
           scopes_[Scope::MC_UPDATE_POINTERS_BETWEEN_EVACUATED]);
    PrintF("misc_compaction=%.1f ", scopes_[Scope::MC_UPDATE_MISC_POINTERS]);

    PrintF("total_size_before=%" V8_PTR_PREFIX "d ", start_object_size_);
    PrintF("total_size_after=%" V8_PTR_PREFIX "d ", heap_->SizeOfObjects());
    PrintF("holes_size_before=%" V8_PTR_PREFIX "d ",
           in_free_list_or_wasted_before_gc_);
    PrintF("holes_size_after=%" V8_PTR_PREFIX "d ", CountTotalHolesSize(heap_));

    PrintF("allocated=%" V8_PTR_PREFIX "d ", allocated_since_last_gc_);
    PrintF("promoted=%" V8_PTR_PREFIX "d ", promoted_objects_size_);
    PrintF("nodes_died_in_new=%d ", nodes_died_in_new_space_);
    PrintF("nodes_copied_in_new=%d ", nodes_copied_in_new_space_);
    PrintF("nodes_promoted=%d ", nodes_promoted_);

    if (collector_ == SCAVENGER) {
      PrintF("stepscount=%d ", steps_count_since_last_gc_);
      PrintF("stepstook=%.1f ", steps_took_since_last_gc_);
    } else {
      PrintF("stepscount=%d ", steps_count_);
      PrintF("stepstook=%.1f ", steps_took_);
      PrintF("longeststep=%.1f ", longest_step_);
    }

    PrintF("\n");
  }

  heap_->PrintShortHeapStatistics();
}


const char* GCTracer::CollectorString() {
  switch (collector_) {
    case SCAVENGER:
      return "Scavenge";
    case MARK_COMPACTOR:
      return "Mark-sweep";
  }
  return "Unknown GC";
}


int KeyedLookupCache::Hash(Map* map, Name* name) {
  // Uses only lower 32 bits if pointers are larger.
  uintptr_t addr_hash =
      static_cast<uint32_t>(reinterpret_cast<uintptr_t>(map)) >> kMapHashShift;
  return static_cast<uint32_t>((addr_hash ^ name->Hash()) & kCapacityMask);
}


int KeyedLookupCache::Lookup(Map* map, Name* name) {
  int index = (Hash(map, name) & kHashMask);
  for (int i = 0; i < kEntriesPerBucket; i++) {
    Key& key = keys_[index + i];
    if ((key.map == map) && key.name->Equals(name)) {
      return field_offsets_[index + i];
    }
  }
  return kNotFound;
}


void KeyedLookupCache::Update(Map* map, Name* name, int field_offset) {
  if (!name->IsUniqueName()) {
    String* internalized_string;
    if (!HEAP->InternalizeStringIfExists(
            String::cast(name), &internalized_string)) {
      return;
    }
    name = internalized_string;
  }
  // This cache is cleared only between mark compact passes, so we expect the
  // cache to only contain old space names.
  ASSERT(!HEAP->InNewSpace(name));

  int index = (Hash(map, name) & kHashMask);
  // After a GC there will be free slots, so we use them in order (this may
  // help to get the most frequently used one in position 0).
  for (int i = 0; i < kEntriesPerBucket; i++) {
    Key& key = keys_[index + i];
    Object* free_entry_indicator = NULL;
    if (key.map == free_entry_indicator) {
      key.map = map;
      key.name = name;
      field_offsets_[index + i] = field_offset;
      return;
    }
  }

  // No free entry found in this bucket, so we move them all down one and
  // put the new entry at position zero.
  for (int i = kEntriesPerBucket - 1; i > 0; i--) {
    Key& key = keys_[index + i];
    Key& key2 = keys_[index + i - 1];
    key = key2;
    field_offsets_[index + i] = field_offsets_[index + i - 1];
  }

  // Write the new first entry.
  Key& key = keys_[index];
  key.map = map;
  key.name = name;
  field_offsets_[index] = field_offset;
}


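// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the original V8 sources): the bucket
// policy used by KeyedLookupCache::Update above -- fill a free slot in the
// bucket if one exists, otherwise shift every entry down by one and install
// the new entry at position zero -- shown on a self-contained cache.  The
// class SketchKeyedCache and its constants are hypothetical, and the keys
// are plain non-negative integers to keep the example minimal.
// ---------------------------------------------------------------------------
class SketchKeyedCache {
 public:
  SketchKeyedCache() {
    for (int i = 0; i < kBuckets * kWays; i++) keys_[i] = kFreeSlot;
  }

  // Returns the cached value for key, or -1 on a miss.
  int Lookup(int key) {
    int index = BucketStart(key);
    for (int i = 0; i < kWays; i++) {
      if (keys_[index + i] == key) return values_[index + i];
    }
    return -1;
  }

  void Update(int key, int value) {
    int index = BucketStart(key);
    // Prefer a free slot so hot entries stay near position zero.
    for (int i = 0; i < kWays; i++) {
      if (keys_[index + i] == kFreeSlot) {
        keys_[index + i] = key;
        values_[index + i] = value;
        return;
      }
    }
    // No free slot: move every entry down one and write position zero.
    for (int i = kWays - 1; i > 0; i--) {
      keys_[index + i] = keys_[index + i - 1];
      values_[index + i] = values_[index + i - 1];
    }
    keys_[index] = key;
    values_[index] = value;
  }

 private:
  static const int kBuckets = 64;
  static const int kWays = 2;
  static const int kFreeSlot = -1;  // Keys are assumed to be non-negative.

  int BucketStart(int key) { return (key & (kBuckets - 1)) * kWays; }

  int keys_[kBuckets * kWays];
  int values_[kBuckets * kWays];
};

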
void KeyedLookupCache::Clear() {
  for (int index = 0; index < kLength; index++) keys_[index].map = NULL;
}


void DescriptorLookupCache::Clear() {
  for (int index = 0; index < kLength; index++) keys_[index].source = NULL;
}


#ifdef DEBUG
void Heap::GarbageCollectionGreedyCheck() {
  ASSERT(FLAG_gc_greedy);
  if (isolate_->bootstrapper()->IsActive()) return;
  if (disallow_allocation_failure()) return;
  CollectGarbage(NEW_SPACE);
}
#endif


TranscendentalCache::SubCache::SubCache(Type t)
  : type_(t),
    isolate_(Isolate::Current()) {
  uint32_t in0 = 0xffffffffu;  // Bit-pattern for a NaN that isn't
  uint32_t in1 = 0xffffffffu;  // generated by the FPU.
  for (int i = 0; i < kCacheSize; i++) {
    elements_[i].in[0] = in0;
    elements_[i].in[1] = in1;
    elements_[i].output = NULL;
  }
}


void TranscendentalCache::Clear() {
  for (int i = 0; i < kNumberOfCaches; i++) {
    if (caches_[i] != NULL) {
      delete caches_[i];
      caches_[i] = NULL;
    }
  }
}


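// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the original V8 sources): the SubCache
// constructor above seeds every cache line with the input bit pattern
// 0xffffffff/0xffffffff, described as a NaN that the FPU does not generate,
// so an uninitialized line can never match a real input.  A minimal
// standalone cache built on the same idea, assuming <string.h> for memcpy
// and a 32-bit unsigned int; SketchTranscendentalCache is a hypothetical
// name.
// ---------------------------------------------------------------------------
class SketchTranscendentalCache {
 public:
  SketchTranscendentalCache() {
    for (int i = 0; i < kSize; i++) {
      elements_[i].in[0] = 0xffffffffu;  // Impossible input: never matches
      elements_[i].in[1] = 0xffffffffu;  // the bits of a cached argument.
      elements_[i].output = 0.0;
    }
  }

  // Returns true and sets *result if the cache holds a value for input.
  bool Get(double input, double* result) {
    unsigned int halves[2];
    memcpy(halves, &input, sizeof(halves));
    const Element& e = elements_[HashBits(halves)];
    if (e.in[0] == halves[0] && e.in[1] == halves[1]) {
      *result = e.output;
      return true;
    }
    return false;
  }

  void Put(double input, double output) {
    unsigned int halves[2];
    memcpy(halves, &input, sizeof(halves));
    Element& e = elements_[HashBits(halves)];
    e.in[0] = halves[0];
    e.in[1] = halves[1];
    e.output = output;
  }

 private:
  static const int kSize = 512;
  struct Element {
    unsigned int in[2];
    double output;
  };
  static int HashBits(const unsigned int halves[2]) {
    return static_cast<int>((halves[0] ^ halves[1]) & (kSize - 1));
  }
  Element elements_[kSize];
};

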
void ExternalStringTable::CleanUp() {
  int last = 0;
  for (int i = 0; i < new_space_strings_.length(); ++i) {
    if (new_space_strings_[i] == heap_->the_hole_value()) {
      continue;
    }
    if (heap_->InNewSpace(new_space_strings_[i])) {
      new_space_strings_[last++] = new_space_strings_[i];
    } else {
      old_space_strings_.Add(new_space_strings_[i]);
    }
  }
  new_space_strings_.Rewind(last);
  new_space_strings_.Trim();

  last = 0;
  for (int i = 0; i < old_space_strings_.length(); ++i) {
    if (old_space_strings_[i] == heap_->the_hole_value()) {
      continue;
    }
    ASSERT(!heap_->InNewSpace(old_space_strings_[i]));
    old_space_strings_[last++] = old_space_strings_[i];
  }
  old_space_strings_.Rewind(last);
  old_space_strings_.Trim();
#ifdef VERIFY_HEAP
  if (FLAG_verify_heap) {
    Verify();
  }
#endif
}


void ExternalStringTable::TearDown() {
  new_space_strings_.Free();
  old_space_strings_.Free();
}


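// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the original V8 sources): CleanUp above
// compacts its lists in place by copying each surviving element to a
// monotonically increasing write index and then rewinding the list to that
// index.  The same pattern on a plain array; CompactInPlace and the is_dead
// predicate are hypothetical.
// ---------------------------------------------------------------------------
// Removes all elements of data[0..length) for which is_dead returns true and
// returns the new length; the caller shrinks the backing store to that length.
template <typename T, typename Predicate>
static int CompactInPlace(T* data, int length, Predicate is_dead) {
  int last = 0;
  for (int i = 0; i < length; i++) {
    if (!is_dead(data[i])) data[last++] = data[i];
  }
  return last;
}

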
// Update all references.
void ErrorObjectList::UpdateReferences() {
  for (int i = 0; i < list_.length(); i++) {
    HeapObject* object = HeapObject::cast(list_[i]);
    MapWord first_word = object->map_word();
    if (first_word.IsForwardingAddress()) {
      list_[i] = first_word.ToForwardingAddress();
    }
  }
}


// Unforwarded objects in new space are dead and removed from the list.
void ErrorObjectList::UpdateReferencesInNewSpace(Heap* heap) {
  if (list_.is_empty()) return;
  if (!nested_) {
    int write_index = 0;
    for (int i = 0; i < list_.length(); i++) {
      MapWord first_word = HeapObject::cast(list_[i])->map_word();
      if (first_word.IsForwardingAddress()) {
        list_[write_index++] = first_word.ToForwardingAddress();
      }
    }
    list_.Rewind(write_index);
  } else {
    // If a GC is triggered during DeferredFormatStackTrace, we do not move
    // objects in the list, just remove dead ones, so as not to confuse the
    // loop in DeferredFormatStackTrace.
    for (int i = 0; i < list_.length(); i++) {
      MapWord first_word = HeapObject::cast(list_[i])->map_word();
      list_[i] = first_word.IsForwardingAddress()
                     ? first_word.ToForwardingAddress()
                     : heap->the_hole_value();
    }
  }
}


void ErrorObjectList::DeferredFormatStackTrace(Isolate* isolate) {
  // If formatting the stack trace causes a GC, this method will be
  // recursively called.  In that case, skip the recursive call, since
  // the loop modifies the list while iterating over it.
  if (nested_ || list_.is_empty() || isolate->has_pending_exception()) return;
  nested_ = true;
  HandleScope scope(isolate);
  Handle<String> stack_key = isolate->factory()->stack_string();
  int write_index = 0;
  int budget = kBudgetPerGC;
  for (int i = 0; i < list_.length(); i++) {
    Object* object = list_[i];
    JSFunction* getter_fun;

    { AssertNoAllocation assert;
      // Skip possible holes in the list.
      if (object->IsTheHole()) continue;
      if (isolate->heap()->InNewSpace(object) || budget == 0) {
        list_[write_index++] = object;
        continue;
      }

      // Check whether the stack property is backed by the original getter.
      LookupResult lookup(isolate);
      JSObject::cast(object)->LocalLookupRealNamedProperty(*stack_key, &lookup);
      if (!lookup.IsFound() || lookup.type() != CALLBACKS) continue;
      Object* callback = lookup.GetCallbackObject();
      if (!callback->IsAccessorPair()) continue;
      Object* getter_obj = AccessorPair::cast(callback)->getter();
      if (!getter_obj->IsJSFunction()) continue;
      getter_fun = JSFunction::cast(getter_obj);
      String* key = isolate->heap()->hidden_stack_trace_string();
      Object* value = getter_fun->GetHiddenProperty(key);
      if (key != value) continue;
    }

    budget--;
    HandleScope scope(isolate);
    bool has_exception = false;
#ifdef DEBUG
    Handle<Map> map(HeapObject::cast(object)->map(), isolate);
#endif
    Handle<Object> object_handle(object, isolate);
    Handle<Object> getter_handle(getter_fun, isolate);
    Execution::Call(getter_handle, object_handle, 0, NULL, &has_exception);
    ASSERT(*map == HeapObject::cast(*object_handle)->map());
    if (has_exception) {
      // Hit an exception (most likely a stack overflow).
      // Wrap up this pass and retry after another GC.
      isolate->clear_pending_exception();
      // We use the handle since calling the getter might have caused a GC.
      list_[write_index++] = *object_handle;
      budget = 0;
    }
  }
  list_.Rewind(write_index);
  list_.Trim();
  nested_ = false;
}


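// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the original V8 sources):
// DeferredFormatStackTrace above does at most kBudgetPerGC units of expensive
// work per GC and re-queues everything it did not get to via the write index.
// The same budgeted carry-over loop on a plain array; ProcessWithBudget and
// the do_work functor are hypothetical.
// ---------------------------------------------------------------------------
// Processes at most budget items from items[0..length); unprocessed items are
// compacted to the front and the new pending length is returned.
template <typename T, typename Processor>
static int ProcessWithBudget(T* items, int length, int budget,
                             Processor do_work) {
  int write_index = 0;
  for (int i = 0; i < length; i++) {
    if (budget == 0) {
      items[write_index++] = items[i];  // Out of budget: carry over.
      continue;
    }
    do_work(items[i]);
    budget--;
  }
  return write_index;
}

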
void ErrorObjectList::RemoveUnmarked(Heap* heap) {
  for (int i = 0; i < list_.length(); i++) {
    HeapObject* object = HeapObject::cast(list_[i]);
    if (!Marking::MarkBitFrom(object).Get()) {
      list_[i] = heap->the_hole_value();
    }
  }
}


void ErrorObjectList::TearDown() {
  list_.Free();
}


void Heap::QueueMemoryChunkForFree(MemoryChunk* chunk) {
  chunk->set_next_chunk(chunks_queued_for_free_);
  chunks_queued_for_free_ = chunk;
}


void Heap::FreeQueuedChunks() {
  if (chunks_queued_for_free_ == NULL) return;
  MemoryChunk* next;
  MemoryChunk* chunk;
  for (chunk = chunks_queued_for_free_; chunk != NULL; chunk = next) {
    next = chunk->next_chunk();
    chunk->SetFlag(MemoryChunk::ABOUT_TO_BE_FREED);

    if (chunk->owner()->identity() == LO_SPACE) {
      // StoreBuffer::Filter relies on MemoryChunk::FromAnyPointerAddress.
      // If FromAnyPointerAddress encounters a slot that belongs to a large
      // chunk queued for deletion, it will fail to find the chunk because
      // it tries to perform a search in the list of pages owned by the large
      // object space, and queued chunks were detached from that list.
      // To work around this we split the large chunk into normal kPageSize
      // aligned pieces and initialize the size, owner and flags field of
      // every piece.  If FromAnyPointerAddress encounters a slot that belongs
      // to one of these smaller pieces, it will treat it as a slot on a
      // normal Page.
      Address chunk_end = chunk->address() + chunk->size();
      MemoryChunk* inner = MemoryChunk::FromAddress(
          chunk->address() + Page::kPageSize);
      MemoryChunk* inner_last = MemoryChunk::FromAddress(chunk_end - 1);
      while (inner <= inner_last) {
        // Size of a large chunk is always a multiple of
        // OS::AllocateAlignment() so there is always
        // enough space for a fake MemoryChunk header.
        Address area_end = Min(inner->address() + Page::kPageSize, chunk_end);
        // Guard against overflow.
        if (area_end < inner->address()) area_end = chunk_end;
        inner->SetArea(inner->address(), area_end);
        inner->set_size(Page::kPageSize);
        inner->set_owner(lo_space());
        inner->SetFlag(MemoryChunk::ABOUT_TO_BE_FREED);
        inner = MemoryChunk::FromAddress(
            inner->address() + Page::kPageSize);
      }
    }
  }
  isolate_->heap()->store_buffer()->Compact();
  isolate_->heap()->store_buffer()->Filter(MemoryChunk::ABOUT_TO_BE_FREED);
  for (chunk = chunks_queued_for_free_; chunk != NULL; chunk = next) {
    next = chunk->next_chunk();
    isolate_->memory_allocator()->Free(chunk);
  }
  chunks_queued_for_free_ = NULL;
}


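// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the original V8 sources): the inner loop
// of FreeQueuedChunks above walks a large chunk in Page::kPageSize strides
// and clamps the final piece to the end of the chunk.  The stride-and-clamp
// arithmetic on plain addresses, assuming <stdint.h>; kSketchPieceSize and
// ForEachPiece are hypothetical.
// ---------------------------------------------------------------------------
static const uintptr_t kSketchPieceSize = 1 << 20;  // 1 MB pieces, say.

// Calls visit(start, end) once for every piece of [chunk_start, chunk_end).
template <typename Visitor>
static void ForEachPiece(uintptr_t chunk_start, uintptr_t chunk_end,
                         Visitor visit) {
  for (uintptr_t piece = chunk_start; piece < chunk_end;
       piece += kSketchPieceSize) {
    uintptr_t piece_end = piece + kSketchPieceSize;
    if (piece_end > chunk_end) piece_end = chunk_end;  // Clamp the last piece.
    visit(piece, piece_end);
  }
}

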
void Heap::RememberUnmappedPage(Address page, bool compacted) {
  uintptr_t p = reinterpret_cast<uintptr_t>(page);
  // Tag the page pointer to make it findable in the dump file.
  if (compacted) {
    p ^= 0xc1ead & (Page::kPageSize - 1);  // Cleared.
  } else {
    p ^= 0x1d1ed & (Page::kPageSize - 1);  // I died.
  }
  remembered_unmapped_pages_[remembered_unmapped_pages_index_] =
      reinterpret_cast<Address>(p);
  remembered_unmapped_pages_index_++;
  remembered_unmapped_pages_index_ %= kRememberedUnmappedPages;
}


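// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the original V8 sources):
// RememberUnmappedPage above XORs a recognizable pattern into the page-offset
// bits of the address so that the value stands out when scanning a crash
// dump, while the page-aligned high bits still identify the page.  A
// standalone version, assuming <stdint.h>; the constants and function names
// are hypothetical.
// ---------------------------------------------------------------------------
static const uintptr_t kSketchPageBytes = 1 << 20;     // 1 MB pages, say.
static const uintptr_t kSketchDumpPattern = 0xc1ead;   // Reads as "cleared".

static inline uintptr_t TagForDump(uintptr_t page_address) {
  // Page addresses are page aligned, so the offset bits are free for a tag.
  return page_address ^ (kSketchDumpPattern & (kSketchPageBytes - 1));
}

static inline uintptr_t UntagFromDump(uintptr_t tagged_address) {
  // XORing with the same pattern restores the original page address.
  return tagged_address ^ (kSketchDumpPattern & (kSketchPageBytes - 1));
}

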
void Heap::ClearObjectStats(bool clear_last_time_stats) {
  memset(object_counts_, 0, sizeof(object_counts_));
  memset(object_sizes_, 0, sizeof(object_sizes_));
  if (clear_last_time_stats) {
    memset(object_counts_last_time_, 0, sizeof(object_counts_last_time_));
    memset(object_sizes_last_time_, 0, sizeof(object_sizes_last_time_));
  }
}


static LazyMutex checkpoint_object_stats_mutex = LAZY_MUTEX_INITIALIZER;


void Heap::CheckpointObjectStats() {
  ScopedLock lock(checkpoint_object_stats_mutex.Pointer());
  Counters* counters = isolate()->counters();
#define ADJUST_LAST_TIME_OBJECT_COUNT(name)               \
  counters->count_of_##name()->Increment(                 \
      static_cast<int>(object_counts_[name]));            \
  counters->count_of_##name()->Decrement(                 \
      static_cast<int>(object_counts_last_time_[name]));  \
  counters->size_of_##name()->Increment(                  \
      static_cast<int>(object_sizes_[name]));             \
  counters->size_of_##name()->Decrement(                  \
      static_cast<int>(object_sizes_last_time_[name]));
  INSTANCE_TYPE_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
#undef ADJUST_LAST_TIME_OBJECT_COUNT
  int index;
#define ADJUST_LAST_TIME_OBJECT_COUNT(name)               \
  index = FIRST_CODE_KIND_SUB_TYPE + Code::name;          \
  counters->count_of_CODE_TYPE_##name()->Increment(       \
      static_cast<int>(object_counts_[index]));           \
  counters->count_of_CODE_TYPE_##name()->Decrement(       \
      static_cast<int>(object_counts_last_time_[index])); \
  counters->size_of_CODE_TYPE_##name()->Increment(        \
      static_cast<int>(object_sizes_[index]));            \
  counters->size_of_CODE_TYPE_##name()->Decrement(        \
      static_cast<int>(object_sizes_last_time_[index]));
  CODE_KIND_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
#undef ADJUST_LAST_TIME_OBJECT_COUNT
#define ADJUST_LAST_TIME_OBJECT_COUNT(name)               \
  index = FIRST_FIXED_ARRAY_SUB_TYPE + name;              \
  counters->count_of_FIXED_ARRAY_##name()->Increment(     \
      static_cast<int>(object_counts_[index]));           \
  counters->count_of_FIXED_ARRAY_##name()->Decrement(     \
      static_cast<int>(object_counts_last_time_[index])); \
  counters->size_of_FIXED_ARRAY_##name()->Increment(      \
      static_cast<int>(object_sizes_[index]));            \
  counters->size_of_FIXED_ARRAY_##name()->Decrement(      \
      static_cast<int>(object_sizes_last_time_[index]));
  FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
#undef ADJUST_LAST_TIME_OBJECT_COUNT

  OS::MemCopy(object_counts_last_time_, object_counts_, sizeof(object_counts_));
  OS::MemCopy(object_sizes_last_time_, object_sizes_, sizeof(object_sizes_));
  ClearObjectStats();
}


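// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the original V8 sources):
// CheckpointObjectStats above publishes per-GC object statistics by adding
// the new sample to a cumulative counter, subtracting the previous sample,
// and then remembering the new sample for the next checkpoint.  The same
// delta scheme for a single counter; all names here are hypothetical.
// ---------------------------------------------------------------------------
struct SketchCounter {
  int published;  // The value external observers currently see.
  void Increment(int by) { published += by; }
  void Decrement(int by) { published -= by; }
};

// Makes the counter reflect current_sample exactly, given that it already
// reflects *last_sample, and records the sample for the next checkpoint.
static void CheckpointCounter(SketchCounter* counter,
                              int current_sample,
                              int* last_sample) {
  counter->Increment(current_sample);
  counter->Decrement(*last_sample);
  *last_sample = current_sample;
}

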
Heap::RelocationLock::RelocationLock(Heap* heap) : heap_(heap) {
  if (FLAG_parallel_recompilation) {
    heap_->relocation_mutex_->Lock();
#ifdef DEBUG
    heap_->relocation_mutex_locked_by_optimizer_thread_ =
        heap_->isolate()->optimizing_compiler_thread()->IsOptimizerThread();
#endif  // DEBUG
  }
}

} }  // namespace v8::internal