1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission.
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 #include "accessors.h"
32 #include "bootstrapper.h"
34 #include "compilation-cache.h"
36 #include "deoptimizer.h"
37 #include "global-handles.h"
38 #include "heap-profiler.h"
39 #include "incremental-marking.h"
40 #include "mark-compact.h"
42 #include "objects-visiting.h"
43 #include "objects-visiting-inl.h"
45 #include "runtime-profiler.h"
46 #include "scopeinfo.h"
48 #include "store-buffer.h"
49 #include "v8threads.h"
51 #include "vm-state-inl.h"
52 #if V8_TARGET_ARCH_ARM && !V8_INTERPRETED_REGEXP
53 #include "regexp-macro-assembler.h"
54 #include "arm/regexp-macro-assembler-arm.h"
55 #endif
56 #if V8_TARGET_ARCH_MIPS && !V8_INTERPRETED_REGEXP
57 #include "regexp-macro-assembler.h"
58 #include "mips/regexp-macro-assembler-mips.h"
67 // semispace_size_ should be a power of 2 and old_generation_size_ should be
68 // a multiple of Page::kPageSize.
69 #if defined(V8_TARGET_ARCH_X64)
70 #define LUMP_OF_MEMORY (2 * MB)
71 code_range_size_(512*MB),
72 #else
73 #define LUMP_OF_MEMORY MB
74 code_range_size_(0),
75 #endif
77 reserved_semispace_size_(4 * Max(LUMP_OF_MEMORY, Page::kPageSize)),
78 max_semispace_size_(4 * Max(LUMP_OF_MEMORY, Page::kPageSize)),
79 initial_semispace_size_(Page::kPageSize),
80 max_old_generation_size_(192*MB),
81 max_executable_size_(max_old_generation_size_),
83 reserved_semispace_size_(8 * Max(LUMP_OF_MEMORY, Page::kPageSize)),
84 max_semispace_size_(8 * Max(LUMP_OF_MEMORY, Page::kPageSize)),
85 initial_semispace_size_(Page::kPageSize),
86 max_old_generation_size_(700ul * LUMP_OF_MEMORY),
87 max_executable_size_(256l * LUMP_OF_MEMORY),
90 // Variables set based on semispace_size_ and old_generation_size_ in
91 // ConfigureHeap (survived_since_last_expansion_, external_allocation_limit_)
92 // Will be 4 * reserved_semispace_size_ to ensure that young
93 // generation can be aligned to its size.
94 survived_since_last_expansion_(0),
96 always_allocate_scope_depth_(0),
97 linear_allocation_scope_depth_(0),
98 contexts_disposed_(0),
100 flush_monomorphic_ics_(false),
101 scan_on_scavenge_pages_(0),
103 old_pointer_space_(NULL),
104 old_data_space_(NULL),
109 gc_state_(NOT_IN_GC),
110 gc_post_processing_depth_(0),
113 remembered_unmapped_pages_index_(0),
114 unflattened_strings_length_(0),
116 allocation_timeout_(0),
117 disallow_allocation_failure_(false),
119 new_space_high_promotion_mode_active_(false),
120 old_generation_allocation_limit_(kMinimumOldGenerationAllocationLimit),
121 size_of_old_gen_at_last_old_space_gc_(0),
122 external_allocation_limit_(0),
123 amount_of_external_allocated_memory_(0),
124 amount_of_external_allocated_memory_at_last_global_gc_(0),
125 old_gen_exhausted_(false),
126 store_buffer_rebuilder_(store_buffer()),
127 hidden_string_(NULL),
128 global_gc_prologue_callback_(NULL),
129 global_gc_epilogue_callback_(NULL),
130 gc_safe_size_of_old_object_(NULL),
131 total_regexp_code_generated_(0),
133 young_survivors_after_last_gc_(0),
134 high_survival_rate_period_length_(0),
135 low_survival_rate_period_length_(0),
137 previous_survival_rate_trend_(Heap::STABLE),
138 survival_rate_trend_(Heap::STABLE),
140 total_gc_time_ms_(0.0),
141 max_alive_after_gc_(0),
142 min_in_mutator_(kMaxInt),
143 alive_after_last_gc_(0),
144 last_gc_end_timestamp_(0.0),
149 incremental_marking_(this),
150 number_idle_notifications_(0),
151 last_idle_notification_gc_count_(0),
152 last_idle_notification_gc_count_init_(false),
153 mark_sweeps_since_idle_round_started_(0),
154 ms_count_at_last_idle_notification_(0),
155 gc_count_at_last_idle_gc_(0),
156 scavenges_since_last_idle_round_(kIdleScavengeThreshold),
157 gcs_since_last_deopt_(0),
159 no_weak_embedded_maps_verification_scope_depth_(0),
161 promotion_queue_(this),
163 chunks_queued_for_free_(NULL),
164 relocation_mutex_(NULL) {
165 // Allow build-time customization of the max semispace size. Building
166 // V8 with snapshots and a non-default max semispace size is much
167 // easier if you can define it as part of the build environment.
168 #if defined(V8_MAX_SEMISPACE_SIZE)
169 max_semispace_size_ = reserved_semispace_size_ = V8_MAX_SEMISPACE_SIZE;
172 intptr_t max_virtual = OS::MaxVirtualMemory();
174 if (max_virtual > 0) {
175 if (code_range_size_ > 0) {
176 // Reserve no more than 1/8 of the memory for the code range.
177 code_range_size_ = Min(code_range_size_, max_virtual >> 3);
181 memset(roots_, 0, sizeof(roots_[0]) * kRootListLength);
182 native_contexts_list_ = NULL;
183 array_buffers_list_ = Smi::FromInt(0);
184 mark_compact_collector_.heap_ = this;
185 external_string_table_.heap_ = this;
186 // Put a dummy entry in the remembered pages so we can find the list in the
187 // minidump even if there are no real unmapped pages.
188 RememberUnmappedPage(NULL, false);
190 ClearObjectStats(true);
194 intptr_t Heap::Capacity() {
195 if (!HasBeenSetUp()) return 0;
197 return new_space_.Capacity() +
198 old_pointer_space_->Capacity() +
199 old_data_space_->Capacity() +
200 code_space_->Capacity() +
201 map_space_->Capacity() +
202 cell_space_->Capacity();
206 intptr_t Heap::CommittedMemory() {
207 if (!HasBeenSetUp()) return 0;
209 return new_space_.CommittedMemory() +
210 old_pointer_space_->CommittedMemory() +
211 old_data_space_->CommittedMemory() +
212 code_space_->CommittedMemory() +
213 map_space_->CommittedMemory() +
214 cell_space_->CommittedMemory() +
219 size_t Heap::CommittedPhysicalMemory() {
220 if (!HasBeenSetUp()) return 0;
222 return new_space_.CommittedPhysicalMemory() +
223 old_pointer_space_->CommittedPhysicalMemory() +
224 old_data_space_->CommittedPhysicalMemory() +
225 code_space_->CommittedPhysicalMemory() +
226 map_space_->CommittedPhysicalMemory() +
227 cell_space_->CommittedPhysicalMemory() +
228 lo_space_->CommittedPhysicalMemory();
232 intptr_t Heap::CommittedMemoryExecutable() {
233 if (!HasBeenSetUp()) return 0;
235 return isolate()->memory_allocator()->SizeExecutable();
239 intptr_t Heap::Available() {
240 if (!HasBeenSetUp()) return 0;
242 return new_space_.Available() +
243 old_pointer_space_->Available() +
244 old_data_space_->Available() +
245 code_space_->Available() +
246 map_space_->Available() +
247 cell_space_->Available();
251 bool Heap::HasBeenSetUp() {
252 return old_pointer_space_ != NULL &&
253 old_data_space_ != NULL &&
254 code_space_ != NULL &&
255 map_space_ != NULL &&
256 cell_space_ != NULL &&
261 int Heap::GcSafeSizeOfOldObject(HeapObject* object) {
262 if (IntrusiveMarking::IsMarked(object)) {
263 return IntrusiveMarking::SizeOfMarkedObject(object);
265 return object->SizeFromMap(object->map());
269 GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space,
270 const char** reason) {
271 // Is global GC requested?
272 if (space != NEW_SPACE) {
273 isolate_->counters()->gc_compactor_caused_by_request()->Increment();
274 *reason = "GC in old space requested";
275 return MARK_COMPACTOR;
278 if (FLAG_gc_global || (FLAG_stress_compaction && (gc_count_ & 1) != 0)) {
279 *reason = "GC in old space forced by flags";
280 return MARK_COMPACTOR;
283 // Is enough data promoted to justify a global GC?
284 if (OldGenerationAllocationLimitReached()) {
285 isolate_->counters()->gc_compactor_caused_by_promoted_data()->Increment();
286 *reason = "promotion limit reached";
287 return MARK_COMPACTOR;
290 // Have allocation in OLD and LO failed?
291 if (old_gen_exhausted_) {
292 isolate_->counters()->
293 gc_compactor_caused_by_oldspace_exhaustion()->Increment();
294 *reason = "old generations exhausted";
295 return MARK_COMPACTOR;
298 // Is there enough space left in OLD to guarantee that a scavenge can
299 // succeed?
301 // Note that MemoryAllocator->MaxAvailable() undercounts the memory available
302 // for object promotion. It counts only the bytes that the memory
303 // allocator has not yet allocated from the OS and assigned to any space,
304 // and does not count available bytes already in the old space or code
305 // space. Undercounting is safe---we may get an unrequested full GC when
306 // a scavenge would have succeeded.
307 if (isolate_->memory_allocator()->MaxAvailable() <= new_space_.Size()) {
308 isolate_->counters()->
309 gc_compactor_caused_by_oldspace_exhaustion()->Increment();
310 *reason = "scavenge might not succeed";
311 return MARK_COMPACTOR;
320 // TODO(1238405): Combine the infrastructure for --heap-stats and
321 // --log-gc to avoid the complicated preprocessor and flag testing.
322 void Heap::ReportStatisticsBeforeGC() {
323 // Heap::ReportHeapStatistics will also log NewSpace statistics when
324 // --log-gc is set. The following logic is used to avoid
325 // double logging.
327 if (FLAG_heap_stats || FLAG_log_gc) new_space_.CollectStatistics();
328 if (FLAG_heap_stats) {
329 ReportHeapStatistics("Before GC");
330 } else if (FLAG_log_gc) {
331 new_space_.ReportStatistics();
333 if (FLAG_heap_stats || FLAG_log_gc) new_space_.ClearHistograms();
336 new_space_.CollectStatistics();
337 new_space_.ReportStatistics();
338 new_space_.ClearHistograms();
344 void Heap::PrintShortHeapStatistics() {
345 if (!FLAG_trace_gc_verbose) return;
346 PrintPID("Memory allocator, used: %6" V8_PTR_PREFIX "d KB"
347 ", available: %6" V8_PTR_PREFIX "d KB\n",
348 isolate_->memory_allocator()->Size() / KB,
349 isolate_->memory_allocator()->Available() / KB);
350 PrintPID("New space, used: %6" V8_PTR_PREFIX "d KB"
351 ", available: %6" V8_PTR_PREFIX "d KB"
352 ", committed: %6" V8_PTR_PREFIX "d KB\n",
353 new_space_.Size() / KB,
354 new_space_.Available() / KB,
355 new_space_.CommittedMemory() / KB);
356 PrintPID("Old pointers, used: %6" V8_PTR_PREFIX "d KB"
357 ", available: %6" V8_PTR_PREFIX "d KB"
358 ", committed: %6" V8_PTR_PREFIX "d KB\n",
359 old_pointer_space_->SizeOfObjects() / KB,
360 old_pointer_space_->Available() / KB,
361 old_pointer_space_->CommittedMemory() / KB);
362 PrintPID("Old data space, used: %6" V8_PTR_PREFIX "d KB"
363 ", available: %6" V8_PTR_PREFIX "d KB"
364 ", committed: %6" V8_PTR_PREFIX "d KB\n",
365 old_data_space_->SizeOfObjects() / KB,
366 old_data_space_->Available() / KB,
367 old_data_space_->CommittedMemory() / KB);
368 PrintPID("Code space, used: %6" V8_PTR_PREFIX "d KB"
369 ", available: %6" V8_PTR_PREFIX "d KB"
370 ", committed: %6" V8_PTR_PREFIX "d KB\n",
371 code_space_->SizeOfObjects() / KB,
372 code_space_->Available() / KB,
373 code_space_->CommittedMemory() / KB);
374 PrintPID("Map space, used: %6" V8_PTR_PREFIX "d KB"
375 ", available: %6" V8_PTR_PREFIX "d KB"
376 ", committed: %6" V8_PTR_PREFIX "d KB\n",
377 map_space_->SizeOfObjects() / KB,
378 map_space_->Available() / KB,
379 map_space_->CommittedMemory() / KB);
380 PrintPID("Cell space, used: %6" V8_PTR_PREFIX "d KB"
381 ", available: %6" V8_PTR_PREFIX "d KB"
382 ", committed: %6" V8_PTR_PREFIX "d KB\n",
383 cell_space_->SizeOfObjects() / KB,
384 cell_space_->Available() / KB,
385 cell_space_->CommittedMemory() / KB);
386 PrintPID("Large object space, used: %6" V8_PTR_PREFIX "d KB"
387 ", available: %6" V8_PTR_PREFIX "d KB"
388 ", committed: %6" V8_PTR_PREFIX "d KB\n",
389 lo_space_->SizeOfObjects() / KB,
390 lo_space_->Available() / KB,
391 lo_space_->CommittedMemory() / KB);
392 PrintPID("All spaces, used: %6" V8_PTR_PREFIX "d KB"
393 ", available: %6" V8_PTR_PREFIX "d KB"
394 ", committed: %6" V8_PTR_PREFIX "d KB\n",
395 this->SizeOfObjects() / KB,
396 this->Available() / KB,
397 this->CommittedMemory() / KB);
398 PrintPID("Total time spent in GC : %.1f ms\n", total_gc_time_ms_);
402 // TODO(1238405): Combine the infrastructure for --heap-stats and
403 // --log-gc to avoid the complicated preprocessor and flag testing.
404 void Heap::ReportStatisticsAfterGC() {
405 // As in the pre-GC report, we use some complicated logic to ensure that
406 // NewSpace statistics are logged exactly once when --log-gc is turned on.
408 if (FLAG_heap_stats) {
409 new_space_.CollectStatistics();
410 ReportHeapStatistics("After GC");
411 } else if (FLAG_log_gc) {
412 new_space_.ReportStatistics();
415 if (FLAG_log_gc) new_space_.ReportStatistics();
420 void Heap::GarbageCollectionPrologue() {
421 { AllowHeapAllocation for_the_first_part_of_prologue;
422 isolate_->transcendental_cache()->Clear();
423 ClearJSFunctionResultCaches();
425 unflattened_strings_length_ = 0;
427 if (FLAG_flush_code && FLAG_flush_code_incrementally) {
428 mark_compact_collector()->EnableCodeFlushing(true);
432 if (FLAG_verify_heap) {
439 ASSERT(!AllowHeapAllocation::IsAllowed() && gc_state_ == NOT_IN_GC);
441 if (FLAG_gc_verbose) Print();
443 ReportStatisticsBeforeGC();
446 store_buffer()->GCPrologue();
450 intptr_t Heap::SizeOfObjects() {
452 AllSpaces spaces(this);
453 for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
454 total += space->SizeOfObjects();
460 void Heap::RepairFreeListsAfterBoot() {
461 PagedSpaces spaces(this);
462 for (PagedSpace* space = spaces.next();
464 space = spaces.next()) {
465 space->RepairFreeListsAfterBoot();
470 void Heap::GarbageCollectionEpilogue() {
471 store_buffer()->GCEpilogue();
473 // In release mode, we only zap the from space under heap verification.
474 if (Heap::ShouldZapGarbage()) {
479 if (FLAG_verify_heap) {
484 AllowHeapAllocation for_the_rest_of_the_epilogue;
487 if (FLAG_print_global_handles) isolate_->global_handles()->Print();
488 if (FLAG_print_handles) PrintHandles();
489 if (FLAG_gc_verbose) Print();
490 if (FLAG_code_stats) ReportCodeStatistics("After GC");
492 if (FLAG_deopt_every_n_garbage_collections > 0) {
493 if (++gcs_since_last_deopt_ == FLAG_deopt_every_n_garbage_collections) {
494 Deoptimizer::DeoptimizeAll(isolate());
495 gcs_since_last_deopt_ = 0;
499 isolate_->counters()->alive_after_last_gc()->Set(
500 static_cast<int>(SizeOfObjects()));
502 isolate_->counters()->string_table_capacity()->Set(
503 string_table()->Capacity());
504 isolate_->counters()->number_of_symbols()->Set(
505 string_table()->NumberOfElements());
507 if (CommittedMemory() > 0) {
508 isolate_->counters()->external_fragmentation_total()->AddSample(
509 static_cast<int>(100 - (SizeOfObjects() * 100.0) / CommittedMemory()));
511 isolate_->counters()->heap_fraction_map_space()->AddSample(
513 (map_space()->CommittedMemory() * 100.0) / CommittedMemory()));
514 isolate_->counters()->heap_fraction_cell_space()->AddSample(
516 (cell_space()->CommittedMemory() * 100.0) / CommittedMemory()));
518 isolate_->counters()->heap_sample_total_committed()->AddSample(
519 static_cast<int>(CommittedMemory() / KB));
520 isolate_->counters()->heap_sample_total_used()->AddSample(
521 static_cast<int>(SizeOfObjects() / KB));
522 isolate_->counters()->heap_sample_map_space_committed()->AddSample(
523 static_cast<int>(map_space()->CommittedMemory() / KB));
524 isolate_->counters()->heap_sample_cell_space_committed()->AddSample(
525 static_cast<int>(cell_space()->CommittedMemory() / KB));
528 #define UPDATE_COUNTERS_FOR_SPACE(space) \
529 isolate_->counters()->space##_bytes_available()->Set( \
530 static_cast<int>(space()->Available())); \
531 isolate_->counters()->space##_bytes_committed()->Set( \
532 static_cast<int>(space()->CommittedMemory())); \
533 isolate_->counters()->space##_bytes_used()->Set( \
534 static_cast<int>(space()->SizeOfObjects()));
535 #define UPDATE_FRAGMENTATION_FOR_SPACE(space) \
536 if (space()->CommittedMemory() > 0) { \
537 isolate_->counters()->external_fragmentation_##space()->AddSample( \
538 static_cast<int>(100 - \
539 (space()->SizeOfObjects() * 100.0) / space()->CommittedMemory())); \
541 #define UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(space) \
542 UPDATE_COUNTERS_FOR_SPACE(space) \
543 UPDATE_FRAGMENTATION_FOR_SPACE(space)
545 UPDATE_COUNTERS_FOR_SPACE(new_space)
546 UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(old_pointer_space)
547 UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(old_data_space)
548 UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(code_space)
549 UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(map_space)
550 UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(cell_space)
551 UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(lo_space)
552 #undef UPDATE_COUNTERS_FOR_SPACE
553 #undef UPDATE_FRAGMENTATION_FOR_SPACE
554 #undef UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE
557 ReportStatisticsAfterGC();
559 #ifdef ENABLE_DEBUGGER_SUPPORT
560 isolate_->debug()->AfterGarbageCollection();
561 #endif // ENABLE_DEBUGGER_SUPPORT
563 error_object_list_.DeferredFormatStackTrace(isolate());
567 void Heap::CollectAllGarbage(int flags, const char* gc_reason) {
568 // Since we are ignoring the return value, the exact choice of space does
569 // not matter, so long as we do not specify NEW_SPACE, which would not
570 // cause a full GC.
571 mark_compact_collector_.SetFlags(flags);
572 CollectGarbage(OLD_POINTER_SPACE, gc_reason);
573 mark_compact_collector_.SetFlags(kNoGCFlags);
577 void Heap::CollectAllAvailableGarbage(const char* gc_reason) {
578 // Since we are ignoring the return value, the exact choice of space does
579 // not matter, so long as we do not specify NEW_SPACE, which would not
580 // cause a full GC.
581 // Major GC would invoke weak handle callbacks on weakly reachable
582 // handles, but won't collect weakly reachable objects until next
583 // major GC. Therefore if we collect aggressively and weak handle callback
584 // has been invoked, we rerun major GC to release objects which become
585 // garbage.
586 // Note: as weak callbacks can execute arbitrary code, we cannot
587 // hope that eventually there will be no weak callback invocations.
588 // Therefore stop recollecting after several attempts.
589 mark_compact_collector()->SetFlags(kMakeHeapIterableMask |
590 kReduceMemoryFootprintMask);
591 isolate_->compilation_cache()->Clear();
592 const int kMaxNumberOfAttempts = 7;
593 for (int attempt = 0; attempt < kMaxNumberOfAttempts; attempt++) {
594 if (!CollectGarbage(OLD_POINTER_SPACE, MARK_COMPACTOR, gc_reason, NULL)) {
598 mark_compact_collector()->SetFlags(kNoGCFlags);
601 incremental_marking()->UncommitMarkingDeque();
605 bool Heap::CollectGarbage(AllocationSpace space,
606 GarbageCollector collector,
607 const char* gc_reason,
608 const char* collector_reason) {
609 // The VM is in the GC state until exiting this function.
610 VMState<GC> state(isolate_);
613 // Reset the allocation timeout to the GC interval, but make sure to
614 // allow at least a few allocations after a collection. The reason
615 // for this is that we have a lot of allocation sequences and we
616 // assume that a garbage collection will allow the subsequent
617 // allocation attempts to go through.
618 allocation_timeout_ = Max(6, FLAG_gc_interval);
621 if (collector == SCAVENGER && !incremental_marking()->IsStopped()) {
622 if (FLAG_trace_incremental_marking) {
623 PrintF("[IncrementalMarking] Scavenge during marking.\n");
627 if (collector == MARK_COMPACTOR &&
628 !mark_compact_collector()->abort_incremental_marking() &&
629 !incremental_marking()->IsStopped() &&
630 !incremental_marking()->should_hurry() &&
631 FLAG_incremental_marking_steps) {
632 // Make progress in incremental marking.
633 const intptr_t kStepSizeWhenDelayedByScavenge = 1 * MB;
634 incremental_marking()->Step(kStepSizeWhenDelayedByScavenge,
635 IncrementalMarking::NO_GC_VIA_STACK_GUARD);
636 if (!incremental_marking()->IsComplete()) {
637 if (FLAG_trace_incremental_marking) {
638 PrintF("[IncrementalMarking] Delaying MarkSweep.\n");
640 collector = SCAVENGER;
641 collector_reason = "incremental marking delaying mark-sweep";
645 bool next_gc_likely_to_collect_more = false;
647 { GCTracer tracer(this, gc_reason, collector_reason);
648 ASSERT(AllowHeapAllocation::IsAllowed());
649 DisallowHeapAllocation no_allocation_during_gc;
650 GarbageCollectionPrologue();
651 // The GC count was incremented in the prologue. Tell the tracer about
652 // it.
653 tracer.set_gc_count(gc_count_);
655 // Tell the tracer which collector we've selected.
656 tracer.set_collector(collector);
659 HistogramTimerScope histogram_timer_scope(
660 (collector == SCAVENGER) ? isolate_->counters()->gc_scavenger()
661 : isolate_->counters()->gc_compactor());
662 next_gc_likely_to_collect_more =
663 PerformGarbageCollection(collector, &tracer);
666 GarbageCollectionEpilogue();
669 // Start incremental marking for the next cycle. The heap snapshot
670 // generator needs incremental marking to stay off after it aborted.
671 if (!mark_compact_collector()->abort_incremental_marking() &&
672 incremental_marking()->IsStopped() &&
673 incremental_marking()->WorthActivating() &&
674 NextGCIsLikelyToBeFull()) {
675 incremental_marking()->Start();
678 return next_gc_likely_to_collect_more;
682 void Heap::PerformScavenge() {
683 GCTracer tracer(this, NULL, NULL);
684 if (incremental_marking()->IsStopped()) {
685 PerformGarbageCollection(SCAVENGER, &tracer);
687 PerformGarbageCollection(MARK_COMPACTOR, &tracer);
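// MoveElements shifts a run of 'len' elements inside 'array' while keeping
// the GC bookkeeping consistent: if the array lives outside new space, every
// moved slot that now points into new space is recorded in the remembered
// set, and the incremental marker is notified of the rewritten slots.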
692 void Heap::MoveElements(FixedArray* array,
696 if (len == 0) return;
698 ASSERT(array->map() != HEAP->fixed_cow_array_map());
699 Object** dst_objects = array->data_start() + dst_index;
700 OS::MemMove(dst_objects,
701 array->data_start() + src_index,
703 if (!InNewSpace(array)) {
704 for (int i = 0; i < len; i++) {
705 // TODO(hpayer): check store buffer for entries
706 if (InNewSpace(dst_objects[i])) {
707 RecordWrite(array->address(), array->OffsetOfElementAt(dst_index + i));
711 incremental_marking()->RecordWrites(array);
716 // Helper class for verifying the string table.
717 class StringTableVerifier : public ObjectVisitor {
719 void VisitPointers(Object** start, Object** end) {
720 // Visit all HeapObject pointers in [start, end).
721 for (Object** p = start; p < end; p++) {
722 if ((*p)->IsHeapObject()) {
723 // Check that the string is actually internalized.
724 CHECK((*p)->IsTheHole() || (*p)->IsUndefined() ||
725 (*p)->IsInternalizedString());
732 static void VerifyStringTable() {
733 StringTableVerifier verifier;
734 HEAP->string_table()->IterateElements(&verifier);
736 #endif // VERIFY_HEAP
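// Aborts any in-progress incremental marking, performs a full collection in
// the given space, and then resets the collector flags to their defaults.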
739 static bool AbortIncrementalMarkingAndCollectGarbage(
741 AllocationSpace space,
742 const char* gc_reason = NULL) {
743 heap->mark_compact_collector()->SetFlags(Heap::kAbortIncrementalMarkingMask);
744 bool result = heap->CollectGarbage(space, gc_reason);
745 heap->mark_compact_collector()->SetFlags(Heap::kNoGCFlags);
750 void Heap::ReserveSpace(
752 Address *locations_out) {
753 bool gc_performed = true;
755 static const int kThreshold = 20;
756 while (gc_performed && counter++ < kThreshold) {
757 gc_performed = false;
758 ASSERT(NEW_SPACE == FIRST_PAGED_SPACE - 1);
759 for (int space = NEW_SPACE; space <= LAST_PAGED_SPACE; space++) {
760 if (sizes[space] != 0) {
761 MaybeObject* allocation;
762 if (space == NEW_SPACE) {
763 allocation = new_space()->AllocateRaw(sizes[space]);
765 allocation = paged_space(space)->AllocateRaw(sizes[space]);
768 if (!allocation->To<FreeListNode>(&node)) {
769 if (space == NEW_SPACE) {
770 Heap::CollectGarbage(NEW_SPACE,
771 "failed to reserve space in the new space");
773 AbortIncrementalMarkingAndCollectGarbage(
775 static_cast<AllocationSpace>(space),
776 "failed to reserve space in paged space");
781 // Mark with a free list node, in case we have a GC before
782 // reserving the space.
783 node->set_size(this, sizes[space]);
784 locations_out[space] = node->address();
791 // Failed to reserve the space after several attempts.
792 V8::FatalProcessOutOfMemory("Heap::ReserveSpace");
797 void Heap::EnsureFromSpaceIsCommitted() {
798 if (new_space_.CommitFromSpaceIfNeeded()) return;
800 // Committing memory to from space failed.
801 // Memory is exhausted and we will die.
802 V8::FatalProcessOutOfMemory("Committing semi space failed.");
806 void Heap::ClearJSFunctionResultCaches() {
807 if (isolate_->bootstrapper()->IsActive()) return;
809 Object* context = native_contexts_list_;
810 while (!context->IsUndefined()) {
811 // Get the caches for this context. GC can happen when the context
812 // is not fully initialized, so the caches can be undefined.
813 Object* caches_or_undefined =
814 Context::cast(context)->get(Context::JSFUNCTION_RESULT_CACHES_INDEX);
815 if (!caches_or_undefined->IsUndefined()) {
816 FixedArray* caches = FixedArray::cast(caches_or_undefined);
818 int length = caches->length();
819 for (int i = 0; i < length; i++) {
820 JSFunctionResultCache::cast(caches->get(i))->Clear();
823 // Get the next context:
824 context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
829 void Heap::ClearNormalizedMapCaches() {
830 if (isolate_->bootstrapper()->IsActive() &&
831 !incremental_marking()->IsMarking()) {
835 Object* context = native_contexts_list_;
836 while (!context->IsUndefined()) {
837 // GC can happen when the context is not fully initialized,
838 // so the cache can be undefined.
840 Context::cast(context)->get(Context::NORMALIZED_MAP_CACHE_INDEX);
841 if (!cache->IsUndefined()) {
842 NormalizedMapCache::cast(cache)->Clear();
844 context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
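// Recomputes the young-generation survival rate after a scavenge and tracks
// how long high- and low-survival periods have lasted. The resulting trend
// (INCREASING / STABLE / DECREASING) feeds the high promotion mode heuristic
// in PerformGarbageCollection below.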
849 void Heap::UpdateSurvivalRateTrend(int start_new_space_size) {
850 double survival_rate =
851 (static_cast<double>(young_survivors_after_last_gc_) * 100) /
852 start_new_space_size;
854 if (survival_rate > kYoungSurvivalRateHighThreshold) {
855 high_survival_rate_period_length_++;
857 high_survival_rate_period_length_ = 0;
860 if (survival_rate < kYoungSurvivalRateLowThreshold) {
861 low_survival_rate_period_length_++;
863 low_survival_rate_period_length_ = 0;
866 double survival_rate_diff = survival_rate_ - survival_rate;
868 if (survival_rate_diff > kYoungSurvivalRateAllowedDeviation) {
869 set_survival_rate_trend(DECREASING);
870 } else if (survival_rate_diff < -kYoungSurvivalRateAllowedDeviation) {
871 set_survival_rate_trend(INCREASING);
873 set_survival_rate_trend(STABLE);
876 survival_rate_ = survival_rate;
879 bool Heap::PerformGarbageCollection(GarbageCollector collector,
881 bool next_gc_likely_to_collect_more = false;
883 if (collector != SCAVENGER) {
884 PROFILE(isolate_, CodeMovingGCEvent());
888 if (FLAG_verify_heap) {
894 collector == MARK_COMPACTOR ? kGCTypeMarkSweepCompact : kGCTypeScavenge;
897 GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
898 VMState<EXTERNAL> state(isolate_);
899 CallGCPrologueCallbacks(gc_type, kNoGCCallbackFlags);
902 EnsureFromSpaceIsCommitted();
904 int start_new_space_size = Heap::new_space()->SizeAsInt();
906 if (IsHighSurvivalRate()) {
907 // We speed up the incremental marker if it is running so that it
908 // does not fall behind the rate of promotion, which would cause a
909 // constantly growing old space.
910 incremental_marking()->NotifyOfHighPromotionRate();
913 if (collector == MARK_COMPACTOR) {
914 // Perform mark-sweep with optional compaction.
918 UpdateSurvivalRateTrend(start_new_space_size);
920 size_of_old_gen_at_last_old_space_gc_ = PromotedSpaceSizeOfObjects();
922 old_generation_allocation_limit_ =
923 OldGenerationAllocationLimit(size_of_old_gen_at_last_old_space_gc_);
925 old_gen_exhausted_ = false;
931 UpdateSurvivalRateTrend(start_new_space_size);
934 if (!new_space_high_promotion_mode_active_ &&
935 new_space_.Capacity() == new_space_.MaximumCapacity() &&
936 IsStableOrIncreasingSurvivalTrend() &&
937 IsHighSurvivalRate()) {
938 // Stable high survival rates even though the young generation is at
939 // maximum capacity indicate that most objects will be promoted.
940 // To decrease scavenger pauses and final mark-sweep pauses, we
941 // have to limit the maximal capacity of the young generation.
942 SetNewSpaceHighPromotionModeActive(true);
944 PrintPID("Limited new space size due to high promotion rate: %d MB\n",
945 new_space_.InitialCapacity() / MB);
947 // Support for global pre-tenuring uses the high promotion mode as a
948 // heuristic indicator of whether to pretenure or not. We trigger
949 // deoptimization here to take advantage of pre-tenuring as soon as
950 // possible.
951 if (FLAG_pretenuring) {
952 isolate_->stack_guard()->FullDeopt();
954 } else if (new_space_high_promotion_mode_active_ &&
955 IsStableOrDecreasingSurvivalTrend() &&
956 IsLowSurvivalRate()) {
957 // Decreasing low survival rates might indicate that the above high
958 // promotion mode is over and we should allow the young generation
959 // to grow dynamically again.
960 SetNewSpaceHighPromotionModeActive(false);
962 PrintPID("Unlimited new space size due to low promotion rate: %d MB\n",
963 new_space_.MaximumCapacity() / MB);
965 // Trigger deoptimization here to turn off pre-tenuring as soon as
966 // possible.
967 if (FLAG_pretenuring) {
968 isolate_->stack_guard()->FullDeopt();
972 if (new_space_high_promotion_mode_active_ &&
973 new_space_.Capacity() > new_space_.InitialCapacity()) {
977 isolate_->counters()->objs_since_last_young()->Set(0);
979 // Callbacks that fire after this point might trigger nested GCs and
980 // restart incremental marking, so the assertion can't be moved down.
981 ASSERT(collector == SCAVENGER || incremental_marking()->IsStopped());
983 gc_post_processing_depth_++;
984 { AllowHeapAllocation allow_allocation;
985 GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
986 next_gc_likely_to_collect_more =
987 isolate_->global_handles()->PostGarbageCollectionProcessing(
990 gc_post_processing_depth_--;
992 // Update relocatables.
993 Relocatable::PostGarbageCollectionProcessing();
995 if (collector == MARK_COMPACTOR) {
996 // Register the amount of external allocated memory.
997 amount_of_external_allocated_memory_at_last_global_gc_ =
998 amount_of_external_allocated_memory_;
1002 GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
1003 VMState<EXTERNAL> state(isolate_);
1004 CallGCEpilogueCallbacks(gc_type);
1008 if (FLAG_verify_heap) {
1009 VerifyStringTable();
1013 return next_gc_likely_to_collect_more;
1017 void Heap::CallGCPrologueCallbacks(GCType gc_type, GCCallbackFlags flags) {
1018 if (gc_type == kGCTypeMarkSweepCompact && global_gc_prologue_callback_) {
1019 global_gc_prologue_callback_();
1021 for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
1022 if (gc_type & gc_prologue_callbacks_[i].gc_type) {
1023 gc_prologue_callbacks_[i].callback(gc_type, flags);
1029 void Heap::CallGCEpilogueCallbacks(GCType gc_type) {
1030 for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
1031 if (gc_type & gc_epilogue_callbacks_[i].gc_type) {
1032 gc_epilogue_callbacks_[i].callback(gc_type, kNoGCCallbackFlags);
1035 if (gc_type == kGCTypeMarkSweepCompact && global_gc_epilogue_callback_) {
1036 global_gc_epilogue_callback_();
1041 void Heap::MarkCompact(GCTracer* tracer) {
1042 gc_state_ = MARK_COMPACT;
1043 LOG(isolate_, ResourceEvent("markcompact", "begin"));
1045 mark_compact_collector_.Prepare(tracer);
1048 tracer->set_full_gc_count(ms_count_);
1050 MarkCompactPrologue();
1052 mark_compact_collector_.CollectGarbage();
1054 LOG(isolate_, ResourceEvent("markcompact", "end"));
1056 gc_state_ = NOT_IN_GC;
1058 isolate_->counters()->objs_since_last_full()->Set(0);
1060 contexts_disposed_ = 0;
1062 flush_monomorphic_ics_ = false;
1066 void Heap::MarkCompactPrologue() {
1067 // At any old GC clear the keyed lookup cache to enable collection of unused
1068 // maps.
1069 isolate_->keyed_lookup_cache()->Clear();
1070 isolate_->context_slot_cache()->Clear();
1071 isolate_->descriptor_lookup_cache()->Clear();
1072 RegExpResultsCache::Clear(string_split_cache());
1073 RegExpResultsCache::Clear(regexp_multiple_cache());
1075 isolate_->compilation_cache()->MarkCompactPrologue();
1077 CompletelyClearInstanceofCache();
1079 FlushNumberStringCache();
1080 if (FLAG_cleanup_code_caches_at_gc) {
1081 polymorphic_code_cache()->set_cache(undefined_value());
1084 ClearNormalizedMapCaches();
1088 Object* Heap::FindCodeObject(Address a) {
1089 return isolate()->inner_pointer_to_code_cache()->
1090 GcSafeFindCodeForInnerPointer(a);
1094 // Helper class for copying HeapObjects
1095 class ScavengeVisitor: public ObjectVisitor {
1097 explicit ScavengeVisitor(Heap* heap) : heap_(heap) {}
1099 void VisitPointer(Object** p) { ScavengePointer(p); }
1101 void VisitPointers(Object** start, Object** end) {
1102 // Copy all HeapObject pointers in [start, end)
1103 for (Object** p = start; p < end; p++) ScavengePointer(p);
1107 void ScavengePointer(Object** p) {
1108 Object* object = *p;
1109 if (!heap_->InNewSpace(object)) return;
1110 Heap::ScavengeObject(reinterpret_cast<HeapObject**>(p),
1111 reinterpret_cast<HeapObject*>(object));
1119 // Visitor class to verify pointers in code or data space do not point into
1120 // new space.
1121 class VerifyNonPointerSpacePointersVisitor: public ObjectVisitor {
1123 void VisitPointers(Object** start, Object** end) {
1124 for (Object** current = start; current < end; current++) {
1125 if ((*current)->IsHeapObject()) {
1126 CHECK(!HEAP->InNewSpace(HeapObject::cast(*current)));
1133 static void VerifyNonPointerSpacePointers() {
1134 // Verify that there are no pointers to new space in spaces where we
1135 // do not expect them.
1136 VerifyNonPointerSpacePointersVisitor v;
1137 HeapObjectIterator code_it(HEAP->code_space());
1138 for (HeapObject* object = code_it.Next();
1139 object != NULL; object = code_it.Next())
1140 object->Iterate(&v);
1142 // The old data space was normally swept conservatively so that the iterator
1143 // doesn't work, so we normally skip the next bit.
1144 if (!HEAP->old_data_space()->was_swept_conservatively()) {
1145 HeapObjectIterator data_it(HEAP->old_data_space());
1146 for (HeapObject* object = data_it.Next();
1147 object != NULL; object = data_it.Next())
1148 object->Iterate(&v);
1151 #endif // VERIFY_HEAP
1154 void Heap::CheckNewSpaceExpansionCriteria() {
1155 if (new_space_.Capacity() < new_space_.MaximumCapacity() &&
1156 survived_since_last_expansion_ > new_space_.Capacity() &&
1157 !new_space_high_promotion_mode_active_) {
1158 // Grow the size of new space if there is room to grow, enough data
1159 // has survived scavenge since the last expansion and we are not in
1160 // high promotion mode.
1162 survived_since_last_expansion_ = 0;
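// A new-space object counts as unscavenged if its map word does not yet hold
// a forwarding address, i.e. it has not been copied by the current scavenge.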
1167 static bool IsUnscavengedHeapObject(Heap* heap, Object** p) {
1168 return heap->InNewSpace(*p) &&
1169 !HeapObject::cast(*p)->map_word().IsForwardingAddress();
1173 void Heap::ScavengeStoreBufferCallback(
1176 StoreBufferEvent event) {
1177 heap->store_buffer_rebuilder_.Callback(page, event);
1181 void StoreBufferRebuilder::Callback(MemoryChunk* page, StoreBufferEvent event) {
1182 if (event == kStoreBufferStartScanningPagesEvent) {
1183 start_of_current_page_ = NULL;
1184 current_page_ = NULL;
1185 } else if (event == kStoreBufferScanningPageEvent) {
1186 if (current_page_ != NULL) {
1187 // If this page already overflowed the store buffer during this iteration.
1188 if (current_page_->scan_on_scavenge()) {
1189 // Then we should wipe out the entries that have been added for it.
1190 store_buffer_->SetTop(start_of_current_page_);
1191 } else if (store_buffer_->Top() - start_of_current_page_ >=
1192 (store_buffer_->Limit() - store_buffer_->Top()) >> 2) {
1193 // Did we find too many pointers in the previous page? The heuristic is
1194 // that no page can take more than 1/5 of the remaining slots in the store
1195 // buffer.
1196 current_page_->set_scan_on_scavenge(true);
1197 store_buffer_->SetTop(start_of_current_page_);
1199 // In this case the page we scanned took a reasonable number of slots in
1200 // the store buffer. It has now been rehabilitated and is no longer
1201 // marked scan_on_scavenge.
1202 ASSERT(!current_page_->scan_on_scavenge());
1205 start_of_current_page_ = store_buffer_->Top();
1206 current_page_ = page;
1207 } else if (event == kStoreBufferFullEvent) {
1208 // The current page overflowed the store buffer again. Wipe out its entries
1209 // in the store buffer and mark it scan-on-scavenge again. This may happen
1210 // several times while scanning.
1211 if (current_page_ == NULL) {
1212 // Store Buffer overflowed while scanning promoted objects. These are not
1213 // in any particular page, though they are likely to be clustered by the
1214 // allocation routines.
1215 store_buffer_->EnsureSpace(StoreBuffer::kStoreBufferSize);
1217 // Store Buffer overflowed while scanning a particular old space page for
1218 // pointers to new space.
1219 ASSERT(current_page_ == page);
1220 ASSERT(page != NULL);
1221 current_page_->set_scan_on_scavenge(true);
1222 ASSERT(start_of_current_page_ != store_buffer_->Top());
1223 store_buffer_->SetTop(start_of_current_page_);
1231 void PromotionQueue::Initialize() {
1232 // Assumes that a NewSpacePage exactly fits a number of promotion queue
1233 // entries (where each is a pair of intptr_t). This allows us to simplify
1234 // the test for when to switch pages.
1235 ASSERT((Page::kPageSize - MemoryChunk::kBodyOffset) % (2 * kPointerSize)
1237 limit_ = reinterpret_cast<intptr_t*>(heap_->new_space()->ToSpaceStart());
1239 reinterpret_cast<intptr_t*>(heap_->new_space()->ToSpaceEnd());
1240 emergency_stack_ = NULL;
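// The promotion queue is threaded through the unused top of to-space. When
// the page holding the queue head is needed for allocation, RelocateQueueHead
// copies its (object, size) entries onto a heap-allocated emergency stack so
// that no queued entry is lost.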
1245 void PromotionQueue::RelocateQueueHead() {
1246 ASSERT(emergency_stack_ == NULL);
1248 Page* p = Page::FromAllocationTop(reinterpret_cast<Address>(rear_));
1249 intptr_t* head_start = rear_;
1250 intptr_t* head_end =
1251 Min(front_, reinterpret_cast<intptr_t*>(p->area_end()));
1254 static_cast<int>(head_end - head_start) / kEntrySizeInWords;
1256 emergency_stack_ = new List<Entry>(2 * entries_count);
1258 while (head_start != head_end) {
1259 int size = static_cast<int>(*(head_start++));
1260 HeapObject* obj = reinterpret_cast<HeapObject*>(*(head_start++));
1261 emergency_stack_->Add(Entry(obj, size));
1267 class ScavengeWeakObjectRetainer : public WeakObjectRetainer {
1269 explicit ScavengeWeakObjectRetainer(Heap* heap) : heap_(heap) { }
1271 virtual Object* RetainAs(Object* object) {
1272 if (!heap_->InFromSpace(object)) {
1276 MapWord map_word = HeapObject::cast(object)->map_word();
1277 if (map_word.IsForwardingAddress()) {
1278 return map_word.ToForwardingAddress();
1288 void Heap::Scavenge() {
1289 RelocationLock relocation_lock(this);
1292 if (FLAG_verify_heap) VerifyNonPointerSpacePointers();
1295 gc_state_ = SCAVENGE;
1297 // Implements Cheney's copying algorithm
1298 LOG(isolate_, ResourceEvent("scavenge", "begin"));
1300 // Clear descriptor cache.
1301 isolate_->descriptor_lookup_cache()->Clear();
1303 // Used for updating survived_since_last_expansion_ at function end.
1304 intptr_t survived_watermark = PromotedSpaceSizeOfObjects();
1306 CheckNewSpaceExpansionCriteria();
1308 SelectScavengingVisitorsTable();
1310 incremental_marking()->PrepareForScavenge();
1312 paged_space(OLD_DATA_SPACE)->EnsureSweeperProgress(new_space_.Size());
1313 paged_space(OLD_POINTER_SPACE)->EnsureSweeperProgress(new_space_.Size());
1315 // Flip the semispaces. After flipping, to space is empty, from space has
1316 // live objects.
1317 new_space_.Flip();
1318 new_space_.ResetAllocationInfo();
1320 // We need to sweep newly copied objects which can be either in the
1321 // to space or promoted to the old generation. For to-space
1322 // objects, we treat the bottom of the to space as a queue. Newly
1323 // copied and unswept objects lie between a 'front' mark and the
1324 // allocation pointer.
1326 // Promoted objects can go into various old-generation spaces, and
1327 // can be allocated internally in the spaces (from the free list).
1328 // We treat the top of the to space as a queue of addresses of
1329 // promoted objects. The addresses of newly promoted and unswept
1330 // objects lie between a 'front' mark and a 'rear' mark that is
1331 // updated as a side effect of promoting an object.
1333 // There is guaranteed to be enough room at the top of the to space
1334 // for the addresses of promoted objects: every object promoted
1335 // frees up its size in bytes from the top of the new space, and
1336 // objects are at least one pointer in size.
1337 Address new_space_front = new_space_.ToSpaceStart();
1338 promotion_queue_.Initialize();
1341 store_buffer()->Clean();
1344 ScavengeVisitor scavenge_visitor(this);
1346 IterateRoots(&scavenge_visitor, VISIT_ALL_IN_SCAVENGE);
1348 // Copy objects reachable from the old generation.
1350 StoreBufferRebuildScope scope(this,
1352 &ScavengeStoreBufferCallback);
1353 store_buffer()->IteratePointersToNewSpace(&ScavengeObject);
1356 // Copy objects reachable from cells by scavenging cell values directly.
1357 HeapObjectIterator cell_iterator(cell_space_);
1358 for (HeapObject* heap_object = cell_iterator.Next();
1359 heap_object != NULL;
1360 heap_object = cell_iterator.Next()) {
1361 if (heap_object->IsJSGlobalPropertyCell()) {
1362 JSGlobalPropertyCell* cell = JSGlobalPropertyCell::cast(heap_object);
1363 Address value_address = cell->ValueAddress();
1364 scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(value_address));
1368 // Copy objects reachable from the code flushing candidates list.
1369 MarkCompactCollector* collector = mark_compact_collector();
1370 if (collector->is_code_flushing_enabled()) {
1371 collector->code_flusher()->IteratePointersToFromSpace(&scavenge_visitor);
1374 // Scavenge object reachable from the native contexts list directly.
1375 scavenge_visitor.VisitPointer(BitCast<Object**>(&native_contexts_list_));
1377 new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
1379 while (isolate()->global_handles()->IterateObjectGroups(
1380 &scavenge_visitor, &IsUnscavengedHeapObject)) {
1381 new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
1383 isolate()->global_handles()->RemoveObjectGroups();
1384 isolate()->global_handles()->RemoveImplicitRefGroups();
1386 isolate_->global_handles()->IdentifyNewSpaceWeakIndependentHandles(
1387 &IsUnscavengedHeapObject);
1388 isolate_->global_handles()->IterateNewSpaceWeakIndependentRoots(
1390 new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
1392 UpdateNewSpaceReferencesInExternalStringTable(
1393 &UpdateNewSpaceReferenceInExternalStringTableEntry);
1395 error_object_list_.UpdateReferencesInNewSpace(this);
1397 promotion_queue_.Destroy();
1399 if (!FLAG_watch_ic_patching) {
1400 isolate()->runtime_profiler()->UpdateSamplesAfterScavenge();
1402 incremental_marking()->UpdateMarkingDequeAfterScavenge();
1404 ScavengeWeakObjectRetainer weak_object_retainer(this);
1405 ProcessWeakReferences(&weak_object_retainer);
1407 ASSERT(new_space_front == new_space_.top());
1410 new_space_.set_age_mark(new_space_.top());
1412 new_space_.LowerInlineAllocationLimit(
1413 new_space_.inline_allocation_limit_step());
1415 // Update how much has survived scavenge.
1416 IncrementYoungSurvivorsCounter(static_cast<int>(
1417 (PromotedSpaceSizeOfObjects() - survived_watermark) + new_space_.Size()));
1419 LOG(isolate_, ResourceEvent("scavenge", "end"));
1421 gc_state_ = NOT_IN_GC;
1423 scavenges_since_last_idle_round_++;
1427 String* Heap::UpdateNewSpaceReferenceInExternalStringTableEntry(Heap* heap,
1429 MapWord first_word = HeapObject::cast(*p)->map_word();
1431 if (!first_word.IsForwardingAddress()) {
1432 // Unreachable external string can be finalized.
1433 heap->FinalizeExternalString(String::cast(*p));
1437 // String is still reachable.
1438 return String::cast(first_word.ToForwardingAddress());
1442 void Heap::UpdateNewSpaceReferencesInExternalStringTable(
1443 ExternalStringTableUpdaterCallback updater_func) {
1445 if (FLAG_verify_heap) {
1446 external_string_table_.Verify();
1450 if (external_string_table_.new_space_strings_.is_empty()) return;
1452 Object** start = &external_string_table_.new_space_strings_[0];
1453 Object** end = start + external_string_table_.new_space_strings_.length();
1454 Object** last = start;
1456 for (Object** p = start; p < end; ++p) {
1457 ASSERT(InFromSpace(*p));
1458 String* target = updater_func(this, p);
1460 if (target == NULL) continue;
1462 ASSERT(target->IsExternalString());
1464 if (InNewSpace(target)) {
1465 // String is still in new space. Update the table entry.
1469 // String got promoted. Move it to the old string list.
1470 external_string_table_.AddOldString(target);
1474 ASSERT(last <= end);
1475 external_string_table_.ShrinkNewStrings(static_cast<int>(last - start));
1479 void Heap::UpdateReferencesInExternalStringTable(
1480 ExternalStringTableUpdaterCallback updater_func) {
1482 // Update old space string references.
1483 if (external_string_table_.old_space_strings_.length() > 0) {
1484 Object** start = &external_string_table_.old_space_strings_[0];
1485 Object** end = start + external_string_table_.old_space_strings_.length();
1486 for (Object** p = start; p < end; ++p) *p = updater_func(this, p);
1489 UpdateNewSpaceReferencesInExternalStringTable(updater_func);
1493 static Object* ProcessFunctionWeakReferences(Heap* heap,
1495 WeakObjectRetainer* retainer,
1496 bool record_slots) {
1497 Object* undefined = heap->undefined_value();
1498 Object* head = undefined;
1499 JSFunction* tail = NULL;
1500 Object* candidate = function;
1501 while (candidate != undefined) {
1502 // Check whether to keep the candidate in the list.
1503 JSFunction* candidate_function = reinterpret_cast<JSFunction*>(candidate);
1504 Object* retain = retainer->RetainAs(candidate);
1505 if (retain != NULL) {
1506 if (head == undefined) {
1507 // First element in the list.
1510 // Subsequent elements in the list.
1511 ASSERT(tail != NULL);
1512 tail->set_next_function_link(retain);
1514 Object** next_function =
1515 HeapObject::RawField(tail, JSFunction::kNextFunctionLinkOffset);
1516 heap->mark_compact_collector()->RecordSlot(
1517 next_function, next_function, retain);
1520 // Retained function is new tail.
1521 candidate_function = reinterpret_cast<JSFunction*>(retain);
1522 tail = candidate_function;
1524 ASSERT(retain->IsUndefined() || retain->IsJSFunction());
1526 if (retain == undefined) break;
1529 // Move to next element in the list.
1530 candidate = candidate_function->next_function_link();
1533 // Terminate the list if there are one or more elements.
1535 tail->set_next_function_link(undefined);
1542 void Heap::ProcessWeakReferences(WeakObjectRetainer* retainer) {
1543 // We don't record weak slots during marking or scavenges.
1544 // Instead we do it once when we complete a mark-compact cycle.
1545 // Note that the write barrier has no effect if we are already in the middle
1546 // of a compacting mark-sweep cycle and we have to record slots manually.
1548 gc_state() == MARK_COMPACT &&
1549 mark_compact_collector()->is_compacting();
1550 ProcessArrayBuffers(retainer, record_slots);
1551 ProcessNativeContexts(retainer, record_slots);
1554 void Heap::ProcessNativeContexts(WeakObjectRetainer* retainer,
1555 bool record_slots) {
1556 Object* undefined = undefined_value();
1557 Object* head = undefined;
1558 Context* tail = NULL;
1559 Object* candidate = native_contexts_list_;
1561 while (candidate != undefined) {
1562 // Check whether to keep the candidate in the list.
1563 Context* candidate_context = reinterpret_cast<Context*>(candidate);
1564 Object* retain = retainer->RetainAs(candidate);
1565 if (retain != NULL) {
1566 if (head == undefined) {
1567 // First element in the list.
1570 // Subsequent elements in the list.
1571 ASSERT(tail != NULL);
1572 tail->set_unchecked(this,
1573 Context::NEXT_CONTEXT_LINK,
1575 UPDATE_WRITE_BARRIER);
1578 Object** next_context =
1579 HeapObject::RawField(
1580 tail, FixedArray::SizeFor(Context::NEXT_CONTEXT_LINK));
1581 mark_compact_collector()->RecordSlot(
1582 next_context, next_context, retain);
1585 // Retained context is new tail.
1586 candidate_context = reinterpret_cast<Context*>(retain);
1587 tail = candidate_context;
1589 if (retain == undefined) break;
1591 // Process the weak list of optimized functions for the context.
1592 Object* function_list_head =
1593 ProcessFunctionWeakReferences(
1595 candidate_context->get(Context::OPTIMIZED_FUNCTIONS_LIST),
1598 candidate_context->set_unchecked(this,
1599 Context::OPTIMIZED_FUNCTIONS_LIST,
1601 UPDATE_WRITE_BARRIER);
1603 Object** optimized_functions =
1604 HeapObject::RawField(
1605 tail, FixedArray::SizeFor(Context::OPTIMIZED_FUNCTIONS_LIST));
1606 mark_compact_collector()->RecordSlot(
1607 optimized_functions, optimized_functions, function_list_head);
1611 // Move to next element in the list.
1612 candidate = candidate_context->get(Context::NEXT_CONTEXT_LINK);
1615 // Terminate the list if there are one or more elements.
1617 tail->set_unchecked(this,
1618 Context::NEXT_CONTEXT_LINK,
1619 Heap::undefined_value(),
1620 UPDATE_WRITE_BARRIER);
1623 // Update the head of the list of contexts.
1624 native_contexts_list_ = head;
1628 template <class T>
1629 struct WeakListVisitor;
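// Generic walker for a weak list threaded through objects of type T via
// WeakListVisitor<T>. Entries dropped by the retainer are unlinked; for
// surviving entries the updated 'next' slot is recorded with the compacting
// collector when record_slots is set, and the list is re-terminated with
// Smi zero.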
1632 template <class T>
1633 static Object* VisitWeakList(Object* list,
1634 MarkCompactCollector* collector,
1635 WeakObjectRetainer* retainer, bool record_slots) {
1636 Object* head = Smi::FromInt(0);
1638 while (list != Smi::FromInt(0)) {
1639 Object* retained = retainer->RetainAs(list);
1640 if (retained != NULL) {
1641 if (head == Smi::FromInt(0)) {
1644 ASSERT(tail != NULL);
1645 WeakListVisitor<T>::set_weak_next(tail, retained);
1647 Object** next_slot =
1648 HeapObject::RawField(tail, WeakListVisitor<T>::kWeakNextOffset);
1649 collector->RecordSlot(next_slot, next_slot, retained);
1652 tail = reinterpret_cast<T*>(retained);
1653 WeakListVisitor<T>::VisitLiveObject(
1654 tail, collector, retainer, record_slots);
1656 list = WeakListVisitor<T>::get_weak_next(reinterpret_cast<T*>(list));
1659 tail->set_weak_next(Smi::FromInt(0));
1665 template<>
1666 struct WeakListVisitor<JSTypedArray> {
1667 static void set_weak_next(JSTypedArray* obj, Object* next) {
1668 obj->set_weak_next(next);
1671 static Object* get_weak_next(JSTypedArray* obj) {
1672 return obj->weak_next();
1675 static void VisitLiveObject(JSTypedArray* obj,
1676 MarkCompactCollector* collector,
1677 WeakObjectRetainer* retainer,
1678 bool record_slots) {}
1680 static const int kWeakNextOffset = JSTypedArray::kWeakNextOffset;
1684 template<>
1685 struct WeakListVisitor<JSArrayBuffer> {
1686 static void set_weak_next(JSArrayBuffer* obj, Object* next) {
1687 obj->set_weak_next(next);
1690 static Object* get_weak_next(JSArrayBuffer* obj) {
1691 return obj->weak_next();
1694 static void VisitLiveObject(JSArrayBuffer* array_buffer,
1695 MarkCompactCollector* collector,
1696 WeakObjectRetainer* retainer,
1697 bool record_slots) {
1698 Object* typed_array_obj =
1699 VisitWeakList<JSTypedArray>(array_buffer->weak_first_array(),
1700 collector, retainer, record_slots);
1701 array_buffer->set_weak_first_array(typed_array_obj);
1702 if (typed_array_obj != Smi::FromInt(0) && record_slots) {
1703 Object** slot = HeapObject::RawField(
1704 array_buffer, JSArrayBuffer::kWeakFirstArrayOffset);
1705 collector->RecordSlot(slot, slot, typed_array_obj);
1709 static const int kWeakNextOffset = JSArrayBuffer::kWeakNextOffset;
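// Prunes the heap-global weak list of JSArrayBuffers, keeping only the
// buffers that the retainer still considers live.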
1713 void Heap::ProcessArrayBuffers(WeakObjectRetainer* retainer,
1714 bool record_slots) {
1715 Object* array_buffer_obj =
1716 VisitWeakList<JSArrayBuffer>(array_buffers_list(),
1717 mark_compact_collector(),
1718 retainer, record_slots);
1719 set_array_buffers_list(array_buffer_obj);
1723 void Heap::VisitExternalResources(v8::ExternalResourceVisitor* visitor) {
1724 DisallowHeapAllocation no_allocation;
1726 // Both the external string table and the string table may contain
1727 // external strings, but neither lists them exhaustively, nor is the
1728 // intersection set empty. Therefore we iterate over the external string
1729 // table first, ignoring internalized strings, and then over the
1730 // internalized string table.
1732 class ExternalStringTableVisitorAdapter : public ObjectVisitor {
1734 explicit ExternalStringTableVisitorAdapter(
1735 v8::ExternalResourceVisitor* visitor) : visitor_(visitor) {}
1736 virtual void VisitPointers(Object** start, Object** end) {
1737 for (Object** p = start; p < end; p++) {
1738 // Visit non-internalized external strings,
1739 // since internalized strings are listed in the string table.
1740 if (!(*p)->IsInternalizedString()) {
1741 ASSERT((*p)->IsExternalString());
1742 visitor_->VisitExternalString(Utils::ToLocal(
1743 Handle<String>(String::cast(*p))));
1748 v8::ExternalResourceVisitor* visitor_;
1749 } external_string_table_visitor(visitor);
1751 external_string_table_.Iterate(&external_string_table_visitor);
1753 class StringTableVisitorAdapter : public ObjectVisitor {
1755 explicit StringTableVisitorAdapter(
1756 v8::ExternalResourceVisitor* visitor) : visitor_(visitor) {}
1757 virtual void VisitPointers(Object** start, Object** end) {
1758 for (Object** p = start; p < end; p++) {
1759 if ((*p)->IsExternalString()) {
1760 ASSERT((*p)->IsInternalizedString());
1761 visitor_->VisitExternalString(Utils::ToLocal(
1762 Handle<String>(String::cast(*p))));
1767 v8::ExternalResourceVisitor* visitor_;
1768 } string_table_visitor(visitor);
1770 string_table()->IterateElements(&string_table_visitor);
1774 class NewSpaceScavenger : public StaticNewSpaceVisitor<NewSpaceScavenger> {
1776 static inline void VisitPointer(Heap* heap, Object** p) {
1777 Object* object = *p;
1778 if (!heap->InNewSpace(object)) return;
1779 Heap::ScavengeObject(reinterpret_cast<HeapObject**>(p),
1780 reinterpret_cast<HeapObject*>(object));
1785 Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
1786 Address new_space_front) {
1788 SemiSpace::AssertValidRange(new_space_front, new_space_.top());
1789 // The addresses new_space_front and new_space_.top() define a
1790 // queue of unprocessed copied objects. Process them until the
1791 // queue is empty.
1792 while (new_space_front != new_space_.top()) {
1793 if (!NewSpacePage::IsAtEnd(new_space_front)) {
1794 HeapObject* object = HeapObject::FromAddress(new_space_front);
1796 NewSpaceScavenger::IterateBody(object->map(), object);
1799 NewSpacePage::FromLimit(new_space_front)->next_page()->area_start();
1803 // Promote and process all the to-be-promoted objects.
1805 StoreBufferRebuildScope scope(this,
1807 &ScavengeStoreBufferCallback);
1808 while (!promotion_queue()->is_empty()) {
1811 promotion_queue()->remove(&target, &size);
1813 // Promoted object might be already partially visited
1814 // during old space pointer iteration. Thus we search specifically
1815 // for pointers to from semispace instead of looking for pointers
1816 // to new space.
1817 ASSERT(!target->IsMap());
1818 IterateAndMarkPointersToFromSpace(target->address(),
1819 target->address() + size,
1824 // Take another spin if there are now unswept objects in new space
1825 // (there are currently no more unswept promoted objects).
1826 } while (new_space_front != new_space_.top());
1828 return new_space_front;
1832 STATIC_ASSERT((FixedDoubleArray::kHeaderSize & kDoubleAlignmentMask) == 0);
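// EnsureDoubleAligned assumes the allocation is one word larger than the
// object needs: if the object starts on a misaligned word it is shifted up
// by kPointerSize and the hole in front is filled with a filler object;
// otherwise the spare word at the end is filled instead.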
1835 INLINE(static HeapObject* EnsureDoubleAligned(Heap* heap,
1839 static HeapObject* EnsureDoubleAligned(Heap* heap,
1842 if ((OffsetFrom(object->address()) & kDoubleAlignmentMask) != 0) {
1843 heap->CreateFillerObjectAt(object->address(), kPointerSize);
1844 return HeapObject::FromAddress(object->address() + kPointerSize);
1846 heap->CreateFillerObjectAt(object->address() + size - kPointerSize,
1853 enum LoggingAndProfiling {
1854 LOGGING_AND_PROFILING_ENABLED,
1855 LOGGING_AND_PROFILING_DISABLED
1859 enum MarksHandling { TRANSFER_MARKS, IGNORE_MARKS };
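// The scavenging visitor is statically specialized on whether incremental
// marking bits must be transferred to the copied object and on whether
// logging/profiling hooks need to fire, so the common copy path only pays
// for the features that are actually enabled.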
1862 template<MarksHandling marks_handling,
1863 LoggingAndProfiling logging_and_profiling_mode>
1864 class ScavengingVisitor : public StaticVisitorBase {
1866 static void Initialize() {
1867 table_.Register(kVisitSeqOneByteString, &EvacuateSeqOneByteString);
1868 table_.Register(kVisitSeqTwoByteString, &EvacuateSeqTwoByteString);
1869 table_.Register(kVisitShortcutCandidate, &EvacuateShortcutCandidate);
1870 table_.Register(kVisitByteArray, &EvacuateByteArray);
1871 table_.Register(kVisitFixedArray, &EvacuateFixedArray);
1872 table_.Register(kVisitFixedDoubleArray, &EvacuateFixedDoubleArray);
1874 table_.Register(kVisitNativeContext,
1875 &ObjectEvacuationStrategy<POINTER_OBJECT>::
1876 template VisitSpecialized<Context::kSize>);
1878 table_.Register(kVisitConsString,
1879 &ObjectEvacuationStrategy<POINTER_OBJECT>::
1880 template VisitSpecialized<ConsString::kSize>);
1882 table_.Register(kVisitSlicedString,
1883 &ObjectEvacuationStrategy<POINTER_OBJECT>::
1884 template VisitSpecialized<SlicedString::kSize>);
1886 table_.Register(kVisitSymbol,
1887 &ObjectEvacuationStrategy<POINTER_OBJECT>::
1888 template VisitSpecialized<Symbol::kSize>);
1890 table_.Register(kVisitSharedFunctionInfo,
1891 &ObjectEvacuationStrategy<POINTER_OBJECT>::
1892 template VisitSpecialized<SharedFunctionInfo::kSize>);
1894 table_.Register(kVisitJSWeakMap,
1895 &ObjectEvacuationStrategy<POINTER_OBJECT>::
1898 table_.Register(kVisitJSArrayBuffer,
1899 &ObjectEvacuationStrategy<POINTER_OBJECT>::
1902 table_.Register(kVisitJSTypedArray,
1903 &ObjectEvacuationStrategy<POINTER_OBJECT>::
1906 table_.Register(kVisitJSRegExp,
1907 &ObjectEvacuationStrategy<POINTER_OBJECT>::
1910 if (marks_handling == IGNORE_MARKS) {
1911 table_.Register(kVisitJSFunction,
1912 &ObjectEvacuationStrategy<POINTER_OBJECT>::
1913 template VisitSpecialized<JSFunction::kSize>);
1915 table_.Register(kVisitJSFunction, &EvacuateJSFunction);
1918 table_.RegisterSpecializations<ObjectEvacuationStrategy<DATA_OBJECT>,
1920 kVisitDataObjectGeneric>();
1922 table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>,
1924 kVisitJSObjectGeneric>();
1926 table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>,
1928 kVisitStructGeneric>();
1931 static VisitorDispatchTable<ScavengingCallback>* GetTable() {
1936 enum ObjectContents { DATA_OBJECT, POINTER_OBJECT };
1937 enum SizeRestriction { SMALL, UNKNOWN_SIZE };
1939 static void RecordCopiedObject(Heap* heap, HeapObject* obj) {
1940 bool should_record = false;
1942 should_record = FLAG_heap_stats;
1944 should_record = should_record || FLAG_log_gc;
1945 if (should_record) {
1946 if (heap->new_space()->Contains(obj)) {
1947 heap->new_space()->RecordAllocation(obj);
1949 heap->new_space()->RecordPromotion(obj);
  // Helper function used by CopyObject to copy a source object to an
  // allocated target object and update the forwarding pointer in the
  // source object.
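  // The forwarding pointer is stored by overwriting the source object's map
  // word (see MapWord::FromForwardingAddress below); any later visit to the
  // same from-space object detects this via MapWord::IsForwardingAddress()
  // and simply updates its slot to the already-copied target.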
1957 INLINE(static void MigrateObject(Heap* heap,
1961 // Copy the content of source to target.
1962 heap->CopyBlock(target->address(), source->address(), size);
1964 // Set the forwarding address.
1965 source->set_map_word(MapWord::FromForwardingAddress(target));
1967 if (logging_and_profiling_mode == LOGGING_AND_PROFILING_ENABLED) {
1968 // Update NewSpace stats if necessary.
1969 RecordCopiedObject(heap, target);
1970 HEAP_PROFILE(heap, ObjectMoveEvent(source->address(), target->address()));
1971 Isolate* isolate = heap->isolate();
1972 if (isolate->logger()->is_logging_code_events() ||
1973 isolate->cpu_profiler()->is_profiling()) {
1974 if (target->IsSharedFunctionInfo()) {
1975 PROFILE(isolate, SharedFunctionInfoMoveEvent(
1976 source->address(), target->address()));
1981 if (marks_handling == TRANSFER_MARKS) {
1982 if (Marking::TransferColor(source, target)) {
1983 MemoryChunk::IncrementLiveBytesFromGC(target->address(), size);
1989 template<ObjectContents object_contents,
1990 SizeRestriction size_restriction,
1992 static inline void EvacuateObject(Map* map,
1996 SLOW_ASSERT((size_restriction != SMALL) ||
1997 (object_size <= Page::kMaxNonCodeHeapObjectSize));
1998 SLOW_ASSERT(object->Size() == object_size);
2000 int allocation_size = object_size;
2001 if (alignment != kObjectAlignment) {
2002 ASSERT(alignment == kDoubleAlignment);
2003 allocation_size += kPointerSize;
2006 Heap* heap = map->GetHeap();
2007 if (heap->ShouldBePromoted(object->address(), object_size)) {
2008 MaybeObject* maybe_result;
2010 if ((size_restriction != SMALL) &&
2011 (allocation_size > Page::kMaxNonCodeHeapObjectSize)) {
2012 maybe_result = heap->lo_space()->AllocateRaw(allocation_size,
2015 if (object_contents == DATA_OBJECT) {
2016 maybe_result = heap->old_data_space()->AllocateRaw(allocation_size);
2019 heap->old_pointer_space()->AllocateRaw(allocation_size);
2023 Object* result = NULL; // Initialization to please compiler.
2024 if (maybe_result->ToObject(&result)) {
2025 HeapObject* target = HeapObject::cast(result);
2027 if (alignment != kObjectAlignment) {
2028 target = EnsureDoubleAligned(heap, target, allocation_size);
2031 // Order is important: slot might be inside of the target if target
2032 // was allocated over a dead object and slot comes from the store
2035 MigrateObject(heap, object, target, object_size);
2037 if (object_contents == POINTER_OBJECT) {
2038 if (map->instance_type() == JS_FUNCTION_TYPE) {
2039 heap->promotion_queue()->insert(
2040 target, JSFunction::kNonWeakFieldsEndOffset);
2042 heap->promotion_queue()->insert(target, object_size);
2046 heap->tracer()->increment_promoted_objects_size(object_size);
2050 MaybeObject* allocation = heap->new_space()->AllocateRaw(allocation_size);
2051 heap->promotion_queue()->SetNewLimit(heap->new_space()->top());
2052 Object* result = allocation->ToObjectUnchecked();
2053 HeapObject* target = HeapObject::cast(result);
2055 if (alignment != kObjectAlignment) {
2056 target = EnsureDoubleAligned(heap, target, allocation_size);
2059 // Order is important: slot might be inside of the target if target
2060 // was allocated over a dead object and slot comes from the store
2063 MigrateObject(heap, object, target, object_size);
2068 static inline void EvacuateJSFunction(Map* map,
2070 HeapObject* object) {
2071 ObjectEvacuationStrategy<POINTER_OBJECT>::
2072 template VisitSpecialized<JSFunction::kSize>(map, slot, object);
2074 HeapObject* target = *slot;
2075 MarkBit mark_bit = Marking::MarkBitFrom(target);
2076 if (Marking::IsBlack(mark_bit)) {
      // This object is black and it might not be rescanned by the marker.
      // We should explicitly record the code entry slot for compaction
      // because promotion queue processing (IterateAndMarkPointersToFromSpace)
      // will miss it, as it is not HeapObject-tagged.
2081 Address code_entry_slot =
2082 target->address() + JSFunction::kCodeEntryOffset;
2083 Code* code = Code::cast(Code::GetObjectFromEntryAddress(code_entry_slot));
2084 map->GetHeap()->mark_compact_collector()->
2085 RecordCodeEntrySlot(code_entry_slot, code);
2090 static inline void EvacuateFixedArray(Map* map,
2092 HeapObject* object) {
2093 int object_size = FixedArray::BodyDescriptor::SizeOf(map, object);
2094 EvacuateObject<POINTER_OBJECT, UNKNOWN_SIZE, kObjectAlignment>(map,
2101 static inline void EvacuateFixedDoubleArray(Map* map,
2103 HeapObject* object) {
2104 int length = reinterpret_cast<FixedDoubleArray*>(object)->length();
2105 int object_size = FixedDoubleArray::SizeFor(length);
2106 EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE, kDoubleAlignment>(
2114 static inline void EvacuateByteArray(Map* map,
2116 HeapObject* object) {
2117 int object_size = reinterpret_cast<ByteArray*>(object)->ByteArraySize();
2118 EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE, kObjectAlignment>(
2119 map, slot, object, object_size);
2123 static inline void EvacuateSeqOneByteString(Map* map,
2125 HeapObject* object) {
2126 int object_size = SeqOneByteString::cast(object)->
2127 SeqOneByteStringSize(map->instance_type());
2128 EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE, kObjectAlignment>(
2129 map, slot, object, object_size);
2133 static inline void EvacuateSeqTwoByteString(Map* map,
2135 HeapObject* object) {
2136 int object_size = SeqTwoByteString::cast(object)->
2137 SeqTwoByteStringSize(map->instance_type());
2138 EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE, kObjectAlignment>(
2139 map, slot, object, object_size);
2143 static inline bool IsShortcutCandidate(int type) {
2144 return ((type & kShortcutTypeMask) == kShortcutTypeTag);
2147 static inline void EvacuateShortcutCandidate(Map* map,
2149 HeapObject* object) {
2150 ASSERT(IsShortcutCandidate(map->instance_type()));
2152 Heap* heap = map->GetHeap();
2154 if (marks_handling == IGNORE_MARKS &&
2155 ConsString::cast(object)->unchecked_second() ==
2156 heap->empty_string()) {
2158 HeapObject::cast(ConsString::cast(object)->unchecked_first());
2162 if (!heap->InNewSpace(first)) {
2163 object->set_map_word(MapWord::FromForwardingAddress(first));
2167 MapWord first_word = first->map_word();
2168 if (first_word.IsForwardingAddress()) {
2169 HeapObject* target = first_word.ToForwardingAddress();
2172 object->set_map_word(MapWord::FromForwardingAddress(target));
2176 heap->DoScavengeObject(first->map(), slot, first);
2177 object->set_map_word(MapWord::FromForwardingAddress(*slot));
2181 int object_size = ConsString::kSize;
2182 EvacuateObject<POINTER_OBJECT, SMALL, kObjectAlignment>(
2183 map, slot, object, object_size);
2186 template<ObjectContents object_contents>
2187 class ObjectEvacuationStrategy {
2189 template<int object_size>
2190 static inline void VisitSpecialized(Map* map,
2192 HeapObject* object) {
2193 EvacuateObject<object_contents, SMALL, kObjectAlignment>(
2194 map, slot, object, object_size);
2197 static inline void Visit(Map* map,
2199 HeapObject* object) {
2200 int object_size = map->instance_size();
2201 EvacuateObject<object_contents, SMALL, kObjectAlignment>(
2202 map, slot, object, object_size);
2206 static VisitorDispatchTable<ScavengingCallback> table_;
2210 template<MarksHandling marks_handling,
2211 LoggingAndProfiling logging_and_profiling_mode>
2212 VisitorDispatchTable<ScavengingCallback>
2213 ScavengingVisitor<marks_handling, logging_and_profiling_mode>::table_;
2216 static void InitializeScavengingVisitorsTables() {
2217 ScavengingVisitor<TRANSFER_MARKS,
2218 LOGGING_AND_PROFILING_DISABLED>::Initialize();
2219 ScavengingVisitor<IGNORE_MARKS, LOGGING_AND_PROFILING_DISABLED>::Initialize();
2220 ScavengingVisitor<TRANSFER_MARKS,
2221 LOGGING_AND_PROFILING_ENABLED>::Initialize();
2222 ScavengingVisitor<IGNORE_MARKS, LOGGING_AND_PROFILING_ENABLED>::Initialize();
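
// The four Initialize() calls above populate one static dispatch table per
// (MarksHandling x LoggingAndProfiling) combination.
// SelectScavengingVisitorsTable() below copies the table matching the
// current state (whether incremental marking is active, and whether logging
// or profiling is on) into scavenging_visitors_table_, so the per-object
// dispatch in the scavenger does not need to re-check these flags.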
2226 void Heap::SelectScavengingVisitorsTable() {
2227 bool logging_and_profiling =
2228 isolate()->logger()->is_logging() ||
2229 isolate()->cpu_profiler()->is_profiling() ||
2230 (isolate()->heap_profiler() != NULL &&
2231 isolate()->heap_profiler()->is_profiling());
2233 if (!incremental_marking()->IsMarking()) {
2234 if (!logging_and_profiling) {
2235 scavenging_visitors_table_.CopyFrom(
2236 ScavengingVisitor<IGNORE_MARKS,
2237 LOGGING_AND_PROFILING_DISABLED>::GetTable());
2239 scavenging_visitors_table_.CopyFrom(
2240 ScavengingVisitor<IGNORE_MARKS,
2241 LOGGING_AND_PROFILING_ENABLED>::GetTable());
2244 if (!logging_and_profiling) {
2245 scavenging_visitors_table_.CopyFrom(
2246 ScavengingVisitor<TRANSFER_MARKS,
2247 LOGGING_AND_PROFILING_DISABLED>::GetTable());
2249 scavenging_visitors_table_.CopyFrom(
2250 ScavengingVisitor<TRANSFER_MARKS,
2251 LOGGING_AND_PROFILING_ENABLED>::GetTable());
2254 if (incremental_marking()->IsCompacting()) {
    // When compacting, forbid short-circuiting of cons strings.
    // Scavenging code relies on the fact that new space objects
    // can't be evacuated into an evacuation candidate, but
    // short-circuiting violates this assumption.
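    // Concretely: short-circuiting rewrites a slot that pointed at a cons
    // string so that it points directly at the first component, which may
    // live in old space.  If that component sits on an evacuation candidate
    // page, the updated slot would have to be recorded for the compactor,
    // which the scavenger does not do, so the shortcut visitor is replaced
    // with the plain cons-string visitor here.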
2259 scavenging_visitors_table_.Register(
2260 StaticVisitorBase::kVisitShortcutCandidate,
2261 scavenging_visitors_table_.GetVisitorById(
2262 StaticVisitorBase::kVisitConsString));
2268 void Heap::ScavengeObjectSlow(HeapObject** p, HeapObject* object) {
2269 SLOW_ASSERT(HEAP->InFromSpace(object));
2270 MapWord first_word = object->map_word();
2271 SLOW_ASSERT(!first_word.IsForwardingAddress());
2272 Map* map = first_word.ToMap();
2273 map->GetHeap()->DoScavengeObject(map, p, object);
2277 MaybeObject* Heap::AllocatePartialMap(InstanceType instance_type,
2278 int instance_size) {
2280 MaybeObject* maybe_result = AllocateRawMap();
2281 if (!maybe_result->ToObject(&result)) return maybe_result;
2283 // Map::cast cannot be used due to uninitialized map field.
2284 reinterpret_cast<Map*>(result)->set_map(raw_unchecked_meta_map());
2285 reinterpret_cast<Map*>(result)->set_instance_type(instance_type);
2286 reinterpret_cast<Map*>(result)->set_instance_size(instance_size);
2287 reinterpret_cast<Map*>(result)->set_visitor_id(
2288 StaticVisitorBase::GetVisitorId(instance_type, instance_size));
2289 reinterpret_cast<Map*>(result)->set_inobject_properties(0);
2290 reinterpret_cast<Map*>(result)->set_pre_allocated_property_fields(0);
2291 reinterpret_cast<Map*>(result)->set_unused_property_fields(0);
2292 reinterpret_cast<Map*>(result)->set_bit_field(0);
2293 reinterpret_cast<Map*>(result)->set_bit_field2(0);
2294 int bit_field3 = Map::EnumLengthBits::encode(Map::kInvalidEnumCache) |
2295 Map::OwnsDescriptors::encode(true);
2296 reinterpret_cast<Map*>(result)->set_bit_field3(bit_field3);
2301 MaybeObject* Heap::AllocateMap(InstanceType instance_type,
2303 ElementsKind elements_kind) {
2305 MaybeObject* maybe_result = AllocateRawMap();
2306 if (!maybe_result->To(&result)) return maybe_result;
2308 Map* map = reinterpret_cast<Map*>(result);
2309 map->set_map_no_write_barrier(meta_map());
2310 map->set_instance_type(instance_type);
2311 map->set_visitor_id(
2312 StaticVisitorBase::GetVisitorId(instance_type, instance_size));
2313 map->set_prototype(null_value(), SKIP_WRITE_BARRIER);
2314 map->set_constructor(null_value(), SKIP_WRITE_BARRIER);
2315 map->set_instance_size(instance_size);
2316 map->set_inobject_properties(0);
2317 map->set_pre_allocated_property_fields(0);
2318 map->set_code_cache(empty_fixed_array(), SKIP_WRITE_BARRIER);
2319 map->set_dependent_code(DependentCode::cast(empty_fixed_array()),
2320 SKIP_WRITE_BARRIER);
2321 map->init_back_pointer(undefined_value());
2322 map->set_unused_property_fields(0);
2323 map->set_instance_descriptors(empty_descriptor_array());
2324 map->set_bit_field(0);
2325 map->set_bit_field2(1 << Map::kIsExtensible);
2326 int bit_field3 = Map::EnumLengthBits::encode(Map::kInvalidEnumCache) |
2327 Map::OwnsDescriptors::encode(true);
2328 map->set_bit_field3(bit_field3);
2329 map->set_elements_kind(elements_kind);
2335 MaybeObject* Heap::AllocateCodeCache() {
2336 CodeCache* code_cache;
2337 { MaybeObject* maybe_code_cache = AllocateStruct(CODE_CACHE_TYPE);
2338 if (!maybe_code_cache->To(&code_cache)) return maybe_code_cache;
2340 code_cache->set_default_cache(empty_fixed_array(), SKIP_WRITE_BARRIER);
2341 code_cache->set_normal_type_cache(undefined_value(), SKIP_WRITE_BARRIER);
2346 MaybeObject* Heap::AllocatePolymorphicCodeCache() {
2347 return AllocateStruct(POLYMORPHIC_CODE_CACHE_TYPE);
2351 MaybeObject* Heap::AllocateAccessorPair() {
2352 AccessorPair* accessors;
2353 { MaybeObject* maybe_accessors = AllocateStruct(ACCESSOR_PAIR_TYPE);
2354 if (!maybe_accessors->To(&accessors)) return maybe_accessors;
2356 accessors->set_getter(the_hole_value(), SKIP_WRITE_BARRIER);
2357 accessors->set_setter(the_hole_value(), SKIP_WRITE_BARRIER);
2362 MaybeObject* Heap::AllocateTypeFeedbackInfo() {
2363 TypeFeedbackInfo* info;
2364 { MaybeObject* maybe_info = AllocateStruct(TYPE_FEEDBACK_INFO_TYPE);
2365 if (!maybe_info->To(&info)) return maybe_info;
2367 info->initialize_storage();
2368 info->set_type_feedback_cells(TypeFeedbackCells::cast(empty_fixed_array()),
2369 SKIP_WRITE_BARRIER);
2374 MaybeObject* Heap::AllocateAliasedArgumentsEntry(int aliased_context_slot) {
2375 AliasedArgumentsEntry* entry;
2376 { MaybeObject* maybe_entry = AllocateStruct(ALIASED_ARGUMENTS_ENTRY_TYPE);
2377 if (!maybe_entry->To(&entry)) return maybe_entry;
2379 entry->set_aliased_context_slot(aliased_context_slot);
2384 const Heap::StringTypeTable Heap::string_type_table[] = {
2385 #define STRING_TYPE_ELEMENT(type, size, name, camel_name) \
2386 {type, size, k##camel_name##MapRootIndex},
2387 STRING_TYPE_LIST(STRING_TYPE_ELEMENT)
2388 #undef STRING_TYPE_ELEMENT
2392 const Heap::ConstantStringTable Heap::constant_string_table[] = {
2393 #define CONSTANT_STRING_ELEMENT(name, contents) \
2394 {contents, k##name##RootIndex},
2395 INTERNALIZED_STRING_LIST(CONSTANT_STRING_ELEMENT)
2396 #undef CONSTANT_STRING_ELEMENT
2400 const Heap::StructTable Heap::struct_table[] = {
2401 #define STRUCT_TABLE_ELEMENT(NAME, Name, name) \
2402 { NAME##_TYPE, Name::kSize, k##Name##MapRootIndex },
2403 STRUCT_LIST(STRUCT_TABLE_ELEMENT)
2404 #undef STRUCT_TABLE_ELEMENT
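// For illustration (a sketch, not generated output): after macro expansion,
// each STRUCT_LIST entry contributes one row of this table, roughly
//   { ACCESSOR_PAIR_TYPE, AccessorPair::kSize, kAccessorPairMapRootIndex },
// which CreateInitialMaps() below walks to allocate one map per struct type
// and store it at the corresponding root index.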
2408 bool Heap::CreateInitialMaps() {
2410 { MaybeObject* maybe_obj = AllocatePartialMap(MAP_TYPE, Map::kSize);
2411 if (!maybe_obj->ToObject(&obj)) return false;
2413 // Map::cast cannot be used due to uninitialized map field.
2414 Map* new_meta_map = reinterpret_cast<Map*>(obj);
2415 set_meta_map(new_meta_map);
2416 new_meta_map->set_map(new_meta_map);
2418 { MaybeObject* maybe_obj =
2419 AllocatePartialMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2420 if (!maybe_obj->ToObject(&obj)) return false;
2422 set_fixed_array_map(Map::cast(obj));
2424 { MaybeObject* maybe_obj = AllocatePartialMap(ODDBALL_TYPE, Oddball::kSize);
2425 if (!maybe_obj->ToObject(&obj)) return false;
2427 set_oddball_map(Map::cast(obj));
2429 // Allocate the empty array.
2430 { MaybeObject* maybe_obj = AllocateEmptyFixedArray();
2431 if (!maybe_obj->ToObject(&obj)) return false;
2433 set_empty_fixed_array(FixedArray::cast(obj));
2435 { MaybeObject* maybe_obj = Allocate(oddball_map(), OLD_POINTER_SPACE);
2436 if (!maybe_obj->ToObject(&obj)) return false;
2438 set_null_value(Oddball::cast(obj));
2439 Oddball::cast(obj)->set_kind(Oddball::kNull);
2441 { MaybeObject* maybe_obj = Allocate(oddball_map(), OLD_POINTER_SPACE);
2442 if (!maybe_obj->ToObject(&obj)) return false;
2444 set_undefined_value(Oddball::cast(obj));
2445 Oddball::cast(obj)->set_kind(Oddball::kUndefined);
2446 ASSERT(!InNewSpace(undefined_value()));
2448 // Allocate the empty descriptor array.
2449 { MaybeObject* maybe_obj = AllocateEmptyFixedArray();
2450 if (!maybe_obj->ToObject(&obj)) return false;
2452 set_empty_descriptor_array(DescriptorArray::cast(obj));
2454 // Fix the instance_descriptors for the existing maps.
2455 meta_map()->set_code_cache(empty_fixed_array());
2456 meta_map()->set_dependent_code(DependentCode::cast(empty_fixed_array()));
2457 meta_map()->init_back_pointer(undefined_value());
2458 meta_map()->set_instance_descriptors(empty_descriptor_array());
2460 fixed_array_map()->set_code_cache(empty_fixed_array());
2461 fixed_array_map()->set_dependent_code(
2462 DependentCode::cast(empty_fixed_array()));
2463 fixed_array_map()->init_back_pointer(undefined_value());
2464 fixed_array_map()->set_instance_descriptors(empty_descriptor_array());
2466 oddball_map()->set_code_cache(empty_fixed_array());
2467 oddball_map()->set_dependent_code(DependentCode::cast(empty_fixed_array()));
2468 oddball_map()->init_back_pointer(undefined_value());
2469 oddball_map()->set_instance_descriptors(empty_descriptor_array());
2471 // Fix prototype object for existing maps.
2472 meta_map()->set_prototype(null_value());
2473 meta_map()->set_constructor(null_value());
2475 fixed_array_map()->set_prototype(null_value());
2476 fixed_array_map()->set_constructor(null_value());
2478 oddball_map()->set_prototype(null_value());
2479 oddball_map()->set_constructor(null_value());
2481 { MaybeObject* maybe_obj =
2482 AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2483 if (!maybe_obj->ToObject(&obj)) return false;
2485 set_fixed_cow_array_map(Map::cast(obj));
2486 ASSERT(fixed_array_map() != fixed_cow_array_map());
2488 { MaybeObject* maybe_obj =
2489 AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2490 if (!maybe_obj->ToObject(&obj)) return false;
2492 set_scope_info_map(Map::cast(obj));
2494 { MaybeObject* maybe_obj = AllocateMap(HEAP_NUMBER_TYPE, HeapNumber::kSize);
2495 if (!maybe_obj->ToObject(&obj)) return false;
2497 set_heap_number_map(Map::cast(obj));
2499 { MaybeObject* maybe_obj = AllocateMap(SYMBOL_TYPE, Symbol::kSize);
2500 if (!maybe_obj->ToObject(&obj)) return false;
2502 set_symbol_map(Map::cast(obj));
2504 { MaybeObject* maybe_obj = AllocateMap(FOREIGN_TYPE, Foreign::kSize);
2505 if (!maybe_obj->ToObject(&obj)) return false;
2507 set_foreign_map(Map::cast(obj));
2509 for (unsigned i = 0; i < ARRAY_SIZE(string_type_table); i++) {
2510 const StringTypeTable& entry = string_type_table[i];
2511 { MaybeObject* maybe_obj = AllocateMap(entry.type, entry.size);
2512 if (!maybe_obj->ToObject(&obj)) return false;
2514 roots_[entry.index] = Map::cast(obj);
2517 { MaybeObject* maybe_obj = AllocateMap(STRING_TYPE, kVariableSizeSentinel);
2518 if (!maybe_obj->ToObject(&obj)) return false;
2520 set_undetectable_string_map(Map::cast(obj));
2521 Map::cast(obj)->set_is_undetectable();
2523 { MaybeObject* maybe_obj =
2524 AllocateMap(ASCII_STRING_TYPE, kVariableSizeSentinel);
2525 if (!maybe_obj->ToObject(&obj)) return false;
2527 set_undetectable_ascii_string_map(Map::cast(obj));
2528 Map::cast(obj)->set_is_undetectable();
2530 { MaybeObject* maybe_obj =
2531 AllocateMap(FIXED_DOUBLE_ARRAY_TYPE, kVariableSizeSentinel);
2532 if (!maybe_obj->ToObject(&obj)) return false;
2534 set_fixed_double_array_map(Map::cast(obj));
2536 { MaybeObject* maybe_obj =
2537 AllocateMap(BYTE_ARRAY_TYPE, kVariableSizeSentinel);
2538 if (!maybe_obj->ToObject(&obj)) return false;
2540 set_byte_array_map(Map::cast(obj));
2542 { MaybeObject* maybe_obj =
2543 AllocateMap(FREE_SPACE_TYPE, kVariableSizeSentinel);
2544 if (!maybe_obj->ToObject(&obj)) return false;
2546 set_free_space_map(Map::cast(obj));
2548 { MaybeObject* maybe_obj = AllocateByteArray(0, TENURED);
2549 if (!maybe_obj->ToObject(&obj)) return false;
2551 set_empty_byte_array(ByteArray::cast(obj));
2553 { MaybeObject* maybe_obj =
2554 AllocateMap(EXTERNAL_PIXEL_ARRAY_TYPE, ExternalArray::kAlignedSize);
2555 if (!maybe_obj->ToObject(&obj)) return false;
2557 set_external_pixel_array_map(Map::cast(obj));
2559 { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_BYTE_ARRAY_TYPE,
2560 ExternalArray::kAlignedSize);
2561 if (!maybe_obj->ToObject(&obj)) return false;
2563 set_external_byte_array_map(Map::cast(obj));
2565 { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE,
2566 ExternalArray::kAlignedSize);
2567 if (!maybe_obj->ToObject(&obj)) return false;
2569 set_external_unsigned_byte_array_map(Map::cast(obj));
2571 { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_SHORT_ARRAY_TYPE,
2572 ExternalArray::kAlignedSize);
2573 if (!maybe_obj->ToObject(&obj)) return false;
2575 set_external_short_array_map(Map::cast(obj));
2577 { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE,
2578 ExternalArray::kAlignedSize);
2579 if (!maybe_obj->ToObject(&obj)) return false;
2581 set_external_unsigned_short_array_map(Map::cast(obj));
2583 { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_INT_ARRAY_TYPE,
2584 ExternalArray::kAlignedSize);
2585 if (!maybe_obj->ToObject(&obj)) return false;
2587 set_external_int_array_map(Map::cast(obj));
2589 { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_UNSIGNED_INT_ARRAY_TYPE,
2590 ExternalArray::kAlignedSize);
2591 if (!maybe_obj->ToObject(&obj)) return false;
2593 set_external_unsigned_int_array_map(Map::cast(obj));
2595 { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_FLOAT_ARRAY_TYPE,
2596 ExternalArray::kAlignedSize);
2597 if (!maybe_obj->ToObject(&obj)) return false;
2599 set_external_float_array_map(Map::cast(obj));
2601 { MaybeObject* maybe_obj =
2602 AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2603 if (!maybe_obj->ToObject(&obj)) return false;
2605 set_non_strict_arguments_elements_map(Map::cast(obj));
2607 { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_DOUBLE_ARRAY_TYPE,
2608 ExternalArray::kAlignedSize);
2609 if (!maybe_obj->ToObject(&obj)) return false;
2611 set_external_double_array_map(Map::cast(obj));
2613 { MaybeObject* maybe_obj = AllocateEmptyExternalArray(kExternalByteArray);
2614 if (!maybe_obj->ToObject(&obj)) return false;
2616 set_empty_external_byte_array(ExternalArray::cast(obj));
2618 { MaybeObject* maybe_obj =
2619 AllocateEmptyExternalArray(kExternalUnsignedByteArray);
2620 if (!maybe_obj->ToObject(&obj)) return false;
2622 set_empty_external_unsigned_byte_array(ExternalArray::cast(obj));
2624 { MaybeObject* maybe_obj = AllocateEmptyExternalArray(kExternalShortArray);
2625 if (!maybe_obj->ToObject(&obj)) return false;
2627 set_empty_external_short_array(ExternalArray::cast(obj));
2629 { MaybeObject* maybe_obj = AllocateEmptyExternalArray(
2630 kExternalUnsignedShortArray);
2631 if (!maybe_obj->ToObject(&obj)) return false;
2633 set_empty_external_unsigned_short_array(ExternalArray::cast(obj));
2635 { MaybeObject* maybe_obj = AllocateEmptyExternalArray(kExternalIntArray);
2636 if (!maybe_obj->ToObject(&obj)) return false;
2638 set_empty_external_int_array(ExternalArray::cast(obj));
2640 { MaybeObject* maybe_obj =
2641 AllocateEmptyExternalArray(kExternalUnsignedIntArray);
2642 if (!maybe_obj->ToObject(&obj)) return false;
2644 set_empty_external_unsigned_int_array(ExternalArray::cast(obj));
2646 { MaybeObject* maybe_obj = AllocateEmptyExternalArray(kExternalFloatArray);
2647 if (!maybe_obj->ToObject(&obj)) return false;
2649 set_empty_external_float_array(ExternalArray::cast(obj));
2651 { MaybeObject* maybe_obj = AllocateEmptyExternalArray(kExternalDoubleArray);
2652 if (!maybe_obj->ToObject(&obj)) return false;
2654 set_empty_external_double_array(ExternalArray::cast(obj));
2656 { MaybeObject* maybe_obj = AllocateEmptyExternalArray(kExternalPixelArray);
2657 if (!maybe_obj->ToObject(&obj)) return false;
2659 set_empty_external_pixel_array(ExternalArray::cast(obj));
2661 { MaybeObject* maybe_obj = AllocateMap(CODE_TYPE, kVariableSizeSentinel);
2662 if (!maybe_obj->ToObject(&obj)) return false;
2664 set_code_map(Map::cast(obj));
2666 { MaybeObject* maybe_obj = AllocateMap(JS_GLOBAL_PROPERTY_CELL_TYPE,
2667 JSGlobalPropertyCell::kSize);
2668 if (!maybe_obj->ToObject(&obj)) return false;
2670 set_global_property_cell_map(Map::cast(obj));
2672 { MaybeObject* maybe_obj = AllocateMap(FILLER_TYPE, kPointerSize);
2673 if (!maybe_obj->ToObject(&obj)) return false;
2675 set_one_pointer_filler_map(Map::cast(obj));
2677 { MaybeObject* maybe_obj = AllocateMap(FILLER_TYPE, 2 * kPointerSize);
2678 if (!maybe_obj->ToObject(&obj)) return false;
2680 set_two_pointer_filler_map(Map::cast(obj));
2682 for (unsigned i = 0; i < ARRAY_SIZE(struct_table); i++) {
2683 const StructTable& entry = struct_table[i];
2684 { MaybeObject* maybe_obj = AllocateMap(entry.type, entry.size);
2685 if (!maybe_obj->ToObject(&obj)) return false;
2687 roots_[entry.index] = Map::cast(obj);
2690 { MaybeObject* maybe_obj =
2691 AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2692 if (!maybe_obj->ToObject(&obj)) return false;
2694 set_hash_table_map(Map::cast(obj));
2696 { MaybeObject* maybe_obj =
2697 AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2698 if (!maybe_obj->ToObject(&obj)) return false;
2700 set_function_context_map(Map::cast(obj));
2702 { MaybeObject* maybe_obj =
2703 AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2704 if (!maybe_obj->ToObject(&obj)) return false;
2706 set_catch_context_map(Map::cast(obj));
2708 { MaybeObject* maybe_obj =
2709 AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2710 if (!maybe_obj->ToObject(&obj)) return false;
2712 set_with_context_map(Map::cast(obj));
2714 { MaybeObject* maybe_obj =
2715 AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2716 if (!maybe_obj->ToObject(&obj)) return false;
2718 set_block_context_map(Map::cast(obj));
2720 { MaybeObject* maybe_obj =
2721 AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2722 if (!maybe_obj->ToObject(&obj)) return false;
2724 set_module_context_map(Map::cast(obj));
2726 { MaybeObject* maybe_obj =
2727 AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2728 if (!maybe_obj->ToObject(&obj)) return false;
2730 set_global_context_map(Map::cast(obj));
2732 { MaybeObject* maybe_obj =
2733 AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2734 if (!maybe_obj->ToObject(&obj)) return false;
2736 Map* native_context_map = Map::cast(obj);
2737 native_context_map->set_dictionary_map(true);
2738 native_context_map->set_visitor_id(StaticVisitorBase::kVisitNativeContext);
2739 set_native_context_map(native_context_map);
2741 { MaybeObject* maybe_obj = AllocateMap(SHARED_FUNCTION_INFO_TYPE,
2742 SharedFunctionInfo::kAlignedSize);
2743 if (!maybe_obj->ToObject(&obj)) return false;
2745 set_shared_function_info_map(Map::cast(obj));
2747 { MaybeObject* maybe_obj = AllocateMap(JS_MESSAGE_OBJECT_TYPE,
2748 JSMessageObject::kSize);
2749 if (!maybe_obj->ToObject(&obj)) return false;
2751 set_message_object_map(Map::cast(obj));
2754 { MaybeObject* maybe_obj =
2755 AllocateMap(JS_OBJECT_TYPE, JSObject::kHeaderSize + kPointerSize);
2756 if (!maybe_obj->To(&external_map)) return false;
2758 external_map->set_is_extensible(false);
2759 set_external_map(external_map);
2761 ASSERT(!InNewSpace(empty_fixed_array()));
2766 MaybeObject* Heap::AllocateHeapNumber(double value, PretenureFlag pretenure) {
2767 // Statically ensure that it is safe to allocate heap numbers in paged
2769 STATIC_ASSERT(HeapNumber::kSize <= Page::kNonCodeObjectAreaSize);
2770 AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
2773 { MaybeObject* maybe_result =
2774 AllocateRaw(HeapNumber::kSize, space, OLD_DATA_SPACE);
2775 if (!maybe_result->ToObject(&result)) return maybe_result;
2778 HeapObject::cast(result)->set_map_no_write_barrier(heap_number_map());
2779 HeapNumber::cast(result)->set_value(value);
2784 MaybeObject* Heap::AllocateHeapNumber(double value) {
  // Use the general version if we're forced to always allocate.
2786 if (always_allocate()) return AllocateHeapNumber(value, TENURED);
2788 // This version of AllocateHeapNumber is optimized for
2789 // allocation in new space.
2790 STATIC_ASSERT(HeapNumber::kSize <= Page::kMaxNonCodeHeapObjectSize);
2792 { MaybeObject* maybe_result = new_space_.AllocateRaw(HeapNumber::kSize);
2793 if (!maybe_result->ToObject(&result)) return maybe_result;
2795 HeapObject::cast(result)->set_map_no_write_barrier(heap_number_map());
2796 HeapNumber::cast(result)->set_value(value);
2801 MaybeObject* Heap::AllocateJSGlobalPropertyCell(Object* value) {
2803 { MaybeObject* maybe_result = AllocateRawCell();
2804 if (!maybe_result->ToObject(&result)) return maybe_result;
2806 HeapObject::cast(result)->set_map_no_write_barrier(
2807 global_property_cell_map());
2808 JSGlobalPropertyCell::cast(result)->set_value(value);
2813 MaybeObject* Heap::AllocateBox(Object* value, PretenureFlag pretenure) {
2815 MaybeObject* maybe_result = AllocateStruct(BOX_TYPE);
2816 if (!maybe_result->To(&result)) return maybe_result;
2817 result->set_value(value);
2822 MaybeObject* Heap::CreateOddball(const char* to_string,
2826 { MaybeObject* maybe_result = Allocate(oddball_map(), OLD_POINTER_SPACE);
2827 if (!maybe_result->ToObject(&result)) return maybe_result;
2829 return Oddball::cast(result)->Initialize(to_string, to_number, kind);
2833 bool Heap::CreateApiObjects() {
2836 { MaybeObject* maybe_obj = AllocateMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
2837 if (!maybe_obj->ToObject(&obj)) return false;
  // Don't use Smi-only elements optimizations for objects with the neander
  // map.  There are too many cases where element values are set directly with
  // a bottleneck to trap the Smi-only -> fast elements transition, and there
  // appears to be no benefit in optimizing this case.
2843 Map* new_neander_map = Map::cast(obj);
2844 new_neander_map->set_elements_kind(TERMINAL_FAST_ELEMENTS_KIND);
2845 set_neander_map(new_neander_map);
2847 { MaybeObject* maybe_obj = AllocateJSObjectFromMap(neander_map());
2848 if (!maybe_obj->ToObject(&obj)) return false;
2851 { MaybeObject* maybe_elements = AllocateFixedArray(2);
2852 if (!maybe_elements->ToObject(&elements)) return false;
2854 FixedArray::cast(elements)->set(0, Smi::FromInt(0));
2855 JSObject::cast(obj)->set_elements(FixedArray::cast(elements));
2856 set_message_listeners(JSObject::cast(obj));
void Heap::CreateJSEntryStub() {
  JSEntryStub stub;
  set_js_entry_code(*stub.GetCode(isolate()));
}


void Heap::CreateJSConstructEntryStub() {
  JSConstructEntryStub stub;
  set_js_construct_entry_code(*stub.GetCode(isolate()));
}


void Heap::CreateFixedStubs() {
  // Here we create roots for fixed stubs.  They are needed at GC
  // for cooking and uncooking (check out frames.cc).
  // This eliminates the need for doing a dictionary lookup in the
  // stub cache for these stubs.
  HandleScope scope(isolate());
  // gcc-4.4 has a problem generating correct code for the following snippet:
  // { JSEntryStub stub;
  //   js_entry_code_ = *stub.GetCode();
  // }
  // { JSConstructEntryStub stub;
  //   js_construct_entry_code_ = *stub.GetCode();
  // }
  // To work around the problem, make separate functions without inlining.
  Heap::CreateJSEntryStub();
  Heap::CreateJSConstructEntryStub();

  // Create stubs that should be there, so we don't unexpectedly have to
  // create them if we need them during the creation of another stub.
  // Stub creation mixes raw pointers and handles in an unsafe manner so
  // we cannot create stubs while we are creating stubs.
  CodeStub::GenerateStubsAheadOfTime(isolate());
}
2899 bool Heap::CreateInitialObjects() {
2902 // The -0 value must be set before NumberFromDouble works.
2903 { MaybeObject* maybe_obj = AllocateHeapNumber(-0.0, TENURED);
2904 if (!maybe_obj->ToObject(&obj)) return false;
2906 set_minus_zero_value(HeapNumber::cast(obj));
2907 ASSERT(std::signbit(minus_zero_value()->Number()) != 0);
2909 { MaybeObject* maybe_obj = AllocateHeapNumber(OS::nan_value(), TENURED);
2910 if (!maybe_obj->ToObject(&obj)) return false;
2912 set_nan_value(HeapNumber::cast(obj));
2914 { MaybeObject* maybe_obj = AllocateHeapNumber(V8_INFINITY, TENURED);
2915 if (!maybe_obj->ToObject(&obj)) return false;
2917 set_infinity_value(HeapNumber::cast(obj));
  // The hole has not been created yet, but we want to put something
  // predictable in the gaps in the string table, so let's make that Smi zero.
  set_the_hole_value(reinterpret_cast<Oddball*>(Smi::FromInt(0)));
2923 // Allocate initial string table.
2924 { MaybeObject* maybe_obj =
2925 StringTable::Allocate(this, kInitialStringTableSize);
2926 if (!maybe_obj->ToObject(&obj)) return false;
2928 // Don't use set_string_table() due to asserts.
2929 roots_[kStringTableRootIndex] = obj;
2931 // Finish initializing oddballs after creating the string table.
2932 { MaybeObject* maybe_obj =
2933 undefined_value()->Initialize("undefined",
2935 Oddball::kUndefined);
2936 if (!maybe_obj->ToObject(&obj)) return false;
2939 // Initialize the null_value.
2940 { MaybeObject* maybe_obj =
2941 null_value()->Initialize("null", Smi::FromInt(0), Oddball::kNull);
2942 if (!maybe_obj->ToObject(&obj)) return false;
2945 { MaybeObject* maybe_obj = CreateOddball("true",
2948 if (!maybe_obj->ToObject(&obj)) return false;
2950 set_true_value(Oddball::cast(obj));
2952 { MaybeObject* maybe_obj = CreateOddball("false",
2955 if (!maybe_obj->ToObject(&obj)) return false;
2957 set_false_value(Oddball::cast(obj));
2959 { MaybeObject* maybe_obj = CreateOddball("hole",
2962 if (!maybe_obj->ToObject(&obj)) return false;
2964 set_the_hole_value(Oddball::cast(obj));
2966 { MaybeObject* maybe_obj = CreateOddball("uninitialized",
2968 Oddball::kUninitialized);
2969 if (!maybe_obj->ToObject(&obj)) return false;
2971 set_uninitialized_value(Oddball::cast(obj));
2973 { MaybeObject* maybe_obj = CreateOddball("arguments_marker",
2975 Oddball::kArgumentMarker);
2976 if (!maybe_obj->ToObject(&obj)) return false;
2978 set_arguments_marker(Oddball::cast(obj));
2980 { MaybeObject* maybe_obj = CreateOddball("no_interceptor_result_sentinel",
2983 if (!maybe_obj->ToObject(&obj)) return false;
2985 set_no_interceptor_result_sentinel(obj);
2987 { MaybeObject* maybe_obj = CreateOddball("termination_exception",
2990 if (!maybe_obj->ToObject(&obj)) return false;
2992 set_termination_exception(obj);
2994 for (unsigned i = 0; i < ARRAY_SIZE(constant_string_table); i++) {
2995 { MaybeObject* maybe_obj =
2996 InternalizeUtf8String(constant_string_table[i].contents);
2997 if (!maybe_obj->ToObject(&obj)) return false;
2999 roots_[constant_string_table[i].index] = String::cast(obj);
3002 // Allocate the hidden string which is used to identify the hidden properties
3003 // in JSObjects. The hash code has a special value so that it will not match
3004 // the empty string when searching for the property. It cannot be part of the
3005 // loop above because it needs to be allocated manually with the special
3006 // hash code in place. The hash code for the hidden_string is zero to ensure
3007 // that it will always be at the first entry in property descriptors.
3008 { MaybeObject* maybe_obj = AllocateOneByteInternalizedString(
3009 OneByteVector("", 0), String::kEmptyStringHash);
3010 if (!maybe_obj->ToObject(&obj)) return false;
3012 hidden_string_ = String::cast(obj);
3014 // Allocate the code_stubs dictionary. The initial size is set to avoid
3015 // expanding the dictionary during bootstrapping.
3016 { MaybeObject* maybe_obj = UnseededNumberDictionary::Allocate(this, 128);
3017 if (!maybe_obj->ToObject(&obj)) return false;
3019 set_code_stubs(UnseededNumberDictionary::cast(obj));
3022 // Allocate the non_monomorphic_cache used in stub-cache.cc. The initial size
3023 // is set to avoid expanding the dictionary during bootstrapping.
3024 { MaybeObject* maybe_obj = UnseededNumberDictionary::Allocate(this, 64);
3025 if (!maybe_obj->ToObject(&obj)) return false;
3027 set_non_monomorphic_cache(UnseededNumberDictionary::cast(obj));
3029 { MaybeObject* maybe_obj = AllocatePolymorphicCodeCache();
3030 if (!maybe_obj->ToObject(&obj)) return false;
3032 set_polymorphic_code_cache(PolymorphicCodeCache::cast(obj));
3034 set_instanceof_cache_function(Smi::FromInt(0));
3035 set_instanceof_cache_map(Smi::FromInt(0));
3036 set_instanceof_cache_answer(Smi::FromInt(0));
3040 // Allocate the dictionary of intrinsic function names.
3041 { MaybeObject* maybe_obj =
3042 NameDictionary::Allocate(this, Runtime::kNumFunctions);
3043 if (!maybe_obj->ToObject(&obj)) return false;
3045 { MaybeObject* maybe_obj = Runtime::InitializeIntrinsicFunctionNames(this,
3047 if (!maybe_obj->ToObject(&obj)) return false;
3049 set_intrinsic_function_names(NameDictionary::cast(obj));
3051 { MaybeObject* maybe_obj = AllocateInitialNumberStringCache();
3052 if (!maybe_obj->ToObject(&obj)) return false;
3054 set_number_string_cache(FixedArray::cast(obj));
3056 // Allocate cache for single character one byte strings.
3057 { MaybeObject* maybe_obj =
3058 AllocateFixedArray(String::kMaxOneByteCharCode + 1, TENURED);
3059 if (!maybe_obj->ToObject(&obj)) return false;
3061 set_single_character_string_cache(FixedArray::cast(obj));
3063 // Allocate cache for string split.
3064 { MaybeObject* maybe_obj = AllocateFixedArray(
3065 RegExpResultsCache::kRegExpResultsCacheSize, TENURED);
3066 if (!maybe_obj->ToObject(&obj)) return false;
3068 set_string_split_cache(FixedArray::cast(obj));
3070 { MaybeObject* maybe_obj = AllocateFixedArray(
3071 RegExpResultsCache::kRegExpResultsCacheSize, TENURED);
3072 if (!maybe_obj->ToObject(&obj)) return false;
3074 set_regexp_multiple_cache(FixedArray::cast(obj));
3076 // Allocate cache for external strings pointing to native source code.
3077 { MaybeObject* maybe_obj = AllocateFixedArray(Natives::GetBuiltinsCount());
3078 if (!maybe_obj->ToObject(&obj)) return false;
3080 set_natives_source_cache(FixedArray::cast(obj));
3082 // Allocate object to hold object observation state.
3083 { MaybeObject* maybe_obj = AllocateMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
3084 if (!maybe_obj->ToObject(&obj)) return false;
3086 { MaybeObject* maybe_obj = AllocateJSObjectFromMap(Map::cast(obj));
3087 if (!maybe_obj->ToObject(&obj)) return false;
3089 set_observation_state(JSObject::cast(obj));
3091 { MaybeObject* maybe_obj = AllocateSymbol();
3092 if (!maybe_obj->ToObject(&obj)) return false;
3094 set_frozen_symbol(Symbol::cast(obj));
3096 { MaybeObject* maybe_obj = SeededNumberDictionary::Allocate(this, 0, TENURED);
3097 if (!maybe_obj->ToObject(&obj)) return false;
3099 SeededNumberDictionary::cast(obj)->set_requires_slow_elements();
3100 set_empty_slow_element_dictionary(SeededNumberDictionary::cast(obj));
3102 // Handling of script id generation is in Factory::NewScript.
3103 set_last_script_id(undefined_value());
3105 // Initialize keyed lookup cache.
3106 isolate_->keyed_lookup_cache()->Clear();
3108 // Initialize context slot cache.
3109 isolate_->context_slot_cache()->Clear();
3111 // Initialize descriptor cache.
3112 isolate_->descriptor_lookup_cache()->Clear();
3114 // Initialize compilation cache.
3115 isolate_->compilation_cache()->Clear();
3121 bool Heap::RootCanBeWrittenAfterInitialization(Heap::RootListIndex root_index) {
3122 RootListIndex writable_roots[] = {
3123 kStoreBufferTopRootIndex,
3124 kStackLimitRootIndex,
3125 kNumberStringCacheRootIndex,
3126 kInstanceofCacheFunctionRootIndex,
3127 kInstanceofCacheMapRootIndex,
3128 kInstanceofCacheAnswerRootIndex,
3129 kCodeStubsRootIndex,
3130 kNonMonomorphicCacheRootIndex,
3131 kPolymorphicCodeCacheRootIndex,
3132 kLastScriptIdRootIndex,
3133 kEmptyScriptRootIndex,
3134 kRealStackLimitRootIndex,
3135 kArgumentsAdaptorDeoptPCOffsetRootIndex,
3136 kConstructStubDeoptPCOffsetRootIndex,
3137 kGetterStubDeoptPCOffsetRootIndex,
3138 kSetterStubDeoptPCOffsetRootIndex,
3139 kStringTableRootIndex,
3142 for (unsigned int i = 0; i < ARRAY_SIZE(writable_roots); i++) {
3143 if (root_index == writable_roots[i])
3150 Object* RegExpResultsCache::Lookup(Heap* heap,
3152 Object* key_pattern,
3153 ResultsCacheType type) {
3155 if (!key_string->IsInternalizedString()) return Smi::FromInt(0);
3156 if (type == STRING_SPLIT_SUBSTRINGS) {
3157 ASSERT(key_pattern->IsString());
3158 if (!key_pattern->IsInternalizedString()) return Smi::FromInt(0);
3159 cache = heap->string_split_cache();
3161 ASSERT(type == REGEXP_MULTIPLE_INDICES);
3162 ASSERT(key_pattern->IsFixedArray());
3163 cache = heap->regexp_multiple_cache();
3166 uint32_t hash = key_string->Hash();
3167 uint32_t index = ((hash & (kRegExpResultsCacheSize - 1)) &
3168 ~(kArrayEntriesPerCacheEntry - 1));
3169 if (cache->get(index + kStringOffset) == key_string &&
3170 cache->get(index + kPatternOffset) == key_pattern) {
3171 return cache->get(index + kArrayOffset);
3174 ((index + kArrayEntriesPerCacheEntry) & (kRegExpResultsCacheSize - 1));
3175 if (cache->get(index + kStringOffset) == key_string &&
3176 cache->get(index + kPatternOffset) == key_pattern) {
3177 return cache->get(index + kArrayOffset);
3179 return Smi::FromInt(0);
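
// Note on the lookup above: the cache is probed at two slots.  The primary
// slot index is the key string's hash, masked to the cache size and rounded
// down to an entry boundary (kArrayEntriesPerCacheEntry fields per entry);
// the secondary slot is the next entry, again masked to the cache size.
// Enter() below uses the same two slots when inserting: if both are already
// occupied, the secondary entry is cleared and the primary is overwritten
// with the new entry.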
3183 void RegExpResultsCache::Enter(Heap* heap,
3185 Object* key_pattern,
3186 FixedArray* value_array,
3187 ResultsCacheType type) {
3189 if (!key_string->IsInternalizedString()) return;
3190 if (type == STRING_SPLIT_SUBSTRINGS) {
3191 ASSERT(key_pattern->IsString());
3192 if (!key_pattern->IsInternalizedString()) return;
3193 cache = heap->string_split_cache();
3195 ASSERT(type == REGEXP_MULTIPLE_INDICES);
3196 ASSERT(key_pattern->IsFixedArray());
3197 cache = heap->regexp_multiple_cache();
3200 uint32_t hash = key_string->Hash();
3201 uint32_t index = ((hash & (kRegExpResultsCacheSize - 1)) &
3202 ~(kArrayEntriesPerCacheEntry - 1));
3203 if (cache->get(index + kStringOffset) == Smi::FromInt(0)) {
3204 cache->set(index + kStringOffset, key_string);
3205 cache->set(index + kPatternOffset, key_pattern);
3206 cache->set(index + kArrayOffset, value_array);
3209 ((index + kArrayEntriesPerCacheEntry) & (kRegExpResultsCacheSize - 1));
3210 if (cache->get(index2 + kStringOffset) == Smi::FromInt(0)) {
3211 cache->set(index2 + kStringOffset, key_string);
3212 cache->set(index2 + kPatternOffset, key_pattern);
3213 cache->set(index2 + kArrayOffset, value_array);
3215 cache->set(index2 + kStringOffset, Smi::FromInt(0));
3216 cache->set(index2 + kPatternOffset, Smi::FromInt(0));
3217 cache->set(index2 + kArrayOffset, Smi::FromInt(0));
3218 cache->set(index + kStringOffset, key_string);
3219 cache->set(index + kPatternOffset, key_pattern);
3220 cache->set(index + kArrayOffset, value_array);
3223 // If the array is a reasonably short list of substrings, convert it into a
3224 // list of internalized strings.
3225 if (type == STRING_SPLIT_SUBSTRINGS && value_array->length() < 100) {
3226 for (int i = 0; i < value_array->length(); i++) {
3227 String* str = String::cast(value_array->get(i));
3228 Object* internalized_str;
3229 MaybeObject* maybe_string = heap->InternalizeString(str);
3230 if (maybe_string->ToObject(&internalized_str)) {
3231 value_array->set(i, internalized_str);
3235 // Convert backing store to a copy-on-write array.
3236 value_array->set_map_no_write_barrier(heap->fixed_cow_array_map());
3240 void RegExpResultsCache::Clear(FixedArray* cache) {
3241 for (int i = 0; i < kRegExpResultsCacheSize; i++) {
3242 cache->set(i, Smi::FromInt(0));
3247 MaybeObject* Heap::AllocateInitialNumberStringCache() {
3248 MaybeObject* maybe_obj =
3249 AllocateFixedArray(kInitialNumberStringCacheSize * 2, TENURED);
int Heap::FullSizeNumberStringCacheLength() {
  // Compute the size of the number string cache based on the max newspace
  // size.  The number string cache has a minimum size based on twice the
  // initial cache size to ensure that it is bigger after being made
  // 'full size'.
  int number_string_cache_size = max_semispace_size_ / 512;
  number_string_cache_size = Max(kInitialNumberStringCacheSize * 2,
                                 Min(0x4000, number_string_cache_size));
  // There is a string and a number per entry so the length is twice the
  // number of entries.
  return number_string_cache_size * 2;
}
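
// Worked example (a sketch; the concrete numbers depend on how the heap was
// configured): with an 8 MB max semispace, 8 MB / 512 = 16384 = 0x4000
// entries, which the Min(0x4000, ...) clamp leaves unchanged, so the cache
// array gets length 2 * 16384 = 32768 fields.  With a 1 MB max semispace,
// 1 MB / 512 = 2048 entries, giving a length of 4096 fields.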
void Heap::AllocateFullSizeNumberStringCache() {
  // The idea is to have a small number string cache in the snapshot to keep
  // boot-time memory usage down.  If the number string cache were already
  // expanded while the snapshot is being created, that purpose would be
  // defeated.
  ASSERT(!Serializer::enabled() || FLAG_extra_code != NULL);
  MaybeObject* maybe_obj =
      AllocateFixedArray(FullSizeNumberStringCacheLength(), TENURED);
  Object* new_cache;
  if (maybe_obj->ToObject(&new_cache)) {
    // We don't bother to repopulate the cache with entries from the old
    // cache.  It will be repopulated soon enough with new strings.
    set_number_string_cache(FixedArray::cast(new_cache));
  }
  // If allocation fails then we just return without doing anything.  It is
  // only a cache, so best effort is OK here.
}
3285 void Heap::FlushNumberStringCache() {
3286 // Flush the number to string cache.
3287 int len = number_string_cache()->length();
3288 for (int i = 0; i < len; i++) {
3289 number_string_cache()->set_undefined(this, i);
3294 static inline int double_get_hash(double d) {
3295 DoubleRepresentation rep(d);
3296 return static_cast<int>(rep.bits) ^ static_cast<int>(rep.bits >> 32);
3300 static inline int smi_get_hash(Smi* smi) {
3301 return smi->value();
3305 Object* Heap::GetNumberStringCache(Object* number) {
3307 int mask = (number_string_cache()->length() >> 1) - 1;
3308 if (number->IsSmi()) {
3309 hash = smi_get_hash(Smi::cast(number)) & mask;
3311 hash = double_get_hash(number->Number()) & mask;
3313 Object* key = number_string_cache()->get(hash * 2);
3314 if (key == number) {
3315 return String::cast(number_string_cache()->get(hash * 2 + 1));
3316 } else if (key->IsHeapNumber() &&
3317 number->IsHeapNumber() &&
3318 key->Number() == number->Number()) {
3319 return String::cast(number_string_cache()->get(hash * 2 + 1));
3321 return undefined_value();
3325 void Heap::SetNumberStringCache(Object* number, String* string) {
3327 int mask = (number_string_cache()->length() >> 1) - 1;
3328 if (number->IsSmi()) {
3329 hash = smi_get_hash(Smi::cast(number)) & mask;
3331 hash = double_get_hash(number->Number()) & mask;
3333 if (number_string_cache()->get(hash * 2) != undefined_value() &&
3334 number_string_cache()->length() != FullSizeNumberStringCacheLength()) {
3335 // The first time we have a hash collision, we move to the full sized
3336 // number string cache.
3337 AllocateFullSizeNumberStringCache();
3340 number_string_cache()->set(hash * 2, number);
3341 number_string_cache()->set(hash * 2 + 1, string);
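
// Layout note for the getter and setter above: the cache is a flat
// FixedArray in which each entry occupies two consecutive fields -- the
// number key at index 2 * hash and the cached string at index 2 * hash + 1 --
// where hash is the Smi value, or the xor of the double's two 32-bit halves,
// masked to the number of entries.  A colliding key simply overwrites the
// slot (after a one-time upgrade to the full-size cache), so the structure
// stays a fixed-size, lossy cache.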
3345 MaybeObject* Heap::NumberToString(Object* number,
3346 bool check_number_string_cache,
3347 PretenureFlag pretenure) {
3348 isolate_->counters()->number_to_string_runtime()->Increment();
3349 if (check_number_string_cache) {
3350 Object* cached = GetNumberStringCache(number);
3351 if (cached != undefined_value()) {
3357 Vector<char> buffer(arr, ARRAY_SIZE(arr));
3359 if (number->IsSmi()) {
3360 int num = Smi::cast(number)->value();
3361 str = IntToCString(num, buffer);
3363 double num = HeapNumber::cast(number)->value();
3364 str = DoubleToCString(num, buffer);
3368 MaybeObject* maybe_js_string =
3369 AllocateStringFromOneByte(CStrVector(str), pretenure);
3370 if (maybe_js_string->ToObject(&js_string)) {
3371 SetNumberStringCache(number, String::cast(js_string));
3373 return maybe_js_string;
3377 MaybeObject* Heap::Uint32ToString(uint32_t value,
3378 bool check_number_string_cache) {
3380 MaybeObject* maybe = NumberFromUint32(value);
3381 if (!maybe->To<Object>(&number)) return maybe;
3382 return NumberToString(number, check_number_string_cache);
3386 Map* Heap::MapForExternalArrayType(ExternalArrayType array_type) {
3387 return Map::cast(roots_[RootIndexForExternalArrayType(array_type)]);
3391 Heap::RootListIndex Heap::RootIndexForExternalArrayType(
3392 ExternalArrayType array_type) {
3393 switch (array_type) {
3394 case kExternalByteArray:
3395 return kExternalByteArrayMapRootIndex;
3396 case kExternalUnsignedByteArray:
3397 return kExternalUnsignedByteArrayMapRootIndex;
3398 case kExternalShortArray:
3399 return kExternalShortArrayMapRootIndex;
3400 case kExternalUnsignedShortArray:
3401 return kExternalUnsignedShortArrayMapRootIndex;
3402 case kExternalIntArray:
3403 return kExternalIntArrayMapRootIndex;
3404 case kExternalUnsignedIntArray:
3405 return kExternalUnsignedIntArrayMapRootIndex;
3406 case kExternalFloatArray:
3407 return kExternalFloatArrayMapRootIndex;
3408 case kExternalDoubleArray:
3409 return kExternalDoubleArrayMapRootIndex;
3410 case kExternalPixelArray:
3411 return kExternalPixelArrayMapRootIndex;
3414 return kUndefinedValueRootIndex;
3418 Heap::RootListIndex Heap::RootIndexForEmptyExternalArray(
3419 ElementsKind elementsKind) {
3420 switch (elementsKind) {
3421 case EXTERNAL_BYTE_ELEMENTS:
3422 return kEmptyExternalByteArrayRootIndex;
3423 case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
3424 return kEmptyExternalUnsignedByteArrayRootIndex;
3425 case EXTERNAL_SHORT_ELEMENTS:
3426 return kEmptyExternalShortArrayRootIndex;
3427 case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
3428 return kEmptyExternalUnsignedShortArrayRootIndex;
3429 case EXTERNAL_INT_ELEMENTS:
3430 return kEmptyExternalIntArrayRootIndex;
3431 case EXTERNAL_UNSIGNED_INT_ELEMENTS:
3432 return kEmptyExternalUnsignedIntArrayRootIndex;
3433 case EXTERNAL_FLOAT_ELEMENTS:
3434 return kEmptyExternalFloatArrayRootIndex;
3435 case EXTERNAL_DOUBLE_ELEMENTS:
3436 return kEmptyExternalDoubleArrayRootIndex;
3437 case EXTERNAL_PIXEL_ELEMENTS:
3438 return kEmptyExternalPixelArrayRootIndex;
3441 return kUndefinedValueRootIndex;
3445 ExternalArray* Heap::EmptyExternalArrayForMap(Map* map) {
3446 return ExternalArray::cast(
3447 roots_[RootIndexForEmptyExternalArray(map->elements_kind())]);
3453 MaybeObject* Heap::NumberFromDouble(double value, PretenureFlag pretenure) {
3454 // We need to distinguish the minus zero value and this cannot be
3455 // done after conversion to int. Doing this by comparing bit
3456 // patterns is faster than using fpclassify() et al.
3457 static const DoubleRepresentation minus_zero(-0.0);
3459 DoubleRepresentation rep(value);
3460 if (rep.bits == minus_zero.bits) {
3461 return AllocateHeapNumber(-0.0, pretenure);
3464 int int_value = FastD2I(value);
3465 if (value == int_value && Smi::IsValid(int_value)) {
3466 return Smi::FromInt(int_value);
3469 // Materialize the value in the heap.
3470 return AllocateHeapNumber(value, pretenure);
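
// Examples of the dispatch above (a sketch): NumberFromDouble(2.0) yields
// Smi::FromInt(2) when 2 is in Smi range; NumberFromDouble(-0.0) must yield
// a heap number, because a Smi cannot represent the sign of negative zero
// and the bit-pattern check above catches it before the int conversion;
// NumberFromDouble(0.5) falls through to AllocateHeapNumber because the
// value does not round-trip through FastD2I.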
3474 MaybeObject* Heap::AllocateForeign(Address address, PretenureFlag pretenure) {
3475 // Statically ensure that it is safe to allocate foreigns in paged spaces.
3476 STATIC_ASSERT(Foreign::kSize <= Page::kMaxNonCodeHeapObjectSize);
3477 AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
3479 MaybeObject* maybe_result = Allocate(foreign_map(), space);
3480 if (!maybe_result->To(&result)) return maybe_result;
3481 result->set_foreign_address(address);
3486 MaybeObject* Heap::AllocateSharedFunctionInfo(Object* name) {
3487 SharedFunctionInfo* share;
3488 MaybeObject* maybe = Allocate(shared_function_info_map(), OLD_POINTER_SPACE);
3489 if (!maybe->To<SharedFunctionInfo>(&share)) return maybe;
3491 // Set pointer fields.
3492 share->set_name(name);
3493 Code* illegal = isolate_->builtins()->builtin(Builtins::kIllegal);
3494 share->set_code(illegal);
3495 share->set_optimized_code_map(Smi::FromInt(0));
3496 share->set_scope_info(ScopeInfo::Empty(isolate_));
3497 Code* construct_stub =
3498 isolate_->builtins()->builtin(Builtins::kJSConstructStubGeneric);
3499 share->set_construct_stub(construct_stub);
3500 share->set_instance_class_name(Object_string());
3501 share->set_function_data(undefined_value(), SKIP_WRITE_BARRIER);
3502 share->set_script(undefined_value(), SKIP_WRITE_BARRIER);
3503 share->set_debug_info(undefined_value(), SKIP_WRITE_BARRIER);
3504 share->set_inferred_name(empty_string(), SKIP_WRITE_BARRIER);
3505 share->set_initial_map(undefined_value(), SKIP_WRITE_BARRIER);
3506 share->set_ast_node_count(0);
3507 share->set_stress_deopt_counter(FLAG_deopt_every_n_times);
3508 share->set_counters(0);
3510 // Set integer fields (smi or int, depending on the architecture).
3511 share->set_length(0);
3512 share->set_formal_parameter_count(0);
3513 share->set_expected_nof_properties(0);
3514 share->set_num_literals(0);
3515 share->set_start_position_and_type(0);
3516 share->set_end_position(0);
3517 share->set_function_token_position(0);
3518 // All compiler hints default to false or 0.
3519 share->set_compiler_hints(0);
3520 share->set_opt_count(0);
3526 MaybeObject* Heap::AllocateJSMessageObject(String* type,
3531 Object* stack_trace,
3532 Object* stack_frames) {
3534 { MaybeObject* maybe_result = Allocate(message_object_map(), NEW_SPACE);
3535 if (!maybe_result->ToObject(&result)) return maybe_result;
3537 JSMessageObject* message = JSMessageObject::cast(result);
3538 message->set_properties(Heap::empty_fixed_array(), SKIP_WRITE_BARRIER);
3539 message->initialize_elements();
3540 message->set_elements(Heap::empty_fixed_array(), SKIP_WRITE_BARRIER);
3541 message->set_type(type);
3542 message->set_arguments(arguments);
3543 message->set_start_position(start_position);
3544 message->set_end_position(end_position);
3545 message->set_script(script);
3546 message->set_stack_trace(stack_trace);
3547 message->set_stack_frames(stack_frames);
// Returns true for a character in a range.  Both limits are inclusive.
static inline bool Between(uint32_t character, uint32_t from, uint32_t to) {
  // This makes use of the unsigned wraparound.
  return character - from <= to - from;
}
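
// Example of the wraparound trick: for Between('5', '0', '9') the left side
// is 5 and the right side is 9, so the single unsigned comparison succeeds.
// For a character below the range, e.g. Between('.', '0', '9'), the
// subtraction '.' - '0' wraps around to a value near 2^32, which is larger
// than 9, so the test fails without needing a second comparison.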
3560 MUST_USE_RESULT static inline MaybeObject* MakeOrFindTwoCharacterString(
3565 // Numeric strings have a different hash algorithm not known by
3566 // LookupTwoCharsStringIfExists, so we skip this step for such strings.
3567 if ((!Between(c1, '0', '9') || !Between(c2, '0', '9')) &&
3568 heap->string_table()->LookupTwoCharsStringIfExists(c1, c2, &result)) {
3570 // Now we know the length is 2, we might as well make use of that fact
3571 // when building the new string.
3572 } else if (static_cast<unsigned>(c1 | c2) <= String::kMaxOneByteCharCodeU) {
3574 ASSERT(IsPowerOf2(String::kMaxOneByteCharCodeU + 1)); // because of this.
3576 { MaybeObject* maybe_result = heap->AllocateRawOneByteString(2);
3577 if (!maybe_result->ToObject(&result)) return maybe_result;
3579 uint8_t* dest = SeqOneByteString::cast(result)->GetChars();
3580 dest[0] = static_cast<uint8_t>(c1);
3581 dest[1] = static_cast<uint8_t>(c2);
3585 { MaybeObject* maybe_result = heap->AllocateRawTwoByteString(2);
3586 if (!maybe_result->ToObject(&result)) return maybe_result;
3588 uc16* dest = SeqTwoByteString::cast(result)->GetChars();
3596 MaybeObject* Heap::AllocateConsString(String* first, String* second) {
3597 int first_length = first->length();
3598 if (first_length == 0) {
3602 int second_length = second->length();
3603 if (second_length == 0) {
3607 int length = first_length + second_length;
3609 // Optimization for 2-byte strings often used as keys in a decompression
3610 // dictionary. Check whether we already have the string in the string
3611 // table to prevent creation of many unnecessary strings.
3613 uint16_t c1 = first->Get(0);
3614 uint16_t c2 = second->Get(0);
3615 return MakeOrFindTwoCharacterString(this, c1, c2);
3618 bool first_is_one_byte = first->IsOneByteRepresentation();
3619 bool second_is_one_byte = second->IsOneByteRepresentation();
3620 bool is_one_byte = first_is_one_byte && second_is_one_byte;
3621 // Make sure that an out of memory exception is thrown if the length
3622 // of the new cons string is too large.
3623 if (length > String::kMaxLength || length < 0) {
3624 isolate()->context()->mark_out_of_memory();
3625 return Failure::OutOfMemoryException(0x4);
3628 bool is_one_byte_data_in_two_byte_string = false;
3630 // At least one of the strings uses two-byte representation so we
3631 // can't use the fast case code for short ASCII strings below, but
3632 // we can try to save memory if all chars actually fit in ASCII.
3633 is_one_byte_data_in_two_byte_string =
3634 first->HasOnlyOneByteChars() && second->HasOnlyOneByteChars();
3635 if (is_one_byte_data_in_two_byte_string) {
3636 isolate_->counters()->string_add_runtime_ext_to_ascii()->Increment();
3640 // If the resulting string is small make a flat string.
3641 if (length < ConsString::kMinLength) {
3642 // Note that neither of the two inputs can be a slice because:
3643 STATIC_ASSERT(ConsString::kMinLength <= SlicedString::kMinLength);
3644 ASSERT(first->IsFlat());
3645 ASSERT(second->IsFlat());
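// Reasoning: both inputs are strictly shorter than length, which is below
// ConsString::kMinLength, and the STATIC_ASSERT above puts that below
// SlicedString::kMinLength as well, so neither input was ever eligible to
// become a slice and both must already be flat.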
3648 { MaybeObject* maybe_result = AllocateRawOneByteString(length);
3649 if (!maybe_result->ToObject(&result)) return maybe_result;
3651 // Copy the characters into the new object.
3652 uint8_t* dest = SeqOneByteString::cast(result)->GetChars();
3655 if (first->IsExternalString()) {
3656 src = ExternalAsciiString::cast(first)->GetChars();
3658 src = SeqOneByteString::cast(first)->GetChars();
3660 for (int i = 0; i < first_length; i++) *dest++ = src[i];
3661 // Copy second part.
3662 if (second->IsExternalString()) {
3663 src = ExternalAsciiString::cast(second)->GetChars();
3665 src = SeqOneByteString::cast(second)->GetChars();
3667 for (int i = 0; i < second_length; i++) *dest++ = src[i];
3670 if (is_one_byte_data_in_two_byte_string) {
3672 { MaybeObject* maybe_result = AllocateRawOneByteString(length);
3673 if (!maybe_result->ToObject(&result)) return maybe_result;
3675 // Copy the characters into the new object.
3676 uint8_t* dest = SeqOneByteString::cast(result)->GetChars();
3677 String::WriteToFlat(first, dest, 0, first_length);
3678 String::WriteToFlat(second, dest + first_length, 0, second_length);
3679 isolate_->counters()->string_add_runtime_ext_to_ascii()->Increment();
3684 { MaybeObject* maybe_result = AllocateRawTwoByteString(length);
3685 if (!maybe_result->ToObject(&result)) return maybe_result;
3687 // Copy the characters into the new object.
3688 uc16* dest = SeqTwoByteString::cast(result)->GetChars();
3689 String::WriteToFlat(first, dest, 0, first_length);
3690 String::WriteToFlat(second, dest + first_length, 0, second_length);
3695 Map* map = (is_one_byte || is_one_byte_data_in_two_byte_string) ?
3696 cons_ascii_string_map() : cons_string_map();
3699 { MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
3700 if (!maybe_result->ToObject(&result)) return maybe_result;
3703 DisallowHeapAllocation no_gc;
3704 ConsString* cons_string = ConsString::cast(result);
3705 WriteBarrierMode mode = cons_string->GetWriteBarrierMode(no_gc);
3706 cons_string->set_length(length);
3707 cons_string->set_hash_field(String::kEmptyHashField);
3708 cons_string->set_first(first, mode);
3709 cons_string->set_second(second, mode);
3714 MaybeObject* Heap::AllocateSubString(String* buffer,
3717 PretenureFlag pretenure) {
3718 int length = end - start;
3720 return empty_string();
3721 } else if (length == 1) {
3722 return LookupSingleCharacterStringFromCode(buffer->Get(start));
3723 } else if (length == 2) {
3724 // Optimization for 2-byte strings often used as keys in a decompression
3725 // dictionary. Check whether we already have the string in the string
3726 // table to prevent creation of many unnecessary strings.
3727 uint16_t c1 = buffer->Get(start);
3728 uint16_t c2 = buffer->Get(start + 1);
3729 return MakeOrFindTwoCharacterString(this, c1, c2);
3732 // Make an attempt to flatten the buffer to reduce access time.
3733 buffer = buffer->TryFlattenGetString();
3735 if (!FLAG_string_slices ||
3736 !buffer->IsFlat() ||
3737 length < SlicedString::kMinLength ||
3738 pretenure == TENURED) {
3740 // WriteToFlat takes care of the case when an indirect string has a
3741 // different encoding from its underlying string. These encodings may
3742 // differ because of externalization.
3743 bool is_one_byte = buffer->IsOneByteRepresentation();
3744 { MaybeObject* maybe_result = is_one_byte
3745 ? AllocateRawOneByteString(length, pretenure)
3746 : AllocateRawTwoByteString(length, pretenure);
3747 if (!maybe_result->ToObject(&result)) return maybe_result;
3749 String* string_result = String::cast(result);
3750 // Copy the characters into the new object.
3752 ASSERT(string_result->IsOneByteRepresentation());
3753 uint8_t* dest = SeqOneByteString::cast(string_result)->GetChars();
3754 String::WriteToFlat(buffer, dest, start, end);
3756 ASSERT(string_result->IsTwoByteRepresentation());
3757 uc16* dest = SeqTwoByteString::cast(string_result)->GetChars();
3758 String::WriteToFlat(buffer, dest, start, end);
3763 ASSERT(buffer->IsFlat());
3765 if (FLAG_verify_heap) {
3766 buffer->StringVerify();
3771 // When slicing an indirect string we use its encoding for a newly created
3772 // slice and don't check the encoding of the underlying string. This is safe
3773 // even if the encodings are different because of externalization. If an
3774 // indirect ASCII string is pointing to a two-byte string, the two-byte char
3775 // codes of the underlying string must still fit into ASCII (because
3776 // externalization must not change char codes).
3777 { Map* map = buffer->IsOneByteRepresentation()
3778 ? sliced_ascii_string_map()
3779 : sliced_string_map();
3780 MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
3781 if (!maybe_result->ToObject(&result)) return maybe_result;
3784 DisallowHeapAllocation no_gc;
3785 SlicedString* sliced_string = SlicedString::cast(result);
3786 sliced_string->set_length(length);
3787 sliced_string->set_hash_field(String::kEmptyHashField);
3788 if (buffer->IsConsString()) {
3789 ConsString* cons = ConsString::cast(buffer);
3790 ASSERT(cons->second()->length() == 0);
3791 sliced_string->set_parent(cons->first());
3792 sliced_string->set_offset(start);
3793 } else if (buffer->IsSlicedString()) {
3794 // Prevent nesting sliced strings.
3795 SlicedString* parent_slice = SlicedString::cast(buffer);
3796 sliced_string->set_parent(parent_slice->parent());
3797 sliced_string->set_offset(start + parent_slice->offset());
3799 sliced_string->set_parent(buffer);
3800 sliced_string->set_offset(start);
3802 ASSERT(sliced_string->parent()->IsSeqString() ||
3803 sliced_string->parent()->IsExternalString());
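// Illustrative example of the re-parenting above: slicing [2, 7) out of a
// SlicedString that itself starts at offset 10 of a sequential parent yields
// parent = that sequential string and offset = 2 + 10 = 12, so slices never
// nest more than one level deep.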
3808 MaybeObject* Heap::AllocateExternalStringFromAscii(
3809 const ExternalAsciiString::Resource* resource) {
3810 size_t length = resource->length();
3811 if (length > static_cast<size_t>(String::kMaxLength)) {
3812 isolate()->context()->mark_out_of_memory();
3813 return Failure::OutOfMemoryException(0x5);
3816 Map* map = external_ascii_string_map();
3818 { MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
3819 if (!maybe_result->ToObject(&result)) return maybe_result;
3822 ExternalAsciiString* external_string = ExternalAsciiString::cast(result);
3823 external_string->set_length(static_cast<int>(length));
3824 external_string->set_hash_field(String::kEmptyHashField);
3825 external_string->set_resource(resource);
3831 MaybeObject* Heap::AllocateExternalStringFromTwoByte(
3832 const ExternalTwoByteString::Resource* resource) {
3833 size_t length = resource->length();
3834 if (length > static_cast<size_t>(String::kMaxLength)) {
3835 isolate()->context()->mark_out_of_memory();
3836 return Failure::OutOfMemoryException(0x6);
3839 // For small strings we check whether the resource contains only
3840 // one-byte characters. If so, we use a different string map.
3841 static const size_t kOneByteCheckLengthLimit = 32;
3842 bool is_one_byte = length <= kOneByteCheckLengthLimit &&
3843 String::IsOneByte(resource->data(), static_cast<int>(length));
3844 Map* map = is_one_byte ?
3845 external_string_with_one_byte_data_map() : external_string_map();
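// For instance, a 10-character external two-byte resource that happens to
// contain only Latin-1 data gets the "one byte data" map, so later operations
// (e.g. the HasOnlyOneByteChars checks in AllocateConsString above) can still
// produce one-byte results; longer resources skip the scan to bound its cost.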
3847 { MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
3848 if (!maybe_result->ToObject(&result)) return maybe_result;
3851 ExternalTwoByteString* external_string = ExternalTwoByteString::cast(result);
3852 external_string->set_length(static_cast<int>(length));
3853 external_string->set_hash_field(String::kEmptyHashField);
3854 external_string->set_resource(resource);
3860 MaybeObject* Heap::LookupSingleCharacterStringFromCode(uint16_t code) {
3861 if (code <= String::kMaxOneByteCharCode) {
3862 Object* value = single_character_string_cache()->get(code);
3863 if (value != undefined_value()) return value;
3866 buffer[0] = static_cast<uint8_t>(code);
3868 MaybeObject* maybe_result =
3869 InternalizeOneByteString(Vector<const uint8_t>(buffer, 1));
3871 if (!maybe_result->ToObject(&result)) return maybe_result;
3872 single_character_string_cache()->set(code, result);
3877 { MaybeObject* maybe_result = AllocateRawTwoByteString(1);
3878 if (!maybe_result->ToObject(&result)) return maybe_result;
3880 String* answer = String::cast(result);
3881 answer->Set(0, code);
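// Usage sketch (illustrative): LookupSingleCharacterStringFromCode('A')
// returns the cached one-byte string for 'A' on every call after the first,
// while a code point above String::kMaxOneByteCharCode takes this fallback
// path and allocates a fresh two-byte string on each call.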
3886 MaybeObject* Heap::AllocateByteArray(int length, PretenureFlag pretenure) {
3887 if (length < 0 || length > ByteArray::kMaxLength) {
3888 return Failure::OutOfMemoryException(0x7);
3890 if (pretenure == NOT_TENURED) {
3891 return AllocateByteArray(length);
3893 int size = ByteArray::SizeFor(length);
3895 { MaybeObject* maybe_result = (size <= Page::kMaxNonCodeHeapObjectSize)
3896 ? old_data_space_->AllocateRaw(size)
3897 : lo_space_->AllocateRaw(size, NOT_EXECUTABLE);
3898 if (!maybe_result->ToObject(&result)) return maybe_result;
3901 reinterpret_cast<ByteArray*>(result)->set_map_no_write_barrier(
3903 reinterpret_cast<ByteArray*>(result)->set_length(length);
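// Usage sketch (illustrative only):
//   Object* obj;
//   { MaybeObject* maybe = heap->AllocateByteArray(64, TENURED);
//     if (!maybe->ToObject(&obj)) return maybe;  // allocation may fail
//   }
//   ByteArray* bytes = ByteArray::cast(obj);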
3908 MaybeObject* Heap::AllocateByteArray(int length) {
3909 if (length < 0 || length > ByteArray::kMaxLength) {
3910 return Failure::OutOfMemoryException(0x8);
3912 int size = ByteArray::SizeFor(length);
3913 AllocationSpace space =
3914 (size > Page::kMaxNonCodeHeapObjectSize) ? LO_SPACE : NEW_SPACE;
3916 { MaybeObject* maybe_result = AllocateRaw(size, space, OLD_DATA_SPACE);
3917 if (!maybe_result->ToObject(&result)) return maybe_result;
3920 reinterpret_cast<ByteArray*>(result)->set_map_no_write_barrier(
3922 reinterpret_cast<ByteArray*>(result)->set_length(length);
3927 void Heap::CreateFillerObjectAt(Address addr, int size) {
3928 if (size == 0) return;
3929 HeapObject* filler = HeapObject::FromAddress(addr);
3930 if (size == kPointerSize) {
3931 filler->set_map_no_write_barrier(one_pointer_filler_map());
3932 } else if (size == 2 * kPointerSize) {
3933 filler->set_map_no_write_barrier(two_pointer_filler_map());
3935 filler->set_map_no_write_barrier(free_space_map());
3936 FreeSpace::cast(filler)->set_size(size);
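// For example, a one-word gap gets the one_pointer_filler map, a two-word gap
// the two_pointer_filler map, and anything larger becomes a FreeSpace object
// carrying its own size, so heap iterators can step over the hole.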
3941 MaybeObject* Heap::AllocateExternalArray(int length,
3942 ExternalArrayType array_type,
3943 void* external_pointer,
3944 PretenureFlag pretenure) {
3945 AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
3947 { MaybeObject* maybe_result = AllocateRaw(ExternalArray::kAlignedSize,
3950 if (!maybe_result->ToObject(&result)) return maybe_result;
3953 reinterpret_cast<ExternalArray*>(result)->set_map_no_write_barrier(
3954 MapForExternalArrayType(array_type));
3955 reinterpret_cast<ExternalArray*>(result)->set_length(length);
3956 reinterpret_cast<ExternalArray*>(result)->set_external_pointer(
3963 MaybeObject* Heap::CreateCode(const CodeDesc& desc,
3965 Handle<Object> self_reference,
3967 bool crankshafted) {
3968 // Allocate ByteArray before the Code object, so that we do not risk
3969 // leaving uninitialized Code object (and breaking the heap).
3970 ByteArray* reloc_info;
3971 MaybeObject* maybe_reloc_info = AllocateByteArray(desc.reloc_size, TENURED);
3972 if (!maybe_reloc_info->To(&reloc_info)) return maybe_reloc_info;
3975 int body_size = RoundUp(desc.instr_size, kObjectAlignment);
3976 int obj_size = Code::SizeFor(body_size);
3977 ASSERT(IsAligned(static_cast<intptr_t>(obj_size), kCodeAlignment));
3978 MaybeObject* maybe_result;
3979 // Large code objects and code objects which should stay at a fixed address
3980 // are allocated in large object space.
3982 bool force_lo_space = obj_size > code_space()->AreaSize();
3983 if (force_lo_space) {
3984 maybe_result = lo_space_->AllocateRaw(obj_size, EXECUTABLE);
3986 maybe_result = code_space_->AllocateRaw(obj_size);
3988 if (!maybe_result->To<HeapObject>(&result)) return maybe_result;
3990 if (immovable && !force_lo_space &&
3991 // Objects on the first page of each space are never moved.
3992 !code_space_->FirstPage()->Contains(result->address())) {
3994 // Discard the first code allocation, which was on a page where it could be moved.
3995 CreateFillerObjectAt(result->address(), obj_size);
3996 maybe_result = lo_space_->AllocateRaw(obj_size, EXECUTABLE);
3997 if (!maybe_result->To<HeapObject>(&result)) return maybe_result;
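// In other words: if an immovable code object did not land on the (never
// moved) first page of code space, the allocation above is abandoned, the
// hole is filled, and the object is re-allocated in large object space,
// where objects are not relocated either.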
4000 // Initialize the object
4001 result->set_map_no_write_barrier(code_map());
4002 Code* code = Code::cast(result);
4003 ASSERT(!isolate_->code_range()->exists() ||
4004 isolate_->code_range()->contains(code->address()));
4005 code->set_instruction_size(desc.instr_size);
4006 code->set_relocation_info(reloc_info);
4007 code->set_flags(flags);
4008 if (code->is_call_stub() || code->is_keyed_call_stub()) {
4009 code->set_check_type(RECEIVER_MAP_CHECK);
4011 code->set_is_crankshafted(crankshafted);
4012 code->set_deoptimization_data(empty_fixed_array(), SKIP_WRITE_BARRIER);
4013 code->InitializeTypeFeedbackInfoNoWriteBarrier(undefined_value());
4014 code->set_handler_table(empty_fixed_array(), SKIP_WRITE_BARRIER);
4015 code->set_gc_metadata(Smi::FromInt(0));
4016 code->set_ic_age(global_ic_age_);
4017 code->set_prologue_offset(kPrologueOffsetNotSet);
4018 if (code->kind() == Code::OPTIMIZED_FUNCTION) {
4019 code->set_marked_for_deoptimization(false);
4021 // Allow self references to the created code object by patching the handle to
4022 // point to the newly allocated Code object.
4023 if (!self_reference.is_null()) {
4024 *(self_reference.location()) = code;
4026 // Migrate generated code.
4027 // The generated code can contain Object** values (typically from handles)
4028 // that are dereferenced during the copy to point directly to the actual heap
4029 // objects. These pointers can include references to the code object itself,
4030 // through the self_reference parameter.
4031 code->CopyFrom(desc);
4034 if (FLAG_verify_heap) {
4042 MaybeObject* Heap::CopyCode(Code* code) {
4043 // Allocate an object the same size as the code object.
4044 int obj_size = code->Size();
4045 MaybeObject* maybe_result;
4046 if (obj_size > code_space()->AreaSize()) {
4047 maybe_result = lo_space_->AllocateRaw(obj_size, EXECUTABLE);
4049 maybe_result = code_space_->AllocateRaw(obj_size);
4053 if (!maybe_result->ToObject(&result)) return maybe_result;
4055 // Copy code object.
4056 Address old_addr = code->address();
4057 Address new_addr = reinterpret_cast<HeapObject*>(result)->address();
4058 CopyBlock(new_addr, old_addr, obj_size);
4059 // Relocate the copy.
4060 Code* new_code = Code::cast(result);
4061 ASSERT(!isolate_->code_range()->exists() ||
4062 isolate_->code_range()->contains(code->address()));
4063 new_code->Relocate(new_addr - old_addr);
4068 MaybeObject* Heap::CopyCode(Code* code, Vector<byte> reloc_info) {
4069 // Allocate ByteArray before the Code object, so that we do not risk
4070 // leaving uninitialized Code object (and breaking the heap).
4071 Object* reloc_info_array;
4072 { MaybeObject* maybe_reloc_info_array =
4073 AllocateByteArray(reloc_info.length(), TENURED);
4074 if (!maybe_reloc_info_array->ToObject(&reloc_info_array)) {
4075 return maybe_reloc_info_array;
4079 int new_body_size = RoundUp(code->instruction_size(), kObjectAlignment);
4081 int new_obj_size = Code::SizeFor(new_body_size);
4083 Address old_addr = code->address();
4085 size_t relocation_offset =
4086 static_cast<size_t>(code->instruction_end() - old_addr);
4088 MaybeObject* maybe_result;
4089 if (new_obj_size > code_space()->AreaSize()) {
4090 maybe_result = lo_space_->AllocateRaw(new_obj_size, EXECUTABLE);
4092 maybe_result = code_space_->AllocateRaw(new_obj_size);
4096 if (!maybe_result->ToObject(&result)) return maybe_result;
4098 // Copy code object.
4099 Address new_addr = reinterpret_cast<HeapObject*>(result)->address();
4101 // Copy header and instructions.
4102 CopyBytes(new_addr, old_addr, relocation_offset);
4104 Code* new_code = Code::cast(result);
4105 new_code->set_relocation_info(ByteArray::cast(reloc_info_array));
4107 // Copy patched rinfo.
4108 CopyBytes(new_code->relocation_start(),
4110 static_cast<size_t>(reloc_info.length()));
4112 // Relocate the copy.
4113 ASSERT(!isolate_->code_range()->exists() ||
4114 isolate_->code_range()->contains(code->address()));
4115 new_code->Relocate(new_addr - old_addr);
4118 if (FLAG_verify_heap) {
4126 MaybeObject* Heap::AllocateWithAllocationSite(Map* map, AllocationSpace space,
4127 Handle<Object> allocation_site_info_payload) {
4128 ASSERT(gc_state_ == NOT_IN_GC);
4129 ASSERT(map->instance_type() != MAP_TYPE);
4130 // If allocation failures are disallowed, we may allocate in a different
4131 // space when new space is full and the object is not a large object.
4132 AllocationSpace retry_space =
4133 (space != NEW_SPACE) ? space : TargetSpaceId(map->instance_type());
4134 int size = map->instance_size() + AllocationSiteInfo::kSize;
4136 MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
4137 if (!maybe_result->ToObject(&result)) return maybe_result;
4138 // No need for write barrier since object is white and map is in old space.
4139 HeapObject::cast(result)->set_map_no_write_barrier(map);
4140 AllocationSiteInfo* alloc_info = reinterpret_cast<AllocationSiteInfo*>(
4141 reinterpret_cast<Address>(result) + map->instance_size());
4142 alloc_info->set_map_no_write_barrier(allocation_site_info_map());
4143 alloc_info->set_payload(*allocation_site_info_payload, SKIP_WRITE_BARRIER);
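// Resulting layout (illustrative):
//   [ object body: map->instance_size() bytes ][ AllocationSiteInfo ]
// i.e. the site info is co-allocated directly behind the object, which is
// why size above is instance_size() plus AllocationSiteInfo::kSize.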
4148 MaybeObject* Heap::Allocate(Map* map, AllocationSpace space) {
4149 ASSERT(gc_state_ == NOT_IN_GC);
4150 ASSERT(map->instance_type() != MAP_TYPE);
4151 // If allocation failures are disallowed, we may allocate in a different
4152 // space when new space is full and the object is not a large object.
4153 AllocationSpace retry_space =
4154 (space != NEW_SPACE) ? space : TargetSpaceId(map->instance_type());
4155 int size = map->instance_size();
4157 MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
4158 if (!maybe_result->ToObject(&result)) return maybe_result;
4159 // No need for write barrier since object is white and map is in old space.
4160 HeapObject::cast(result)->set_map_no_write_barrier(map);
4165 void Heap::InitializeFunction(JSFunction* function,
4166 SharedFunctionInfo* shared,
4167 Object* prototype) {
4168 ASSERT(!prototype->IsMap());
4169 function->initialize_properties();
4170 function->initialize_elements();
4171 function->set_shared(shared);
4172 function->set_code(shared->code());
4173 function->set_prototype_or_initial_map(prototype);
4174 function->set_context(undefined_value());
4175 function->set_literals_or_bindings(empty_fixed_array());
4176 function->set_next_function_link(undefined_value());
4180 MaybeObject* Heap::AllocateFunctionPrototype(JSFunction* function) {
4181 // Make sure to use globals from the function's context, since the function
4182 // can be from a different context.
4183 Context* native_context = function->context()->native_context();
4185 if (function->shared()->is_generator()) {
4186 // Generator prototypes can share maps since they don't have "constructor" properties.
4188 new_map = native_context->generator_object_prototype_map();
4190 // Each function prototype gets a fresh map to avoid unwanted sharing of
4191 // maps between prototypes of different constructors.
4192 JSFunction* object_function = native_context->object_function();
4193 ASSERT(object_function->has_initial_map());
4194 MaybeObject* maybe_map = object_function->initial_map()->Copy();
4195 if (!maybe_map->To(&new_map)) return maybe_map;
4199 MaybeObject* maybe_prototype = AllocateJSObjectFromMap(new_map);
4200 if (!maybe_prototype->ToObject(&prototype)) return maybe_prototype;
4202 if (!function->shared()->is_generator()) {
4203 MaybeObject* maybe_failure =
4204 JSObject::cast(prototype)->SetLocalPropertyIgnoreAttributes(
4205 constructor_string(), function, DONT_ENUM);
4206 if (maybe_failure->IsFailure()) return maybe_failure;
4213 MaybeObject* Heap::AllocateFunction(Map* function_map,
4214 SharedFunctionInfo* shared,
4216 PretenureFlag pretenure) {
4217 AllocationSpace space =
4218 (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
4220 { MaybeObject* maybe_result = Allocate(function_map, space);
4221 if (!maybe_result->ToObject(&result)) return maybe_result;
4223 InitializeFunction(JSFunction::cast(result), shared, prototype);
4228 MaybeObject* Heap::AllocateArgumentsObject(Object* callee, int length) {
4229 // To get fast allocation and map sharing for arguments objects we
4230 // allocate them based on an arguments boilerplate.
4232 JSObject* boilerplate;
4233 int arguments_object_size;
4234 bool strict_mode_callee = callee->IsJSFunction() &&
4235 !JSFunction::cast(callee)->shared()->is_classic_mode();
4236 if (strict_mode_callee) {
4238 isolate()->context()->native_context()->
4239 strict_mode_arguments_boilerplate();
4240 arguments_object_size = kArgumentsObjectSizeStrict;
4243 isolate()->context()->native_context()->arguments_boilerplate();
4244 arguments_object_size = kArgumentsObjectSize;
4247 // This calls Copy directly rather than using Heap::AllocateRaw so we
4248 // duplicate the check here.
4249 ASSERT(AllowHeapAllocation::IsAllowed() && gc_state_ == NOT_IN_GC);
4251 // Check that the size of the boilerplate matches our
4252 // expectations. The ArgumentsAccessStub::GenerateNewObject relies
4253 // on the size being a known constant.
4254 ASSERT(arguments_object_size == boilerplate->map()->instance_size());
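// The two boilerplates differ because strict mode arguments objects do not
// store a callee field in the object, which is also why only the non-strict
// path below writes kArgumentsCalleeIndex.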
4256 // Do the allocation.
4258 { MaybeObject* maybe_result =
4259 AllocateRaw(arguments_object_size, NEW_SPACE, OLD_POINTER_SPACE);
4260 if (!maybe_result->ToObject(&result)) return maybe_result;
4263 // Copy the content. The arguments boilerplate doesn't have any
4264 // fields that point to new space, so it's safe to skip the write barrier.
4266 CopyBlock(HeapObject::cast(result)->address(),
4267 boilerplate->address(),
4268 JSObject::kHeaderSize);
4270 // Set the length property.
4271 JSObject::cast(result)->InObjectPropertyAtPut(kArgumentsLengthIndex,
4272 Smi::FromInt(length),
4273 SKIP_WRITE_BARRIER);
4274 // Set the callee property for non-strict mode arguments object only.
4275 if (!strict_mode_callee) {
4276 JSObject::cast(result)->InObjectPropertyAtPut(kArgumentsCalleeIndex,
4280 // Check the state of the object
4281 ASSERT(JSObject::cast(result)->HasFastProperties());
4282 ASSERT(JSObject::cast(result)->HasFastObjectElements());
4288 MaybeObject* Heap::AllocateInitialMap(JSFunction* fun) {
4289 ASSERT(!fun->has_initial_map());
4291 // First create a new map with the size and number of in-object properties
4292 // suggested by the function.
4293 InstanceType instance_type;
4295 int in_object_properties;
4296 if (fun->shared()->is_generator()) {
4297 instance_type = JS_GENERATOR_OBJECT_TYPE;
4298 instance_size = JSGeneratorObject::kSize;
4299 in_object_properties = 0;
4301 instance_type = JS_OBJECT_TYPE;
4302 instance_size = fun->shared()->CalculateInstanceSize();
4303 in_object_properties = fun->shared()->CalculateInObjectProperties();
4306 MaybeObject* maybe_map = AllocateMap(instance_type, instance_size);
4307 if (!maybe_map->To(&map)) return maybe_map;
4309 // Fetch or allocate prototype.
4311 if (fun->has_instance_prototype()) {
4312 prototype = fun->instance_prototype();
4314 MaybeObject* maybe_prototype = AllocateFunctionPrototype(fun);
4315 if (!maybe_prototype->To(&prototype)) return maybe_prototype;
4317 map->set_inobject_properties(in_object_properties);
4318 map->set_unused_property_fields(in_object_properties);
4319 map->set_prototype(prototype);
4320 ASSERT(map->has_fast_object_elements());
4322 if (!fun->shared()->is_generator()) {
4323 fun->shared()->StartInobjectSlackTracking(map);
4330 void Heap::InitializeJSObjectFromMap(JSObject* obj,
4331 FixedArray* properties,
4333 obj->set_properties(properties);
4334 obj->initialize_elements();
4335 // TODO(1240798): Initialize the object's body using valid initial values
4336 // according to the object's initial map. For example, if the map's
4337 // instance type is JS_ARRAY_TYPE, the length field should be initialized
4338 // to a number (e.g. Smi::FromInt(0)) and the elements initialized to a
4339 // fixed array (e.g. Heap::empty_fixed_array()). Currently, the object
4340 // verification code has to cope with (temporarily) invalid objects. See
4341 // for example, JSArray::JSArrayVerify().
4343 // We cannot always fill with one_pointer_filler_map because objects
4344 // created from API functions expect their internal fields to be initialized
4345 // with undefined_value.
4346 // Pre-allocated fields need to be initialized with undefined_value as well
4347 // so that object accesses before the constructor completes (e.g. in the
4348 // debugger) will not cause a crash.
4349 if (map->constructor()->IsJSFunction() &&
4350 JSFunction::cast(map->constructor())->shared()->
4351 IsInobjectSlackTrackingInProgress()) {
4352 // We might want to shrink the object later.
4353 ASSERT(obj->GetInternalFieldCount() == 0);
4354 filler = Heap::one_pointer_filler_map();
4356 filler = Heap::undefined_value();
4358 obj->InitializeBody(map, Heap::undefined_value(), filler);
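// Put differently: while in-object slack tracking is still running, the
// non-pre-allocated tail of the object is filled with one_pointer_filler_map
// so the instance can later be shrunk; otherwise every field simply starts
// out as undefined_value.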
4362 MaybeObject* Heap::AllocateJSObjectFromMap(Map* map, PretenureFlag pretenure) {
4363 // JSFunctions should be allocated using AllocateFunction to be
4364 // properly initialized.
4365 ASSERT(map->instance_type() != JS_FUNCTION_TYPE);
4367 // Both types of global objects should be allocated using
4368 // AllocateGlobalObject to be properly initialized.
4369 ASSERT(map->instance_type() != JS_GLOBAL_OBJECT_TYPE);
4370 ASSERT(map->instance_type() != JS_BUILTINS_OBJECT_TYPE);
4372 // Allocate the backing storage for the properties.
4373 int prop_size = map->InitialPropertiesLength();
4374 ASSERT(prop_size >= 0);
4376 { MaybeObject* maybe_properties = AllocateFixedArray(prop_size, pretenure);
4377 if (!maybe_properties->ToObject(&properties)) return maybe_properties;
4380 // Allocate the JSObject.
4381 AllocationSpace space =
4382 (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
4383 if (map->instance_size() > Page::kMaxNonCodeHeapObjectSize) space = LO_SPACE;
4385 MaybeObject* maybe_obj = Allocate(map, space);
4386 if (!maybe_obj->To(&obj)) return maybe_obj;
4388 // Initialize the JSObject.
4389 InitializeJSObjectFromMap(JSObject::cast(obj),
4390 FixedArray::cast(properties),
4392 ASSERT(JSObject::cast(obj)->HasFastElements() ||
4393 JSObject::cast(obj)->HasExternalArrayElements());
4398 MaybeObject* Heap::AllocateJSObjectFromMapWithAllocationSite(Map* map,
4399 Handle<Object> allocation_site_info_payload) {
4400 // JSFunctions should be allocated using AllocateFunction to be
4401 // properly initialized.
4402 ASSERT(map->instance_type() != JS_FUNCTION_TYPE);
4404 // Both types of global objects should be allocated using
4405 // AllocateGlobalObject to be properly initialized.
4406 ASSERT(map->instance_type() != JS_GLOBAL_OBJECT_TYPE);
4407 ASSERT(map->instance_type() != JS_BUILTINS_OBJECT_TYPE);
4409 // Allocate the backing storage for the properties.
4410 int prop_size = map->InitialPropertiesLength();
4411 ASSERT(prop_size >= 0);
4413 { MaybeObject* maybe_properties = AllocateFixedArray(prop_size);
4414 if (!maybe_properties->ToObject(&properties)) return maybe_properties;
4417 // Allocate the JSObject.
4418 AllocationSpace space = NEW_SPACE;
4419 if (map->instance_size() > Page::kMaxNonCodeHeapObjectSize) space = LO_SPACE;
4421 MaybeObject* maybe_obj = AllocateWithAllocationSite(map, space,
4422 allocation_site_info_payload);
4423 if (!maybe_obj->To(&obj)) return maybe_obj;
4425 // Initialize the JSObject.
4426 InitializeJSObjectFromMap(JSObject::cast(obj),
4427 FixedArray::cast(properties),
4429 ASSERT(JSObject::cast(obj)->HasFastElements());
4434 MaybeObject* Heap::AllocateJSObject(JSFunction* constructor,
4435 PretenureFlag pretenure) {
4436 // Allocate the initial map if absent.
4437 if (!constructor->has_initial_map()) {
4438 Object* initial_map;
4439 { MaybeObject* maybe_initial_map = AllocateInitialMap(constructor);
4440 if (!maybe_initial_map->ToObject(&initial_map)) return maybe_initial_map;
4442 constructor->set_initial_map(Map::cast(initial_map));
4443 Map::cast(initial_map)->set_constructor(constructor);
4445 // Allocate the object based on the constructor's initial map.
4446 MaybeObject* result = AllocateJSObjectFromMap(
4447 constructor->initial_map(), pretenure);
4449 // Make sure result is NOT a global object if valid.
4450 Object* non_failure;
4451 ASSERT(!result->ToObject(&non_failure) || !non_failure->IsGlobalObject());
4457 MaybeObject* Heap::AllocateJSObjectWithAllocationSite(JSFunction* constructor,
4458 Handle<Object> allocation_site_info_payload) {
4459 // Allocate the initial map if absent.
4460 if (!constructor->has_initial_map()) {
4461 Object* initial_map;
4462 { MaybeObject* maybe_initial_map = AllocateInitialMap(constructor);
4463 if (!maybe_initial_map->ToObject(&initial_map)) return maybe_initial_map;
4465 constructor->set_initial_map(Map::cast(initial_map));
4466 Map::cast(initial_map)->set_constructor(constructor);
4468 // Allocate the object based on the constructor's initial map, or the payload
4470 Map* initial_map = constructor->initial_map();
4472 JSGlobalPropertyCell* cell = JSGlobalPropertyCell::cast(
4473 *allocation_site_info_payload);
4474 Smi* smi = Smi::cast(cell->value());
4475 ElementsKind to_kind = static_cast<ElementsKind>(smi->value());
4476 AllocationSiteMode mode = TRACK_ALLOCATION_SITE;
4477 if (to_kind != initial_map->elements_kind()) {
4478 MaybeObject* maybe_new_map = initial_map->AsElementsKind(to_kind);
4479 if (!maybe_new_map->To(&initial_map)) return maybe_new_map;
4480 // Possibly alter the mode, since we found an updated elements kind
4481 // in the type info cell.
4482 mode = AllocationSiteInfo::GetMode(to_kind);
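// The property cell feeds back, as a Smi, the ElementsKind observed at this
// allocation site; if it is more general than the constructor's initial map,
// the map is transitioned up front and GetMode() decides whether the new
// kind still needs on-heap tracking.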
4485 MaybeObject* result;
4486 if (mode == TRACK_ALLOCATION_SITE) {
4487 result = AllocateJSObjectFromMapWithAllocationSite(initial_map,
4488 allocation_site_info_payload);
4490 result = AllocateJSObjectFromMap(initial_map, NOT_TENURED);
4493 // Make sure result is NOT a global object if valid.
4494 Object* non_failure;
4495 ASSERT(!result->ToObject(&non_failure) || !non_failure->IsGlobalObject());
4501 MaybeObject* Heap::AllocateJSGeneratorObject(JSFunction *function) {
4502 ASSERT(function->shared()->is_generator());
4504 if (function->has_initial_map()) {
4505 map = function->initial_map();
4507 // Allocate the initial map if absent.
4508 MaybeObject* maybe_map = AllocateInitialMap(function);
4509 if (!maybe_map->To(&map)) return maybe_map;
4510 function->set_initial_map(map);
4511 map->set_constructor(function);
4513 ASSERT(map->instance_type() == JS_GENERATOR_OBJECT_TYPE);
4514 return AllocateJSObjectFromMap(map);
4518 MaybeObject* Heap::AllocateJSModule(Context* context, ScopeInfo* scope_info) {
4519 // Allocate a fresh map. Modules do not have a prototype.
4521 MaybeObject* maybe_map = AllocateMap(JS_MODULE_TYPE, JSModule::kSize);
4522 if (!maybe_map->To(&map)) return maybe_map;
4523 // Allocate the object based on the map.
4525 MaybeObject* maybe_module = AllocateJSObjectFromMap(map, TENURED);
4526 if (!maybe_module->To(&module)) return maybe_module;
4527 module->set_context(context);
4528 module->set_scope_info(scope_info);
4533 MaybeObject* Heap::AllocateJSArrayAndStorage(
4534 ElementsKind elements_kind,
4537 ArrayStorageAllocationMode mode,
4538 PretenureFlag pretenure) {
4539 MaybeObject* maybe_array = AllocateJSArray(elements_kind, pretenure);
4541 if (!maybe_array->To(&array)) return maybe_array;
4543 // TODO(mvstanton): this body of code is duplicated in AllocateJSArrayStorage
4544 // for performance reasons.
4545 ASSERT(capacity >= length);
4547 if (capacity == 0) {
4548 array->set_length(Smi::FromInt(0));
4549 array->set_elements(empty_fixed_array());
4553 FixedArrayBase* elms;
4554 MaybeObject* maybe_elms = NULL;
4555 if (IsFastDoubleElementsKind(elements_kind)) {
4556 if (mode == DONT_INITIALIZE_ARRAY_ELEMENTS) {
4557 maybe_elms = AllocateUninitializedFixedDoubleArray(capacity);
4559 ASSERT(mode == INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
4560 maybe_elms = AllocateFixedDoubleArrayWithHoles(capacity);
4563 ASSERT(IsFastSmiOrObjectElementsKind(elements_kind));
4564 if (mode == DONT_INITIALIZE_ARRAY_ELEMENTS) {
4565 maybe_elms = AllocateUninitializedFixedArray(capacity);
4567 ASSERT(mode == INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
4568 maybe_elms = AllocateFixedArrayWithHoles(capacity);
4571 if (!maybe_elms->To(&elms)) return maybe_elms;
4573 array->set_elements(elms);
4574 array->set_length(Smi::FromInt(length));
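// Illustrative call (length and capacity arguments assumed in that order):
//   MaybeObject* maybe = heap->AllocateJSArrayAndStorage(
//       FAST_HOLEY_ELEMENTS, /* length */ 0, /* capacity */ 16,
//       INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE, NOT_TENURED);
//   JSArray* array;
//   if (!maybe->To(&array)) return maybe;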
4579 MaybeObject* Heap::AllocateJSArrayAndStorageWithAllocationSite(
4580 ElementsKind elements_kind,
4583 Handle<Object> allocation_site_payload,
4584 ArrayStorageAllocationMode mode) {
4585 MaybeObject* maybe_array = AllocateJSArrayWithAllocationSite(elements_kind,
4586 allocation_site_payload);
4588 if (!maybe_array->To(&array)) return maybe_array;
4589 return AllocateJSArrayStorage(array, length, capacity, mode);
4593 MaybeObject* Heap::AllocateJSArrayStorage(
4597 ArrayStorageAllocationMode mode) {
4598 ASSERT(capacity >= length);
4600 if (capacity == 0) {
4601 array->set_length(Smi::FromInt(0));
4602 array->set_elements(empty_fixed_array());
4606 FixedArrayBase* elms;
4607 MaybeObject* maybe_elms = NULL;
4608 ElementsKind elements_kind = array->GetElementsKind();
4609 if (IsFastDoubleElementsKind(elements_kind)) {
4610 if (mode == DONT_INITIALIZE_ARRAY_ELEMENTS) {
4611 maybe_elms = AllocateUninitializedFixedDoubleArray(capacity);
4613 ASSERT(mode == INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
4614 maybe_elms = AllocateFixedDoubleArrayWithHoles(capacity);
4617 ASSERT(IsFastSmiOrObjectElementsKind(elements_kind));
4618 if (mode == DONT_INITIALIZE_ARRAY_ELEMENTS) {
4619 maybe_elms = AllocateUninitializedFixedArray(capacity);
4621 ASSERT(mode == INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
4622 maybe_elms = AllocateFixedArrayWithHoles(capacity);
4625 if (!maybe_elms->To(&elms)) return maybe_elms;
4627 array->set_elements(elms);
4628 array->set_length(Smi::FromInt(length));
4633 MaybeObject* Heap::AllocateJSArrayWithElements(
4634 FixedArrayBase* elements,
4635 ElementsKind elements_kind,
4637 PretenureFlag pretenure) {
4638 MaybeObject* maybe_array = AllocateJSArray(elements_kind, pretenure);
4640 if (!maybe_array->To(&array)) return maybe_array;
4642 array->set_elements(elements);
4643 array->set_length(Smi::FromInt(length));
4644 array->ValidateElements();
4649 MaybeObject* Heap::AllocateJSProxy(Object* handler, Object* prototype) {
4651 // TODO(rossberg): Once we optimize proxies, think about a scheme to share
4652 // maps. Will probably depend on the identity of the handler object, too.
4654 MaybeObject* maybe_map_obj = AllocateMap(JS_PROXY_TYPE, JSProxy::kSize);
4655 if (!maybe_map_obj->To<Map>(&map)) return maybe_map_obj;
4656 map->set_prototype(prototype);
4658 // Allocate the proxy object.
4660 MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
4661 if (!maybe_result->To<JSProxy>(&result)) return maybe_result;
4662 result->InitializeBody(map->instance_size(), Smi::FromInt(0));
4663 result->set_handler(handler);
4664 result->set_hash(undefined_value(), SKIP_WRITE_BARRIER);
4669 MaybeObject* Heap::AllocateJSFunctionProxy(Object* handler,
4671 Object* construct_trap,
4672 Object* prototype) {
4674 // TODO(rossberg): Once we optimize proxies, think about a scheme to share
4675 // maps. Will probably depend on the identity of the handler object, too.
4677 MaybeObject* maybe_map_obj =
4678 AllocateMap(JS_FUNCTION_PROXY_TYPE, JSFunctionProxy::kSize);
4679 if (!maybe_map_obj->To<Map>(&map)) return maybe_map_obj;
4680 map->set_prototype(prototype);
4682 // Allocate the proxy object.
4683 JSFunctionProxy* result;
4684 MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
4685 if (!maybe_result->To<JSFunctionProxy>(&result)) return maybe_result;
4686 result->InitializeBody(map->instance_size(), Smi::FromInt(0));
4687 result->set_handler(handler);
4688 result->set_hash(undefined_value(), SKIP_WRITE_BARRIER);
4689 result->set_call_trap(call_trap);
4690 result->set_construct_trap(construct_trap);
4695 MaybeObject* Heap::AllocateGlobalObject(JSFunction* constructor) {
4696 ASSERT(constructor->has_initial_map());
4697 Map* map = constructor->initial_map();
4698 ASSERT(map->is_dictionary_map());
4700 // Make sure no field properties are described in the initial map.
4701 // This guarantees us that normalizing the properties does not
4702 // require us to change property values to JSGlobalPropertyCells.
4703 ASSERT(map->NextFreePropertyIndex() == 0);
4705 // Make sure we don't have a ton of pre-allocated slots in the
4706 // global objects. They will be unused once we normalize the object.
4707 ASSERT(map->unused_property_fields() == 0);
4708 ASSERT(map->inobject_properties() == 0);
4710 // Initial size of the backing store to avoid resize of the storage during
4711 // bootstrapping. The size differs between the JS global object and the
4713 int initial_size = map->instance_type() == JS_GLOBAL_OBJECT_TYPE ? 64 : 512;
4715 // Allocate a dictionary object for backing storage.
4716 NameDictionary* dictionary;
4717 MaybeObject* maybe_dictionary =
4718 NameDictionary::Allocate(
4720 map->NumberOfOwnDescriptors() * 2 + initial_size);
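// Sizing example: a JS global object template with 8 accessor descriptors
// requests capacity for 8 * 2 + 64 = 80 entries, leaving enough headroom
// that bootstrapping does not immediately have to resize the dictionary.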
4721 if (!maybe_dictionary->To(&dictionary)) return maybe_dictionary;
4723 // The global object might be created from an object template with accessors.
4724 // Fill these accessors into the dictionary.
4725 DescriptorArray* descs = map->instance_descriptors();
4726 for (int i = 0; i < map->NumberOfOwnDescriptors(); i++) {
4727 PropertyDetails details = descs->GetDetails(i);
4728 ASSERT(details.type() == CALLBACKS); // Only accessors are expected.
4729 PropertyDetails d = PropertyDetails(details.attributes(), CALLBACKS, i + 1);
4730 Object* value = descs->GetCallbacksObject(i);
4731 MaybeObject* maybe_value = AllocateJSGlobalPropertyCell(value);
4732 if (!maybe_value->ToObject(&value)) return maybe_value;
4734 MaybeObject* maybe_added = dictionary->Add(descs->GetKey(i), value, d);
4735 if (!maybe_added->To(&dictionary)) return maybe_added;
4738 // Allocate the global object and initialize it with the backing store.
4740 MaybeObject* maybe_global = Allocate(map, OLD_POINTER_SPACE);
4741 if (!maybe_global->To(&global)) return maybe_global;
4743 InitializeJSObjectFromMap(global, dictionary, map);
4745 // Create a new map for the global object.
4747 MaybeObject* maybe_map = map->CopyDropDescriptors();
4748 if (!maybe_map->To(&new_map)) return maybe_map;
4749 new_map->set_dictionary_map(true);
4751 // Set up the global object as a normalized object.
4752 global->set_map(new_map);
4753 global->set_properties(dictionary);
4755 // Make sure result is a global object with properties in dictionary.
4756 ASSERT(global->IsGlobalObject());
4757 ASSERT(!global->HasFastProperties());
4762 MaybeObject* Heap::CopyJSObject(JSObject* source) {
4763 // Never used to copy functions. If functions need to be copied we
4764 // have to be careful to clear the literals array.
4765 SLOW_ASSERT(!source->IsJSFunction());
4768 Map* map = source->map();
4769 int object_size = map->instance_size();
4772 WriteBarrierMode wb_mode = UPDATE_WRITE_BARRIER;
4774 // If we're forced to always allocate, we use the general allocation
4775 // functions which may leave us with an object in old space.
4776 if (always_allocate()) {
4777 { MaybeObject* maybe_clone =
4778 AllocateRaw(object_size, NEW_SPACE, OLD_POINTER_SPACE);
4779 if (!maybe_clone->ToObject(&clone)) return maybe_clone;
4781 Address clone_address = HeapObject::cast(clone)->address();
4782 CopyBlock(clone_address,
4785 // Update write barrier for all fields that lie beyond the header.
4786 RecordWrites(clone_address,
4787 JSObject::kHeaderSize,
4788 (object_size - JSObject::kHeaderSize) / kPointerSize);
4790 wb_mode = SKIP_WRITE_BARRIER;
4792 { MaybeObject* maybe_clone = new_space_.AllocateRaw(object_size);
4793 if (!maybe_clone->ToObject(&clone)) return maybe_clone;
4795 SLOW_ASSERT(InNewSpace(clone));
4796 // Since we know the clone is allocated in new space, we can copy
4797 // the contents without worrying about updating the write barrier.
4798 CopyBlock(HeapObject::cast(clone)->address(),
4804 JSObject::cast(clone)->GetElementsKind() == source->GetElementsKind());
4805 FixedArrayBase* elements = FixedArrayBase::cast(source->elements());
4806 FixedArray* properties = FixedArray::cast(source->properties());
4807 // Update elements if necessary.
4808 if (elements->length() > 0) {
4810 { MaybeObject* maybe_elem;
4811 if (elements->map() == fixed_cow_array_map()) {
4812 maybe_elem = FixedArray::cast(elements);
4813 } else if (source->HasFastDoubleElements()) {
4814 maybe_elem = CopyFixedDoubleArray(FixedDoubleArray::cast(elements));
4816 maybe_elem = CopyFixedArray(FixedArray::cast(elements));
4818 if (!maybe_elem->ToObject(&elem)) return maybe_elem;
4820 JSObject::cast(clone)->set_elements(FixedArrayBase::cast(elem), wb_mode);
4822 // Update properties if necessary.
4823 if (properties->length() > 0) {
4825 { MaybeObject* maybe_prop = CopyFixedArray(properties);
4826 if (!maybe_prop->ToObject(&prop)) return maybe_prop;
4828 JSObject::cast(clone)->set_properties(FixedArray::cast(prop), wb_mode);
4830 // Return the new clone.
4835 MaybeObject* Heap::CopyJSObjectWithAllocationSite(JSObject* source) {
4836 // Never used to copy functions. If functions need to be copied we
4837 // have to be careful to clear the literals array.
4838 SLOW_ASSERT(!source->IsJSFunction());
4841 Map* map = source->map();
4842 int object_size = map->instance_size();
4845 ASSERT(map->CanTrackAllocationSite());
4846 ASSERT(map->instance_type() == JS_ARRAY_TYPE);
4847 WriteBarrierMode wb_mode = UPDATE_WRITE_BARRIER;
4849 // If we're forced to always allocate, we use the general allocation
4850 // functions which may leave us with an object in old space.
4851 int adjusted_object_size = object_size;
4852 if (always_allocate()) {
4853 // We'll only track origin if we are certain to allocate in new space
4854 const int kMinFreeNewSpaceAfterGC = InitialSemiSpaceSize() * 3/4;
4855 if ((object_size + AllocationSiteInfo::kSize) < kMinFreeNewSpaceAfterGC) {
4856 adjusted_object_size += AllocationSiteInfo::kSize;
4859 { MaybeObject* maybe_clone =
4860 AllocateRaw(adjusted_object_size, NEW_SPACE, OLD_POINTER_SPACE);
4861 if (!maybe_clone->ToObject(&clone)) return maybe_clone;
4863 Address clone_address = HeapObject::cast(clone)->address();
4864 CopyBlock(clone_address,
4867 // Update write barrier for all fields that lie beyond the header.
4868 int write_barrier_offset = adjusted_object_size > object_size
4869 ? JSArray::kSize + AllocationSiteInfo::kSize
4870 : JSObject::kHeaderSize;
4871 if (((object_size - write_barrier_offset) / kPointerSize) > 0) {
4872 RecordWrites(clone_address,
4873 write_barrier_offset,
4874 (object_size - write_barrier_offset) / kPointerSize);
4877 // Track allocation site information, if we failed to allocate it inline.
4878 if (InNewSpace(clone) &&
4879 adjusted_object_size == object_size) {
4880 MaybeObject* maybe_alloc_info =
4881 AllocateStruct(ALLOCATION_SITE_INFO_TYPE);
4882 AllocationSiteInfo* alloc_info;
4883 if (maybe_alloc_info->To(&alloc_info)) {
4884 alloc_info->set_map_no_write_barrier(allocation_site_info_map());
4885 alloc_info->set_payload(source, SKIP_WRITE_BARRIER);
4889 wb_mode = SKIP_WRITE_BARRIER;
4890 adjusted_object_size += AllocationSiteInfo::kSize;
4892 { MaybeObject* maybe_clone = new_space_.AllocateRaw(adjusted_object_size);
4893 if (!maybe_clone->ToObject(&clone)) return maybe_clone;
4895 SLOW_ASSERT(InNewSpace(clone));
4896 // Since we know the clone is allocated in new space, we can copy
4897 // the contents without worrying about updating the write barrier.
4898 CopyBlock(HeapObject::cast(clone)->address(),
4903 if (adjusted_object_size > object_size) {
4904 AllocationSiteInfo* alloc_info = reinterpret_cast<AllocationSiteInfo*>(
4905 reinterpret_cast<Address>(clone) + object_size);
4906 alloc_info->set_map_no_write_barrier(allocation_site_info_map());
4907 alloc_info->set_payload(source, SKIP_WRITE_BARRIER);
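// As in AllocateWithAllocationSite above, the clone is laid out as
//   [ JSArray copy: object_size bytes ][ AllocationSiteInfo ]
// whenever the tracking info could be allocated inline with the object.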
4911 JSObject::cast(clone)->GetElementsKind() == source->GetElementsKind());
4912 FixedArrayBase* elements = FixedArrayBase::cast(source->elements());
4913 FixedArray* properties = FixedArray::cast(source->properties());
4914 // Update elements if necessary.
4915 if (elements->length() > 0) {
4917 { MaybeObject* maybe_elem;
4918 if (elements->map() == fixed_cow_array_map()) {
4919 maybe_elem = FixedArray::cast(elements);
4920 } else if (source->HasFastDoubleElements()) {
4921 maybe_elem = CopyFixedDoubleArray(FixedDoubleArray::cast(elements));
4923 maybe_elem = CopyFixedArray(FixedArray::cast(elements));
4925 if (!maybe_elem->ToObject(&elem)) return maybe_elem;
4927 JSObject::cast(clone)->set_elements(FixedArrayBase::cast(elem), wb_mode);
4929 // Update properties if necessary.
4930 if (properties->length() > 0) {
4932 { MaybeObject* maybe_prop = CopyFixedArray(properties);
4933 if (!maybe_prop->ToObject(&prop)) return maybe_prop;
4935 JSObject::cast(clone)->set_properties(FixedArray::cast(prop), wb_mode);
4937 // Return the new clone.
4942 MaybeObject* Heap::ReinitializeJSReceiver(
4943 JSReceiver* object, InstanceType type, int size) {
4944 ASSERT(type >= FIRST_JS_OBJECT_TYPE);
4946 // Allocate fresh map.
4947 // TODO(rossberg): Once we optimize proxies, cache these maps.
4949 MaybeObject* maybe = AllocateMap(type, size);
4950 if (!maybe->To<Map>(&map)) return maybe;
4952 // Check that the receiver has at least the size of the fresh object.
4953 int size_difference = object->map()->instance_size() - map->instance_size();
4954 ASSERT(size_difference >= 0);
4956 map->set_prototype(object->map()->prototype());
4958 // Allocate the backing storage for the properties.
4959 int prop_size = map->unused_property_fields() - map->inobject_properties();
4961 maybe = AllocateFixedArray(prop_size, TENURED);
4962 if (!maybe->ToObject(&properties)) return maybe;
4964 // Functions require some allocation, which might fail here.
4965 SharedFunctionInfo* shared = NULL;
4966 if (type == JS_FUNCTION_TYPE) {
4969 InternalizeOneByteString(STATIC_ASCII_VECTOR("<freezing call trap>"));
4970 if (!maybe->To<String>(&name)) return maybe;
4971 maybe = AllocateSharedFunctionInfo(name);
4972 if (!maybe->To<SharedFunctionInfo>(&shared)) return maybe;
4975 // Because of possible retries of this function after failure,
4976 // we must NOT fail after this point, where we have changed the type!
4978 // Reset the map for the object.
4979 object->set_map(map);
4980 JSObject* jsobj = JSObject::cast(object);
4982 // Reinitialize the object from the constructor map.
4983 InitializeJSObjectFromMap(jsobj, FixedArray::cast(properties), map);
4985 // Functions require some minimal initialization.
4986 if (type == JS_FUNCTION_TYPE) {
4987 map->set_function_with_prototype(true);
4988 InitializeFunction(JSFunction::cast(object), shared, the_hole_value());
4989 JSFunction::cast(object)->set_context(
4990 isolate()->context()->native_context());
4993 // Put in filler if the new object is smaller than the old.
4994 if (size_difference > 0) {
4995 CreateFillerObjectAt(
4996 object->address() + map->instance_size(), size_difference);
5003 MaybeObject* Heap::ReinitializeJSGlobalProxy(JSFunction* constructor,
5004 JSGlobalProxy* object) {
5005 ASSERT(constructor->has_initial_map());
5006 Map* map = constructor->initial_map();
5008 // Check that the already allocated object has the same size and type as
5009 // objects allocated using the constructor.
5010 ASSERT(map->instance_size() == object->map()->instance_size());
5011 ASSERT(map->instance_type() == object->map()->instance_type());
5013 // Allocate the backing storage for the properties.
5014 int prop_size = map->unused_property_fields() - map->inobject_properties();
5016 { MaybeObject* maybe_properties = AllocateFixedArray(prop_size, TENURED);
5017 if (!maybe_properties->ToObject(&properties)) return maybe_properties;
5020 // Reset the map for the object.
5021 object->set_map(constructor->initial_map());
5023 // Reinitialize the object from the constructor map.
5024 InitializeJSObjectFromMap(object, FixedArray::cast(properties), map);
5029 MaybeObject* Heap::AllocateStringFromOneByte(Vector<const uint8_t> string,
5030 PretenureFlag pretenure) {
5031 int length = string.length();
5033 return Heap::LookupSingleCharacterStringFromCode(string[0]);
5036 { MaybeObject* maybe_result =
5037 AllocateRawOneByteString(string.length(), pretenure);
5038 if (!maybe_result->ToObject(&result)) return maybe_result;
5041 // Copy the characters into the new object.
5042 CopyChars(SeqOneByteString::cast(result)->GetChars(),
5049 MaybeObject* Heap::AllocateStringFromUtf8Slow(Vector<const char> string,
5050 int non_ascii_start,
5051 PretenureFlag pretenure) {
5052 // Continue counting the number of characters in the UTF-8 string, starting
5053 // from the first non-ascii character or word.
5054 Access<UnicodeCache::Utf8Decoder>
5055 decoder(isolate_->unicode_cache()->utf8_decoder());
5056 decoder->Reset(string.start() + non_ascii_start,
5057 string.length() - non_ascii_start);
5058 int utf16_length = decoder->Utf16Length();
5059 ASSERT(utf16_length > 0);
5063 int chars = non_ascii_start + utf16_length;
5064 MaybeObject* maybe_result = AllocateRawTwoByteString(chars, pretenure);
5065 if (!maybe_result->ToObject(&result)) return maybe_result;
5067 // Convert and copy the characters into the new object.
5068 SeqTwoByteString* twobyte = SeqTwoByteString::cast(result);
5069 // Copy the ASCII portion.
5070 uint16_t* data = twobyte->GetChars();
5071 if (non_ascii_start != 0) {
5072 const char* ascii_data = string.start();
5073 for (int i = 0; i < non_ascii_start; i++) {
5074 *data++ = *ascii_data++;
5077 // Now write the remainder.
5078 decoder->WriteUtf16(data, utf16_length);
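// Worked example (illustrative): for the UTF-8 input "abc\xE2\x82\xAC"
// (the bytes E2 82 AC encode U+20AC), non_ascii_start is 3 and the decoder
// reports utf16_length == 1, so a 4-character two-byte string is allocated:
// "abc" is copied verbatim and U+20AC is written by the decoder.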
5083 MaybeObject* Heap::AllocateStringFromTwoByte(Vector<const uc16> string,
5084 PretenureFlag pretenure) {
5085 // Check if the string is an ASCII string.
5087 int length = string.length();
5088 const uc16* start = string.start();
5090 if (String::IsOneByte(start, length)) {
5091 MaybeObject* maybe_result = AllocateRawOneByteString(length, pretenure);
5092 if (!maybe_result->ToObject(&result)) return maybe_result;
5093 CopyChars(SeqOneByteString::cast(result)->GetChars(), start, length);
5094 } else { // It's not a one byte string.
5095 MaybeObject* maybe_result = AllocateRawTwoByteString(length, pretenure);
5096 if (!maybe_result->ToObject(&result)) return maybe_result;
5097 CopyChars(SeqTwoByteString::cast(result)->GetChars(), start, length);
5103 Map* Heap::InternalizedStringMapForString(String* string) {
5104 // If the string is in new space it cannot be used as internalized.
5105 if (InNewSpace(string)) return NULL;
5107 // Find the corresponding internalized string map for strings.
5108 switch (string->map()->instance_type()) {
5109 case STRING_TYPE: return internalized_string_map();
5110 case ASCII_STRING_TYPE: return ascii_internalized_string_map();
5111 case CONS_STRING_TYPE: return cons_internalized_string_map();
5112 case CONS_ASCII_STRING_TYPE: return cons_ascii_internalized_string_map();
5113 case EXTERNAL_STRING_TYPE: return external_internalized_string_map();
5114 case EXTERNAL_ASCII_STRING_TYPE:
5115 return external_ascii_internalized_string_map();
5116 case EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE:
5117 return external_internalized_string_with_one_byte_data_map();
5118 case SHORT_EXTERNAL_STRING_TYPE:
5119 return short_external_internalized_string_map();
5120 case SHORT_EXTERNAL_ASCII_STRING_TYPE:
5121 return short_external_ascii_internalized_string_map();
5122 case SHORT_EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE:
5123 return short_external_internalized_string_with_one_byte_data_map();
5124 default: return NULL; // No match found.
5129 static inline void WriteOneByteData(Vector<const char> vector,
5132 // Only works for ascii.
5133 ASSERT(vector.length() == len);
5134 OS::MemCopy(chars, vector.start(), len);
5137 static inline void WriteTwoByteData(Vector<const char> vector,
5140 const uint8_t* stream = reinterpret_cast<const uint8_t*>(vector.start());
5141 unsigned stream_length = vector.length();
5142 while (stream_length != 0) {
5143 unsigned consumed = 0;
5144 uint32_t c = unibrow::Utf8::ValueOf(stream, stream_length, &consumed);
5145 ASSERT(c != unibrow::Utf8::kBadChar);
5146 ASSERT(consumed <= stream_length);
5147 stream_length -= consumed;
5149 if (c > unibrow::Utf16::kMaxNonSurrogateCharCode) {
5152 *chars++ = unibrow::Utf16::LeadSurrogate(c);
5153 *chars++ = unibrow::Utf16::TrailSurrogate(c);
5160 ASSERT(stream_length == 0);
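// Worked example (illustrative): a supplementary character such as U+1F600
// has c > unibrow::Utf16::kMaxNonSurrogateCharCode and is written as the
// surrogate pair 0xD83D, 0xDE00, i.e. two UTF-16 code units.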
5165 static inline void WriteOneByteData(String* s, uint8_t* chars, int len) {
5166 ASSERT(s->length() == len);
5167 String::WriteToFlat(s, chars, 0, len);
5170 static inline void WriteTwoByteData(String* s, uint16_t* chars, int len) {
5171 ASSERT(s->length() == len);
5172 String::WriteToFlat(s, chars, 0, len);
5176 template<bool is_one_byte, typename T>
5177 MaybeObject* Heap::AllocateInternalizedStringImpl(
5178 T t, int chars, uint32_t hash_field) {
5180 // Compute map and object size.
5185 if (chars > SeqOneByteString::kMaxLength) {
5186 return Failure::OutOfMemoryException(0x9);
5188 map = ascii_internalized_string_map();
5189 size = SeqOneByteString::SizeFor(chars);
5191 if (chars > SeqTwoByteString::kMaxLength) {
5192 return Failure::OutOfMemoryException(0xa);
5194 map = internalized_string_map();
5195 size = SeqTwoByteString::SizeFor(chars);
5200 { MaybeObject* maybe_result = (size > Page::kMaxNonCodeHeapObjectSize)
5201 ? lo_space_->AllocateRaw(size, NOT_EXECUTABLE)
5202 : old_data_space_->AllocateRaw(size);
5203 if (!maybe_result->ToObject(&result)) return maybe_result;
5206 reinterpret_cast<HeapObject*>(result)->set_map_no_write_barrier(map);
5207 // Set length and hash fields of the allocated string.
5208 String* answer = String::cast(result);
5209 answer->set_length(chars);
5210 answer->set_hash_field(hash_field);
5212 ASSERT_EQ(size, answer->Size());
5215 WriteOneByteData(t, SeqOneByteString::cast(answer)->GetChars(), chars);
5217 WriteTwoByteData(t, SeqTwoByteString::cast(answer)->GetChars(), chars);
5223 // Need explicit instantiations.
5225 MaybeObject* Heap::AllocateInternalizedStringImpl<true>(String*, int, uint32_t);
5227 MaybeObject* Heap::AllocateInternalizedStringImpl<false>(
5228 String*, int, uint32_t);
5230 MaybeObject* Heap::AllocateInternalizedStringImpl<false>(
5231 Vector<const char>, int, uint32_t);
5234 MaybeObject* Heap::AllocateRawOneByteString(int length,
5235 PretenureFlag pretenure) {
5236 if (length < 0 || length > SeqOneByteString::kMaxLength) {
5237 return Failure::OutOfMemoryException(0xb);
5240 int size = SeqOneByteString::SizeFor(length);
5241 ASSERT(size <= SeqOneByteString::kMaxSize);
5243 AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
5244 AllocationSpace retry_space = OLD_DATA_SPACE;
5246 if (space == NEW_SPACE) {
5247 if (size > kMaxObjectSizeInNewSpace) {
5248 // Allocate in large object space, retry space will be ignored.
5250 } else if (size > Page::kMaxNonCodeHeapObjectSize) {
5251 // Allocate in new space, retry in large object space.
5252 retry_space = LO_SPACE;
5254 } else if (space == OLD_DATA_SPACE &&
5255 size > Page::kMaxNonCodeHeapObjectSize) {
5259 { MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
5260 if (!maybe_result->ToObject(&result)) return maybe_result;
5263 // Partially initialize the object.
5264 HeapObject::cast(result)->set_map_no_write_barrier(ascii_string_map());
5265 String::cast(result)->set_length(length);
5266 String::cast(result)->set_hash_field(String::kEmptyHashField);
5267 ASSERT_EQ(size, HeapObject::cast(result)->Size());
5273 MaybeObject* Heap::AllocateRawTwoByteString(int length,
5274 PretenureFlag pretenure) {
5275 if (length < 0 || length > SeqTwoByteString::kMaxLength) {
5276 return Failure::OutOfMemoryException(0xc);
5278 int size = SeqTwoByteString::SizeFor(length);
5279 ASSERT(size <= SeqTwoByteString::kMaxSize);
5280 AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
5281 AllocationSpace retry_space = OLD_DATA_SPACE;
5283 if (space == NEW_SPACE) {
5284 if (size > kMaxObjectSizeInNewSpace) {
5285 // Allocate in large object space, retry space will be ignored.
5287 } else if (size > Page::kMaxNonCodeHeapObjectSize) {
5288 // Allocate in new space, retry in large object space.
5289 retry_space = LO_SPACE;
5291 } else if (space == OLD_DATA_SPACE &&
5292 size > Page::kMaxNonCodeHeapObjectSize) {
5296 { MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
5297 if (!maybe_result->ToObject(&result)) return maybe_result;
5300 // Partially initialize the object.
5301 HeapObject::cast(result)->set_map_no_write_barrier(string_map());
5302 String::cast(result)->set_length(length);
5303 String::cast(result)->set_hash_field(String::kEmptyHashField);
5304 ASSERT_EQ(size, HeapObject::cast(result)->Size());
5309 MaybeObject* Heap::AllocateJSArray(
5310 ElementsKind elements_kind,
5311 PretenureFlag pretenure) {
5312 Context* native_context = isolate()->context()->native_context();
5313 JSFunction* array_function = native_context->array_function();
5314 Map* map = array_function->initial_map();
5315 Map* transition_map = isolate()->get_initial_js_array_map(elements_kind);
5316 if (transition_map != NULL) map = transition_map;
5317 return AllocateJSObjectFromMap(map, pretenure);
5321 MaybeObject* Heap::AllocateJSArrayWithAllocationSite(
5322 ElementsKind elements_kind,
5323 Handle<Object> allocation_site_info_payload) {
5324 Context* native_context = isolate()->context()->native_context();
5325 JSFunction* array_function = native_context->array_function();
5326 Map* map = array_function->initial_map();
5327 Object* maybe_map_array = native_context->js_array_maps();
5328 if (!maybe_map_array->IsUndefined()) {
5329 Object* maybe_transitioned_map =
5330 FixedArray::cast(maybe_map_array)->get(elements_kind);
5331 if (!maybe_transitioned_map->IsUndefined()) {
5332 map = Map::cast(maybe_transitioned_map);
5335 return AllocateJSObjectFromMapWithAllocationSite(map,
5336 allocation_site_info_payload);
5340 MaybeObject* Heap::AllocateEmptyFixedArray() {
5341 int size = FixedArray::SizeFor(0);
5343 { MaybeObject* maybe_result =
5344 AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE);
5345 if (!maybe_result->ToObject(&result)) return maybe_result;
5347 // Initialize the object.
5348 reinterpret_cast<FixedArray*>(result)->set_map_no_write_barrier(
5350 reinterpret_cast<FixedArray*>(result)->set_length(0);
5354 MaybeObject* Heap::AllocateEmptyExternalArray(ExternalArrayType array_type) {
5355 return AllocateExternalArray(0, array_type, NULL, TENURED);
5359 MaybeObject* Heap::AllocateRawFixedArray(int length) {
5360 if (length < 0 || length > FixedArray::kMaxLength) {
5361 return Failure::OutOfMemoryException(0xd);
5364 // Use the general function if we're forced to always allocate.
5365 if (always_allocate()) return AllocateFixedArray(length, TENURED);
5366 // Allocate the raw data for a fixed array.
5367 int size = FixedArray::SizeFor(length);
5368 return size <= kMaxObjectSizeInNewSpace
5369 ? new_space_.AllocateRaw(size)
5370 : lo_space_->AllocateRaw(size, NOT_EXECUTABLE);
5374 MaybeObject* Heap::CopyFixedArrayWithMap(FixedArray* src, Map* map) {
5375 int len = src->length();
5377 { MaybeObject* maybe_obj = AllocateRawFixedArray(len);
5378 if (!maybe_obj->ToObject(&obj)) return maybe_obj;
5380 if (InNewSpace(obj)) {
5381 HeapObject* dst = HeapObject::cast(obj);
5382 dst->set_map_no_write_barrier(map);
5383 CopyBlock(dst->address() + kPointerSize,
5384 src->address() + kPointerSize,
5385 FixedArray::SizeFor(len) - kPointerSize);
5388 HeapObject::cast(obj)->set_map_no_write_barrier(map);
5389 FixedArray* result = FixedArray::cast(obj);
5390 result->set_length(len);
5393 DisallowHeapAllocation no_gc;
5394 WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
5395 for (int i = 0; i < len; i++) result->set(i, src->get(i), mode);
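// Note on the two copy paths above: a copy that lands in new space can be
// filled with a raw CopyBlock and no write barrier, since only stores that
// create old-to-new pointers need to be recorded. A copy placed outside new
// space goes through set() with the WriteBarrierMode obtained above, so any
// new-space references in the source are still registered with the store
// buffer.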
5400 MaybeObject* Heap::CopyFixedDoubleArrayWithMap(FixedDoubleArray* src,
5402 int len = src->length();
5404 { MaybeObject* maybe_obj = AllocateRawFixedDoubleArray(len, NOT_TENURED);
5405 if (!maybe_obj->ToObject(&obj)) return maybe_obj;
5407 HeapObject* dst = HeapObject::cast(obj);
5408 dst->set_map_no_write_barrier(map);
5410 dst->address() + FixedDoubleArray::kLengthOffset,
5411 src->address() + FixedDoubleArray::kLengthOffset,
5412 FixedDoubleArray::SizeFor(len) - FixedDoubleArray::kLengthOffset);
5417 MaybeObject* Heap::AllocateFixedArray(int length) {
5418 ASSERT(length >= 0);
5419 if (length == 0) return empty_fixed_array();
5421 { MaybeObject* maybe_result = AllocateRawFixedArray(length);
5422 if (!maybe_result->ToObject(&result)) return maybe_result;
5424 // Initialize header.
5425 FixedArray* array = reinterpret_cast<FixedArray*>(result);
5426 array->set_map_no_write_barrier(fixed_array_map());
5427 array->set_length(length);
5429 ASSERT(!InNewSpace(undefined_value()));
5430 MemsetPointer(array->data_start(), undefined_value(), length);
5435 MaybeObject* Heap::AllocateRawFixedArray(int length, PretenureFlag pretenure) {
5436 if (length < 0 || length > FixedArray::kMaxLength) {
5437 return Failure::OutOfMemoryException(0xe);
5440 AllocationSpace space =
5441 (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
5442 int size = FixedArray::SizeFor(length);
5443 if (space == NEW_SPACE && size > kMaxObjectSizeInNewSpace) {
5444 // Too big for new space.
5446 } else if (space == OLD_POINTER_SPACE &&
5447 size > Page::kMaxNonCodeHeapObjectSize) {
5448 // Too big for old pointer space.
5452 AllocationSpace retry_space =
5453 (size <= Page::kMaxNonCodeHeapObjectSize) ? OLD_POINTER_SPACE : LO_SPACE;
5455 return AllocateRaw(size, space, retry_space);
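// A condensed sketch of the space selection above (the elided branch bodies
// are assumed to switch |space| to LO_SPACE, as their comments indicate):
//   NOT_TENURED and the array fits in new space    -> NEW_SPACE
//   TENURED and it fits on a regular page          -> OLD_POINTER_SPACE
//   anything larger                                -> LO_SPACE
// retry_space is OLD_POINTER_SPACE for page-sized arrays and LO_SPACE
// otherwise; the string allocators earlier in this file follow the same
// pattern with OLD_DATA_SPACE.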
5459 MUST_USE_RESULT static MaybeObject* AllocateFixedArrayWithFiller(
5462 PretenureFlag pretenure,
5464 ASSERT(length >= 0);
5465 ASSERT(heap->empty_fixed_array()->IsFixedArray());
5466 if (length == 0) return heap->empty_fixed_array();
5468 ASSERT(!heap->InNewSpace(filler));
5470 { MaybeObject* maybe_result = heap->AllocateRawFixedArray(length, pretenure);
5471 if (!maybe_result->ToObject(&result)) return maybe_result;
5474 HeapObject::cast(result)->set_map_no_write_barrier(heap->fixed_array_map());
5475 FixedArray* array = FixedArray::cast(result);
5476 array->set_length(length);
5477 MemsetPointer(array->data_start(), filler, length);
5482 MaybeObject* Heap::AllocateFixedArray(int length, PretenureFlag pretenure) {
5483 return AllocateFixedArrayWithFiller(this,
5490 MaybeObject* Heap::AllocateFixedArrayWithHoles(int length,
5491 PretenureFlag pretenure) {
5492 return AllocateFixedArrayWithFiller(this,
5499 MaybeObject* Heap::AllocateUninitializedFixedArray(int length) {
5500 if (length == 0) return empty_fixed_array();
5503 { MaybeObject* maybe_obj = AllocateRawFixedArray(length);
5504 if (!maybe_obj->ToObject(&obj)) return maybe_obj;
5507 reinterpret_cast<FixedArray*>(obj)->set_map_no_write_barrier(
5509 FixedArray::cast(obj)->set_length(length);
5514 MaybeObject* Heap::AllocateEmptyFixedDoubleArray() {
5515 int size = FixedDoubleArray::SizeFor(0);
5517 { MaybeObject* maybe_result =
5518 AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE);
5519 if (!maybe_result->ToObject(&result)) return maybe_result;
5521 // Initialize the object.
5522 reinterpret_cast<FixedDoubleArray*>(result)->set_map_no_write_barrier(
5523 fixed_double_array_map());
5524 reinterpret_cast<FixedDoubleArray*>(result)->set_length(0);
5529 MaybeObject* Heap::AllocateUninitializedFixedDoubleArray(
5531 PretenureFlag pretenure) {
5532 if (length == 0) return empty_fixed_array();
5534 Object* elements_object;
5535 MaybeObject* maybe_obj = AllocateRawFixedDoubleArray(length, pretenure);
5536 if (!maybe_obj->ToObject(&elements_object)) return maybe_obj;
5537 FixedDoubleArray* elements =
5538 reinterpret_cast<FixedDoubleArray*>(elements_object);
5540 elements->set_map_no_write_barrier(fixed_double_array_map());
5541 elements->set_length(length);
5546 MaybeObject* Heap::AllocateFixedDoubleArrayWithHoles(
5548 PretenureFlag pretenure) {
5549 if (length == 0) return empty_fixed_array();
5551 Object* elements_object;
5552 MaybeObject* maybe_obj = AllocateRawFixedDoubleArray(length, pretenure);
5553 if (!maybe_obj->ToObject(&elements_object)) return maybe_obj;
5554 FixedDoubleArray* elements =
5555 reinterpret_cast<FixedDoubleArray*>(elements_object);
5557 for (int i = 0; i < length; ++i) {
5558 elements->set_the_hole(i);
5561 elements->set_map_no_write_barrier(fixed_double_array_map());
5562 elements->set_length(length);
5567 MaybeObject* Heap::AllocateRawFixedDoubleArray(int length,
5568 PretenureFlag pretenure) {
5569 if (length < 0 || length > FixedDoubleArray::kMaxLength) {
5570 return Failure::OutOfMemoryException(0xf);
5573 AllocationSpace space =
5574 (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
5575 int size = FixedDoubleArray::SizeFor(length);
5577 #ifndef V8_HOST_ARCH_64_BIT
5578 size += kPointerSize;
5581 if (space == NEW_SPACE && size > kMaxObjectSizeInNewSpace) {
5582 // Too big for new space.
5584 } else if (space == OLD_DATA_SPACE &&
5585 size > Page::kMaxNonCodeHeapObjectSize) {
5586 // Too big for old data space.
5590 AllocationSpace retry_space =
5591 (size <= Page::kMaxNonCodeHeapObjectSize) ? OLD_DATA_SPACE : LO_SPACE;
5594 { MaybeObject* maybe_object = AllocateRaw(size, space, retry_space);
5595 if (!maybe_object->To<HeapObject>(&object)) return maybe_object;
5598 return EnsureDoubleAligned(this, object, size);
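// Assumed rationale for the extra kPointerSize added above on 32-bit hosts:
// the allocator only guarantees kPointerSize alignment there, so one word of
// slack gives EnsureDoubleAligned room to insert a filler and place the
// double fields on an 8-byte boundary.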
5602 MaybeObject* Heap::AllocateHashTable(int length, PretenureFlag pretenure) {
5604 { MaybeObject* maybe_result = AllocateFixedArray(length, pretenure);
5605 if (!maybe_result->ToObject(&result)) return maybe_result;
5607 reinterpret_cast<HeapObject*>(result)->set_map_no_write_barrier(
5609 ASSERT(result->IsHashTable());
5614 MaybeObject* Heap::AllocateSymbol() {
5615 // Statically ensure that it is safe to allocate symbols in paged spaces.
5616 STATIC_ASSERT(Symbol::kSize <= Page::kNonCodeObjectAreaSize);
5619 MaybeObject* maybe =
5620 AllocateRaw(Symbol::kSize, OLD_POINTER_SPACE, OLD_POINTER_SPACE);
5621 if (!maybe->ToObject(&result)) return maybe;
5623 HeapObject::cast(result)->set_map_no_write_barrier(symbol_map());
5625 // Generate a random hash value.
5629 hash = V8::RandomPrivate(isolate()) & Name::kHashBitMask;
5631 } while (hash == 0 && attempts < 30);
5632 if (hash == 0) hash = 1; // never return 0
5634 Symbol::cast(result)->set_hash_field(
5635 Name::kIsNotArrayIndexMask | (hash << Name::kHashShift));
5636 Symbol::cast(result)->set_name(undefined_value());
5638 ASSERT(result->IsSymbol());
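// Note (an assumption about the Name hash encoding): OR-ing in
// kIsNotArrayIndexMask marks the field as holding a real hash rather than a
// cached array index, which symbols never have.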
5643 MaybeObject* Heap::AllocateNativeContext() {
5645 { MaybeObject* maybe_result =
5646 AllocateFixedArray(Context::NATIVE_CONTEXT_SLOTS);
5647 if (!maybe_result->ToObject(&result)) return maybe_result;
5649 Context* context = reinterpret_cast<Context*>(result);
5650 context->set_map_no_write_barrier(native_context_map());
5651 context->set_js_array_maps(undefined_value());
5652 ASSERT(context->IsNativeContext());
5653 ASSERT(result->IsContext());
5658 MaybeObject* Heap::AllocateGlobalContext(JSFunction* function,
5659 ScopeInfo* scope_info) {
5661 { MaybeObject* maybe_result =
5662 AllocateFixedArray(scope_info->ContextLength(), TENURED);
5663 if (!maybe_result->ToObject(&result)) return maybe_result;
5665 Context* context = reinterpret_cast<Context*>(result);
5666 context->set_map_no_write_barrier(global_context_map());
5667 context->set_closure(function);
5668 context->set_previous(function->context());
5669 context->set_extension(scope_info);
5670 context->set_global_object(function->context()->global_object());
5671 ASSERT(context->IsGlobalContext());
5672 ASSERT(result->IsContext());
5677 MaybeObject* Heap::AllocateModuleContext(ScopeInfo* scope_info) {
5679 { MaybeObject* maybe_result =
5680 AllocateFixedArray(scope_info->ContextLength(), TENURED);
5681 if (!maybe_result->ToObject(&result)) return maybe_result;
5683 Context* context = reinterpret_cast<Context*>(result);
5684 context->set_map_no_write_barrier(module_context_map());
5685 // Instance link will be set later.
5686 context->set_extension(Smi::FromInt(0));
5691 MaybeObject* Heap::AllocateFunctionContext(int length, JSFunction* function) {
5692 ASSERT(length >= Context::MIN_CONTEXT_SLOTS);
5694 { MaybeObject* maybe_result = AllocateFixedArray(length);
5695 if (!maybe_result->ToObject(&result)) return maybe_result;
5697 Context* context = reinterpret_cast<Context*>(result);
5698 context->set_map_no_write_barrier(function_context_map());
5699 context->set_closure(function);
5700 context->set_previous(function->context());
5701 context->set_extension(Smi::FromInt(0));
5702 context->set_global_object(function->context()->global_object());
5707 MaybeObject* Heap::AllocateCatchContext(JSFunction* function,
5710 Object* thrown_object) {
5711 STATIC_ASSERT(Context::MIN_CONTEXT_SLOTS == Context::THROWN_OBJECT_INDEX);
5713 { MaybeObject* maybe_result =
5714 AllocateFixedArray(Context::MIN_CONTEXT_SLOTS + 1);
5715 if (!maybe_result->ToObject(&result)) return maybe_result;
5717 Context* context = reinterpret_cast<Context*>(result);
5718 context->set_map_no_write_barrier(catch_context_map());
5719 context->set_closure(function);
5720 context->set_previous(previous);
5721 context->set_extension(name);
5722 context->set_global_object(previous->global_object());
5723 context->set(Context::THROWN_OBJECT_INDEX, thrown_object);
5728 MaybeObject* Heap::AllocateWithContext(JSFunction* function,
5730 JSObject* extension) {
5732 { MaybeObject* maybe_result = AllocateFixedArray(Context::MIN_CONTEXT_SLOTS);
5733 if (!maybe_result->ToObject(&result)) return maybe_result;
5735 Context* context = reinterpret_cast<Context*>(result);
5736 context->set_map_no_write_barrier(with_context_map());
5737 context->set_closure(function);
5738 context->set_previous(previous);
5739 context->set_extension(extension);
5740 context->set_global_object(previous->global_object());
5745 MaybeObject* Heap::AllocateBlockContext(JSFunction* function,
5747 ScopeInfo* scope_info) {
5749 { MaybeObject* maybe_result =
5750 AllocateFixedArrayWithHoles(scope_info->ContextLength());
5751 if (!maybe_result->ToObject(&result)) return maybe_result;
5753 Context* context = reinterpret_cast<Context*>(result);
5754 context->set_map_no_write_barrier(block_context_map());
5755 context->set_closure(function);
5756 context->set_previous(previous);
5757 context->set_extension(scope_info);
5758 context->set_global_object(previous->global_object());
5763 MaybeObject* Heap::AllocateScopeInfo(int length) {
5764 FixedArray* scope_info;
5765 MaybeObject* maybe_scope_info = AllocateFixedArray(length, TENURED);
5766 if (!maybe_scope_info->To(&scope_info)) return maybe_scope_info;
5767 scope_info->set_map_no_write_barrier(scope_info_map());
5772 MaybeObject* Heap::AllocateExternal(void* value) {
5774 { MaybeObject* maybe_result = AllocateForeign(static_cast<Address>(value));
5775 if (!maybe_result->To(&foreign)) return maybe_result;
5778 { MaybeObject* maybe_result = AllocateJSObjectFromMap(external_map());
5779 if (!maybe_result->To(&external)) return maybe_result;
5781 external->SetInternalField(0, foreign);
5786 MaybeObject* Heap::AllocateStruct(InstanceType type) {
5789 #define MAKE_CASE(NAME, Name, name) \
5790 case NAME##_TYPE: map = name##_map(); break;
5791 STRUCT_LIST(MAKE_CASE)
5795 return Failure::InternalError();
5797 int size = map->instance_size();
5798 AllocationSpace space =
5799 (size > Page::kMaxNonCodeHeapObjectSize) ? LO_SPACE : OLD_POINTER_SPACE;
5801 { MaybeObject* maybe_result = Allocate(map, space);
5802 if (!maybe_result->ToObject(&result)) return maybe_result;
5804 Struct::cast(result)->InitializeBody(size);
5809 bool Heap::IsHeapIterable() {
5810 return (!old_pointer_space()->was_swept_conservatively() &&
5811 !old_data_space()->was_swept_conservatively());
5815 void Heap::EnsureHeapIsIterable() {
5816 ASSERT(AllowHeapAllocation::IsAllowed());
5817 if (!IsHeapIterable()) {
5818 CollectAllGarbage(kMakeHeapIterableMask, "Heap::EnsureHeapIsIterable");
5820 ASSERT(IsHeapIterable());
5824 void Heap::AdvanceIdleIncrementalMarking(intptr_t step_size) {
5825 incremental_marking()->Step(step_size,
5826 IncrementalMarking::NO_GC_VIA_STACK_GUARD);
5828 if (incremental_marking()->IsComplete()) {
5829 bool uncommit = false;
5830 if (gc_count_at_last_idle_gc_ == gc_count_) {
5831 // No GC since the last full GC, the mutator is probably not active.
5832 isolate_->compilation_cache()->Clear();
5835 CollectAllGarbage(kNoGCFlags, "idle notification: finalize incremental");
5836 gc_count_at_last_idle_gc_ = gc_count_;
5838 new_space_.Shrink();
5839 UncommitFromSpace();
5845 bool Heap::IdleNotification(int hint) {
5846 // Hints greater than this value indicate that
5847 // the embedder is requesting a lot of GC work.
5848 const int kMaxHint = 1000;
5849 // The minimal hint that still allows a full GC to be performed.
5850 const int kMinHintForFullGC = 100;
5851 intptr_t size_factor = Min(Max(hint, 20), kMaxHint) / 4;
5852 // The size factor is in range [5..250]. The numbers here are chosen from
5853 // experiments. If you change them, make sure to test with
5854 // chrome/performance_ui_tests --gtest_filter="GeneralMixMemoryTest.*".
5855 intptr_t step_size =
5856 size_factor * IncrementalMarking::kAllocatedThreshold;
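// Worked example of the scaling above: hint >= kMaxHint (1000) gives
// size_factor == 250 and step_size == 250 * kAllocatedThreshold, while the
// clamped minimum hint of 20 gives size_factor == 5.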
5858 if (contexts_disposed_ > 0) {
5859 if (hint >= kMaxHint) {
5860 // The embedder is requesting a lot of GC work after context disposal,
5861 // so we age inline caches so that they don't keep objects from
5862 // the old context alive.
5865 int mark_sweep_time = Min(TimeMarkSweepWouldTakeInMs(), 1000);
5866 if (hint >= mark_sweep_time && !FLAG_expose_gc &&
5867 incremental_marking()->IsStopped()) {
5868 HistogramTimerScope scope(isolate_->counters()->gc_context());
5869 CollectAllGarbage(kReduceMemoryFootprintMask,
5870 "idle notification: contexts disposed");
5872 AdvanceIdleIncrementalMarking(step_size);
5873 contexts_disposed_ = 0;
5875 // After context disposal there is likely a lot of garbage remaining, reset
5876 // the idle notification counters in order to trigger more incremental GCs
5877 // on subsequent idle notifications.
5882 if (!FLAG_incremental_marking || FLAG_expose_gc || Serializer::enabled()) {
5883 return IdleGlobalGC();
5886 // We do small chunks of GC work in each IdleNotification: perform a round
5887 // of incremental GCs, and after that wait until the mutator creates enough
5888 // garbage to justify a new round.
5889 // An incremental GC progresses as follows:
5890 // 1. many incremental marking steps,
5891 // 2. one old space mark-sweep-compact,
5892 // 3. many lazy sweep steps.
5893 // Use mark-sweep-compact events to count incremental GCs in a round.
5895 if (incremental_marking()->IsStopped()) {
5896 if (!mark_compact_collector()->AreSweeperThreadsActivated() &&
5897 !IsSweepingComplete() &&
5898 !AdvanceSweepers(static_cast<int>(step_size))) {
5903 if (mark_sweeps_since_idle_round_started_ >= kMaxMarkSweepsInIdleRound) {
5904 if (EnoughGarbageSinceLastIdleRound()) {
5911 int new_mark_sweeps = ms_count_ - ms_count_at_last_idle_notification_;
5912 mark_sweeps_since_idle_round_started_ += new_mark_sweeps;
5913 ms_count_at_last_idle_notification_ = ms_count_;
5915 int remaining_mark_sweeps = kMaxMarkSweepsInIdleRound -
5916 mark_sweeps_since_idle_round_started_;
5918 if (remaining_mark_sweeps <= 0) {
5923 if (incremental_marking()->IsStopped()) {
5924 // If there are no more than two GCs left in this idle round and we are
5925 // allowed to do a full GC, then make those GCs full in order to compact the code space.
5927 // TODO(ulan): Once we enable code compaction for incremental marking,
5928 // we can get rid of this special case and always start incremental marking.
5929 if (remaining_mark_sweeps <= 2 && hint >= kMinHintForFullGC) {
5930 CollectAllGarbage(kReduceMemoryFootprintMask,
5931 "idle notification: finalize idle round");
5933 incremental_marking()->Start();
5936 if (!incremental_marking()->IsStopped()) {
5937 AdvanceIdleIncrementalMarking(step_size);
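// A hedged sketch of how an embedder typically drives this path, assuming
// the v8::V8::IdleNotification(int) API of this V8 version:
//
//   // In the embedder's message-loop idle handler:
//   bool done = v8::V8::IdleNotification(hint);  // hint as described above
//   if (!done) {
//     // Keep posting idle callbacks (embedder-specific) until V8 reports
//     // that no further idle GC work is useful.
//   }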
5943 bool Heap::IdleGlobalGC() {
5944 static const int kIdlesBeforeScavenge = 4;
5945 static const int kIdlesBeforeMarkSweep = 7;
5946 static const int kIdlesBeforeMarkCompact = 8;
5947 static const int kMaxIdleCount = kIdlesBeforeMarkCompact + 1;
5948 static const unsigned int kGCsBetweenCleanup = 4;
5950 if (!last_idle_notification_gc_count_init_) {
5951 last_idle_notification_gc_count_ = gc_count_;
5952 last_idle_notification_gc_count_init_ = true;
5955 bool uncommit = true;
5956 bool finished = false;
5958 // Reset the number of idle notifications received when a number of
5959 // GCs have taken place. This allows another round of cleanup based
5960 // on idle notifications if enough work has been carried out to
5961 // provoke a number of garbage collections.
5962 if (gc_count_ - last_idle_notification_gc_count_ < kGCsBetweenCleanup) {
5963 number_idle_notifications_ =
5964 Min(number_idle_notifications_ + 1, kMaxIdleCount);
5966 number_idle_notifications_ = 0;
5967 last_idle_notification_gc_count_ = gc_count_;
5970 if (number_idle_notifications_ == kIdlesBeforeScavenge) {
5971 CollectGarbage(NEW_SPACE, "idle notification");
5972 new_space_.Shrink();
5973 last_idle_notification_gc_count_ = gc_count_;
5974 } else if (number_idle_notifications_ == kIdlesBeforeMarkSweep) {
5975 // Before doing the mark-sweep collections we clear the
5976 // compilation cache to avoid hanging on to source code and
5977 // generated code for cached functions.
5978 isolate_->compilation_cache()->Clear();
5980 CollectAllGarbage(kReduceMemoryFootprintMask, "idle notification");
5981 new_space_.Shrink();
5982 last_idle_notification_gc_count_ = gc_count_;
5984 } else if (number_idle_notifications_ == kIdlesBeforeMarkCompact) {
5985 CollectAllGarbage(kReduceMemoryFootprintMask, "idle notification");
5986 new_space_.Shrink();
5987 last_idle_notification_gc_count_ = gc_count_;
5988 number_idle_notifications_ = 0;
5990 } else if (number_idle_notifications_ > kIdlesBeforeMarkCompact) {
5991 // If we have received more than kIdlesBeforeMarkCompact idle
5992 // notifications we do not perform any cleanup because we don't
5993 // expect to gain much by doing so.
5997 if (uncommit) UncommitFromSpace();
6005 void Heap::Print() {
6006 if (!HasBeenSetUp()) return;
6007 isolate()->PrintStack(stdout);
6008 AllSpaces spaces(this);
6009 for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
6015 void Heap::ReportCodeStatistics(const char* title) {
6016 PrintF(">>>>>> Code Stats (%s) >>>>>>\n", title);
6017 PagedSpace::ResetCodeStatistics();
6018 // We do not look for code in new space, map space, or old space. If code
6019 // somehow ends up in those spaces, we would miss it here.
6020 code_space_->CollectCodeStatistics();
6021 lo_space_->CollectCodeStatistics();
6022 PagedSpace::ReportCodeStatistics();
6026 // This function expects that NewSpace's allocated objects histogram is
6027 // populated (via a call to CollectStatistics or else as a side effect of a
6028 // just-completed scavenge collection).
6029 void Heap::ReportHeapStatistics(const char* title) {
6031 PrintF(">>>>>> =============== %s (%d) =============== >>>>>>\n",
6033 PrintF("old_generation_allocation_limit_ %" V8_PTR_PREFIX "d\n",
6034 old_generation_allocation_limit_);
6037 PrintF("Number of handles : %d\n", HandleScope::NumberOfHandles(isolate_));
6038 isolate_->global_handles()->PrintStats();
6041 PrintF("Heap statistics : ");
6042 isolate_->memory_allocator()->ReportStatistics();
6043 PrintF("To space : ");
6044 new_space_.ReportStatistics();
6045 PrintF("Old pointer space : ");
6046 old_pointer_space_->ReportStatistics();
6047 PrintF("Old data space : ");
6048 old_data_space_->ReportStatistics();
6049 PrintF("Code space : ");
6050 code_space_->ReportStatistics();
6051 PrintF("Map space : ");
6052 map_space_->ReportStatistics();
6053 PrintF("Cell space : ");
6054 cell_space_->ReportStatistics();
6055 PrintF("Large object space : ");
6056 lo_space_->ReportStatistics();
6057 PrintF(">>>>>> ========================================= >>>>>>\n");
6062 bool Heap::Contains(HeapObject* value) {
6063 return Contains(value->address());
6067 bool Heap::Contains(Address addr) {
6068 if (OS::IsOutsideAllocatedSpace(addr)) return false;
6069 return HasBeenSetUp() &&
6070 (new_space_.ToSpaceContains(addr) ||
6071 old_pointer_space_->Contains(addr) ||
6072 old_data_space_->Contains(addr) ||
6073 code_space_->Contains(addr) ||
6074 map_space_->Contains(addr) ||
6075 cell_space_->Contains(addr) ||
6076 lo_space_->SlowContains(addr));
6080 bool Heap::InSpace(HeapObject* value, AllocationSpace space) {
6081 return InSpace(value->address(), space);
6085 bool Heap::InSpace(Address addr, AllocationSpace space) {
6086 if (OS::IsOutsideAllocatedSpace(addr)) return false;
6087 if (!HasBeenSetUp()) return false;
6091 return new_space_.ToSpaceContains(addr);
6092 case OLD_POINTER_SPACE:
6093 return old_pointer_space_->Contains(addr);
6094 case OLD_DATA_SPACE:
6095 return old_data_space_->Contains(addr);
6097 return code_space_->Contains(addr);
6099 return map_space_->Contains(addr);
6101 return cell_space_->Contains(addr);
6103 return lo_space_->SlowContains(addr);
6111 void Heap::Verify() {
6112 CHECK(HasBeenSetUp());
6114 store_buffer()->Verify();
6116 VerifyPointersVisitor visitor;
6117 IterateRoots(&visitor, VISIT_ONLY_STRONG);
6119 new_space_.Verify();
6121 old_pointer_space_->Verify(&visitor);
6122 map_space_->Verify(&visitor);
6124 VerifyPointersVisitor no_dirty_regions_visitor;
6125 old_data_space_->Verify(&no_dirty_regions_visitor);
6126 code_space_->Verify(&no_dirty_regions_visitor);
6127 cell_space_->Verify(&no_dirty_regions_visitor);
6129 lo_space_->Verify();
6134 MaybeObject* Heap::InternalizeUtf8String(Vector<const char> string) {
6135 Object* result = NULL;
6137 { MaybeObject* maybe_new_table =
6138 string_table()->LookupUtf8String(string, &result);
6139 if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
6141 // Can't use set_string_table because StringTable::cast knows that
6142 // StringTable is a singleton and checks for identity.
6143 roots_[kStringTableRootIndex] = new_table;
6144 ASSERT(result != NULL);
6149 MaybeObject* Heap::InternalizeOneByteString(Vector<const uint8_t> string) {
6150 Object* result = NULL;
6152 { MaybeObject* maybe_new_table =
6153 string_table()->LookupOneByteString(string, &result);
6154 if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
6156 // Can't use set_string_table because StringTable::cast knows that
6157 // StringTable is a singleton and checks for identity.
6158 roots_[kStringTableRootIndex] = new_table;
6159 ASSERT(result != NULL);
6164 MaybeObject* Heap::InternalizeOneByteString(Handle<SeqOneByteString> string,
6167 Object* result = NULL;
6169 { MaybeObject* maybe_new_table =
6170 string_table()->LookupSubStringOneByteString(string,
6174 if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
6176 // Can't use set_string_table because StringTable::cast knows that
6177 // StringTable is a singleton and checks for identity.
6178 roots_[kStringTableRootIndex] = new_table;
6179 ASSERT(result != NULL);
6184 MaybeObject* Heap::InternalizeTwoByteString(Vector<const uc16> string) {
6185 Object* result = NULL;
6187 { MaybeObject* maybe_new_table =
6188 string_table()->LookupTwoByteString(string, &result);
6189 if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
6191 // Can't use set_string_table because StringTable::cast knows that
6192 // StringTable is a singleton and checks for identity.
6193 roots_[kStringTableRootIndex] = new_table;
6194 ASSERT(result != NULL);
6199 MaybeObject* Heap::InternalizeString(String* string) {
6200 if (string->IsInternalizedString()) return string;
6201 Object* result = NULL;
6203 { MaybeObject* maybe_new_table =
6204 string_table()->LookupString(string, &result);
6205 if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
6207 // Can't use set_string_table because StringTable::cast knows that
6208 // StringTable is a singleton and checks for identity.
6209 roots_[kStringTableRootIndex] = new_table;
6210 ASSERT(result != NULL);
6215 bool Heap::InternalizeStringIfExists(String* string, String** result) {
6216 if (string->IsInternalizedString()) {
6220 return string_table()->LookupStringIfExists(string, result);
6224 void Heap::ZapFromSpace() {
6225 NewSpacePageIterator it(new_space_.FromSpaceStart(),
6226 new_space_.FromSpaceEnd());
6227 while (it.has_next()) {
6228 NewSpacePage* page = it.next();
6229 for (Address cursor = page->area_start(), limit = page->area_end();
6231 cursor += kPointerSize) {
6232 Memory::Address_at(cursor) = kFromSpaceZapValue;
6238 void Heap::IterateAndMarkPointersToFromSpace(Address start,
6240 ObjectSlotCallback callback) {
6241 Address slot_address = start;
6243 // We are not collecting slots on new space objects during mutation,
6244 // thus we have to scan for pointers to evacuation candidates when we
6245 // promote objects. But we should not record any slots in non-black
6246 // objects. A grey object's slots would be rescanned anyway, and a
6247 // white object might not survive until the end of the collection, so
6248 // recording its slots would violate the invariant.
6249 bool record_slots = false;
6250 if (incremental_marking()->IsCompacting()) {
6251 MarkBit mark_bit = Marking::MarkBitFrom(HeapObject::FromAddress(start));
6252 record_slots = Marking::IsBlack(mark_bit);
6255 while (slot_address < end) {
6256 Object** slot = reinterpret_cast<Object**>(slot_address);
6257 Object* object = *slot;
6258 // If the store buffer becomes overfull we mark pages as being exempt from
6259 // the store buffer. These pages are scanned to find pointers that point
6260 // to the new space. In that case we may hit newly promoted objects and
6261 // fix the pointers before the promotion queue gets to them. Thus the 'if'.
6262 if (object->IsHeapObject()) {
6263 if (Heap::InFromSpace(object)) {
6264 callback(reinterpret_cast<HeapObject**>(slot),
6265 HeapObject::cast(object));
6266 Object* new_object = *slot;
6267 if (InNewSpace(new_object)) {
6268 SLOW_ASSERT(Heap::InToSpace(new_object));
6269 SLOW_ASSERT(new_object->IsHeapObject());
6270 store_buffer_.EnterDirectlyIntoStoreBuffer(
6271 reinterpret_cast<Address>(slot));
6273 SLOW_ASSERT(!MarkCompactCollector::IsOnEvacuationCandidate(new_object));
6274 } else if (record_slots &&
6275 MarkCompactCollector::IsOnEvacuationCandidate(object)) {
6276 mark_compact_collector()->RecordSlot(slot, slot, object);
6279 slot_address += kPointerSize;
6285 typedef bool (*CheckStoreBufferFilter)(Object** addr);
6288 bool IsAMapPointerAddress(Object** addr) {
6289 uintptr_t a = reinterpret_cast<uintptr_t>(addr);
6290 int mod = a % Map::kSize;
6291 return mod >= Map::kPointerFieldsBeginOffset &&
6292 mod < Map::kPointerFieldsEndOffset;
6296 bool EverythingsAPointer(Object** addr) {
6301 static void CheckStoreBuffer(Heap* heap,
6304 Object**** store_buffer_position,
6305 Object*** store_buffer_top,
6306 CheckStoreBufferFilter filter,
6307 Address special_garbage_start,
6308 Address special_garbage_end) {
6309 Map* free_space_map = heap->free_space_map();
6310 for ( ; current < limit; current++) {
6311 Object* o = *current;
6312 Address current_address = reinterpret_cast<Address>(current);
6314 if (o == free_space_map) {
6315 Address current_address = reinterpret_cast<Address>(current);
6316 FreeSpace* free_space =
6317 FreeSpace::cast(HeapObject::FromAddress(current_address));
6318 int skip = free_space->Size();
6319 ASSERT(current_address + skip <= reinterpret_cast<Address>(limit));
6321 current_address += skip - kPointerSize;
6322 current = reinterpret_cast<Object**>(current_address);
6325 // Skip the current linear allocation space between top and limit which is
6326 // unmarked with the free space map, but can contain junk.
6327 if (current_address == special_garbage_start &&
6328 special_garbage_end != special_garbage_start) {
6329 current_address = special_garbage_end - kPointerSize;
6330 current = reinterpret_cast<Object**>(current_address);
6333 if (!(*filter)(current)) continue;
6334 ASSERT(current_address < special_garbage_start ||
6335 current_address >= special_garbage_end);
6336 ASSERT(reinterpret_cast<uintptr_t>(o) != kFreeListZapValue);
6337 // We have to check that the pointer does not point into new space
6338 // without trying to cast it to a heap object since the hash field of
6339 // a string can contain values like 1 and 3 which are tagged null pointers.
6341 if (!heap->InNewSpace(o)) continue;
6342 while (**store_buffer_position < current &&
6343 *store_buffer_position < store_buffer_top) {
6344 (*store_buffer_position)++;
6346 if (**store_buffer_position != current ||
6347 *store_buffer_position == store_buffer_top) {
6348 Object** obj_start = current;
6349 while (!(*obj_start)->IsMap()) obj_start--;
6356 // Check that the store buffer contains all intergenerational pointers by
6357 // scanning a page and ensuring that all pointers to young space are in the store buffer.
6359 void Heap::OldPointerSpaceCheckStoreBuffer() {
6360 OldSpace* space = old_pointer_space();
6361 PageIterator pages(space);
6363 store_buffer()->SortUniq();
6365 while (pages.has_next()) {
6366 Page* page = pages.next();
6367 Object** current = reinterpret_cast<Object**>(page->area_start());
6369 Address end = page->area_end();
6371 Object*** store_buffer_position = store_buffer()->Start();
6372 Object*** store_buffer_top = store_buffer()->Top();
6374 Object** limit = reinterpret_cast<Object**>(end);
6375 CheckStoreBuffer(this,
6378 &store_buffer_position,
6380 &EverythingsAPointer,
6387 void Heap::MapSpaceCheckStoreBuffer() {
6388 MapSpace* space = map_space();
6389 PageIterator pages(space);
6391 store_buffer()->SortUniq();
6393 while (pages.has_next()) {
6394 Page* page = pages.next();
6395 Object** current = reinterpret_cast<Object**>(page->area_start());
6397 Address end = page->area_end();
6399 Object*** store_buffer_position = store_buffer()->Start();
6400 Object*** store_buffer_top = store_buffer()->Top();
6402 Object** limit = reinterpret_cast<Object**>(end);
6403 CheckStoreBuffer(this,
6406 &store_buffer_position,
6408 &IsAMapPointerAddress,
6415 void Heap::LargeObjectSpaceCheckStoreBuffer() {
6416 LargeObjectIterator it(lo_space());
6417 for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
6418 // We only have code, sequential strings, or fixed arrays in large
6419 // object space, and only fixed arrays can possibly contain pointers to
6420 // the young generation.
6421 if (object->IsFixedArray()) {
6422 Object*** store_buffer_position = store_buffer()->Start();
6423 Object*** store_buffer_top = store_buffer()->Top();
6424 Object** current = reinterpret_cast<Object**>(object->address());
6426 reinterpret_cast<Object**>(object->address() + object->Size());
6427 CheckStoreBuffer(this,
6430 &store_buffer_position,
6432 &EverythingsAPointer,
6441 void Heap::IterateRoots(ObjectVisitor* v, VisitMode mode) {
6442 IterateStrongRoots(v, mode);
6443 IterateWeakRoots(v, mode);
6447 void Heap::IterateWeakRoots(ObjectVisitor* v, VisitMode mode) {
6448 v->VisitPointer(reinterpret_cast<Object**>(&roots_[kStringTableRootIndex]));
6449 v->Synchronize(VisitorSynchronization::kStringTable);
6450 if (mode != VISIT_ALL_IN_SCAVENGE &&
6451 mode != VISIT_ALL_IN_SWEEP_NEWSPACE) {
6452 // Scavenge collections have special processing for this.
6453 external_string_table_.Iterate(v);
6454 error_object_list_.Iterate(v);
6456 v->Synchronize(VisitorSynchronization::kExternalStringsTable);
6460 void Heap::IterateStrongRoots(ObjectVisitor* v, VisitMode mode) {
6461 v->VisitPointers(&roots_[0], &roots_[kStrongRootListLength]);
6462 v->Synchronize(VisitorSynchronization::kStrongRootList);
6464 v->VisitPointer(BitCast<Object**>(&hidden_string_));
6465 v->Synchronize(VisitorSynchronization::kInternalizedString);
6467 isolate_->bootstrapper()->Iterate(v);
6468 v->Synchronize(VisitorSynchronization::kBootstrapper);
6469 isolate_->Iterate(v);
6470 v->Synchronize(VisitorSynchronization::kTop);
6471 Relocatable::Iterate(v);
6472 v->Synchronize(VisitorSynchronization::kRelocatable);
6474 #ifdef ENABLE_DEBUGGER_SUPPORT
6475 isolate_->debug()->Iterate(v);
6476 if (isolate_->deoptimizer_data() != NULL) {
6477 isolate_->deoptimizer_data()->Iterate(v);
6480 v->Synchronize(VisitorSynchronization::kDebug);
6481 isolate_->compilation_cache()->Iterate(v);
6482 v->Synchronize(VisitorSynchronization::kCompilationCache);
6484 // Iterate over local handles in handle scopes.
6485 isolate_->handle_scope_implementer()->Iterate(v);
6486 isolate_->IterateDeferredHandles(v);
6487 v->Synchronize(VisitorSynchronization::kHandleScope);
6489 // Iterate over the builtin code objects and code stubs in the
6490 // heap. Note that it is not necessary to iterate over code objects
6491 // on scavenge collections.
6492 if (mode != VISIT_ALL_IN_SCAVENGE) {
6493 isolate_->builtins()->IterateBuiltins(v);
6495 v->Synchronize(VisitorSynchronization::kBuiltins);
6497 // Iterate over global handles.
6499 case VISIT_ONLY_STRONG:
6500 isolate_->global_handles()->IterateStrongRoots(v);
6502 case VISIT_ALL_IN_SCAVENGE:
6503 isolate_->global_handles()->IterateNewSpaceStrongAndDependentRoots(v);
6505 case VISIT_ALL_IN_SWEEP_NEWSPACE:
6507 isolate_->global_handles()->IterateAllRoots(v);
6510 v->Synchronize(VisitorSynchronization::kGlobalHandles);
6512 // Iterate over pointers being held by inactive threads.
6513 isolate_->thread_manager()->Iterate(v);
6514 v->Synchronize(VisitorSynchronization::kThreadManager);
6516 // Iterate over the pointers the Serialization/Deserialization code is holding.
6518 // During garbage collection this keeps the partial snapshot cache alive.
6519 // During deserialization of the startup snapshot this creates the partial
6520 // snapshot cache and deserializes the objects it refers to. During
6521 // serialization this does nothing, since the partial snapshot cache is
6522 // empty. However the next thing we do is create the partial snapshot,
6523 // filling up the partial snapshot cache with objects it needs as we go.
6524 SerializerDeserializer::Iterate(v);
6525 // We don't do a v->Synchronize call here, because in debug mode that will
6526 // output a flag to the snapshot. However at this point the serializer and
6527 // deserializer are deliberately a little unsynchronized (see above) so the
6528 // checking of the sync flag in the snapshot would fail.
6532 // TODO(1236194): Since the heap size is configurable on the command line
6533 // and through the API, we should gracefully handle the case that the heap
6534 // size is not big enough to fit all the initial objects.
6535 bool Heap::ConfigureHeap(int max_semispace_size,
6536 intptr_t max_old_gen_size,
6537 intptr_t max_executable_size) {
6538 if (HasBeenSetUp()) return false;
6540 if (FLAG_stress_compaction) {
6541 // This will cause more frequent GCs when stressing.
6542 max_semispace_size_ = Page::kPageSize;
6545 if (max_semispace_size > 0) {
6546 if (max_semispace_size < Page::kPageSize) {
6547 max_semispace_size = Page::kPageSize;
6548 if (FLAG_trace_gc) {
6549 PrintPID("Max semispace size cannot be less than %dkbytes\n",
6550 Page::kPageSize >> 10);
6553 max_semispace_size_ = max_semispace_size;
6556 if (Snapshot::IsEnabled()) {
6557 // If we are using a snapshot we always reserve the default amount
6558 // of memory for each semispace because code in the snapshot has
6559 // write-barrier code that relies on the size and alignment of new
6560 // space. We therefore cannot use a larger max semispace size
6561 // than the default reserved semispace size.
6562 if (max_semispace_size_ > reserved_semispace_size_) {
6563 max_semispace_size_ = reserved_semispace_size_;
6564 if (FLAG_trace_gc) {
6565 PrintPID("Max semispace size cannot be more than %dkbytes\n",
6566 reserved_semispace_size_ >> 10);
6570 // If we are not using snapshots we reserve space for the actual
6571 // max semispace size.
6572 reserved_semispace_size_ = max_semispace_size_;
6575 if (max_old_gen_size > 0) max_old_generation_size_ = max_old_gen_size;
6576 if (max_executable_size > 0) {
6577 max_executable_size_ = RoundUp(max_executable_size, Page::kPageSize);
6580 // The max executable size must be less than or equal to the max old generation size.
6582 if (max_executable_size_ > max_old_generation_size_) {
6583 max_executable_size_ = max_old_generation_size_;
6586 // The new space size must be a power of two to support single-bit testing for containment.
6588 max_semispace_size_ = RoundUpToPowerOf2(max_semispace_size_);
6589 reserved_semispace_size_ = RoundUpToPowerOf2(reserved_semispace_size_);
6590 initial_semispace_size_ = Min(initial_semispace_size_, max_semispace_size_);
6591 external_allocation_limit_ = 16 * max_semispace_size_;
6593 // The old generation is paged and needs at least one page for each space.
6594 int paged_space_count = LAST_PAGED_SPACE - FIRST_PAGED_SPACE + 1;
6595 max_old_generation_size_ = Max(static_cast<intptr_t>(paged_space_count *
6597 RoundUp(max_old_generation_size_,
6605 bool Heap::ConfigureHeapDefault() {
6606 return ConfigureHeap(static_cast<intptr_t>(FLAG_max_new_space_size / 2) * KB,
6607 static_cast<intptr_t>(FLAG_max_old_space_size) * MB,
6608 static_cast<intptr_t>(FLAG_max_executable_size) * MB);
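// Example of the unit conventions implied above: FLAG_max_new_space_size is
// given in KB and covers both semispaces (hence the division by two), while
// FLAG_max_old_space_size and FLAG_max_executable_size are in MB. An
// embedder shell could, for instance, be started with flags such as
//   --max_new_space_size=8192 --max_old_space_size=512 --max_executable_size=128
// (illustrative values only).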
6612 void Heap::RecordStats(HeapStats* stats, bool take_snapshot) {
6613 *stats->start_marker = HeapStats::kStartMarker;
6614 *stats->end_marker = HeapStats::kEndMarker;
6615 *stats->new_space_size = new_space_.SizeAsInt();
6616 *stats->new_space_capacity = static_cast<int>(new_space_.Capacity());
6617 *stats->old_pointer_space_size = old_pointer_space_->SizeOfObjects();
6618 *stats->old_pointer_space_capacity = old_pointer_space_->Capacity();
6619 *stats->old_data_space_size = old_data_space_->SizeOfObjects();
6620 *stats->old_data_space_capacity = old_data_space_->Capacity();
6621 *stats->code_space_size = code_space_->SizeOfObjects();
6622 *stats->code_space_capacity = code_space_->Capacity();
6623 *stats->map_space_size = map_space_->SizeOfObjects();
6624 *stats->map_space_capacity = map_space_->Capacity();
6625 *stats->cell_space_size = cell_space_->SizeOfObjects();
6626 *stats->cell_space_capacity = cell_space_->Capacity();
6627 *stats->lo_space_size = lo_space_->Size();
6628 isolate_->global_handles()->RecordStats(stats);
6629 *stats->memory_allocator_size = isolate()->memory_allocator()->Size();
6630 *stats->memory_allocator_capacity =
6631 isolate()->memory_allocator()->Size() +
6632 isolate()->memory_allocator()->Available();
6633 *stats->os_error = OS::GetLastError();
6634 isolate()->memory_allocator()->Available();
6635 if (take_snapshot) {
6636 HeapIterator iterator(this);
6637 for (HeapObject* obj = iterator.next();
6639 obj = iterator.next()) {
6640 InstanceType type = obj->map()->instance_type();
6641 ASSERT(0 <= type && type <= LAST_TYPE);
6642 stats->objects_per_type[type]++;
6643 stats->size_per_type[type] += obj->Size();
6649 intptr_t Heap::PromotedSpaceSizeOfObjects() {
6650 return old_pointer_space_->SizeOfObjects()
6651 + old_data_space_->SizeOfObjects()
6652 + code_space_->SizeOfObjects()
6653 + map_space_->SizeOfObjects()
6654 + cell_space_->SizeOfObjects()
6655 + lo_space_->SizeOfObjects();
6659 intptr_t Heap::PromotedExternalMemorySize() {
6660 if (amount_of_external_allocated_memory_
6661 <= amount_of_external_allocated_memory_at_last_global_gc_) return 0;
6662 return amount_of_external_allocated_memory_
6663 - amount_of_external_allocated_memory_at_last_global_gc_;
6667 V8_DECLARE_ONCE(initialize_gc_once);
6669 static void InitializeGCOnce() {
6670 InitializeScavengingVisitorsTables();
6671 NewSpaceScavenger::Initialize();
6672 MarkCompactCollector::Initialize();
6675 bool Heap::SetUp() {
6677 allocation_timeout_ = FLAG_gc_interval;
6680 // Initialize heap spaces and initial maps and objects. Whenever something
6681 // goes wrong, just return false. The caller should check the results and
6682 // call Heap::TearDown() to release allocated memory.
6684 // If the heap is not yet configured (e.g. through the API), configure it.
6685 // Configuration is based on the flags new-space-size (really the semispace
6686 // size) and old-space-size if set or the initial values of semispace_size_
6687 // and old_generation_size_ otherwise.
6689 if (!ConfigureHeapDefault()) return false;
6692 CallOnce(&initialize_gc_once, &InitializeGCOnce);
6694 MarkMapPointersAsEncoded(false);
6696 // Set up memory allocator.
6697 if (!isolate_->memory_allocator()->SetUp(MaxReserved(), MaxExecutableSize()))
6700 // Set up new space.
6701 if (!new_space_.SetUp(reserved_semispace_size_, max_semispace_size_)) {
6705 // Initialize old pointer space.
6706 old_pointer_space_ =
6708 max_old_generation_size_,
6711 if (old_pointer_space_ == NULL) return false;
6712 if (!old_pointer_space_->SetUp()) return false;
6714 // Initialize old data space.
6717 max_old_generation_size_,
6720 if (old_data_space_ == NULL) return false;
6721 if (!old_data_space_->SetUp()) return false;
6723 // Initialize the code space, set its maximum capacity to the old
6724 // generation size. It needs executable memory.
6725 // On 64-bit platform(s), we put all code objects in a 2 GB range of
6726 // virtual address space, so that they can call each other with near calls.
6727 if (code_range_size_ > 0) {
6728 if (!isolate_->code_range()->SetUp(code_range_size_)) {
6734 new OldSpace(this, max_old_generation_size_, CODE_SPACE, EXECUTABLE);
6735 if (code_space_ == NULL) return false;
6736 if (!code_space_->SetUp()) return false;
6738 // Initialize map space.
6739 map_space_ = new MapSpace(this, max_old_generation_size_, MAP_SPACE);
6740 if (map_space_ == NULL) return false;
6741 if (!map_space_->SetUp()) return false;
6743 // Initialize global property cell space.
6744 cell_space_ = new CellSpace(this, max_old_generation_size_, CELL_SPACE);
6745 if (cell_space_ == NULL) return false;
6746 if (!cell_space_->SetUp()) return false;
6748 // The large object code space may contain code or data. We set the memory
6749 // to be non-executable here for safety, but this means we need to enable it
6750 // explicitly when allocating large code objects.
6751 lo_space_ = new LargeObjectSpace(this, max_old_generation_size_, LO_SPACE);
6752 if (lo_space_ == NULL) return false;
6753 if (!lo_space_->SetUp()) return false;
6755 // Set up the seed that is used to randomize the string hash function.
6756 ASSERT(hash_seed() == 0);
6757 if (FLAG_randomize_hashes) {
6758 if (FLAG_hash_seed == 0) {
6760 Smi::FromInt(V8::RandomPrivate(isolate()) & 0x3fffffff));
6762 set_hash_seed(Smi::FromInt(FLAG_hash_seed));
6766 LOG(isolate_, IntPtrTEvent("heap-capacity", Capacity()));
6767 LOG(isolate_, IntPtrTEvent("heap-available", Available()));
6769 store_buffer()->SetUp();
6771 if (FLAG_parallel_recompilation) relocation_mutex_ = OS::CreateMutex();
6773 relocation_mutex_locked_by_optimizer_thread_ = false;
6779 bool Heap::CreateHeapObjects() {
6780 // Create initial maps.
6781 if (!CreateInitialMaps()) return false;
6782 if (!CreateApiObjects()) return false;
6784 // Create initial objects
6785 if (!CreateInitialObjects()) return false;
6787 native_contexts_list_ = undefined_value();
6792 void Heap::SetStackLimits() {
6793 ASSERT(isolate_ != NULL);
6794 ASSERT(isolate_ == isolate());
6795 // On 64 bit machines, pointers are generally out of range of Smis. We write
6796 // something that looks like an out of range Smi to the GC.
6798 // Set up the special root array entries containing the stack limits.
6799 // These are actually addresses, but the tag makes the GC ignore it.
6800 roots_[kStackLimitRootIndex] =
6801 reinterpret_cast<Object*>(
6802 (isolate_->stack_guard()->jslimit() & ~kSmiTagMask) | kSmiTag);
6803 roots_[kRealStackLimitRootIndex] =
6804 reinterpret_cast<Object*>(
6805 (isolate_->stack_guard()->real_jslimit() & ~kSmiTagMask) | kSmiTag);
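// Assuming the usual Smi encoding (kSmiTag == 0, kSmiTagMask == 1), the
// masking above simply clears the low bit of each limit address so that the
// stored value parses as a Smi and is ignored by the GC, as noted above.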
6809 void Heap::TearDown() {
6811 if (FLAG_verify_heap) {
6816 if (FLAG_print_cumulative_gc_stat) {
6818 PrintF("gc_count=%d ", gc_count_);
6819 PrintF("mark_sweep_count=%d ", ms_count_);
6820 PrintF("max_gc_pause=%.1f ", get_max_gc_pause());
6821 PrintF("total_gc_time=%.1f ", total_gc_time_ms_);
6822 PrintF("min_in_mutator=%.1f ", get_min_in_mutator());
6823 PrintF("max_alive_after_gc=%" V8_PTR_PREFIX "d ",
6824 get_max_alive_after_gc());
6825 PrintF("total_marking_time=%.1f ", marking_time());
6826 PrintF("total_sweeping_time=%.1f ", sweeping_time());
6830 isolate_->global_handles()->TearDown();
6832 external_string_table_.TearDown();
6834 error_object_list_.TearDown();
6836 new_space_.TearDown();
6838 if (old_pointer_space_ != NULL) {
6839 old_pointer_space_->TearDown();
6840 delete old_pointer_space_;
6841 old_pointer_space_ = NULL;
6844 if (old_data_space_ != NULL) {
6845 old_data_space_->TearDown();
6846 delete old_data_space_;
6847 old_data_space_ = NULL;
6850 if (code_space_ != NULL) {
6851 code_space_->TearDown();
6856 if (map_space_ != NULL) {
6857 map_space_->TearDown();
6862 if (cell_space_ != NULL) {
6863 cell_space_->TearDown();
6868 if (lo_space_ != NULL) {
6869 lo_space_->TearDown();
6874 store_buffer()->TearDown();
6875 incremental_marking()->TearDown();
6877 isolate_->memory_allocator()->TearDown();
6879 delete relocation_mutex_;
6883 void Heap::AddGCPrologueCallback(GCPrologueCallback callback, GCType gc_type) {
6884 ASSERT(callback != NULL);
6885 GCPrologueCallbackPair pair(callback, gc_type);
6886 ASSERT(!gc_prologue_callbacks_.Contains(pair));
6887 return gc_prologue_callbacks_.Add(pair);
6891 void Heap::RemoveGCPrologueCallback(GCPrologueCallback callback) {
6892 ASSERT(callback != NULL);
6893 for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
6894 if (gc_prologue_callbacks_[i].callback == callback) {
6895 gc_prologue_callbacks_.Remove(i);
6903 void Heap::AddGCEpilogueCallback(GCEpilogueCallback callback, GCType gc_type) {
6904 ASSERT(callback != NULL);
6905 GCEpilogueCallbackPair pair(callback, gc_type);
6906 ASSERT(!gc_epilogue_callbacks_.Contains(pair));
6907 return gc_epilogue_callbacks_.Add(pair);
6911 void Heap::RemoveGCEpilogueCallback(GCEpilogueCallback callback) {
6912 ASSERT(callback != NULL);
6913 for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
6914 if (gc_epilogue_callbacks_[i].callback == callback) {
6915 gc_epilogue_callbacks_.Remove(i);
6925 class PrintHandleVisitor: public ObjectVisitor {
6927 void VisitPointers(Object** start, Object** end) {
6928 for (Object** p = start; p < end; p++)
6929 PrintF(" handle %p to %p\n",
6930 reinterpret_cast<void*>(p),
6931 reinterpret_cast<void*>(*p));
6935 void Heap::PrintHandles() {
6936 PrintF("Handles:\n");
6937 PrintHandleVisitor v;
6938 isolate_->handle_scope_implementer()->Iterate(&v);
6944 Space* AllSpaces::next() {
6945 switch (counter_++) {
6947 return heap_->new_space();
6948 case OLD_POINTER_SPACE:
6949 return heap_->old_pointer_space();
6950 case OLD_DATA_SPACE:
6951 return heap_->old_data_space();
6953 return heap_->code_space();
6955 return heap_->map_space();
6957 return heap_->cell_space();
6959 return heap_->lo_space();
6966 PagedSpace* PagedSpaces::next() {
6967 switch (counter_++) {
6968 case OLD_POINTER_SPACE:
6969 return heap_->old_pointer_space();
6970 case OLD_DATA_SPACE:
6971 return heap_->old_data_space();
6973 return heap_->code_space();
6975 return heap_->map_space();
6977 return heap_->cell_space();
6985 OldSpace* OldSpaces::next() {
6986 switch (counter_++) {
6987 case OLD_POINTER_SPACE:
6988 return heap_->old_pointer_space();
6989 case OLD_DATA_SPACE:
6990 return heap_->old_data_space();
6992 return heap_->code_space();
6999 SpaceIterator::SpaceIterator(Heap* heap)
7001 current_space_(FIRST_SPACE),
7007 SpaceIterator::SpaceIterator(Heap* heap, HeapObjectCallback size_func)
7009 current_space_(FIRST_SPACE),
7011 size_func_(size_func) {
7015 SpaceIterator::~SpaceIterator() {
7016 // Delete active iterator if any.
7021 bool SpaceIterator::has_next() {
7022 // Iterate until no more spaces.
7023 return current_space_ != LAST_SPACE;
7027 ObjectIterator* SpaceIterator::next() {
7028 if (iterator_ != NULL) {
7031 // Move to the next space
7033 if (current_space_ > LAST_SPACE) {
7038 // Return iterator for the new current space.
7039 return CreateIterator();
7043 // Create an iterator for the space to iterate.
7044 ObjectIterator* SpaceIterator::CreateIterator() {
7045 ASSERT(iterator_ == NULL);
7047 switch (current_space_) {
7049 iterator_ = new SemiSpaceIterator(heap_->new_space(), size_func_);
7051 case OLD_POINTER_SPACE:
7053 new HeapObjectIterator(heap_->old_pointer_space(), size_func_);
7055 case OLD_DATA_SPACE:
7056 iterator_ = new HeapObjectIterator(heap_->old_data_space(), size_func_);
7059 iterator_ = new HeapObjectIterator(heap_->code_space(), size_func_);
7062 iterator_ = new HeapObjectIterator(heap_->map_space(), size_func_);
7065 iterator_ = new HeapObjectIterator(heap_->cell_space(), size_func_);
7068 iterator_ = new LargeObjectIterator(heap_->lo_space(), size_func_);
7072 // Return the newly allocated iterator.
7073 ASSERT(iterator_ != NULL);
7078 class HeapObjectsFilter {
7080 virtual ~HeapObjectsFilter() {}
7081 virtual bool SkipObject(HeapObject* object) = 0;
7085 class UnreachableObjectsFilter : public HeapObjectsFilter {
7087 UnreachableObjectsFilter() {
7088 MarkReachableObjects();
7091 ~UnreachableObjectsFilter() {
7092 Isolate::Current()->heap()->mark_compact_collector()->ClearMarkbits();
7095 bool SkipObject(HeapObject* object) {
7096 MarkBit mark_bit = Marking::MarkBitFrom(object);
7097 return !mark_bit.Get();
7101 class MarkingVisitor : public ObjectVisitor {
7103 MarkingVisitor() : marking_stack_(10) {}
7105 void VisitPointers(Object** start, Object** end) {
7106 for (Object** p = start; p < end; p++) {
7107 if (!(*p)->IsHeapObject()) continue;
7108 HeapObject* obj = HeapObject::cast(*p);
7109 MarkBit mark_bit = Marking::MarkBitFrom(obj);
7110 if (!mark_bit.Get()) {
7112 marking_stack_.Add(obj);
7117 void TransitiveClosure() {
7118 while (!marking_stack_.is_empty()) {
7119 HeapObject* obj = marking_stack_.RemoveLast();
7125 List<HeapObject*> marking_stack_;
7128 void MarkReachableObjects() {
7129 Heap* heap = Isolate::Current()->heap();
7130 MarkingVisitor visitor;
7131 heap->IterateRoots(&visitor, VISIT_ALL);
7132 visitor.TransitiveClosure();
7135 DisallowHeapAllocation no_allocation_;
7139 HeapIterator::HeapIterator(Heap* heap)
7141 filtering_(HeapIterator::kNoFiltering),
7147 HeapIterator::HeapIterator(Heap* heap,
7148 HeapIterator::HeapObjectsFiltering filtering)
7150 filtering_(filtering),
7156 HeapIterator::~HeapIterator() {
7161 void HeapIterator::Init() {
7162 // Start the iteration.
7163 space_iterator_ = new SpaceIterator(heap_);
7164 switch (filtering_) {
7165 case kFilterUnreachable:
7166 filter_ = new UnreachableObjectsFilter;
7171 object_iterator_ = space_iterator_->next();
7175 void HeapIterator::Shutdown() {
7177 // Assert that in filtering mode we have iterated through all
7178 // objects. Otherwise, the heap will be left in an inconsistent state.
7179 if (filtering_ != kNoFiltering) {
7180 ASSERT(object_iterator_ == NULL);
7183 // Make sure the last iterator is deallocated.
7184 delete space_iterator_;
7185 space_iterator_ = NULL;
7186 object_iterator_ = NULL;
7192 HeapObject* HeapIterator::next() {
7193 if (filter_ == NULL) return NextObject();
7195 HeapObject* obj = NextObject();
7196 while (obj != NULL && filter_->SkipObject(obj)) obj = NextObject();
7201 HeapObject* HeapIterator::NextObject() {
7202 // No iterator means we are done.
7203 if (object_iterator_ == NULL) return NULL;
7205 if (HeapObject* obj = object_iterator_->next_object()) {
7206 // If the current iterator has more objects we are fine.
7209 // Go through the spaces looking for one that has objects.
7210 while (space_iterator_->has_next()) {
7211 object_iterator_ = space_iterator_->next();
7212 if (HeapObject* obj = object_iterator_->next_object()) {
7217 // Done with the last space.
7218 object_iterator_ = NULL;
7223 void HeapIterator::reset() {
7224 // Restart the iterator.
7232 Object* const PathTracer::kAnyGlobalObject = reinterpret_cast<Object*>(NULL);
7234 class PathTracer::MarkVisitor: public ObjectVisitor {
7236 explicit MarkVisitor(PathTracer* tracer) : tracer_(tracer) {}
7237 void VisitPointers(Object** start, Object** end) {
7238 // Scan all HeapObject pointers in [start, end)
7239 for (Object** p = start; !tracer_->found() && (p < end); p++) {
7240 if ((*p)->IsHeapObject())
7241 tracer_->MarkRecursively(p, this);
7246 PathTracer* tracer_;
7250 class PathTracer::UnmarkVisitor: public ObjectVisitor {
7252 explicit UnmarkVisitor(PathTracer* tracer) : tracer_(tracer) {}
7253 void VisitPointers(Object** start, Object** end) {
7254 // Scan all HeapObject pointers in [start, end)
7255 for (Object** p = start; p < end; p++) {
7256 if ((*p)->IsHeapObject())
7257 tracer_->UnmarkRecursively(p, this);
7262 PathTracer* tracer_;
7266 void PathTracer::VisitPointers(Object** start, Object** end) {
7267 bool done = ((what_to_find_ == FIND_FIRST) && found_target_);
7268 // Visit all HeapObject pointers in [start, end)
7269 for (Object** p = start; !done && (p < end); p++) {
7270 if ((*p)->IsHeapObject()) {
7272 done = ((what_to_find_ == FIND_FIRST) && found_target_);
7278 void PathTracer::Reset() {
7279 found_target_ = false;
7280 object_stack_.Clear();
7284 void PathTracer::TracePathFrom(Object** root) {
7285 ASSERT((search_target_ == kAnyGlobalObject) ||
7286 search_target_->IsHeapObject());
7287 found_target_in_trace_ = false;
7290 MarkVisitor mark_visitor(this);
7291 MarkRecursively(root, &mark_visitor);
7293 UnmarkVisitor unmark_visitor(this);
7294 UnmarkRecursively(root, &unmark_visitor);
7300 static bool SafeIsNativeContext(HeapObject* obj) {
7301 return obj->map() == obj->GetHeap()->raw_unchecked_native_context_map();
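// Note on the marking scheme used below (descriptive comment, added for
// clarity): MarkRecursively records that an object has been visited by
// overwriting its map word with the map's address plus kMarkTag, so a map
// word that no longer looks like a HeapObject pointer means "visited before".
// UnmarkRecursively strips the tag and restores the original map, which is
// why TracePathFrom always runs the mark pass and the unmark pass as a pair.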
7305 void PathTracer::MarkRecursively(Object** p, MarkVisitor* mark_visitor) {
7306 if (!(*p)->IsHeapObject()) return;
7308 HeapObject* obj = HeapObject::cast(*p);
7310 Object* map = obj->map();
7312 if (!map->IsHeapObject()) return; // visited before
7314 if (found_target_in_trace_) return; // stop if target found
7315 object_stack_.Add(obj);
7316 if (((search_target_ == kAnyGlobalObject) && obj->IsJSGlobalObject()) ||
7317 (obj == search_target_)) {
7318 found_target_in_trace_ = true;
7319 found_target_ = true;
7323 bool is_native_context = SafeIsNativeContext(obj);
7326 Map* map_p = reinterpret_cast<Map*>(HeapObject::cast(map));
7328 Address map_addr = map_p->address();
7330 obj->set_map_no_write_barrier(reinterpret_cast<Map*>(map_addr + kMarkTag));
7332 // Scan the object body.
7333 if (is_native_context && (visit_mode_ == VISIT_ONLY_STRONG)) {
7334 // This is specialized to scan Contexts properly.
7335 Object** start = reinterpret_cast<Object**>(obj->address() +
7336 Context::kHeaderSize);
7337 Object** end = reinterpret_cast<Object**>(obj->address() +
7338 Context::kHeaderSize + Context::FIRST_WEAK_SLOT * kPointerSize);
7339 mark_visitor->VisitPointers(start, end);
7341 obj->IterateBody(map_p->instance_type(),
7342 obj->SizeFromMap(map_p),
7346 // Scan the map after the body because the body is a lot more interesting
7347 // when doing leak detection.
7348 MarkRecursively(&map, mark_visitor);
7350 if (!found_target_in_trace_) // don't pop if we found the target
7351 object_stack_.RemoveLast();
7355 void PathTracer::UnmarkRecursively(Object** p, UnmarkVisitor* unmark_visitor) {
7356 if (!(*p)->IsHeapObject()) return;
7358 HeapObject* obj = HeapObject::cast(*p);
7360 Object* map = obj->map();
7362 if (map->IsHeapObject()) return; // unmarked already
7364 Address map_addr = reinterpret_cast<Address>(map);
7366 map_addr -= kMarkTag;
7368 ASSERT_TAG_ALIGNED(map_addr);
7370 HeapObject* map_p = HeapObject::FromAddress(map_addr);
7372 obj->set_map_no_write_barrier(reinterpret_cast<Map*>(map_p));
7374 UnmarkRecursively(reinterpret_cast<Object**>(&map_p), unmark_visitor);
7376 obj->IterateBody(Map::cast(map_p)->instance_type(),
7377 obj->SizeFromMap(Map::cast(map_p)),
7382 void PathTracer::ProcessResults() {
7383 if (found_target_) {
7384 PrintF("=====================================\n");
7385 PrintF("==== Path to object ====\n");
7386 PrintF("=====================================\n\n");
7388 ASSERT(!object_stack_.is_empty());
7389 for (int i = 0; i < object_stack_.length(); i++) {
7390 if (i > 0) PrintF("\n |\n |\n V\n\n");
7391 Object* obj = object_stack_[i];
7394 PrintF("=====================================\n");
7399 // Triggers a depth-first traversal of reachable objects from one
7400 // given root object and finds a path to a specific heap object and prints it.
7402 void Heap::TracePathToObjectFrom(Object* target, Object* root) {
7403 PathTracer tracer(target, PathTracer::FIND_ALL, VISIT_ALL);
7404 tracer.VisitPointer(&root);
7408 // Triggers a depth-first traversal of reachable objects from roots
7409 // and finds a path to a specific heap object and prints it.
7410 void Heap::TracePathToObject(Object* target) {
7411 PathTracer tracer(target, PathTracer::FIND_ALL, VISIT_ALL);
7412 IterateRoots(&tracer, VISIT_ONLY_STRONG);
7416 // Triggers a depth-first traversal of reachable objects from roots
7417 // and finds a path to any global object and prints it. Useful for
7418 // determining the source for leaks of global objects.
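// (Usage note, added for clarity: the TracePath* helpers are debugging aids
// and are normally invoked by hand, e.g. from a debugger or a temporary call
// site, with the resulting path reported through PathTracer's PrintF output.)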
7419 void Heap::TracePathToGlobal() {
7420 PathTracer tracer(PathTracer::kAnyGlobalObject,
7421 PathTracer::FIND_ALL,
7423 IterateRoots(&tracer, VISIT_ONLY_STRONG);
7428 static intptr_t CountTotalHolesSize(Heap* heap) {
7429 intptr_t holes_size = 0;
7430 OldSpaces spaces(heap);
7431 for (OldSpace* space = spaces.next();
7433 space = spaces.next()) {
7434 holes_size += space->Waste() + space->Available();
7440 GCTracer::GCTracer(Heap* heap,
7441 const char* gc_reason,
7442 const char* collector_reason)
7444 start_object_size_(0),
7445 start_memory_size_(0),
7448 allocated_since_last_gc_(0),
7449 spent_in_mutator_(0),
7450 promoted_objects_size_(0),
7451 nodes_died_in_new_space_(0),
7452 nodes_copied_in_new_space_(0),
7455 gc_reason_(gc_reason),
7456 collector_reason_(collector_reason) {
7457 if (!FLAG_trace_gc && !FLAG_print_cumulative_gc_stat) return;
7458 start_time_ = OS::TimeCurrentMillis();
7459 start_object_size_ = heap_->SizeOfObjects();
7460 start_memory_size_ = heap_->isolate()->memory_allocator()->Size();
7462 for (int i = 0; i < Scope::kNumberOfScopes; i++) {
7466 in_free_list_or_wasted_before_gc_ = CountTotalHolesSize(heap);
7468 allocated_since_last_gc_ =
7469 heap_->SizeOfObjects() - heap_->alive_after_last_gc_;
7471 if (heap_->last_gc_end_timestamp_ > 0) {
7472 spent_in_mutator_ = Max(start_time_ - heap_->last_gc_end_timestamp_, 0.0);
7475 steps_count_ = heap_->incremental_marking()->steps_count();
7476 steps_took_ = heap_->incremental_marking()->steps_took();
7477 longest_step_ = heap_->incremental_marking()->longest_step();
7478 steps_count_since_last_gc_ =
7479 heap_->incremental_marking()->steps_count_since_last_gc();
7480 steps_took_since_last_gc_ =
7481 heap_->incremental_marking()->steps_took_since_last_gc();
7485 GCTracer::~GCTracer() {
7486 // Printf ONE line iff flag is set.
7487 if (!FLAG_trace_gc && !FLAG_print_cumulative_gc_stat) return;
7489 bool first_gc = (heap_->last_gc_end_timestamp_ == 0);
7491 heap_->alive_after_last_gc_ = heap_->SizeOfObjects();
7492 heap_->last_gc_end_timestamp_ = OS::TimeCurrentMillis();
7494 double time = heap_->last_gc_end_timestamp_ - start_time_;
7496 // Update cumulative GC statistics if required.
7497 if (FLAG_print_cumulative_gc_stat) {
7498 heap_->total_gc_time_ms_ += time;
7499 heap_->max_gc_pause_ = Max(heap_->max_gc_pause_, time);
7500 heap_->max_alive_after_gc_ = Max(heap_->max_alive_after_gc_,
7501 heap_->alive_after_last_gc_);
7503 heap_->min_in_mutator_ = Min(heap_->min_in_mutator_,
7506 } else if (FLAG_trace_gc_verbose) {
7507 heap_->total_gc_time_ms_ += time;
7510 if (collector_ == SCAVENGER && FLAG_trace_gc_ignore_scavenger) return;
7512 heap_->AddMarkingTime(scopes_[Scope::MC_MARK]);
7514 if (FLAG_print_cumulative_gc_stat && !FLAG_trace_gc) return;
7515 PrintPID("%8.0f ms: ", heap_->isolate()->time_millis_since_init());
7517 if (!FLAG_trace_gc_nvp) {
7518 int external_time = static_cast<int>(scopes_[Scope::EXTERNAL]);
7520 double end_memory_size_mb =
7521 static_cast<double>(heap_->isolate()->memory_allocator()->Size()) / MB;
7523 PrintF("%s %.1f (%.1f) -> %.1f (%.1f) MB, ",
7525 static_cast<double>(start_object_size_) / MB,
7526 static_cast<double>(start_memory_size_) / MB,
7527 SizeOfHeapObjects(),
7528 end_memory_size_mb);
7530 if (external_time > 0) PrintF("%d / ", external_time);
7531 PrintF("%.1f ms", time);
7532 if (steps_count_ > 0) {
7533 if (collector_ == SCAVENGER) {
7534 PrintF(" (+ %.1f ms in %d steps since last GC)",
7535 steps_took_since_last_gc_,
7536 steps_count_since_last_gc_);
7538 PrintF(" (+ %.1f ms in %d steps since start of marking, "
7539 "biggest step %.1f ms)",
7546 if (gc_reason_ != NULL) {
7547 PrintF(" [%s]", gc_reason_);
7550 if (collector_reason_ != NULL) {
7551 PrintF(" [%s]", collector_reason_);
7556 PrintF("pause=%.1f ", time);
7557 PrintF("mutator=%.1f ", spent_in_mutator_);
7559 switch (collector_) {
7563 case MARK_COMPACTOR:
7571 PrintF("external=%.1f ", scopes_[Scope::EXTERNAL]);
7572 PrintF("mark=%.1f ", scopes_[Scope::MC_MARK]);
7573 PrintF("sweep=%.1f ", scopes_[Scope::MC_SWEEP]);
7574 PrintF("sweepns=%.1f ", scopes_[Scope::MC_SWEEP_NEWSPACE]);
7575 PrintF("evacuate=%.1f ", scopes_[Scope::MC_EVACUATE_PAGES]);
7576 PrintF("new_new=%.1f ", scopes_[Scope::MC_UPDATE_NEW_TO_NEW_POINTERS]);
7577 PrintF("root_new=%.1f ", scopes_[Scope::MC_UPDATE_ROOT_TO_NEW_POINTERS]);
7578 PrintF("old_new=%.1f ", scopes_[Scope::MC_UPDATE_OLD_TO_NEW_POINTERS]);
7579 PrintF("compaction_ptrs=%.1f ",
7580 scopes_[Scope::MC_UPDATE_POINTERS_TO_EVACUATED]);
7581 PrintF("intracompaction_ptrs=%.1f ",
7582 scopes_[Scope::MC_UPDATE_POINTERS_BETWEEN_EVACUATED]);
7583 PrintF("misc_compaction=%.1f ", scopes_[Scope::MC_UPDATE_MISC_POINTERS]);
7584 PrintF("weakmap_process=%.1f ", scopes_[Scope::MC_WEAKMAP_PROCESS]);
7585 PrintF("weakmap_clear=%.1f ", scopes_[Scope::MC_WEAKMAP_CLEAR]);
7587 PrintF("total_size_before=%" V8_PTR_PREFIX "d ", start_object_size_);
7588 PrintF("total_size_after=%" V8_PTR_PREFIX "d ", heap_->SizeOfObjects());
7589 PrintF("holes_size_before=%" V8_PTR_PREFIX "d ",
7590 in_free_list_or_wasted_before_gc_);
7591 PrintF("holes_size_after=%" V8_PTR_PREFIX "d ", CountTotalHolesSize(heap_));
7593 PrintF("allocated=%" V8_PTR_PREFIX "d ", allocated_since_last_gc_);
7594 PrintF("promoted=%" V8_PTR_PREFIX "d ", promoted_objects_size_);
7595 PrintF("nodes_died_in_new=%d ", nodes_died_in_new_space_);
7596 PrintF("nodes_copied_in_new=%d ", nodes_copied_in_new_space_);
7597 PrintF("nodes_promoted=%d ", nodes_promoted_);
7599 if (collector_ == SCAVENGER) {
7600 PrintF("stepscount=%d ", steps_count_since_last_gc_);
7601 PrintF("stepstook=%.1f ", steps_took_since_last_gc_);
7603 PrintF("stepscount=%d ", steps_count_);
7604 PrintF("stepstook=%.1f ", steps_took_);
7605 PrintF("longeststep=%.1f ", longest_step_);
7611 heap_->PrintShortHeapStatistics();
7615 const char* GCTracer::CollectorString() {
7616 switch (collector_) {
7619 case MARK_COMPACTOR:
7620 return "Mark-sweep";
7622 return "Unknown GC";
7626 int KeyedLookupCache::Hash(Map* map, Name* name) {
7627 // Uses only lower 32 bits if pointers are larger.
7628 uintptr_t addr_hash =
7629 static_cast<uint32_t>(reinterpret_cast<uintptr_t>(map)) >> kMapHashShift;
7630 return static_cast<uint32_t>((addr_hash ^ name->Hash()) & kCapacityMask);
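// Layout note (added for clarity): the cache is a set-associative table in
// which Hash() above selects a bucket of kEntriesPerBucket consecutive
// (map, name) keys, and Lookup()/Update() below probe the entries of that
// bucket linearly.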
7634 int KeyedLookupCache::Lookup(Map* map, Name* name) {
7635 int index = (Hash(map, name) & kHashMask);
7636 for (int i = 0; i < kEntriesPerBucket; i++) {
7637 Key& key = keys_[index + i];
7638 if ((key.map == map) && key.name->Equals(name)) {
7639 return field_offsets_[index + i];
7646 void KeyedLookupCache::Update(Map* map, Name* name, int field_offset) {
7647 if (!name->IsUniqueName()) {
7648 String* internalized_string;
7649 if (!HEAP->InternalizeStringIfExists(
7650 String::cast(name), &internalized_string)) {
7653 name = internalized_string;
7655 // This cache is cleared only between mark compact passes, so we expect the
7656 // cache to only contain old space names.
7657 ASSERT(!HEAP->InNewSpace(name));
7659 int index = (Hash(map, name) & kHashMask);
7660 // After a GC there will be free slots, so we use them in order (this may
7661 // help to get the most frequently used one in position 0).
7662 for (int i = 0; i < kEntriesPerBucket; i++) {
7663 Key& key = keys_[index + i];
7664 Object* free_entry_indicator = NULL;
7665 if (key.map == free_entry_indicator) {
7668 field_offsets_[index + i] = field_offset;
7672 // No free entry found in this bucket, so we move them all down one and
7673 // put the new entry at position zero.
7674 for (int i = kEntriesPerBucket - 1; i > 0; i--) {
7675 Key& key = keys_[index + i];
7676 Key& key2 = keys_[index + i - 1];
7678 field_offsets_[index + i] = field_offsets_[index + i - 1];
7681 // Write the new first entry.
7682 Key& key = keys_[index];
7685 field_offsets_[index] = field_offset;
7689 void KeyedLookupCache::Clear() {
7690 for (int index = 0; index < kLength; index++) keys_[index].map = NULL;
7694 void DescriptorLookupCache::Clear() {
7695 for (int index = 0; index < kLength; index++) keys_[index].source = NULL;
7700 void Heap::GarbageCollectionGreedyCheck() {
7701 ASSERT(FLAG_gc_greedy);
7702 if (isolate_->bootstrapper()->IsActive()) return;
7703 if (disallow_allocation_failure()) return;
7704 CollectGarbage(NEW_SPACE);
7709 TranscendentalCache::SubCache::SubCache(Type t)
7711 isolate_(Isolate::Current()) {
7712 uint32_t in0 = 0xffffffffu; // Bit-pattern for a NaN that isn't
7713 uint32_t in1 = 0xffffffffu; // generated by the FPU.
7714 for (int i = 0; i < kCacheSize; i++) {
7715 elements_[i].in[0] = in0;
7716 elements_[i].in[1] = in1;
7717 elements_[i].output = NULL;
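// (Added note: seeding both input words with 0xffffffff, a NaN bit pattern
// that the FPU never produces, means no genuine double can ever compare
// equal to an empty slot, so a freshly constructed cache cannot return a
// spurious hit.)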
7722 void TranscendentalCache::Clear() {
7723 for (int i = 0; i < kNumberOfCaches; i++) {
7724 if (caches_[i] != NULL) {
7732 void ExternalStringTable::CleanUp() {
7734 for (int i = 0; i < new_space_strings_.length(); ++i) {
7735 if (new_space_strings_[i] == heap_->the_hole_value()) {
7738 if (heap_->InNewSpace(new_space_strings_[i])) {
7739 new_space_strings_[last++] = new_space_strings_[i];
7741 old_space_strings_.Add(new_space_strings_[i]);
7744 new_space_strings_.Rewind(last);
7745 new_space_strings_.Trim();
7748 for (int i = 0; i < old_space_strings_.length(); ++i) {
7749 if (old_space_strings_[i] == heap_->the_hole_value()) {
7752 ASSERT(!heap_->InNewSpace(old_space_strings_[i]));
7753 old_space_strings_[last++] = old_space_strings_[i];
7755 old_space_strings_.Rewind(last);
7756 old_space_strings_.Trim();
7758 if (FLAG_verify_heap) {
7765 void ExternalStringTable::TearDown() {
7766 new_space_strings_.Free();
7767 old_space_strings_.Free();
7771 // Update all references.
7772 void ErrorObjectList::UpdateReferences() {
7773 for (int i = 0; i < list_.length(); i++) {
7774 HeapObject* object = HeapObject::cast(list_[i]);
7775 MapWord first_word = object->map_word();
7776 if (first_word.IsForwardingAddress()) {
7777 list_[i] = first_word.ToForwardingAddress();
7783 // Unforwarded objects in new space are dead and removed from the list.
7784 void ErrorObjectList::UpdateReferencesInNewSpace(Heap* heap) {
7785 if (list_.is_empty()) return;
7787 int write_index = 0;
7788 for (int i = 0; i < list_.length(); i++) {
7789 MapWord first_word = HeapObject::cast(list_[i])->map_word();
7790 if (first_word.IsForwardingAddress()) {
7791 list_[write_index++] = first_word.ToForwardingAddress();
7794 list_.Rewind(write_index);
7796 // If a GC is triggered during DeferredFormatStackTrace, we do not move
7797 // objects in the list, just remove dead ones, so as not to confuse the
7798 // loop in DeferredFormatStackTrace.
7799 for (int i = 0; i < list_.length(); i++) {
7800 MapWord first_word = HeapObject::cast(list_[i])->map_word();
7801 list_[i] = first_word.IsForwardingAddress()
7802 ? first_word.ToForwardingAddress()
7803 : heap->the_hole_value();
7809 void ErrorObjectList::DeferredFormatStackTrace(Isolate* isolate) {
7810 // If formatting the stack trace causes a GC, this method will be
7811 // recursively called. In that case, skip the recursive call, since
7812 // the loop modifies the list while iterating over it.
7813 if (nested_ || list_.is_empty() || isolate->has_pending_exception()) return;
7815 HandleScope scope(isolate);
7816 Handle<String> stack_key = isolate->factory()->stack_string();
7817 int write_index = 0;
7818 int budget = kBudgetPerGC;
7819 for (int i = 0; i < list_.length(); i++) {
7820 Object* object = list_[i];
7821 JSFunction* getter_fun;
7823 { DisallowHeapAllocation no_gc;
7824 // Skip possible holes in the list.
7825 if (object->IsTheHole()) continue;
7826 if (isolate->heap()->InNewSpace(object) || budget == 0) {
7827 list_[write_index++] = object;
7831 // Check whether the stack property is backed by the original getter.
7832 LookupResult lookup(isolate);
7833 JSObject::cast(object)->LocalLookupRealNamedProperty(*stack_key, &lookup);
7834 if (!lookup.IsFound() || lookup.type() != CALLBACKS) continue;
7835 Object* callback = lookup.GetCallbackObject();
7836 if (!callback->IsAccessorPair()) continue;
7837 Object* getter_obj = AccessorPair::cast(callback)->getter();
7838 if (!getter_obj->IsJSFunction()) continue;
7839 getter_fun = JSFunction::cast(getter_obj);
7840 String* key = isolate->heap()->hidden_stack_trace_string();
7841 Object* value = getter_fun->GetHiddenProperty(key);
7842 if (key != value) continue;
7846 HandleScope scope(isolate);
7847 bool has_exception = false;
7849 Handle<Map> map(HeapObject::cast(object)->map(), isolate);
7851 Handle<Object> object_handle(object, isolate);
7852 Handle<Object> getter_handle(getter_fun, isolate);
7853 Execution::Call(getter_handle, object_handle, 0, NULL, &has_exception);
7854 ASSERT(*map == HeapObject::cast(*object_handle)->map());
7855 if (has_exception) {
7856 // Hit an exception (most likely a stack overflow).
7857 // Wrap up this pass and retry after another GC.
7858 isolate->clear_pending_exception();
7859 // We use the handle since calling the getter might have caused a GC.
7860 list_[write_index++] = *object_handle;
7864 list_.Rewind(write_index);
7870 void ErrorObjectList::RemoveUnmarked(Heap* heap) {
7871 for (int i = 0; i < list_.length(); i++) {
7872 HeapObject* object = HeapObject::cast(list_[i]);
7873 if (!Marking::MarkBitFrom(object).Get()) {
7874 list_[i] = heap->the_hole_value();
7880 void ErrorObjectList::TearDown() {
7885 void Heap::QueueMemoryChunkForFree(MemoryChunk* chunk) {
7886 chunk->set_next_chunk(chunks_queued_for_free_);
7887 chunks_queued_for_free_ = chunk;
7891 void Heap::FreeQueuedChunks() {
7892 if (chunks_queued_for_free_ == NULL) return;
7895 for (chunk = chunks_queued_for_free_; chunk != NULL; chunk = next) {
7896 next = chunk->next_chunk();
7897 chunk->SetFlag(MemoryChunk::ABOUT_TO_BE_FREED);
7899 if (chunk->owner()->identity() == LO_SPACE) {
7900 // StoreBuffer::Filter relies on MemoryChunk::FromAnyPointerAddress.
7901 // If FromAnyPointerAddress encounters a slot that belongs to a large
7902 // chunk queued for deletion, it will fail to find the chunk because
7903 // it tries to search the list of pages owned by the large object
7904 // space, and the queued chunks have been detached from that list.
7905 // To work around this we split the large chunk into normal kPageSize-aligned
7906 // pieces and initialize the size, owner and flags field of every piece.
7907 // If FromAnyPointerAddress encounters a slot that belongs to one of
7908 // these smaller pieces it will treat it as a slot on a normal Page.
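// For example (illustrative numbers only): a three-page large chunk starting
// at address A keeps its real header at A and gets fake headers at
// A + Page::kPageSize and A + 2 * Page::kPageSize; each fake piece is sized
// Page::kPageSize, owned by the large object space and flagged
// ABOUT_TO_BE_FREED, so any interior slot resolves to one of these pieces.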
7909 Address chunk_end = chunk->address() + chunk->size();
7910 MemoryChunk* inner = MemoryChunk::FromAddress(
7911 chunk->address() + Page::kPageSize);
7912 MemoryChunk* inner_last = MemoryChunk::FromAddress(chunk_end - 1);
7913 while (inner <= inner_last) {
7914 // The size of a large chunk is always a multiple of
7915 // OS::AllocateAlignment(), so there is always
7916 // enough space for a fake MemoryChunk header.
7917 Address area_end = Min(inner->address() + Page::kPageSize, chunk_end);
7918 // Guard against overflow.
7919 if (area_end < inner->address()) area_end = chunk_end;
7920 inner->SetArea(inner->address(), area_end);
7921 inner->set_size(Page::kPageSize);
7922 inner->set_owner(lo_space());
7923 inner->SetFlag(MemoryChunk::ABOUT_TO_BE_FREED);
7924 inner = MemoryChunk::FromAddress(
7925 inner->address() + Page::kPageSize);
7929 isolate_->heap()->store_buffer()->Compact();
7930 isolate_->heap()->store_buffer()->Filter(MemoryChunk::ABOUT_TO_BE_FREED);
7931 for (chunk = chunks_queued_for_free_; chunk != NULL; chunk = next) {
7932 next = chunk->next_chunk();
7933 isolate_->memory_allocator()->Free(chunk);
7935 chunks_queued_for_free_ = NULL;
7939 void Heap::RememberUnmappedPage(Address page, bool compacted) {
7940 uintptr_t p = reinterpret_cast<uintptr_t>(page);
7941 // Tag the page pointer to make it findable in the dump file.
7943 if (compacted) p ^= 0xc1ead & (Page::kPageSize - 1); // Cleared.
7945 else p ^= 0x1d1ed & (Page::kPageSize - 1); // I died.
7947 remembered_unmapped_pages_[remembered_unmapped_pages_index_] =
7948 reinterpret_cast<Address>(p);
7949 remembered_unmapped_pages_index_++;
7950 remembered_unmapped_pages_index_ %= kRememberedUnmappedPages;
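// (Added note: remembered_unmapped_pages_ acts as a small ring buffer of
// kRememberedUnmappedPages entries, so a crash dump exposes the most recently
// unmapped pages, with the low bits of each address XOR-tagged according to
// the compacted flag: "Cleared" for compacted pages, "I died" otherwise.)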
7954 void Heap::ClearObjectStats(bool clear_last_time_stats) {
7955 memset(object_counts_, 0, sizeof(object_counts_));
7956 memset(object_sizes_, 0, sizeof(object_sizes_));
7957 if (clear_last_time_stats) {
7958 memset(object_counts_last_time_, 0, sizeof(object_counts_last_time_));
7959 memset(object_sizes_last_time_, 0, sizeof(object_sizes_last_time_));
7964 static LazyMutex checkpoint_object_stats_mutex = LAZY_MUTEX_INITIALIZER;
7967 void Heap::CheckpointObjectStats() {
7968 ScopedLock lock(checkpoint_object_stats_mutex.Pointer());
7969 Counters* counters = isolate()->counters();
7970 #define ADJUST_LAST_TIME_OBJECT_COUNT(name) \
7971 counters->count_of_##name()->Increment( \
7972 static_cast<int>(object_counts_[name])); \
7973 counters->count_of_##name()->Decrement( \
7974 static_cast<int>(object_counts_last_time_[name])); \
7975 counters->size_of_##name()->Increment( \
7976 static_cast<int>(object_sizes_[name])); \
7977 counters->size_of_##name()->Decrement( \
7978 static_cast<int>(object_sizes_last_time_[name]));
7979 INSTANCE_TYPE_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
7980 #undef ADJUST_LAST_TIME_OBJECT_COUNT
7982 #define ADJUST_LAST_TIME_OBJECT_COUNT(name) \
7983 index = FIRST_CODE_KIND_SUB_TYPE + Code::name; \
7984 counters->count_of_CODE_TYPE_##name()->Increment( \
7985 static_cast<int>(object_counts_[index])); \
7986 counters->count_of_CODE_TYPE_##name()->Decrement( \
7987 static_cast<int>(object_counts_last_time_[index])); \
7988 counters->size_of_CODE_TYPE_##name()->Increment( \
7989 static_cast<int>(object_sizes_[index])); \
7990 counters->size_of_CODE_TYPE_##name()->Decrement( \
7991 static_cast<int>(object_sizes_last_time_[index]));
7992 CODE_KIND_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
7993 #undef ADJUST_LAST_TIME_OBJECT_COUNT
7994 #define ADJUST_LAST_TIME_OBJECT_COUNT(name) \
7995 index = FIRST_FIXED_ARRAY_SUB_TYPE + name; \
7996 counters->count_of_FIXED_ARRAY_##name()->Increment( \
7997 static_cast<int>(object_counts_[index])); \
7998 counters->count_of_FIXED_ARRAY_##name()->Decrement( \
7999 static_cast<int>(object_counts_last_time_[index])); \
8000 counters->size_of_FIXED_ARRAY_##name()->Increment( \
8001 static_cast<int>(object_sizes_[index])); \
8002 counters->size_of_FIXED_ARRAY_##name()->Decrement( \
8003 static_cast<int>(object_sizes_last_time_[index]));
8004 FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
8005 #undef ADJUST_LAST_TIME_OBJECT_COUNT
8007 OS::MemCopy(object_counts_last_time_, object_counts_, sizeof(object_counts_));
8008 OS::MemCopy(object_sizes_last_time_, object_sizes_, sizeof(object_sizes_));
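// (Added note: each counter is incremented by the current per-type value and
// decremented by the snapshot taken at the previous checkpoint, so the
// counters always track the latest totals without ever being reset; the
// MemCopy calls above then promote the current values to be the new snapshot.)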
8013 Heap::RelocationLock::RelocationLock(Heap* heap) : heap_(heap) {
8014 if (FLAG_parallel_recompilation) {
8015 heap_->relocation_mutex_->Lock();
8017 heap_->relocation_mutex_locked_by_optimizer_thread_ =
8018 heap_->isolate()->optimizing_compiler_thread()->IsOptimizerThread();
8023 } } // namespace v8::internal