1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission.
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 #include "accessors.h"
32 #include "bootstrapper.h"
34 #include "compilation-cache.h"
35 #include "cpu-profiler.h"
37 #include "deoptimizer.h"
38 #include "global-handles.h"
39 #include "heap-profiler.h"
40 #include "incremental-marking.h"
41 #include "mark-compact.h"
43 #include "objects-visiting.h"
44 #include "objects-visiting-inl.h"
46 #include "runtime-profiler.h"
47 #include "scopeinfo.h"
49 #include "store-buffer.h"
50 #include "v8threads.h"
52 #include "vm-state-inl.h"
53 #if V8_TARGET_ARCH_ARM && !V8_INTERPRETED_REGEXP
54 #include "regexp-macro-assembler.h"
55 #include "arm/regexp-macro-assembler-arm.h"
56 #endif
57 #if V8_TARGET_ARCH_MIPS && !V8_INTERPRETED_REGEXP
58 #include "regexp-macro-assembler.h"
59 #include "mips/regexp-macro-assembler-mips.h"
68 // semispace_size_ should be a power of 2 and old_generation_size_ should be
69 // a multiple of Page::kPageSize.
70 #if V8_TARGET_ARCH_X64
71 #define LUMP_OF_MEMORY (2 * MB)
72 code_range_size_(512*MB),
73 #else
74 #define LUMP_OF_MEMORY MB
75 code_range_size_(0),
76 #endif
77 #if defined(ANDROID) || V8_TARGET_ARCH_MIPS
78 reserved_semispace_size_(4 * Max(LUMP_OF_MEMORY, Page::kPageSize)),
79 max_semispace_size_(4 * Max(LUMP_OF_MEMORY, Page::kPageSize)),
80 initial_semispace_size_(Page::kPageSize),
81 max_old_generation_size_(192*MB),
82 max_executable_size_(max_old_generation_size_),
83 #else
84 reserved_semispace_size_(8 * Max(LUMP_OF_MEMORY, Page::kPageSize)),
85 max_semispace_size_(8 * Max(LUMP_OF_MEMORY, Page::kPageSize)),
86 initial_semispace_size_(Page::kPageSize),
87 max_old_generation_size_(700ul * LUMP_OF_MEMORY),
88 max_executable_size_(256l * LUMP_OF_MEMORY),
89 #endif
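// With the constants above (and assuming Page::kPageSize does not exceed
// LUMP_OF_MEMORY), a 64-bit desktop build gets 8 * 2 MB = 16 MB semispaces,
// a 1400 MB old-generation limit and a 512 MB executable limit; 32-bit
// builds use LUMP_OF_MEMORY == 1 MB and thus 8 MB semispaces, 700 MB and
// 256 MB; the Android/MIPS configuration uses 4 * LUMP_OF_MEMORY semispaces
// (4 MB on 32-bit) and a 192 MB old generation.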
91 // Variables set based on semispace_size_ and old_generation_size_ in
92 // ConfigureHeap (survived_since_last_expansion_, external_allocation_limit_)
93 // Will be 4 * reserved_semispace_size_ to ensure that young
94 // generation can be aligned to its size.
95 survived_since_last_expansion_(0),
97 always_allocate_scope_depth_(0),
98 linear_allocation_scope_depth_(0),
99 contexts_disposed_(0),
101 flush_monomorphic_ics_(false),
102 scan_on_scavenge_pages_(0),
104 old_pointer_space_(NULL),
105 old_data_space_(NULL),
109 property_cell_space_(NULL),
111 gc_state_(NOT_IN_GC),
112 gc_post_processing_depth_(0),
115 remembered_unmapped_pages_index_(0),
116 unflattened_strings_length_(0),
118 allocation_timeout_(0),
119 disallow_allocation_failure_(false),
121 new_space_high_promotion_mode_active_(false),
122 old_generation_allocation_limit_(kMinimumOldGenerationAllocationLimit),
123 size_of_old_gen_at_last_old_space_gc_(0),
124 external_allocation_limit_(0),
125 amount_of_external_allocated_memory_(0),
126 amount_of_external_allocated_memory_at_last_global_gc_(0),
127 old_gen_exhausted_(false),
128 store_buffer_rebuilder_(store_buffer()),
129 hidden_string_(NULL),
130 global_gc_prologue_callback_(NULL),
131 global_gc_epilogue_callback_(NULL),
132 gc_safe_size_of_old_object_(NULL),
133 total_regexp_code_generated_(0),
135 young_survivors_after_last_gc_(0),
136 high_survival_rate_period_length_(0),
137 low_survival_rate_period_length_(0),
139 previous_survival_rate_trend_(Heap::STABLE),
140 survival_rate_trend_(Heap::STABLE),
142 total_gc_time_ms_(0.0),
143 max_alive_after_gc_(0),
144 min_in_mutator_(kMaxInt),
145 alive_after_last_gc_(0),
146 last_gc_end_timestamp_(0.0),
151 incremental_marking_(this),
152 number_idle_notifications_(0),
153 last_idle_notification_gc_count_(0),
154 last_idle_notification_gc_count_init_(false),
155 mark_sweeps_since_idle_round_started_(0),
156 gc_count_at_last_idle_gc_(0),
157 scavenges_since_last_idle_round_(kIdleScavengeThreshold),
158 gcs_since_last_deopt_(0),
160 no_weak_embedded_maps_verification_scope_depth_(0),
162 promotion_queue_(this),
164 chunks_queued_for_free_(NULL),
165 relocation_mutex_(NULL) {
166 // Allow build-time customization of the max semispace size. Building
167 // V8 with snapshots and a non-default max semispace size is much
168 // easier if you can define it as part of the build environment.
169 #if defined(V8_MAX_SEMISPACE_SIZE)
170 max_semispace_size_ = reserved_semispace_size_ = V8_MAX_SEMISPACE_SIZE;
173 intptr_t max_virtual = OS::MaxVirtualMemory();
175 if (max_virtual > 0) {
176 if (code_range_size_ > 0) {
177 // Reserve no more than 1/8 of the memory for the code range.
178 code_range_size_ = Min(code_range_size_, max_virtual >> 3);
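// For example, with 4 GB of addressable virtual memory the cap is
// 4 GB >> 3 = 512 MB, so the x64 default of 512 MB stands unchanged; with
// only 2 GB of virtual memory the code range would shrink to 256 MB.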
182 memset(roots_, 0, sizeof(roots_[0]) * kRootListLength);
183 native_contexts_list_ = NULL;
184 array_buffers_list_ = Smi::FromInt(0);
185 allocation_sites_list_ = Smi::FromInt(0);
186 mark_compact_collector_.heap_ = this;
187 external_string_table_.heap_ = this;
188 // Put a dummy entry in the remembered pages so we can find the list in the
189 // minidump even if there are no real unmapped pages.
190 RememberUnmappedPage(NULL, false);
192 ClearObjectStats(true);
196 intptr_t Heap::Capacity() {
197 if (!HasBeenSetUp()) return 0;
199 return new_space_.Capacity() +
200 old_pointer_space_->Capacity() +
201 old_data_space_->Capacity() +
202 code_space_->Capacity() +
203 map_space_->Capacity() +
204 cell_space_->Capacity() +
205 property_cell_space_->Capacity();
209 intptr_t Heap::CommittedMemory() {
210 if (!HasBeenSetUp()) return 0;
212 return new_space_.CommittedMemory() +
213 old_pointer_space_->CommittedMemory() +
214 old_data_space_->CommittedMemory() +
215 code_space_->CommittedMemory() +
216 map_space_->CommittedMemory() +
217 cell_space_->CommittedMemory() +
218 property_cell_space_->CommittedMemory() +
219 lo_space_->CommittedMemory();
223 size_t Heap::CommittedPhysicalMemory() {
224 if (!HasBeenSetUp()) return 0;
226 return new_space_.CommittedPhysicalMemory() +
227 old_pointer_space_->CommittedPhysicalMemory() +
228 old_data_space_->CommittedPhysicalMemory() +
229 code_space_->CommittedPhysicalMemory() +
230 map_space_->CommittedPhysicalMemory() +
231 cell_space_->CommittedPhysicalMemory() +
232 property_cell_space_->CommittedPhysicalMemory() +
233 lo_space_->CommittedPhysicalMemory();
237 intptr_t Heap::CommittedMemoryExecutable() {
238 if (!HasBeenSetUp()) return 0;
240 return isolate()->memory_allocator()->SizeExecutable();
244 intptr_t Heap::Available() {
245 if (!HasBeenSetUp()) return 0;
247 return new_space_.Available() +
248 old_pointer_space_->Available() +
249 old_data_space_->Available() +
250 code_space_->Available() +
251 map_space_->Available() +
252 cell_space_->Available() +
253 property_cell_space_->Available();
257 bool Heap::HasBeenSetUp() {
258 return old_pointer_space_ != NULL &&
259 old_data_space_ != NULL &&
260 code_space_ != NULL &&
261 map_space_ != NULL &&
262 cell_space_ != NULL &&
263 property_cell_space_ != NULL &&
264 lo_space_ != NULL;
268 int Heap::GcSafeSizeOfOldObject(HeapObject* object) {
269 if (IntrusiveMarking::IsMarked(object)) {
270 return IntrusiveMarking::SizeOfMarkedObject(object);
272 return object->SizeFromMap(object->map());
276 GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space,
277 const char** reason) {
278 // Is global GC requested?
279 if (space != NEW_SPACE) {
280 isolate_->counters()->gc_compactor_caused_by_request()->Increment();
281 *reason = "GC in old space requested";
282 return MARK_COMPACTOR;
285 if (FLAG_gc_global || (FLAG_stress_compaction && (gc_count_ & 1) != 0)) {
286 *reason = "GC in old space forced by flags";
287 return MARK_COMPACTOR;
290 // Is enough data promoted to justify a global GC?
291 if (OldGenerationAllocationLimitReached()) {
292 isolate_->counters()->gc_compactor_caused_by_promoted_data()->Increment();
293 *reason = "promotion limit reached";
294 return MARK_COMPACTOR;
297 // Have allocation in OLD and LO failed?
298 if (old_gen_exhausted_) {
299 isolate_->counters()->
300 gc_compactor_caused_by_oldspace_exhaustion()->Increment();
301 *reason = "old generations exhausted";
302 return MARK_COMPACTOR;
305 // Is there enough space left in OLD to guarantee that a scavenge can
306 // succeed?
308 // Note that MemoryAllocator->MaxAvailable() undercounts the memory available
309 // for object promotion. It counts only the bytes that the memory
310 // allocator has not yet allocated from the OS and assigned to any space,
311 // and does not count available bytes already in the old space or code
312 // space. Undercounting is safe---we may get an unrequested full GC when
313 // a scavenge would have succeeded.
314 if (isolate_->memory_allocator()->MaxAvailable() <= new_space_.Size()) {
315 isolate_->counters()->
316 gc_compactor_caused_by_oldspace_exhaustion()->Increment();
317 *reason = "scavenge might not succeed";
318 return MARK_COMPACTOR;
319 }
321 // Default
322 *reason = NULL;
323 return SCAVENGER;
324 }
327 // TODO(1238405): Combine the infrastructure for --heap-stats and
328 // --log-gc to avoid the complicated preprocessor and flag testing.
329 void Heap::ReportStatisticsBeforeGC() {
330 // Heap::ReportHeapStatistics will also log NewSpace statistics when
331 // --log-gc is set. The following logic is used to avoid double logging.
334 if (FLAG_heap_stats || FLAG_log_gc) new_space_.CollectStatistics();
335 if (FLAG_heap_stats) {
336 ReportHeapStatistics("Before GC");
337 } else if (FLAG_log_gc) {
338 new_space_.ReportStatistics();
340 if (FLAG_heap_stats || FLAG_log_gc) new_space_.ClearHistograms();
343 new_space_.CollectStatistics();
344 new_space_.ReportStatistics();
345 new_space_.ClearHistograms();
351 void Heap::PrintShortHeapStatistics() {
352 if (!FLAG_trace_gc_verbose) return;
353 PrintPID("Memory allocator, used: %6" V8_PTR_PREFIX "d KB"
354 ", available: %6" V8_PTR_PREFIX "d KB\n",
355 isolate_->memory_allocator()->Size() / KB,
356 isolate_->memory_allocator()->Available() / KB);
357 PrintPID("New space, used: %6" V8_PTR_PREFIX "d KB"
358 ", available: %6" V8_PTR_PREFIX "d KB"
359 ", committed: %6" V8_PTR_PREFIX "d KB\n",
360 new_space_.Size() / KB,
361 new_space_.Available() / KB,
362 new_space_.CommittedMemory() / KB);
363 PrintPID("Old pointers, used: %6" V8_PTR_PREFIX "d KB"
364 ", available: %6" V8_PTR_PREFIX "d KB"
365 ", committed: %6" V8_PTR_PREFIX "d KB\n",
366 old_pointer_space_->SizeOfObjects() / KB,
367 old_pointer_space_->Available() / KB,
368 old_pointer_space_->CommittedMemory() / KB);
369 PrintPID("Old data space, used: %6" V8_PTR_PREFIX "d KB"
370 ", available: %6" V8_PTR_PREFIX "d KB"
371 ", committed: %6" V8_PTR_PREFIX "d KB\n",
372 old_data_space_->SizeOfObjects() / KB,
373 old_data_space_->Available() / KB,
374 old_data_space_->CommittedMemory() / KB);
375 PrintPID("Code space, used: %6" V8_PTR_PREFIX "d KB"
376 ", available: %6" V8_PTR_PREFIX "d KB"
377 ", committed: %6" V8_PTR_PREFIX "d KB\n",
378 code_space_->SizeOfObjects() / KB,
379 code_space_->Available() / KB,
380 code_space_->CommittedMemory() / KB);
381 PrintPID("Map space, used: %6" V8_PTR_PREFIX "d KB"
382 ", available: %6" V8_PTR_PREFIX "d KB"
383 ", committed: %6" V8_PTR_PREFIX "d KB\n",
384 map_space_->SizeOfObjects() / KB,
385 map_space_->Available() / KB,
386 map_space_->CommittedMemory() / KB);
387 PrintPID("Cell space, used: %6" V8_PTR_PREFIX "d KB"
388 ", available: %6" V8_PTR_PREFIX "d KB"
389 ", committed: %6" V8_PTR_PREFIX "d KB\n",
390 cell_space_->SizeOfObjects() / KB,
391 cell_space_->Available() / KB,
392 cell_space_->CommittedMemory() / KB);
393 PrintPID("PropertyCell space, used: %6" V8_PTR_PREFIX "d KB"
394 ", available: %6" V8_PTR_PREFIX "d KB"
395 ", committed: %6" V8_PTR_PREFIX "d KB\n",
396 property_cell_space_->SizeOfObjects() / KB,
397 property_cell_space_->Available() / KB,
398 property_cell_space_->CommittedMemory() / KB);
399 PrintPID("Large object space, used: %6" V8_PTR_PREFIX "d KB"
400 ", available: %6" V8_PTR_PREFIX "d KB"
401 ", committed: %6" V8_PTR_PREFIX "d KB\n",
402 lo_space_->SizeOfObjects() / KB,
403 lo_space_->Available() / KB,
404 lo_space_->CommittedMemory() / KB);
405 PrintPID("All spaces, used: %6" V8_PTR_PREFIX "d KB"
406 ", available: %6" V8_PTR_PREFIX "d KB"
407 ", committed: %6" V8_PTR_PREFIX "d KB\n",
408 this->SizeOfObjects() / KB,
409 this->Available() / KB,
410 this->CommittedMemory() / KB);
411 PrintPID("External memory reported: %6" V8_PTR_PREFIX "d KB\n",
412 amount_of_external_allocated_memory_ / KB);
413 PrintPID("Total time spent in GC : %.1f ms\n", total_gc_time_ms_);
417 // TODO(1238405): Combine the infrastructure for --heap-stats and
418 // --log-gc to avoid the complicated preprocessor and flag testing.
419 void Heap::ReportStatisticsAfterGC() {
420 // Similar to before the GC, we use some complicated logic to ensure that
421 // NewSpace statistics are logged exactly once when --log-gc is turned on.
423 if (FLAG_heap_stats) {
424 new_space_.CollectStatistics();
425 ReportHeapStatistics("After GC");
426 } else if (FLAG_log_gc) {
427 new_space_.ReportStatistics();
430 if (FLAG_log_gc) new_space_.ReportStatistics();
435 void Heap::GarbageCollectionPrologue() {
436 { AllowHeapAllocation for_the_first_part_of_prologue;
437 isolate_->transcendental_cache()->Clear();
438 ClearJSFunctionResultCaches();
439 gc_count_++;
440 unflattened_strings_length_ = 0;
442 if (FLAG_flush_code && FLAG_flush_code_incrementally) {
443 mark_compact_collector()->EnableCodeFlushing(true);
447 if (FLAG_verify_heap) {
454 ASSERT(!AllowHeapAllocation::IsAllowed() && gc_state_ == NOT_IN_GC);
456 if (FLAG_gc_verbose) Print();
458 ReportStatisticsBeforeGC();
461 store_buffer()->GCPrologue();
465 intptr_t Heap::SizeOfObjects() {
466 intptr_t total = 0;
467 AllSpaces spaces(this);
468 for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
469 total += space->SizeOfObjects();
470 }
471 return total;
475 void Heap::RepairFreeListsAfterBoot() {
476 PagedSpaces spaces(this);
477 for (PagedSpace* space = spaces.next();
478 space != NULL;
479 space = spaces.next()) {
480 space->RepairFreeListsAfterBoot();
485 void Heap::GarbageCollectionEpilogue() {
486 store_buffer()->GCEpilogue();
488 // In release mode, we only zap the from space under heap verification.
489 if (Heap::ShouldZapGarbage()) {
494 if (FLAG_verify_heap) {
499 AllowHeapAllocation for_the_rest_of_the_epilogue;
502 if (FLAG_print_global_handles) isolate_->global_handles()->Print();
503 if (FLAG_print_handles) PrintHandles();
504 if (FLAG_gc_verbose) Print();
505 if (FLAG_code_stats) ReportCodeStatistics("After GC");
507 if (FLAG_deopt_every_n_garbage_collections > 0) {
508 if (++gcs_since_last_deopt_ == FLAG_deopt_every_n_garbage_collections) {
509 Deoptimizer::DeoptimizeAll(isolate());
510 gcs_since_last_deopt_ = 0;
514 isolate_->counters()->alive_after_last_gc()->Set(
515 static_cast<int>(SizeOfObjects()));
517 isolate_->counters()->string_table_capacity()->Set(
518 string_table()->Capacity());
519 isolate_->counters()->number_of_symbols()->Set(
520 string_table()->NumberOfElements());
522 if (CommittedMemory() > 0) {
523 isolate_->counters()->external_fragmentation_total()->AddSample(
524 static_cast<int>(100 - (SizeOfObjects() * 100.0) / CommittedMemory()));
526 isolate_->counters()->heap_fraction_map_space()->AddSample(
527 static_cast<int>(
528 (map_space()->CommittedMemory() * 100.0) / CommittedMemory()));
529 isolate_->counters()->heap_fraction_cell_space()->AddSample(
530 static_cast<int>(
531 (cell_space()->CommittedMemory() * 100.0) / CommittedMemory()));
532 isolate_->counters()->heap_fraction_property_cell_space()->
533 AddSample(static_cast<int>(
534 (property_cell_space()->CommittedMemory() * 100.0) /
535 CommittedMemory()));
537 isolate_->counters()->heap_sample_total_committed()->AddSample(
538 static_cast<int>(CommittedMemory() / KB));
539 isolate_->counters()->heap_sample_total_used()->AddSample(
540 static_cast<int>(SizeOfObjects() / KB));
541 isolate_->counters()->heap_sample_map_space_committed()->AddSample(
542 static_cast<int>(map_space()->CommittedMemory() / KB));
543 isolate_->counters()->heap_sample_cell_space_committed()->AddSample(
544 static_cast<int>(cell_space()->CommittedMemory() / KB));
545 isolate_->counters()->
546 heap_sample_property_cell_space_committed()->
547 AddSample(static_cast<int>(
548 property_cell_space()->CommittedMemory() / KB));
551 #define UPDATE_COUNTERS_FOR_SPACE(space) \
552 isolate_->counters()->space##_bytes_available()->Set( \
553 static_cast<int>(space()->Available())); \
554 isolate_->counters()->space##_bytes_committed()->Set( \
555 static_cast<int>(space()->CommittedMemory())); \
556 isolate_->counters()->space##_bytes_used()->Set( \
557 static_cast<int>(space()->SizeOfObjects()));
558 #define UPDATE_FRAGMENTATION_FOR_SPACE(space) \
559 if (space()->CommittedMemory() > 0) { \
560 isolate_->counters()->external_fragmentation_##space()->AddSample( \
561 static_cast<int>(100 - \
562 (space()->SizeOfObjects() * 100.0) / space()->CommittedMemory())); \
563 }
564 #define UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(space) \
565 UPDATE_COUNTERS_FOR_SPACE(space) \
566 UPDATE_FRAGMENTATION_FOR_SPACE(space)
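// For example, UPDATE_COUNTERS_FOR_SPACE(new_space) records
// new_space()->Available(), CommittedMemory() and SizeOfObjects() into the
// new_space_bytes_available, new_space_bytes_committed and
// new_space_bytes_used counters via the token-pasted counter names.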
568 UPDATE_COUNTERS_FOR_SPACE(new_space)
569 UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(old_pointer_space)
570 UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(old_data_space)
571 UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(code_space)
572 UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(map_space)
573 UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(cell_space)
574 UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(property_cell_space)
575 UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(lo_space)
576 #undef UPDATE_COUNTERS_FOR_SPACE
577 #undef UPDATE_FRAGMENTATION_FOR_SPACE
578 #undef UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE
581 ReportStatisticsAfterGC();
583 #ifdef ENABLE_DEBUGGER_SUPPORT
584 isolate_->debug()->AfterGarbageCollection();
585 #endif // ENABLE_DEBUGGER_SUPPORT
589 void Heap::CollectAllGarbage(int flags, const char* gc_reason) {
590 // Since we are ignoring the return value, the exact choice of space does
591 // not matter, so long as we do not specify NEW_SPACE, which would not
592 // cause a full GC.
593 mark_compact_collector_.SetFlags(flags);
594 CollectGarbage(OLD_POINTER_SPACE, gc_reason);
595 mark_compact_collector_.SetFlags(kNoGCFlags);
599 void Heap::CollectAllAvailableGarbage(const char* gc_reason) {
600 // Since we are ignoring the return value, the exact choice of space does
601 // not matter, so long as we do not specify NEW_SPACE, which would not
602 // cause a full GC.
603 // Major GC would invoke weak handle callbacks on weakly reachable
604 // handles, but won't collect weakly reachable objects until next
605 // major GC. Therefore if we collect aggressively and weak handle callbacks
606 // have been invoked, we rerun major GC to release objects which become
607 // garbage.
608 // Note: as weak callbacks can execute arbitrary code, we cannot
609 // hope that eventually there will be no weak callback invocations.
610 // Therefore stop recollecting after several attempts.
611 mark_compact_collector()->SetFlags(kMakeHeapIterableMask |
612 kReduceMemoryFootprintMask);
613 isolate_->compilation_cache()->Clear();
614 const int kMaxNumberOfAttempts = 7;
615 const int kMinNumberOfAttempts = 2;
616 for (int attempt = 0; attempt < kMaxNumberOfAttempts; attempt++) {
617 if (!CollectGarbage(OLD_POINTER_SPACE, MARK_COMPACTOR, gc_reason, NULL) &&
618 attempt + 1 >= kMinNumberOfAttempts) {
622 mark_compact_collector()->SetFlags(kNoGCFlags);
625 incremental_marking()->UncommitMarkingDeque();
629 bool Heap::CollectGarbage(AllocationSpace space,
630 GarbageCollector collector,
631 const char* gc_reason,
632 const char* collector_reason) {
633 // The VM is in the GC state until exiting this function.
634 VMState<GC> state(isolate_);
637 // Reset the allocation timeout to the GC interval, but make sure to
638 // allow at least a few allocations after a collection. The reason
639 // for this is that we have a lot of allocation sequences and we
640 // assume that a garbage collection will allow the subsequent
641 // allocation attempts to go through.
642 allocation_timeout_ = Max(6, FLAG_gc_interval);
645 if (collector == SCAVENGER && !incremental_marking()->IsStopped()) {
646 if (FLAG_trace_incremental_marking) {
647 PrintF("[IncrementalMarking] Scavenge during marking.\n");
651 if (collector == MARK_COMPACTOR &&
652 !mark_compact_collector()->abort_incremental_marking() &&
653 !incremental_marking()->IsStopped() &&
654 !incremental_marking()->should_hurry() &&
655 FLAG_incremental_marking_steps) {
656 // Make progress in incremental marking.
657 const intptr_t kStepSizeWhenDelayedByScavenge = 1 * MB;
658 incremental_marking()->Step(kStepSizeWhenDelayedByScavenge,
659 IncrementalMarking::NO_GC_VIA_STACK_GUARD);
660 if (!incremental_marking()->IsComplete()) {
661 if (FLAG_trace_incremental_marking) {
662 PrintF("[IncrementalMarking] Delaying MarkSweep.\n");
664 collector = SCAVENGER;
665 collector_reason = "incremental marking delaying mark-sweep";
669 bool next_gc_likely_to_collect_more = false;
671 { GCTracer tracer(this, gc_reason, collector_reason);
672 ASSERT(AllowHeapAllocation::IsAllowed());
673 DisallowHeapAllocation no_allocation_during_gc;
674 GarbageCollectionPrologue();
675 // The GC count was incremented in the prologue. Tell the tracer about
676 // it.
677 tracer.set_gc_count(gc_count_);
679 // Tell the tracer which collector we've selected.
680 tracer.set_collector(collector);
683 HistogramTimerScope histogram_timer_scope(
684 (collector == SCAVENGER) ? isolate_->counters()->gc_scavenger()
685 : isolate_->counters()->gc_compactor());
686 next_gc_likely_to_collect_more =
687 PerformGarbageCollection(collector, &tracer);
690 GarbageCollectionEpilogue();
693 // Start incremental marking for the next cycle. The heap snapshot
694 // generator needs incremental marking to stay off after it aborted.
695 if (!mark_compact_collector()->abort_incremental_marking() &&
696 incremental_marking()->IsStopped() &&
697 incremental_marking()->WorthActivating() &&
698 NextGCIsLikelyToBeFull()) {
699 incremental_marking()->Start();
702 return next_gc_likely_to_collect_more;
706 int Heap::NotifyContextDisposed() {
707 if (FLAG_parallel_recompilation) {
708 // Flush the queued recompilation tasks.
709 isolate()->optimizing_compiler_thread()->Flush();
711 flush_monomorphic_ics_ = true;
712 return ++contexts_disposed_;
716 void Heap::PerformScavenge() {
717 GCTracer tracer(this, NULL, NULL);
718 if (incremental_marking()->IsStopped()) {
719 PerformGarbageCollection(SCAVENGER, &tracer);
720 } else {
721 PerformGarbageCollection(MARK_COMPACTOR, &tracer);
726 void Heap::MoveElements(FixedArray* array,
727 int dst_index,
728 int src_index,
729 int len) {
730 if (len == 0) return;
732 ASSERT(array->map() != HEAP->fixed_cow_array_map());
733 Object** dst_objects = array->data_start() + dst_index;
734 OS::MemMove(dst_objects,
735 array->data_start() + src_index,
736 len * kPointerSize);
737 if (!InNewSpace(array)) {
738 for (int i = 0; i < len; i++) {
739 // TODO(hpayer): check store buffer for entries
740 if (InNewSpace(dst_objects[i])) {
741 RecordWrite(array->address(), array->OffsetOfElementAt(dst_index + i));
745 incremental_marking()->RecordWrites(array);
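// OS::MemMove bypasses the write barrier, so for arrays outside new space
// the loop above re-records every moved slot that now points into new space,
// and RecordWrites keeps the incremental marker aware of the updated array.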
750 // Helper class for verifying the string table.
751 class StringTableVerifier : public ObjectVisitor {
753 void VisitPointers(Object** start, Object** end) {
754 // Visit all HeapObject pointers in [start, end).
755 for (Object** p = start; p < end; p++) {
756 if ((*p)->IsHeapObject()) {
757 // Check that the string is actually internalized.
758 CHECK((*p)->IsTheHole() || (*p)->IsUndefined() ||
759 (*p)->IsInternalizedString());
766 static void VerifyStringTable() {
767 StringTableVerifier verifier;
768 HEAP->string_table()->IterateElements(&verifier);
770 #endif // VERIFY_HEAP
773 static bool AbortIncrementalMarkingAndCollectGarbage(
774 Heap* heap,
775 AllocationSpace space,
776 const char* gc_reason = NULL) {
777 heap->mark_compact_collector()->SetFlags(Heap::kAbortIncrementalMarkingMask);
778 bool result = heap->CollectGarbage(space, gc_reason);
779 heap->mark_compact_collector()->SetFlags(Heap::kNoGCFlags);
780 return result;
784 void Heap::ReserveSpace(
785 int *sizes,
786 Address *locations_out) {
787 bool gc_performed = true;
789 static const int kThreshold = 20;
790 while (gc_performed && counter++ < kThreshold) {
791 gc_performed = false;
792 ASSERT(NEW_SPACE == FIRST_PAGED_SPACE - 1);
793 for (int space = NEW_SPACE; space <= LAST_PAGED_SPACE; space++) {
794 if (sizes[space] != 0) {
795 MaybeObject* allocation;
796 if (space == NEW_SPACE) {
797 allocation = new_space()->AllocateRaw(sizes[space]);
798 } else {
799 allocation = paged_space(space)->AllocateRaw(sizes[space]);
800 }
801 FreeListNode* node;
802 if (!allocation->To<FreeListNode>(&node)) {
803 if (space == NEW_SPACE) {
804 Heap::CollectGarbage(NEW_SPACE,
805 "failed to reserve space in the new space");
807 AbortIncrementalMarkingAndCollectGarbage(
809 static_cast<AllocationSpace>(space),
810 "failed to reserve space in paged space");
815 // Mark with a free list node, in case we have a GC before
816 // allocating memory from it.
817 node->set_size(this, sizes[space]);
818 locations_out[space] = node->address();
825 // Failed to reserve the space after several attempts.
826 V8::FatalProcessOutOfMemory("Heap::ReserveSpace");
831 void Heap::EnsureFromSpaceIsCommitted() {
832 if (new_space_.CommitFromSpaceIfNeeded()) return;
834 // Committing memory to from space failed.
835 // Memory is exhausted and we will die.
836 V8::FatalProcessOutOfMemory("Committing semi space failed.");
840 void Heap::ClearJSFunctionResultCaches() {
841 if (isolate_->bootstrapper()->IsActive()) return;
843 Object* context = native_contexts_list_;
844 while (!context->IsUndefined()) {
845 // Get the caches for this context. GC can happen when the context
846 // is not fully initialized, so the caches can be undefined.
847 Object* caches_or_undefined =
848 Context::cast(context)->get(Context::JSFUNCTION_RESULT_CACHES_INDEX);
849 if (!caches_or_undefined->IsUndefined()) {
850 FixedArray* caches = FixedArray::cast(caches_or_undefined);
852 int length = caches->length();
853 for (int i = 0; i < length; i++) {
854 JSFunctionResultCache::cast(caches->get(i))->Clear();
857 // Get the next context:
858 context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
863 void Heap::ClearNormalizedMapCaches() {
864 if (isolate_->bootstrapper()->IsActive() &&
865 !incremental_marking()->IsMarking()) {
866 return;
867 }
869 Object* context = native_contexts_list_;
870 while (!context->IsUndefined()) {
871 // GC can happen when the context is not fully initialized,
872 // so the cache can be undefined.
873 Object* cache =
874 Context::cast(context)->get(Context::NORMALIZED_MAP_CACHE_INDEX);
875 if (!cache->IsUndefined()) {
876 NormalizedMapCache::cast(cache)->Clear();
878 context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
883 void Heap::UpdateSurvivalRateTrend(int start_new_space_size) {
884 double survival_rate =
885 (static_cast<double>(young_survivors_after_last_gc_) * 100) /
886 start_new_space_size;
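// Example: if 2 MB of objects survived out of an 8 MB new space at the
// start of the GC, survival_rate is 25 (per cent).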
888 if (survival_rate > kYoungSurvivalRateHighThreshold) {
889 high_survival_rate_period_length_++;
891 high_survival_rate_period_length_ = 0;
894 if (survival_rate < kYoungSurvivalRateLowThreshold) {
895 low_survival_rate_period_length_++;
897 low_survival_rate_period_length_ = 0;
900 double survival_rate_diff = survival_rate_ - survival_rate;
902 if (survival_rate_diff > kYoungSurvivalRateAllowedDeviation) {
903 set_survival_rate_trend(DECREASING);
904 } else if (survival_rate_diff < -kYoungSurvivalRateAllowedDeviation) {
905 set_survival_rate_trend(INCREASING);
907 set_survival_rate_trend(STABLE);
910 survival_rate_ = survival_rate;
913 bool Heap::PerformGarbageCollection(GarbageCollector collector,
914 GCTracer* tracer) {
915 bool next_gc_likely_to_collect_more = false;
917 if (collector != SCAVENGER) {
918 PROFILE(isolate_, CodeMovingGCEvent());
922 if (FLAG_verify_heap) {
927 GCType gc_type =
928 collector == MARK_COMPACTOR ? kGCTypeMarkSweepCompact : kGCTypeScavenge;
931 GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
932 VMState<EXTERNAL> state(isolate_);
933 HandleScope handle_scope(isolate_);
934 CallGCPrologueCallbacks(gc_type, kNoGCCallbackFlags);
937 EnsureFromSpaceIsCommitted();
939 int start_new_space_size = Heap::new_space()->SizeAsInt();
941 if (IsHighSurvivalRate()) {
942 // We speed up the incremental marker if it is running so that it
943 // does not fall behind the rate of promotion, which would cause a
944 // constantly growing old space.
945 incremental_marking()->NotifyOfHighPromotionRate();
948 if (collector == MARK_COMPACTOR) {
949 // Perform mark-sweep with optional compaction.
950 MarkCompact(tracer);
953 UpdateSurvivalRateTrend(start_new_space_size);
955 size_of_old_gen_at_last_old_space_gc_ = PromotedSpaceSizeOfObjects();
957 old_generation_allocation_limit_ =
958 OldGenerationAllocationLimit(size_of_old_gen_at_last_old_space_gc_);
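// The next full GC is triggered once the old generation grows past this
// limit, which is derived from the amount of data that survived this
// mark-compact.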
960 old_gen_exhausted_ = false;
961 } else {
962 tracer_ = tracer;
963 Scavenge();
964 tracer_ = NULL;
966 UpdateSurvivalRateTrend(start_new_space_size);
969 if (!new_space_high_promotion_mode_active_ &&
970 new_space_.Capacity() == new_space_.MaximumCapacity() &&
971 IsStableOrIncreasingSurvivalTrend() &&
972 IsHighSurvivalRate()) {
973 // Stable high survival rates even though young generation is at
974 // maximum capacity indicate that most objects will be promoted.
975 // To decrease scavenger pauses and final mark-sweep pauses, we
976 // have to limit maximal capacity of the young generation.
977 SetNewSpaceHighPromotionModeActive(true);
979 PrintPID("Limited new space size due to high promotion rate: %d MB\n",
980 new_space_.InitialCapacity() / MB);
982 // Support for global pre-tenuring uses the high promotion mode as a
983 // heuristic indicator of whether to pretenure or not, so we trigger
984 // deoptimization here to take advantage of pre-tenuring as soon as
985 // possible.
986 if (FLAG_pretenuring) {
987 isolate_->stack_guard()->FullDeopt();
989 } else if (new_space_high_promotion_mode_active_ &&
990 IsStableOrDecreasingSurvivalTrend() &&
991 IsLowSurvivalRate()) {
992 // Decreasing low survival rates might indicate that the above high
993 // promotion mode is over and we should allow the young generation
994 // to grow dynamically again.
995 SetNewSpaceHighPromotionModeActive(false);
997 PrintPID("Unlimited new space size due to low promotion rate: %d MB\n",
998 new_space_.MaximumCapacity() / MB);
1000 // Trigger deoptimization here to turn off pre-tenuring as soon as
1001 // possible.
1002 if (FLAG_pretenuring) {
1003 isolate_->stack_guard()->FullDeopt();
1007 if (new_space_high_promotion_mode_active_ &&
1008 new_space_.Capacity() > new_space_.InitialCapacity()) {
1009 new_space_.Shrink();
1012 isolate_->counters()->objs_since_last_young()->Set(0);
1014 // Callbacks that fire after this point might trigger nested GCs and
1015 // restart incremental marking, so the assertion can't be moved down.
1016 ASSERT(collector == SCAVENGER || incremental_marking()->IsStopped());
1018 gc_post_processing_depth_++;
1019 { AllowHeapAllocation allow_allocation;
1020 GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
1021 next_gc_likely_to_collect_more =
1022 isolate_->global_handles()->PostGarbageCollectionProcessing(
1025 gc_post_processing_depth_--;
1027 // Update relocatables.
1028 Relocatable::PostGarbageCollectionProcessing();
1030 if (collector == MARK_COMPACTOR) {
1031 // Register the amount of external allocated memory.
1032 amount_of_external_allocated_memory_at_last_global_gc_ =
1033 amount_of_external_allocated_memory_;
1037 GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
1038 VMState<EXTERNAL> state(isolate_);
1039 HandleScope handle_scope(isolate_);
1040 CallGCEpilogueCallbacks(gc_type);
1044 if (FLAG_verify_heap) {
1045 VerifyStringTable();
1049 return next_gc_likely_to_collect_more;
1053 void Heap::CallGCPrologueCallbacks(GCType gc_type, GCCallbackFlags flags) {
1054 if (gc_type == kGCTypeMarkSweepCompact && global_gc_prologue_callback_) {
1055 global_gc_prologue_callback_();
1057 for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
1058 if (gc_type & gc_prologue_callbacks_[i].gc_type) {
1059 gc_prologue_callbacks_[i].callback(gc_type, flags);
1065 void Heap::CallGCEpilogueCallbacks(GCType gc_type) {
1066 for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
1067 if (gc_type & gc_epilogue_callbacks_[i].gc_type) {
1068 gc_epilogue_callbacks_[i].callback(gc_type, kNoGCCallbackFlags);
1071 if (gc_type == kGCTypeMarkSweepCompact && global_gc_epilogue_callback_) {
1072 global_gc_epilogue_callback_();
1077 void Heap::MarkCompact(GCTracer* tracer) {
1078 gc_state_ = MARK_COMPACT;
1079 LOG(isolate_, ResourceEvent("markcompact", "begin"));
1081 mark_compact_collector_.Prepare(tracer);
1084 tracer->set_full_gc_count(ms_count_);
1086 MarkCompactPrologue();
1088 mark_compact_collector_.CollectGarbage();
1090 LOG(isolate_, ResourceEvent("markcompact", "end"));
1092 gc_state_ = NOT_IN_GC;
1094 isolate_->counters()->objs_since_last_full()->Set(0);
1096 contexts_disposed_ = 0;
1098 flush_monomorphic_ics_ = false;
1102 void Heap::MarkCompactPrologue() {
1103 // At any old GC clear the keyed lookup cache to enable collection of unused
1104 // maps.
1105 isolate_->keyed_lookup_cache()->Clear();
1106 isolate_->context_slot_cache()->Clear();
1107 isolate_->descriptor_lookup_cache()->Clear();
1108 RegExpResultsCache::Clear(string_split_cache());
1109 RegExpResultsCache::Clear(regexp_multiple_cache());
1111 isolate_->compilation_cache()->MarkCompactPrologue();
1113 CompletelyClearInstanceofCache();
1115 FlushNumberStringCache();
1116 if (FLAG_cleanup_code_caches_at_gc) {
1117 polymorphic_code_cache()->set_cache(undefined_value());
1120 ClearNormalizedMapCaches();
1124 // Helper class for copying HeapObjects
1125 class ScavengeVisitor: public ObjectVisitor {
1127 explicit ScavengeVisitor(Heap* heap) : heap_(heap) {}
1129 void VisitPointer(Object** p) { ScavengePointer(p); }
1131 void VisitPointers(Object** start, Object** end) {
1132 // Copy all HeapObject pointers in [start, end)
1133 for (Object** p = start; p < end; p++) ScavengePointer(p);
1137 void ScavengePointer(Object** p) {
1138 Object* object = *p;
1139 if (!heap_->InNewSpace(object)) return;
1140 Heap::ScavengeObject(reinterpret_cast<HeapObject**>(p),
1141 reinterpret_cast<HeapObject*>(object));
1149 // Visitor class to verify pointers in code or data space do not point into
1150 // new space.
1151 class VerifyNonPointerSpacePointersVisitor: public ObjectVisitor {
1153 void VisitPointers(Object** start, Object**end) {
1154 for (Object** current = start; current < end; current++) {
1155 if ((*current)->IsHeapObject()) {
1156 CHECK(!HEAP->InNewSpace(HeapObject::cast(*current)));
1163 static void VerifyNonPointerSpacePointers() {
1164 // Verify that there are no pointers to new space in spaces where we
1165 // do not expect them.
1166 VerifyNonPointerSpacePointersVisitor v;
1167 HeapObjectIterator code_it(HEAP->code_space());
1168 for (HeapObject* object = code_it.Next();
1169 object != NULL; object = code_it.Next())
1170 object->Iterate(&v);
1172 // The old data space was normally swept conservatively, so the iterator
1173 // does not work there and we normally skip the next bit.
1174 if (!HEAP->old_data_space()->was_swept_conservatively()) {
1175 HeapObjectIterator data_it(HEAP->old_data_space());
1176 for (HeapObject* object = data_it.Next();
1177 object != NULL; object = data_it.Next())
1178 object->Iterate(&v);
1181 #endif // VERIFY_HEAP
1184 void Heap::CheckNewSpaceExpansionCriteria() {
1185 if (new_space_.Capacity() < new_space_.MaximumCapacity() &&
1186 survived_since_last_expansion_ > new_space_.Capacity() &&
1187 !new_space_high_promotion_mode_active_) {
1188 // Grow the size of new space if there is room to grow, enough data
1189 // has survived scavenge since the last expansion and we are not in
1190 // high promotion mode.
1192 survived_since_last_expansion_ = 0;
1197 static bool IsUnscavengedHeapObject(Heap* heap, Object** p) {
1198 return heap->InNewSpace(*p) &&
1199 !HeapObject::cast(*p)->map_word().IsForwardingAddress();
1203 void Heap::ScavengeStoreBufferCallback(
1204 Heap* heap,
1205 MemoryChunk* page,
1206 StoreBufferEvent event) {
1207 heap->store_buffer_rebuilder_.Callback(page, event);
1211 void StoreBufferRebuilder::Callback(MemoryChunk* page, StoreBufferEvent event) {
1212 if (event == kStoreBufferStartScanningPagesEvent) {
1213 start_of_current_page_ = NULL;
1214 current_page_ = NULL;
1215 } else if (event == kStoreBufferScanningPageEvent) {
1216 if (current_page_ != NULL) {
1217 // If this page already overflowed the store buffer during this iteration.
1218 if (current_page_->scan_on_scavenge()) {
1219 // Then we should wipe out the entries that have been added for it.
1220 store_buffer_->SetTop(start_of_current_page_);
1221 } else if (store_buffer_->Top() - start_of_current_page_ >=
1222 (store_buffer_->Limit() - store_buffer_->Top()) >> 2) {
1223 // Did we find too many pointers in the previous page? The heuristic is
1224 // that no page can take more than 1/5 the remaining slots in the store
1225 // buffer.
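// (The check above compares used = store_buffer_->Top() - start_of_current_page_
// against (Limit() - Top()) / 4; the slots that remained when this page started
// scanning were used + (Limit() - Top()), so the condition is equivalent to
// used >= remaining_at_page_start / 5.)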
1226 current_page_->set_scan_on_scavenge(true);
1227 store_buffer_->SetTop(start_of_current_page_);
1229 // In this case the page we scanned took a reasonable number of slots in
1230 // the store buffer. It has now been rehabilitated and is no longer
1231 // marked scan_on_scavenge.
1232 ASSERT(!current_page_->scan_on_scavenge());
1235 start_of_current_page_ = store_buffer_->Top();
1236 current_page_ = page;
1237 } else if (event == kStoreBufferFullEvent) {
1238 // The current page overflowed the store buffer again. Wipe out its entries
1239 // in the store buffer and mark it scan-on-scavenge again. This may happen
1240 // several times while scanning.
1241 if (current_page_ == NULL) {
1242 // Store Buffer overflowed while scanning promoted objects. These are not
1243 // in any particular page, though they are likely to be clustered by the
1244 // allocation routines.
1245 store_buffer_->EnsureSpace(StoreBuffer::kStoreBufferSize / 2);
1247 // Store Buffer overflowed while scanning a particular old space page for
1248 // pointers to new space.
1249 ASSERT(current_page_ == page);
1250 ASSERT(page != NULL);
1251 current_page_->set_scan_on_scavenge(true);
1252 ASSERT(start_of_current_page_ != store_buffer_->Top());
1253 store_buffer_->SetTop(start_of_current_page_);
1261 void PromotionQueue::Initialize() {
1262 // Assumes that a NewSpacePage exactly fits a number of promotion queue
1263 // entries (where each is a pair of intptr_t). This allows us to simplify
1264 // the test for when to switch pages.
1265 ASSERT((Page::kPageSize - MemoryChunk::kBodyOffset) % (2 * kPointerSize)
1266 == 0);
1267 limit_ = reinterpret_cast<intptr_t*>(heap_->new_space()->ToSpaceStart());
1268 front_ = rear_ =
1269 reinterpret_cast<intptr_t*>(heap_->new_space()->ToSpaceEnd());
1270 emergency_stack_ = NULL;
1275 void PromotionQueue::RelocateQueueHead() {
1276 ASSERT(emergency_stack_ == NULL);
1278 Page* p = Page::FromAllocationTop(reinterpret_cast<Address>(rear_));
1279 intptr_t* head_start = rear_;
1280 intptr_t* head_end =
1281 Min(front_, reinterpret_cast<intptr_t*>(p->area_end()));
1283 int entries_count =
1284 static_cast<int>(head_end - head_start) / kEntrySizeInWords;
1286 emergency_stack_ = new List<Entry>(2 * entries_count);
1288 while (head_start != head_end) {
1289 int size = static_cast<int>(*(head_start++));
1290 HeapObject* obj = reinterpret_cast<HeapObject*>(*(head_start++));
1291 emergency_stack_->Add(Entry(obj, size));
1297 class ScavengeWeakObjectRetainer : public WeakObjectRetainer {
1299 explicit ScavengeWeakObjectRetainer(Heap* heap) : heap_(heap) { }
1301 virtual Object* RetainAs(Object* object) {
1302 if (!heap_->InFromSpace(object)) {
1306 MapWord map_word = HeapObject::cast(object)->map_word();
1307 if (map_word.IsForwardingAddress()) {
1308 return map_word.ToForwardingAddress();
1318 void Heap::Scavenge() {
1319 RelocationLock relocation_lock(this);
1322 if (FLAG_verify_heap) VerifyNonPointerSpacePointers();
1325 gc_state_ = SCAVENGE;
1327 // Implements Cheney's copying algorithm
1328 LOG(isolate_, ResourceEvent("scavenge", "begin"));
1330 // Clear descriptor cache.
1331 isolate_->descriptor_lookup_cache()->Clear();
1333 // Used for updating survived_since_last_expansion_ at function end.
1334 intptr_t survived_watermark = PromotedSpaceSizeOfObjects();
1336 CheckNewSpaceExpansionCriteria();
1338 SelectScavengingVisitorsTable();
1340 incremental_marking()->PrepareForScavenge();
1342 paged_space(OLD_DATA_SPACE)->EnsureSweeperProgress(new_space_.Size());
1343 paged_space(OLD_POINTER_SPACE)->EnsureSweeperProgress(new_space_.Size());
1345 // Flip the semispaces. After flipping, to space is empty, from space has
1346 // live objects.
1347 new_space_.Flip();
1348 new_space_.ResetAllocationInfo();
1350 // We need to sweep newly copied objects which can be either in the
1351 // to space or promoted to the old generation. For to-space
1352 // objects, we treat the bottom of the to space as a queue. Newly
1353 // copied and unswept objects lie between a 'front' mark and the
1354 // allocation pointer.
1356 // Promoted objects can go into various old-generation spaces, and
1357 // can be allocated internally in the spaces (from the free list).
1358 // We treat the top of the to space as a queue of addresses of
1359 // promoted objects. The addresses of newly promoted and unswept
1360 // objects lie between a 'front' mark and a 'rear' mark that is
1361 // updated as a side effect of promoting an object.
1363 // There is guaranteed to be enough room at the top of the to space
1364 // for the addresses of promoted objects: every object promoted
1365 // frees up its size in bytes from the top of the new space, and
1366 // objects are at least one pointer in size.
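// Rough layout of the to space while scavenging (low addresses on the left):
//
//   | swept copied objects | unswept copied | free ... | promotion queue |
//   ^ ToSpaceStart       ^ new_space_front  ^ top      ^ rear_   ToSpaceEnd ^
//
// The promotion queue at the top end stores entries describing promoted
// objects (object address and size), not the objects themselves.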
1367 Address new_space_front = new_space_.ToSpaceStart();
1368 promotion_queue_.Initialize();
1371 store_buffer()->Clean();
1374 ScavengeVisitor scavenge_visitor(this);
1376 IterateRoots(&scavenge_visitor, VISIT_ALL_IN_SCAVENGE);
1378 // Copy objects reachable from the old generation.
1380 StoreBufferRebuildScope scope(this,
1382 &ScavengeStoreBufferCallback);
1383 store_buffer()->IteratePointersToNewSpace(&ScavengeObject);
1386 // Copy objects reachable from simple cells by scavenging cell values
1387 // directly.
1388 HeapObjectIterator cell_iterator(cell_space_);
1389 for (HeapObject* heap_object = cell_iterator.Next();
1390 heap_object != NULL;
1391 heap_object = cell_iterator.Next()) {
1392 if (heap_object->IsCell()) {
1393 Cell* cell = Cell::cast(heap_object);
1394 Address value_address = cell->ValueAddress();
1395 scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(value_address));
1399 // Copy objects reachable from global property cells by scavenging global
1400 // property cell values directly.
1401 HeapObjectIterator js_global_property_cell_iterator(property_cell_space_);
1402 for (HeapObject* heap_object = js_global_property_cell_iterator.Next();
1403 heap_object != NULL;
1404 heap_object = js_global_property_cell_iterator.Next()) {
1405 if (heap_object->IsPropertyCell()) {
1406 PropertyCell* cell = PropertyCell::cast(heap_object);
1407 Address value_address = cell->ValueAddress();
1408 scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(value_address));
1409 Address type_address = cell->TypeAddress();
1410 scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(type_address));
1414 // Copy objects reachable from the code flushing candidates list.
1415 MarkCompactCollector* collector = mark_compact_collector();
1416 if (collector->is_code_flushing_enabled()) {
1417 collector->code_flusher()->IteratePointersToFromSpace(&scavenge_visitor);
1420 // Scavenge object reachable from the native contexts list directly.
1421 scavenge_visitor.VisitPointer(BitCast<Object**>(&native_contexts_list_));
1423 new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
1425 while (isolate()->global_handles()->IterateObjectGroups(
1426 &scavenge_visitor, &IsUnscavengedHeapObject)) {
1427 new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
1429 isolate()->global_handles()->RemoveObjectGroups();
1430 isolate()->global_handles()->RemoveImplicitRefGroups();
1432 isolate_->global_handles()->IdentifyNewSpaceWeakIndependentHandles(
1433 &IsUnscavengedHeapObject);
1434 isolate_->global_handles()->IterateNewSpaceWeakIndependentRoots(
1435 &scavenge_visitor);
1436 new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
1438 UpdateNewSpaceReferencesInExternalStringTable(
1439 &UpdateNewSpaceReferenceInExternalStringTableEntry);
1441 promotion_queue_.Destroy();
1443 if (!FLAG_watch_ic_patching) {
1444 isolate()->runtime_profiler()->UpdateSamplesAfterScavenge();
1446 incremental_marking()->UpdateMarkingDequeAfterScavenge();
1448 ScavengeWeakObjectRetainer weak_object_retainer(this);
1449 ProcessWeakReferences(&weak_object_retainer);
1451 ASSERT(new_space_front == new_space_.top());
1454 new_space_.set_age_mark(new_space_.top());
1456 new_space_.LowerInlineAllocationLimit(
1457 new_space_.inline_allocation_limit_step());
1459 // Update how much has survived scavenge.
1460 IncrementYoungSurvivorsCounter(static_cast<int>(
1461 (PromotedSpaceSizeOfObjects() - survived_watermark) + new_space_.Size()));
1463 LOG(isolate_, ResourceEvent("scavenge", "end"));
1465 gc_state_ = NOT_IN_GC;
1467 scavenges_since_last_idle_round_++;
1471 String* Heap::UpdateNewSpaceReferenceInExternalStringTableEntry(Heap* heap,
1473 MapWord first_word = HeapObject::cast(*p)->map_word();
1475 if (!first_word.IsForwardingAddress()) {
1476 // Unreachable external string can be finalized.
1477 heap->FinalizeExternalString(String::cast(*p));
1478 return NULL;
1479 }
1481 // String is still reachable.
1482 return String::cast(first_word.ToForwardingAddress());
1486 void Heap::UpdateNewSpaceReferencesInExternalStringTable(
1487 ExternalStringTableUpdaterCallback updater_func) {
1489 if (FLAG_verify_heap) {
1490 external_string_table_.Verify();
1494 if (external_string_table_.new_space_strings_.is_empty()) return;
1496 Object** start = &external_string_table_.new_space_strings_[0];
1497 Object** end = start + external_string_table_.new_space_strings_.length();
1498 Object** last = start;
1500 for (Object** p = start; p < end; ++p) {
1501 ASSERT(InFromSpace(*p));
1502 String* target = updater_func(this, p);
1504 if (target == NULL) continue;
1506 ASSERT(target->IsExternalString());
1508 if (InNewSpace(target)) {
1509 // String is still in new space. Update the table entry.
1510 *last = target;
1511 ++last;
1512 } else {
1513 // String got promoted. Move it to the old string list.
1514 external_string_table_.AddOldString(target);
1518 ASSERT(last <= end);
1519 external_string_table_.ShrinkNewStrings(static_cast<int>(last - start));
1523 void Heap::UpdateReferencesInExternalStringTable(
1524 ExternalStringTableUpdaterCallback updater_func) {
1526 // Update old space string references.
1527 if (external_string_table_.old_space_strings_.length() > 0) {
1528 Object** start = &external_string_table_.old_space_strings_[0];
1529 Object** end = start + external_string_table_.old_space_strings_.length();
1530 for (Object** p = start; p < end; ++p) *p = updater_func(this, p);
1533 UpdateNewSpaceReferencesInExternalStringTable(updater_func);
1537 template <class T>
1538 struct WeakListVisitor;
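// Each weakly linked type T gets a WeakListVisitor<T> specialization below.
// A specialization supplies SetWeakNext/WeakNext accessors for the intrusive
// next field, WeakNextOffset for slot recording, and the
// VisitLiveObject/VisitPhantomObject hooks called for retained and dropped
// list elements respectively.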
1541 template <class T>
1542 static Object* VisitWeakList(Heap* heap,
1543 Object* list,
1544 WeakObjectRetainer* retainer,
1545 bool record_slots) {
1546 Object* undefined = heap->undefined_value();
1547 Object* head = undefined;
1549 MarkCompactCollector* collector = heap->mark_compact_collector();
1550 while (list != undefined) {
1551 // Check whether to keep the candidate in the list.
1552 T* candidate = reinterpret_cast<T*>(list);
1553 Object* retained = retainer->RetainAs(list);
1554 if (retained != NULL) {
1555 if (head == undefined) {
1556 // First element in the list.
1559 // Subsequent elements in the list.
1560 ASSERT(tail != NULL);
1561 WeakListVisitor<T>::SetWeakNext(tail, retained);
1562 if (record_slots) {
1563 Object** next_slot =
1564 HeapObject::RawField(tail, WeakListVisitor<T>::WeakNextOffset());
1565 collector->RecordSlot(next_slot, next_slot, retained);
1566 }
1568 // Retained object is new tail.
1569 ASSERT(!retained->IsUndefined());
1570 candidate = reinterpret_cast<T*>(retained);
1574 // tail is a live object, visit it.
1575 WeakListVisitor<T>::VisitLiveObject(
1576 heap, tail, retainer, record_slots);
1578 WeakListVisitor<T>::VisitPhantomObject(heap, candidate);
1581 // Move to next element in the list.
1582 list = WeakListVisitor<T>::WeakNext(candidate);
1585 // Terminate the list if there are one or more elements.
1587 WeakListVisitor<T>::SetWeakNext(tail, undefined);
1594 struct WeakListVisitor<JSFunction> {
1595 static void SetWeakNext(JSFunction* function, Object* next) {
1596 function->set_next_function_link(next);
1599 static Object* WeakNext(JSFunction* function) {
1600 return function->next_function_link();
1603 static int WeakNextOffset() {
1604 return JSFunction::kNextFunctionLinkOffset;
1607 static void VisitLiveObject(Heap*, JSFunction*,
1608 WeakObjectRetainer*, bool) {
1611 static void VisitPhantomObject(Heap*, JSFunction*) {
1617 struct WeakListVisitor<Context> {
1618 static void SetWeakNext(Context* context, Object* next) {
1619 context->set(Context::NEXT_CONTEXT_LINK,
1621 UPDATE_WRITE_BARRIER);
1624 static Object* WeakNext(Context* context) {
1625 return context->get(Context::NEXT_CONTEXT_LINK);
1628 static void VisitLiveObject(Heap* heap,
1630 WeakObjectRetainer* retainer,
1631 bool record_slots) {
1632 // Process the weak list of optimized functions for the context.
1633 Object* function_list_head =
1634 VisitWeakList<JSFunction>(
1636 context->get(Context::OPTIMIZED_FUNCTIONS_LIST),
1639 context->set(Context::OPTIMIZED_FUNCTIONS_LIST,
1641 UPDATE_WRITE_BARRIER);
1643 Object** optimized_functions =
1644 HeapObject::RawField(
1645 context, FixedArray::SizeFor(Context::OPTIMIZED_FUNCTIONS_LIST));
1646 heap->mark_compact_collector()->RecordSlot(
1647 optimized_functions, optimized_functions, function_list_head);
1651 static void VisitPhantomObject(Heap*, Context*) {
1654 static int WeakNextOffset() {
1655 return FixedArray::SizeFor(Context::NEXT_CONTEXT_LINK);
1660 void Heap::ProcessWeakReferences(WeakObjectRetainer* retainer) {
1661 // We don't record weak slots during marking or scavenges.
1662 // Instead we do it once when we complete a mark-compact cycle.
1663 // Note that the write barrier has no effect if we are already in the middle
1664 // of a compacting mark-sweep cycle, so we have to record slots manually.
1665 bool record_slots =
1666 gc_state() == MARK_COMPACT &&
1667 mark_compact_collector()->is_compacting();
1668 ProcessArrayBuffers(retainer, record_slots);
1669 ProcessNativeContexts(retainer, record_slots);
1670 ProcessAllocationSites(retainer, record_slots);
1673 void Heap::ProcessNativeContexts(WeakObjectRetainer* retainer,
1674 bool record_slots) {
1676 VisitWeakList<Context>(
1677 this, native_contexts_list(), retainer, record_slots);
1678 // Update the head of the list of contexts.
1679 native_contexts_list_ = head;
1684 struct WeakListVisitor<JSArrayBufferView> {
1685 static void SetWeakNext(JSArrayBufferView* obj, Object* next) {
1686 obj->set_weak_next(next);
1689 static Object* WeakNext(JSArrayBufferView* obj) {
1690 return obj->weak_next();
1693 static void VisitLiveObject(Heap*,
1694 JSArrayBufferView* obj,
1695 WeakObjectRetainer* retainer,
1696 bool record_slots) {}
1698 static void VisitPhantomObject(Heap*, JSArrayBufferView*) {}
1700 static int WeakNextOffset() {
1701 return JSArrayBufferView::kWeakNextOffset;
1707 struct WeakListVisitor<JSArrayBuffer> {
1708 static void SetWeakNext(JSArrayBuffer* obj, Object* next) {
1709 obj->set_weak_next(next);
1712 static Object* WeakNext(JSArrayBuffer* obj) {
1713 return obj->weak_next();
1716 static void VisitLiveObject(Heap* heap,
1717 JSArrayBuffer* array_buffer,
1718 WeakObjectRetainer* retainer,
1719 bool record_slots) {
1720 Object* typed_array_obj =
1721 VisitWeakList<JSArrayBufferView>(
1723 array_buffer->weak_first_view(),
1724 retainer, record_slots);
1725 array_buffer->set_weak_first_view(typed_array_obj);
1726 if (typed_array_obj != heap->undefined_value() && record_slots) {
1727 Object** slot = HeapObject::RawField(
1728 array_buffer, JSArrayBuffer::kWeakFirstViewOffset);
1729 heap->mark_compact_collector()->RecordSlot(slot, slot, typed_array_obj);
1733 static void VisitPhantomObject(Heap* heap, JSArrayBuffer* phantom) {
1734 Runtime::FreeArrayBuffer(heap->isolate(), phantom);
1737 static int WeakNextOffset() {
1738 return JSArrayBuffer::kWeakNextOffset;
1743 void Heap::ProcessArrayBuffers(WeakObjectRetainer* retainer,
1744 bool record_slots) {
1745 Object* array_buffer_obj =
1746 VisitWeakList<JSArrayBuffer>(this,
1747 array_buffers_list(),
1748 retainer, record_slots);
1749 set_array_buffers_list(array_buffer_obj);
1753 void Heap::TearDownArrayBuffers() {
1754 Object* undefined = undefined_value();
1755 for (Object* o = array_buffers_list(); o != undefined;) {
1756 JSArrayBuffer* buffer = JSArrayBuffer::cast(o);
1757 Runtime::FreeArrayBuffer(isolate(), buffer);
1758 o = buffer->weak_next();
1760 array_buffers_list_ = undefined;
1765 struct WeakListVisitor<AllocationSite> {
1766 static void SetWeakNext(AllocationSite* obj, Object* next) {
1767 obj->set_weak_next(next);
1770 static Object* WeakNext(AllocationSite* obj) {
1771 return obj->weak_next();
1774 static void VisitLiveObject(Heap* heap,
1775 AllocationSite* array_buffer,
1776 WeakObjectRetainer* retainer,
1777 bool record_slots) {}
1779 static void VisitPhantomObject(Heap* heap, AllocationSite* phantom) {}
1781 static int WeakNextOffset() {
1782 return AllocationSite::kWeakNextOffset;
1787 void Heap::ProcessAllocationSites(WeakObjectRetainer* retainer,
1788 bool record_slots) {
1789 Object* allocation_site_obj =
1790 VisitWeakList<AllocationSite>(this,
1791 allocation_sites_list(),
1792 retainer, record_slots);
1793 set_allocation_sites_list(allocation_site_obj);
1797 void Heap::VisitExternalResources(v8::ExternalResourceVisitor* visitor) {
1798 DisallowHeapAllocation no_allocation;
1800 // Both the external string table and the string table may contain
1801 // external strings, but neither lists them exhaustively, nor is the
1802 // intersection set empty. Therefore we iterate over the external string
1803 // table first, ignoring internalized strings, and then over the
1804 // internalized string table.
1806 class ExternalStringTableVisitorAdapter : public ObjectVisitor {
1808 explicit ExternalStringTableVisitorAdapter(
1809 v8::ExternalResourceVisitor* visitor) : visitor_(visitor) {}
1810 virtual void VisitPointers(Object** start, Object** end) {
1811 for (Object** p = start; p < end; p++) {
1812 // Visit non-internalized external strings,
1813 // since internalized strings are listed in the string table.
1814 if (!(*p)->IsInternalizedString()) {
1815 ASSERT((*p)->IsExternalString());
1816 visitor_->VisitExternalString(Utils::ToLocal(
1817 Handle<String>(String::cast(*p))));
1822 v8::ExternalResourceVisitor* visitor_;
1823 } external_string_table_visitor(visitor);
1825 external_string_table_.Iterate(&external_string_table_visitor);
1827 class StringTableVisitorAdapter : public ObjectVisitor {
1829 explicit StringTableVisitorAdapter(
1830 v8::ExternalResourceVisitor* visitor) : visitor_(visitor) {}
1831 virtual void VisitPointers(Object** start, Object** end) {
1832 for (Object** p = start; p < end; p++) {
1833 if ((*p)->IsExternalString()) {
1834 ASSERT((*p)->IsInternalizedString());
1835 visitor_->VisitExternalString(Utils::ToLocal(
1836 Handle<String>(String::cast(*p))));
1841 v8::ExternalResourceVisitor* visitor_;
1842 } string_table_visitor(visitor);
1844 string_table()->IterateElements(&string_table_visitor);
1848 class NewSpaceScavenger : public StaticNewSpaceVisitor<NewSpaceScavenger> {
1850 static inline void VisitPointer(Heap* heap, Object** p) {
1851 Object* object = *p;
1852 if (!heap->InNewSpace(object)) return;
1853 Heap::ScavengeObject(reinterpret_cast<HeapObject**>(p),
1854 reinterpret_cast<HeapObject*>(object));
1859 Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
1860 Address new_space_front) {
1861 do {
1862 SemiSpace::AssertValidRange(new_space_front, new_space_.top());
1863 // The addresses new_space_front and new_space_.top() define a
1864 // queue of unprocessed copied objects. Process them until the
1865 // queue is empty.
1866 while (new_space_front != new_space_.top()) {
1867 if (!NewSpacePage::IsAtEnd(new_space_front)) {
1868 HeapObject* object = HeapObject::FromAddress(new_space_front);
1870 NewSpaceScavenger::IterateBody(object->map(), object);
1873 NewSpacePage::FromLimit(new_space_front)->next_page()->area_start();
1877 // Promote and process all the to-be-promoted objects.
1879 StoreBufferRebuildScope scope(this,
1881 &ScavengeStoreBufferCallback);
1882 while (!promotion_queue()->is_empty()) {
1885 promotion_queue()->remove(&target, &size);
1887 // The promoted object might already have been partially visited
1888 // during old space pointer iteration. Thus we search specifically
1889 // for pointers into the from semispace instead of pointers to new space.
1891 ASSERT(!target->IsMap());
1892 IterateAndMarkPointersToFromSpace(target->address(),
1893 target->address() + size,
1898 // Take another spin if there are now unswept objects in new space
1899 // (there are currently no more unswept promoted objects).
1900 } while (new_space_front != new_space_.top());
1902 return new_space_front;
1906 STATIC_ASSERT((FixedDoubleArray::kHeaderSize & kDoubleAlignmentMask) == 0);
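// EnsureDoubleAligned makes the double fields of an object 8-byte aligned.
// Callers allocate the object size plus one extra word; if the raw address is
// misaligned a one-word filler is placed in front and the object starts one
// word later, otherwise the unused word at the end becomes the filler.
// Illustrative example on a 32-bit target (kPointerSize == 4,
// kDoubleAlignment == 8): an allocation at an address ending in 0xc is
// misaligned, so the filler occupies that word and the object begins at the
// next 8-aligned address.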
1909 INLINE(static HeapObject* EnsureDoubleAligned(Heap* heap,
1913 static HeapObject* EnsureDoubleAligned(Heap* heap,
1916 if ((OffsetFrom(object->address()) & kDoubleAlignmentMask) != 0) {
1917 heap->CreateFillerObjectAt(object->address(), kPointerSize);
1918 return HeapObject::FromAddress(object->address() + kPointerSize);
1920 heap->CreateFillerObjectAt(object->address() + size - kPointerSize,
1927 enum LoggingAndProfiling {
1928 LOGGING_AND_PROFILING_ENABLED,
1929 LOGGING_AND_PROFILING_DISABLED
1933 enum MarksHandling { TRANSFER_MARKS, IGNORE_MARKS };
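// ScavengingVisitor is instantiated once per combination of MarksHandling and
// LoggingAndProfiling (four tables in total, see
// InitializeScavengingVisitorsTables below).  Each instantiation keeps its
// own static dispatch table from visitor ids to evacuation callbacks, and
// Heap::SelectScavengingVisitorsTable copies the appropriate table into the
// heap before a scavenge, depending on whether incremental marking and
// logging/profiling are active.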
1936 template<MarksHandling marks_handling,
1937 LoggingAndProfiling logging_and_profiling_mode>
1938 class ScavengingVisitor : public StaticVisitorBase {
1940 static void Initialize() {
1941 table_.Register(kVisitSeqOneByteString, &EvacuateSeqOneByteString);
1942 table_.Register(kVisitSeqTwoByteString, &EvacuateSeqTwoByteString);
1943 table_.Register(kVisitShortcutCandidate, &EvacuateShortcutCandidate);
1944 table_.Register(kVisitByteArray, &EvacuateByteArray);
1945 table_.Register(kVisitFixedArray, &EvacuateFixedArray);
1946 table_.Register(kVisitFixedDoubleArray, &EvacuateFixedDoubleArray);
1948 table_.Register(kVisitNativeContext,
1949 &ObjectEvacuationStrategy<POINTER_OBJECT>::
1950 template VisitSpecialized<Context::kSize>);
1952 table_.Register(kVisitConsString,
1953 &ObjectEvacuationStrategy<POINTER_OBJECT>::
1954 template VisitSpecialized<ConsString::kSize>);
1956 table_.Register(kVisitSlicedString,
1957 &ObjectEvacuationStrategy<POINTER_OBJECT>::
1958 template VisitSpecialized<SlicedString::kSize>);
1960 table_.Register(kVisitSymbol,
1961 &ObjectEvacuationStrategy<POINTER_OBJECT>::
1962 template VisitSpecialized<Symbol::kSize>);
1964 table_.Register(kVisitSharedFunctionInfo,
1965 &ObjectEvacuationStrategy<POINTER_OBJECT>::
1966 template VisitSpecialized<SharedFunctionInfo::kSize>);
1968 table_.Register(kVisitJSWeakMap,
1969 &ObjectEvacuationStrategy<POINTER_OBJECT>::
1972 table_.Register(kVisitJSWeakSet,
1973 &ObjectEvacuationStrategy<POINTER_OBJECT>::
1976 table_.Register(kVisitJSArrayBuffer,
1977 &ObjectEvacuationStrategy<POINTER_OBJECT>::
1980 table_.Register(kVisitJSTypedArray,
1981 &ObjectEvacuationStrategy<POINTER_OBJECT>::
1984 table_.Register(kVisitJSDataView,
1985 &ObjectEvacuationStrategy<POINTER_OBJECT>::
1988 table_.Register(kVisitJSRegExp,
1989 &ObjectEvacuationStrategy<POINTER_OBJECT>::
1992 if (marks_handling == IGNORE_MARKS) {
1993 table_.Register(kVisitJSFunction,
1994 &ObjectEvacuationStrategy<POINTER_OBJECT>::
1995 template VisitSpecialized<JSFunction::kSize>);
1997 table_.Register(kVisitJSFunction, &EvacuateJSFunction);
2000 table_.RegisterSpecializations<ObjectEvacuationStrategy<DATA_OBJECT>,
2002 kVisitDataObjectGeneric>();
2004 table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>,
2006 kVisitJSObjectGeneric>();
2008 table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>,
2010 kVisitStructGeneric>();
2013 static VisitorDispatchTable<ScavengingCallback>* GetTable() {
2018 enum ObjectContents { DATA_OBJECT, POINTER_OBJECT };
2019 enum SizeRestriction { SMALL, UNKNOWN_SIZE };
2021 static void RecordCopiedObject(Heap* heap, HeapObject* obj) {
2022 bool should_record = false;
2024 should_record = FLAG_heap_stats;
2026 should_record = should_record || FLAG_log_gc;
2027 if (should_record) {
2028 if (heap->new_space()->Contains(obj)) {
2029 heap->new_space()->RecordAllocation(obj);
2031 heap->new_space()->RecordPromotion(obj);
2036 // Helper function used by CopyObject to copy a source object to an
2037 // allocated target object and update the forwarding pointer in the source
2038 // object; afterwards the source's map word encodes the target's address.
2039 INLINE(static void MigrateObject(Heap* heap,
2043 // Copy the content of source to target.
2044 heap->CopyBlock(target->address(), source->address(), size);
2046 // Set the forwarding address.
2047 source->set_map_word(MapWord::FromForwardingAddress(target));
2049 if (logging_and_profiling_mode == LOGGING_AND_PROFILING_ENABLED) {
2050 // Update NewSpace stats if necessary.
2051 RecordCopiedObject(heap, target);
2052 HEAP_PROFILE(heap, ObjectMoveEvent(source->address(), target->address()));
2053 Isolate* isolate = heap->isolate();
2054 if (isolate->logger()->is_logging_code_events() ||
2055 isolate->cpu_profiler()->is_profiling()) {
2056 if (target->IsSharedFunctionInfo()) {
2057 PROFILE(isolate, SharedFunctionInfoMoveEvent(
2058 source->address(), target->address()));
2063 if (marks_handling == TRANSFER_MARKS) {
2064 if (Marking::TransferColor(source, target)) {
2065 MemoryChunk::IncrementLiveBytesFromGC(target->address(), size);
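// EvacuateObject decides where a surviving new space object goes: objects
// that should be promoted are moved to old pointer/data space (or large
// object space if they exceed the regular page payload), everything else is
// copied within new space.  Promoted pointer objects are pushed onto the
// promotion queue so their slots can be rescanned for from-space pointers,
// and double-aligned objects request one extra word which EnsureDoubleAligned
// turns into a filler.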
2071 template<ObjectContents object_contents,
2072 SizeRestriction size_restriction,
2074 static inline void EvacuateObject(Map* map,
2078 SLOW_ASSERT((size_restriction != SMALL) ||
2079 (object_size <= Page::kMaxNonCodeHeapObjectSize));
2080 SLOW_ASSERT(object->Size() == object_size);
2082 int allocation_size = object_size;
2083 if (alignment != kObjectAlignment) {
2084 ASSERT(alignment == kDoubleAlignment);
2085 allocation_size += kPointerSize;
2088 Heap* heap = map->GetHeap();
2089 if (heap->ShouldBePromoted(object->address(), object_size)) {
2090 MaybeObject* maybe_result;
2092 if ((size_restriction != SMALL) &&
2093 (allocation_size > Page::kMaxNonCodeHeapObjectSize)) {
2094 maybe_result = heap->lo_space()->AllocateRaw(allocation_size,
2097 if (object_contents == DATA_OBJECT) {
2098 maybe_result = heap->old_data_space()->AllocateRaw(allocation_size);
2101 heap->old_pointer_space()->AllocateRaw(allocation_size);
2105 Object* result = NULL; // Initialization to please compiler.
2106 if (maybe_result->ToObject(&result)) {
2107 HeapObject* target = HeapObject::cast(result);
2109 if (alignment != kObjectAlignment) {
2110 target = EnsureDoubleAligned(heap, target, allocation_size);
2113 // Order is important: the slot might be inside the target if the target
2114 // was allocated over a dead object and the slot comes from the store buffer.
2117 MigrateObject(heap, object, target, object_size);
2119 if (object_contents == POINTER_OBJECT) {
2120 if (map->instance_type() == JS_FUNCTION_TYPE) {
2121 heap->promotion_queue()->insert(
2122 target, JSFunction::kNonWeakFieldsEndOffset);
2124 heap->promotion_queue()->insert(target, object_size);
2128 heap->tracer()->increment_promoted_objects_size(object_size);
2132 MaybeObject* allocation = heap->new_space()->AllocateRaw(allocation_size);
2133 heap->promotion_queue()->SetNewLimit(heap->new_space()->top());
2134 Object* result = allocation->ToObjectUnchecked();
2135 HeapObject* target = HeapObject::cast(result);
2137 if (alignment != kObjectAlignment) {
2138 target = EnsureDoubleAligned(heap, target, allocation_size);
2141 // Order is important: the slot might be inside the target if the target
2142 // was allocated over a dead object and the slot comes from the store buffer.
2145 MigrateObject(heap, object, target, object_size);
2150 static inline void EvacuateJSFunction(Map* map,
2152 HeapObject* object) {
2153 ObjectEvacuationStrategy<POINTER_OBJECT>::
2154 template VisitSpecialized<JSFunction::kSize>(map, slot, object);
2156 HeapObject* target = *slot;
2157 MarkBit mark_bit = Marking::MarkBitFrom(target);
2158 if (Marking::IsBlack(mark_bit)) {
2159 // This object is black and it might not be rescanned by the marker.
2160 // We should explicitly record the code entry slot for compaction because
2161 // promotion queue processing (IterateAndMarkPointersToFromSpace) will
2162 // miss it, as it is not HeapObject-tagged.
2163 Address code_entry_slot =
2164 target->address() + JSFunction::kCodeEntryOffset;
2165 Code* code = Code::cast(Code::GetObjectFromEntryAddress(code_entry_slot));
2166 map->GetHeap()->mark_compact_collector()->
2167 RecordCodeEntrySlot(code_entry_slot, code);
2172 static inline void EvacuateFixedArray(Map* map,
2174 HeapObject* object) {
2175 int object_size = FixedArray::BodyDescriptor::SizeOf(map, object);
2176 EvacuateObject<POINTER_OBJECT, UNKNOWN_SIZE, kObjectAlignment>(map,
2183 static inline void EvacuateFixedDoubleArray(Map* map,
2185 HeapObject* object) {
2186 int length = reinterpret_cast<FixedDoubleArray*>(object)->length();
2187 int object_size = FixedDoubleArray::SizeFor(length);
2188 EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE, kDoubleAlignment>(
2196 static inline void EvacuateByteArray(Map* map,
2198 HeapObject* object) {
2199 int object_size = reinterpret_cast<ByteArray*>(object)->ByteArraySize();
2200 EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE, kObjectAlignment>(
2201 map, slot, object, object_size);
2205 static inline void EvacuateSeqOneByteString(Map* map,
2207 HeapObject* object) {
2208 int object_size = SeqOneByteString::cast(object)->
2209 SeqOneByteStringSize(map->instance_type());
2210 EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE, kObjectAlignment>(
2211 map, slot, object, object_size);
2215 static inline void EvacuateSeqTwoByteString(Map* map,
2217 HeapObject* object) {
2218 int object_size = SeqTwoByteString::cast(object)->
2219 SeqTwoByteStringSize(map->instance_type());
2220 EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE, kObjectAlignment>(
2221 map, slot, object, object_size);
2225 static inline bool IsShortcutCandidate(int type) {
2226 return ((type & kShortcutTypeMask) == kShortcutTypeTag);
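// A shortcut candidate is a cons string whose second component is the empty
// string.  Outside of incremental marking the scavenger short-circuits such
// strings: the referencing slot is redirected to the first component and the
// cons wrapper itself is not copied, removing the extra indirection.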
2229 static inline void EvacuateShortcutCandidate(Map* map,
2231 HeapObject* object) {
2232 ASSERT(IsShortcutCandidate(map->instance_type()));
2234 Heap* heap = map->GetHeap();
2236 if (marks_handling == IGNORE_MARKS &&
2237 ConsString::cast(object)->unchecked_second() ==
2238 heap->empty_string()) {
2240 HeapObject::cast(ConsString::cast(object)->unchecked_first());
2244 if (!heap->InNewSpace(first)) {
2245 object->set_map_word(MapWord::FromForwardingAddress(first));
2249 MapWord first_word = first->map_word();
2250 if (first_word.IsForwardingAddress()) {
2251 HeapObject* target = first_word.ToForwardingAddress();
2254 object->set_map_word(MapWord::FromForwardingAddress(target));
2258 heap->DoScavengeObject(first->map(), slot, first);
2259 object->set_map_word(MapWord::FromForwardingAddress(*slot));
2263 int object_size = ConsString::kSize;
2264 EvacuateObject<POINTER_OBJECT, SMALL, kObjectAlignment>(
2265 map, slot, object, object_size);
2268 template<ObjectContents object_contents>
2269 class ObjectEvacuationStrategy {
2271 template<int object_size>
2272 static inline void VisitSpecialized(Map* map,
2274 HeapObject* object) {
2275 EvacuateObject<object_contents, SMALL, kObjectAlignment>(
2276 map, slot, object, object_size);
2279 static inline void Visit(Map* map,
2281 HeapObject* object) {
2282 int object_size = map->instance_size();
2283 EvacuateObject<object_contents, SMALL, kObjectAlignment>(
2284 map, slot, object, object_size);
2288 static VisitorDispatchTable<ScavengingCallback> table_;
2292 template<MarksHandling marks_handling,
2293 LoggingAndProfiling logging_and_profiling_mode>
2294 VisitorDispatchTable<ScavengingCallback>
2295 ScavengingVisitor<marks_handling, logging_and_profiling_mode>::table_;
2298 static void InitializeScavengingVisitorsTables() {
2299 ScavengingVisitor<TRANSFER_MARKS,
2300 LOGGING_AND_PROFILING_DISABLED>::Initialize();
2301 ScavengingVisitor<IGNORE_MARKS, LOGGING_AND_PROFILING_DISABLED>::Initialize();
2302 ScavengingVisitor<TRANSFER_MARKS,
2303 LOGGING_AND_PROFILING_ENABLED>::Initialize();
2304 ScavengingVisitor<IGNORE_MARKS, LOGGING_AND_PROFILING_ENABLED>::Initialize();
2308 void Heap::SelectScavengingVisitorsTable() {
2309 bool logging_and_profiling =
2310 isolate()->logger()->is_logging() ||
2311 isolate()->cpu_profiler()->is_profiling() ||
2312 (isolate()->heap_profiler() != NULL &&
2313 isolate()->heap_profiler()->is_profiling());
2315 if (!incremental_marking()->IsMarking()) {
2316 if (!logging_and_profiling) {
2317 scavenging_visitors_table_.CopyFrom(
2318 ScavengingVisitor<IGNORE_MARKS,
2319 LOGGING_AND_PROFILING_DISABLED>::GetTable());
2321 scavenging_visitors_table_.CopyFrom(
2322 ScavengingVisitor<IGNORE_MARKS,
2323 LOGGING_AND_PROFILING_ENABLED>::GetTable());
2326 if (!logging_and_profiling) {
2327 scavenging_visitors_table_.CopyFrom(
2328 ScavengingVisitor<TRANSFER_MARKS,
2329 LOGGING_AND_PROFILING_DISABLED>::GetTable());
2331 scavenging_visitors_table_.CopyFrom(
2332 ScavengingVisitor<TRANSFER_MARKS,
2333 LOGGING_AND_PROFILING_ENABLED>::GetTable());
2336 if (incremental_marking()->IsCompacting()) {
2337 // When compacting, forbid short-circuiting of cons strings.
2338 // The scavenging code relies on the fact that a new space object
2339 // can't be evacuated into an evacuation candidate, but
2340 // short-circuiting violates this assumption.
2341 scavenging_visitors_table_.Register(
2342 StaticVisitorBase::kVisitShortcutCandidate,
2343 scavenging_visitors_table_.GetVisitorById(
2344 StaticVisitorBase::kVisitConsString));
2350 void Heap::ScavengeObjectSlow(HeapObject** p, HeapObject* object) {
2351 SLOW_ASSERT(HEAP->InFromSpace(object));
2352 MapWord first_word = object->map_word();
2353 SLOW_ASSERT(!first_word.IsForwardingAddress());
2354 Map* map = first_word.ToMap();
2355 map->GetHeap()->DoScavengeObject(map, p, object);
2359 MaybeObject* Heap::AllocatePartialMap(InstanceType instance_type,
2360 int instance_size) {
2362 MaybeObject* maybe_result = AllocateRawMap();
2363 if (!maybe_result->ToObject(&result)) return maybe_result;
2365 // Map::cast cannot be used due to uninitialized map field.
2366 reinterpret_cast<Map*>(result)->set_map(raw_unchecked_meta_map());
2367 reinterpret_cast<Map*>(result)->set_instance_type(instance_type);
2368 reinterpret_cast<Map*>(result)->set_instance_size(instance_size);
2369 reinterpret_cast<Map*>(result)->set_visitor_id(
2370 StaticVisitorBase::GetVisitorId(instance_type, instance_size));
2371 reinterpret_cast<Map*>(result)->set_inobject_properties(0);
2372 reinterpret_cast<Map*>(result)->set_pre_allocated_property_fields(0);
2373 reinterpret_cast<Map*>(result)->set_unused_property_fields(0);
2374 reinterpret_cast<Map*>(result)->set_bit_field(0);
2375 reinterpret_cast<Map*>(result)->set_bit_field2(0);
2376 int bit_field3 = Map::EnumLengthBits::encode(Map::kInvalidEnumCache) |
2377 Map::OwnsDescriptors::encode(true);
2378 reinterpret_cast<Map*>(result)->set_bit_field3(bit_field3);
2383 MaybeObject* Heap::AllocateMap(InstanceType instance_type,
2385 ElementsKind elements_kind) {
2387 MaybeObject* maybe_result = AllocateRawMap();
2388 if (!maybe_result->To(&result)) return maybe_result;
2390 Map* map = reinterpret_cast<Map*>(result);
2391 map->set_map_no_write_barrier(meta_map());
2392 map->set_instance_type(instance_type);
2393 map->set_visitor_id(
2394 StaticVisitorBase::GetVisitorId(instance_type, instance_size));
2395 map->set_prototype(null_value(), SKIP_WRITE_BARRIER);
2396 map->set_constructor(null_value(), SKIP_WRITE_BARRIER);
2397 map->set_instance_size(instance_size);
2398 map->set_inobject_properties(0);
2399 map->set_pre_allocated_property_fields(0);
2400 map->set_code_cache(empty_fixed_array(), SKIP_WRITE_BARRIER);
2401 map->set_dependent_code(DependentCode::cast(empty_fixed_array()),
2402 SKIP_WRITE_BARRIER);
2403 map->init_back_pointer(undefined_value());
2404 map->set_unused_property_fields(0);
2405 map->set_instance_descriptors(empty_descriptor_array());
2406 map->set_bit_field(0);
2407 map->set_bit_field2(1 << Map::kIsExtensible);
2408 int bit_field3 = Map::EnumLengthBits::encode(Map::kInvalidEnumCache) |
2409 Map::OwnsDescriptors::encode(true);
2410 map->set_bit_field3(bit_field3);
2411 map->set_elements_kind(elements_kind);
2417 MaybeObject* Heap::AllocateCodeCache() {
2418 CodeCache* code_cache;
2419 { MaybeObject* maybe_code_cache = AllocateStruct(CODE_CACHE_TYPE);
2420 if (!maybe_code_cache->To(&code_cache)) return maybe_code_cache;
2422 code_cache->set_default_cache(empty_fixed_array(), SKIP_WRITE_BARRIER);
2423 code_cache->set_normal_type_cache(undefined_value(), SKIP_WRITE_BARRIER);
2428 MaybeObject* Heap::AllocatePolymorphicCodeCache() {
2429 return AllocateStruct(POLYMORPHIC_CODE_CACHE_TYPE);
2433 MaybeObject* Heap::AllocateAccessorPair() {
2434 AccessorPair* accessors;
2435 { MaybeObject* maybe_accessors = AllocateStruct(ACCESSOR_PAIR_TYPE);
2436 if (!maybe_accessors->To(&accessors)) return maybe_accessors;
2438 accessors->set_getter(the_hole_value(), SKIP_WRITE_BARRIER);
2439 accessors->set_setter(the_hole_value(), SKIP_WRITE_BARRIER);
2444 MaybeObject* Heap::AllocateTypeFeedbackInfo() {
2445 TypeFeedbackInfo* info;
2446 { MaybeObject* maybe_info = AllocateStruct(TYPE_FEEDBACK_INFO_TYPE);
2447 if (!maybe_info->To(&info)) return maybe_info;
2449 info->initialize_storage();
2450 info->set_type_feedback_cells(TypeFeedbackCells::cast(empty_fixed_array()),
2451 SKIP_WRITE_BARRIER);
2456 MaybeObject* Heap::AllocateAliasedArgumentsEntry(int aliased_context_slot) {
2457 AliasedArgumentsEntry* entry;
2458 { MaybeObject* maybe_entry = AllocateStruct(ALIASED_ARGUMENTS_ENTRY_TYPE);
2459 if (!maybe_entry->To(&entry)) return maybe_entry;
2461 entry->set_aliased_context_slot(aliased_context_slot);
2466 const Heap::StringTypeTable Heap::string_type_table[] = {
2467 #define STRING_TYPE_ELEMENT(type, size, name, camel_name) \
2468 {type, size, k##camel_name##MapRootIndex},
2469 STRING_TYPE_LIST(STRING_TYPE_ELEMENT)
2470 #undef STRING_TYPE_ELEMENT
2474 const Heap::ConstantStringTable Heap::constant_string_table[] = {
2475 #define CONSTANT_STRING_ELEMENT(name, contents) \
2476 {contents, k##name##RootIndex},
2477 INTERNALIZED_STRING_LIST(CONSTANT_STRING_ELEMENT)
2478 #undef CONSTANT_STRING_ELEMENT
2482 const Heap::StructTable Heap::struct_table[] = {
2483 #define STRUCT_TABLE_ELEMENT(NAME, Name, name) \
2484 { NAME##_TYPE, Name::kSize, k##Name##MapRootIndex },
2485 STRUCT_LIST(STRUCT_TABLE_ELEMENT)
2486 #undef STRUCT_TABLE_ELEMENT
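// Bootstrapping the map space is circular: maps describe objects, but the
// meta map also describes itself.  CreateInitialMaps therefore first
// allocates partial maps (meta map, fixed array map, oddball map) via
// AllocatePartialMap, then the empty fixed array, null, undefined and the
// empty descriptor array, and finally patches the partial maps with the
// fields that could not be set earlier (code cache, dependent code, back
// pointer, descriptors, prototype and constructor).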
2490 bool Heap::CreateInitialMaps() {
2492 { MaybeObject* maybe_obj = AllocatePartialMap(MAP_TYPE, Map::kSize);
2493 if (!maybe_obj->ToObject(&obj)) return false;
2495 // Map::cast cannot be used due to uninitialized map field.
2496 Map* new_meta_map = reinterpret_cast<Map*>(obj);
2497 set_meta_map(new_meta_map);
2498 new_meta_map->set_map(new_meta_map);
2500 { MaybeObject* maybe_obj =
2501 AllocatePartialMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2502 if (!maybe_obj->ToObject(&obj)) return false;
2504 set_fixed_array_map(Map::cast(obj));
2506 { MaybeObject* maybe_obj = AllocatePartialMap(ODDBALL_TYPE, Oddball::kSize);
2507 if (!maybe_obj->ToObject(&obj)) return false;
2509 set_oddball_map(Map::cast(obj));
2511 // Allocate the empty array.
2512 { MaybeObject* maybe_obj = AllocateEmptyFixedArray();
2513 if (!maybe_obj->ToObject(&obj)) return false;
2515 set_empty_fixed_array(FixedArray::cast(obj));
2517 { MaybeObject* maybe_obj = Allocate(oddball_map(), OLD_POINTER_SPACE);
2518 if (!maybe_obj->ToObject(&obj)) return false;
2520 set_null_value(Oddball::cast(obj));
2521 Oddball::cast(obj)->set_kind(Oddball::kNull);
2523 { MaybeObject* maybe_obj = Allocate(oddball_map(), OLD_POINTER_SPACE);
2524 if (!maybe_obj->ToObject(&obj)) return false;
2526 set_undefined_value(Oddball::cast(obj));
2527 Oddball::cast(obj)->set_kind(Oddball::kUndefined);
2528 ASSERT(!InNewSpace(undefined_value()));
2530 // Allocate the empty descriptor array.
2531 { MaybeObject* maybe_obj = AllocateEmptyFixedArray();
2532 if (!maybe_obj->ToObject(&obj)) return false;
2534 set_empty_descriptor_array(DescriptorArray::cast(obj));
2536 // Fix the instance_descriptors for the existing maps.
2537 meta_map()->set_code_cache(empty_fixed_array());
2538 meta_map()->set_dependent_code(DependentCode::cast(empty_fixed_array()));
2539 meta_map()->init_back_pointer(undefined_value());
2540 meta_map()->set_instance_descriptors(empty_descriptor_array());
2542 fixed_array_map()->set_code_cache(empty_fixed_array());
2543 fixed_array_map()->set_dependent_code(
2544 DependentCode::cast(empty_fixed_array()));
2545 fixed_array_map()->init_back_pointer(undefined_value());
2546 fixed_array_map()->set_instance_descriptors(empty_descriptor_array());
2548 oddball_map()->set_code_cache(empty_fixed_array());
2549 oddball_map()->set_dependent_code(DependentCode::cast(empty_fixed_array()));
2550 oddball_map()->init_back_pointer(undefined_value());
2551 oddball_map()->set_instance_descriptors(empty_descriptor_array());
2553 // Fix prototype object for existing maps.
2554 meta_map()->set_prototype(null_value());
2555 meta_map()->set_constructor(null_value());
2557 fixed_array_map()->set_prototype(null_value());
2558 fixed_array_map()->set_constructor(null_value());
2560 oddball_map()->set_prototype(null_value());
2561 oddball_map()->set_constructor(null_value());
2563 { MaybeObject* maybe_obj =
2564 AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2565 if (!maybe_obj->ToObject(&obj)) return false;
2567 set_fixed_cow_array_map(Map::cast(obj));
2568 ASSERT(fixed_array_map() != fixed_cow_array_map());
2570 { MaybeObject* maybe_obj =
2571 AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2572 if (!maybe_obj->ToObject(&obj)) return false;
2574 set_scope_info_map(Map::cast(obj));
2576 { MaybeObject* maybe_obj = AllocateMap(HEAP_NUMBER_TYPE, HeapNumber::kSize);
2577 if (!maybe_obj->ToObject(&obj)) return false;
2579 set_heap_number_map(Map::cast(obj));
2581 { MaybeObject* maybe_obj = AllocateMap(SYMBOL_TYPE, Symbol::kSize);
2582 if (!maybe_obj->ToObject(&obj)) return false;
2584 set_symbol_map(Map::cast(obj));
2586 { MaybeObject* maybe_obj = AllocateMap(FOREIGN_TYPE, Foreign::kSize);
2587 if (!maybe_obj->ToObject(&obj)) return false;
2589 set_foreign_map(Map::cast(obj));
2591 for (unsigned i = 0; i < ARRAY_SIZE(string_type_table); i++) {
2592 const StringTypeTable& entry = string_type_table[i];
2593 { MaybeObject* maybe_obj = AllocateMap(entry.type, entry.size);
2594 if (!maybe_obj->ToObject(&obj)) return false;
2596 roots_[entry.index] = Map::cast(obj);
2599 { MaybeObject* maybe_obj = AllocateMap(STRING_TYPE, kVariableSizeSentinel);
2600 if (!maybe_obj->ToObject(&obj)) return false;
2602 set_undetectable_string_map(Map::cast(obj));
2603 Map::cast(obj)->set_is_undetectable();
2605 { MaybeObject* maybe_obj =
2606 AllocateMap(ASCII_STRING_TYPE, kVariableSizeSentinel);
2607 if (!maybe_obj->ToObject(&obj)) return false;
2609 set_undetectable_ascii_string_map(Map::cast(obj));
2610 Map::cast(obj)->set_is_undetectable();
2612 { MaybeObject* maybe_obj =
2613 AllocateMap(FIXED_DOUBLE_ARRAY_TYPE, kVariableSizeSentinel);
2614 if (!maybe_obj->ToObject(&obj)) return false;
2616 set_fixed_double_array_map(Map::cast(obj));
2618 { MaybeObject* maybe_obj =
2619 AllocateMap(BYTE_ARRAY_TYPE, kVariableSizeSentinel);
2620 if (!maybe_obj->ToObject(&obj)) return false;
2622 set_byte_array_map(Map::cast(obj));
2624 { MaybeObject* maybe_obj =
2625 AllocateMap(FREE_SPACE_TYPE, kVariableSizeSentinel);
2626 if (!maybe_obj->ToObject(&obj)) return false;
2628 set_free_space_map(Map::cast(obj));
2630 { MaybeObject* maybe_obj = AllocateByteArray(0, TENURED);
2631 if (!maybe_obj->ToObject(&obj)) return false;
2633 set_empty_byte_array(ByteArray::cast(obj));
2635 { MaybeObject* maybe_obj =
2636 AllocateMap(EXTERNAL_PIXEL_ARRAY_TYPE, ExternalArray::kAlignedSize);
2637 if (!maybe_obj->ToObject(&obj)) return false;
2639 set_external_pixel_array_map(Map::cast(obj));
2641 { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_BYTE_ARRAY_TYPE,
2642 ExternalArray::kAlignedSize);
2643 if (!maybe_obj->ToObject(&obj)) return false;
2645 set_external_byte_array_map(Map::cast(obj));
2647 { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE,
2648 ExternalArray::kAlignedSize);
2649 if (!maybe_obj->ToObject(&obj)) return false;
2651 set_external_unsigned_byte_array_map(Map::cast(obj));
2653 { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_SHORT_ARRAY_TYPE,
2654 ExternalArray::kAlignedSize);
2655 if (!maybe_obj->ToObject(&obj)) return false;
2657 set_external_short_array_map(Map::cast(obj));
2659 { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE,
2660 ExternalArray::kAlignedSize);
2661 if (!maybe_obj->ToObject(&obj)) return false;
2663 set_external_unsigned_short_array_map(Map::cast(obj));
2665 { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_INT_ARRAY_TYPE,
2666 ExternalArray::kAlignedSize);
2667 if (!maybe_obj->ToObject(&obj)) return false;
2669 set_external_int_array_map(Map::cast(obj));
2671 { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_UNSIGNED_INT_ARRAY_TYPE,
2672 ExternalArray::kAlignedSize);
2673 if (!maybe_obj->ToObject(&obj)) return false;
2675 set_external_unsigned_int_array_map(Map::cast(obj));
2677 { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_FLOAT_ARRAY_TYPE,
2678 ExternalArray::kAlignedSize);
2679 if (!maybe_obj->ToObject(&obj)) return false;
2681 set_external_float_array_map(Map::cast(obj));
2683 { MaybeObject* maybe_obj =
2684 AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2685 if (!maybe_obj->ToObject(&obj)) return false;
2687 set_non_strict_arguments_elements_map(Map::cast(obj));
2689 { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_DOUBLE_ARRAY_TYPE,
2690 ExternalArray::kAlignedSize);
2691 if (!maybe_obj->ToObject(&obj)) return false;
2693 set_external_double_array_map(Map::cast(obj));
2695 { MaybeObject* maybe_obj = AllocateEmptyExternalArray(kExternalByteArray);
2696 if (!maybe_obj->ToObject(&obj)) return false;
2698 set_empty_external_byte_array(ExternalArray::cast(obj));
2700 { MaybeObject* maybe_obj =
2701 AllocateEmptyExternalArray(kExternalUnsignedByteArray);
2702 if (!maybe_obj->ToObject(&obj)) return false;
2704 set_empty_external_unsigned_byte_array(ExternalArray::cast(obj));
2706 { MaybeObject* maybe_obj = AllocateEmptyExternalArray(kExternalShortArray);
2707 if (!maybe_obj->ToObject(&obj)) return false;
2709 set_empty_external_short_array(ExternalArray::cast(obj));
2711 { MaybeObject* maybe_obj = AllocateEmptyExternalArray(
2712 kExternalUnsignedShortArray);
2713 if (!maybe_obj->ToObject(&obj)) return false;
2715 set_empty_external_unsigned_short_array(ExternalArray::cast(obj));
2717 { MaybeObject* maybe_obj = AllocateEmptyExternalArray(kExternalIntArray);
2718 if (!maybe_obj->ToObject(&obj)) return false;
2720 set_empty_external_int_array(ExternalArray::cast(obj));
2722 { MaybeObject* maybe_obj =
2723 AllocateEmptyExternalArray(kExternalUnsignedIntArray);
2724 if (!maybe_obj->ToObject(&obj)) return false;
2726 set_empty_external_unsigned_int_array(ExternalArray::cast(obj));
2728 { MaybeObject* maybe_obj = AllocateEmptyExternalArray(kExternalFloatArray);
2729 if (!maybe_obj->ToObject(&obj)) return false;
2731 set_empty_external_float_array(ExternalArray::cast(obj));
2733 { MaybeObject* maybe_obj = AllocateEmptyExternalArray(kExternalDoubleArray);
2734 if (!maybe_obj->ToObject(&obj)) return false;
2736 set_empty_external_double_array(ExternalArray::cast(obj));
2738 { MaybeObject* maybe_obj = AllocateEmptyExternalArray(kExternalPixelArray);
2739 if (!maybe_obj->ToObject(&obj)) return false;
2741 set_empty_external_pixel_array(ExternalArray::cast(obj));
2743 { MaybeObject* maybe_obj = AllocateMap(CODE_TYPE, kVariableSizeSentinel);
2744 if (!maybe_obj->ToObject(&obj)) return false;
2746 set_code_map(Map::cast(obj));
2748 { MaybeObject* maybe_obj = AllocateMap(CELL_TYPE, Cell::kSize);
2749 if (!maybe_obj->ToObject(&obj)) return false;
2751 set_cell_map(Map::cast(obj));
2753 { MaybeObject* maybe_obj = AllocateMap(PROPERTY_CELL_TYPE,
2754 PropertyCell::kSize);
2755 if (!maybe_obj->ToObject(&obj)) return false;
2757 set_global_property_cell_map(Map::cast(obj));
2759 { MaybeObject* maybe_obj = AllocateMap(FILLER_TYPE, kPointerSize);
2760 if (!maybe_obj->ToObject(&obj)) return false;
2762 set_one_pointer_filler_map(Map::cast(obj));
2764 { MaybeObject* maybe_obj = AllocateMap(FILLER_TYPE, 2 * kPointerSize);
2765 if (!maybe_obj->ToObject(&obj)) return false;
2767 set_two_pointer_filler_map(Map::cast(obj));
2769 for (unsigned i = 0; i < ARRAY_SIZE(struct_table); i++) {
2770 const StructTable& entry = struct_table[i];
2771 { MaybeObject* maybe_obj = AllocateMap(entry.type, entry.size);
2772 if (!maybe_obj->ToObject(&obj)) return false;
2774 roots_[entry.index] = Map::cast(obj);
2777 { MaybeObject* maybe_obj =
2778 AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2779 if (!maybe_obj->ToObject(&obj)) return false;
2781 set_hash_table_map(Map::cast(obj));
2783 { MaybeObject* maybe_obj =
2784 AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2785 if (!maybe_obj->ToObject(&obj)) return false;
2787 set_function_context_map(Map::cast(obj));
2789 { MaybeObject* maybe_obj =
2790 AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2791 if (!maybe_obj->ToObject(&obj)) return false;
2793 set_catch_context_map(Map::cast(obj));
2795 { MaybeObject* maybe_obj =
2796 AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2797 if (!maybe_obj->ToObject(&obj)) return false;
2799 set_with_context_map(Map::cast(obj));
2801 { MaybeObject* maybe_obj =
2802 AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2803 if (!maybe_obj->ToObject(&obj)) return false;
2805 set_block_context_map(Map::cast(obj));
2807 { MaybeObject* maybe_obj =
2808 AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2809 if (!maybe_obj->ToObject(&obj)) return false;
2811 set_module_context_map(Map::cast(obj));
2813 { MaybeObject* maybe_obj =
2814 AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2815 if (!maybe_obj->ToObject(&obj)) return false;
2817 set_global_context_map(Map::cast(obj));
2819 { MaybeObject* maybe_obj =
2820 AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2821 if (!maybe_obj->ToObject(&obj)) return false;
2823 Map* native_context_map = Map::cast(obj);
2824 native_context_map->set_dictionary_map(true);
2825 native_context_map->set_visitor_id(StaticVisitorBase::kVisitNativeContext);
2826 set_native_context_map(native_context_map);
2828 { MaybeObject* maybe_obj = AllocateMap(SHARED_FUNCTION_INFO_TYPE,
2829 SharedFunctionInfo::kAlignedSize);
2830 if (!maybe_obj->ToObject(&obj)) return false;
2832 set_shared_function_info_map(Map::cast(obj));
2834 { MaybeObject* maybe_obj = AllocateMap(JS_MESSAGE_OBJECT_TYPE,
2835 JSMessageObject::kSize);
2836 if (!maybe_obj->ToObject(&obj)) return false;
2838 set_message_object_map(Map::cast(obj));
2841 { MaybeObject* maybe_obj =
2842 AllocateMap(JS_OBJECT_TYPE, JSObject::kHeaderSize + kPointerSize);
2843 if (!maybe_obj->To(&external_map)) return false;
2845 external_map->set_is_extensible(false);
2846 set_external_map(external_map);
2848 ASSERT(!InNewSpace(empty_fixed_array()));
2853 MaybeObject* Heap::AllocateHeapNumber(double value, PretenureFlag pretenure) {
2854 // Statically ensure that it is safe to allocate heap numbers in paged spaces.
2856 STATIC_ASSERT(HeapNumber::kSize <= Page::kNonCodeObjectAreaSize);
2857 AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
2860 { MaybeObject* maybe_result =
2861 AllocateRaw(HeapNumber::kSize, space, OLD_DATA_SPACE);
2862 if (!maybe_result->ToObject(&result)) return maybe_result;
2865 HeapObject::cast(result)->set_map_no_write_barrier(heap_number_map());
2866 HeapNumber::cast(result)->set_value(value);
2871 MaybeObject* Heap::AllocateHeapNumber(double value) {
2872 // Use the general version if we're forced to always allocate.
2873 if (always_allocate()) return AllocateHeapNumber(value, TENURED);
2875 // This version of AllocateHeapNumber is optimized for
2876 // allocation in new space.
2877 STATIC_ASSERT(HeapNumber::kSize <= Page::kMaxNonCodeHeapObjectSize);
2879 { MaybeObject* maybe_result = new_space_.AllocateRaw(HeapNumber::kSize);
2880 if (!maybe_result->ToObject(&result)) return maybe_result;
2882 HeapObject::cast(result)->set_map_no_write_barrier(heap_number_map());
2883 HeapNumber::cast(result)->set_value(value);
2888 MaybeObject* Heap::AllocateCell(Object* value) {
2890 { MaybeObject* maybe_result = AllocateRawCell();
2891 if (!maybe_result->ToObject(&result)) return maybe_result;
2893 HeapObject::cast(result)->set_map_no_write_barrier(cell_map());
2894 Cell::cast(result)->set_value(value);
2899 MaybeObject* Heap::AllocatePropertyCell(Object* value) {
2901 MaybeObject* maybe_result = AllocateRawPropertyCell();
2902 if (!maybe_result->ToObject(&result)) return maybe_result;
2904 HeapObject::cast(result)->set_map_no_write_barrier(
2905 global_property_cell_map());
2906 PropertyCell* cell = PropertyCell::cast(result);
2907 cell->set_dependent_code(DependentCode::cast(empty_fixed_array()),
2908 SKIP_WRITE_BARRIER);
2909 cell->set_value(value);
2910 cell->set_type(Type::None());
2911 maybe_result = cell->SetValueInferType(value);
2912 if (maybe_result->IsFailure()) return maybe_result;
2917 MaybeObject* Heap::AllocateBox(Object* value, PretenureFlag pretenure) {
2919 MaybeObject* maybe_result = AllocateStruct(BOX_TYPE);
2920 if (!maybe_result->To(&result)) return maybe_result;
2921 result->set_value(value);
2926 MaybeObject* Heap::AllocateAllocationSite() {
2928 MaybeObject* maybe_result = Allocate(allocation_site_map(),
2930 if (!maybe_result->ToObject(&result)) return maybe_result;
2931 AllocationSite* site = AllocationSite::cast(result);
2935 site->set_weak_next(allocation_sites_list());
2936 set_allocation_sites_list(site);
2941 MaybeObject* Heap::CreateOddball(const char* to_string,
2945 { MaybeObject* maybe_result = Allocate(oddball_map(), OLD_POINTER_SPACE);
2946 if (!maybe_result->ToObject(&result)) return maybe_result;
2948 return Oddball::cast(result)->Initialize(to_string, to_number, kind);
2952 bool Heap::CreateApiObjects() {
2955 { MaybeObject* maybe_obj = AllocateMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
2956 if (!maybe_obj->ToObject(&obj)) return false;
2958 // Don't use Smi-only elements optimizations for objects with the neander
2959 // map. There are too many cases where element values are set directly, with
2960 // no bottleneck to trap the Smi-only -> fast elements transition, and there
2961 // appears to be no benefit in optimizing this case.
2962 Map* new_neander_map = Map::cast(obj);
2963 new_neander_map->set_elements_kind(TERMINAL_FAST_ELEMENTS_KIND);
2964 set_neander_map(new_neander_map);
2966 { MaybeObject* maybe_obj = AllocateJSObjectFromMap(neander_map());
2967 if (!maybe_obj->ToObject(&obj)) return false;
2970 { MaybeObject* maybe_elements = AllocateFixedArray(2);
2971 if (!maybe_elements->ToObject(&elements)) return false;
2973 FixedArray::cast(elements)->set(0, Smi::FromInt(0));
2974 JSObject::cast(obj)->set_elements(FixedArray::cast(elements));
2975 set_message_listeners(JSObject::cast(obj));
2981 void Heap::CreateJSEntryStub() {
2983 set_js_entry_code(*stub.GetCode(isolate()));
2987 void Heap::CreateJSConstructEntryStub() {
2988 JSConstructEntryStub stub;
2989 set_js_construct_entry_code(*stub.GetCode(isolate()));
2993 void Heap::CreateFixedStubs() {
2994 // Here we create roots for fixed stubs. They are needed at GC
2995 // for cooking and uncooking (check out frames.cc).
2996 // This eliminates the need for doing a dictionary lookup in the
2997 // stub cache for these stubs.
2998 HandleScope scope(isolate());
2999 // gcc-4.4 has problem generating correct code of following snippet:
3000 // { JSEntryStub stub;
3001 // js_entry_code_ = *stub.GetCode();
3003 // { JSConstructEntryStub stub;
3004 // js_construct_entry_code_ = *stub.GetCode();
3006 // To work around the problem, make separate functions without inlining.
3007 Heap::CreateJSEntryStub();
3008 Heap::CreateJSConstructEntryStub();
3010 // Create stubs that should be there, so we don't unexpectedly have to
3011 // create them if we need them during the creation of another stub.
3012 // Stub creation mixes raw pointers and handles in an unsafe manner so
3013 // we cannot create stubs while we are creating stubs.
3014 CodeStub::GenerateStubsAheadOfTime(isolate());
3018 bool Heap::CreateInitialObjects() {
3021 // The -0 value must be set before NumberFromDouble works.
3022 { MaybeObject* maybe_obj = AllocateHeapNumber(-0.0, TENURED);
3023 if (!maybe_obj->ToObject(&obj)) return false;
3025 set_minus_zero_value(HeapNumber::cast(obj));
3026 ASSERT(std::signbit(minus_zero_value()->Number()) != 0);
3028 { MaybeObject* maybe_obj = AllocateHeapNumber(OS::nan_value(), TENURED);
3029 if (!maybe_obj->ToObject(&obj)) return false;
3031 set_nan_value(HeapNumber::cast(obj));
3033 { MaybeObject* maybe_obj = AllocateHeapNumber(V8_INFINITY, TENURED);
3034 if (!maybe_obj->ToObject(&obj)) return false;
3036 set_infinity_value(HeapNumber::cast(obj));
3038 // The hole has not been created yet, but we want to put something
3039 // predictable in the gaps in the string table, so let's make that Smi zero.
3040 set_the_hole_value(reinterpret_cast<Oddball*>(Smi::FromInt(0)));
3042 // Allocate initial string table.
3043 { MaybeObject* maybe_obj =
3044 StringTable::Allocate(this, kInitialStringTableSize);
3045 if (!maybe_obj->ToObject(&obj)) return false;
3047 // Don't use set_string_table() due to asserts.
3048 roots_[kStringTableRootIndex] = obj;
3050 // Finish initializing oddballs after creating the string table.
3051 { MaybeObject* maybe_obj =
3052 undefined_value()->Initialize("undefined",
3054 Oddball::kUndefined);
3055 if (!maybe_obj->ToObject(&obj)) return false;
3058 // Initialize the null_value.
3059 { MaybeObject* maybe_obj =
3060 null_value()->Initialize("null", Smi::FromInt(0), Oddball::kNull);
3061 if (!maybe_obj->ToObject(&obj)) return false;
3064 { MaybeObject* maybe_obj = CreateOddball("true",
3067 if (!maybe_obj->ToObject(&obj)) return false;
3069 set_true_value(Oddball::cast(obj));
3071 { MaybeObject* maybe_obj = CreateOddball("false",
3074 if (!maybe_obj->ToObject(&obj)) return false;
3076 set_false_value(Oddball::cast(obj));
3078 { MaybeObject* maybe_obj = CreateOddball("hole",
3081 if (!maybe_obj->ToObject(&obj)) return false;
3083 set_the_hole_value(Oddball::cast(obj));
3085 { MaybeObject* maybe_obj = CreateOddball("uninitialized",
3087 Oddball::kUninitialized);
3088 if (!maybe_obj->ToObject(&obj)) return false;
3090 set_uninitialized_value(Oddball::cast(obj));
3092 { MaybeObject* maybe_obj = CreateOddball("arguments_marker",
3094 Oddball::kArgumentMarker);
3095 if (!maybe_obj->ToObject(&obj)) return false;
3097 set_arguments_marker(Oddball::cast(obj));
3099 { MaybeObject* maybe_obj = CreateOddball("no_interceptor_result_sentinel",
3102 if (!maybe_obj->ToObject(&obj)) return false;
3104 set_no_interceptor_result_sentinel(obj);
3106 { MaybeObject* maybe_obj = CreateOddball("termination_exception",
3109 if (!maybe_obj->ToObject(&obj)) return false;
3111 set_termination_exception(obj);
3113 for (unsigned i = 0; i < ARRAY_SIZE(constant_string_table); i++) {
3114 { MaybeObject* maybe_obj =
3115 InternalizeUtf8String(constant_string_table[i].contents);
3116 if (!maybe_obj->ToObject(&obj)) return false;
3118 roots_[constant_string_table[i].index] = String::cast(obj);
3121 // Allocate the hidden string which is used to identify the hidden properties
3122 // in JSObjects. The hash code has a special value so that it will not match
3123 // the empty string when searching for the property. It cannot be part of the
3124 // loop above because it needs to be allocated manually with the special
3125 // hash code in place. The hash code for the hidden_string is zero to ensure
3126 // that it will always be at the first entry in property descriptors.
3127 { MaybeObject* maybe_obj = AllocateOneByteInternalizedString(
3128 OneByteVector("", 0), String::kEmptyStringHash);
3129 if (!maybe_obj->ToObject(&obj)) return false;
3131 hidden_string_ = String::cast(obj);
3133 // Allocate the code_stubs dictionary. The initial size is set to avoid
3134 // expanding the dictionary during bootstrapping.
3135 { MaybeObject* maybe_obj = UnseededNumberDictionary::Allocate(this, 128);
3136 if (!maybe_obj->ToObject(&obj)) return false;
3138 set_code_stubs(UnseededNumberDictionary::cast(obj));
3141 // Allocate the non_monomorphic_cache used in stub-cache.cc. The initial size
3142 // is set to avoid expanding the dictionary during bootstrapping.
3143 { MaybeObject* maybe_obj = UnseededNumberDictionary::Allocate(this, 64);
3144 if (!maybe_obj->ToObject(&obj)) return false;
3146 set_non_monomorphic_cache(UnseededNumberDictionary::cast(obj));
3148 { MaybeObject* maybe_obj = AllocatePolymorphicCodeCache();
3149 if (!maybe_obj->ToObject(&obj)) return false;
3151 set_polymorphic_code_cache(PolymorphicCodeCache::cast(obj));
3153 set_instanceof_cache_function(Smi::FromInt(0));
3154 set_instanceof_cache_map(Smi::FromInt(0));
3155 set_instanceof_cache_answer(Smi::FromInt(0));
3159 // Allocate the dictionary of intrinsic function names.
3160 { MaybeObject* maybe_obj =
3161 NameDictionary::Allocate(this, Runtime::kNumFunctions);
3162 if (!maybe_obj->ToObject(&obj)) return false;
3164 { MaybeObject* maybe_obj = Runtime::InitializeIntrinsicFunctionNames(this,
3166 if (!maybe_obj->ToObject(&obj)) return false;
3168 set_intrinsic_function_names(NameDictionary::cast(obj));
3170 { MaybeObject* maybe_obj = AllocateInitialNumberStringCache();
3171 if (!maybe_obj->ToObject(&obj)) return false;
3173 set_number_string_cache(FixedArray::cast(obj));
3175 // Allocate cache for single character one byte strings.
3176 { MaybeObject* maybe_obj =
3177 AllocateFixedArray(String::kMaxOneByteCharCode + 1, TENURED);
3178 if (!maybe_obj->ToObject(&obj)) return false;
3180 set_single_character_string_cache(FixedArray::cast(obj));
3182 // Allocate cache for string split.
3183 { MaybeObject* maybe_obj = AllocateFixedArray(
3184 RegExpResultsCache::kRegExpResultsCacheSize, TENURED);
3185 if (!maybe_obj->ToObject(&obj)) return false;
3187 set_string_split_cache(FixedArray::cast(obj));
3189 { MaybeObject* maybe_obj = AllocateFixedArray(
3190 RegExpResultsCache::kRegExpResultsCacheSize, TENURED);
3191 if (!maybe_obj->ToObject(&obj)) return false;
3193 set_regexp_multiple_cache(FixedArray::cast(obj));
3195 // Allocate cache for external strings pointing to native source code.
3196 { MaybeObject* maybe_obj = AllocateFixedArray(Natives::GetBuiltinsCount());
3197 if (!maybe_obj->ToObject(&obj)) return false;
3199 set_natives_source_cache(FixedArray::cast(obj));
3201 // Allocate object to hold object observation state.
3202 { MaybeObject* maybe_obj = AllocateMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
3203 if (!maybe_obj->ToObject(&obj)) return false;
3205 { MaybeObject* maybe_obj = AllocateJSObjectFromMap(Map::cast(obj));
3206 if (!maybe_obj->ToObject(&obj)) return false;
3208 set_observation_state(JSObject::cast(obj));
3210 { MaybeObject* maybe_obj = AllocateSymbol();
3211 if (!maybe_obj->ToObject(&obj)) return false;
3213 set_frozen_symbol(Symbol::cast(obj));
3215 { MaybeObject* maybe_obj = SeededNumberDictionary::Allocate(this, 0, TENURED);
3216 if (!maybe_obj->ToObject(&obj)) return false;
3218 SeededNumberDictionary::cast(obj)->set_requires_slow_elements();
3219 set_empty_slow_element_dictionary(SeededNumberDictionary::cast(obj));
3221 { MaybeObject* maybe_obj = AllocateSymbol();
3222 if (!maybe_obj->ToObject(&obj)) return false;
3224 set_observed_symbol(Symbol::cast(obj));
3226 set_i18n_template_one(the_hole_value());
3227 set_i18n_template_two(the_hole_value());
3229 // Handling of script id generation is in Factory::NewScript.
3230 set_last_script_id(Smi::FromInt(v8::Script::kNoScriptId));
3232 // Initialize keyed lookup cache.
3233 isolate_->keyed_lookup_cache()->Clear();
3235 // Initialize context slot cache.
3236 isolate_->context_slot_cache()->Clear();
3238 // Initialize descriptor cache.
3239 isolate_->descriptor_lookup_cache()->Clear();
3241 // Initialize compilation cache.
3242 isolate_->compilation_cache()->Clear();
3248 bool Heap::RootCanBeWrittenAfterInitialization(Heap::RootListIndex root_index) {
3249 RootListIndex writable_roots[] = {
3250 kStoreBufferTopRootIndex,
3251 kStackLimitRootIndex,
3252 kNumberStringCacheRootIndex,
3253 kInstanceofCacheFunctionRootIndex,
3254 kInstanceofCacheMapRootIndex,
3255 kInstanceofCacheAnswerRootIndex,
3256 kCodeStubsRootIndex,
3257 kNonMonomorphicCacheRootIndex,
3258 kPolymorphicCodeCacheRootIndex,
3259 kLastScriptIdRootIndex,
3260 kEmptyScriptRootIndex,
3261 kRealStackLimitRootIndex,
3262 kArgumentsAdaptorDeoptPCOffsetRootIndex,
3263 kConstructStubDeoptPCOffsetRootIndex,
3264 kGetterStubDeoptPCOffsetRootIndex,
3265 kSetterStubDeoptPCOffsetRootIndex,
3266 kStringTableRootIndex,
3269 for (unsigned int i = 0; i < ARRAY_SIZE(writable_roots); i++) {
3270 if (root_index == writable_roots[i])
3277 Object* RegExpResultsCache::Lookup(Heap* heap,
3279 Object* key_pattern,
3280 ResultsCacheType type) {
3282 if (!key_string->IsInternalizedString()) return Smi::FromInt(0);
3283 if (type == STRING_SPLIT_SUBSTRINGS) {
3284 ASSERT(key_pattern->IsString());
3285 if (!key_pattern->IsInternalizedString()) return Smi::FromInt(0);
3286 cache = heap->string_split_cache();
3288 ASSERT(type == REGEXP_MULTIPLE_INDICES);
3289 ASSERT(key_pattern->IsFixedArray());
3290 cache = heap->regexp_multiple_cache();
3293 uint32_t hash = key_string->Hash();
3294 uint32_t index = ((hash & (kRegExpResultsCacheSize - 1)) &
3295 ~(kArrayEntriesPerCacheEntry - 1));
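// The cache is probed at two positions: the primary entry derived from the
// hash, and on a miss the immediately following entry.  Illustrative
// arithmetic, assuming kRegExpResultsCacheSize == 0x100 and
// kArrayEntriesPerCacheEntry == 4: a hash of 0x3fe7 gives primary index
// (0x3fe7 & 0xff) & ~3 == 0xe4 and secondary index (0xe4 + 4) & 0xff == 0xe8.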
3296 if (cache->get(index + kStringOffset) == key_string &&
3297 cache->get(index + kPatternOffset) == key_pattern) {
3298 return cache->get(index + kArrayOffset);
3301 ((index + kArrayEntriesPerCacheEntry) & (kRegExpResultsCacheSize - 1));
3302 if (cache->get(index + kStringOffset) == key_string &&
3303 cache->get(index + kPatternOffset) == key_pattern) {
3304 return cache->get(index + kArrayOffset);
3306 return Smi::FromInt(0);
3310 void RegExpResultsCache::Enter(Heap* heap,
3312 Object* key_pattern,
3313 FixedArray* value_array,
3314 ResultsCacheType type) {
3316 if (!key_string->IsInternalizedString()) return;
3317 if (type == STRING_SPLIT_SUBSTRINGS) {
3318 ASSERT(key_pattern->IsString());
3319 if (!key_pattern->IsInternalizedString()) return;
3320 cache = heap->string_split_cache();
3322 ASSERT(type == REGEXP_MULTIPLE_INDICES);
3323 ASSERT(key_pattern->IsFixedArray());
3324 cache = heap->regexp_multiple_cache();
3327 uint32_t hash = key_string->Hash();
3328 uint32_t index = ((hash & (kRegExpResultsCacheSize - 1)) &
3329 ~(kArrayEntriesPerCacheEntry - 1));
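// Replacement policy: if the primary entry is empty it is filled directly;
// otherwise the secondary entry (the next cache line) is tried, and if that
// is occupied as well the secondary entry is cleared and the new result
// overwrites the primary entry.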
3330 if (cache->get(index + kStringOffset) == Smi::FromInt(0)) {
3331 cache->set(index + kStringOffset, key_string);
3332 cache->set(index + kPatternOffset, key_pattern);
3333 cache->set(index + kArrayOffset, value_array);
3336 ((index + kArrayEntriesPerCacheEntry) & (kRegExpResultsCacheSize - 1));
3337 if (cache->get(index2 + kStringOffset) == Smi::FromInt(0)) {
3338 cache->set(index2 + kStringOffset, key_string);
3339 cache->set(index2 + kPatternOffset, key_pattern);
3340 cache->set(index2 + kArrayOffset, value_array);
3342 cache->set(index2 + kStringOffset, Smi::FromInt(0));
3343 cache->set(index2 + kPatternOffset, Smi::FromInt(0));
3344 cache->set(index2 + kArrayOffset, Smi::FromInt(0));
3345 cache->set(index + kStringOffset, key_string);
3346 cache->set(index + kPatternOffset, key_pattern);
3347 cache->set(index + kArrayOffset, value_array);
3350 // If the array is a reasonably short list of substrings, convert it into a
3351 // list of internalized strings.
3352 if (type == STRING_SPLIT_SUBSTRINGS && value_array->length() < 100) {
3353 for (int i = 0; i < value_array->length(); i++) {
3354 String* str = String::cast(value_array->get(i));
3355 Object* internalized_str;
3356 MaybeObject* maybe_string = heap->InternalizeString(str);
3357 if (maybe_string->ToObject(&internalized_str)) {
3358 value_array->set(i, internalized_str);
3362 // Convert backing store to a copy-on-write array.
3363 value_array->set_map_no_write_barrier(heap->fixed_cow_array_map());
3367 void RegExpResultsCache::Clear(FixedArray* cache) {
3368 for (int i = 0; i < kRegExpResultsCacheSize; i++) {
3369 cache->set(i, Smi::FromInt(0));
3374 MaybeObject* Heap::AllocateInitialNumberStringCache() {
3375 MaybeObject* maybe_obj =
3376 AllocateFixedArray(kInitialNumberStringCacheSize * 2, TENURED);
3381 int Heap::FullSizeNumberStringCacheLength() {
3382 // Compute the size of the number string cache based on the max newspace size.
3383 // The number string cache has a minimum size based on twice the initial cache
3384 // size to ensure that it is bigger after being made 'full size'.
3385 int number_string_cache_size = max_semispace_size_ / 512;
3386 number_string_cache_size = Max(kInitialNumberStringCacheSize * 2,
3387 Min(0x4000, number_string_cache_size));
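// Worked example, assuming kInitialNumberStringCacheSize is small (a few
// hundred entries): with an 8 MB max semispace, 8 * MB / 512 == 16384 ==
// 0x4000, so the clamps leave 0x4000 entries and the returned FixedArray
// length is 0x8000.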
3388 // There is a string and a number per entry so the length is twice the number of entries.
3390 return number_string_cache_size * 2;
3394 void Heap::AllocateFullSizeNumberStringCache() {
3395 // The idea is to have a small number string cache in the snapshot to keep
3396 // boot-time memory usage down. If we end up expanding the number string
3397 // cache while creating the snapshot, that plan has not worked out.
3398 ASSERT(!Serializer::enabled() || FLAG_extra_code != NULL);
3399 MaybeObject* maybe_obj =
3400 AllocateFixedArray(FullSizeNumberStringCacheLength(), TENURED);
3402 if (maybe_obj->ToObject(&new_cache)) {
3403 // We don't bother to repopulate the cache with entries from the old cache.
3404 // It will be repopulated soon enough with new strings.
3405 set_number_string_cache(FixedArray::cast(new_cache));
3407 // If allocation fails then we just return without doing anything. It is only
3408 // a cache, so best effort is OK here.
3412 void Heap::FlushNumberStringCache() {
3413 // Flush the number to string cache.
3414 int len = number_string_cache()->length();
3415 for (int i = 0; i < len; i++) {
3416 number_string_cache()->set_undefined(this, i);
3421 static inline int double_get_hash(double d) {
3422 DoubleRepresentation rep(d);
3423 return static_cast<int>(rep.bits) ^ static_cast<int>(rep.bits >> 32);
3427 static inline int smi_get_hash(Smi* smi) {
3428 return smi->value();
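// The number string cache is a flat FixedArray of (number, string) pairs:
// slot 2 * hash holds the key and slot 2 * hash + 1 the cached string.  Smis
// hash to their value, doubles to the xor of the high and low halves of
// their bit pattern, and the hash is masked with (length / 2) - 1 to select
// an entry.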
3432 Object* Heap::GetNumberStringCache(Object* number) {
3434 int mask = (number_string_cache()->length() >> 1) - 1;
3435 if (number->IsSmi()) {
3436 hash = smi_get_hash(Smi::cast(number)) & mask;
3438 hash = double_get_hash(number->Number()) & mask;
3440 Object* key = number_string_cache()->get(hash * 2);
3441 if (key == number) {
3442 return String::cast(number_string_cache()->get(hash * 2 + 1));
3443 } else if (key->IsHeapNumber() &&
3444 number->IsHeapNumber() &&
3445 key->Number() == number->Number()) {
3446 return String::cast(number_string_cache()->get(hash * 2 + 1));
3448 return undefined_value();
3452 void Heap::SetNumberStringCache(Object* number, String* string) {
3454 int mask = (number_string_cache()->length() >> 1) - 1;
3455 if (number->IsSmi()) {
3456 hash = smi_get_hash(Smi::cast(number)) & mask;
3458 hash = double_get_hash(number->Number()) & mask;
3460 if (number_string_cache()->get(hash * 2) != undefined_value() &&
3461 number_string_cache()->length() != FullSizeNumberStringCacheLength()) {
3462 // The first time we have a hash collision, we move to the full sized
3463 // number string cache.
3464 AllocateFullSizeNumberStringCache();
3467 number_string_cache()->set(hash * 2, number);
3468 number_string_cache()->set(hash * 2 + 1, string);
3472 MaybeObject* Heap::NumberToString(Object* number,
3473 bool check_number_string_cache,
3474 PretenureFlag pretenure) {
3475 isolate_->counters()->number_to_string_runtime()->Increment();
3476 if (check_number_string_cache) {
3477 Object* cached = GetNumberStringCache(number);
3478 if (cached != undefined_value()) {
3484 Vector<char> buffer(arr, ARRAY_SIZE(arr));
3486 if (number->IsSmi()) {
3487 int num = Smi::cast(number)->value();
3488 str = IntToCString(num, buffer);
3490 double num = HeapNumber::cast(number)->value();
3491 str = DoubleToCString(num, buffer);
3495 MaybeObject* maybe_js_string =
3496 AllocateStringFromOneByte(CStrVector(str), pretenure);
3497 if (maybe_js_string->ToObject(&js_string)) {
3498 SetNumberStringCache(number, String::cast(js_string));
3500 return maybe_js_string;
3504 MaybeObject* Heap::Uint32ToString(uint32_t value,
3505 bool check_number_string_cache) {
3507 MaybeObject* maybe = NumberFromUint32(value);
3508 if (!maybe->To<Object>(&number)) return maybe;
3509 return NumberToString(number, check_number_string_cache);
3513 Map* Heap::MapForExternalArrayType(ExternalArrayType array_type) {
3514 return Map::cast(roots_[RootIndexForExternalArrayType(array_type)]);
3518 Heap::RootListIndex Heap::RootIndexForExternalArrayType(
3519 ExternalArrayType array_type) {
3520 switch (array_type) {
3521 case kExternalByteArray:
3522 return kExternalByteArrayMapRootIndex;
3523 case kExternalUnsignedByteArray:
3524 return kExternalUnsignedByteArrayMapRootIndex;
3525 case kExternalShortArray:
3526 return kExternalShortArrayMapRootIndex;
3527 case kExternalUnsignedShortArray:
3528 return kExternalUnsignedShortArrayMapRootIndex;
3529 case kExternalIntArray:
3530 return kExternalIntArrayMapRootIndex;
3531 case kExternalUnsignedIntArray:
3532 return kExternalUnsignedIntArrayMapRootIndex;
3533 case kExternalFloatArray:
3534 return kExternalFloatArrayMapRootIndex;
3535 case kExternalDoubleArray:
3536 return kExternalDoubleArrayMapRootIndex;
3537 case kExternalPixelArray:
3538 return kExternalPixelArrayMapRootIndex;
3541 return kUndefinedValueRootIndex;
3545 Heap::RootListIndex Heap::RootIndexForEmptyExternalArray(
3546 ElementsKind elementsKind) {
3547 switch (elementsKind) {
3548 case EXTERNAL_BYTE_ELEMENTS:
3549 return kEmptyExternalByteArrayRootIndex;
3550 case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
3551 return kEmptyExternalUnsignedByteArrayRootIndex;
3552 case EXTERNAL_SHORT_ELEMENTS:
3553 return kEmptyExternalShortArrayRootIndex;
3554 case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
3555 return kEmptyExternalUnsignedShortArrayRootIndex;
3556 case EXTERNAL_INT_ELEMENTS:
3557 return kEmptyExternalIntArrayRootIndex;
3558 case EXTERNAL_UNSIGNED_INT_ELEMENTS:
3559 return kEmptyExternalUnsignedIntArrayRootIndex;
3560 case EXTERNAL_FLOAT_ELEMENTS:
3561 return kEmptyExternalFloatArrayRootIndex;
3562 case EXTERNAL_DOUBLE_ELEMENTS:
3563 return kEmptyExternalDoubleArrayRootIndex;
3564 case EXTERNAL_PIXEL_ELEMENTS:
3565 return kEmptyExternalPixelArrayRootIndex;
3568 return kUndefinedValueRootIndex;
3573 ExternalArray* Heap::EmptyExternalArrayForMap(Map* map) {
3574 return ExternalArray::cast(
3575 roots_[RootIndexForEmptyExternalArray(map->elements_kind())]);
3581 MaybeObject* Heap::NumberFromDouble(double value, PretenureFlag pretenure) {
3582 // We need to distinguish the minus zero value and this cannot be
3583 // done after conversion to int. Doing this by comparing bit
3584 // patterns is faster than using fpclassify() et al.
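// For example, -0.0 == 0.0 as doubles and both convert to the integer 0, but
// their bit patterns differ (0x8000000000000000 versus 0), so the bit
// comparison below reliably detects minus zero.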
3585 static const DoubleRepresentation minus_zero(-0.0);
3587 DoubleRepresentation rep(value);
3588 if (rep.bits == minus_zero.bits) {
3589 return AllocateHeapNumber(-0.0, pretenure);
3592 int int_value = FastD2I(value);
3593 if (value == int_value && Smi::IsValid(int_value)) {
3594 return Smi::FromInt(int_value);
3597 // Materialize the value in the heap.
3598 return AllocateHeapNumber(value, pretenure);
3602 MaybeObject* Heap::AllocateForeign(Address address, PretenureFlag pretenure) {
3603 // Statically ensure that it is safe to allocate foreigns in paged spaces.
3604 STATIC_ASSERT(Foreign::kSize <= Page::kMaxNonCodeHeapObjectSize);
3605 AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
3607 MaybeObject* maybe_result = Allocate(foreign_map(), space);
3608 if (!maybe_result->To(&result)) return maybe_result;
3609 result->set_foreign_address(address);
3614 MaybeObject* Heap::AllocateSharedFunctionInfo(Object* name) {
3615 SharedFunctionInfo* share;
3616 MaybeObject* maybe = Allocate(shared_function_info_map(), OLD_POINTER_SPACE);
3617 if (!maybe->To<SharedFunctionInfo>(&share)) return maybe;
3619 // Set pointer fields.
3620 share->set_name(name);
3621 Code* illegal = isolate_->builtins()->builtin(Builtins::kIllegal);
3622 share->set_code(illegal);
3623 share->set_optimized_code_map(Smi::FromInt(0));
3624 share->set_scope_info(ScopeInfo::Empty(isolate_));
3625 Code* construct_stub =
3626 isolate_->builtins()->builtin(Builtins::kJSConstructStubGeneric);
3627 share->set_construct_stub(construct_stub);
3628 share->set_instance_class_name(Object_string());
3629 share->set_function_data(undefined_value(), SKIP_WRITE_BARRIER);
3630 share->set_script(undefined_value(), SKIP_WRITE_BARRIER);
3631 share->set_debug_info(undefined_value(), SKIP_WRITE_BARRIER);
3632 share->set_inferred_name(empty_string(), SKIP_WRITE_BARRIER);
3633 share->set_initial_map(undefined_value(), SKIP_WRITE_BARRIER);
3634 share->set_ast_node_count(0);
3635 share->set_counters(0);
3637 // Set integer fields (smi or int, depending on the architecture).
3638 share->set_length(0);
3639 share->set_formal_parameter_count(0);
3640 share->set_expected_nof_properties(0);
3641 share->set_num_literals(0);
3642 share->set_start_position_and_type(0);
3643 share->set_end_position(0);
3644 share->set_function_token_position(0);
3645 // All compiler hints default to false or 0.
3646 share->set_compiler_hints(0);
3647 share->set_opt_count(0);
3653 MaybeObject* Heap::AllocateJSMessageObject(String* type,
3658 Object* stack_trace,
3659 Object* stack_frames) {
3661 { MaybeObject* maybe_result = Allocate(message_object_map(), NEW_SPACE);
3662 if (!maybe_result->ToObject(&result)) return maybe_result;
3664 JSMessageObject* message = JSMessageObject::cast(result);
3665 message->set_properties(Heap::empty_fixed_array(), SKIP_WRITE_BARRIER);
3666 message->initialize_elements();
3667 message->set_elements(Heap::empty_fixed_array(), SKIP_WRITE_BARRIER);
3668 message->set_type(type);
3669 message->set_arguments(arguments);
3670 message->set_start_position(start_position);
3671 message->set_end_position(end_position);
3672 message->set_script(script);
3673 message->set_stack_trace(stack_trace);
3674 message->set_stack_frames(stack_frames);
3680 // Returns true for a character in a range. Both limits are inclusive.
3681 static inline bool Between(uint32_t character, uint32_t from, uint32_t to) {
3682 // This makes use of the unsigned wraparound.
3683 return character - from <= to - from;
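// Worked example of the wraparound trick above (illustrative): with
// from = '0' (48) and to = '9' (57), a character of 47 gives
// 47 - 48 == 0xFFFFFFFF in unsigned arithmetic, which is not <= 9, so values
// below the lower bound fail the single comparison just like values above
// the upper bound.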
3687 MUST_USE_RESULT static inline MaybeObject* MakeOrFindTwoCharacterString(
3692 // Numeric strings have a different hash algorithm not known by
3693 // LookupTwoCharsStringIfExists, so we skip this step for such strings.
3694 if ((!Between(c1, '0', '9') || !Between(c2, '0', '9')) &&
3695 heap->string_table()->LookupTwoCharsStringIfExists(c1, c2, &result)) {
3697 // Now we know the length is 2, we might as well make use of that fact
3698 // when building the new string.
3699 } else if (static_cast<unsigned>(c1 | c2) <= String::kMaxOneByteCharCodeU) {
3701 ASSERT(IsPowerOf2(String::kMaxOneByteCharCodeU + 1)); // because of this.
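// Reasoning behind the check above: because kMaxOneByteCharCodeU + 1 is a
// power of two, 'c <= kMaxOneByteCharCodeU' is equivalent to 'c has no bits
// set above that limit'. OR-ing c1 and c2 therefore tests both characters
// with one comparison: the OR stays within the limit exactly when each
// operand does.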
3703 { MaybeObject* maybe_result = heap->AllocateRawOneByteString(2);
3704 if (!maybe_result->ToObject(&result)) return maybe_result;
3706 uint8_t* dest = SeqOneByteString::cast(result)->GetChars();
3707 dest[0] = static_cast<uint8_t>(c1);
3708 dest[1] = static_cast<uint8_t>(c2);
3712 { MaybeObject* maybe_result = heap->AllocateRawTwoByteString(2);
3713 if (!maybe_result->ToObject(&result)) return maybe_result;
3715 uc16* dest = SeqTwoByteString::cast(result)->GetChars();
3723 MaybeObject* Heap::AllocateConsString(String* first, String* second) {
3724 int first_length = first->length();
3725 if (first_length == 0) {
3729 int second_length = second->length();
3730 if (second_length == 0) {
3734 int length = first_length + second_length;
3736 // Optimization for 2-byte strings often used as keys in a decompression
3737 // dictionary. Check whether we already have the string in the string
3738 // table to prevent creation of many unnecessary strings.
3740 uint16_t c1 = first->Get(0);
3741 uint16_t c2 = second->Get(0);
3742 return MakeOrFindTwoCharacterString(this, c1, c2);
3745 bool first_is_one_byte = first->IsOneByteRepresentation();
3746 bool second_is_one_byte = second->IsOneByteRepresentation();
3747 bool is_one_byte = first_is_one_byte && second_is_one_byte;
3748 // Make sure that an out of memory exception is thrown if the length
3749 // of the new cons string is too large.
3750 if (length > String::kMaxLength || length < 0) {
3751 isolate()->context()->mark_out_of_memory();
3752 return Failure::OutOfMemoryException(0x4);
3755 bool is_one_byte_data_in_two_byte_string = false;
3757 // At least one of the strings uses two-byte representation so we
3758 // can't use the fast case code for short ASCII strings below, but
3759 // we can try to save memory if all chars actually fit in ASCII.
3760 is_one_byte_data_in_two_byte_string =
3761 first->HasOnlyOneByteChars() && second->HasOnlyOneByteChars();
3762 if (is_one_byte_data_in_two_byte_string) {
3763 isolate_->counters()->string_add_runtime_ext_to_ascii()->Increment();
3767 // If the resulting string is small make a flat string.
3768 if (length < ConsString::kMinLength) {
3769 // Note that neither of the two inputs can be a slice because:
3770 STATIC_ASSERT(ConsString::kMinLength <= SlicedString::kMinLength);
3771 ASSERT(first->IsFlat());
3772 ASSERT(second->IsFlat());
3775 { MaybeObject* maybe_result = AllocateRawOneByteString(length);
3776 if (!maybe_result->ToObject(&result)) return maybe_result;
3778 // Copy the characters into the new object.
3779 uint8_t* dest = SeqOneByteString::cast(result)->GetChars();
3782 if (first->IsExternalString()) {
3783 src = ExternalAsciiString::cast(first)->GetChars();
3785 src = SeqOneByteString::cast(first)->GetChars();
3787 for (int i = 0; i < first_length; i++) *dest++ = src[i];
3788 // Copy second part.
3789 if (second->IsExternalString()) {
3790 src = ExternalAsciiString::cast(second)->GetChars();
3792 src = SeqOneByteString::cast(second)->GetChars();
3794 for (int i = 0; i < second_length; i++) *dest++ = src[i];
3797 if (is_one_byte_data_in_two_byte_string) {
3799 { MaybeObject* maybe_result = AllocateRawOneByteString(length);
3800 if (!maybe_result->ToObject(&result)) return maybe_result;
3802 // Copy the characters into the new object.
3803 uint8_t* dest = SeqOneByteString::cast(result)->GetChars();
3804 String::WriteToFlat(first, dest, 0, first_length);
3805 String::WriteToFlat(second, dest + first_length, 0, second_length);
3806 isolate_->counters()->string_add_runtime_ext_to_ascii()->Increment();
3811 { MaybeObject* maybe_result = AllocateRawTwoByteString(length);
3812 if (!maybe_result->ToObject(&result)) return maybe_result;
3814 // Copy the characters into the new object.
3815 uc16* dest = SeqTwoByteString::cast(result)->GetChars();
3816 String::WriteToFlat(first, dest, 0, first_length);
3817 String::WriteToFlat(second, dest + first_length, 0, second_length);
3822 Map* map = (is_one_byte || is_one_byte_data_in_two_byte_string) ?
3823 cons_ascii_string_map() : cons_string_map();
3826 { MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
3827 if (!maybe_result->ToObject(&result)) return maybe_result;
3830 DisallowHeapAllocation no_gc;
3831 ConsString* cons_string = ConsString::cast(result);
3832 WriteBarrierMode mode = cons_string->GetWriteBarrierMode(no_gc);
3833 cons_string->set_length(length);
3834 cons_string->set_hash_field(String::kEmptyHashField);
3835 cons_string->set_first(first, mode);
3836 cons_string->set_second(second, mode);
3841 MaybeObject* Heap::AllocateSubString(String* buffer,
3844 PretenureFlag pretenure) {
3845 int length = end - start;
3847 return empty_string();
3848 } else if (length == 1) {
3849 return LookupSingleCharacterStringFromCode(buffer->Get(start));
3850 } else if (length == 2) {
3851 // Optimization for 2-byte strings often used as keys in a decompression
3852 // dictionary. Check whether we already have the string in the string
3853 // table to prevent creation of many unnecessary strings.
3854 uint16_t c1 = buffer->Get(start);
3855 uint16_t c2 = buffer->Get(start + 1);
3856 return MakeOrFindTwoCharacterString(this, c1, c2);
3859 // Make an attempt to flatten the buffer to reduce access time.
3860 buffer = buffer->TryFlattenGetString();
3862 if (!FLAG_string_slices ||
3863 !buffer->IsFlat() ||
3864 length < SlicedString::kMinLength ||
3865 pretenure == TENURED) {
3867 // WriteToFlat takes care of the case when an indirect string has a
3868 // different encoding from its underlying string. These encodings may
3869 // differ because of externalization.
3870 bool is_one_byte = buffer->IsOneByteRepresentation();
3871 { MaybeObject* maybe_result = is_one_byte
3872 ? AllocateRawOneByteString(length, pretenure)
3873 : AllocateRawTwoByteString(length, pretenure);
3874 if (!maybe_result->ToObject(&result)) return maybe_result;
3876 String* string_result = String::cast(result);
3877 // Copy the characters into the new object.
3879 ASSERT(string_result->IsOneByteRepresentation());
3880 uint8_t* dest = SeqOneByteString::cast(string_result)->GetChars();
3881 String::WriteToFlat(buffer, dest, start, end);
3883 ASSERT(string_result->IsTwoByteRepresentation());
3884 uc16* dest = SeqTwoByteString::cast(string_result)->GetChars();
3885 String::WriteToFlat(buffer, dest, start, end);
3890 ASSERT(buffer->IsFlat());
3892 if (FLAG_verify_heap) {
3893 buffer->StringVerify();
3898 // When slicing an indirect string we use its encoding for a newly created
3899 // slice and don't check the encoding of the underlying string. This is safe
3900 // even if the encodings are different because of externalization. If an
3901 // indirect ASCII string is pointing to a two-byte string, the two-byte char
3902 // codes of the underlying string must still fit into ASCII (because
3903 // externalization must not change char codes).
3904 { Map* map = buffer->IsOneByteRepresentation()
3905 ? sliced_ascii_string_map()
3906 : sliced_string_map();
3907 MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
3908 if (!maybe_result->ToObject(&result)) return maybe_result;
3911 DisallowHeapAllocation no_gc;
3912 SlicedString* sliced_string = SlicedString::cast(result);
3913 sliced_string->set_length(length);
3914 sliced_string->set_hash_field(String::kEmptyHashField);
3915 if (buffer->IsConsString()) {
3916 ConsString* cons = ConsString::cast(buffer);
3917 ASSERT(cons->second()->length() == 0);
3918 sliced_string->set_parent(cons->first());
3919 sliced_string->set_offset(start);
3920 } else if (buffer->IsSlicedString()) {
3921 // Prevent nesting sliced strings.
3922 SlicedString* parent_slice = SlicedString::cast(buffer);
3923 sliced_string->set_parent(parent_slice->parent());
3924 sliced_string->set_offset(start + parent_slice->offset());
3926 sliced_string->set_parent(buffer);
3927 sliced_string->set_offset(start);
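// Illustrative example of the offset arithmetic in the sliced-string branch
// above: slicing a slice S(parent = P, offset = 10) starting at position 5
// produces a new slice with parent = P and offset = 15, so chains of sliced
// strings never grow deeper than one level.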
3929 ASSERT(sliced_string->parent()->IsSeqString() ||
3930 sliced_string->parent()->IsExternalString());
3935 MaybeObject* Heap::AllocateExternalStringFromAscii(
3936 const ExternalAsciiString::Resource* resource) {
3937 size_t length = resource->length();
3938 if (length > static_cast<size_t>(String::kMaxLength)) {
3939 isolate()->context()->mark_out_of_memory();
3940 return Failure::OutOfMemoryException(0x5);
3943 Map* map = external_ascii_string_map();
3945 { MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
3946 if (!maybe_result->ToObject(&result)) return maybe_result;
3949 ExternalAsciiString* external_string = ExternalAsciiString::cast(result);
3950 external_string->set_length(static_cast<int>(length));
3951 external_string->set_hash_field(String::kEmptyHashField);
3952 external_string->set_resource(resource);
3958 MaybeObject* Heap::AllocateExternalStringFromTwoByte(
3959 const ExternalTwoByteString::Resource* resource) {
3960 size_t length = resource->length();
3961 if (length > static_cast<size_t>(String::kMaxLength)) {
3962 isolate()->context()->mark_out_of_memory();
3963 return Failure::OutOfMemoryException(0x6);
3966 // For small strings we check whether the resource contains only
3967 // one-byte characters. If yes, we use a different string map.
3968 static const size_t kOneByteCheckLengthLimit = 32;
3969 bool is_one_byte = length <= kOneByteCheckLengthLimit &&
3970 String::IsOneByte(resource->data(), static_cast<int>(length));
3971 Map* map = is_one_byte ?
3972 external_string_with_one_byte_data_map() : external_string_map();
3974 { MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
3975 if (!maybe_result->ToObject(&result)) return maybe_result;
3978 ExternalTwoByteString* external_string = ExternalTwoByteString::cast(result);
3979 external_string->set_length(static_cast<int>(length));
3980 external_string->set_hash_field(String::kEmptyHashField);
3981 external_string->set_resource(resource);
3987 MaybeObject* Heap::LookupSingleCharacterStringFromCode(uint16_t code) {
3988 if (code <= String::kMaxOneByteCharCode) {
3989 Object* value = single_character_string_cache()->get(code);
3990 if (value != undefined_value()) return value;
3993 buffer[0] = static_cast<uint8_t>(code);
3995 MaybeObject* maybe_result =
3996 InternalizeOneByteString(Vector<const uint8_t>(buffer, 1));
3998 if (!maybe_result->ToObject(&result)) return maybe_result;
3999 single_character_string_cache()->set(code, result);
4004 { MaybeObject* maybe_result = AllocateRawTwoByteString(1);
4005 if (!maybe_result->ToObject(&result)) return maybe_result;
4007 String* answer = String::cast(result);
4008 answer->Set(0, code);
4013 MaybeObject* Heap::AllocateByteArray(int length, PretenureFlag pretenure) {
4014 if (length < 0 || length > ByteArray::kMaxLength) {
4015 return Failure::OutOfMemoryException(0x7);
4017 if (pretenure == NOT_TENURED) {
4018 return AllocateByteArray(length);
4020 int size = ByteArray::SizeFor(length);
4022 { MaybeObject* maybe_result = (size <= Page::kMaxNonCodeHeapObjectSize)
4023 ? old_data_space_->AllocateRaw(size)
4024 : lo_space_->AllocateRaw(size, NOT_EXECUTABLE);
4025 if (!maybe_result->ToObject(&result)) return maybe_result;
4028 reinterpret_cast<ByteArray*>(result)->set_map_no_write_barrier(
4030 reinterpret_cast<ByteArray*>(result)->set_length(length);
4035 MaybeObject* Heap::AllocateByteArray(int length) {
4036 if (length < 0 || length > ByteArray::kMaxLength) {
4037 return Failure::OutOfMemoryException(0x8);
4039 int size = ByteArray::SizeFor(length);
4040 AllocationSpace space =
4041 (size > Page::kMaxNonCodeHeapObjectSize) ? LO_SPACE : NEW_SPACE;
4043 { MaybeObject* maybe_result = AllocateRaw(size, space, OLD_DATA_SPACE);
4044 if (!maybe_result->ToObject(&result)) return maybe_result;
4047 reinterpret_cast<ByteArray*>(result)->set_map_no_write_barrier(
4049 reinterpret_cast<ByteArray*>(result)->set_length(length);
4054 void Heap::CreateFillerObjectAt(Address addr, int size) {
4055 if (size == 0) return;
4056 HeapObject* filler = HeapObject::FromAddress(addr);
4057 if (size == kPointerSize) {
4058 filler->set_map_no_write_barrier(one_pointer_filler_map());
4059 } else if (size == 2 * kPointerSize) {
4060 filler->set_map_no_write_barrier(two_pointer_filler_map());
4062 filler->set_map_no_write_barrier(free_space_map());
4063 FreeSpace::cast(filler)->set_size(size);
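// In effect, the filler keeps the freed gap iterable by the heap: one- and
// two-pointer holes are tagged with dedicated filler maps whose size is
// implied by the map, while larger holes become FreeSpace objects that
// record their size explicitly.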
4068 MaybeObject* Heap::AllocateExternalArray(int length,
4069 ExternalArrayType array_type,
4070 void* external_pointer,
4071 PretenureFlag pretenure) {
4072 AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
4074 { MaybeObject* maybe_result = AllocateRaw(ExternalArray::kAlignedSize,
4077 if (!maybe_result->ToObject(&result)) return maybe_result;
4080 reinterpret_cast<ExternalArray*>(result)->set_map_no_write_barrier(
4081 MapForExternalArrayType(array_type));
4082 reinterpret_cast<ExternalArray*>(result)->set_length(length);
4083 reinterpret_cast<ExternalArray*>(result)->set_external_pointer(
4090 MaybeObject* Heap::CreateCode(const CodeDesc& desc,
4092 Handle<Object> self_reference,
4094 bool crankshafted) {
4095 // Allocate ByteArray before the Code object, so that we do not risk
4096 // leaving an uninitialized Code object (and breaking the heap).
4097 ByteArray* reloc_info;
4098 MaybeObject* maybe_reloc_info = AllocateByteArray(desc.reloc_size, TENURED);
4099 if (!maybe_reloc_info->To(&reloc_info)) return maybe_reloc_info;
4102 int body_size = RoundUp(desc.instr_size, kObjectAlignment);
4103 int obj_size = Code::SizeFor(body_size);
4104 ASSERT(IsAligned(static_cast<intptr_t>(obj_size), kCodeAlignment));
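// Small worked example of the size computation above (assuming 8-byte object
// alignment, as on x64): an instr_size of 100 rounds up to a body_size of
// 104, and obj_size additionally covers the fixed Code header.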
4105 MaybeObject* maybe_result;
4106 // Large code objects and code objects which should stay at a fixed address
4107 // are allocated in large object space.
4109 bool force_lo_space = obj_size > code_space()->AreaSize();
4110 if (force_lo_space) {
4111 maybe_result = lo_space_->AllocateRaw(obj_size, EXECUTABLE);
4113 maybe_result = code_space_->AllocateRaw(obj_size);
4115 if (!maybe_result->To<HeapObject>(&result)) return maybe_result;
4117 if (immovable && !force_lo_space &&
4118 // Objects on the first page of each space are never moved.
4119 !code_space_->FirstPage()->Contains(result->address())) {
4120 // Discard the first code allocation, which was on a page where it could be
4121 // moved.
4122 CreateFillerObjectAt(result->address(), obj_size);
4123 maybe_result = lo_space_->AllocateRaw(obj_size, EXECUTABLE);
4124 if (!maybe_result->To<HeapObject>(&result)) return maybe_result;
4127 // Initialize the object
4128 result->set_map_no_write_barrier(code_map());
4129 Code* code = Code::cast(result);
4130 ASSERT(!isolate_->code_range()->exists() ||
4131 isolate_->code_range()->contains(code->address()));
4132 code->set_instruction_size(desc.instr_size);
4133 code->set_relocation_info(reloc_info);
4134 code->set_flags(flags);
4135 if (code->is_call_stub() || code->is_keyed_call_stub()) {
4136 code->set_check_type(RECEIVER_MAP_CHECK);
4138 code->set_is_crankshafted(crankshafted);
4139 code->set_deoptimization_data(empty_fixed_array(), SKIP_WRITE_BARRIER);
4140 code->InitializeTypeFeedbackInfoNoWriteBarrier(undefined_value());
4141 code->set_handler_table(empty_fixed_array(), SKIP_WRITE_BARRIER);
4142 code->set_gc_metadata(Smi::FromInt(0));
4143 code->set_ic_age(global_ic_age_);
4144 code->set_prologue_offset(kPrologueOffsetNotSet);
4145 if (code->kind() == Code::OPTIMIZED_FUNCTION) {
4146 code->set_marked_for_deoptimization(false);
4148 // Allow self references to the created code object by patching the handle to
4149 // point to the newly allocated Code object.
4150 if (!self_reference.is_null()) {
4151 *(self_reference.location()) = code;
4153 // Migrate generated code.
4154 // The generated code can contain Object** values (typically from handles)
4155 // that are dereferenced during the copy to point directly to the actual heap
4156 // objects. These pointers can include references to the code object itself,
4157 // through the self_reference parameter.
4158 code->CopyFrom(desc);
4161 if (FLAG_verify_heap) {
4169 MaybeObject* Heap::CopyCode(Code* code) {
4170 // Allocate an object the same size as the code object.
4171 int obj_size = code->Size();
4172 MaybeObject* maybe_result;
4173 if (obj_size > code_space()->AreaSize()) {
4174 maybe_result = lo_space_->AllocateRaw(obj_size, EXECUTABLE);
4176 maybe_result = code_space_->AllocateRaw(obj_size);
4180 if (!maybe_result->ToObject(&result)) return maybe_result;
4182 // Copy code object.
4183 Address old_addr = code->address();
4184 Address new_addr = reinterpret_cast<HeapObject*>(result)->address();
4185 CopyBlock(new_addr, old_addr, obj_size);
4186 // Relocate the copy.
4187 Code* new_code = Code::cast(result);
4188 ASSERT(!isolate_->code_range()->exists() ||
4189 isolate_->code_range()->contains(code->address()));
4190 new_code->Relocate(new_addr - old_addr);
4195 MaybeObject* Heap::CopyCode(Code* code, Vector<byte> reloc_info) {
4196 // Allocate ByteArray before the Code object, so that we do not risk
4197 // leaving an uninitialized Code object (and breaking the heap).
4198 Object* reloc_info_array;
4199 { MaybeObject* maybe_reloc_info_array =
4200 AllocateByteArray(reloc_info.length(), TENURED);
4201 if (!maybe_reloc_info_array->ToObject(&reloc_info_array)) {
4202 return maybe_reloc_info_array;
4206 int new_body_size = RoundUp(code->instruction_size(), kObjectAlignment);
4208 int new_obj_size = Code::SizeFor(new_body_size);
4210 Address old_addr = code->address();
4212 size_t relocation_offset =
4213 static_cast<size_t>(code->instruction_end() - old_addr);
4215 MaybeObject* maybe_result;
4216 if (new_obj_size > code_space()->AreaSize()) {
4217 maybe_result = lo_space_->AllocateRaw(new_obj_size, EXECUTABLE);
4219 maybe_result = code_space_->AllocateRaw(new_obj_size);
4223 if (!maybe_result->ToObject(&result)) return maybe_result;
4225 // Copy code object.
4226 Address new_addr = reinterpret_cast<HeapObject*>(result)->address();
4228 // Copy header and instructions.
4229 CopyBytes(new_addr, old_addr, relocation_offset);
4231 Code* new_code = Code::cast(result);
4232 new_code->set_relocation_info(ByteArray::cast(reloc_info_array));
4234 // Copy patched rinfo.
4235 CopyBytes(new_code->relocation_start(),
4237 static_cast<size_t>(reloc_info.length()));
4239 // Relocate the copy.
4240 ASSERT(!isolate_->code_range()->exists() ||
4241 isolate_->code_range()->contains(code->address()));
4242 new_code->Relocate(new_addr - old_addr);
4245 if (FLAG_verify_heap) {
4253 MaybeObject* Heap::AllocateWithAllocationSite(Map* map, AllocationSpace space,
4254 Handle<AllocationSite> allocation_site) {
4255 ASSERT(gc_state_ == NOT_IN_GC);
4256 ASSERT(map->instance_type() != MAP_TYPE);
4257 // If allocation failures are disallowed, we may allocate in a different
4258 // space when new space is full and the object is not a large object.
4259 AllocationSpace retry_space =
4260 (space != NEW_SPACE) ? space : TargetSpaceId(map->instance_type());
4261 int size = map->instance_size() + AllocationMemento::kSize;
4263 MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
4264 if (!maybe_result->ToObject(&result)) return maybe_result;
4265 // No need for write barrier since object is white and map is in old space.
4266 HeapObject::cast(result)->set_map_no_write_barrier(map);
4267 AllocationMemento* alloc_memento = reinterpret_cast<AllocationMemento*>(
4268 reinterpret_cast<Address>(result) + map->instance_size());
4269 alloc_memento->set_map_no_write_barrier(allocation_memento_map());
4270 alloc_memento->set_allocation_site(*allocation_site, SKIP_WRITE_BARRIER);
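// Layout produced by this function, as implied by the size computation
// above: [ object of map->instance_size() bytes ][ AllocationMemento ], with
// the memento's map and allocation_site fields initialized here.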
4275 MaybeObject* Heap::Allocate(Map* map, AllocationSpace space) {
4276 ASSERT(gc_state_ == NOT_IN_GC);
4277 ASSERT(map->instance_type() != MAP_TYPE);
4278 // If allocation failures are disallowed, we may allocate in a different
4279 // space when new space is full and the object is not a large object.
4280 AllocationSpace retry_space =
4281 (space != NEW_SPACE) ? space : TargetSpaceId(map->instance_type());
4282 int size = map->instance_size();
4284 MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
4285 if (!maybe_result->ToObject(&result)) return maybe_result;
4286 // No need for write barrier since object is white and map is in old space.
4287 HeapObject::cast(result)->set_map_no_write_barrier(map);
4292 void Heap::InitializeFunction(JSFunction* function,
4293 SharedFunctionInfo* shared,
4294 Object* prototype) {
4295 ASSERT(!prototype->IsMap());
4296 function->initialize_properties();
4297 function->initialize_elements();
4298 function->set_shared(shared);
4299 function->set_code(shared->code());
4300 function->set_prototype_or_initial_map(prototype);
4301 function->set_context(undefined_value());
4302 function->set_literals_or_bindings(empty_fixed_array());
4303 function->set_next_function_link(undefined_value());
4307 MaybeObject* Heap::AllocateFunctionPrototype(JSFunction* function) {
4308 // Make sure to use globals from the function's context, since the function
4309 // can be from a different context.
4310 Context* native_context = function->context()->native_context();
4312 if (function->shared()->is_generator()) {
4313 // Generator prototypes can share maps since they don't have "constructor"
4314 // properties.
4315 new_map = native_context->generator_object_prototype_map();
4317 // Each function prototype gets a fresh map to avoid unwanted sharing of
4318 // maps between prototypes of different constructors.
4319 JSFunction* object_function = native_context->object_function();
4320 ASSERT(object_function->has_initial_map());
4321 MaybeObject* maybe_map = object_function->initial_map()->Copy();
4322 if (!maybe_map->To(&new_map)) return maybe_map;
4326 MaybeObject* maybe_prototype = AllocateJSObjectFromMap(new_map);
4327 if (!maybe_prototype->ToObject(&prototype)) return maybe_prototype;
4329 if (!function->shared()->is_generator()) {
4330 MaybeObject* maybe_failure =
4331 JSObject::cast(prototype)->SetLocalPropertyIgnoreAttributes(
4332 constructor_string(), function, DONT_ENUM);
4333 if (maybe_failure->IsFailure()) return maybe_failure;
4340 MaybeObject* Heap::AllocateFunction(Map* function_map,
4341 SharedFunctionInfo* shared,
4343 PretenureFlag pretenure) {
4344 AllocationSpace space =
4345 (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
4347 { MaybeObject* maybe_result = Allocate(function_map, space);
4348 if (!maybe_result->ToObject(&result)) return maybe_result;
4350 InitializeFunction(JSFunction::cast(result), shared, prototype);
4355 MaybeObject* Heap::AllocateArgumentsObject(Object* callee, int length) {
4356 // To get fast allocation and map sharing for arguments objects we
4357 // allocate them based on an arguments boilerplate.
4359 JSObject* boilerplate;
4360 int arguments_object_size;
4361 bool strict_mode_callee = callee->IsJSFunction() &&
4362 !JSFunction::cast(callee)->shared()->is_classic_mode();
4363 if (strict_mode_callee) {
4365 isolate()->context()->native_context()->
4366 strict_mode_arguments_boilerplate();
4367 arguments_object_size = kArgumentsObjectSizeStrict;
4370 isolate()->context()->native_context()->arguments_boilerplate();
4371 arguments_object_size = kArgumentsObjectSize;
4374 // This calls Copy directly rather than using Heap::AllocateRaw so we
4375 // duplicate the check here.
4376 ASSERT(AllowHeapAllocation::IsAllowed() && gc_state_ == NOT_IN_GC);
4378 // Check that the size of the boilerplate matches our
4379 // expectations. The ArgumentsAccessStub::GenerateNewObject relies
4380 // on the size being a known constant.
4381 ASSERT(arguments_object_size == boilerplate->map()->instance_size());
4383 // Do the allocation.
4385 { MaybeObject* maybe_result =
4386 AllocateRaw(arguments_object_size, NEW_SPACE, OLD_POINTER_SPACE);
4387 if (!maybe_result->ToObject(&result)) return maybe_result;
4390 // Copy the content. The arguments boilerplate doesn't have any
4391 // fields that point to new space so it's safe to skip the write
4392 // barrier.
4393 CopyBlock(HeapObject::cast(result)->address(),
4394 boilerplate->address(),
4395 JSObject::kHeaderSize);
4397 // Set the length property.
4398 JSObject::cast(result)->InObjectPropertyAtPut(kArgumentsLengthIndex,
4399 Smi::FromInt(length),
4400 SKIP_WRITE_BARRIER);
4401 // Set the callee property for non-strict mode arguments object only.
4402 if (!strict_mode_callee) {
4403 JSObject::cast(result)->InObjectPropertyAtPut(kArgumentsCalleeIndex,
4407 // Check the state of the object
4408 ASSERT(JSObject::cast(result)->HasFastProperties());
4409 ASSERT(JSObject::cast(result)->HasFastObjectElements());
4415 MaybeObject* Heap::AllocateInitialMap(JSFunction* fun) {
4416 ASSERT(!fun->has_initial_map());
4418 // First create a new map with the size and number of in-object properties
4419 // suggested by the function.
4420 InstanceType instance_type;
4422 int in_object_properties;
4423 if (fun->shared()->is_generator()) {
4424 instance_type = JS_GENERATOR_OBJECT_TYPE;
4425 instance_size = JSGeneratorObject::kSize;
4426 in_object_properties = 0;
4428 instance_type = JS_OBJECT_TYPE;
4429 instance_size = fun->shared()->CalculateInstanceSize();
4430 in_object_properties = fun->shared()->CalculateInObjectProperties();
4433 MaybeObject* maybe_map = AllocateMap(instance_type, instance_size);
4434 if (!maybe_map->To(&map)) return maybe_map;
4436 // Fetch or allocate prototype.
4438 if (fun->has_instance_prototype()) {
4439 prototype = fun->instance_prototype();
4441 MaybeObject* maybe_prototype = AllocateFunctionPrototype(fun);
4442 if (!maybe_prototype->To(&prototype)) return maybe_prototype;
4444 map->set_inobject_properties(in_object_properties);
4445 map->set_unused_property_fields(in_object_properties);
4446 map->set_prototype(prototype);
4447 ASSERT(map->has_fast_object_elements());
4449 if (!fun->shared()->is_generator()) {
4450 fun->shared()->StartInobjectSlackTracking(map);
4457 void Heap::InitializeJSObjectFromMap(JSObject* obj,
4458 FixedArray* properties,
4460 obj->set_properties(properties);
4461 obj->initialize_elements();
4462 // TODO(1240798): Initialize the object's body using valid initial values
4463 // according to the object's initial map. For example, if the map's
4464 // instance type is JS_ARRAY_TYPE, the length field should be initialized
4465 // to a number (e.g. Smi::FromInt(0)) and the elements initialized to a
4466 // fixed array (e.g. Heap::empty_fixed_array()). Currently, the object
4467 // verification code has to cope with (temporarily) invalid objects. See
4468 // for example, JSArray::JSArrayVerify.
4470 // We cannot always fill with one_pointer_filler_map because objects
4471 // created from API functions expect their internal fields to be initialized
4472 // with undefined_value.
4473 // Pre-allocated fields need to be initialized with undefined_value as well
4474 // so that object accesses before the constructor completes (e.g. in the
4475 // debugger) will not cause a crash.
4476 if (map->constructor()->IsJSFunction() &&
4477 JSFunction::cast(map->constructor())->shared()->
4478 IsInobjectSlackTrackingInProgress()) {
4479 // We might want to shrink the object later.
4480 ASSERT(obj->GetInternalFieldCount() == 0);
4481 filler = Heap::one_pointer_filler_map();
4483 filler = Heap::undefined_value();
4485 obj->InitializeBody(map, Heap::undefined_value(), filler);
4489 MaybeObject* Heap::AllocateJSObjectFromMap(Map* map, PretenureFlag pretenure) {
4490 // JSFunctions should be allocated using AllocateFunction to be
4491 // properly initialized.
4492 ASSERT(map->instance_type() != JS_FUNCTION_TYPE);
4494 // Both types of global objects should be allocated using
4495 // AllocateGlobalObject to be properly initialized.
4496 ASSERT(map->instance_type() != JS_GLOBAL_OBJECT_TYPE);
4497 ASSERT(map->instance_type() != JS_BUILTINS_OBJECT_TYPE);
4499 // Allocate the backing storage for the properties.
4500 int prop_size = map->InitialPropertiesLength();
4501 ASSERT(prop_size >= 0);
4503 { MaybeObject* maybe_properties = AllocateFixedArray(prop_size, pretenure);
4504 if (!maybe_properties->ToObject(&properties)) return maybe_properties;
4507 // Allocate the JSObject.
4508 AllocationSpace space =
4509 (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
4510 if (map->instance_size() > Page::kMaxNonCodeHeapObjectSize) space = LO_SPACE;
4512 MaybeObject* maybe_obj = Allocate(map, space);
4513 if (!maybe_obj->To(&obj)) return maybe_obj;
4515 // Initialize the JSObject.
4516 InitializeJSObjectFromMap(JSObject::cast(obj),
4517 FixedArray::cast(properties),
4519 ASSERT(JSObject::cast(obj)->HasFastElements() ||
4520 JSObject::cast(obj)->HasExternalArrayElements());
4525 MaybeObject* Heap::AllocateJSObjectFromMapWithAllocationSite(Map* map,
4526 Handle<AllocationSite> allocation_site) {
4527 // JSFunctions should be allocated using AllocateFunction to be
4528 // properly initialized.
4529 ASSERT(map->instance_type() != JS_FUNCTION_TYPE);
4531 // Both types of global objects should be allocated using
4532 // AllocateGlobalObject to be properly initialized.
4533 ASSERT(map->instance_type() != JS_GLOBAL_OBJECT_TYPE);
4534 ASSERT(map->instance_type() != JS_BUILTINS_OBJECT_TYPE);
4536 // Allocate the backing storage for the properties.
4537 int prop_size = map->InitialPropertiesLength();
4538 ASSERT(prop_size >= 0);
4540 { MaybeObject* maybe_properties = AllocateFixedArray(prop_size);
4541 if (!maybe_properties->ToObject(&properties)) return maybe_properties;
4544 // Allocate the JSObject.
4545 AllocationSpace space = NEW_SPACE;
4546 if (map->instance_size() > Page::kMaxNonCodeHeapObjectSize) space = LO_SPACE;
4548 MaybeObject* maybe_obj =
4549 AllocateWithAllocationSite(map, space, allocation_site);
4550 if (!maybe_obj->To(&obj)) return maybe_obj;
4552 // Initialize the JSObject.
4553 InitializeJSObjectFromMap(JSObject::cast(obj),
4554 FixedArray::cast(properties),
4556 ASSERT(JSObject::cast(obj)->HasFastElements());
4561 MaybeObject* Heap::AllocateJSObject(JSFunction* constructor,
4562 PretenureFlag pretenure) {
4563 // Allocate the initial map if absent.
4564 if (!constructor->has_initial_map()) {
4565 Object* initial_map;
4566 { MaybeObject* maybe_initial_map = AllocateInitialMap(constructor);
4567 if (!maybe_initial_map->ToObject(&initial_map)) return maybe_initial_map;
4569 constructor->set_initial_map(Map::cast(initial_map));
4570 Map::cast(initial_map)->set_constructor(constructor);
4572 // Allocate the object based on the constructor's initial map.
4573 MaybeObject* result = AllocateJSObjectFromMap(
4574 constructor->initial_map(), pretenure);
4576 // Make sure result is NOT a global object if valid.
4577 Object* non_failure;
4578 ASSERT(!result->ToObject(&non_failure) || !non_failure->IsGlobalObject());
4584 MaybeObject* Heap::AllocateJSObjectWithAllocationSite(JSFunction* constructor,
4585 Handle<AllocationSite> allocation_site) {
4586 // Allocate the initial map if absent.
4587 if (!constructor->has_initial_map()) {
4588 Object* initial_map;
4589 { MaybeObject* maybe_initial_map = AllocateInitialMap(constructor);
4590 if (!maybe_initial_map->ToObject(&initial_map)) return maybe_initial_map;
4592 constructor->set_initial_map(Map::cast(initial_map));
4593 Map::cast(initial_map)->set_constructor(constructor);
4595 // Allocate the object based on the constructor's initial map, or the payload
4597 Map* initial_map = constructor->initial_map();
4599 Smi* smi = Smi::cast(allocation_site->transition_info());
4600 ElementsKind to_kind = static_cast<ElementsKind>(smi->value());
4601 AllocationSiteMode mode = TRACK_ALLOCATION_SITE;
4602 if (to_kind != initial_map->elements_kind()) {
4603 MaybeObject* maybe_new_map = initial_map->AsElementsKind(to_kind);
4604 if (!maybe_new_map->To(&initial_map)) return maybe_new_map;
4605 // Possibly alter the mode, since we found an updated elements kind
4606 // in the type info cell.
4607 mode = AllocationSite::GetMode(to_kind);
4610 MaybeObject* result;
4611 if (mode == TRACK_ALLOCATION_SITE) {
4612 result = AllocateJSObjectFromMapWithAllocationSite(initial_map,
4615 result = AllocateJSObjectFromMap(initial_map, NOT_TENURED);
4618 // Make sure result is NOT a global object if valid.
4619 Object* non_failure;
4620 ASSERT(!result->ToObject(&non_failure) || !non_failure->IsGlobalObject());
4626 MaybeObject* Heap::AllocateJSGeneratorObject(JSFunction *function) {
4627 ASSERT(function->shared()->is_generator());
4629 if (function->has_initial_map()) {
4630 map = function->initial_map();
4632 // Allocate the initial map if absent.
4633 MaybeObject* maybe_map = AllocateInitialMap(function);
4634 if (!maybe_map->To(&map)) return maybe_map;
4635 function->set_initial_map(map);
4636 map->set_constructor(function);
4638 ASSERT(map->instance_type() == JS_GENERATOR_OBJECT_TYPE);
4639 return AllocateJSObjectFromMap(map);
4643 MaybeObject* Heap::AllocateJSModule(Context* context, ScopeInfo* scope_info) {
4644 // Allocate a fresh map. Modules do not have a prototype.
4646 MaybeObject* maybe_map = AllocateMap(JS_MODULE_TYPE, JSModule::kSize);
4647 if (!maybe_map->To(&map)) return maybe_map;
4648 // Allocate the object based on the map.
4650 MaybeObject* maybe_module = AllocateJSObjectFromMap(map, TENURED);
4651 if (!maybe_module->To(&module)) return maybe_module;
4652 module->set_context(context);
4653 module->set_scope_info(scope_info);
4658 MaybeObject* Heap::AllocateJSArrayAndStorage(
4659 ElementsKind elements_kind,
4662 ArrayStorageAllocationMode mode,
4663 PretenureFlag pretenure) {
4664 MaybeObject* maybe_array = AllocateJSArray(elements_kind, pretenure);
4666 if (!maybe_array->To(&array)) return maybe_array;
4668 // TODO(mvstanton): this body of code duplicates AllocateJSArrayStorage
4669 // for performance reasons.
4670 ASSERT(capacity >= length);
4672 if (capacity == 0) {
4673 array->set_length(Smi::FromInt(0));
4674 array->set_elements(empty_fixed_array());
4678 FixedArrayBase* elms;
4679 MaybeObject* maybe_elms = NULL;
4680 if (IsFastDoubleElementsKind(elements_kind)) {
4681 if (mode == DONT_INITIALIZE_ARRAY_ELEMENTS) {
4682 maybe_elms = AllocateUninitializedFixedDoubleArray(capacity);
4684 ASSERT(mode == INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
4685 maybe_elms = AllocateFixedDoubleArrayWithHoles(capacity);
4688 ASSERT(IsFastSmiOrObjectElementsKind(elements_kind));
4689 if (mode == DONT_INITIALIZE_ARRAY_ELEMENTS) {
4690 maybe_elms = AllocateUninitializedFixedArray(capacity);
4692 ASSERT(mode == INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
4693 maybe_elms = AllocateFixedArrayWithHoles(capacity);
4696 if (!maybe_elms->To(&elms)) return maybe_elms;
4698 array->set_elements(elms);
4699 array->set_length(Smi::FromInt(length));
4704 MaybeObject* Heap::AllocateJSArrayAndStorageWithAllocationSite(
4705 ElementsKind elements_kind,
4708 Handle<AllocationSite> allocation_site,
4709 ArrayStorageAllocationMode mode) {
4710 MaybeObject* maybe_array = AllocateJSArrayWithAllocationSite(elements_kind,
4713 if (!maybe_array->To(&array)) return maybe_array;
4714 return AllocateJSArrayStorage(array, length, capacity, mode);
4718 MaybeObject* Heap::AllocateJSArrayStorage(
4722 ArrayStorageAllocationMode mode) {
4723 ASSERT(capacity >= length);
4725 if (capacity == 0) {
4726 array->set_length(Smi::FromInt(0));
4727 array->set_elements(empty_fixed_array());
4731 FixedArrayBase* elms;
4732 MaybeObject* maybe_elms = NULL;
4733 ElementsKind elements_kind = array->GetElementsKind();
4734 if (IsFastDoubleElementsKind(elements_kind)) {
4735 if (mode == DONT_INITIALIZE_ARRAY_ELEMENTS) {
4736 maybe_elms = AllocateUninitializedFixedDoubleArray(capacity);
4738 ASSERT(mode == INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
4739 maybe_elms = AllocateFixedDoubleArrayWithHoles(capacity);
4742 ASSERT(IsFastSmiOrObjectElementsKind(elements_kind));
4743 if (mode == DONT_INITIALIZE_ARRAY_ELEMENTS) {
4744 maybe_elms = AllocateUninitializedFixedArray(capacity);
4746 ASSERT(mode == INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
4747 maybe_elms = AllocateFixedArrayWithHoles(capacity);
4750 if (!maybe_elms->To(&elms)) return maybe_elms;
4752 array->set_elements(elms);
4753 array->set_length(Smi::FromInt(length));
4758 MaybeObject* Heap::AllocateJSArrayWithElements(
4759 FixedArrayBase* elements,
4760 ElementsKind elements_kind,
4762 PretenureFlag pretenure) {
4763 MaybeObject* maybe_array = AllocateJSArray(elements_kind, pretenure);
4765 if (!maybe_array->To(&array)) return maybe_array;
4767 array->set_elements(elements);
4768 array->set_length(Smi::FromInt(length));
4769 array->ValidateElements();
4774 MaybeObject* Heap::AllocateJSProxy(Object* handler, Object* prototype) {
4776 // TODO(rossberg): Once we optimize proxies, think about a scheme to share
4777 // maps. Will probably depend on the identity of the handler object, too.
4779 MaybeObject* maybe_map_obj = AllocateMap(JS_PROXY_TYPE, JSProxy::kSize);
4780 if (!maybe_map_obj->To<Map>(&map)) return maybe_map_obj;
4781 map->set_prototype(prototype);
4783 // Allocate the proxy object.
4785 MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
4786 if (!maybe_result->To<JSProxy>(&result)) return maybe_result;
4787 result->InitializeBody(map->instance_size(), Smi::FromInt(0));
4788 result->set_handler(handler);
4789 result->set_hash(undefined_value(), SKIP_WRITE_BARRIER);
4794 MaybeObject* Heap::AllocateJSFunctionProxy(Object* handler,
4796 Object* construct_trap,
4797 Object* prototype) {
4799 // TODO(rossberg): Once we optimize proxies, think about a scheme to share
4800 // maps. Will probably depend on the identity of the handler object, too.
4802 MaybeObject* maybe_map_obj =
4803 AllocateMap(JS_FUNCTION_PROXY_TYPE, JSFunctionProxy::kSize);
4804 if (!maybe_map_obj->To<Map>(&map)) return maybe_map_obj;
4805 map->set_prototype(prototype);
4807 // Allocate the proxy object.
4808 JSFunctionProxy* result;
4809 MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
4810 if (!maybe_result->To<JSFunctionProxy>(&result)) return maybe_result;
4811 result->InitializeBody(map->instance_size(), Smi::FromInt(0));
4812 result->set_handler(handler);
4813 result->set_hash(undefined_value(), SKIP_WRITE_BARRIER);
4814 result->set_call_trap(call_trap);
4815 result->set_construct_trap(construct_trap);
4820 MaybeObject* Heap::AllocateGlobalObject(JSFunction* constructor) {
4821 ASSERT(constructor->has_initial_map());
4822 Map* map = constructor->initial_map();
4823 ASSERT(map->is_dictionary_map());
4825 // Make sure no field properties are described in the initial map.
4826 // This guarantees us that normalizing the properties does not
4827 // require us to change property values to PropertyCells.
4828 ASSERT(map->NextFreePropertyIndex() == 0);
4830 // Make sure we don't have a ton of pre-allocated slots in the
4831 // global objects. They will be unused once we normalize the object.
4832 ASSERT(map->unused_property_fields() == 0);
4833 ASSERT(map->inobject_properties() == 0);
4835 // Initial size of the backing store to avoid resize of the storage during
4836 // bootstrapping. The size differs between the JS global object and the
4837 // builtins object.
4838 int initial_size = map->instance_type() == JS_GLOBAL_OBJECT_TYPE ? 64 : 512;
4840 // Allocate a dictionary object for backing storage.
4841 NameDictionary* dictionary;
4842 MaybeObject* maybe_dictionary =
4843 NameDictionary::Allocate(
4845 map->NumberOfOwnDescriptors() * 2 + initial_size);
4846 if (!maybe_dictionary->To(&dictionary)) return maybe_dictionary;
4848 // The global object might be created from an object template with accessors.
4849 // Fill these accessors into the dictionary.
4850 DescriptorArray* descs = map->instance_descriptors();
4851 for (int i = 0; i < map->NumberOfOwnDescriptors(); i++) {
4852 PropertyDetails details = descs->GetDetails(i);
4853 ASSERT(details.type() == CALLBACKS); // Only accessors are expected.
4854 PropertyDetails d = PropertyDetails(details.attributes(), CALLBACKS, i + 1);
4855 Object* value = descs->GetCallbacksObject(i);
4856 MaybeObject* maybe_value = AllocatePropertyCell(value);
4857 if (!maybe_value->ToObject(&value)) return maybe_value;
4859 MaybeObject* maybe_added = dictionary->Add(descs->GetKey(i), value, d);
4860 if (!maybe_added->To(&dictionary)) return maybe_added;
4863 // Allocate the global object and initialize it with the backing store.
4865 MaybeObject* maybe_global = Allocate(map, OLD_POINTER_SPACE);
4866 if (!maybe_global->To(&global)) return maybe_global;
4868 InitializeJSObjectFromMap(global, dictionary, map);
4870 // Create a new map for the global object.
4872 MaybeObject* maybe_map = map->CopyDropDescriptors();
4873 if (!maybe_map->To(&new_map)) return maybe_map;
4874 new_map->set_dictionary_map(true);
4876 // Set up the global object as a normalized object.
4877 global->set_map(new_map);
4878 global->set_properties(dictionary);
4880 // Make sure result is a global object with properties in dictionary.
4881 ASSERT(global->IsGlobalObject());
4882 ASSERT(!global->HasFastProperties());
4887 MaybeObject* Heap::CopyJSObject(JSObject* source) {
4888 // Never used to copy functions. If functions need to be copied we
4889 // have to be careful to clear the literals array.
4890 SLOW_ASSERT(!source->IsJSFunction());
4893 Map* map = source->map();
4894 int object_size = map->instance_size();
4897 WriteBarrierMode wb_mode = UPDATE_WRITE_BARRIER;
4899 // If we're forced to always allocate, we use the general allocation
4900 // functions which may leave us with an object in old space.
4901 if (always_allocate()) {
4902 { MaybeObject* maybe_clone =
4903 AllocateRaw(object_size, NEW_SPACE, OLD_POINTER_SPACE);
4904 if (!maybe_clone->ToObject(&clone)) return maybe_clone;
4906 Address clone_address = HeapObject::cast(clone)->address();
4907 CopyBlock(clone_address,
4910 // Update write barrier for all fields that lie beyond the header.
4911 RecordWrites(clone_address,
4912 JSObject::kHeaderSize,
4913 (object_size - JSObject::kHeaderSize) / kPointerSize);
4915 wb_mode = SKIP_WRITE_BARRIER;
4917 { MaybeObject* maybe_clone = new_space_.AllocateRaw(object_size);
4918 if (!maybe_clone->ToObject(&clone)) return maybe_clone;
4920 SLOW_ASSERT(InNewSpace(clone));
4921 // Since we know the clone is allocated in new space, we can copy
4922 // the contents without worrying about updating the write barrier.
4923 CopyBlock(HeapObject::cast(clone)->address(),
4929 JSObject::cast(clone)->GetElementsKind() == source->GetElementsKind());
4930 FixedArrayBase* elements = FixedArrayBase::cast(source->elements());
4931 FixedArray* properties = FixedArray::cast(source->properties());
4932 // Update elements if necessary.
4933 if (elements->length() > 0) {
4935 { MaybeObject* maybe_elem;
4936 if (elements->map() == fixed_cow_array_map()) {
4937 maybe_elem = FixedArray::cast(elements);
4938 } else if (source->HasFastDoubleElements()) {
4939 maybe_elem = CopyFixedDoubleArray(FixedDoubleArray::cast(elements));
4941 maybe_elem = CopyFixedArray(FixedArray::cast(elements));
4943 if (!maybe_elem->ToObject(&elem)) return maybe_elem;
4945 JSObject::cast(clone)->set_elements(FixedArrayBase::cast(elem), wb_mode);
4947 // Update properties if necessary.
4948 if (properties->length() > 0) {
4950 { MaybeObject* maybe_prop = CopyFixedArray(properties);
4951 if (!maybe_prop->ToObject(&prop)) return maybe_prop;
4953 JSObject::cast(clone)->set_properties(FixedArray::cast(prop), wb_mode);
4955 // Return the new clone.
4960 MaybeObject* Heap::CopyJSObjectWithAllocationSite(
4962 AllocationSite* site) {
4963 // Never used to copy functions. If functions need to be copied we
4964 // have to be careful to clear the literals array.
4965 SLOW_ASSERT(!source->IsJSFunction());
4968 Map* map = source->map();
4969 int object_size = map->instance_size();
4972 ASSERT(map->CanTrackAllocationSite());
4973 ASSERT(map->instance_type() == JS_ARRAY_TYPE);
4974 WriteBarrierMode wb_mode = UPDATE_WRITE_BARRIER;
4976 // If we're forced to always allocate, we use the general allocation
4977 // functions which may leave us with an object in old space.
4978 int adjusted_object_size = object_size;
4979 if (always_allocate()) {
4980 // We'll only track origin if we are certain to allocate in new space
4981 const int kMinFreeNewSpaceAfterGC = InitialSemiSpaceSize() * 3/4;
4982 if ((object_size + AllocationMemento::kSize) < kMinFreeNewSpaceAfterGC) {
4983 adjusted_object_size += AllocationMemento::kSize;
4986 { MaybeObject* maybe_clone =
4987 AllocateRaw(adjusted_object_size, NEW_SPACE, OLD_POINTER_SPACE);
4988 if (!maybe_clone->ToObject(&clone)) return maybe_clone;
4990 Address clone_address = HeapObject::cast(clone)->address();
4991 CopyBlock(clone_address,
4994 // Update write barrier for all fields that lie beyond the header.
4995 int write_barrier_offset = adjusted_object_size > object_size
4996 ? JSArray::kSize + AllocationMemento::kSize
4997 : JSObject::kHeaderSize;
4998 if (((object_size - write_barrier_offset) / kPointerSize) > 0) {
4999 RecordWrites(clone_address,
5000 write_barrier_offset,
5001 (object_size - write_barrier_offset) / kPointerSize);
5004 // Track allocation site information, if we failed to allocate it inline.
5005 if (InNewSpace(clone) &&
5006 adjusted_object_size == object_size) {
5007 MaybeObject* maybe_alloc_memento =
5008 AllocateStruct(ALLOCATION_MEMENTO_TYPE);
5009 AllocationMemento* alloc_memento;
5010 if (maybe_alloc_memento->To(&alloc_memento)) {
5011 alloc_memento->set_map_no_write_barrier(allocation_memento_map());
5012 alloc_memento->set_allocation_site(site, SKIP_WRITE_BARRIER);
5016 wb_mode = SKIP_WRITE_BARRIER;
5017 adjusted_object_size += AllocationMemento::kSize;
5019 { MaybeObject* maybe_clone = new_space_.AllocateRaw(adjusted_object_size);
5020 if (!maybe_clone->ToObject(&clone)) return maybe_clone;
5022 SLOW_ASSERT(InNewSpace(clone));
5023 // Since we know the clone is allocated in new space, we can copy
5024 // the contents without worrying about updating the write barrier.
5025 CopyBlock(HeapObject::cast(clone)->address(),
5030 if (adjusted_object_size > object_size) {
5031 AllocationMemento* alloc_memento = reinterpret_cast<AllocationMemento*>(
5032 reinterpret_cast<Address>(clone) + object_size);
5033 alloc_memento->set_map_no_write_barrier(allocation_memento_map());
5034 alloc_memento->set_allocation_site(site, SKIP_WRITE_BARRIER);
5038 JSObject::cast(clone)->GetElementsKind() == source->GetElementsKind());
5039 FixedArrayBase* elements = FixedArrayBase::cast(source->elements());
5040 FixedArray* properties = FixedArray::cast(source->properties());
5041 // Update elements if necessary.
5042 if (elements->length() > 0) {
5044 { MaybeObject* maybe_elem;
5045 if (elements->map() == fixed_cow_array_map()) {
5046 maybe_elem = FixedArray::cast(elements);
5047 } else if (source->HasFastDoubleElements()) {
5048 maybe_elem = CopyFixedDoubleArray(FixedDoubleArray::cast(elements));
5050 maybe_elem = CopyFixedArray(FixedArray::cast(elements));
5052 if (!maybe_elem->ToObject(&elem)) return maybe_elem;
5054 JSObject::cast(clone)->set_elements(FixedArrayBase::cast(elem), wb_mode);
5056 // Update properties if necessary.
5057 if (properties->length() > 0) {
5059 { MaybeObject* maybe_prop = CopyFixedArray(properties);
5060 if (!maybe_prop->ToObject(&prop)) return maybe_prop;
5062 JSObject::cast(clone)->set_properties(FixedArray::cast(prop), wb_mode);
5064 // Return the new clone.
5069 MaybeObject* Heap::ReinitializeJSReceiver(
5070 JSReceiver* object, InstanceType type, int size) {
5071 ASSERT(type >= FIRST_JS_OBJECT_TYPE);
5073 // Allocate fresh map.
5074 // TODO(rossberg): Once we optimize proxies, cache these maps.
5076 MaybeObject* maybe = AllocateMap(type, size);
5077 if (!maybe->To<Map>(&map)) return maybe;
5079 // Check that the receiver has at least the size of the fresh object.
5080 int size_difference = object->map()->instance_size() - map->instance_size();
5081 ASSERT(size_difference >= 0);
5083 map->set_prototype(object->map()->prototype());
5085 // Allocate the backing storage for the properties.
5086 int prop_size = map->unused_property_fields() - map->inobject_properties();
5088 maybe = AllocateFixedArray(prop_size, TENURED);
5089 if (!maybe->ToObject(&properties)) return maybe;
5091 // Functions require some allocation, which might fail here.
5092 SharedFunctionInfo* shared = NULL;
5093 if (type == JS_FUNCTION_TYPE) {
5096 InternalizeOneByteString(STATIC_ASCII_VECTOR("<freezing call trap>"));
5097 if (!maybe->To<String>(&name)) return maybe;
5098 maybe = AllocateSharedFunctionInfo(name);
5099 if (!maybe->To<SharedFunctionInfo>(&shared)) return maybe;
5102 // Because of possible retries of this function after failure,
5103 // we must NOT fail after this point, where we have changed the type!
5105 // Reset the map for the object.
5106 object->set_map(map);
5107 JSObject* jsobj = JSObject::cast(object);
5109 // Reinitialize the object from the constructor map.
5110 InitializeJSObjectFromMap(jsobj, FixedArray::cast(properties), map);
5112 // Functions require some minimal initialization.
5113 if (type == JS_FUNCTION_TYPE) {
5114 map->set_function_with_prototype(true);
5115 InitializeFunction(JSFunction::cast(object), shared, the_hole_value());
5116 JSFunction::cast(object)->set_context(
5117 isolate()->context()->native_context());
5120 // Put in filler if the new object is smaller than the old.
5121 if (size_difference > 0) {
5122 CreateFillerObjectAt(
5123 object->address() + map->instance_size(), size_difference);
5130 MaybeObject* Heap::ReinitializeJSGlobalProxy(JSFunction* constructor,
5131 JSGlobalProxy* object) {
5132 ASSERT(constructor->has_initial_map());
5133 Map* map = constructor->initial_map();
5135 // Check that the already allocated object has the same size and type as
5136 // objects allocated using the constructor.
5137 ASSERT(map->instance_size() == object->map()->instance_size());
5138 ASSERT(map->instance_type() == object->map()->instance_type());
5140 // Allocate the backing storage for the properties.
5141 int prop_size = map->unused_property_fields() - map->inobject_properties();
5143 { MaybeObject* maybe_properties = AllocateFixedArray(prop_size, TENURED);
5144 if (!maybe_properties->ToObject(&properties)) return maybe_properties;
5147 // Reset the map for the object.
5148 object->set_map(constructor->initial_map());
5150 // Reinitialize the object from the constructor map.
5151 InitializeJSObjectFromMap(object, FixedArray::cast(properties), map);
5156 MaybeObject* Heap::AllocateStringFromOneByte(Vector<const uint8_t> string,
5157 PretenureFlag pretenure) {
5158 int length = string.length();
5160 return Heap::LookupSingleCharacterStringFromCode(string[0]);
5163 { MaybeObject* maybe_result =
5164 AllocateRawOneByteString(string.length(), pretenure);
5165 if (!maybe_result->ToObject(&result)) return maybe_result;
5168 // Copy the characters into the new object.
5169 CopyChars(SeqOneByteString::cast(result)->GetChars(),
5176 MaybeObject* Heap::AllocateStringFromUtf8Slow(Vector<const char> string,
5177 int non_ascii_start,
5178 PretenureFlag pretenure) {
5179 // Continue counting the number of characters in the UTF-8 string, starting
5180 // from the first non-ascii character or word.
5181 Access<UnicodeCache::Utf8Decoder>
5182 decoder(isolate_->unicode_cache()->utf8_decoder());
5183 decoder->Reset(string.start() + non_ascii_start,
5184 string.length() - non_ascii_start);
5185 int utf16_length = decoder->Utf16Length();
5186 ASSERT(utf16_length > 0);
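// For illustration: the UTF-16 length cannot be inferred from the byte count
// alone. A 3-byte UTF-8 sequence such as U+20AC decodes to a single UTF-16
// code unit, while a 4-byte sequence for a supplementary character decodes
// to a surrogate pair (two units), hence the decoder pass above to compute
// utf16_length before allocating.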
5190 int chars = non_ascii_start + utf16_length;
5191 MaybeObject* maybe_result = AllocateRawTwoByteString(chars, pretenure);
5192 if (!maybe_result->ToObject(&result)) return maybe_result;
5194 // Convert and copy the characters into the new object.
5195 SeqTwoByteString* twobyte = SeqTwoByteString::cast(result);
5196 // Copy ascii portion.
5197 uint16_t* data = twobyte->GetChars();
5198 if (non_ascii_start != 0) {
5199 const char* ascii_data = string.start();
5200 for (int i = 0; i < non_ascii_start; i++) {
5201 *data++ = *ascii_data++;
5204 // Now write the remainder.
5205 decoder->WriteUtf16(data, utf16_length);
5210 MaybeObject* Heap::AllocateStringFromTwoByte(Vector<const uc16> string,
5211 PretenureFlag pretenure) {
5212 // Check if the string is an ASCII string.
5214 int length = string.length();
5215 const uc16* start = string.start();
5217 if (String::IsOneByte(start, length)) {
5218 MaybeObject* maybe_result = AllocateRawOneByteString(length, pretenure);
5219 if (!maybe_result->ToObject(&result)) return maybe_result;
5220 CopyChars(SeqOneByteString::cast(result)->GetChars(), start, length);
5221 } else { // It's not a one byte string.
5222 MaybeObject* maybe_result = AllocateRawTwoByteString(length, pretenure);
5223 if (!maybe_result->ToObject(&result)) return maybe_result;
5224 CopyChars(SeqTwoByteString::cast(result)->GetChars(), start, length);
5230 Map* Heap::InternalizedStringMapForString(String* string) {
5231 // If the string is in new space it cannot be used as internalized.
5232 if (InNewSpace(string)) return NULL;
5234 // Find the corresponding internalized string map for strings.
5235 switch (string->map()->instance_type()) {
5236 case STRING_TYPE: return internalized_string_map();
5237 case ASCII_STRING_TYPE: return ascii_internalized_string_map();
5238 case CONS_STRING_TYPE: return cons_internalized_string_map();
5239 case CONS_ASCII_STRING_TYPE: return cons_ascii_internalized_string_map();
5240 case EXTERNAL_STRING_TYPE: return external_internalized_string_map();
5241 case EXTERNAL_ASCII_STRING_TYPE:
5242 return external_ascii_internalized_string_map();
5243 case EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE:
5244 return external_internalized_string_with_one_byte_data_map();
5245 case SHORT_EXTERNAL_STRING_TYPE:
5246 return short_external_internalized_string_map();
5247 case SHORT_EXTERNAL_ASCII_STRING_TYPE:
5248 return short_external_ascii_internalized_string_map();
5249 case SHORT_EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE:
5250 return short_external_internalized_string_with_one_byte_data_map();
5251 default: return NULL; // No match found.
5256 static inline void WriteOneByteData(Vector<const char> vector,
5259 // Only works for ascii.
5260 ASSERT(vector.length() == len);
5261 OS::MemCopy(chars, vector.start(), len);
5264 static inline void WriteTwoByteData(Vector<const char> vector,
5267 const uint8_t* stream = reinterpret_cast<const uint8_t*>(vector.start());
5268 unsigned stream_length = vector.length();
5269 while (stream_length != 0) {
5270 unsigned consumed = 0;
5271 uint32_t c = unibrow::Utf8::ValueOf(stream, stream_length, &consumed);
5272 ASSERT(c != unibrow::Utf8::kBadChar);
5273 ASSERT(consumed <= stream_length);
5274 stream_length -= consumed;
5276 if (c > unibrow::Utf16::kMaxNonSurrogateCharCode) {
5279 *chars++ = unibrow::Utf16::LeadSurrogate(c);
5280 *chars++ = unibrow::Utf16::TrailSurrogate(c);
5287 ASSERT(stream_length == 0);
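// Illustrative sketch only (not part of the original source): the surrogate
// split performed above, written out with plain arithmetic. For example,
// U+10437 becomes the UTF-16 pair 0xD801, 0xDC37.
static inline void ExampleEncodeSurrogatePair(uint32_t code_point,
                                              uint16_t* lead,
                                              uint16_t* trail) {
  // Only valid for supplementary-plane code points (above 0xFFFF).
  uint32_t offset = code_point - 0x10000;
  *lead = static_cast<uint16_t>(0xD800 + (offset >> 10));
  *trail = static_cast<uint16_t>(0xDC00 + (offset & 0x3FF));
}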
5292 static inline void WriteOneByteData(String* s, uint8_t* chars, int len) {
5293 ASSERT(s->length() == len);
5294 String::WriteToFlat(s, chars, 0, len);
5298 static inline void WriteTwoByteData(String* s, uint16_t* chars, int len) {
5299 ASSERT(s->length() == len);
5300 String::WriteToFlat(s, chars, 0, len);
5304 template<bool is_one_byte, typename T>
5305 MaybeObject* Heap::AllocateInternalizedStringImpl(
5306 T t, int chars, uint32_t hash_field) {
5308 // Compute map and object size.
5313 if (chars > SeqOneByteString::kMaxLength) {
5314 return Failure::OutOfMemoryException(0x9);
5316 map = ascii_internalized_string_map();
5317 size = SeqOneByteString::SizeFor(chars);
5319 if (chars > SeqTwoByteString::kMaxLength) {
5320 return Failure::OutOfMemoryException(0xa);
5322 map = internalized_string_map();
5323 size = SeqTwoByteString::SizeFor(chars);
5328 { MaybeObject* maybe_result = (size > Page::kMaxNonCodeHeapObjectSize)
5329 ? lo_space_->AllocateRaw(size, NOT_EXECUTABLE)
5330 : old_data_space_->AllocateRaw(size);
5331 if (!maybe_result->ToObject(&result)) return maybe_result;
5334 reinterpret_cast<HeapObject*>(result)->set_map_no_write_barrier(map);
5335 // Set length and hash fields of the allocated string.
5336 String* answer = String::cast(result);
5337 answer->set_length(chars);
5338 answer->set_hash_field(hash_field);
5340 ASSERT_EQ(size, answer->Size());
5343 WriteOneByteData(t, SeqOneByteString::cast(answer)->GetChars(), chars);
5345 WriteTwoByteData(t, SeqTwoByteString::cast(answer)->GetChars(), chars);
5351 // Need explicit instantiations.
5353 MaybeObject* Heap::AllocateInternalizedStringImpl<true>(String*, int, uint32_t);
5355 MaybeObject* Heap::AllocateInternalizedStringImpl<false>(
5356 String*, int, uint32_t);
5358 MaybeObject* Heap::AllocateInternalizedStringImpl<false>(
5359 Vector<const char>, int, uint32_t);
5362 MaybeObject* Heap::AllocateRawOneByteString(int length,
5363 PretenureFlag pretenure) {
5364 if (length < 0 || length > SeqOneByteString::kMaxLength) {
5365 return Failure::OutOfMemoryException(0xb);
5367 int size = SeqOneByteString::SizeFor(length);
5368 ASSERT(size <= SeqOneByteString::kMaxSize);
5369 AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
5370 AllocationSpace retry_space = OLD_DATA_SPACE;
5372 if (size > Page::kMaxNonCodeHeapObjectSize) {
5373 // Allocate in large object space; the retry space will be ignored.
5378 { MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
5379 if (!maybe_result->ToObject(&result)) return maybe_result;
5382 // Partially initialize the object.
5383 HeapObject::cast(result)->set_map_no_write_barrier(ascii_string_map());
5384 String::cast(result)->set_length(length);
5385 String::cast(result)->set_hash_field(String::kEmptyHashField);
5386 ASSERT_EQ(size, HeapObject::cast(result)->Size());
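// Illustrative sketch only (not part of the original source): the calling
// convention used for allocations throughout this file. A MaybeObject* is
// either a real object or a retry/out-of-memory failure, so callers unwrap
// it with ToObject() and propagate the failure when that returns false:
//
//   Object* result;
//   { MaybeObject* maybe = heap->AllocateRawOneByteString(10, NOT_TENURED);
//     if (!maybe->ToObject(&result)) return maybe;  // propagate the failure
//   }
//   SeqOneByteString* str = SeqOneByteString::cast(result);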
5392 MaybeObject* Heap::AllocateRawTwoByteString(int length,
5393 PretenureFlag pretenure) {
5394 if (length < 0 || length > SeqTwoByteString::kMaxLength) {
5395 return Failure::OutOfMemoryException(0xc);
5397 int size = SeqTwoByteString::SizeFor(length);
5398 ASSERT(size <= SeqTwoByteString::kMaxSize);
5399 AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
5400 AllocationSpace retry_space = OLD_DATA_SPACE;
5402 if (size > Page::kMaxNonCodeHeapObjectSize) {
5403 // Allocate in large object space; the retry space will be ignored.
5408 { MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
5409 if (!maybe_result->ToObject(&result)) return maybe_result;
5412 // Partially initialize the object.
5413 HeapObject::cast(result)->set_map_no_write_barrier(string_map());
5414 String::cast(result)->set_length(length);
5415 String::cast(result)->set_hash_field(String::kEmptyHashField);
5416 ASSERT_EQ(size, HeapObject::cast(result)->Size());
5421 MaybeObject* Heap::AllocateJSArray(
5422 ElementsKind elements_kind,
5423 PretenureFlag pretenure) {
5424 Context* native_context = isolate()->context()->native_context();
5425 JSFunction* array_function = native_context->array_function();
5426 Map* map = array_function->initial_map();
5427 Map* transition_map = isolate()->get_initial_js_array_map(elements_kind);
5428 if (transition_map != NULL) map = transition_map;
5429 return AllocateJSObjectFromMap(map, pretenure);
5433 MaybeObject* Heap::AllocateJSArrayWithAllocationSite(
5434 ElementsKind elements_kind,
5435 Handle<AllocationSite> allocation_site) {
5436 Context* native_context = isolate()->context()->native_context();
5437 JSFunction* array_function = native_context->array_function();
5438 Map* map = array_function->initial_map();
5439 Object* maybe_map_array = native_context->js_array_maps();
5440 if (!maybe_map_array->IsUndefined()) {
5441 Object* maybe_transitioned_map =
5442 FixedArray::cast(maybe_map_array)->get(elements_kind);
5443 if (!maybe_transitioned_map->IsUndefined()) {
5444 map = Map::cast(maybe_transitioned_map);
5447 return AllocateJSObjectFromMapWithAllocationSite(map, allocation_site);
5451 MaybeObject* Heap::AllocateEmptyFixedArray() {
5452 int size = FixedArray::SizeFor(0);
5454 { MaybeObject* maybe_result =
5455 AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE);
5456 if (!maybe_result->ToObject(&result)) return maybe_result;
5458 // Initialize the object.
5459 reinterpret_cast<FixedArray*>(result)->set_map_no_write_barrier(
5460 fixed_array_map());
5461 reinterpret_cast<FixedArray*>(result)->set_length(0);
5466 MaybeObject* Heap::AllocateEmptyExternalArray(ExternalArrayType array_type) {
5467 return AllocateExternalArray(0, array_type, NULL, TENURED);
5471 MaybeObject* Heap::AllocateRawFixedArray(int length) {
5472 if (length < 0 || length > FixedArray::kMaxLength) {
5473 return Failure::OutOfMemoryException(0xd);
5476 // Use the general function if we're forced to always allocate.
5477 if (always_allocate()) return AllocateFixedArray(length, TENURED);
5478 // Allocate the raw data for a fixed array.
5479 int size = FixedArray::SizeFor(length);
5480 return size <= Page::kMaxNonCodeHeapObjectSize
5481 ? new_space_.AllocateRaw(size)
5482 : lo_space_->AllocateRaw(size, NOT_EXECUTABLE);
5486 MaybeObject* Heap::CopyFixedArrayWithMap(FixedArray* src, Map* map) {
5487 int len = src->length();
5489 { MaybeObject* maybe_obj = AllocateRawFixedArray(len);
5490 if (!maybe_obj->ToObject(&obj)) return maybe_obj;
5492 if (InNewSpace(obj)) {
5493 HeapObject* dst = HeapObject::cast(obj);
5494 dst->set_map_no_write_barrier(map);
5495 CopyBlock(dst->address() + kPointerSize,
5496 src->address() + kPointerSize,
5497 FixedArray::SizeFor(len) - kPointerSize);
5500 HeapObject::cast(obj)->set_map_no_write_barrier(map);
5501 FixedArray* result = FixedArray::cast(obj);
5502 result->set_length(len);
5505 DisallowHeapAllocation no_gc;
5506 WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
5507 for (int i = 0; i < len; i++) result->set(i, src->get(i), mode);
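// Note added for clarity (not part of the original source): the branch above
// takes two different paths on purpose. When the copy lands in new space,
// stores into it never need to be recorded, so the body can be copied with a
// raw CopyBlock and no write barrier. When it lands in an old space, each
// element is stored with set() under the WriteBarrierMode obtained above so
// that any old-to-new pointers still reach the store buffer.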
5512 MaybeObject* Heap::CopyFixedDoubleArrayWithMap(FixedDoubleArray* src,
5514 int len = src->length();
5516 { MaybeObject* maybe_obj = AllocateRawFixedDoubleArray(len, NOT_TENURED);
5517 if (!maybe_obj->ToObject(&obj)) return maybe_obj;
5519 HeapObject* dst = HeapObject::cast(obj);
5520 dst->set_map_no_write_barrier(map);
5521 CopyBlock(
5522 dst->address() + FixedDoubleArray::kLengthOffset,
5523 src->address() + FixedDoubleArray::kLengthOffset,
5524 FixedDoubleArray::SizeFor(len) - FixedDoubleArray::kLengthOffset);
5529 MaybeObject* Heap::AllocateFixedArray(int length) {
5530 ASSERT(length >= 0);
5531 if (length == 0) return empty_fixed_array();
5533 { MaybeObject* maybe_result = AllocateRawFixedArray(length);
5534 if (!maybe_result->ToObject(&result)) return maybe_result;
5536 // Initialize header.
5537 FixedArray* array = reinterpret_cast<FixedArray*>(result);
5538 array->set_map_no_write_barrier(fixed_array_map());
5539 array->set_length(length);
5541 ASSERT(!InNewSpace(undefined_value()));
5542 MemsetPointer(array->data_start(), undefined_value(), length);
5547 MaybeObject* Heap::AllocateRawFixedArray(int length, PretenureFlag pretenure) {
5548 if (length < 0 || length > FixedArray::kMaxLength) {
5549 return Failure::OutOfMemoryException(0xe);
5551 int size = FixedArray::SizeFor(length);
5552 AllocationSpace space =
5553 (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
5554 AllocationSpace retry_space = OLD_POINTER_SPACE;
5556 if (size > Page::kMaxNonCodeHeapObjectSize) {
5557 // Allocate in large object space; the retry space will be ignored.
5561 return AllocateRaw(size, space, retry_space);
5565 MUST_USE_RESULT static MaybeObject* AllocateFixedArrayWithFiller(
5568 PretenureFlag pretenure,
5570 ASSERT(length >= 0);
5571 ASSERT(heap->empty_fixed_array()->IsFixedArray());
5572 if (length == 0) return heap->empty_fixed_array();
5574 ASSERT(!heap->InNewSpace(filler));
5576 { MaybeObject* maybe_result = heap->AllocateRawFixedArray(length, pretenure);
5577 if (!maybe_result->ToObject(&result)) return maybe_result;
5580 HeapObject::cast(result)->set_map_no_write_barrier(heap->fixed_array_map());
5581 FixedArray* array = FixedArray::cast(result);
5582 array->set_length(length);
5583 MemsetPointer(array->data_start(), filler, length);
5588 MaybeObject* Heap::AllocateFixedArray(int length, PretenureFlag pretenure) {
5589 return AllocateFixedArrayWithFiller(this,
5596 MaybeObject* Heap::AllocateFixedArrayWithHoles(int length,
5597 PretenureFlag pretenure) {
5598 return AllocateFixedArrayWithFiller(this,
5605 MaybeObject* Heap::AllocateUninitializedFixedArray(int length) {
5606 if (length == 0) return empty_fixed_array();
5609 { MaybeObject* maybe_obj = AllocateRawFixedArray(length);
5610 if (!maybe_obj->ToObject(&obj)) return maybe_obj;
5613 reinterpret_cast<FixedArray*>(obj)->set_map_no_write_barrier(
5614 fixed_array_map());
5615 FixedArray::cast(obj)->set_length(length);
5620 MaybeObject* Heap::AllocateEmptyFixedDoubleArray() {
5621 int size = FixedDoubleArray::SizeFor(0);
5623 { MaybeObject* maybe_result =
5624 AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE);
5625 if (!maybe_result->ToObject(&result)) return maybe_result;
5627 // Initialize the object.
5628 reinterpret_cast<FixedDoubleArray*>(result)->set_map_no_write_barrier(
5629 fixed_double_array_map());
5630 reinterpret_cast<FixedDoubleArray*>(result)->set_length(0);
5635 MaybeObject* Heap::AllocateUninitializedFixedDoubleArray(
5637 PretenureFlag pretenure) {
5638 if (length == 0) return empty_fixed_array();
5640 Object* elements_object;
5641 MaybeObject* maybe_obj = AllocateRawFixedDoubleArray(length, pretenure);
5642 if (!maybe_obj->ToObject(&elements_object)) return maybe_obj;
5643 FixedDoubleArray* elements =
5644 reinterpret_cast<FixedDoubleArray*>(elements_object);
5646 elements->set_map_no_write_barrier(fixed_double_array_map());
5647 elements->set_length(length);
5652 MaybeObject* Heap::AllocateFixedDoubleArrayWithHoles(
5654 PretenureFlag pretenure) {
5655 if (length == 0) return empty_fixed_array();
5657 Object* elements_object;
5658 MaybeObject* maybe_obj = AllocateRawFixedDoubleArray(length, pretenure);
5659 if (!maybe_obj->ToObject(&elements_object)) return maybe_obj;
5660 FixedDoubleArray* elements =
5661 reinterpret_cast<FixedDoubleArray*>(elements_object);
5663 for (int i = 0; i < length; ++i) {
5664 elements->set_the_hole(i);
5667 elements->set_map_no_write_barrier(fixed_double_array_map());
5668 elements->set_length(length);
5673 MaybeObject* Heap::AllocateRawFixedDoubleArray(int length,
5674 PretenureFlag pretenure) {
5675 if (length < 0 || length > FixedDoubleArray::kMaxLength) {
5676 return Failure::OutOfMemoryException(0xf);
5678 int size = FixedDoubleArray::SizeFor(length);
5679 AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
5680 AllocationSpace retry_space = OLD_DATA_SPACE;
5682 #ifndef V8_HOST_ARCH_64_BIT
5683 size += kPointerSize;
5686 if (size > Page::kMaxNonCodeHeapObjectSize) {
5687 // Allocate in large object space; the retry space will be ignored.
5692 { MaybeObject* maybe_object = AllocateRaw(size, space, retry_space);
5693 if (!maybe_object->To<HeapObject>(&object)) return maybe_object;
5696 return EnsureDoubleAligned(this, object, size);
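// Illustrative sketch only (not part of the original source): on 32-bit hosts
// kPointerSize is 4 but doubles need 8-byte alignment, so one extra word is
// requested above and EnsureDoubleAligned can then shift the payload onto an
// 8-byte boundary, turning the spare word into a one-word filler. The
// alignment step itself is plain rounding (8 here stands for kDoubleSize):
static inline uintptr_t ExampleRoundUpToDoubleAlignment(uintptr_t address) {
  return (address + 7) & ~static_cast<uintptr_t>(7);
}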
5700 MaybeObject* Heap::AllocateHashTable(int length, PretenureFlag pretenure) {
5702 { MaybeObject* maybe_result = AllocateFixedArray(length, pretenure);
5703 if (!maybe_result->ToObject(&result)) return maybe_result;
5705 reinterpret_cast<HeapObject*>(result)->set_map_no_write_barrier(
5706 hash_table_map());
5707 ASSERT(result->IsHashTable());
5712 MaybeObject* Heap::AllocateSymbol() {
5713 // Statically ensure that it is safe to allocate symbols in paged spaces.
5714 STATIC_ASSERT(Symbol::kSize <= Page::kNonCodeObjectAreaSize);
5717 MaybeObject* maybe =
5718 AllocateRaw(Symbol::kSize, OLD_POINTER_SPACE, OLD_POINTER_SPACE);
5719 if (!maybe->ToObject(&result)) return maybe;
5721 HeapObject::cast(result)->set_map_no_write_barrier(symbol_map());
5723 // Generate a random hash value.
5724 int hash;
5725 int attempts = 0;
5726 do {
5727 hash = V8::RandomPrivate(isolate()) & Name::kHashBitMask;
5728 attempts++;
5729 } while (hash == 0 && attempts < 30);
5730 if (hash == 0) hash = 1; // never return 0
5732 Symbol::cast(result)->set_hash_field(
5733 Name::kIsNotArrayIndexMask | (hash << Name::kHashShift));
5734 Symbol::cast(result)->set_name(undefined_value());
5736 ASSERT(result->IsSymbol());
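// Note added for clarity (not part of the original source): the hash field
// layout is shared with strings, so the freshly generated hash is shifted by
// Name::kHashShift to leave room for the flag bits, and kIsNotArrayIndexMask
// is set so the field can never be mistaken for a cached array index.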
5741 MaybeObject* Heap::AllocateNativeContext() {
5743 { MaybeObject* maybe_result =
5744 AllocateFixedArray(Context::NATIVE_CONTEXT_SLOTS);
5745 if (!maybe_result->ToObject(&result)) return maybe_result;
5747 Context* context = reinterpret_cast<Context*>(result);
5748 context->set_map_no_write_barrier(native_context_map());
5749 context->set_js_array_maps(undefined_value());
5750 ASSERT(context->IsNativeContext());
5751 ASSERT(result->IsContext());
5756 MaybeObject* Heap::AllocateGlobalContext(JSFunction* function,
5757 ScopeInfo* scope_info) {
5759 { MaybeObject* maybe_result =
5760 AllocateFixedArray(scope_info->ContextLength(), TENURED);
5761 if (!maybe_result->ToObject(&result)) return maybe_result;
5763 Context* context = reinterpret_cast<Context*>(result);
5764 context->set_map_no_write_barrier(global_context_map());
5765 context->set_closure(function);
5766 context->set_previous(function->context());
5767 context->set_extension(scope_info);
5768 context->set_global_object(function->context()->global_object());
5769 ASSERT(context->IsGlobalContext());
5770 ASSERT(result->IsContext());
5775 MaybeObject* Heap::AllocateModuleContext(ScopeInfo* scope_info) {
5777 { MaybeObject* maybe_result =
5778 AllocateFixedArray(scope_info->ContextLength(), TENURED);
5779 if (!maybe_result->ToObject(&result)) return maybe_result;
5781 Context* context = reinterpret_cast<Context*>(result);
5782 context->set_map_no_write_barrier(module_context_map());
5783 // Instance link will be set later.
5784 context->set_extension(Smi::FromInt(0));
5789 MaybeObject* Heap::AllocateFunctionContext(int length, JSFunction* function) {
5790 ASSERT(length >= Context::MIN_CONTEXT_SLOTS);
5792 { MaybeObject* maybe_result = AllocateFixedArray(length);
5793 if (!maybe_result->ToObject(&result)) return maybe_result;
5795 Context* context = reinterpret_cast<Context*>(result);
5796 context->set_map_no_write_barrier(function_context_map());
5797 context->set_closure(function);
5798 context->set_previous(function->context());
5799 context->set_extension(Smi::FromInt(0));
5800 context->set_global_object(function->context()->global_object());
5805 MaybeObject* Heap::AllocateCatchContext(JSFunction* function,
5808 Object* thrown_object) {
5809 STATIC_ASSERT(Context::MIN_CONTEXT_SLOTS == Context::THROWN_OBJECT_INDEX);
5811 { MaybeObject* maybe_result =
5812 AllocateFixedArray(Context::MIN_CONTEXT_SLOTS + 1);
5813 if (!maybe_result->ToObject(&result)) return maybe_result;
5815 Context* context = reinterpret_cast<Context*>(result);
5816 context->set_map_no_write_barrier(catch_context_map());
5817 context->set_closure(function);
5818 context->set_previous(previous);
5819 context->set_extension(name);
5820 context->set_global_object(previous->global_object());
5821 context->set(Context::THROWN_OBJECT_INDEX, thrown_object);
5826 MaybeObject* Heap::AllocateWithContext(JSFunction* function,
5828 JSReceiver* extension) {
5830 { MaybeObject* maybe_result = AllocateFixedArray(Context::MIN_CONTEXT_SLOTS);
5831 if (!maybe_result->ToObject(&result)) return maybe_result;
5833 Context* context = reinterpret_cast<Context*>(result);
5834 context->set_map_no_write_barrier(with_context_map());
5835 context->set_closure(function);
5836 context->set_previous(previous);
5837 context->set_extension(extension);
5838 context->set_global_object(previous->global_object());
5843 MaybeObject* Heap::AllocateBlockContext(JSFunction* function,
5845 ScopeInfo* scope_info) {
5847 { MaybeObject* maybe_result =
5848 AllocateFixedArrayWithHoles(scope_info->ContextLength());
5849 if (!maybe_result->ToObject(&result)) return maybe_result;
5851 Context* context = reinterpret_cast<Context*>(result);
5852 context->set_map_no_write_barrier(block_context_map());
5853 context->set_closure(function);
5854 context->set_previous(previous);
5855 context->set_extension(scope_info);
5856 context->set_global_object(previous->global_object());
5861 MaybeObject* Heap::AllocateScopeInfo(int length) {
5862 FixedArray* scope_info;
5863 MaybeObject* maybe_scope_info = AllocateFixedArray(length, TENURED);
5864 if (!maybe_scope_info->To(&scope_info)) return maybe_scope_info;
5865 scope_info->set_map_no_write_barrier(scope_info_map());
5870 MaybeObject* Heap::AllocateExternal(void* value) {
5872 { MaybeObject* maybe_result = AllocateForeign(static_cast<Address>(value));
5873 if (!maybe_result->To(&foreign)) return maybe_result;
5876 { MaybeObject* maybe_result = AllocateJSObjectFromMap(external_map());
5877 if (!maybe_result->To(&external)) return maybe_result;
5879 external->SetInternalField(0, foreign);
5884 MaybeObject* Heap::AllocateStruct(InstanceType type) {
5887 #define MAKE_CASE(NAME, Name, name) \
5888 case NAME##_TYPE: map = name##_map(); break;
5889 STRUCT_LIST(MAKE_CASE)
5893 return Failure::InternalError();
5895 int size = map->instance_size();
5896 AllocationSpace space =
5897 (size > Page::kMaxNonCodeHeapObjectSize) ? LO_SPACE : OLD_POINTER_SPACE;
5899 { MaybeObject* maybe_result = Allocate(map, space);
5900 if (!maybe_result->ToObject(&result)) return maybe_result;
5902 Struct::cast(result)->InitializeBody(size);
5907 bool Heap::IsHeapIterable() {
5908 return (!old_pointer_space()->was_swept_conservatively() &&
5909 !old_data_space()->was_swept_conservatively());
5913 void Heap::EnsureHeapIsIterable() {
5914 ASSERT(AllowHeapAllocation::IsAllowed());
5915 if (!IsHeapIterable()) {
5916 CollectAllGarbage(kMakeHeapIterableMask, "Heap::EnsureHeapIsIterable");
5918 ASSERT(IsHeapIterable());
5922 void Heap::AdvanceIdleIncrementalMarking(intptr_t step_size) {
5923 incremental_marking()->Step(step_size,
5924 IncrementalMarking::NO_GC_VIA_STACK_GUARD);
5926 if (incremental_marking()->IsComplete()) {
5927 bool uncommit = false;
5928 if (gc_count_at_last_idle_gc_ == gc_count_) {
5929 // No GC since the last full GC; the mutator is probably not active.
5930 isolate_->compilation_cache()->Clear();
5933 CollectAllGarbage(kNoGCFlags, "idle notification: finalize incremental");
5934 mark_sweeps_since_idle_round_started_++;
5935 gc_count_at_last_idle_gc_ = gc_count_;
5937 new_space_.Shrink();
5938 UncommitFromSpace();
5944 bool Heap::IdleNotification(int hint) {
5945 // Hints greater than this value indicate that
5946 // the embedder is requesting a lot of GC work.
5947 const int kMaxHint = 1000;
5948 const int kMinHintForIncrementalMarking = 10;
5949 // Minimal hint that allows a full GC to be performed.
5950 const int kMinHintForFullGC = 100;
5951 intptr_t size_factor = Min(Max(hint, 20), kMaxHint) / 4;
5952 // The size factor is in range [5..250]. The numbers here are chosen from
5953 // experiments. If you change them, make sure to test with
5954 // chrome/performance_ui_tests --gtest_filter="GeneralMixMemoryTest.*
5955 intptr_t step_size =
5956 size_factor * IncrementalMarking::kAllocatedThreshold;
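// Worked example added for clarity (not part of the original source):
//   hint <= 20   -> size_factor = 20 / 4   = 5
//   hint  = 400  -> size_factor = 400 / 4  = 100
//   hint >= 1000 -> size_factor = 1000 / 4 = 250
// so step_size scales linearly with the hint, between 5x and 250x of
// IncrementalMarking::kAllocatedThreshold.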
5958 if (contexts_disposed_ > 0) {
5959 if (hint >= kMaxHint) {
5960 // The embedder is requesting a lot of GC work after context disposal,
5961 // we age inline caches so that they don't keep objects from
5962 // the old context alive.
5965 int mark_sweep_time = Min(TimeMarkSweepWouldTakeInMs(), 1000);
5966 if (hint >= mark_sweep_time && !FLAG_expose_gc &&
5967 incremental_marking()->IsStopped()) {
5968 HistogramTimerScope scope(isolate_->counters()->gc_context());
5969 CollectAllGarbage(kReduceMemoryFootprintMask,
5970 "idle notification: contexts disposed");
5972 AdvanceIdleIncrementalMarking(step_size);
5973 contexts_disposed_ = 0;
5975 // After context disposal there is likely a lot of garbage remaining, reset
5976 // the idle notification counters in order to trigger more incremental GCs
5977 // on subsequent idle notifications.
5982 if (!FLAG_incremental_marking || FLAG_expose_gc || Serializer::enabled()) {
5983 return IdleGlobalGC();
5986 // We do small chunks of GC work in each IdleNotification: perform a round
5987 // of incremental GCs and after that wait until the mutator creates enough
5988 // garbage to justify a new round.
5989 // An incremental GC progresses as follows:
5990 // 1. many incremental marking steps,
5991 // 2. one old space mark-sweep-compact,
5992 // 3. many lazy sweep steps.
5993 // Use mark-sweep-compact events to count incremental GCs in a round.
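// Illustrative usage sketch (not part of the original source), assuming the
// public embedder entry point of this V8 version, v8::V8::IdleNotification:
//
//   // Keep notifying while the embedder is idle; V8 returns true once the
//   // current idle round has finished and no further idle work is useful.
//   while (!v8::V8::IdleNotification(100)) {
//     if (EmbedderHasWorkToDo()) break;  // hypothetical embedder check
//   }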
5995 if (incremental_marking()->IsStopped()) {
5996 if (!mark_compact_collector()->AreSweeperThreadsActivated() &&
5997 !IsSweepingComplete() &&
5998 !AdvanceSweepers(static_cast<int>(step_size))) {
6003 if (mark_sweeps_since_idle_round_started_ >= kMaxMarkSweepsInIdleRound) {
6004 if (EnoughGarbageSinceLastIdleRound()) {
6011 int remaining_mark_sweeps = kMaxMarkSweepsInIdleRound -
6012 mark_sweeps_since_idle_round_started_;
6014 if (incremental_marking()->IsStopped()) {
6015 // If there are no more than two GCs left in this idle round and we are
6016 // allowed to do a full GC, then make those GCs full in order to compact
6017 // the code space.
6018 // TODO(ulan): Once we enable code compaction for incremental marking,
6019 // we can get rid of this special case and always start incremental marking.
6020 if (remaining_mark_sweeps <= 2 && hint >= kMinHintForFullGC) {
6021 CollectAllGarbage(kReduceMemoryFootprintMask,
6022 "idle notification: finalize idle round");
6023 mark_sweeps_since_idle_round_started_++;
6024 } else if (hint > kMinHintForIncrementalMarking) {
6025 incremental_marking()->Start();
6028 if (!incremental_marking()->IsStopped() &&
6029 hint > kMinHintForIncrementalMarking) {
6030 AdvanceIdleIncrementalMarking(step_size);
6033 if (mark_sweeps_since_idle_round_started_ >= kMaxMarkSweepsInIdleRound) {
6042 bool Heap::IdleGlobalGC() {
6043 static const int kIdlesBeforeScavenge = 4;
6044 static const int kIdlesBeforeMarkSweep = 7;
6045 static const int kIdlesBeforeMarkCompact = 8;
6046 static const int kMaxIdleCount = kIdlesBeforeMarkCompact + 1;
6047 static const unsigned int kGCsBetweenCleanup = 4;
6049 if (!last_idle_notification_gc_count_init_) {
6050 last_idle_notification_gc_count_ = gc_count_;
6051 last_idle_notification_gc_count_init_ = true;
6054 bool uncommit = true;
6055 bool finished = false;
6057 // Reset the number of idle notifications received when a number of
6058 // GCs have taken place. This allows another round of cleanup based
6059 // on idle notifications if enough work has been carried out to
6060 // provoke a number of garbage collections.
6061 if (gc_count_ - last_idle_notification_gc_count_ < kGCsBetweenCleanup) {
6062 number_idle_notifications_ =
6063 Min(number_idle_notifications_ + 1, kMaxIdleCount);
6065 number_idle_notifications_ = 0;
6066 last_idle_notification_gc_count_ = gc_count_;
6069 if (number_idle_notifications_ == kIdlesBeforeScavenge) {
6070 CollectGarbage(NEW_SPACE, "idle notification");
6071 new_space_.Shrink();
6072 last_idle_notification_gc_count_ = gc_count_;
6073 } else if (number_idle_notifications_ == kIdlesBeforeMarkSweep) {
6074 // Before doing the mark-sweep collections we clear the
6075 // compilation cache to avoid hanging on to source code and
6076 // generated code for cached functions.
6077 isolate_->compilation_cache()->Clear();
6079 CollectAllGarbage(kReduceMemoryFootprintMask, "idle notification");
6080 new_space_.Shrink();
6081 last_idle_notification_gc_count_ = gc_count_;
6083 } else if (number_idle_notifications_ == kIdlesBeforeMarkCompact) {
6084 CollectAllGarbage(kReduceMemoryFootprintMask, "idle notification");
6085 new_space_.Shrink();
6086 last_idle_notification_gc_count_ = gc_count_;
6087 number_idle_notifications_ = 0;
6089 } else if (number_idle_notifications_ > kIdlesBeforeMarkCompact) {
6090 // If we have received more than kIdlesBeforeMarkCompact idle
6091 // notifications we do not perform any cleanup because we don't
6092 // expect to gain much by doing so.
6096 if (uncommit) UncommitFromSpace();
6104 void Heap::Print() {
6105 if (!HasBeenSetUp()) return;
6106 isolate()->PrintStack(stdout);
6107 AllSpaces spaces(this);
6108 for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
6114 void Heap::ReportCodeStatistics(const char* title) {
6115 PrintF(">>>>>> Code Stats (%s) >>>>>>\n", title);
6116 PagedSpace::ResetCodeStatistics();
6117 // We do not look for code in new space, map space, or old space. If code
6118 // somehow ends up in those spaces, we would miss it here.
6119 code_space_->CollectCodeStatistics();
6120 lo_space_->CollectCodeStatistics();
6121 PagedSpace::ReportCodeStatistics();
6125 // This function expects that NewSpace's allocated objects histogram is
6126 // populated (via a call to CollectStatistics or else as a side effect of a
6127 // just-completed scavenge collection).
6128 void Heap::ReportHeapStatistics(const char* title) {
6130 PrintF(">>>>>> =============== %s (%d) =============== >>>>>>\n",
6132 PrintF("old_generation_allocation_limit_ %" V8_PTR_PREFIX "d\n",
6133 old_generation_allocation_limit_);
6136 PrintF("Number of handles : %d\n", HandleScope::NumberOfHandles(isolate_));
6137 isolate_->global_handles()->PrintStats();
6140 PrintF("Heap statistics : ");
6141 isolate_->memory_allocator()->ReportStatistics();
6142 PrintF("To space : ");
6143 new_space_.ReportStatistics();
6144 PrintF("Old pointer space : ");
6145 old_pointer_space_->ReportStatistics();
6146 PrintF("Old data space : ");
6147 old_data_space_->ReportStatistics();
6148 PrintF("Code space : ");
6149 code_space_->ReportStatistics();
6150 PrintF("Map space : ");
6151 map_space_->ReportStatistics();
6152 PrintF("Cell space : ");
6153 cell_space_->ReportStatistics();
6154 PrintF("PropertyCell space : ");
6155 property_cell_space_->ReportStatistics();
6156 PrintF("Large object space : ");
6157 lo_space_->ReportStatistics();
6158 PrintF(">>>>>> ========================================= >>>>>>\n");
6163 bool Heap::Contains(HeapObject* value) {
6164 return Contains(value->address());
6168 bool Heap::Contains(Address addr) {
6169 if (OS::IsOutsideAllocatedSpace(addr)) return false;
6170 return HasBeenSetUp() &&
6171 (new_space_.ToSpaceContains(addr) ||
6172 old_pointer_space_->Contains(addr) ||
6173 old_data_space_->Contains(addr) ||
6174 code_space_->Contains(addr) ||
6175 map_space_->Contains(addr) ||
6176 cell_space_->Contains(addr) ||
6177 property_cell_space_->Contains(addr) ||
6178 lo_space_->SlowContains(addr));
6182 bool Heap::InSpace(HeapObject* value, AllocationSpace space) {
6183 return InSpace(value->address(), space);
6187 bool Heap::InSpace(Address addr, AllocationSpace space) {
6188 if (OS::IsOutsideAllocatedSpace(addr)) return false;
6189 if (!HasBeenSetUp()) return false;
6193 return new_space_.ToSpaceContains(addr);
6194 case OLD_POINTER_SPACE:
6195 return old_pointer_space_->Contains(addr);
6196 case OLD_DATA_SPACE:
6197 return old_data_space_->Contains(addr);
6199 return code_space_->Contains(addr);
6201 return map_space_->Contains(addr);
6203 return cell_space_->Contains(addr);
6204 case PROPERTY_CELL_SPACE:
6205 return property_cell_space_->Contains(addr);
6207 return lo_space_->SlowContains(addr);
6215 void Heap::Verify() {
6216 CHECK(HasBeenSetUp());
6218 store_buffer()->Verify();
6220 VerifyPointersVisitor visitor;
6221 IterateRoots(&visitor, VISIT_ONLY_STRONG);
6223 new_space_.Verify();
6225 old_pointer_space_->Verify(&visitor);
6226 map_space_->Verify(&visitor);
6228 VerifyPointersVisitor no_dirty_regions_visitor;
6229 old_data_space_->Verify(&no_dirty_regions_visitor);
6230 code_space_->Verify(&no_dirty_regions_visitor);
6231 cell_space_->Verify(&no_dirty_regions_visitor);
6232 property_cell_space_->Verify(&no_dirty_regions_visitor);
6234 lo_space_->Verify();
6239 MaybeObject* Heap::InternalizeUtf8String(Vector<const char> string) {
6240 Object* result = NULL;
6242 { MaybeObject* maybe_new_table =
6243 string_table()->LookupUtf8String(string, &result);
6244 if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
6246 // Can't use set_string_table because StringTable::cast knows that
6247 // StringTable is a singleton and checks for identity.
6248 roots_[kStringTableRootIndex] = new_table;
6249 ASSERT(result != NULL);
6254 MaybeObject* Heap::InternalizeOneByteString(Vector<const uint8_t> string) {
6255 Object* result = NULL;
6257 { MaybeObject* maybe_new_table =
6258 string_table()->LookupOneByteString(string, &result);
6259 if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
6261 // Can't use set_string_table because StringTable::cast knows that
6262 // StringTable is a singleton and checks for identity.
6263 roots_[kStringTableRootIndex] = new_table;
6264 ASSERT(result != NULL);
6269 MaybeObject* Heap::InternalizeOneByteString(Handle<SeqOneByteString> string,
6272 Object* result = NULL;
6274 { MaybeObject* maybe_new_table =
6275 string_table()->LookupSubStringOneByteString(string,
6279 if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
6281 // Can't use set_string_table because StringTable::cast knows that
6282 // StringTable is a singleton and checks for identity.
6283 roots_[kStringTableRootIndex] = new_table;
6284 ASSERT(result != NULL);
6289 MaybeObject* Heap::InternalizeTwoByteString(Vector<const uc16> string) {
6290 Object* result = NULL;
6292 { MaybeObject* maybe_new_table =
6293 string_table()->LookupTwoByteString(string, &result);
6294 if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
6296 // Can't use set_string_table because StringTable::cast knows that
6297 // StringTable is a singleton and checks for identity.
6298 roots_[kStringTableRootIndex] = new_table;
6299 ASSERT(result != NULL);
6304 MaybeObject* Heap::InternalizeString(String* string) {
6305 if (string->IsInternalizedString()) return string;
6306 Object* result = NULL;
6308 { MaybeObject* maybe_new_table =
6309 string_table()->LookupString(string, &result);
6310 if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
6312 // Can't use set_string_table because StringTable::cast knows that
6313 // StringTable is a singleton and checks for identity.
6314 roots_[kStringTableRootIndex] = new_table;
6315 ASSERT(result != NULL);
6320 bool Heap::InternalizeStringIfExists(String* string, String** result) {
6321 if (string->IsInternalizedString()) {
6325 return string_table()->LookupStringIfExists(string, result);
6329 void Heap::ZapFromSpace() {
6330 NewSpacePageIterator it(new_space_.FromSpaceStart(),
6331 new_space_.FromSpaceEnd());
6332 while (it.has_next()) {
6333 NewSpacePage* page = it.next();
6334 for (Address cursor = page->area_start(), limit = page->area_end();
6336 cursor += kPointerSize) {
6337 Memory::Address_at(cursor) = kFromSpaceZapValue;
6343 void Heap::IterateAndMarkPointersToFromSpace(Address start,
6345 ObjectSlotCallback callback) {
6346 Address slot_address = start;
6348 // We are not collecting slots on new space objects during mutation
6349 // thus we have to scan for pointers to evacuation candidates when we
6350 // promote objects. But we should not record any slots in non-black
6351 // objects. A grey object's slots will be rescanned.
6352 // A white object might not survive until the end of the collection, so
6353 // recording its slots would be a violation of the invariant.
6354 bool record_slots = false;
6355 if (incremental_marking()->IsCompacting()) {
6356 MarkBit mark_bit = Marking::MarkBitFrom(HeapObject::FromAddress(start));
6357 record_slots = Marking::IsBlack(mark_bit);
6360 while (slot_address < end) {
6361 Object** slot = reinterpret_cast<Object**>(slot_address);
6362 Object* object = *slot;
6363 // If the store buffer becomes overfull we mark pages as being exempt from
6364 // the store buffer. These pages are scanned to find pointers that point
6365 // to the new space. In that case we may hit newly promoted objects and
6366 // fix the pointers before the promotion queue gets to them. Thus the 'if'.
6367 if (object->IsHeapObject()) {
6368 if (Heap::InFromSpace(object)) {
6369 callback(reinterpret_cast<HeapObject**>(slot),
6370 HeapObject::cast(object));
6371 Object* new_object = *slot;
6372 if (InNewSpace(new_object)) {
6373 SLOW_ASSERT(Heap::InToSpace(new_object));
6374 SLOW_ASSERT(new_object->IsHeapObject());
6375 store_buffer_.EnterDirectlyIntoStoreBuffer(
6376 reinterpret_cast<Address>(slot));
6378 SLOW_ASSERT(!MarkCompactCollector::IsOnEvacuationCandidate(new_object));
6379 } else if (record_slots &&
6380 MarkCompactCollector::IsOnEvacuationCandidate(object)) {
6381 mark_compact_collector()->RecordSlot(slot, slot, object);
6384 slot_address += kPointerSize;
6390 typedef bool (*CheckStoreBufferFilter)(Object** addr);
6393 bool IsAMapPointerAddress(Object** addr) {
6394 uintptr_t a = reinterpret_cast<uintptr_t>(addr);
6395 int mod = a % Map::kSize;
6396 return mod >= Map::kPointerFieldsBeginOffset &&
6397 mod < Map::kPointerFieldsEndOffset;
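// Note added for clarity (not part of the original source): every Map object
// has size Map::kSize, so within map space the offset of a given field
// repeats with that period. The check above therefore accepts exactly those
// addresses whose offset modulo Map::kSize falls inside the pointer-field
// range [kPointerFieldsBeginOffset, kPointerFieldsEndOffset).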
6401 bool EverythingsAPointer(Object** addr) {
6406 static void CheckStoreBuffer(Heap* heap,
6409 Object**** store_buffer_position,
6410 Object*** store_buffer_top,
6411 CheckStoreBufferFilter filter,
6412 Address special_garbage_start,
6413 Address special_garbage_end) {
6414 Map* free_space_map = heap->free_space_map();
6415 for ( ; current < limit; current++) {
6416 Object* o = *current;
6417 Address current_address = reinterpret_cast<Address>(current);
6419 if (o == free_space_map) {
6420 Address current_address = reinterpret_cast<Address>(current);
6421 FreeSpace* free_space =
6422 FreeSpace::cast(HeapObject::FromAddress(current_address));
6423 int skip = free_space->Size();
6424 ASSERT(current_address + skip <= reinterpret_cast<Address>(limit));
6426 current_address += skip - kPointerSize;
6427 current = reinterpret_cast<Object**>(current_address);
6430 // Skip the current linear allocation space between top and limit, which is
6431 // not marked with the free space map but can contain junk.
6432 if (current_address == special_garbage_start &&
6433 special_garbage_end != special_garbage_start) {
6434 current_address = special_garbage_end - kPointerSize;
6435 current = reinterpret_cast<Object**>(current_address);
6438 if (!(*filter)(current)) continue;
6439 ASSERT(current_address < special_garbage_start ||
6440 current_address >= special_garbage_end);
6441 ASSERT(reinterpret_cast<uintptr_t>(o) != kFreeListZapValue);
6442 // We have to check that the pointer does not point into new space
6443 // without trying to cast it to a heap object since the hash field of
6444 // a string can contain values like 1 and 3 which are tagged null
6445 // pointers.
6446 if (!heap->InNewSpace(o)) continue;
6447 while (**store_buffer_position < current &&
6448 *store_buffer_position < store_buffer_top) {
6449 (*store_buffer_position)++;
6451 if (**store_buffer_position != current ||
6452 *store_buffer_position == store_buffer_top) {
6453 Object** obj_start = current;
6454 while (!(*obj_start)->IsMap()) obj_start--;
6461 // Check that the store buffer contains all intergenerational pointers by
6462 // scanning a page and ensuring that all pointers to young space are in the
6463 // store buffer.
6464 void Heap::OldPointerSpaceCheckStoreBuffer() {
6465 OldSpace* space = old_pointer_space();
6466 PageIterator pages(space);
6468 store_buffer()->SortUniq();
6470 while (pages.has_next()) {
6471 Page* page = pages.next();
6472 Object** current = reinterpret_cast<Object**>(page->area_start());
6474 Address end = page->area_end();
6476 Object*** store_buffer_position = store_buffer()->Start();
6477 Object*** store_buffer_top = store_buffer()->Top();
6479 Object** limit = reinterpret_cast<Object**>(end);
6480 CheckStoreBuffer(this,
6483 &store_buffer_position,
6485 &EverythingsAPointer,
6492 void Heap::MapSpaceCheckStoreBuffer() {
6493 MapSpace* space = map_space();
6494 PageIterator pages(space);
6496 store_buffer()->SortUniq();
6498 while (pages.has_next()) {
6499 Page* page = pages.next();
6500 Object** current = reinterpret_cast<Object**>(page->area_start());
6502 Address end = page->area_end();
6504 Object*** store_buffer_position = store_buffer()->Start();
6505 Object*** store_buffer_top = store_buffer()->Top();
6507 Object** limit = reinterpret_cast<Object**>(end);
6508 CheckStoreBuffer(this,
6511 &store_buffer_position,
6513 &IsAMapPointerAddress,
6520 void Heap::LargeObjectSpaceCheckStoreBuffer() {
6521 LargeObjectIterator it(lo_space());
6522 for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
6523 // We only have code, sequential strings, or fixed arrays in large
6524 // object space, and only fixed arrays can possibly contain pointers to
6525 // the young generation.
6526 if (object->IsFixedArray()) {
6527 Object*** store_buffer_position = store_buffer()->Start();
6528 Object*** store_buffer_top = store_buffer()->Top();
6529 Object** current = reinterpret_cast<Object**>(object->address());
6530 Object** limit =
6531 reinterpret_cast<Object**>(object->address() + object->Size());
6532 CheckStoreBuffer(this,
6535 &store_buffer_position,
6537 &EverythingsAPointer,
6546 void Heap::IterateRoots(ObjectVisitor* v, VisitMode mode) {
6547 IterateStrongRoots(v, mode);
6548 IterateWeakRoots(v, mode);
6552 void Heap::IterateWeakRoots(ObjectVisitor* v, VisitMode mode) {
6553 v->VisitPointer(reinterpret_cast<Object**>(&roots_[kStringTableRootIndex]));
6554 v->Synchronize(VisitorSynchronization::kStringTable);
6555 if (mode != VISIT_ALL_IN_SCAVENGE &&
6556 mode != VISIT_ALL_IN_SWEEP_NEWSPACE) {
6557 // Scavenge collections have special processing for this.
6558 external_string_table_.Iterate(v);
6560 v->Synchronize(VisitorSynchronization::kExternalStringsTable);
6564 void Heap::IterateStrongRoots(ObjectVisitor* v, VisitMode mode) {
6565 v->VisitPointers(&roots_[0], &roots_[kStrongRootListLength]);
6566 v->Synchronize(VisitorSynchronization::kStrongRootList);
6568 v->VisitPointer(BitCast<Object**>(&hidden_string_));
6569 v->Synchronize(VisitorSynchronization::kInternalizedString);
6571 isolate_->bootstrapper()->Iterate(v);
6572 v->Synchronize(VisitorSynchronization::kBootstrapper);
6573 isolate_->Iterate(v);
6574 v->Synchronize(VisitorSynchronization::kTop);
6575 Relocatable::Iterate(v);
6576 v->Synchronize(VisitorSynchronization::kRelocatable);
6578 #ifdef ENABLE_DEBUGGER_SUPPORT
6579 isolate_->debug()->Iterate(v);
6580 if (isolate_->deoptimizer_data() != NULL) {
6581 isolate_->deoptimizer_data()->Iterate(v);
6584 v->Synchronize(VisitorSynchronization::kDebug);
6585 isolate_->compilation_cache()->Iterate(v);
6586 v->Synchronize(VisitorSynchronization::kCompilationCache);
6588 // Iterate over local handles in handle scopes.
6589 isolate_->handle_scope_implementer()->Iterate(v);
6590 isolate_->IterateDeferredHandles(v);
6591 v->Synchronize(VisitorSynchronization::kHandleScope);
6593 // Iterate over the builtin code objects and code stubs in the
6594 // heap. Note that it is not necessary to iterate over code objects
6595 // on scavenge collections.
6596 if (mode != VISIT_ALL_IN_SCAVENGE) {
6597 isolate_->builtins()->IterateBuiltins(v);
6599 v->Synchronize(VisitorSynchronization::kBuiltins);
6601 // Iterate over global handles.
6603 case VISIT_ONLY_STRONG:
6604 isolate_->global_handles()->IterateStrongRoots(v);
6606 case VISIT_ALL_IN_SCAVENGE:
6607 isolate_->global_handles()->IterateNewSpaceStrongAndDependentRoots(v);
6609 case VISIT_ALL_IN_SWEEP_NEWSPACE:
6611 isolate_->global_handles()->IterateAllRoots(v);
6614 v->Synchronize(VisitorSynchronization::kGlobalHandles);
6616 // Iterate over pointers being held by inactive threads.
6617 isolate_->thread_manager()->Iterate(v);
6618 v->Synchronize(VisitorSynchronization::kThreadManager);
6620 // Iterate over the pointers the Serialization/Deserialization code is
6621 // holding.
6622 // During garbage collection this keeps the partial snapshot cache alive.
6623 // During deserialization of the startup snapshot this creates the partial
6624 // snapshot cache and deserializes the objects it refers to. During
6625 // serialization this does nothing, since the partial snapshot cache is
6626 // empty. However the next thing we do is create the partial snapshot,
6627 // filling up the partial snapshot cache with objects it needs as we go.
6628 SerializerDeserializer::Iterate(v);
6629 // We don't do a v->Synchronize call here, because in debug mode that will
6630 // output a flag to the snapshot. However at this point the serializer and
6631 // deserializer are deliberately a little unsynchronized (see above) so the
6632 // checking of the sync flag in the snapshot would fail.
6636 // TODO(1236194): Since the heap size is configurable on the command line
6637 // and through the API, we should gracefully handle the case that the heap
6638 // size is not big enough to fit all the initial objects.
6639 bool Heap::ConfigureHeap(int max_semispace_size,
6640 intptr_t max_old_gen_size,
6641 intptr_t max_executable_size) {
6642 if (HasBeenSetUp()) return false;
6644 if (FLAG_stress_compaction) {
6645 // This will cause more frequent GCs when stressing.
6646 max_semispace_size_ = Page::kPageSize;
6649 if (max_semispace_size > 0) {
6650 if (max_semispace_size < Page::kPageSize) {
6651 max_semispace_size = Page::kPageSize;
6652 if (FLAG_trace_gc) {
6653 PrintPID("Max semispace size cannot be less than %dkbytes\n",
6654 Page::kPageSize >> 10);
6657 max_semispace_size_ = max_semispace_size;
6660 if (Snapshot::IsEnabled()) {
6661 // If we are using a snapshot we always reserve the default amount
6662 // of memory for each semispace because code in the snapshot has
6663 // write-barrier code that relies on the size and alignment of new
6664 // space. We therefore cannot use a larger max semispace size
6665 // than the default reserved semispace size.
6666 if (max_semispace_size_ > reserved_semispace_size_) {
6667 max_semispace_size_ = reserved_semispace_size_;
6668 if (FLAG_trace_gc) {
6669 PrintPID("Max semispace size cannot be more than %dkbytes\n",
6670 reserved_semispace_size_ >> 10);
6674 // If we are not using snapshots we reserve space for the actual
6675 // max semispace size.
6676 reserved_semispace_size_ = max_semispace_size_;
6679 if (max_old_gen_size > 0) max_old_generation_size_ = max_old_gen_size;
6680 if (max_executable_size > 0) {
6681 max_executable_size_ = RoundUp(max_executable_size, Page::kPageSize);
6684 // The max executable size must be less than or equal to the max old
6685 // generation size.
6686 if (max_executable_size_ > max_old_generation_size_) {
6687 max_executable_size_ = max_old_generation_size_;
6690 // The new space size must be a power of two to support single-bit testing
6691 // for containment.
6692 max_semispace_size_ = RoundUpToPowerOf2(max_semispace_size_);
6693 reserved_semispace_size_ = RoundUpToPowerOf2(reserved_semispace_size_);
6694 initial_semispace_size_ = Min(initial_semispace_size_, max_semispace_size_);
6696 // The external allocation limit should be below 256 MB on all architectures
6697 // to avoid unnecessary low memory notifications, as that is the threshold
6698 // for some embedders.
6699 external_allocation_limit_ = 12 * max_semispace_size_;
6700 ASSERT(external_allocation_limit_ <= 256 * MB);
6702 // The old generation is paged and needs at least one page for each space.
6703 int paged_space_count = LAST_PAGED_SPACE - FIRST_PAGED_SPACE + 1;
6704 max_old_generation_size_ = Max(static_cast<intptr_t>(paged_space_count *
6706 RoundUp(max_old_generation_size_,
6714 bool Heap::ConfigureHeapDefault() {
6715 return ConfigureHeap(static_cast<intptr_t>(FLAG_max_new_space_size / 2) * KB,
6716 static_cast<intptr_t>(FLAG_max_old_space_size) * MB,
6717 static_cast<intptr_t>(FLAG_max_executable_size) * MB);
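// Illustrative sketch only (not part of the original source): embedders
// normally reach ConfigureHeap through the public v8::ResourceConstraints
// API of this V8 version; the concrete values below are arbitrary examples
// and the units are whatever v8.h documents for each setter:
//
//   v8::ResourceConstraints constraints;
//   constraints.set_max_young_space_size(2 * 1024);
//   constraints.set_max_old_space_size(256);
//   constraints.set_max_executable_size(192);
//   v8::SetResourceConstraints(&constraints);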
6721 void Heap::RecordStats(HeapStats* stats, bool take_snapshot) {
6722 *stats->start_marker = HeapStats::kStartMarker;
6723 *stats->end_marker = HeapStats::kEndMarker;
6724 *stats->new_space_size = new_space_.SizeAsInt();
6725 *stats->new_space_capacity = static_cast<int>(new_space_.Capacity());
6726 *stats->old_pointer_space_size = old_pointer_space_->SizeOfObjects();
6727 *stats->old_pointer_space_capacity = old_pointer_space_->Capacity();
6728 *stats->old_data_space_size = old_data_space_->SizeOfObjects();
6729 *stats->old_data_space_capacity = old_data_space_->Capacity();
6730 *stats->code_space_size = code_space_->SizeOfObjects();
6731 *stats->code_space_capacity = code_space_->Capacity();
6732 *stats->map_space_size = map_space_->SizeOfObjects();
6733 *stats->map_space_capacity = map_space_->Capacity();
6734 *stats->cell_space_size = cell_space_->SizeOfObjects();
6735 *stats->cell_space_capacity = cell_space_->Capacity();
6736 *stats->property_cell_space_size = property_cell_space_->SizeOfObjects();
6737 *stats->property_cell_space_capacity = property_cell_space_->Capacity();
6738 *stats->lo_space_size = lo_space_->Size();
6739 isolate_->global_handles()->RecordStats(stats);
6740 *stats->memory_allocator_size = isolate()->memory_allocator()->Size();
6741 *stats->memory_allocator_capacity =
6742 isolate()->memory_allocator()->Size() +
6743 isolate()->memory_allocator()->Available();
6744 *stats->os_error = OS::GetLastError();
6745 isolate()->memory_allocator()->Available();
6746 if (take_snapshot) {
6747 HeapIterator iterator(this);
6748 for (HeapObject* obj = iterator.next();
6750 obj = iterator.next()) {
6751 InstanceType type = obj->map()->instance_type();
6752 ASSERT(0 <= type && type <= LAST_TYPE);
6753 stats->objects_per_type[type]++;
6754 stats->size_per_type[type] += obj->Size();
6760 intptr_t Heap::PromotedSpaceSizeOfObjects() {
6761 return old_pointer_space_->SizeOfObjects()
6762 + old_data_space_->SizeOfObjects()
6763 + code_space_->SizeOfObjects()
6764 + map_space_->SizeOfObjects()
6765 + cell_space_->SizeOfObjects()
6766 + property_cell_space_->SizeOfObjects()
6767 + lo_space_->SizeOfObjects();
6771 intptr_t Heap::PromotedExternalMemorySize() {
6772 if (amount_of_external_allocated_memory_
6773 <= amount_of_external_allocated_memory_at_last_global_gc_) return 0;
6774 return amount_of_external_allocated_memory_
6775 - amount_of_external_allocated_memory_at_last_global_gc_;
6779 V8_DECLARE_ONCE(initialize_gc_once);
6781 static void InitializeGCOnce() {
6782 InitializeScavengingVisitorsTables();
6783 NewSpaceScavenger::Initialize();
6784 MarkCompactCollector::Initialize();
6788 bool Heap::SetUp() {
6790 allocation_timeout_ = FLAG_gc_interval;
6793 // Initialize heap spaces and initial maps and objects. Whenever something
6794 // goes wrong, just return false. The caller should check the results and
6795 // call Heap::TearDown() to release allocated memory.
6797 // If the heap is not yet configured (e.g. through the API), configure it.
6798 // Configuration is based on the flags new-space-size (really the semispace
6799 // size) and old-space-size if set or the initial values of semispace_size_
6800 // and old_generation_size_ otherwise.
6802 if (!ConfigureHeapDefault()) return false;
6805 CallOnce(&initialize_gc_once, &InitializeGCOnce);
6807 MarkMapPointersAsEncoded(false);
6809 // Set up memory allocator.
6810 if (!isolate_->memory_allocator()->SetUp(MaxReserved(), MaxExecutableSize()))
6813 // Set up new space.
6814 if (!new_space_.SetUp(reserved_semispace_size_, max_semispace_size_)) {
6818 // Initialize old pointer space.
6819 old_pointer_space_ =
6821 max_old_generation_size_,
6824 if (old_pointer_space_ == NULL) return false;
6825 if (!old_pointer_space_->SetUp()) return false;
6827 // Initialize old data space.
6830 max_old_generation_size_,
6833 if (old_data_space_ == NULL) return false;
6834 if (!old_data_space_->SetUp()) return false;
6836 // Initialize the code space, set its maximum capacity to the old
6837 // generation size. It needs executable memory.
6838 // On 64-bit platform(s), we put all code objects in a 2 GB range of
6839 // virtual address space, so that they can call each other with near calls.
6840 if (code_range_size_ > 0) {
6841 if (!isolate_->code_range()->SetUp(code_range_size_)) {
6847 new OldSpace(this, max_old_generation_size_, CODE_SPACE, EXECUTABLE);
6848 if (code_space_ == NULL) return false;
6849 if (!code_space_->SetUp()) return false;
6851 // Initialize map space.
6852 map_space_ = new MapSpace(this, max_old_generation_size_, MAP_SPACE);
6853 if (map_space_ == NULL) return false;
6854 if (!map_space_->SetUp()) return false;
6856 // Initialize simple cell space.
6857 cell_space_ = new CellSpace(this, max_old_generation_size_, CELL_SPACE);
6858 if (cell_space_ == NULL) return false;
6859 if (!cell_space_->SetUp()) return false;
6861 // Initialize global property cell space.
6862 property_cell_space_ = new PropertyCellSpace(this, max_old_generation_size_,
6863 PROPERTY_CELL_SPACE);
6864 if (property_cell_space_ == NULL) return false;
6865 if (!property_cell_space_->SetUp()) return false;
6867 // The large object code space may contain code or data. We set the memory
6868 // to be non-executable here for safety, but this means we need to enable it
6869 // explicitly when allocating large code objects.
6870 lo_space_ = new LargeObjectSpace(this, max_old_generation_size_, LO_SPACE);
6871 if (lo_space_ == NULL) return false;
6872 if (!lo_space_->SetUp()) return false;
6874 // Set up the seed that is used to randomize the string hash function.
6875 ASSERT(hash_seed() == 0);
6876 if (FLAG_randomize_hashes) {
6877 if (FLAG_hash_seed == 0) {
6879 Smi::FromInt(V8::RandomPrivate(isolate()) & 0x3fffffff));
6881 set_hash_seed(Smi::FromInt(FLAG_hash_seed));
6885 LOG(isolate_, IntPtrTEvent("heap-capacity", Capacity()));
6886 LOG(isolate_, IntPtrTEvent("heap-available", Available()));
6888 store_buffer()->SetUp();
6890 if (FLAG_parallel_recompilation) relocation_mutex_ = OS::CreateMutex();
6892 relocation_mutex_locked_by_optimizer_thread_ = false;
6899 bool Heap::CreateHeapObjects() {
6900 // Create initial maps.
6901 if (!CreateInitialMaps()) return false;
6902 if (!CreateApiObjects()) return false;
6904 // Create initial objects
6905 if (!CreateInitialObjects()) return false;
6907 native_contexts_list_ = undefined_value();
6908 array_buffers_list_ = undefined_value();
6909 allocation_sites_list_ = undefined_value();
6914 void Heap::SetStackLimits() {
6915 ASSERT(isolate_ != NULL);
6916 ASSERT(isolate_ == isolate());
6917 // On 64 bit machines, pointers are generally out of range of Smis. We write
6918 // something that looks like an out of range Smi to the GC.
6920 // Set up the special root array entries containing the stack limits.
6921 // These are actually addresses, but the tag makes the GC ignore it.
6922 roots_[kStackLimitRootIndex] =
6923 reinterpret_cast<Object*>(
6924 (isolate_->stack_guard()->jslimit() & ~kSmiTagMask) | kSmiTag);
6925 roots_[kRealStackLimitRootIndex] =
6926 reinterpret_cast<Object*>(
6927 (isolate_->stack_guard()->real_jslimit() & ~kSmiTagMask) | kSmiTag);
6931 void Heap::TearDown() {
6933 if (FLAG_verify_heap) {
6938 if (FLAG_print_cumulative_gc_stat) {
6940 PrintF("gc_count=%d ", gc_count_);
6941 PrintF("mark_sweep_count=%d ", ms_count_);
6942 PrintF("max_gc_pause=%.1f ", get_max_gc_pause());
6943 PrintF("total_gc_time=%.1f ", total_gc_time_ms_);
6944 PrintF("min_in_mutator=%.1f ", get_min_in_mutator());
6945 PrintF("max_alive_after_gc=%" V8_PTR_PREFIX "d ",
6946 get_max_alive_after_gc());
6947 PrintF("total_marking_time=%.1f ", marking_time());
6948 PrintF("total_sweeping_time=%.1f ", sweeping_time());
6952 TearDownArrayBuffers();
6954 isolate_->global_handles()->TearDown();
6956 external_string_table_.TearDown();
6958 new_space_.TearDown();
6960 if (old_pointer_space_ != NULL) {
6961 old_pointer_space_->TearDown();
6962 delete old_pointer_space_;
6963 old_pointer_space_ = NULL;
6966 if (old_data_space_ != NULL) {
6967 old_data_space_->TearDown();
6968 delete old_data_space_;
6969 old_data_space_ = NULL;
6972 if (code_space_ != NULL) {
6973 code_space_->TearDown();
6978 if (map_space_ != NULL) {
6979 map_space_->TearDown();
6984 if (cell_space_ != NULL) {
6985 cell_space_->TearDown();
6990 if (property_cell_space_ != NULL) {
6991 property_cell_space_->TearDown();
6992 delete property_cell_space_;
6993 property_cell_space_ = NULL;
6996 if (lo_space_ != NULL) {
6997 lo_space_->TearDown();
7002 store_buffer()->TearDown();
7003 incremental_marking()->TearDown();
7005 isolate_->memory_allocator()->TearDown();
7007 delete relocation_mutex_;
7011 void Heap::AddGCPrologueCallback(GCPrologueCallback callback, GCType gc_type) {
7012 ASSERT(callback != NULL);
7013 GCPrologueCallbackPair pair(callback, gc_type);
7014 ASSERT(!gc_prologue_callbacks_.Contains(pair));
7015 return gc_prologue_callbacks_.Add(pair);
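// Illustrative sketch only (not part of the original source): the shape of a
// callback registered here, as it is typically installed through the public
// v8::V8::AddGCPrologueCallback API of this V8 version (names below are
// hypothetical):
//
//   static void OnGCStart(v8::GCType type, v8::GCCallbackFlags flags) {
//     // e.g. start a timer before the collection runs
//   }
//   ...
//   v8::V8::AddGCPrologueCallback(OnGCStart, v8::kGCTypeMarkSweepCompact);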
7019 void Heap::RemoveGCPrologueCallback(GCPrologueCallback callback) {
7020 ASSERT(callback != NULL);
7021 for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
7022 if (gc_prologue_callbacks_[i].callback == callback) {
7023 gc_prologue_callbacks_.Remove(i);
7031 void Heap::AddGCEpilogueCallback(GCEpilogueCallback callback, GCType gc_type) {
7032 ASSERT(callback != NULL);
7033 GCEpilogueCallbackPair pair(callback, gc_type);
7034 ASSERT(!gc_epilogue_callbacks_.Contains(pair));
7035 return gc_epilogue_callbacks_.Add(pair);
7039 void Heap::RemoveGCEpilogueCallback(GCEpilogueCallback callback) {
7040 ASSERT(callback != NULL);
7041 for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
7042 if (gc_epilogue_callbacks_[i].callback == callback) {
7043 gc_epilogue_callbacks_.Remove(i);
7053 class PrintHandleVisitor: public ObjectVisitor {
7055 void VisitPointers(Object** start, Object** end) {
7056 for (Object** p = start; p < end; p++)
7057 PrintF(" handle %p to %p\n",
7058 reinterpret_cast<void*>(p),
7059 reinterpret_cast<void*>(*p));
7064 void Heap::PrintHandles() {
7065 PrintF("Handles:\n");
7066 PrintHandleVisitor v;
7067 isolate_->handle_scope_implementer()->Iterate(&v);
7073 Space* AllSpaces::next() {
7074 switch (counter_++) {
7076 return heap_->new_space();
7077 case OLD_POINTER_SPACE:
7078 return heap_->old_pointer_space();
7079 case OLD_DATA_SPACE:
7080 return heap_->old_data_space();
7082 return heap_->code_space();
7084 return heap_->map_space();
7086 return heap_->cell_space();
7087 case PROPERTY_CELL_SPACE:
7088 return heap_->property_cell_space();
7090 return heap_->lo_space();
7097 PagedSpace* PagedSpaces::next() {
7098 switch (counter_++) {
7099 case OLD_POINTER_SPACE:
7100 return heap_->old_pointer_space();
7101 case OLD_DATA_SPACE:
7102 return heap_->old_data_space();
7104 return heap_->code_space();
7106 return heap_->map_space();
7108 return heap_->cell_space();
7109 case PROPERTY_CELL_SPACE:
7110 return heap_->property_cell_space();
7118 OldSpace* OldSpaces::next() {
7119 switch (counter_++) {
7120 case OLD_POINTER_SPACE:
7121 return heap_->old_pointer_space();
7122 case OLD_DATA_SPACE:
7123 return heap_->old_data_space();
7125 return heap_->code_space();
7132 SpaceIterator::SpaceIterator(Heap* heap)
7134 current_space_(FIRST_SPACE),
7140 SpaceIterator::SpaceIterator(Heap* heap, HeapObjectCallback size_func)
7142 current_space_(FIRST_SPACE),
7144 size_func_(size_func) {
7148 SpaceIterator::~SpaceIterator() {
7149 // Delete active iterator if any.
7154 bool SpaceIterator::has_next() {
7155 // Iterate until no more spaces.
7156 return current_space_ != LAST_SPACE;
7160 ObjectIterator* SpaceIterator::next() {
7161 if (iterator_ != NULL) {
7164 // Move to the next space
7166 if (current_space_ > LAST_SPACE) {
7171 // Return iterator for the new current space.
7172 return CreateIterator();
7176 // Create an iterator for the space to iterate.
7177 ObjectIterator* SpaceIterator::CreateIterator() {
7178 ASSERT(iterator_ == NULL);
7180 switch (current_space_) {
7182 iterator_ = new SemiSpaceIterator(heap_->new_space(), size_func_);
7184 case OLD_POINTER_SPACE:
7186 new HeapObjectIterator(heap_->old_pointer_space(), size_func_);
7188 case OLD_DATA_SPACE:
7189 iterator_ = new HeapObjectIterator(heap_->old_data_space(), size_func_);
7192 iterator_ = new HeapObjectIterator(heap_->code_space(), size_func_);
7195 iterator_ = new HeapObjectIterator(heap_->map_space(), size_func_);
7198 iterator_ = new HeapObjectIterator(heap_->cell_space(), size_func_);
7200 case PROPERTY_CELL_SPACE:
7201 iterator_ = new HeapObjectIterator(heap_->property_cell_space(),
7205 iterator_ = new LargeObjectIterator(heap_->lo_space(), size_func_);
7209 // Return the newly allocated iterator.
7210 ASSERT(iterator_ != NULL);
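// HeapObjectsFilter and its UnreachableObjectsFilter subclass support
// filtered heap iteration: the filter marks everything reachable from the
// roots on construction, SkipObject() then rejects objects whose mark bit
// is still clear, and the destructor clears all mark bits again.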
7215 class HeapObjectsFilter {
7217 virtual ~HeapObjectsFilter() {}
7218 virtual bool SkipObject(HeapObject* object) = 0;
7222 class UnreachableObjectsFilter : public HeapObjectsFilter {
7224 UnreachableObjectsFilter() {
7225 MarkReachableObjects();
7228 ~UnreachableObjectsFilter() {
7229 Isolate::Current()->heap()->mark_compact_collector()->ClearMarkbits();
7232 bool SkipObject(HeapObject* object) {
7233 MarkBit mark_bit = Marking::MarkBitFrom(object);
7234 return !mark_bit.Get();
7238 class MarkingVisitor : public ObjectVisitor {
7240 MarkingVisitor() : marking_stack_(10) {}
7242 void VisitPointers(Object** start, Object** end) {
7243 for (Object** p = start; p < end; p++) {
7244 if (!(*p)->IsHeapObject()) continue;
7245 HeapObject* obj = HeapObject::cast(*p);
7246 MarkBit mark_bit = Marking::MarkBitFrom(obj);
7247 if (!mark_bit.Get()) {
7249 marking_stack_.Add(obj);
7254 void TransitiveClosure() {
7255 while (!marking_stack_.is_empty()) {
7256 HeapObject* obj = marking_stack_.RemoveLast();
7262 List<HeapObject*> marking_stack_;
7265 void MarkReachableObjects() {
7266 Heap* heap = Isolate::Current()->heap();
7267 MarkingVisitor visitor;
7268 heap->IterateRoots(&visitor, VISIT_ALL);
7269 visitor.TransitiveClosure();
7272 DisallowHeapAllocation no_allocation_;
7276 HeapIterator::HeapIterator(Heap* heap)
7278 filtering_(HeapIterator::kNoFiltering),
7284 HeapIterator::HeapIterator(Heap* heap,
7285 HeapIterator::HeapObjectsFiltering filtering)
7287 filtering_(filtering),
7293 HeapIterator::~HeapIterator() {
7298 void HeapIterator::Init() {
7299 // Start the iteration.
7300 space_iterator_ = new SpaceIterator(heap_);
7301 switch (filtering_) {
7302 case kFilterUnreachable:
7303 filter_ = new UnreachableObjectsFilter;
7308 object_iterator_ = space_iterator_->next();
7312 void HeapIterator::Shutdown() {
7314 // Assert that in filtering mode we have iterated through all
7315 // objects. Otherwise, the heap will be left in an inconsistent state.
7316 if (filtering_ != kNoFiltering) {
7317 ASSERT(object_iterator_ == NULL);
7320 // Make sure the last iterator is deallocated.
7321 delete space_iterator_;
7322 space_iterator_ = NULL;
7323 object_iterator_ = NULL;
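// HeapIterator::next() returns each live HeapObject in turn and NULL when
// the traversal is complete. A minimal usage sketch (illustrative only,
// assuming a Heap* heap is in scope):
//
//   HeapIterator iterator(heap);
//   for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
//     // Inspect obj here, e.g. obj->Print() in debug builds.
//   }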
7329 HeapObject* HeapIterator::next() {
7330 if (filter_ == NULL) return NextObject();
7332 HeapObject* obj = NextObject();
7333 while (obj != NULL && filter_->SkipObject(obj)) obj = NextObject();
7338 HeapObject* HeapIterator::NextObject() {
7339 // No iterator means we are done.
7340 if (object_iterator_ == NULL) return NULL;
7342 if (HeapObject* obj = object_iterator_->next_object()) {
7343 // If the current iterator has more objects, we are fine.
7346 // Go through the spaces looking for one that has objects.
7347 while (space_iterator_->has_next()) {
7348 object_iterator_ = space_iterator_->next();
7349 if (HeapObject* obj = object_iterator_->next_object()) {
7354 // Done with the last space.
7355 object_iterator_ = NULL;
7360 void HeapIterator::reset() {
7361 // Restart the iterator.
7369 Object* const PathTracer::kAnyGlobalObject = NULL;
7371 class PathTracer::MarkVisitor: public ObjectVisitor {
7373 explicit MarkVisitor(PathTracer* tracer) : tracer_(tracer) {}
7374 void VisitPointers(Object** start, Object** end) {
7375 // Scan all HeapObject pointers in [start, end)
7376 for (Object** p = start; !tracer_->found() && (p < end); p++) {
7377 if ((*p)->IsHeapObject())
7378 tracer_->MarkRecursively(p, this);
7383 PathTracer* tracer_;
7387 class PathTracer::UnmarkVisitor: public ObjectVisitor {
7389 explicit UnmarkVisitor(PathTracer* tracer) : tracer_(tracer) {}
7390 void VisitPointers(Object** start, Object** end) {
7391 // Scan all HeapObject pointers in [start, end)
7392 for (Object** p = start; p < end; p++) {
7393 if ((*p)->IsHeapObject())
7394 tracer_->UnmarkRecursively(p, this);
7399 PathTracer* tracer_;
7403 void PathTracer::VisitPointers(Object** start, Object** end) {
7404 bool done = ((what_to_find_ == FIND_FIRST) && found_target_);
7405 // Visit all HeapObject pointers in [start, end)
7406 for (Object** p = start; !done && (p < end); p++) {
7407 if ((*p)->IsHeapObject()) {
7409 done = ((what_to_find_ == FIND_FIRST) && found_target_);
7415 void PathTracer::Reset() {
7416 found_target_ = false;
7417 object_stack_.Clear();
7421 void PathTracer::TracePathFrom(Object** root) {
7422 ASSERT((search_target_ == kAnyGlobalObject) ||
7423 search_target_->IsHeapObject());
7424 found_target_in_trace_ = false;
7427 MarkVisitor mark_visitor(this);
7428 MarkRecursively(root, &mark_visitor);
7430 UnmarkVisitor unmark_visitor(this);
7431 UnmarkRecursively(root, &unmark_visitor);
7437 static bool SafeIsNativeContext(HeapObject* obj) {
7438 return obj->map() == obj->GetHeap()->raw_unchecked_native_context_map();
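// MarkRecursively marks an object as visited by overwriting its map word
// with the map address plus kMarkTag (bypassing the write barrier) and
// records the current path in object_stack_; UnmarkRecursively strips the
// tag again so the heap is left untouched once tracing finishes.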
7442 void PathTracer::MarkRecursively(Object** p, MarkVisitor* mark_visitor) {
7443 if (!(*p)->IsHeapObject()) return;
7445 HeapObject* obj = HeapObject::cast(*p);
7447 Object* map = obj->map();
7449 if (!map->IsHeapObject()) return; // visited before
7451 if (found_target_in_trace_) return; // stop if target found
7452 object_stack_.Add(obj);
7453 if (((search_target_ == kAnyGlobalObject) && obj->IsJSGlobalObject()) ||
7454 (obj == search_target_)) {
7455 found_target_in_trace_ = true;
7456 found_target_ = true;
7460 bool is_native_context = SafeIsNativeContext(obj);
7463 Map* map_p = reinterpret_cast<Map*>(HeapObject::cast(map));
7465 Address map_addr = map_p->address();
7467 obj->set_map_no_write_barrier(reinterpret_cast<Map*>(map_addr + kMarkTag));
7469 // Scan the object body.
7470 if (is_native_context && (visit_mode_ == VISIT_ONLY_STRONG)) {
7471 // This is specialized to scan Contexts properly.
7472 Object** start = reinterpret_cast<Object**>(obj->address() +
7473 Context::kHeaderSize);
7474 Object** end = reinterpret_cast<Object**>(obj->address() +
7475 Context::kHeaderSize + Context::FIRST_WEAK_SLOT * kPointerSize);
7476 mark_visitor->VisitPointers(start, end);
7478 obj->IterateBody(map_p->instance_type(),
7479 obj->SizeFromMap(map_p),
7483 // Scan the map after the body because the body is a lot more interesting
7484 // when doing leak detection.
7485 MarkRecursively(&map, mark_visitor);
7487 if (!found_target_in_trace_) // don't pop if found the target
7488 object_stack_.RemoveLast();
7492 void PathTracer::UnmarkRecursively(Object** p, UnmarkVisitor* unmark_visitor) {
7493 if (!(*p)->IsHeapObject()) return;
7495 HeapObject* obj = HeapObject::cast(*p);
7497 Object* map = obj->map();
7499 if (map->IsHeapObject()) return; // unmarked already
7501 Address map_addr = reinterpret_cast<Address>(map);
7503 map_addr -= kMarkTag;
7505 ASSERT_TAG_ALIGNED(map_addr);
7507 HeapObject* map_p = HeapObject::FromAddress(map_addr);
7509 obj->set_map_no_write_barrier(reinterpret_cast<Map*>(map_p));
7511 UnmarkRecursively(reinterpret_cast<Object**>(&map_p), unmark_visitor);
7513 obj->IterateBody(Map::cast(map_p)->instance_type(),
7514 obj->SizeFromMap(Map::cast(map_p)),
7519 void PathTracer::ProcessResults() {
7520 if (found_target_) {
7521 PrintF("=====================================\n");
7522 PrintF("==== Path to object ====\n");
7523 PrintF("=====================================\n\n");
7525 ASSERT(!object_stack_.is_empty());
7526 for (int i = 0; i < object_stack_.length(); i++) {
7527 if (i > 0) PrintF("\n |\n |\n V\n\n");
7528 Object* obj = object_stack_[i];
7531 PrintF("=====================================\n");
7536 // Triggers a depth-first traversal of reachable objects from one
7537 // given root object and finds a path to a specific heap object and
7538 // prints it.
7539 void Heap::TracePathToObjectFrom(Object* target, Object* root) {
7540 PathTracer tracer(target, PathTracer::FIND_ALL, VISIT_ALL);
7541 tracer.VisitPointer(&root);
7545 // Triggers a depth-first traversal of reachable objects from roots
7546 // and finds a path to a specific heap object and prints it.
7547 void Heap::TracePathToObject(Object* target) {
7548 PathTracer tracer(target, PathTracer::FIND_ALL, VISIT_ALL);
7549 IterateRoots(&tracer, VISIT_ONLY_STRONG);
7553 // Triggers a depth-first traversal of reachable objects from roots
7554 // and finds a path to any global object and prints it. Useful for
7555 // determining the source for leaks of global objects.
7556 void Heap::TracePathToGlobal() {
7557 PathTracer tracer(PathTracer::kAnyGlobalObject,
7558 PathTracer::FIND_ALL,
7560 IterateRoots(&tracer, VISIT_ONLY_STRONG);
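// CountTotalHolesSize sums the wasted and currently free bytes of every old
// space; GCTracer records this before and after a collection so traces can
// report how much free-list space ("holes") the GC left behind.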
7565 static intptr_t CountTotalHolesSize(Heap* heap) {
7566 intptr_t holes_size = 0;
7567 OldSpaces spaces(heap);
7568 for (OldSpace* space = spaces.next();
7570 space = spaces.next()) {
7571 holes_size += space->Waste() + space->Available();
7577 GCTracer::GCTracer(Heap* heap,
7578 const char* gc_reason,
7579 const char* collector_reason)
7581 start_object_size_(0),
7582 start_memory_size_(0),
7585 allocated_since_last_gc_(0),
7586 spent_in_mutator_(0),
7587 promoted_objects_size_(0),
7588 nodes_died_in_new_space_(0),
7589 nodes_copied_in_new_space_(0),
7592 gc_reason_(gc_reason),
7593 collector_reason_(collector_reason) {
7594 if (!FLAG_trace_gc && !FLAG_print_cumulative_gc_stat) return;
7595 start_time_ = OS::TimeCurrentMillis();
7596 start_object_size_ = heap_->SizeOfObjects();
7597 start_memory_size_ = heap_->isolate()->memory_allocator()->Size();
7599 for (int i = 0; i < Scope::kNumberOfScopes; i++) {
7603 in_free_list_or_wasted_before_gc_ = CountTotalHolesSize(heap);
7605 allocated_since_last_gc_ =
7606 heap_->SizeOfObjects() - heap_->alive_after_last_gc_;
7608 if (heap_->last_gc_end_timestamp_ > 0) {
7609 spent_in_mutator_ = Max(start_time_ - heap_->last_gc_end_timestamp_, 0.0);
7612 steps_count_ = heap_->incremental_marking()->steps_count();
7613 steps_took_ = heap_->incremental_marking()->steps_took();
7614 longest_step_ = heap_->incremental_marking()->longest_step();
7615 steps_count_since_last_gc_ =
7616 heap_->incremental_marking()->steps_count_since_last_gc();
7617 steps_took_since_last_gc_ =
7618 heap_->incremental_marking()->steps_took_since_last_gc();
7622 GCTracer::~GCTracer() {
7623 // Printf ONE line iff flag is set.
7624 if (!FLAG_trace_gc && !FLAG_print_cumulative_gc_stat) return;
7626 bool first_gc = (heap_->last_gc_end_timestamp_ == 0);
7628 heap_->alive_after_last_gc_ = heap_->SizeOfObjects();
7629 heap_->last_gc_end_timestamp_ = OS::TimeCurrentMillis();
7631 double time = heap_->last_gc_end_timestamp_ - start_time_;
7633 // Update cumulative GC statistics if required.
7634 if (FLAG_print_cumulative_gc_stat) {
7635 heap_->total_gc_time_ms_ += time;
7636 heap_->max_gc_pause_ = Max(heap_->max_gc_pause_, time);
7637 heap_->max_alive_after_gc_ = Max(heap_->max_alive_after_gc_,
7638 heap_->alive_after_last_gc_);
7640 heap_->min_in_mutator_ = Min(heap_->min_in_mutator_,
7643 } else if (FLAG_trace_gc_verbose) {
7644 heap_->total_gc_time_ms_ += time;
7647 if (collector_ == SCAVENGER && FLAG_trace_gc_ignore_scavenger) return;
7649 heap_->AddMarkingTime(scopes_[Scope::MC_MARK]);
7651 if (FLAG_print_cumulative_gc_stat && !FLAG_trace_gc) return;
7652 PrintPID("%8.0f ms: ", heap_->isolate()->time_millis_since_init());
7654 if (!FLAG_trace_gc_nvp) {
7655 int external_time = static_cast<int>(scopes_[Scope::EXTERNAL]);
7657 double end_memory_size_mb =
7658 static_cast<double>(heap_->isolate()->memory_allocator()->Size()) / MB;
7660 PrintF("%s %.1f (%.1f) -> %.1f (%.1f) MB, ",
7662 static_cast<double>(start_object_size_) / MB,
7663 static_cast<double>(start_memory_size_) / MB,
7664 SizeOfHeapObjects(),
7665 end_memory_size_mb);
7667 if (external_time > 0) PrintF("%d / ", external_time);
7668 PrintF("%.1f ms", time);
7669 if (steps_count_ > 0) {
7670 if (collector_ == SCAVENGER) {
7671 PrintF(" (+ %.1f ms in %d steps since last GC)",
7672 steps_took_since_last_gc_,
7673 steps_count_since_last_gc_);
7675 PrintF(" (+ %.1f ms in %d steps since start of marking, "
7676 "biggest step %.1f ms)",
7683 if (gc_reason_ != NULL) {
7684 PrintF(" [%s]", gc_reason_);
7687 if (collector_reason_ != NULL) {
7688 PrintF(" [%s]", collector_reason_);
7693 PrintF("pause=%.1f ", time);
7694 PrintF("mutator=%.1f ", spent_in_mutator_);
7696 switch (collector_) {
7700 case MARK_COMPACTOR:
7708 PrintF("external=%.1f ", scopes_[Scope::EXTERNAL]);
7709 PrintF("mark=%.1f ", scopes_[Scope::MC_MARK]);
7710 PrintF("sweep=%.1f ", scopes_[Scope::MC_SWEEP]);
7711 PrintF("sweepns=%.1f ", scopes_[Scope::MC_SWEEP_NEWSPACE]);
7712 PrintF("evacuate=%.1f ", scopes_[Scope::MC_EVACUATE_PAGES]);
7713 PrintF("new_new=%.1f ", scopes_[Scope::MC_UPDATE_NEW_TO_NEW_POINTERS]);
7714 PrintF("root_new=%.1f ", scopes_[Scope::MC_UPDATE_ROOT_TO_NEW_POINTERS]);
7715 PrintF("old_new=%.1f ", scopes_[Scope::MC_UPDATE_OLD_TO_NEW_POINTERS]);
7716 PrintF("compaction_ptrs=%.1f ",
7717 scopes_[Scope::MC_UPDATE_POINTERS_TO_EVACUATED]);
7718 PrintF("intracompaction_ptrs=%.1f ",
7719 scopes_[Scope::MC_UPDATE_POINTERS_BETWEEN_EVACUATED]);
7720 PrintF("misc_compaction=%.1f ", scopes_[Scope::MC_UPDATE_MISC_POINTERS]);
7721 PrintF("weakcollection_process=%.1f ",
7722 scopes_[Scope::MC_WEAKCOLLECTION_PROCESS]);
7723 PrintF("weakcollection_clear=%.1f ",
7724 scopes_[Scope::MC_WEAKCOLLECTION_CLEAR]);
7726 PrintF("total_size_before=%" V8_PTR_PREFIX "d ", start_object_size_);
7727 PrintF("total_size_after=%" V8_PTR_PREFIX "d ", heap_->SizeOfObjects());
7728 PrintF("holes_size_before=%" V8_PTR_PREFIX "d ",
7729 in_free_list_or_wasted_before_gc_);
7730 PrintF("holes_size_after=%" V8_PTR_PREFIX "d ", CountTotalHolesSize(heap_));
7732 PrintF("allocated=%" V8_PTR_PREFIX "d ", allocated_since_last_gc_);
7733 PrintF("promoted=%" V8_PTR_PREFIX "d ", promoted_objects_size_);
7734 PrintF("nodes_died_in_new=%d ", nodes_died_in_new_space_);
7735 PrintF("nodes_copied_in_new=%d ", nodes_copied_in_new_space_);
7736 PrintF("nodes_promoted=%d ", nodes_promoted_);
7738 if (collector_ == SCAVENGER) {
7739 PrintF("stepscount=%d ", steps_count_since_last_gc_);
7740 PrintF("stepstook=%.1f ", steps_took_since_last_gc_);
7742 PrintF("stepscount=%d ", steps_count_);
7743 PrintF("stepstook=%.1f ", steps_took_);
7744 PrintF("longeststep=%.1f ", longest_step_);
7750 heap_->PrintShortHeapStatistics();
7754 const char* GCTracer::CollectorString() {
7755 switch (collector_) {
7758 case MARK_COMPACTOR:
7759 return "Mark-sweep";
7761 return "Unknown GC";
7765 int KeyedLookupCache::Hash(Map* map, Name* name) {
7766 // Uses only lower 32 bits if pointers are larger.
7767 uintptr_t addr_hash =
7768 static_cast<uint32_t>(reinterpret_cast<uintptr_t>(map)) >> kMapHashShift;
7769 return static_cast<uint32_t>((addr_hash ^ name->Hash()) & kCapacityMask);
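// Each hash value selects a bucket of kEntriesPerBucket (map, name) keys.
// Lookup probes the bucket linearly; Update prefers a free slot (map == NULL)
// and otherwise shifts the existing entries down one position and writes the
// new entry at the front, so recently updated entries sit near slot 0.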
7773 int KeyedLookupCache::Lookup(Map* map, Name* name) {
7774 int index = (Hash(map, name) & kHashMask);
7775 for (int i = 0; i < kEntriesPerBucket; i++) {
7776 Key& key = keys_[index + i];
7777 if ((key.map == map) && key.name->Equals(name)) {
7778 return field_offsets_[index + i];
7785 void KeyedLookupCache::Update(Map* map, Name* name, int field_offset) {
7786 if (!name->IsUniqueName()) {
7787 String* internalized_string;
7788 if (!HEAP->InternalizeStringIfExists(
7789 String::cast(name), &internalized_string)) {
7792 name = internalized_string;
7794 // This cache is cleared only between mark compact passes, so we expect the
7795 // cache to only contain old space names.
7796 ASSERT(!HEAP->InNewSpace(name));
7798 int index = (Hash(map, name) & kHashMask);
7799 // After a GC there will be free slots, so we use them in order (this may
7800 // help to get the most frequently used one in position 0).
7801 for (int i = 0; i < kEntriesPerBucket; i++) {
7802 Key& key = keys_[index];
7803 Object* free_entry_indicator = NULL;
7804 if (key.map == free_entry_indicator) {
7807 field_offsets_[index + i] = field_offset;
7811 // No free entry found in this bucket, so we move them all down one and
7812 // put the new entry at position zero.
7813 for (int i = kEntriesPerBucket - 1; i > 0; i--) {
7814 Key& key = keys_[index + i];
7815 Key& key2 = keys_[index + i - 1];
7817 field_offsets_[index + i] = field_offsets_[index + i - 1];
7820 // Write the new first entry.
7821 Key& key = keys_[index];
7824 field_offsets_[index] = field_offset;
7828 void KeyedLookupCache::Clear() {
7829 for (int index = 0; index < kLength; index++) keys_[index].map = NULL;
7833 void DescriptorLookupCache::Clear() {
7834 for (int index = 0; index < kLength; index++) keys_[index].source = NULL;
7839 void Heap::GarbageCollectionGreedyCheck() {
7840 ASSERT(FLAG_gc_greedy);
7841 if (isolate_->bootstrapper()->IsActive()) return;
7842 if (disallow_allocation_failure()) return;
7843 CollectGarbage(NEW_SPACE);
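// TranscendentalCache sub-caches are initialized with a NaN bit pattern that
// the FPU never produces, so an untouched entry cannot accidentally match a
// genuine lookup key.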
7848 TranscendentalCache::SubCache::SubCache(Type t)
7850 isolate_(Isolate::Current()) {
7851 uint32_t in0 = 0xffffffffu; // Bit-pattern for a NaN that isn't
7852 uint32_t in1 = 0xffffffffu; // generated by the FPU.
7853 for (int i = 0; i < kCacheSize; i++) {
7854 elements_[i].in[0] = in0;
7855 elements_[i].in[1] = in1;
7856 elements_[i].output = NULL;
7861 void TranscendentalCache::Clear() {
7862 for (int i = 0; i < kNumberOfCaches; i++) {
7863 if (caches_[i] != NULL) {
7871 void ExternalStringTable::CleanUp() {
7873 for (int i = 0; i < new_space_strings_.length(); ++i) {
7874 if (new_space_strings_[i] == heap_->the_hole_value()) {
7877 if (heap_->InNewSpace(new_space_strings_[i])) {
7878 new_space_strings_[last++] = new_space_strings_[i];
7880 old_space_strings_.Add(new_space_strings_[i]);
7883 new_space_strings_.Rewind(last);
7884 new_space_strings_.Trim();
7887 for (int i = 0; i < old_space_strings_.length(); ++i) {
7888 if (old_space_strings_[i] == heap_->the_hole_value()) {
7891 ASSERT(!heap_->InNewSpace(old_space_strings_[i]));
7892 old_space_strings_[last++] = old_space_strings_[i];
7894 old_space_strings_.Rewind(last);
7895 old_space_strings_.Trim();
7897 if (FLAG_verify_heap) {
7904 void ExternalStringTable::TearDown() {
7905 new_space_strings_.Free();
7906 old_space_strings_.Free();
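// Rather than unmapping memory chunks immediately, the heap threads them onto
// a singly linked list (via set_next_chunk); FreeQueuedChunks later filters
// any store buffer entries that still point into them and only then hands the
// chunks back to the memory allocator.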
7910 void Heap::QueueMemoryChunkForFree(MemoryChunk* chunk) {
7911 chunk->set_next_chunk(chunks_queued_for_free_);
7912 chunks_queued_for_free_ = chunk;
7916 void Heap::FreeQueuedChunks() {
7917 if (chunks_queued_for_free_ == NULL) return;
7920 for (chunk = chunks_queued_for_free_; chunk != NULL; chunk = next) {
7921 next = chunk->next_chunk();
7922 chunk->SetFlag(MemoryChunk::ABOUT_TO_BE_FREED);
7924 if (chunk->owner()->identity() == LO_SPACE) {
7925 // StoreBuffer::Filter relies on MemoryChunk::FromAnyPointerAddress.
7926 // If FromAnyPointerAddress encounters a slot that belongs to a large
7927 // chunk queued for deletion, it will fail to find the chunk because
7928 // it tries to perform a search in the list of pages owned by the large
7929 // object space, but the queued chunks were detached from that list.
7930 // To work around this we split the large chunk into normal, kPageSize-aligned
7931 // pieces and initialize the size, owner and flags fields of every piece.
7932 // If FromAnyPointerAddress encounters a slot that belongs to one of
7933 // these smaller pieces, it will treat it as a slot on a normal Page.
7934 Address chunk_end = chunk->address() + chunk->size();
7935 MemoryChunk* inner = MemoryChunk::FromAddress(
7936 chunk->address() + Page::kPageSize);
7937 MemoryChunk* inner_last = MemoryChunk::FromAddress(chunk_end - 1);
7938 while (inner <= inner_last) {
7939 // The size of a large chunk is always a multiple of
7940 // OS::AllocateAlignment(), so there is always
7941 // enough space for a fake MemoryChunk header.
7942 Address area_end = Min(inner->address() + Page::kPageSize, chunk_end);
7943 // Guard against overflow.
7944 if (area_end < inner->address()) area_end = chunk_end;
7945 inner->SetArea(inner->address(), area_end);
7946 inner->set_size(Page::kPageSize);
7947 inner->set_owner(lo_space());
7948 inner->SetFlag(MemoryChunk::ABOUT_TO_BE_FREED);
7949 inner = MemoryChunk::FromAddress(
7950 inner->address() + Page::kPageSize);
7954 isolate_->heap()->store_buffer()->Compact();
7955 isolate_->heap()->store_buffer()->Filter(MemoryChunk::ABOUT_TO_BE_FREED);
7956 for (chunk = chunks_queued_for_free_; chunk != NULL; chunk = next) {
7957 next = chunk->next_chunk();
7958 isolate_->memory_allocator()->Free(chunk);
7960 chunks_queued_for_free_ = NULL;
7964 void Heap::RememberUnmappedPage(Address page, bool compacted) {
7965 uintptr_t p = reinterpret_cast<uintptr_t>(page);
7966 // Tag the page pointer to make it findable in the dump file.
7967 if (compacted) {
7968 p ^= 0xc1ead & (Page::kPageSize - 1); // Cleared.
7969 } else {
7970 p ^= 0x1d1ed & (Page::kPageSize - 1); // I died.
7971 }
7972 remembered_unmapped_pages_[remembered_unmapped_pages_index_] =
7973 reinterpret_cast<Address>(p);
7974 remembered_unmapped_pages_index_++;
7975 remembered_unmapped_pages_index_ %= kRememberedUnmappedPages;
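// remembered_unmapped_pages_ is a small ring buffer (kRememberedUnmappedPages
// entries) of tagged addresses of recently unmapped pages, kept purely so the
// values can be spotted in a crash dump.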
7979 void Heap::ClearObjectStats(bool clear_last_time_stats) {
7980 memset(object_counts_, 0, sizeof(object_counts_));
7981 memset(object_sizes_, 0, sizeof(object_sizes_));
7982 if (clear_last_time_stats) {
7983 memset(object_counts_last_time_, 0, sizeof(object_counts_last_time_));
7984 memset(object_sizes_last_time_, 0, sizeof(object_sizes_last_time_));
7989 static LazyMutex checkpoint_object_stats_mutex = LAZY_MUTEX_INITIALIZER;
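// CheckpointObjectStats publishes the per-type object counts and sizes
// gathered since the previous checkpoint: for every instance type, code kind
// and fixed-array sub-type it increments the isolate's counters by the new
// totals, decrements them by the previous snapshot, and then copies the
// current arrays into the *_last_time_ snapshots.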
7992 void Heap::CheckpointObjectStats() {
7993 ScopedLock lock(checkpoint_object_stats_mutex.Pointer());
7994 Counters* counters = isolate()->counters();
7995 #define ADJUST_LAST_TIME_OBJECT_COUNT(name) \
7996 counters->count_of_##name()->Increment( \
7997 static_cast<int>(object_counts_[name])); \
7998 counters->count_of_##name()->Decrement( \
7999 static_cast<int>(object_counts_last_time_[name])); \
8000 counters->size_of_##name()->Increment( \
8001 static_cast<int>(object_sizes_[name])); \
8002 counters->size_of_##name()->Decrement( \
8003 static_cast<int>(object_sizes_last_time_[name]));
8004 INSTANCE_TYPE_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
8005 #undef ADJUST_LAST_TIME_OBJECT_COUNT
8007 #define ADJUST_LAST_TIME_OBJECT_COUNT(name) \
8008 index = FIRST_CODE_KIND_SUB_TYPE + Code::name; \
8009 counters->count_of_CODE_TYPE_##name()->Increment( \
8010 static_cast<int>(object_counts_[index])); \
8011 counters->count_of_CODE_TYPE_##name()->Decrement( \
8012 static_cast<int>(object_counts_last_time_[index])); \
8013 counters->size_of_CODE_TYPE_##name()->Increment( \
8014 static_cast<int>(object_sizes_[index])); \
8015 counters->size_of_CODE_TYPE_##name()->Decrement( \
8016 static_cast<int>(object_sizes_last_time_[index]));
8017 CODE_KIND_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
8018 #undef ADJUST_LAST_TIME_OBJECT_COUNT
8019 #define ADJUST_LAST_TIME_OBJECT_COUNT(name) \
8020 index = FIRST_FIXED_ARRAY_SUB_TYPE + name; \
8021 counters->count_of_FIXED_ARRAY_##name()->Increment( \
8022 static_cast<int>(object_counts_[index])); \
8023 counters->count_of_FIXED_ARRAY_##name()->Decrement( \
8024 static_cast<int>(object_counts_last_time_[index])); \
8025 counters->size_of_FIXED_ARRAY_##name()->Increment( \
8026 static_cast<int>(object_sizes_[index])); \
8027 counters->size_of_FIXED_ARRAY_##name()->Decrement( \
8028 static_cast<int>(object_sizes_last_time_[index]));
8029 FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
8030 #undef ADJUST_LAST_TIME_OBJECT_COUNT
8032 OS::MemCopy(object_counts_last_time_, object_counts_, sizeof(object_counts_));
8033 OS::MemCopy(object_sizes_last_time_, object_sizes_, sizeof(object_sizes_));
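// RelocationLock guards object relocation against the concurrent optimizing
// compiler thread: when parallel recompilation is enabled it acquires
// relocation_mutex_ and remembers whether the optimizer thread was the one
// that took it.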
8038 Heap::RelocationLock::RelocationLock(Heap* heap) : heap_(heap) {
8039 if (FLAG_parallel_recompilation) {
8040 heap_->relocation_mutex_->Lock();
8042 heap_->relocation_mutex_locked_by_optimizer_thread_ =
8043 heap_->isolate()->optimizing_compiler_thread()->IsOptimizerThread();
8048 } } // namespace v8::internal