1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission.
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 #include "accessors.h"
32 #include "bootstrapper.h"
34 #include "compilation-cache.h"
35 #include "cpu-profiler.h"
37 #include "deoptimizer.h"
38 #include "global-handles.h"
39 #include "heap-profiler.h"
40 #include "incremental-marking.h"
41 #include "mark-compact.h"
43 #include "objects-visiting.h"
44 #include "objects-visiting-inl.h"
46 #include "runtime-profiler.h"
47 #include "scopeinfo.h"
49 #include "store-buffer.h"
50 #include "v8threads.h"
52 #include "vm-state-inl.h"
53 #if V8_TARGET_ARCH_ARM && !V8_INTERPRETED_REGEXP
54 #include "regexp-macro-assembler.h"
55 #include "arm/regexp-macro-assembler-arm.h"
57 #if V8_TARGET_ARCH_MIPS && !V8_INTERPRETED_REGEXP
58 #include "regexp-macro-assembler.h"
59 #include "mips/regexp-macro-assembler-mips.h"
68 // semispace_size_ should be a power of 2 and old_generation_size_ should be
69 // a multiple of Page::kPageSize.
70 #if V8_TARGET_ARCH_X64
71 #define LUMP_OF_MEMORY (2 * MB)
72 code_range_size_(512*MB),
74 #define LUMP_OF_MEMORY MB
77 #if defined(ANDROID) || V8_TARGET_ARCH_MIPS
78 reserved_semispace_size_(4 * Max(LUMP_OF_MEMORY, Page::kPageSize)),
79 max_semispace_size_(4 * Max(LUMP_OF_MEMORY, Page::kPageSize)),
80 initial_semispace_size_(Page::kPageSize),
81 max_old_generation_size_(192*MB),
82 max_executable_size_(max_old_generation_size_),
84 reserved_semispace_size_(8 * Max(LUMP_OF_MEMORY, Page::kPageSize)),
85 max_semispace_size_(8 * Max(LUMP_OF_MEMORY, Page::kPageSize)),
86 initial_semispace_size_(Page::kPageSize),
87 max_old_generation_size_(700ul * LUMP_OF_MEMORY),
88 max_executable_size_(256l * LUMP_OF_MEMORY),
91 // Variables set based on semispace_size_ and old_generation_size_ in
92 // ConfigureHeap (survived_since_last_expansion_, external_allocation_limit_).
93 // Will be 4 * reserved_semispace_size_ to ensure that the young
94 // generation can be aligned to its size.
95 survived_since_last_expansion_(0),
97 always_allocate_scope_depth_(0),
98 linear_allocation_scope_depth_(0),
99 contexts_disposed_(0),
101 flush_monomorphic_ics_(false),
102 scan_on_scavenge_pages_(0),
104 old_pointer_space_(NULL),
105 old_data_space_(NULL),
109 property_cell_space_(NULL),
111 gc_state_(NOT_IN_GC),
112 gc_post_processing_depth_(0),
115 remembered_unmapped_pages_index_(0),
116 unflattened_strings_length_(0),
118 allocation_timeout_(0),
119 disallow_allocation_failure_(false),
121 new_space_high_promotion_mode_active_(false),
122 old_generation_allocation_limit_(kMinimumOldGenerationAllocationLimit),
123 size_of_old_gen_at_last_old_space_gc_(0),
124 external_allocation_limit_(0),
125 amount_of_external_allocated_memory_(0),
126 amount_of_external_allocated_memory_at_last_global_gc_(0),
127 old_gen_exhausted_(false),
128 store_buffer_rebuilder_(store_buffer()),
129 hidden_string_(NULL),
130 global_gc_prologue_callback_(NULL),
131 global_gc_epilogue_callback_(NULL),
132 gc_safe_size_of_old_object_(NULL),
133 total_regexp_code_generated_(0),
135 young_survivors_after_last_gc_(0),
136 high_survival_rate_period_length_(0),
137 low_survival_rate_period_length_(0),
139 previous_survival_rate_trend_(Heap::STABLE),
140 survival_rate_trend_(Heap::STABLE),
142 total_gc_time_ms_(0.0),
143 max_alive_after_gc_(0),
144 min_in_mutator_(kMaxInt),
145 alive_after_last_gc_(0),
146 last_gc_end_timestamp_(0.0),
151 incremental_marking_(this),
152 number_idle_notifications_(0),
153 last_idle_notification_gc_count_(0),
154 last_idle_notification_gc_count_init_(false),
155 mark_sweeps_since_idle_round_started_(0),
156 gc_count_at_last_idle_gc_(0),
157 scavenges_since_last_idle_round_(kIdleScavengeThreshold),
158 gcs_since_last_deopt_(0),
160 no_weak_embedded_maps_verification_scope_depth_(0),
162 promotion_queue_(this),
164 chunks_queued_for_free_(NULL),
165 relocation_mutex_(NULL) {
166 // Allow build-time customization of the max semispace size. Building
167 // V8 with snapshots and a non-default max semispace size is much
168 // easier if you can define it as part of the build environment.
169 #if defined(V8_MAX_SEMISPACE_SIZE)
170 max_semispace_size_ = reserved_semispace_size_ = V8_MAX_SEMISPACE_SIZE;
173 intptr_t max_virtual = OS::MaxVirtualMemory();
175 if (max_virtual > 0) {
176 if (code_range_size_ > 0) {
177 // Reserve no more than 1/8 of the memory for the code range.
178 code_range_size_ = Min(code_range_size_, max_virtual >> 3);
182 memset(roots_, 0, sizeof(roots_[0]) * kRootListLength);
183 native_contexts_list_ = NULL;
184 array_buffers_list_ = Smi::FromInt(0);
185 allocation_sites_list_ = Smi::FromInt(0);
186 mark_compact_collector_.heap_ = this;
187 external_string_table_.heap_ = this;
188 // Put a dummy entry in the remembered pages so we can find the list in
189 // the minidump even if there are no real unmapped pages.
190 RememberUnmappedPage(NULL, false);
192 ClearObjectStats(true);
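// Returns the reserved capacity of every space that makes up the heap,
// excluding the large object space.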
196 intptr_t Heap::Capacity() {
197 if (!HasBeenSetUp()) return 0;
199 return new_space_.Capacity() +
200 old_pointer_space_->Capacity() +
201 old_data_space_->Capacity() +
202 code_space_->Capacity() +
203 map_space_->Capacity() +
204 cell_space_->Capacity() +
205 property_cell_space_->Capacity();
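// Returns the amount of memory currently committed for the heap, including
// the large object space.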
209 intptr_t Heap::CommittedMemory() {
210 if (!HasBeenSetUp()) return 0;
212 return new_space_.CommittedMemory() +
213 old_pointer_space_->CommittedMemory() +
214 old_data_space_->CommittedMemory() +
215 code_space_->CommittedMemory() +
216 map_space_->CommittedMemory() +
217 cell_space_->CommittedMemory() +
218 property_cell_space_->CommittedMemory() +
223 size_t Heap::CommittedPhysicalMemory() {
224 if (!HasBeenSetUp()) return 0;
226 return new_space_.CommittedPhysicalMemory() +
227 old_pointer_space_->CommittedPhysicalMemory() +
228 old_data_space_->CommittedPhysicalMemory() +
229 code_space_->CommittedPhysicalMemory() +
230 map_space_->CommittedPhysicalMemory() +
231 cell_space_->CommittedPhysicalMemory() +
232 property_cell_space_->CommittedPhysicalMemory() +
233 lo_space_->CommittedPhysicalMemory();
237 intptr_t Heap::CommittedMemoryExecutable() {
238 if (!HasBeenSetUp()) return 0;
240 return isolate()->memory_allocator()->SizeExecutable();
244 intptr_t Heap::Available() {
245 if (!HasBeenSetUp()) return 0;
247 return new_space_.Available() +
248 old_pointer_space_->Available() +
249 old_data_space_->Available() +
250 code_space_->Available() +
251 map_space_->Available() +
252 cell_space_->Available() +
253 property_cell_space_->Available();
257 bool Heap::HasBeenSetUp() {
258 return old_pointer_space_ != NULL &&
259 old_data_space_ != NULL &&
260 code_space_ != NULL &&
261 map_space_ != NULL &&
262 cell_space_ != NULL &&
263 property_cell_space_ != NULL &&
268 int Heap::GcSafeSizeOfOldObject(HeapObject* object) {
269 if (IntrusiveMarking::IsMarked(object)) {
270 return IntrusiveMarking::SizeOfMarkedObject(object);
272 return object->SizeFromMap(object->map());
276 GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space,
277 const char** reason) {
278 // Is global GC requested?
279 if (space != NEW_SPACE) {
280 isolate_->counters()->gc_compactor_caused_by_request()->Increment();
281 *reason = "GC in old space requested";
282 return MARK_COMPACTOR;
285 if (FLAG_gc_global || (FLAG_stress_compaction && (gc_count_ & 1) != 0)) {
286 *reason = "GC in old space forced by flags";
287 return MARK_COMPACTOR;
290 // Is enough data promoted to justify a global GC?
291 if (OldGenerationAllocationLimitReached()) {
292 isolate_->counters()->gc_compactor_caused_by_promoted_data()->Increment();
293 *reason = "promotion limit reached";
294 return MARK_COMPACTOR;
297 // Has allocation in OLD and LO failed?
298 if (old_gen_exhausted_) {
299 isolate_->counters()->
300 gc_compactor_caused_by_oldspace_exhaustion()->Increment();
301 *reason = "old generations exhausted";
302 return MARK_COMPACTOR;
305 // Is there enough space left in OLD to guarantee that a scavenge can succeed?
308 // Note that MemoryAllocator->MaxAvailable() undercounts the memory available
309 // for object promotion. It counts only the bytes that the memory
310 // allocator has not yet allocated from the OS and assigned to any space,
311 // and does not count available bytes already in the old space or code
312 // space. Undercounting is safe---we may get an unrequested full GC when
313 // a scavenge would have succeeded.
314 if (isolate_->memory_allocator()->MaxAvailable() <= new_space_.Size()) {
315 isolate_->counters()->
316 gc_compactor_caused_by_oldspace_exhaustion()->Increment();
317 *reason = "scavenge might not succeed";
318 return MARK_COMPACTOR;
327 // TODO(1238405): Combine the infrastructure for --heap-stats and
328 // --log-gc to avoid the complicated preprocessor and flag testing.
329 void Heap::ReportStatisticsBeforeGC() {
330 // Heap::ReportHeapStatistics will also log NewSpace statistics when
331 // --log-gc is set. The following logic is used to avoid double logging.
334 if (FLAG_heap_stats || FLAG_log_gc) new_space_.CollectStatistics();
335 if (FLAG_heap_stats) {
336 ReportHeapStatistics("Before GC");
337 } else if (FLAG_log_gc) {
338 new_space_.ReportStatistics();
340 if (FLAG_heap_stats || FLAG_log_gc) new_space_.ClearHistograms();
343 new_space_.CollectStatistics();
344 new_space_.ReportStatistics();
345 new_space_.ClearHistograms();
351 void Heap::PrintShortHeapStatistics() {
352 if (!FLAG_trace_gc_verbose) return;
353 PrintPID("Memory allocator, used: %6" V8_PTR_PREFIX "d KB"
354 ", available: %6" V8_PTR_PREFIX "d KB\n",
355 isolate_->memory_allocator()->Size() / KB,
356 isolate_->memory_allocator()->Available() / KB);
357 PrintPID("New space, used: %6" V8_PTR_PREFIX "d KB"
358 ", available: %6" V8_PTR_PREFIX "d KB"
359 ", committed: %6" V8_PTR_PREFIX "d KB\n",
360 new_space_.Size() / KB,
361 new_space_.Available() / KB,
362 new_space_.CommittedMemory() / KB);
363 PrintPID("Old pointers, used: %6" V8_PTR_PREFIX "d KB"
364 ", available: %6" V8_PTR_PREFIX "d KB"
365 ", committed: %6" V8_PTR_PREFIX "d KB\n",
366 old_pointer_space_->SizeOfObjects() / KB,
367 old_pointer_space_->Available() / KB,
368 old_pointer_space_->CommittedMemory() / KB);
369 PrintPID("Old data space, used: %6" V8_PTR_PREFIX "d KB"
370 ", available: %6" V8_PTR_PREFIX "d KB"
371 ", committed: %6" V8_PTR_PREFIX "d KB\n",
372 old_data_space_->SizeOfObjects() / KB,
373 old_data_space_->Available() / KB,
374 old_data_space_->CommittedMemory() / KB);
375 PrintPID("Code space, used: %6" V8_PTR_PREFIX "d KB"
376 ", available: %6" V8_PTR_PREFIX "d KB"
377 ", committed: %6" V8_PTR_PREFIX "d KB\n",
378 code_space_->SizeOfObjects() / KB,
379 code_space_->Available() / KB,
380 code_space_->CommittedMemory() / KB);
381 PrintPID("Map space, used: %6" V8_PTR_PREFIX "d KB"
382 ", available: %6" V8_PTR_PREFIX "d KB"
383 ", committed: %6" V8_PTR_PREFIX "d KB\n",
384 map_space_->SizeOfObjects() / KB,
385 map_space_->Available() / KB,
386 map_space_->CommittedMemory() / KB);
387 PrintPID("Cell space, used: %6" V8_PTR_PREFIX "d KB"
388 ", available: %6" V8_PTR_PREFIX "d KB"
389 ", committed: %6" V8_PTR_PREFIX "d KB\n",
390 cell_space_->SizeOfObjects() / KB,
391 cell_space_->Available() / KB,
392 cell_space_->CommittedMemory() / KB);
393 PrintPID("PropertyCell space, used: %6" V8_PTR_PREFIX "d KB"
394 ", available: %6" V8_PTR_PREFIX "d KB"
395 ", committed: %6" V8_PTR_PREFIX "d KB\n",
396 property_cell_space_->SizeOfObjects() / KB,
397 property_cell_space_->Available() / KB,
398 property_cell_space_->CommittedMemory() / KB);
399 PrintPID("Large object space, used: %6" V8_PTR_PREFIX "d KB"
400 ", available: %6" V8_PTR_PREFIX "d KB"
401 ", committed: %6" V8_PTR_PREFIX "d KB\n",
402 lo_space_->SizeOfObjects() / KB,
403 lo_space_->Available() / KB,
404 lo_space_->CommittedMemory() / KB);
405 PrintPID("All spaces, used: %6" V8_PTR_PREFIX "d KB"
406 ", available: %6" V8_PTR_PREFIX "d KB"
407 ", committed: %6" V8_PTR_PREFIX "d KB\n",
408 this->SizeOfObjects() / KB,
409 this->Available() / KB,
410 this->CommittedMemory() / KB);
411 PrintPID("External memory reported: %6" V8_PTR_PREFIX "d KB\n",
412 amount_of_external_allocated_memory_ / KB);
413 PrintPID("Total time spent in GC : %.1f ms\n", total_gc_time_ms_);
417 // TODO(1238405): Combine the infrastructure for --heap-stats and
418 // --log-gc to avoid the complicated preprocessor and flag testing.
419 void Heap::ReportStatisticsAfterGC() {
420 // As with the pre-GC report, we use some complicated logic to ensure that
421 // NewSpace statistics are logged exactly once when --log-gc is turned on.
423 if (FLAG_heap_stats) {
424 new_space_.CollectStatistics();
425 ReportHeapStatistics("After GC");
426 } else if (FLAG_log_gc) {
427 new_space_.ReportStatistics();
430 if (FLAG_log_gc) new_space_.ReportStatistics();
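// Work performed at the start of every collection: caches are cleared,
// per-GC counters are reset, and (with --verify-heap) the heap is verified
// before any objects are moved.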
435 void Heap::GarbageCollectionPrologue() {
436 { AllowHeapAllocation for_the_first_part_of_prologue;
437 isolate_->transcendental_cache()->Clear();
438 ClearJSFunctionResultCaches();
440 unflattened_strings_length_ = 0;
442 if (FLAG_flush_code && FLAG_flush_code_incrementally) {
443 mark_compact_collector()->EnableCodeFlushing(true);
447 if (FLAG_verify_heap) {
454 ASSERT(!AllowHeapAllocation::IsAllowed() && gc_state_ == NOT_IN_GC);
456 if (FLAG_gc_verbose) Print();
458 ReportStatisticsBeforeGC();
461 store_buffer()->GCPrologue();
465 intptr_t Heap::SizeOfObjects() {
467 AllSpaces spaces(this);
468 for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
469 total += space->SizeOfObjects();
475 void Heap::RepairFreeListsAfterBoot() {
476 PagedSpaces spaces(this);
477 for (PagedSpace* space = spaces.next();
479 space = spaces.next()) {
480 space->RepairFreeListsAfterBoot();
485 void Heap::GarbageCollectionEpilogue() {
486 store_buffer()->GCEpilogue();
488 // In release mode, we only zap the from space under heap verification.
489 if (Heap::ShouldZapGarbage()) {
494 if (FLAG_verify_heap) {
499 AllowHeapAllocation for_the_rest_of_the_epilogue;
502 if (FLAG_print_global_handles) isolate_->global_handles()->Print();
503 if (FLAG_print_handles) PrintHandles();
504 if (FLAG_gc_verbose) Print();
505 if (FLAG_code_stats) ReportCodeStatistics("After GC");
507 if (FLAG_deopt_every_n_garbage_collections > 0) {
508 if (++gcs_since_last_deopt_ == FLAG_deopt_every_n_garbage_collections) {
509 Deoptimizer::DeoptimizeAll(isolate());
510 gcs_since_last_deopt_ = 0;
514 isolate_->counters()->alive_after_last_gc()->Set(
515 static_cast<int>(SizeOfObjects()));
517 isolate_->counters()->string_table_capacity()->Set(
518 string_table()->Capacity());
519 isolate_->counters()->number_of_symbols()->Set(
520 string_table()->NumberOfElements());
522 if (CommittedMemory() > 0) {
523 isolate_->counters()->external_fragmentation_total()->AddSample(
524 static_cast<int>(100 - (SizeOfObjects() * 100.0) / CommittedMemory()));
526 isolate_->counters()->heap_fraction_map_space()->AddSample(
528 (map_space()->CommittedMemory() * 100.0) / CommittedMemory()));
529 isolate_->counters()->heap_fraction_cell_space()->AddSample(
531 (cell_space()->CommittedMemory() * 100.0) / CommittedMemory()));
532 isolate_->counters()->heap_fraction_property_cell_space()->
533 AddSample(static_cast<int>(
534 (property_cell_space()->CommittedMemory() * 100.0) /
537 isolate_->counters()->heap_sample_total_committed()->AddSample(
538 static_cast<int>(CommittedMemory() / KB));
539 isolate_->counters()->heap_sample_total_used()->AddSample(
540 static_cast<int>(SizeOfObjects() / KB));
541 isolate_->counters()->heap_sample_map_space_committed()->AddSample(
542 static_cast<int>(map_space()->CommittedMemory() / KB));
543 isolate_->counters()->heap_sample_cell_space_committed()->AddSample(
544 static_cast<int>(cell_space()->CommittedMemory() / KB));
545 isolate_->counters()->
546 heap_sample_property_cell_space_committed()->
547 AddSample(static_cast<int>(
548 property_cell_space()->CommittedMemory() / KB));
551 #define UPDATE_COUNTERS_FOR_SPACE(space) \
552 isolate_->counters()->space##_bytes_available()->Set( \
553 static_cast<int>(space()->Available())); \
554 isolate_->counters()->space##_bytes_committed()->Set( \
555 static_cast<int>(space()->CommittedMemory())); \
556 isolate_->counters()->space##_bytes_used()->Set( \
557 static_cast<int>(space()->SizeOfObjects()));
558 #define UPDATE_FRAGMENTATION_FOR_SPACE(space) \
559 if (space()->CommittedMemory() > 0) { \
560 isolate_->counters()->external_fragmentation_##space()->AddSample( \
561 static_cast<int>(100 - \
562 (space()->SizeOfObjects() * 100.0) / space()->CommittedMemory())); \
564 #define UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(space) \
565 UPDATE_COUNTERS_FOR_SPACE(space) \
566 UPDATE_FRAGMENTATION_FOR_SPACE(space)
568 UPDATE_COUNTERS_FOR_SPACE(new_space)
569 UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(old_pointer_space)
570 UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(old_data_space)
571 UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(code_space)
572 UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(map_space)
573 UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(cell_space)
574 UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(property_cell_space)
575 UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(lo_space)
576 #undef UPDATE_COUNTERS_FOR_SPACE
577 #undef UPDATE_FRAGMENTATION_FOR_SPACE
578 #undef UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE
581 ReportStatisticsAfterGC();
583 #ifdef ENABLE_DEBUGGER_SUPPORT
584 isolate_->debug()->AfterGarbageCollection();
585 #endif // ENABLE_DEBUGGER_SUPPORT
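// Performs a full mark-compact collection with the given collector flags;
// the flags are reset to kNoGCFlags once the collection has finished.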
589 void Heap::CollectAllGarbage(int flags, const char* gc_reason) {
590 // Since we are ignoring the return value, the exact choice of space does
591 // not matter, so long as we do not specify NEW_SPACE, which would not cause a full GC.
593 mark_compact_collector_.SetFlags(flags);
594 CollectGarbage(OLD_POINTER_SPACE, gc_reason);
595 mark_compact_collector_.SetFlags(kNoGCFlags);
599 void Heap::CollectAllAvailableGarbage(const char* gc_reason) {
600 // Since we are ignoring the return value, the exact choice of space does
601 // not matter, so long as we do not specify NEW_SPACE, which would not cause a full GC.
603 // Major GC would invoke weak handle callbacks on weakly reachable
604 // handles, but won't collect weakly reachable objects until the next
605 // major GC. Therefore, if we collect aggressively and a weak handle callback
606 // has been invoked, we rerun major GC to release objects which become garbage.
608 // Note: as weak callbacks can execute arbitrary code, we cannot
609 // hope that eventually there will be no weak callback invocations.
610 // Therefore stop recollecting after several attempts.
611 mark_compact_collector()->SetFlags(kMakeHeapIterableMask |
612 kReduceMemoryFootprintMask);
613 isolate_->compilation_cache()->Clear();
614 const int kMaxNumberOfAttempts = 7;
615 const int kMinNumberOfAttempts = 2;
616 for (int attempt = 0; attempt < kMaxNumberOfAttempts; attempt++) {
617 if (!CollectGarbage(OLD_POINTER_SPACE, MARK_COMPACTOR, gc_reason, NULL) &&
618 attempt + 1 >= kMinNumberOfAttempts) {
622 mark_compact_collector()->SetFlags(kNoGCFlags);
625 incremental_marking()->UncommitMarkingDeque();
629 bool Heap::CollectGarbage(AllocationSpace space,
630 GarbageCollector collector,
631 const char* gc_reason,
632 const char* collector_reason) {
633 // The VM is in the GC state until exiting this function.
634 VMState<GC> state(isolate_);
637 // Reset the allocation timeout to the GC interval, but make sure to
638 // allow at least a few allocations after a collection. The reason
639 // for this is that we have a lot of allocation sequences and we
640 // assume that a garbage collection will allow the subsequent
641 // allocation attempts to go through.
642 allocation_timeout_ = Max(6, FLAG_gc_interval);
645 if (collector == SCAVENGER && !incremental_marking()->IsStopped()) {
646 if (FLAG_trace_incremental_marking) {
647 PrintF("[IncrementalMarking] Scavenge during marking.\n");
651 if (collector == MARK_COMPACTOR &&
652 !mark_compact_collector()->abort_incremental_marking() &&
653 !incremental_marking()->IsStopped() &&
654 !incremental_marking()->should_hurry() &&
655 FLAG_incremental_marking_steps) {
656 // Make progress in incremental marking.
657 const intptr_t kStepSizeWhenDelayedByScavenge = 1 * MB;
658 incremental_marking()->Step(kStepSizeWhenDelayedByScavenge,
659 IncrementalMarking::NO_GC_VIA_STACK_GUARD);
660 if (!incremental_marking()->IsComplete()) {
661 if (FLAG_trace_incremental_marking) {
662 PrintF("[IncrementalMarking] Delaying MarkSweep.\n");
664 collector = SCAVENGER;
665 collector_reason = "incremental marking delaying mark-sweep";
669 bool next_gc_likely_to_collect_more = false;
671 { GCTracer tracer(this, gc_reason, collector_reason);
672 ASSERT(AllowHeapAllocation::IsAllowed());
673 DisallowHeapAllocation no_allocation_during_gc;
674 GarbageCollectionPrologue();
675 // The GC count was incremented in the prologue. Tell the tracer about it.
677 tracer.set_gc_count(gc_count_);
679 // Tell the tracer which collector we've selected.
680 tracer.set_collector(collector);
683 HistogramTimerScope histogram_timer_scope(
684 (collector == SCAVENGER) ? isolate_->counters()->gc_scavenger()
685 : isolate_->counters()->gc_compactor());
686 next_gc_likely_to_collect_more =
687 PerformGarbageCollection(collector, &tracer);
690 GarbageCollectionEpilogue();
693 // Start incremental marking for the next cycle. The heap snapshot
694 // generator needs incremental marking to stay off after it aborted.
695 if (!mark_compact_collector()->abort_incremental_marking() &&
696 incremental_marking()->IsStopped() &&
697 incremental_marking()->WorthActivating() &&
698 NextGCIsLikelyToBeFull()) {
699 incremental_marking()->Start();
702 return next_gc_likely_to_collect_more;
706 void Heap::PerformScavenge() {
707 GCTracer tracer(this, NULL, NULL);
708 if (incremental_marking()->IsStopped()) {
709 PerformGarbageCollection(SCAVENGER, &tracer);
711 PerformGarbageCollection(MARK_COMPACTOR, &tracer);
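// Moves len elements of a FixedArray from src_index to dst_index. For arrays
// outside new space, destination slots that now hold new space objects are
// recorded for the store buffer, and the incremental marker is notified of
// the writes.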
716 void Heap::MoveElements(FixedArray* array,
720 if (len == 0) return;
722 ASSERT(array->map() != HEAP->fixed_cow_array_map());
723 Object** dst_objects = array->data_start() + dst_index;
724 OS::MemMove(dst_objects,
725 array->data_start() + src_index,
727 if (!InNewSpace(array)) {
728 for (int i = 0; i < len; i++) {
729 // TODO(hpayer): check store buffer for entries
730 if (InNewSpace(dst_objects[i])) {
731 RecordWrite(array->address(), array->OffsetOfElementAt(dst_index + i));
735 incremental_marking()->RecordWrites(array);
740 // Helper class for verifying the string table.
741 class StringTableVerifier : public ObjectVisitor {
743 void VisitPointers(Object** start, Object** end) {
744 // Visit all HeapObject pointers in [start, end).
745 for (Object** p = start; p < end; p++) {
746 if ((*p)->IsHeapObject()) {
747 // Check that the string is actually internalized.
748 CHECK((*p)->IsTheHole() || (*p)->IsUndefined() ||
749 (*p)->IsInternalizedString());
756 static void VerifyStringTable() {
757 StringTableVerifier verifier;
758 HEAP->string_table()->IterateElements(&verifier);
760 #endif // VERIFY_HEAP
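// Aborts any incremental marking that is in progress and performs a
// collection in the given space, restoring the collector flags afterwards.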
763 static bool AbortIncrementalMarkingAndCollectGarbage(
765 AllocationSpace space,
766 const char* gc_reason = NULL) {
767 heap->mark_compact_collector()->SetFlags(Heap::kAbortIncrementalMarkingMask);
768 bool result = heap->CollectGarbage(space, gc_reason);
769 heap->mark_compact_collector()->SetFlags(Heap::kNoGCFlags);
774 void Heap::ReserveSpace(
776 Address *locations_out) {
777 bool gc_performed = true;
779 static const int kThreshold = 20;
780 while (gc_performed && counter++ < kThreshold) {
781 gc_performed = false;
782 ASSERT(NEW_SPACE == FIRST_PAGED_SPACE - 1);
783 for (int space = NEW_SPACE; space <= LAST_PAGED_SPACE; space++) {
784 if (sizes[space] != 0) {
785 MaybeObject* allocation;
786 if (space == NEW_SPACE) {
787 allocation = new_space()->AllocateRaw(sizes[space]);
789 allocation = paged_space(space)->AllocateRaw(sizes[space]);
792 if (!allocation->To<FreeListNode>(&node)) {
793 if (space == NEW_SPACE) {
794 Heap::CollectGarbage(NEW_SPACE,
795 "failed to reserve space in the new space");
797 AbortIncrementalMarkingAndCollectGarbage(
799 static_cast<AllocationSpace>(space),
800 "failed to reserve space in paged space");
805 // Mark with a free list node, in case we have a GC before allocating objects into it.
807 node->set_size(this, sizes[space]);
808 locations_out[space] = node->address();
815 // Failed to reserve the space after several attempts.
816 V8::FatalProcessOutOfMemory("Heap::ReserveSpace");
821 void Heap::EnsureFromSpaceIsCommitted() {
822 if (new_space_.CommitFromSpaceIfNeeded()) return;
824 // Committing memory to from space failed.
825 // Memory is exhausted and we will die.
826 V8::FatalProcessOutOfMemory("Committing semi space failed.");
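// Clears the JSFunction result caches of every native context. Skipped while
// the bootstrapper is active.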
830 void Heap::ClearJSFunctionResultCaches() {
831 if (isolate_->bootstrapper()->IsActive()) return;
833 Object* context = native_contexts_list_;
834 while (!context->IsUndefined()) {
835 // Get the caches for this context. GC can happen when the context
836 // is not fully initialized, so the caches can be undefined.
837 Object* caches_or_undefined =
838 Context::cast(context)->get(Context::JSFUNCTION_RESULT_CACHES_INDEX);
839 if (!caches_or_undefined->IsUndefined()) {
840 FixedArray* caches = FixedArray::cast(caches_or_undefined);
842 int length = caches->length();
843 for (int i = 0; i < length; i++) {
844 JSFunctionResultCache::cast(caches->get(i))->Clear();
847 // Get the next context:
848 context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
853 void Heap::ClearNormalizedMapCaches() {
854 if (isolate_->bootstrapper()->IsActive() &&
855 !incremental_marking()->IsMarking()) {
859 Object* context = native_contexts_list_;
860 while (!context->IsUndefined()) {
861 // GC can happen when the context is not fully initialized,
862 // so the cache can be undefined.
864 Context::cast(context)->get(Context::NORMALIZED_MAP_CACHE_INDEX);
865 if (!cache->IsUndefined()) {
866 NormalizedMapCache::cast(cache)->Clear();
868 context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
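// Recomputes the young generation survival rate after a scavenge and
// classifies the trend (INCREASING, DECREASING or STABLE) relative to the
// rate observed at the previous collection.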
873 void Heap::UpdateSurvivalRateTrend(int start_new_space_size) {
874 double survival_rate =
875 (static_cast<double>(young_survivors_after_last_gc_) * 100) /
876 start_new_space_size;
878 if (survival_rate > kYoungSurvivalRateHighThreshold) {
879 high_survival_rate_period_length_++;
881 high_survival_rate_period_length_ = 0;
884 if (survival_rate < kYoungSurvivalRateLowThreshold) {
885 low_survival_rate_period_length_++;
887 low_survival_rate_period_length_ = 0;
890 double survival_rate_diff = survival_rate_ - survival_rate;
892 if (survival_rate_diff > kYoungSurvivalRateAllowedDeviation) {
893 set_survival_rate_trend(DECREASING);
894 } else if (survival_rate_diff < -kYoungSurvivalRateAllowedDeviation) {
895 set_survival_rate_trend(INCREASING);
897 set_survival_rate_trend(STABLE);
900 survival_rate_ = survival_rate;
903 bool Heap::PerformGarbageCollection(GarbageCollector collector,
905 bool next_gc_likely_to_collect_more = false;
907 if (collector != SCAVENGER) {
908 PROFILE(isolate_, CodeMovingGCEvent());
912 if (FLAG_verify_heap) {
918 collector == MARK_COMPACTOR ? kGCTypeMarkSweepCompact : kGCTypeScavenge;
921 GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
922 VMState<EXTERNAL> state(isolate_);
923 HandleScope handle_scope(isolate_);
924 CallGCPrologueCallbacks(gc_type, kNoGCCallbackFlags);
927 EnsureFromSpaceIsCommitted();
929 int start_new_space_size = Heap::new_space()->SizeAsInt();
931 if (IsHighSurvivalRate()) {
932 // We speed up the incremental marker if it is running so that it
933 // does not fall behind the rate of promotion, which would cause a
934 // constantly growing old space.
935 incremental_marking()->NotifyOfHighPromotionRate();
938 if (collector == MARK_COMPACTOR) {
939 // Perform mark-sweep with optional compaction.
943 UpdateSurvivalRateTrend(start_new_space_size);
945 size_of_old_gen_at_last_old_space_gc_ = PromotedSpaceSizeOfObjects();
947 old_generation_allocation_limit_ =
948 OldGenerationAllocationLimit(size_of_old_gen_at_last_old_space_gc_);
950 old_gen_exhausted_ = false;
956 UpdateSurvivalRateTrend(start_new_space_size);
959 if (!new_space_high_promotion_mode_active_ &&
960 new_space_.Capacity() == new_space_.MaximumCapacity() &&
961 IsStableOrIncreasingSurvivalTrend() &&
962 IsHighSurvivalRate()) {
963 // Stable high survival rates even though the young generation is at
964 // maximum capacity indicate that most objects will be promoted.
965 // To decrease scavenger pauses and final mark-sweep pauses, we
966 // have to limit the maximal capacity of the young generation.
967 SetNewSpaceHighPromotionModeActive(true);
969 PrintPID("Limited new space size due to high promotion rate: %d MB\n",
970 new_space_.InitialCapacity() / MB);
972 // Support for global pre-tenuring uses the high promotion mode as a
973 // heuristic indicator of whether to pretenure or not, so we trigger
974 // deoptimization here to take advantage of pre-tenuring as soon as possible.
976 if (FLAG_pretenuring) {
977 isolate_->stack_guard()->FullDeopt();
979 } else if (new_space_high_promotion_mode_active_ &&
980 IsStableOrDecreasingSurvivalTrend() &&
981 IsLowSurvivalRate()) {
982 // Decreasing low survival rates might indicate that the above high
983 // promotion mode is over and we should allow the young generation to grow freely.
985 SetNewSpaceHighPromotionModeActive(false);
987 PrintPID("Unlimited new space size due to low promotion rate: %d MB\n",
988 new_space_.MaximumCapacity() / MB);
990 // Trigger deoptimization here to turn off pre-tenuring as soon as possible.
992 if (FLAG_pretenuring) {
993 isolate_->stack_guard()->FullDeopt();
997 if (new_space_high_promotion_mode_active_ &&
998 new_space_.Capacity() > new_space_.InitialCapacity()) {
1002 isolate_->counters()->objs_since_last_young()->Set(0);
1004 // Callbacks that fire after this point might trigger nested GCs and
1005 // restart incremental marking, so the assertion can't be moved down.
1006 ASSERT(collector == SCAVENGER || incremental_marking()->IsStopped());
1008 gc_post_processing_depth_++;
1009 { AllowHeapAllocation allow_allocation;
1010 GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
1011 next_gc_likely_to_collect_more =
1012 isolate_->global_handles()->PostGarbageCollectionProcessing(
1015 gc_post_processing_depth_--;
1017 isolate_->eternal_handles()->PostGarbageCollectionProcessing(this);
1019 // Update relocatables.
1020 Relocatable::PostGarbageCollectionProcessing();
1022 if (collector == MARK_COMPACTOR) {
1023 // Register the amount of external allocated memory.
1024 amount_of_external_allocated_memory_at_last_global_gc_ =
1025 amount_of_external_allocated_memory_;
1029 GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
1030 VMState<EXTERNAL> state(isolate_);
1031 HandleScope handle_scope(isolate_);
1032 CallGCEpilogueCallbacks(gc_type);
1036 if (FLAG_verify_heap) {
1037 VerifyStringTable();
1041 return next_gc_likely_to_collect_more;
1045 void Heap::CallGCPrologueCallbacks(GCType gc_type, GCCallbackFlags flags) {
1046 if (gc_type == kGCTypeMarkSweepCompact && global_gc_prologue_callback_) {
1047 global_gc_prologue_callback_();
1049 for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
1050 if (gc_type & gc_prologue_callbacks_[i].gc_type) {
1051 gc_prologue_callbacks_[i].callback(gc_type, flags);
1057 void Heap::CallGCEpilogueCallbacks(GCType gc_type) {
1058 for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
1059 if (gc_type & gc_epilogue_callbacks_[i].gc_type) {
1060 gc_epilogue_callbacks_[i].callback(gc_type, kNoGCCallbackFlags);
1063 if (gc_type == kGCTypeMarkSweepCompact && global_gc_epilogue_callback_) {
1064 global_gc_epilogue_callback_();
1069 void Heap::MarkCompact(GCTracer* tracer) {
1070 gc_state_ = MARK_COMPACT;
1071 LOG(isolate_, ResourceEvent("markcompact", "begin"));
1073 mark_compact_collector_.Prepare(tracer);
1076 tracer->set_full_gc_count(ms_count_);
1078 MarkCompactPrologue();
1080 mark_compact_collector_.CollectGarbage();
1082 LOG(isolate_, ResourceEvent("markcompact", "end"));
1084 gc_state_ = NOT_IN_GC;
1086 isolate_->counters()->objs_since_last_full()->Set(0);
1088 contexts_disposed_ = 0;
1090 flush_monomorphic_ics_ = false;
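// Clears caches whose entries would otherwise keep unused maps, code and
// cached results alive across a full collection.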
1094 void Heap::MarkCompactPrologue() {
1095 // At any old GC, clear the keyed lookup cache to enable collection of unused maps.
1097 isolate_->keyed_lookup_cache()->Clear();
1098 isolate_->context_slot_cache()->Clear();
1099 isolate_->descriptor_lookup_cache()->Clear();
1100 RegExpResultsCache::Clear(string_split_cache());
1101 RegExpResultsCache::Clear(regexp_multiple_cache());
1103 isolate_->compilation_cache()->MarkCompactPrologue();
1105 CompletelyClearInstanceofCache();
1107 FlushNumberStringCache();
1108 if (FLAG_cleanup_code_caches_at_gc) {
1109 polymorphic_code_cache()->set_cache(undefined_value());
1112 ClearNormalizedMapCaches();
1116 // Helper class for copying HeapObjects
1117 class ScavengeVisitor: public ObjectVisitor {
1119 explicit ScavengeVisitor(Heap* heap) : heap_(heap) {}
1121 void VisitPointer(Object** p) { ScavengePointer(p); }
1123 void VisitPointers(Object** start, Object** end) {
1124 // Copy all HeapObject pointers in [start, end)
1125 for (Object** p = start; p < end; p++) ScavengePointer(p);
1129 void ScavengePointer(Object** p) {
1130 Object* object = *p;
1131 if (!heap_->InNewSpace(object)) return;
1132 Heap::ScavengeObject(reinterpret_cast<HeapObject**>(p),
1133 reinterpret_cast<HeapObject*>(object));
1142 // Visitor class to verify that pointers in code or data space do not point into new space.
1143 class VerifyNonPointerSpacePointersVisitor: public ObjectVisitor {
1145 void VisitPointers(Object** start, Object** end) {
1146 for (Object** current = start; current < end; current++) {
1147 if ((*current)->IsHeapObject()) {
1148 CHECK(!HEAP->InNewSpace(HeapObject::cast(*current)));
1155 static void VerifyNonPointerSpacePointers() {
1156 // Verify that there are no pointers to new space in spaces where we
1157 // do not expect them.
1158 VerifyNonPointerSpacePointersVisitor v;
1159 HeapObjectIterator code_it(HEAP->code_space());
1160 for (HeapObject* object = code_it.Next();
1161 object != NULL; object = code_it.Next())
1162 object->Iterate(&v);
1164 // The old data space is normally swept conservatively, which means the
1165 // iterator does not work, so we normally skip the next bit.
1166 if (!HEAP->old_data_space()->was_swept_conservatively()) {
1167 HeapObjectIterator data_it(HEAP->old_data_space());
1168 for (HeapObject* object = data_it.Next();
1169 object != NULL; object = data_it.Next())
1170 object->Iterate(&v);
1173 #endif // VERIFY_HEAP
1176 void Heap::CheckNewSpaceExpansionCriteria() {
1177 if (new_space_.Capacity() < new_space_.MaximumCapacity() &&
1178 survived_since_last_expansion_ > new_space_.Capacity() &&
1179 !new_space_high_promotion_mode_active_) {
1180 // Grow the size of new space if there is room to grow, enough data
1181 // has survived scavenge since the last expansion and we are not in
1182 // high promotion mode.
1184 survived_since_last_expansion_ = 0;
1189 static bool IsUnscavengedHeapObject(Heap* heap, Object** p) {
1190 return heap->InNewSpace(*p) &&
1191 !HeapObject::cast(*p)->map_word().IsForwardingAddress();
1195 void Heap::ScavengeStoreBufferCallback(
1198 StoreBufferEvent event) {
1199 heap->store_buffer_rebuilder_.Callback(page, event);
1203 void StoreBufferRebuilder::Callback(MemoryChunk* page, StoreBufferEvent event) {
1204 if (event == kStoreBufferStartScanningPagesEvent) {
1205 start_of_current_page_ = NULL;
1206 current_page_ = NULL;
1207 } else if (event == kStoreBufferScanningPageEvent) {
1208 if (current_page_ != NULL) {
1209 // If this page already overflowed the store buffer during this iteration.
1210 if (current_page_->scan_on_scavenge()) {
1211 // Then we should wipe out the entries that have been added for it.
1212 store_buffer_->SetTop(start_of_current_page_);
1213 } else if (store_buffer_->Top() - start_of_current_page_ >=
1214 (store_buffer_->Limit() - store_buffer_->Top()) >> 2) {
1215 // Did we find too many pointers in the previous page? The heuristic is
1216 // that no page can take more than 1/5 of the remaining slots in the store buffer.
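// (The code compares against a quarter of the slots left above Top(); once
// the slots this page has already consumed are counted as well, that works
// out to one fifth of the slots that were free when scanning the page began.)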
1218 current_page_->set_scan_on_scavenge(true);
1219 store_buffer_->SetTop(start_of_current_page_);
1221 // In this case the page we scanned took a reasonable number of slots in
1222 // the store buffer. It has now been rehabilitated and is no longer
1223 // marked scan_on_scavenge.
1224 ASSERT(!current_page_->scan_on_scavenge());
1227 start_of_current_page_ = store_buffer_->Top();
1228 current_page_ = page;
1229 } else if (event == kStoreBufferFullEvent) {
1230 // The current page overflowed the store buffer again. Wipe out its entries
1231 // in the store buffer and mark it scan-on-scavenge again. This may happen
1232 // several times while scanning.
1233 if (current_page_ == NULL) {
1234 // Store Buffer overflowed while scanning promoted objects. These are not
1235 // in any particular page, though they are likely to be clustered by the
1236 // allocation routines.
1237 store_buffer_->EnsureSpace(StoreBuffer::kStoreBufferSize / 2);
1239 // Store Buffer overflowed while scanning a particular old space page for
1240 // pointers to new space.
1241 ASSERT(current_page_ == page);
1242 ASSERT(page != NULL);
1243 current_page_->set_scan_on_scavenge(true);
1244 ASSERT(start_of_current_page_ != store_buffer_->Top());
1245 store_buffer_->SetTop(start_of_current_page_);
1253 void PromotionQueue::Initialize() {
1254 // Assumes that a NewSpacePage exactly fits a number of promotion queue
1255 // entries (where each is a pair of intptr_t). This allows us to simplify
1256 // the test for when to switch pages.
1257 ASSERT((Page::kPageSize - MemoryChunk::kBodyOffset) % (2 * kPointerSize)
1259 limit_ = reinterpret_cast<intptr_t*>(heap_->new_space()->ToSpaceStart());
1261 reinterpret_cast<intptr_t*>(heap_->new_space()->ToSpaceEnd());
1262 emergency_stack_ = NULL;
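// Copies the promotion queue entries that share a page with the allocation
// top onto a heap-allocated emergency stack, so that they are not
// overwritten by new space allocation.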
1267 void PromotionQueue::RelocateQueueHead() {
1268 ASSERT(emergency_stack_ == NULL);
1270 Page* p = Page::FromAllocationTop(reinterpret_cast<Address>(rear_));
1271 intptr_t* head_start = rear_;
1272 intptr_t* head_end =
1273 Min(front_, reinterpret_cast<intptr_t*>(p->area_end()));
1276 static_cast<int>(head_end - head_start) / kEntrySizeInWords;
1278 emergency_stack_ = new List<Entry>(2 * entries_count);
1280 while (head_start != head_end) {
1281 int size = static_cast<int>(*(head_start++));
1282 HeapObject* obj = reinterpret_cast<HeapObject*>(*(head_start++));
1283 emergency_stack_->Add(Entry(obj, size));
1289 class ScavengeWeakObjectRetainer : public WeakObjectRetainer {
1291 explicit ScavengeWeakObjectRetainer(Heap* heap) : heap_(heap) { }
1293 virtual Object* RetainAs(Object* object) {
1294 if (!heap_->InFromSpace(object)) {
1298 MapWord map_word = HeapObject::cast(object)->map_word();
1299 if (map_word.IsForwardingAddress()) {
1300 return map_word.ToForwardingAddress();
1310 void Heap::Scavenge() {
1311 RelocationLock relocation_lock(this);
1314 if (FLAG_verify_heap) VerifyNonPointerSpacePointers();
1317 gc_state_ = SCAVENGE;
1319 // Implements Cheney's copying algorithm
1320 LOG(isolate_, ResourceEvent("scavenge", "begin"));
1322 // Clear descriptor cache.
1323 isolate_->descriptor_lookup_cache()->Clear();
1325 // Used for updating survived_since_last_expansion_ at function end.
1326 intptr_t survived_watermark = PromotedSpaceSizeOfObjects();
1328 CheckNewSpaceExpansionCriteria();
1330 SelectScavengingVisitorsTable();
1332 incremental_marking()->PrepareForScavenge();
1334 paged_space(OLD_DATA_SPACE)->EnsureSweeperProgress(new_space_.Size());
1335 paged_space(OLD_POINTER_SPACE)->EnsureSweeperProgress(new_space_.Size());
1337 // Flip the semispaces. After flipping, to space is empty and from space holds the live objects.
1340 new_space_.ResetAllocationInfo();
1342 // We need to sweep newly copied objects which can be either in the
1343 // to space or promoted to the old generation. For to-space
1344 // objects, we treat the bottom of the to space as a queue. Newly
1345 // copied and unswept objects lie between a 'front' mark and the
1346 // allocation pointer.
1348 // Promoted objects can go into various old-generation spaces, and
1349 // can be allocated internally in the spaces (from the free list).
1350 // We treat the top of the to space as a queue of addresses of
1351 // promoted objects. The addresses of newly promoted and unswept
1352 // objects lie between a 'front' mark and a 'rear' mark that is
1353 // updated as a side effect of promoting an object.
1355 // There is guaranteed to be enough room at the top of the to space
1356 // for the addresses of promoted objects: every object promoted
1357 // frees up its size in bytes from the top of the new space, and
1358 // objects are at least one pointer in size.
1359 Address new_space_front = new_space_.ToSpaceStart();
1360 promotion_queue_.Initialize();
1363 store_buffer()->Clean();
1366 ScavengeVisitor scavenge_visitor(this);
1368 IterateRoots(&scavenge_visitor, VISIT_ALL_IN_SCAVENGE);
1370 // Copy objects reachable from the old generation.
1372 StoreBufferRebuildScope scope(this,
1374 &ScavengeStoreBufferCallback);
1375 store_buffer()->IteratePointersToNewSpace(&ScavengeObject);
1378 // Copy objects reachable from simple cells by scavenging cell values directly.
1380 HeapObjectIterator cell_iterator(cell_space_);
1381 for (HeapObject* heap_object = cell_iterator.Next();
1382 heap_object != NULL;
1383 heap_object = cell_iterator.Next()) {
1384 if (heap_object->IsCell()) {
1385 Cell* cell = Cell::cast(heap_object);
1386 Address value_address = cell->ValueAddress();
1387 scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(value_address));
1391 // Copy objects reachable from global property cells by scavenging global
1392 // property cell values directly.
1393 HeapObjectIterator js_global_property_cell_iterator(property_cell_space_);
1394 for (HeapObject* heap_object = js_global_property_cell_iterator.Next();
1395 heap_object != NULL;
1396 heap_object = js_global_property_cell_iterator.Next()) {
1397 if (heap_object->IsPropertyCell()) {
1398 PropertyCell* cell = PropertyCell::cast(heap_object);
1399 Address value_address = cell->ValueAddress();
1400 scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(value_address));
1401 Address type_address = cell->TypeAddress();
1402 scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(type_address));
1406 // Copy objects reachable from the code flushing candidates list.
1407 MarkCompactCollector* collector = mark_compact_collector();
1408 if (collector->is_code_flushing_enabled()) {
1409 collector->code_flusher()->IteratePointersToFromSpace(&scavenge_visitor);
1412 // Scavenge objects reachable from the native contexts list directly.
1413 scavenge_visitor.VisitPointer(BitCast<Object**>(&native_contexts_list_));
1415 new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
1417 while (isolate()->global_handles()->IterateObjectGroups(
1418 &scavenge_visitor, &IsUnscavengedHeapObject)) {
1419 new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
1421 isolate()->global_handles()->RemoveObjectGroups();
1422 isolate()->global_handles()->RemoveImplicitRefGroups();
1424 isolate_->global_handles()->IdentifyNewSpaceWeakIndependentHandles(
1425 &IsUnscavengedHeapObject);
1426 isolate_->global_handles()->IterateNewSpaceWeakIndependentRoots(
1428 new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
1430 UpdateNewSpaceReferencesInExternalStringTable(
1431 &UpdateNewSpaceReferenceInExternalStringTableEntry);
1433 promotion_queue_.Destroy();
1435 if (!FLAG_watch_ic_patching) {
1436 isolate()->runtime_profiler()->UpdateSamplesAfterScavenge();
1438 incremental_marking()->UpdateMarkingDequeAfterScavenge();
1440 ScavengeWeakObjectRetainer weak_object_retainer(this);
1441 ProcessWeakReferences(&weak_object_retainer);
1443 ASSERT(new_space_front == new_space_.top());
1446 new_space_.set_age_mark(new_space_.top());
1448 new_space_.LowerInlineAllocationLimit(
1449 new_space_.inline_allocation_limit_step());
1451 // Update how much has survived scavenge.
1452 IncrementYoungSurvivorsCounter(static_cast<int>(
1453 (PromotedSpaceSizeOfObjects() - survived_watermark) + new_space_.Size()));
1455 LOG(isolate_, ResourceEvent("scavenge", "end"));
1457 gc_state_ = NOT_IN_GC;
1459 scavenges_since_last_idle_round_++;
1463 String* Heap::UpdateNewSpaceReferenceInExternalStringTableEntry(Heap* heap,
1465 MapWord first_word = HeapObject::cast(*p)->map_word();
1467 if (!first_word.IsForwardingAddress()) {
1468 // An unreachable external string can be finalized.
1469 heap->FinalizeExternalString(String::cast(*p));
1473 // String is still reachable.
1474 return String::cast(first_word.ToForwardingAddress());
1478 void Heap::UpdateNewSpaceReferencesInExternalStringTable(
1479 ExternalStringTableUpdaterCallback updater_func) {
1481 if (FLAG_verify_heap) {
1482 external_string_table_.Verify();
1486 if (external_string_table_.new_space_strings_.is_empty()) return;
1488 Object** start = &external_string_table_.new_space_strings_[0];
1489 Object** end = start + external_string_table_.new_space_strings_.length();
1490 Object** last = start;
1492 for (Object** p = start; p < end; ++p) {
1493 ASSERT(InFromSpace(*p));
1494 String* target = updater_func(this, p);
1496 if (target == NULL) continue;
1498 ASSERT(target->IsExternalString());
1500 if (InNewSpace(target)) {
1501 // String is still in new space. Update the table entry.
1505 // String got promoted. Move it to the old string list.
1506 external_string_table_.AddOldString(target);
1510 ASSERT(last <= end);
1511 external_string_table_.ShrinkNewStrings(static_cast<int>(last - start));
1515 void Heap::UpdateReferencesInExternalStringTable(
1516 ExternalStringTableUpdaterCallback updater_func) {
1518 // Update old space string references.
1519 if (external_string_table_.old_space_strings_.length() > 0) {
1520 Object** start = &external_string_table_.old_space_strings_[0];
1521 Object** end = start + external_string_table_.old_space_strings_.length();
1522 for (Object** p = start; p < end; ++p) *p = updater_func(this, p);
1525 UpdateNewSpaceReferencesInExternalStringTable(updater_func);
1530 struct WeakListVisitor;
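// Walks a weak list of objects of type T. Entries that the retainer does not
// keep alive are unlinked; survivors are relinked and, when record_slots is
// set, their weak-next slots are recorded for the compacting collector.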
1534 static Object* VisitWeakList(Heap* heap,
1536 WeakObjectRetainer* retainer,
1537 bool record_slots) {
1538 Object* undefined = heap->undefined_value();
1539 Object* head = undefined;
1541 MarkCompactCollector* collector = heap->mark_compact_collector();
1542 while (list != undefined) {
1543 // Check whether to keep the candidate in the list.
1544 T* candidate = reinterpret_cast<T*>(list);
1545 Object* retained = retainer->RetainAs(list);
1546 if (retained != NULL) {
1547 if (head == undefined) {
1548 // First element in the list.
1551 // Subsequent elements in the list.
1552 ASSERT(tail != NULL);
1553 WeakListVisitor<T>::SetWeakNext(tail, retained);
1555 Object** next_slot =
1556 HeapObject::RawField(tail, WeakListVisitor<T>::WeakNextOffset());
1557 collector->RecordSlot(next_slot, next_slot, retained);
1560 // Retained object is new tail.
1561 ASSERT(!retained->IsUndefined());
1562 candidate = reinterpret_cast<T*>(retained);
1566 // tail is a live object, visit it.
1567 WeakListVisitor<T>::VisitLiveObject(
1568 heap, tail, retainer, record_slots);
1570 WeakListVisitor<T>::VisitPhantomObject(heap, candidate);
1573 // Move to next element in the list.
1574 list = WeakListVisitor<T>::WeakNext(candidate);
1577 // Terminate the list if there are one or more elements.
1579 WeakListVisitor<T>::SetWeakNext(tail, undefined);
1586 struct WeakListVisitor<JSFunction> {
1587 static void SetWeakNext(JSFunction* function, Object* next) {
1588 function->set_next_function_link(next);
1591 static Object* WeakNext(JSFunction* function) {
1592 return function->next_function_link();
1595 static int WeakNextOffset() {
1596 return JSFunction::kNextFunctionLinkOffset;
1599 static void VisitLiveObject(Heap*, JSFunction*,
1600 WeakObjectRetainer*, bool) {
1603 static void VisitPhantomObject(Heap*, JSFunction*) {
1609 struct WeakListVisitor<Context> {
1610 static void SetWeakNext(Context* context, Object* next) {
1611 context->set(Context::NEXT_CONTEXT_LINK,
1613 UPDATE_WRITE_BARRIER);
1616 static Object* WeakNext(Context* context) {
1617 return context->get(Context::NEXT_CONTEXT_LINK);
1620 static void VisitLiveObject(Heap* heap,
1622 WeakObjectRetainer* retainer,
1623 bool record_slots) {
1624 // Process the weak list of optimized functions for the context.
1625 Object* function_list_head =
1626 VisitWeakList<JSFunction>(
1628 context->get(Context::OPTIMIZED_FUNCTIONS_LIST),
1631 context->set(Context::OPTIMIZED_FUNCTIONS_LIST,
1633 UPDATE_WRITE_BARRIER);
1635 Object** optimized_functions =
1636 HeapObject::RawField(
1637 context, FixedArray::SizeFor(Context::OPTIMIZED_FUNCTIONS_LIST));
1638 heap->mark_compact_collector()->RecordSlot(
1639 optimized_functions, optimized_functions, function_list_head);
1643 static void VisitPhantomObject(Heap*, Context*) {
1646 static int WeakNextOffset() {
1647 return FixedArray::SizeFor(Context::NEXT_CONTEXT_LINK);
1652 void Heap::ProcessWeakReferences(WeakObjectRetainer* retainer) {
1653 // We don't record weak slots during marking or scavenges.
1654 // Instead we do it once when we complete a mark-compact cycle.
1655 // Note that the write barrier has no effect if we are already in the middle
1656 // of a compacting mark-sweep cycle, so we have to record slots manually.
1658 gc_state() == MARK_COMPACT &&
1659 mark_compact_collector()->is_compacting();
1660 ProcessArrayBuffers(retainer, record_slots);
1661 ProcessNativeContexts(retainer, record_slots);
1662 ProcessAllocationSites(retainer, record_slots);
1665 void Heap::ProcessNativeContexts(WeakObjectRetainer* retainer,
1666 bool record_slots) {
1668 VisitWeakList<Context>(
1669 this, native_contexts_list(), retainer, record_slots);
1670 // Update the head of the list of contexts.
1671 native_contexts_list_ = head;
1676 struct WeakListVisitor<JSArrayBufferView> {
1677 static void SetWeakNext(JSArrayBufferView* obj, Object* next) {
1678 obj->set_weak_next(next);
1681 static Object* WeakNext(JSArrayBufferView* obj) {
1682 return obj->weak_next();
1685 static void VisitLiveObject(Heap*,
1686 JSArrayBufferView* obj,
1687 WeakObjectRetainer* retainer,
1688 bool record_slots) {}
1690 static void VisitPhantomObject(Heap*, JSArrayBufferView*) {}
1692 static int WeakNextOffset() {
1693 return JSArrayBufferView::kWeakNextOffset;
1699 struct WeakListVisitor<JSArrayBuffer> {
1700 static void SetWeakNext(JSArrayBuffer* obj, Object* next) {
1701 obj->set_weak_next(next);
1704 static Object* WeakNext(JSArrayBuffer* obj) {
1705 return obj->weak_next();
1708 static void VisitLiveObject(Heap* heap,
1709 JSArrayBuffer* array_buffer,
1710 WeakObjectRetainer* retainer,
1711 bool record_slots) {
1712 Object* typed_array_obj =
1713 VisitWeakList<JSArrayBufferView>(
1715 array_buffer->weak_first_view(),
1716 retainer, record_slots);
1717 array_buffer->set_weak_first_view(typed_array_obj);
1718 if (typed_array_obj != heap->undefined_value() && record_slots) {
1719 Object** slot = HeapObject::RawField(
1720 array_buffer, JSArrayBuffer::kWeakFirstViewOffset);
1721 heap->mark_compact_collector()->RecordSlot(slot, slot, typed_array_obj);
1725 static void VisitPhantomObject(Heap* heap, JSArrayBuffer* phantom) {
1726 Runtime::FreeArrayBuffer(heap->isolate(), phantom);
1729 static int WeakNextOffset() {
1730 return JSArrayBuffer::kWeakNextOffset;
1735 void Heap::ProcessArrayBuffers(WeakObjectRetainer* retainer,
1736 bool record_slots) {
1737 Object* array_buffer_obj =
1738 VisitWeakList<JSArrayBuffer>(this,
1739 array_buffers_list(),
1740 retainer, record_slots);
1741 set_array_buffers_list(array_buffer_obj);
1745 void Heap::TearDownArrayBuffers() {
1746 Object* undefined = undefined_value();
1747 for (Object* o = array_buffers_list(); o != undefined;) {
1748 JSArrayBuffer* buffer = JSArrayBuffer::cast(o);
1749 Runtime::FreeArrayBuffer(isolate(), buffer);
1750 o = buffer->weak_next();
1752 array_buffers_list_ = undefined;
1757 struct WeakListVisitor<AllocationSite> {
1758 static void SetWeakNext(AllocationSite* obj, Object* next) {
1759 obj->set_weak_next(next);
1762 static Object* WeakNext(AllocationSite* obj) {
1763 return obj->weak_next();
1766 static void VisitLiveObject(Heap* heap,
1767 AllocationSite* array_buffer,
1768 WeakObjectRetainer* retainer,
1769 bool record_slots) {}
1771 static void VisitPhantomObject(Heap* heap, AllocationSite* phantom) {}
1773 static int WeakNextOffset() {
1774 return AllocationSite::kWeakNextOffset;
1779 void Heap::ProcessAllocationSites(WeakObjectRetainer* retainer,
1780 bool record_slots) {
1781 Object* allocation_site_obj =
1782 VisitWeakList<AllocationSite>(this,
1783 allocation_sites_list(),
1784 retainer, record_slots);
1785 set_allocation_sites_list(allocation_site_obj);
1789 void Heap::VisitExternalResources(v8::ExternalResourceVisitor* visitor) {
1790 DisallowHeapAllocation no_allocation;
1792 // Both the external string table and the string table may contain
1793 // external strings, but neither lists them exhaustively, nor is the
1794 // intersection set empty. Therefore we iterate over the external string
1795 // table first, ignoring internalized strings, and then over the
1796 // internalized string table.
1798 class ExternalStringTableVisitorAdapter : public ObjectVisitor {
1800 explicit ExternalStringTableVisitorAdapter(
1801 v8::ExternalResourceVisitor* visitor) : visitor_(visitor) {}
1802 virtual void VisitPointers(Object** start, Object** end) {
1803 for (Object** p = start; p < end; p++) {
1804 // Visit non-internalized external strings,
1805 // since internalized strings are listed in the string table.
1806 if (!(*p)->IsInternalizedString()) {
1807 ASSERT((*p)->IsExternalString());
1808 visitor_->VisitExternalString(Utils::ToLocal(
1809 Handle<String>(String::cast(*p))));
1814 v8::ExternalResourceVisitor* visitor_;
1815 } external_string_table_visitor(visitor);
1817 external_string_table_.Iterate(&external_string_table_visitor);
1819 class StringTableVisitorAdapter : public ObjectVisitor {
1821 explicit StringTableVisitorAdapter(
1822 v8::ExternalResourceVisitor* visitor) : visitor_(visitor) {}
1823 virtual void VisitPointers(Object** start, Object** end) {
1824 for (Object** p = start; p < end; p++) {
1825 if ((*p)->IsExternalString()) {
1826 ASSERT((*p)->IsInternalizedString());
1827 visitor_->VisitExternalString(Utils::ToLocal(
1828 Handle<String>(String::cast(*p))));
1833 v8::ExternalResourceVisitor* visitor_;
1834 } string_table_visitor(visitor);
1836 string_table()->IterateElements(&string_table_visitor);
1840 class NewSpaceScavenger : public StaticNewSpaceVisitor<NewSpaceScavenger> {
1842 static inline void VisitPointer(Heap* heap, Object** p) {
1843 Object* object = *p;
1844 if (!heap->InNewSpace(object)) return;
1845 Heap::ScavengeObject(reinterpret_cast<HeapObject**>(p),
1846 reinterpret_cast<HeapObject*>(object));
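// Processes the scavenger's work queues: newly copied objects in to space
// and newly promoted objects in the old generation are visited until both
// queues are empty. Returns the updated front of the to-space queue.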
1851 Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
1852 Address new_space_front) {
1854 SemiSpace::AssertValidRange(new_space_front, new_space_.top());
1855 // The addresses new_space_front and new_space_.top() define a
1856 // queue of unprocessed copied objects. Process them until the queue is empty.
1858 while (new_space_front != new_space_.top()) {
1859 if (!NewSpacePage::IsAtEnd(new_space_front)) {
1860 HeapObject* object = HeapObject::FromAddress(new_space_front);
1862 NewSpaceScavenger::IterateBody(object->map(), object);
1865 NewSpacePage::FromLimit(new_space_front)->next_page()->area_start();
1869 // Promote and process all the to-be-promoted objects.
1871 StoreBufferRebuildScope scope(this,
1873 &ScavengeStoreBufferCallback);
1874 while (!promotion_queue()->is_empty()) {
1877 promotion_queue()->remove(&target, &size);
1879 // A promoted object might already be partially visited
1880 // during old space pointer iteration. Thus we search specifically
1881 // for pointers into the from semispace instead of pointers into new space.
1883 ASSERT(!target->IsMap());
1884 IterateAndMarkPointersToFromSpace(target->address(),
1885 target->address() + size,
1890 // Take another spin if there are now unswept objects in new space
1891 // (there are currently no more unswept promoted objects).
1892 } while (new_space_front != new_space_.top());
1894 return new_space_front;
1898 STATIC_ASSERT((FixedDoubleArray::kHeaderSize & kDoubleAlignmentMask) == 0);
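// Aligns a freshly allocated object on a double (8 byte) boundary by placing
// a one-word filler either in front of or behind the object.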
1901 INLINE(static HeapObject* EnsureDoubleAligned(Heap* heap,
1905 static HeapObject* EnsureDoubleAligned(Heap* heap,
1908 if ((OffsetFrom(object->address()) & kDoubleAlignmentMask) != 0) {
1909 heap->CreateFillerObjectAt(object->address(), kPointerSize);
1910 return HeapObject::FromAddress(object->address() + kPointerSize);
1912 heap->CreateFillerObjectAt(object->address() + size - kPointerSize,
1919 enum LoggingAndProfiling {
1920 LOGGING_AND_PROFILING_ENABLED,
1921 LOGGING_AND_PROFILING_DISABLED
1925 enum MarksHandling { TRANSFER_MARKS, IGNORE_MARKS };
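// Scavenging dispatches through a table indexed by visitor id. The table is
// specialized on whether incremental marking bits have to be transferred to
// the new location and on whether logging/profiling hooks must run for moved
// objects.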
1928 template<MarksHandling marks_handling,
1929 LoggingAndProfiling logging_and_profiling_mode>
1930 class ScavengingVisitor : public StaticVisitorBase {
1932 static void Initialize() {
1933 table_.Register(kVisitSeqOneByteString, &EvacuateSeqOneByteString);
1934 table_.Register(kVisitSeqTwoByteString, &EvacuateSeqTwoByteString);
1935 table_.Register(kVisitShortcutCandidate, &EvacuateShortcutCandidate);
1936 table_.Register(kVisitByteArray, &EvacuateByteArray);
1937 table_.Register(kVisitFixedArray, &EvacuateFixedArray);
1938 table_.Register(kVisitFixedDoubleArray, &EvacuateFixedDoubleArray);
1940 table_.Register(kVisitNativeContext,
1941 &ObjectEvacuationStrategy<POINTER_OBJECT>::
1942 template VisitSpecialized<Context::kSize>);
1944 table_.Register(kVisitConsString,
1945 &ObjectEvacuationStrategy<POINTER_OBJECT>::
1946 template VisitSpecialized<ConsString::kSize>);
1948 table_.Register(kVisitSlicedString,
1949 &ObjectEvacuationStrategy<POINTER_OBJECT>::
1950 template VisitSpecialized<SlicedString::kSize>);
1952 table_.Register(kVisitSymbol,
1953 &ObjectEvacuationStrategy<POINTER_OBJECT>::
1954 template VisitSpecialized<Symbol::kSize>);
1956 table_.Register(kVisitSharedFunctionInfo,
1957 &ObjectEvacuationStrategy<POINTER_OBJECT>::
1958 template VisitSpecialized<SharedFunctionInfo::kSize>);
1960 table_.Register(kVisitJSWeakMap,
1961 &ObjectEvacuationStrategy<POINTER_OBJECT>::
1964 table_.Register(kVisitJSWeakSet,
1965 &ObjectEvacuationStrategy<POINTER_OBJECT>::
1968 table_.Register(kVisitJSArrayBuffer,
1969 &ObjectEvacuationStrategy<POINTER_OBJECT>::
1972 table_.Register(kVisitJSTypedArray,
1973 &ObjectEvacuationStrategy<POINTER_OBJECT>::
1976 table_.Register(kVisitJSDataView,
1977 &ObjectEvacuationStrategy<POINTER_OBJECT>::
1980 table_.Register(kVisitJSRegExp,
1981 &ObjectEvacuationStrategy<POINTER_OBJECT>::
1984 if (marks_handling == IGNORE_MARKS) {
1985 table_.Register(kVisitJSFunction,
1986 &ObjectEvacuationStrategy<POINTER_OBJECT>::
1987 template VisitSpecialized<JSFunction::kSize>);
1989 table_.Register(kVisitJSFunction, &EvacuateJSFunction);
1992 table_.RegisterSpecializations<ObjectEvacuationStrategy<DATA_OBJECT>,
1994 kVisitDataObjectGeneric>();
1996 table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>,
1998 kVisitJSObjectGeneric>();
2000 table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>,
2002 kVisitStructGeneric>();
2005 static VisitorDispatchTable<ScavengingCallback>* GetTable() {
2010 enum ObjectContents { DATA_OBJECT, POINTER_OBJECT };
2012 static void RecordCopiedObject(Heap* heap, HeapObject* obj) {
2013 bool should_record = false;
2015 should_record = FLAG_heap_stats;
2017 should_record = should_record || FLAG_log_gc;
2018 if (should_record) {
2019 if (heap->new_space()->Contains(obj)) {
2020 heap->new_space()->RecordAllocation(obj);
2022 heap->new_space()->RecordPromotion(obj);
2027 // Helper function used by CopyObject to copy a source object to an
2028 // allocated target object and update the forwarding pointer in the
2029 // source object.
2030 INLINE(static void MigrateObject(Heap* heap,
2034 // Copy the content of source to target.
2035 heap->CopyBlock(target->address(), source->address(), size);
2037 // Set the forwarding address.
2038 source->set_map_word(MapWord::FromForwardingAddress(target));
2040 if (logging_and_profiling_mode == LOGGING_AND_PROFILING_ENABLED) {
2041 // Update NewSpace stats if necessary.
2042 RecordCopiedObject(heap, target);
2043 HEAP_PROFILE(heap, ObjectMoveEvent(source->address(), target->address()));
2044 Isolate* isolate = heap->isolate();
2045 if (isolate->logger()->is_logging_code_events() ||
2046 isolate->cpu_profiler()->is_profiling()) {
2047 if (target->IsSharedFunctionInfo()) {
2048 PROFILE(isolate, SharedFunctionInfoMoveEvent(
2049 source->address(), target->address()));
2054 if (marks_handling == TRANSFER_MARKS) {
2055 if (Marking::TransferColor(source, target)) {
2056 MemoryChunk::IncrementLiveBytesFromGC(target->address(), size);
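// Evacuates an object out of from space: objects that have survived long
// enough are promoted into old data or old pointer space, the rest are
// copied into to space. Promoted pointer objects are pushed onto the
// promotion queue so their bodies get rescanned.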
2062 template<ObjectContents object_contents, int alignment>
2063 static inline void EvacuateObject(Map* map,
2067 SLOW_ASSERT(object_size <= Page::kMaxNonCodeHeapObjectSize);
2068 SLOW_ASSERT(object->Size() == object_size);
2070 int allocation_size = object_size;
2071 if (alignment != kObjectAlignment) {
2072 ASSERT(alignment == kDoubleAlignment);
2073 allocation_size += kPointerSize;
2076 Heap* heap = map->GetHeap();
2077 if (heap->ShouldBePromoted(object->address(), object_size)) {
2078 MaybeObject* maybe_result;
2080 if (object_contents == DATA_OBJECT) {
2081 maybe_result = heap->old_data_space()->AllocateRaw(allocation_size);
2084 heap->old_pointer_space()->AllocateRaw(allocation_size);
2087 Object* result = NULL; // Initialization to please compiler.
2088 if (maybe_result->ToObject(&result)) {
2089 HeapObject* target = HeapObject::cast(result);
2091 if (alignment != kObjectAlignment) {
2092 target = EnsureDoubleAligned(heap, target, allocation_size);
2095 // Order is important: the slot might be inside the target if the target
2096 // was allocated over a dead object and the slot comes from the store buffer.
2099 MigrateObject(heap, object, target, object_size);
2101 if (object_contents == POINTER_OBJECT) {
2102 if (map->instance_type() == JS_FUNCTION_TYPE) {
2103 heap->promotion_queue()->insert(
2104 target, JSFunction::kNonWeakFieldsEndOffset);
2106 heap->promotion_queue()->insert(target, object_size);
2110 heap->tracer()->increment_promoted_objects_size(object_size);
2114 MaybeObject* allocation = heap->new_space()->AllocateRaw(allocation_size);
2115 heap->promotion_queue()->SetNewLimit(heap->new_space()->top());
2116 Object* result = allocation->ToObjectUnchecked();
2117 HeapObject* target = HeapObject::cast(result);
2119 if (alignment != kObjectAlignment) {
2120 target = EnsureDoubleAligned(heap, target, allocation_size);
2123 // Order is important: the slot might be inside the target if the target
2124 // was allocated over a dead object and the slot comes from the store buffer.
2127 MigrateObject(heap, object, target, object_size);
2132 static inline void EvacuateJSFunction(Map* map,
2134 HeapObject* object) {
2135 ObjectEvacuationStrategy<POINTER_OBJECT>::
2136 template VisitSpecialized<JSFunction::kSize>(map, slot, object);
2138 HeapObject* target = *slot;
2139 MarkBit mark_bit = Marking::MarkBitFrom(target);
2140 if (Marking::IsBlack(mark_bit)) {
2141 // This object is black and it might not be rescanned by the marker.
2142 // We explicitly record the code entry slot for compaction because
2143 // promotion queue processing (IterateAndMarkPointersToFromSpace) will
2144 // miss it, as it is not HeapObject-tagged.
2145 Address code_entry_slot =
2146 target->address() + JSFunction::kCodeEntryOffset;
2147 Code* code = Code::cast(Code::GetObjectFromEntryAddress(code_entry_slot));
2148 map->GetHeap()->mark_compact_collector()->
2149 RecordCodeEntrySlot(code_entry_slot, code);
2154 static inline void EvacuateFixedArray(Map* map,
2156 HeapObject* object) {
2157 int object_size = FixedArray::BodyDescriptor::SizeOf(map, object);
2158 EvacuateObject<POINTER_OBJECT, kObjectAlignment>(
2159 map, slot, object, object_size);
2163 static inline void EvacuateFixedDoubleArray(Map* map,
2165 HeapObject* object) {
2166 int length = reinterpret_cast<FixedDoubleArray*>(object)->length();
2167 int object_size = FixedDoubleArray::SizeFor(length);
2168 EvacuateObject<DATA_OBJECT, kDoubleAlignment>(
2169 map, slot, object, object_size);
2173 static inline void EvacuateByteArray(Map* map,
2175 HeapObject* object) {
2176 int object_size = reinterpret_cast<ByteArray*>(object)->ByteArraySize();
2177 EvacuateObject<DATA_OBJECT, kObjectAlignment>(
2178 map, slot, object, object_size);
2182 static inline void EvacuateSeqOneByteString(Map* map,
2184 HeapObject* object) {
2185 int object_size = SeqOneByteString::cast(object)->
2186 SeqOneByteStringSize(map->instance_type());
2187 EvacuateObject<DATA_OBJECT, kObjectAlignment>(
2188 map, slot, object, object_size);
2192 static inline void EvacuateSeqTwoByteString(Map* map,
2194 HeapObject* object) {
2195 int object_size = SeqTwoByteString::cast(object)->
2196 SeqTwoByteStringSize(map->instance_type());
2197 EvacuateObject<DATA_OBJECT, kObjectAlignment>(
2198 map, slot, object, object_size);
2202 static inline bool IsShortcutCandidate(int type) {
2203 return ((type & kShortcutTypeMask) == kShortcutTypeTag);
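// A cons string whose second part is the empty string can be short-circuited:
// the slot is redirected to the first part instead of copying the wrapper.
// Short-circuiting is only done when marks are ignored.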
2206 static inline void EvacuateShortcutCandidate(Map* map,
2208 HeapObject* object) {
2209 ASSERT(IsShortcutCandidate(map->instance_type()));
2211 Heap* heap = map->GetHeap();
2213 if (marks_handling == IGNORE_MARKS &&
2214 ConsString::cast(object)->unchecked_second() ==
2215 heap->empty_string()) {
2217 HeapObject::cast(ConsString::cast(object)->unchecked_first());
2221 if (!heap->InNewSpace(first)) {
2222 object->set_map_word(MapWord::FromForwardingAddress(first));
2226 MapWord first_word = first->map_word();
2227 if (first_word.IsForwardingAddress()) {
2228 HeapObject* target = first_word.ToForwardingAddress();
2231 object->set_map_word(MapWord::FromForwardingAddress(target));
2235 heap->DoScavengeObject(first->map(), slot, first);
2236 object->set_map_word(MapWord::FromForwardingAddress(*slot));
2240 int object_size = ConsString::kSize;
2241 EvacuateObject<POINTER_OBJECT, kObjectAlignment>(
2242 map, slot, object, object_size);
2245 template<ObjectContents object_contents>
2246 class ObjectEvacuationStrategy {
2248 template<int object_size>
2249 static inline void VisitSpecialized(Map* map,
2251 HeapObject* object) {
2252 EvacuateObject<object_contents, kObjectAlignment>(
2253 map, slot, object, object_size);
2256 static inline void Visit(Map* map,
2258 HeapObject* object) {
2259 int object_size = map->instance_size();
2260 EvacuateObject<object_contents, kObjectAlignment>(
2261 map, slot, object, object_size);
2265 static VisitorDispatchTable<ScavengingCallback> table_;
2269 template<MarksHandling marks_handling,
2270 LoggingAndProfiling logging_and_profiling_mode>
2271 VisitorDispatchTable<ScavengingCallback>
2272 ScavengingVisitor<marks_handling, logging_and_profiling_mode>::table_;
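// All four table specializations are initialized eagerly so that
// SelectScavengingVisitorsTable below only has to copy the right one.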
2275 static void InitializeScavengingVisitorsTables() {
2276 ScavengingVisitor<TRANSFER_MARKS,
2277 LOGGING_AND_PROFILING_DISABLED>::Initialize();
2278 ScavengingVisitor<IGNORE_MARKS, LOGGING_AND_PROFILING_DISABLED>::Initialize();
2279 ScavengingVisitor<TRANSFER_MARKS,
2280 LOGGING_AND_PROFILING_ENABLED>::Initialize();
2281 ScavengingVisitor<IGNORE_MARKS, LOGGING_AND_PROFILING_ENABLED>::Initialize();
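// Picks the dispatch table for the next scavenge based on whether incremental
// marking is active and whether any logging or profiling client is attached.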
2285 void Heap::SelectScavengingVisitorsTable() {
2286 bool logging_and_profiling =
2287 isolate()->logger()->is_logging() ||
2288 isolate()->cpu_profiler()->is_profiling() ||
2289 (isolate()->heap_profiler() != NULL &&
2290 isolate()->heap_profiler()->is_profiling());
2292 if (!incremental_marking()->IsMarking()) {
2293 if (!logging_and_profiling) {
2294 scavenging_visitors_table_.CopyFrom(
2295 ScavengingVisitor<IGNORE_MARKS,
2296 LOGGING_AND_PROFILING_DISABLED>::GetTable());
2298 scavenging_visitors_table_.CopyFrom(
2299 ScavengingVisitor<IGNORE_MARKS,
2300 LOGGING_AND_PROFILING_ENABLED>::GetTable());
2303 if (!logging_and_profiling) {
2304 scavenging_visitors_table_.CopyFrom(
2305 ScavengingVisitor<TRANSFER_MARKS,
2306 LOGGING_AND_PROFILING_DISABLED>::GetTable());
2308 scavenging_visitors_table_.CopyFrom(
2309 ScavengingVisitor<TRANSFER_MARKS,
2310 LOGGING_AND_PROFILING_ENABLED>::GetTable());
2313 if (incremental_marking()->IsCompacting()) {
2314 // When compacting, forbid short-circuiting of cons strings.
2315 // The scavenging code relies on the fact that a new space object
2316 // can't be evacuated into an evacuation candidate, but
2317 // short-circuiting violates this assumption.
2318 scavenging_visitors_table_.Register(
2319 StaticVisitorBase::kVisitShortcutCandidate,
2320 scavenging_visitors_table_.GetVisitorById(
2321 StaticVisitorBase::kVisitConsString));
2327 void Heap::ScavengeObjectSlow(HeapObject** p, HeapObject* object) {
2328 SLOW_ASSERT(HEAP->InFromSpace(object));
2329 MapWord first_word = object->map_word();
2330 SLOW_ASSERT(!first_word.IsForwardingAddress());
2331 Map* map = first_word.ToMap();
2332 map->GetHeap()->DoScavengeObject(map, p, object);
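// Allocates a map and initializes only the fields that do not depend on other
// roots (prototype, descriptor array, code cache); used while bootstrapping
// the initial maps in CreateInitialMaps below.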
2336 MaybeObject* Heap::AllocatePartialMap(InstanceType instance_type,
2337 int instance_size) {
2339 MaybeObject* maybe_result = AllocateRawMap();
2340 if (!maybe_result->ToObject(&result)) return maybe_result;
2342 // Map::cast cannot be used due to uninitialized map field.
2343 reinterpret_cast<Map*>(result)->set_map(raw_unchecked_meta_map());
2344 reinterpret_cast<Map*>(result)->set_instance_type(instance_type);
2345 reinterpret_cast<Map*>(result)->set_instance_size(instance_size);
2346 reinterpret_cast<Map*>(result)->set_visitor_id(
2347 StaticVisitorBase::GetVisitorId(instance_type, instance_size));
2348 reinterpret_cast<Map*>(result)->set_inobject_properties(0);
2349 reinterpret_cast<Map*>(result)->set_pre_allocated_property_fields(0);
2350 reinterpret_cast<Map*>(result)->set_unused_property_fields(0);
2351 reinterpret_cast<Map*>(result)->set_bit_field(0);
2352 reinterpret_cast<Map*>(result)->set_bit_field2(0);
2353 int bit_field3 = Map::EnumLengthBits::encode(Map::kInvalidEnumCache) |
2354 Map::OwnsDescriptors::encode(true);
2355 reinterpret_cast<Map*>(result)->set_bit_field3(bit_field3);
2360 MaybeObject* Heap::AllocateMap(InstanceType instance_type,
2362 ElementsKind elements_kind) {
2364 MaybeObject* maybe_result = AllocateRawMap();
2365 if (!maybe_result->To(&result)) return maybe_result;
2367 Map* map = reinterpret_cast<Map*>(result);
2368 map->set_map_no_write_barrier(meta_map());
2369 map->set_instance_type(instance_type);
2370 map->set_visitor_id(
2371 StaticVisitorBase::GetVisitorId(instance_type, instance_size));
2372 map->set_prototype(null_value(), SKIP_WRITE_BARRIER);
2373 map->set_constructor(null_value(), SKIP_WRITE_BARRIER);
2374 map->set_instance_size(instance_size);
2375 map->set_inobject_properties(0);
2376 map->set_pre_allocated_property_fields(0);
2377 map->set_code_cache(empty_fixed_array(), SKIP_WRITE_BARRIER);
2378 map->set_dependent_code(DependentCode::cast(empty_fixed_array()),
2379 SKIP_WRITE_BARRIER);
2380 map->init_back_pointer(undefined_value());
2381 map->set_unused_property_fields(0);
2382 map->set_instance_descriptors(empty_descriptor_array());
2383 map->set_bit_field(0);
2384 map->set_bit_field2(1 << Map::kIsExtensible);
2385 int bit_field3 = Map::EnumLengthBits::encode(Map::kInvalidEnumCache) |
2386 Map::OwnsDescriptors::encode(true);
2387 map->set_bit_field3(bit_field3);
2388 map->set_elements_kind(elements_kind);
2394 MaybeObject* Heap::AllocateCodeCache() {
2395 CodeCache* code_cache;
2396 { MaybeObject* maybe_code_cache = AllocateStruct(CODE_CACHE_TYPE);
2397 if (!maybe_code_cache->To(&code_cache)) return maybe_code_cache;
2399 code_cache->set_default_cache(empty_fixed_array(), SKIP_WRITE_BARRIER);
2400 code_cache->set_normal_type_cache(undefined_value(), SKIP_WRITE_BARRIER);
2405 MaybeObject* Heap::AllocatePolymorphicCodeCache() {
2406 return AllocateStruct(POLYMORPHIC_CODE_CACHE_TYPE);
2410 MaybeObject* Heap::AllocateAccessorPair() {
2411 AccessorPair* accessors;
2412 { MaybeObject* maybe_accessors = AllocateStruct(ACCESSOR_PAIR_TYPE);
2413 if (!maybe_accessors->To(&accessors)) return maybe_accessors;
2415 accessors->set_getter(the_hole_value(), SKIP_WRITE_BARRIER);
2416 accessors->set_setter(the_hole_value(), SKIP_WRITE_BARRIER);
2421 MaybeObject* Heap::AllocateTypeFeedbackInfo() {
2422 TypeFeedbackInfo* info;
2423 { MaybeObject* maybe_info = AllocateStruct(TYPE_FEEDBACK_INFO_TYPE);
2424 if (!maybe_info->To(&info)) return maybe_info;
2426 info->initialize_storage();
2427 info->set_type_feedback_cells(TypeFeedbackCells::cast(empty_fixed_array()),
2428 SKIP_WRITE_BARRIER);
2433 MaybeObject* Heap::AllocateAliasedArgumentsEntry(int aliased_context_slot) {
2434 AliasedArgumentsEntry* entry;
2435 { MaybeObject* maybe_entry = AllocateStruct(ALIASED_ARGUMENTS_ENTRY_TYPE);
2436 if (!maybe_entry->To(&entry)) return maybe_entry;
2438 entry->set_aliased_context_slot(aliased_context_slot);
2443 const Heap::StringTypeTable Heap::string_type_table[] = {
2444 #define STRING_TYPE_ELEMENT(type, size, name, camel_name) \
2445 {type, size, k##camel_name##MapRootIndex},
2446 STRING_TYPE_LIST(STRING_TYPE_ELEMENT)
2447 #undef STRING_TYPE_ELEMENT
2451 const Heap::ConstantStringTable Heap::constant_string_table[] = {
2452 #define CONSTANT_STRING_ELEMENT(name, contents) \
2453 {contents, k##name##RootIndex},
2454 INTERNALIZED_STRING_LIST(CONSTANT_STRING_ELEMENT)
2455 #undef CONSTANT_STRING_ELEMENT
2459 const Heap::StructTable Heap::struct_table[] = {
2460 #define STRUCT_TABLE_ELEMENT(NAME, Name, name) \
2461 { NAME##_TYPE, Name::kSize, k##Name##MapRootIndex },
2462 STRUCT_LIST(STRUCT_TABLE_ELEMENT)
2463 #undef STRUCT_TABLE_ELEMENT
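// Creates the maps everything else depends on. The first few are allocated as
// partial maps and patched up once the empty fixed array and the empty
// descriptor array exist.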
2467 bool Heap::CreateInitialMaps() {
2469 { MaybeObject* maybe_obj = AllocatePartialMap(MAP_TYPE, Map::kSize);
2470 if (!maybe_obj->ToObject(&obj)) return false;
2472 // Map::cast cannot be used due to uninitialized map field.
2473 Map* new_meta_map = reinterpret_cast<Map*>(obj);
2474 set_meta_map(new_meta_map);
2475 new_meta_map->set_map(new_meta_map);
2477 { MaybeObject* maybe_obj =
2478 AllocatePartialMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2479 if (!maybe_obj->ToObject(&obj)) return false;
2481 set_fixed_array_map(Map::cast(obj));
2483 { MaybeObject* maybe_obj = AllocatePartialMap(ODDBALL_TYPE, Oddball::kSize);
2484 if (!maybe_obj->ToObject(&obj)) return false;
2486 set_oddball_map(Map::cast(obj));
2488 // Allocate the empty array.
2489 { MaybeObject* maybe_obj = AllocateEmptyFixedArray();
2490 if (!maybe_obj->ToObject(&obj)) return false;
2492 set_empty_fixed_array(FixedArray::cast(obj));
2494 { MaybeObject* maybe_obj = Allocate(oddball_map(), OLD_POINTER_SPACE);
2495 if (!maybe_obj->ToObject(&obj)) return false;
2497 set_null_value(Oddball::cast(obj));
2498 Oddball::cast(obj)->set_kind(Oddball::kNull);
2500 { MaybeObject* maybe_obj = Allocate(oddball_map(), OLD_POINTER_SPACE);
2501 if (!maybe_obj->ToObject(&obj)) return false;
2503 set_undefined_value(Oddball::cast(obj));
2504 Oddball::cast(obj)->set_kind(Oddball::kUndefined);
2505 ASSERT(!InNewSpace(undefined_value()));
2507 // Allocate the empty descriptor array.
2508 { MaybeObject* maybe_obj = AllocateEmptyFixedArray();
2509 if (!maybe_obj->ToObject(&obj)) return false;
2511 set_empty_descriptor_array(DescriptorArray::cast(obj));
2513 // Fix the instance_descriptors for the existing maps.
2514 meta_map()->set_code_cache(empty_fixed_array());
2515 meta_map()->set_dependent_code(DependentCode::cast(empty_fixed_array()));
2516 meta_map()->init_back_pointer(undefined_value());
2517 meta_map()->set_instance_descriptors(empty_descriptor_array());
2519 fixed_array_map()->set_code_cache(empty_fixed_array());
2520 fixed_array_map()->set_dependent_code(
2521 DependentCode::cast(empty_fixed_array()));
2522 fixed_array_map()->init_back_pointer(undefined_value());
2523 fixed_array_map()->set_instance_descriptors(empty_descriptor_array());
2525 oddball_map()->set_code_cache(empty_fixed_array());
2526 oddball_map()->set_dependent_code(DependentCode::cast(empty_fixed_array()));
2527 oddball_map()->init_back_pointer(undefined_value());
2528 oddball_map()->set_instance_descriptors(empty_descriptor_array());
2530 // Fix prototype object for existing maps.
2531 meta_map()->set_prototype(null_value());
2532 meta_map()->set_constructor(null_value());
2534 fixed_array_map()->set_prototype(null_value());
2535 fixed_array_map()->set_constructor(null_value());
2537 oddball_map()->set_prototype(null_value());
2538 oddball_map()->set_constructor(null_value());
2540 { MaybeObject* maybe_obj =
2541 AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2542 if (!maybe_obj->ToObject(&obj)) return false;
2544 set_fixed_cow_array_map(Map::cast(obj));
2545 ASSERT(fixed_array_map() != fixed_cow_array_map());
2547 { MaybeObject* maybe_obj =
2548 AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2549 if (!maybe_obj->ToObject(&obj)) return false;
2551 set_scope_info_map(Map::cast(obj));
2553 { MaybeObject* maybe_obj = AllocateMap(HEAP_NUMBER_TYPE, HeapNumber::kSize);
2554 if (!maybe_obj->ToObject(&obj)) return false;
2556 set_heap_number_map(Map::cast(obj));
2558 { MaybeObject* maybe_obj = AllocateMap(SYMBOL_TYPE, Symbol::kSize);
2559 if (!maybe_obj->ToObject(&obj)) return false;
2561 set_symbol_map(Map::cast(obj));
2563 { MaybeObject* maybe_obj = AllocateMap(FOREIGN_TYPE, Foreign::kSize);
2564 if (!maybe_obj->ToObject(&obj)) return false;
2566 set_foreign_map(Map::cast(obj));
2568 for (unsigned i = 0; i < ARRAY_SIZE(string_type_table); i++) {
2569 const StringTypeTable& entry = string_type_table[i];
2570 { MaybeObject* maybe_obj = AllocateMap(entry.type, entry.size);
2571 if (!maybe_obj->ToObject(&obj)) return false;
2573 roots_[entry.index] = Map::cast(obj);
2576 { MaybeObject* maybe_obj = AllocateMap(STRING_TYPE, kVariableSizeSentinel);
2577 if (!maybe_obj->ToObject(&obj)) return false;
2579 set_undetectable_string_map(Map::cast(obj));
2580 Map::cast(obj)->set_is_undetectable();
2582 { MaybeObject* maybe_obj =
2583 AllocateMap(ASCII_STRING_TYPE, kVariableSizeSentinel);
2584 if (!maybe_obj->ToObject(&obj)) return false;
2586 set_undetectable_ascii_string_map(Map::cast(obj));
2587 Map::cast(obj)->set_is_undetectable();
2589 { MaybeObject* maybe_obj =
2590 AllocateMap(FIXED_DOUBLE_ARRAY_TYPE, kVariableSizeSentinel);
2591 if (!maybe_obj->ToObject(&obj)) return false;
2593 set_fixed_double_array_map(Map::cast(obj));
2595 { MaybeObject* maybe_obj =
2596 AllocateMap(BYTE_ARRAY_TYPE, kVariableSizeSentinel);
2597 if (!maybe_obj->ToObject(&obj)) return false;
2599 set_byte_array_map(Map::cast(obj));
2601 { MaybeObject* maybe_obj =
2602 AllocateMap(FREE_SPACE_TYPE, kVariableSizeSentinel);
2603 if (!maybe_obj->ToObject(&obj)) return false;
2605 set_free_space_map(Map::cast(obj));
2607 { MaybeObject* maybe_obj = AllocateByteArray(0, TENURED);
2608 if (!maybe_obj->ToObject(&obj)) return false;
2610 set_empty_byte_array(ByteArray::cast(obj));
2612 { MaybeObject* maybe_obj =
2613 AllocateMap(EXTERNAL_PIXEL_ARRAY_TYPE, ExternalArray::kAlignedSize);
2614 if (!maybe_obj->ToObject(&obj)) return false;
2616 set_external_pixel_array_map(Map::cast(obj));
2618 { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_BYTE_ARRAY_TYPE,
2619 ExternalArray::kAlignedSize);
2620 if (!maybe_obj->ToObject(&obj)) return false;
2622 set_external_byte_array_map(Map::cast(obj));
2624 { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE,
2625 ExternalArray::kAlignedSize);
2626 if (!maybe_obj->ToObject(&obj)) return false;
2628 set_external_unsigned_byte_array_map(Map::cast(obj));
2630 { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_SHORT_ARRAY_TYPE,
2631 ExternalArray::kAlignedSize);
2632 if (!maybe_obj->ToObject(&obj)) return false;
2634 set_external_short_array_map(Map::cast(obj));
2636 { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE,
2637 ExternalArray::kAlignedSize);
2638 if (!maybe_obj->ToObject(&obj)) return false;
2640 set_external_unsigned_short_array_map(Map::cast(obj));
2642 { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_INT_ARRAY_TYPE,
2643 ExternalArray::kAlignedSize);
2644 if (!maybe_obj->ToObject(&obj)) return false;
2646 set_external_int_array_map(Map::cast(obj));
2648 { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_UNSIGNED_INT_ARRAY_TYPE,
2649 ExternalArray::kAlignedSize);
2650 if (!maybe_obj->ToObject(&obj)) return false;
2652 set_external_unsigned_int_array_map(Map::cast(obj));
2654 { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_FLOAT_ARRAY_TYPE,
2655 ExternalArray::kAlignedSize);
2656 if (!maybe_obj->ToObject(&obj)) return false;
2658 set_external_float_array_map(Map::cast(obj));
2660 { MaybeObject* maybe_obj =
2661 AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2662 if (!maybe_obj->ToObject(&obj)) return false;
2664 set_non_strict_arguments_elements_map(Map::cast(obj));
2666 { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_DOUBLE_ARRAY_TYPE,
2667 ExternalArray::kAlignedSize);
2668 if (!maybe_obj->ToObject(&obj)) return false;
2670 set_external_double_array_map(Map::cast(obj));
2672 { MaybeObject* maybe_obj = AllocateEmptyExternalArray(kExternalByteArray);
2673 if (!maybe_obj->ToObject(&obj)) return false;
2675 set_empty_external_byte_array(ExternalArray::cast(obj));
2677 { MaybeObject* maybe_obj =
2678 AllocateEmptyExternalArray(kExternalUnsignedByteArray);
2679 if (!maybe_obj->ToObject(&obj)) return false;
2681 set_empty_external_unsigned_byte_array(ExternalArray::cast(obj));
2683 { MaybeObject* maybe_obj = AllocateEmptyExternalArray(kExternalShortArray);
2684 if (!maybe_obj->ToObject(&obj)) return false;
2686 set_empty_external_short_array(ExternalArray::cast(obj));
2688 { MaybeObject* maybe_obj = AllocateEmptyExternalArray(
2689 kExternalUnsignedShortArray);
2690 if (!maybe_obj->ToObject(&obj)) return false;
2692 set_empty_external_unsigned_short_array(ExternalArray::cast(obj));
2694 { MaybeObject* maybe_obj = AllocateEmptyExternalArray(kExternalIntArray);
2695 if (!maybe_obj->ToObject(&obj)) return false;
2697 set_empty_external_int_array(ExternalArray::cast(obj));
2699 { MaybeObject* maybe_obj =
2700 AllocateEmptyExternalArray(kExternalUnsignedIntArray);
2701 if (!maybe_obj->ToObject(&obj)) return false;
2703 set_empty_external_unsigned_int_array(ExternalArray::cast(obj));
2705 { MaybeObject* maybe_obj = AllocateEmptyExternalArray(kExternalFloatArray);
2706 if (!maybe_obj->ToObject(&obj)) return false;
2708 set_empty_external_float_array(ExternalArray::cast(obj));
2710 { MaybeObject* maybe_obj = AllocateEmptyExternalArray(kExternalDoubleArray);
2711 if (!maybe_obj->ToObject(&obj)) return false;
2713 set_empty_external_double_array(ExternalArray::cast(obj));
2715 { MaybeObject* maybe_obj = AllocateEmptyExternalArray(kExternalPixelArray);
2716 if (!maybe_obj->ToObject(&obj)) return false;
2718 set_empty_external_pixel_array(ExternalArray::cast(obj));
2720 { MaybeObject* maybe_obj = AllocateMap(CODE_TYPE, kVariableSizeSentinel);
2721 if (!maybe_obj->ToObject(&obj)) return false;
2723 set_code_map(Map::cast(obj));
2725 { MaybeObject* maybe_obj = AllocateMap(CELL_TYPE, Cell::kSize);
2726 if (!maybe_obj->ToObject(&obj)) return false;
2728 set_cell_map(Map::cast(obj));
2730 { MaybeObject* maybe_obj = AllocateMap(PROPERTY_CELL_TYPE,
2731 PropertyCell::kSize);
2732 if (!maybe_obj->ToObject(&obj)) return false;
2734 set_global_property_cell_map(Map::cast(obj));
2736 { MaybeObject* maybe_obj = AllocateMap(FILLER_TYPE, kPointerSize);
2737 if (!maybe_obj->ToObject(&obj)) return false;
2739 set_one_pointer_filler_map(Map::cast(obj));
2741 { MaybeObject* maybe_obj = AllocateMap(FILLER_TYPE, 2 * kPointerSize);
2742 if (!maybe_obj->ToObject(&obj)) return false;
2744 set_two_pointer_filler_map(Map::cast(obj));
2746 for (unsigned i = 0; i < ARRAY_SIZE(struct_table); i++) {
2747 const StructTable& entry = struct_table[i];
2748 { MaybeObject* maybe_obj = AllocateMap(entry.type, entry.size);
2749 if (!maybe_obj->ToObject(&obj)) return false;
2751 roots_[entry.index] = Map::cast(obj);
2754 { MaybeObject* maybe_obj =
2755 AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2756 if (!maybe_obj->ToObject(&obj)) return false;
2758 set_hash_table_map(Map::cast(obj));
2760 { MaybeObject* maybe_obj =
2761 AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2762 if (!maybe_obj->ToObject(&obj)) return false;
2764 set_function_context_map(Map::cast(obj));
2766 { MaybeObject* maybe_obj =
2767 AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2768 if (!maybe_obj->ToObject(&obj)) return false;
2770 set_catch_context_map(Map::cast(obj));
2772 { MaybeObject* maybe_obj =
2773 AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2774 if (!maybe_obj->ToObject(&obj)) return false;
2776 set_with_context_map(Map::cast(obj));
2778 { MaybeObject* maybe_obj =
2779 AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2780 if (!maybe_obj->ToObject(&obj)) return false;
2782 set_block_context_map(Map::cast(obj));
2784 { MaybeObject* maybe_obj =
2785 AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2786 if (!maybe_obj->ToObject(&obj)) return false;
2788 set_module_context_map(Map::cast(obj));
2790 { MaybeObject* maybe_obj =
2791 AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2792 if (!maybe_obj->ToObject(&obj)) return false;
2794 set_global_context_map(Map::cast(obj));
2796 { MaybeObject* maybe_obj =
2797 AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2798 if (!maybe_obj->ToObject(&obj)) return false;
2800 Map* native_context_map = Map::cast(obj);
2801 native_context_map->set_dictionary_map(true);
2802 native_context_map->set_visitor_id(StaticVisitorBase::kVisitNativeContext);
2803 set_native_context_map(native_context_map);
2805 { MaybeObject* maybe_obj = AllocateMap(SHARED_FUNCTION_INFO_TYPE,
2806 SharedFunctionInfo::kAlignedSize);
2807 if (!maybe_obj->ToObject(&obj)) return false;
2809 set_shared_function_info_map(Map::cast(obj));
2811 { MaybeObject* maybe_obj = AllocateMap(JS_MESSAGE_OBJECT_TYPE,
2812 JSMessageObject::kSize);
2813 if (!maybe_obj->ToObject(&obj)) return false;
2815 set_message_object_map(Map::cast(obj));
2818 { MaybeObject* maybe_obj =
2819 AllocateMap(JS_OBJECT_TYPE, JSObject::kHeaderSize + kPointerSize);
2820 if (!maybe_obj->To(&external_map)) return false;
2822 external_map->set_is_extensible(false);
2823 set_external_map(external_map);
2825 ASSERT(!InNewSpace(empty_fixed_array()));
2830 MaybeObject* Heap::AllocateHeapNumber(double value, PretenureFlag pretenure) {
2831 // Statically ensure that it is safe to allocate heap numbers in paged spaces.
2833 STATIC_ASSERT(HeapNumber::kSize <= Page::kNonCodeObjectAreaSize);
2834 AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
2837 { MaybeObject* maybe_result =
2838 AllocateRaw(HeapNumber::kSize, space, OLD_DATA_SPACE);
2839 if (!maybe_result->ToObject(&result)) return maybe_result;
2842 HeapObject::cast(result)->set_map_no_write_barrier(heap_number_map());
2843 HeapNumber::cast(result)->set_value(value);
2848 MaybeObject* Heap::AllocateHeapNumber(double value) {
2849 // Use the general version if we're forced to always allocate.
2850 if (always_allocate()) return AllocateHeapNumber(value, TENURED);
2852 // This version of AllocateHeapNumber is optimized for
2853 // allocation in new space.
2854 STATIC_ASSERT(HeapNumber::kSize <= Page::kMaxNonCodeHeapObjectSize);
2856 { MaybeObject* maybe_result = new_space_.AllocateRaw(HeapNumber::kSize);
2857 if (!maybe_result->ToObject(&result)) return maybe_result;
2859 HeapObject::cast(result)->set_map_no_write_barrier(heap_number_map());
2860 HeapNumber::cast(result)->set_value(value);
2865 MaybeObject* Heap::AllocateCell(Object* value) {
2867 { MaybeObject* maybe_result = AllocateRawCell();
2868 if (!maybe_result->ToObject(&result)) return maybe_result;
2870 HeapObject::cast(result)->set_map_no_write_barrier(cell_map());
2871 Cell::cast(result)->set_value(value);
2876 MaybeObject* Heap::AllocatePropertyCell(Object* value) {
2878 MaybeObject* maybe_result = AllocateRawPropertyCell();
2879 if (!maybe_result->ToObject(&result)) return maybe_result;
2881 HeapObject::cast(result)->set_map_no_write_barrier(
2882 global_property_cell_map());
2883 PropertyCell* cell = PropertyCell::cast(result);
2884 cell->set_dependent_code(DependentCode::cast(empty_fixed_array()),
2885 SKIP_WRITE_BARRIER);
2886 cell->set_value(value);
2887 cell->set_type(Type::None());
2888 maybe_result = cell->SetValueInferType(value);
2889 if (maybe_result->IsFailure()) return maybe_result;
2894 MaybeObject* Heap::AllocateBox(Object* value, PretenureFlag pretenure) {
2896 MaybeObject* maybe_result = AllocateStruct(BOX_TYPE);
2897 if (!maybe_result->To(&result)) return maybe_result;
2898 result->set_value(value);
2903 MaybeObject* Heap::AllocateAllocationSite() {
2905 MaybeObject* maybe_result = Allocate(allocation_site_map(),
2907 if (!maybe_result->ToObject(&result)) return maybe_result;
2908 AllocationSite* site = AllocationSite::cast(result);
2912 site->set_weak_next(allocation_sites_list());
2913 set_allocation_sites_list(site);
2918 MaybeObject* Heap::CreateOddball(const char* to_string,
2922 { MaybeObject* maybe_result = Allocate(oddball_map(), OLD_POINTER_SPACE);
2923 if (!maybe_result->ToObject(&result)) return maybe_result;
2925 return Oddball::cast(result)->Initialize(to_string, to_number, kind);
2929 bool Heap::CreateApiObjects() {
2932 { MaybeObject* maybe_obj = AllocateMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
2933 if (!maybe_obj->ToObject(&obj)) return false;
2935 // Don't use Smi-only elements optimizations for objects with the neander
2936 // map. There are too many places where element values are set directly, with
2937 // no bottleneck to trap the Smi-only -> fast elements transition, and there
2938 // appears to be no benefit in optimizing this case.
2939 Map* new_neander_map = Map::cast(obj);
2940 new_neander_map->set_elements_kind(TERMINAL_FAST_ELEMENTS_KIND);
2941 set_neander_map(new_neander_map);
2943 { MaybeObject* maybe_obj = AllocateJSObjectFromMap(neander_map());
2944 if (!maybe_obj->ToObject(&obj)) return false;
2947 { MaybeObject* maybe_elements = AllocateFixedArray(2);
2948 if (!maybe_elements->ToObject(&elements)) return false;
2950 FixedArray::cast(elements)->set(0, Smi::FromInt(0));
2951 JSObject::cast(obj)->set_elements(FixedArray::cast(elements));
2952 set_message_listeners(JSObject::cast(obj));
2958 void Heap::CreateJSEntryStub() {
2960 set_js_entry_code(*stub.GetCode(isolate()));
2964 void Heap::CreateJSConstructEntryStub() {
2965 JSConstructEntryStub stub;
2966 set_js_construct_entry_code(*stub.GetCode(isolate()));
2970 void Heap::CreateFixedStubs() {
2971 // Here we create roots for fixed stubs. They are needed at GC
2972 // for cooking and uncooking (check out frames.cc).
2973 // This eliminates the need for doing a dictionary lookup in the
2974 // stub cache for these stubs.
2975 HandleScope scope(isolate());
2976 // gcc-4.4 has a problem generating correct code for the following snippet:
2977 // { JSEntryStub stub;
2978 // js_entry_code_ = *stub.GetCode();
2980 // { JSConstructEntryStub stub;
2981 // js_construct_entry_code_ = *stub.GetCode();
2983 // To work around the problem, make separate functions without inlining.
2984 Heap::CreateJSEntryStub();
2985 Heap::CreateJSConstructEntryStub();
2987 // Create stubs that should be there, so we don't unexpectedly have to
2988 // create them if we need them during the creation of another stub.
2989 // Stub creation mixes raw pointers and handles in an unsafe manner so
2990 // we cannot create stubs while we are creating stubs.
2991 CodeStub::GenerateStubsAheadOfTime(isolate());
2995 bool Heap::CreateInitialObjects() {
2998 // The -0 value must be set before NumberFromDouble works.
2999 { MaybeObject* maybe_obj = AllocateHeapNumber(-0.0, TENURED);
3000 if (!maybe_obj->ToObject(&obj)) return false;
3002 set_minus_zero_value(HeapNumber::cast(obj));
3003 ASSERT(std::signbit(minus_zero_value()->Number()) != 0);
3005 { MaybeObject* maybe_obj = AllocateHeapNumber(OS::nan_value(), TENURED);
3006 if (!maybe_obj->ToObject(&obj)) return false;
3008 set_nan_value(HeapNumber::cast(obj));
3010 { MaybeObject* maybe_obj = AllocateHeapNumber(V8_INFINITY, TENURED);
3011 if (!maybe_obj->ToObject(&obj)) return false;
3013 set_infinity_value(HeapNumber::cast(obj));
3015 // The hole has not been created yet, but we want to put something
3016 // predictable in the gaps in the string table, so let's make that Smi zero.
3017 set_the_hole_value(reinterpret_cast<Oddball*>(Smi::FromInt(0)));
3019 // Allocate initial string table.
3020 { MaybeObject* maybe_obj =
3021 StringTable::Allocate(this, kInitialStringTableSize);
3022 if (!maybe_obj->ToObject(&obj)) return false;
3024 // Don't use set_string_table() due to asserts.
3025 roots_[kStringTableRootIndex] = obj;
3027 // Finish initializing oddballs after creating the string table.
3028 { MaybeObject* maybe_obj =
3029 undefined_value()->Initialize("undefined",
3031 Oddball::kUndefined);
3032 if (!maybe_obj->ToObject(&obj)) return false;
3035 // Initialize the null_value.
3036 { MaybeObject* maybe_obj =
3037 null_value()->Initialize("null", Smi::FromInt(0), Oddball::kNull);
3038 if (!maybe_obj->ToObject(&obj)) return false;
3041 { MaybeObject* maybe_obj = CreateOddball("true",
3044 if (!maybe_obj->ToObject(&obj)) return false;
3046 set_true_value(Oddball::cast(obj));
3048 { MaybeObject* maybe_obj = CreateOddball("false",
3051 if (!maybe_obj->ToObject(&obj)) return false;
3053 set_false_value(Oddball::cast(obj));
3055 { MaybeObject* maybe_obj = CreateOddball("hole",
3058 if (!maybe_obj->ToObject(&obj)) return false;
3060 set_the_hole_value(Oddball::cast(obj));
3062 { MaybeObject* maybe_obj = CreateOddball("uninitialized",
3064 Oddball::kUninitialized);
3065 if (!maybe_obj->ToObject(&obj)) return false;
3067 set_uninitialized_value(Oddball::cast(obj));
3069 { MaybeObject* maybe_obj = CreateOddball("arguments_marker",
3071 Oddball::kArgumentMarker);
3072 if (!maybe_obj->ToObject(&obj)) return false;
3074 set_arguments_marker(Oddball::cast(obj));
3076 { MaybeObject* maybe_obj = CreateOddball("no_interceptor_result_sentinel",
3079 if (!maybe_obj->ToObject(&obj)) return false;
3081 set_no_interceptor_result_sentinel(obj);
3083 { MaybeObject* maybe_obj = CreateOddball("termination_exception",
3086 if (!maybe_obj->ToObject(&obj)) return false;
3088 set_termination_exception(obj);
3090 for (unsigned i = 0; i < ARRAY_SIZE(constant_string_table); i++) {
3091 { MaybeObject* maybe_obj =
3092 InternalizeUtf8String(constant_string_table[i].contents);
3093 if (!maybe_obj->ToObject(&obj)) return false;
3095 roots_[constant_string_table[i].index] = String::cast(obj);
3098 // Allocate the hidden string which is used to identify the hidden properties
3099 // in JSObjects. The hash code has a special value so that it will not match
3100 // the empty string when searching for the property. It cannot be part of the
3101 // loop above because it needs to be allocated manually with the special
3102 // hash code in place. The hash code for the hidden_string is zero to ensure
3103 // that it will always be at the first entry in property descriptors.
3104 { MaybeObject* maybe_obj = AllocateOneByteInternalizedString(
3105 OneByteVector("", 0), String::kEmptyStringHash);
3106 if (!maybe_obj->ToObject(&obj)) return false;
3108 hidden_string_ = String::cast(obj);
3110 // Allocate the code_stubs dictionary. The initial size is set to avoid
3111 // expanding the dictionary during bootstrapping.
3112 { MaybeObject* maybe_obj = UnseededNumberDictionary::Allocate(this, 128);
3113 if (!maybe_obj->ToObject(&obj)) return false;
3115 set_code_stubs(UnseededNumberDictionary::cast(obj));
3118 // Allocate the non_monomorphic_cache used in stub-cache.cc. The initial size
3119 // is set to avoid expanding the dictionary during bootstrapping.
3120 { MaybeObject* maybe_obj = UnseededNumberDictionary::Allocate(this, 64);
3121 if (!maybe_obj->ToObject(&obj)) return false;
3123 set_non_monomorphic_cache(UnseededNumberDictionary::cast(obj));
3125 { MaybeObject* maybe_obj = AllocatePolymorphicCodeCache();
3126 if (!maybe_obj->ToObject(&obj)) return false;
3128 set_polymorphic_code_cache(PolymorphicCodeCache::cast(obj));
3130 set_instanceof_cache_function(Smi::FromInt(0));
3131 set_instanceof_cache_map(Smi::FromInt(0));
3132 set_instanceof_cache_answer(Smi::FromInt(0));
3136 // Allocate the dictionary of intrinsic function names.
3137 { MaybeObject* maybe_obj =
3138 NameDictionary::Allocate(this, Runtime::kNumFunctions);
3139 if (!maybe_obj->ToObject(&obj)) return false;
3141 { MaybeObject* maybe_obj = Runtime::InitializeIntrinsicFunctionNames(this,
3143 if (!maybe_obj->ToObject(&obj)) return false;
3145 set_intrinsic_function_names(NameDictionary::cast(obj));
3147 { MaybeObject* maybe_obj = AllocateInitialNumberStringCache();
3148 if (!maybe_obj->ToObject(&obj)) return false;
3150 set_number_string_cache(FixedArray::cast(obj));
3152 // Allocate cache for single character one byte strings.
3153 { MaybeObject* maybe_obj =
3154 AllocateFixedArray(String::kMaxOneByteCharCode + 1, TENURED);
3155 if (!maybe_obj->ToObject(&obj)) return false;
3157 set_single_character_string_cache(FixedArray::cast(obj));
3159 // Allocate cache for string split.
3160 { MaybeObject* maybe_obj = AllocateFixedArray(
3161 RegExpResultsCache::kRegExpResultsCacheSize, TENURED);
3162 if (!maybe_obj->ToObject(&obj)) return false;
3164 set_string_split_cache(FixedArray::cast(obj));
3166 { MaybeObject* maybe_obj = AllocateFixedArray(
3167 RegExpResultsCache::kRegExpResultsCacheSize, TENURED);
3168 if (!maybe_obj->ToObject(&obj)) return false;
3170 set_regexp_multiple_cache(FixedArray::cast(obj));
3172 // Allocate cache for external strings pointing to native source code.
3173 { MaybeObject* maybe_obj = AllocateFixedArray(Natives::GetBuiltinsCount());
3174 if (!maybe_obj->ToObject(&obj)) return false;
3176 set_natives_source_cache(FixedArray::cast(obj));
3178 // Allocate object to hold object observation state.
3179 { MaybeObject* maybe_obj = AllocateMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
3180 if (!maybe_obj->ToObject(&obj)) return false;
3182 { MaybeObject* maybe_obj = AllocateJSObjectFromMap(Map::cast(obj));
3183 if (!maybe_obj->ToObject(&obj)) return false;
3185 set_observation_state(JSObject::cast(obj));
3187 { MaybeObject* maybe_obj = AllocateSymbol();
3188 if (!maybe_obj->ToObject(&obj)) return false;
3190 set_frozen_symbol(Symbol::cast(obj));
3192 { MaybeObject* maybe_obj = AllocateSymbol();
3193 if (!maybe_obj->ToObject(&obj)) return false;
3195 set_elements_transition_symbol(Symbol::cast(obj));
3197 { MaybeObject* maybe_obj = SeededNumberDictionary::Allocate(this, 0, TENURED);
3198 if (!maybe_obj->ToObject(&obj)) return false;
3200 SeededNumberDictionary::cast(obj)->set_requires_slow_elements();
3201 set_empty_slow_element_dictionary(SeededNumberDictionary::cast(obj));
3203 { MaybeObject* maybe_obj = AllocateSymbol();
3204 if (!maybe_obj->ToObject(&obj)) return false;
3206 set_observed_symbol(Symbol::cast(obj));
3208 // Handling of script id generation is in Factory::NewScript.
3209 set_last_script_id(Smi::FromInt(v8::Script::kNoScriptId));
3211 // Initialize keyed lookup cache.
3212 isolate_->keyed_lookup_cache()->Clear();
3214 // Initialize context slot cache.
3215 isolate_->context_slot_cache()->Clear();
3217 // Initialize descriptor cache.
3218 isolate_->descriptor_lookup_cache()->Clear();
3220 // Initialize compilation cache.
3221 isolate_->compilation_cache()->Clear();
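// Only the cache-like roots listed below are expected to change after the
// heap has been initialized.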
3227 bool Heap::RootCanBeWrittenAfterInitialization(Heap::RootListIndex root_index) {
3228 RootListIndex writable_roots[] = {
3229 kStoreBufferTopRootIndex,
3230 kStackLimitRootIndex,
3231 kNumberStringCacheRootIndex,
3232 kInstanceofCacheFunctionRootIndex,
3233 kInstanceofCacheMapRootIndex,
3234 kInstanceofCacheAnswerRootIndex,
3235 kCodeStubsRootIndex,
3236 kNonMonomorphicCacheRootIndex,
3237 kPolymorphicCodeCacheRootIndex,
3238 kLastScriptIdRootIndex,
3239 kEmptyScriptRootIndex,
3240 kRealStackLimitRootIndex,
3241 kArgumentsAdaptorDeoptPCOffsetRootIndex,
3242 kConstructStubDeoptPCOffsetRootIndex,
3243 kGetterStubDeoptPCOffsetRootIndex,
3244 kSetterStubDeoptPCOffsetRootIndex,
3245 kStringTableRootIndex,
3248 for (unsigned int i = 0; i < ARRAY_SIZE(writable_roots); i++) {
3249 if (root_index == writable_roots[i])
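// Looks a key up in the results cache. Only internalized strings are used as
// keys, so pointer comparison is sufficient.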
3256 Object* RegExpResultsCache::Lookup(Heap* heap,
3258 Object* key_pattern,
3259 ResultsCacheType type) {
3261 if (!key_string->IsInternalizedString()) return Smi::FromInt(0);
3262 if (type == STRING_SPLIT_SUBSTRINGS) {
3263 ASSERT(key_pattern->IsString());
3264 if (!key_pattern->IsInternalizedString()) return Smi::FromInt(0);
3265 cache = heap->string_split_cache();
3267 ASSERT(type == REGEXP_MULTIPLE_INDICES);
3268 ASSERT(key_pattern->IsFixedArray());
3269 cache = heap->regexp_multiple_cache();
3272 uint32_t hash = key_string->Hash();
3273 uint32_t index = ((hash & (kRegExpResultsCacheSize - 1)) &
3274 ~(kArrayEntriesPerCacheEntry - 1));
3275 if (cache->get(index + kStringOffset) == key_string &&
3276 cache->get(index + kPatternOffset) == key_pattern) {
3277 return cache->get(index + kArrayOffset);
3280 ((index + kArrayEntriesPerCacheEntry) & (kRegExpResultsCacheSize - 1));
3281 if (cache->get(index + kStringOffset) == key_string &&
3282 cache->get(index + kPatternOffset) == key_pattern) {
3283 return cache->get(index + kArrayOffset);
3285 return Smi::FromInt(0);
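// Inserts into the primary cache entry if it is free, otherwise into the
// secondary entry; if both are occupied, the secondary entry is cleared and
// the primary entry is overwritten with the new key.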
3289 void RegExpResultsCache::Enter(Heap* heap,
3291 Object* key_pattern,
3292 FixedArray* value_array,
3293 ResultsCacheType type) {
3295 if (!key_string->IsInternalizedString()) return;
3296 if (type == STRING_SPLIT_SUBSTRINGS) {
3297 ASSERT(key_pattern->IsString());
3298 if (!key_pattern->IsInternalizedString()) return;
3299 cache = heap->string_split_cache();
3301 ASSERT(type == REGEXP_MULTIPLE_INDICES);
3302 ASSERT(key_pattern->IsFixedArray());
3303 cache = heap->regexp_multiple_cache();
3306 uint32_t hash = key_string->Hash();
3307 uint32_t index = ((hash & (kRegExpResultsCacheSize - 1)) &
3308 ~(kArrayEntriesPerCacheEntry - 1));
3309 if (cache->get(index + kStringOffset) == Smi::FromInt(0)) {
3310 cache->set(index + kStringOffset, key_string);
3311 cache->set(index + kPatternOffset, key_pattern);
3312 cache->set(index + kArrayOffset, value_array);
3315 ((index + kArrayEntriesPerCacheEntry) & (kRegExpResultsCacheSize - 1));
3316 if (cache->get(index2 + kStringOffset) == Smi::FromInt(0)) {
3317 cache->set(index2 + kStringOffset, key_string);
3318 cache->set(index2 + kPatternOffset, key_pattern);
3319 cache->set(index2 + kArrayOffset, value_array);
3321 cache->set(index2 + kStringOffset, Smi::FromInt(0));
3322 cache->set(index2 + kPatternOffset, Smi::FromInt(0));
3323 cache->set(index2 + kArrayOffset, Smi::FromInt(0));
3324 cache->set(index + kStringOffset, key_string);
3325 cache->set(index + kPatternOffset, key_pattern);
3326 cache->set(index + kArrayOffset, value_array);
3329 // If the array is a reasonably short list of substrings, convert it into a
3330 // list of internalized strings.
3331 if (type == STRING_SPLIT_SUBSTRINGS && value_array->length() < 100) {
3332 for (int i = 0; i < value_array->length(); i++) {
3333 String* str = String::cast(value_array->get(i));
3334 Object* internalized_str;
3335 MaybeObject* maybe_string = heap->InternalizeString(str);
3336 if (maybe_string->ToObject(&internalized_str)) {
3337 value_array->set(i, internalized_str);
3341 // Convert backing store to a copy-on-write array.
3342 value_array->set_map_no_write_barrier(heap->fixed_cow_array_map());
3346 void RegExpResultsCache::Clear(FixedArray* cache) {
3347 for (int i = 0; i < kRegExpResultsCacheSize; i++) {
3348 cache->set(i, Smi::FromInt(0));
3353 MaybeObject* Heap::AllocateInitialNumberStringCache() {
3354 MaybeObject* maybe_obj =
3355 AllocateFixedArray(kInitialNumberStringCacheSize * 2, TENURED);
3360 int Heap::FullSizeNumberStringCacheLength() {
3361 // Compute the size of the number string cache based on the maximum new space size.
3362 // The number string cache has a minimum size of twice the initial cache
3363 // size to ensure that it is bigger after being made 'full size'.
3364 int number_string_cache_size = max_semispace_size_ / 512;
3365 number_string_cache_size = Max(kInitialNumberStringCacheSize * 2,
3366 Min(0x4000, number_string_cache_size));
3367 // There is a string and a number per entry, so the length is twice the number of entries.
3369 return number_string_cache_size * 2;
3373 void Heap::AllocateFullSizeNumberStringCache() {
3374 // The idea is to have a small number string cache in the snapshot to keep
3375 // boot-time memory usage down. If the number string cache has already been
3376 // expanded while the snapshot is being created, that defeats the purpose.
3377 ASSERT(!Serializer::enabled() || FLAG_extra_code != NULL);
3378 MaybeObject* maybe_obj =
3379 AllocateFixedArray(FullSizeNumberStringCacheLength(), TENURED);
3381 if (maybe_obj->ToObject(&new_cache)) {
3382 // We don't bother to repopulate the cache with entries from the old cache.
3383 // It will be repopulated soon enough with new strings.
3384 set_number_string_cache(FixedArray::cast(new_cache));
3386 // If allocation fails then we just return without doing anything. It is only
3387 // a cache, so best effort is OK here.
3391 void Heap::FlushNumberStringCache() {
3392 // Flush the number to string cache.
3393 int len = number_string_cache()->length();
3394 for (int i = 0; i < len; i++) {
3395 number_string_cache()->set_undefined(this, i);
3400 static inline int double_get_hash(double d) {
3401 DoubleRepresentation rep(d);
3402 return static_cast<int>(rep.bits) ^ static_cast<int>(rep.bits >> 32);
3406 static inline int smi_get_hash(Smi* smi) {
3407 return smi->value();
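// The number string cache stores (number, string) pairs in adjacent slots;
// the slot pair is picked by masking the number's hash with half the cache
// length.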
3411 Object* Heap::GetNumberStringCache(Object* number) {
3413 int mask = (number_string_cache()->length() >> 1) - 1;
3414 if (number->IsSmi()) {
3415 hash = smi_get_hash(Smi::cast(number)) & mask;
3417 hash = double_get_hash(number->Number()) & mask;
3419 Object* key = number_string_cache()->get(hash * 2);
3420 if (key == number) {
3421 return String::cast(number_string_cache()->get(hash * 2 + 1));
3422 } else if (key->IsHeapNumber() &&
3423 number->IsHeapNumber() &&
3424 key->Number() == number->Number()) {
3425 return String::cast(number_string_cache()->get(hash * 2 + 1));
3427 return undefined_value();
3431 void Heap::SetNumberStringCache(Object* number, String* string) {
3433 int mask = (number_string_cache()->length() >> 1) - 1;
3434 if (number->IsSmi()) {
3435 hash = smi_get_hash(Smi::cast(number)) & mask;
3437 hash = double_get_hash(number->Number()) & mask;
3439 if (number_string_cache()->get(hash * 2) != undefined_value() &&
3440 number_string_cache()->length() != FullSizeNumberStringCacheLength()) {
3441 // The first time we have a hash collision, we move to the full sized
3442 // number string cache.
3443 AllocateFullSizeNumberStringCache();
3446 number_string_cache()->set(hash * 2, number);
3447 number_string_cache()->set(hash * 2 + 1, string);
3451 MaybeObject* Heap::NumberToString(Object* number,
3452 bool check_number_string_cache,
3453 PretenureFlag pretenure) {
3454 isolate_->counters()->number_to_string_runtime()->Increment();
3455 if (check_number_string_cache) {
3456 Object* cached = GetNumberStringCache(number);
3457 if (cached != undefined_value()) {
3463 Vector<char> buffer(arr, ARRAY_SIZE(arr));
3465 if (number->IsSmi()) {
3466 int num = Smi::cast(number)->value();
3467 str = IntToCString(num, buffer);
3469 double num = HeapNumber::cast(number)->value();
3470 str = DoubleToCString(num, buffer);
3474 MaybeObject* maybe_js_string =
3475 AllocateStringFromOneByte(CStrVector(str), pretenure);
3476 if (maybe_js_string->ToObject(&js_string)) {
3477 SetNumberStringCache(number, String::cast(js_string));
3479 return maybe_js_string;
3483 MaybeObject* Heap::Uint32ToString(uint32_t value,
3484 bool check_number_string_cache) {
3486 MaybeObject* maybe = NumberFromUint32(value);
3487 if (!maybe->To<Object>(&number)) return maybe;
3488 return NumberToString(number, check_number_string_cache);
3492 Map* Heap::MapForExternalArrayType(ExternalArrayType array_type) {
3493 return Map::cast(roots_[RootIndexForExternalArrayType(array_type)]);
3497 Heap::RootListIndex Heap::RootIndexForExternalArrayType(
3498 ExternalArrayType array_type) {
3499 switch (array_type) {
3500 case kExternalByteArray:
3501 return kExternalByteArrayMapRootIndex;
3502 case kExternalUnsignedByteArray:
3503 return kExternalUnsignedByteArrayMapRootIndex;
3504 case kExternalShortArray:
3505 return kExternalShortArrayMapRootIndex;
3506 case kExternalUnsignedShortArray:
3507 return kExternalUnsignedShortArrayMapRootIndex;
3508 case kExternalIntArray:
3509 return kExternalIntArrayMapRootIndex;
3510 case kExternalUnsignedIntArray:
3511 return kExternalUnsignedIntArrayMapRootIndex;
3512 case kExternalFloatArray:
3513 return kExternalFloatArrayMapRootIndex;
3514 case kExternalDoubleArray:
3515 return kExternalDoubleArrayMapRootIndex;
3516 case kExternalPixelArray:
3517 return kExternalPixelArrayMapRootIndex;
3520 return kUndefinedValueRootIndex;
3524 Heap::RootListIndex Heap::RootIndexForEmptyExternalArray(
3525 ElementsKind elementsKind) {
3526 switch (elementsKind) {
3527 case EXTERNAL_BYTE_ELEMENTS:
3528 return kEmptyExternalByteArrayRootIndex;
3529 case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
3530 return kEmptyExternalUnsignedByteArrayRootIndex;
3531 case EXTERNAL_SHORT_ELEMENTS:
3532 return kEmptyExternalShortArrayRootIndex;
3533 case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
3534 return kEmptyExternalUnsignedShortArrayRootIndex;
3535 case EXTERNAL_INT_ELEMENTS:
3536 return kEmptyExternalIntArrayRootIndex;
3537 case EXTERNAL_UNSIGNED_INT_ELEMENTS:
3538 return kEmptyExternalUnsignedIntArrayRootIndex;
3539 case EXTERNAL_FLOAT_ELEMENTS:
3540 return kEmptyExternalFloatArrayRootIndex;
3541 case EXTERNAL_DOUBLE_ELEMENTS:
3542 return kEmptyExternalDoubleArrayRootIndex;
3543 case EXTERNAL_PIXEL_ELEMENTS:
3544 return kEmptyExternalPixelArrayRootIndex;
3547 return kUndefinedValueRootIndex;
3552 ExternalArray* Heap::EmptyExternalArrayForMap(Map* map) {
3553 return ExternalArray::cast(
3554 roots_[RootIndexForEmptyExternalArray(map->elements_kind())]);
3560 MaybeObject* Heap::NumberFromDouble(double value, PretenureFlag pretenure) {
3561 // We need to distinguish the minus zero value and this cannot be
3562 // done after conversion to int. Doing this by comparing bit
3563 // patterns is faster than using fpclassify() et al.
3564 static const DoubleRepresentation minus_zero(-0.0);
3566 DoubleRepresentation rep(value);
3567 if (rep.bits == minus_zero.bits) {
3568 return AllocateHeapNumber(-0.0, pretenure);
3571 int int_value = FastD2I(value);
3572 if (value == int_value && Smi::IsValid(int_value)) {
3573 return Smi::FromInt(int_value);
3576 // Materialize the value in the heap.
3577 return AllocateHeapNumber(value, pretenure);
3581 MaybeObject* Heap::AllocateForeign(Address address, PretenureFlag pretenure) {
3582 // Statically ensure that it is safe to allocate foreigns in paged spaces.
3583 STATIC_ASSERT(Foreign::kSize <= Page::kMaxNonCodeHeapObjectSize);
3584 AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
3586 MaybeObject* maybe_result = Allocate(foreign_map(), space);
3587 if (!maybe_result->To(&result)) return maybe_result;
3588 result->set_foreign_address(address);
3593 MaybeObject* Heap::AllocateSharedFunctionInfo(Object* name) {
3594 SharedFunctionInfo* share;
3595 MaybeObject* maybe = Allocate(shared_function_info_map(), OLD_POINTER_SPACE);
3596 if (!maybe->To<SharedFunctionInfo>(&share)) return maybe;
3598 // Set pointer fields.
3599 share->set_name(name);
3600 Code* illegal = isolate_->builtins()->builtin(Builtins::kIllegal);
3601 share->set_code(illegal);
3602 share->set_optimized_code_map(Smi::FromInt(0));
3603 share->set_scope_info(ScopeInfo::Empty(isolate_));
3604 Code* construct_stub =
3605 isolate_->builtins()->builtin(Builtins::kJSConstructStubGeneric);
3606 share->set_construct_stub(construct_stub);
3607 share->set_instance_class_name(Object_string());
3608 share->set_function_data(undefined_value(), SKIP_WRITE_BARRIER);
3609 share->set_script(undefined_value(), SKIP_WRITE_BARRIER);
3610 share->set_debug_info(undefined_value(), SKIP_WRITE_BARRIER);
3611 share->set_inferred_name(empty_string(), SKIP_WRITE_BARRIER);
3612 share->set_initial_map(undefined_value(), SKIP_WRITE_BARRIER);
3613 share->set_ast_node_count(0);
3614 share->set_counters(0);
3616 // Set integer fields (smi or int, depending on the architecture).
3617 share->set_length(0);
3618 share->set_formal_parameter_count(0);
3619 share->set_expected_nof_properties(0);
3620 share->set_num_literals(0);
3621 share->set_start_position_and_type(0);
3622 share->set_end_position(0);
3623 share->set_function_token_position(0);
3624 // All compiler hints default to false or 0.
3625 share->set_compiler_hints(0);
3626 share->set_opt_count(0);
3632 MaybeObject* Heap::AllocateJSMessageObject(String* type,
3637 Object* stack_trace,
3638 Object* stack_frames) {
3640 { MaybeObject* maybe_result = Allocate(message_object_map(), NEW_SPACE);
3641 if (!maybe_result->ToObject(&result)) return maybe_result;
3643 JSMessageObject* message = JSMessageObject::cast(result);
3644 message->set_properties(Heap::empty_fixed_array(), SKIP_WRITE_BARRIER);
3645 message->initialize_elements();
3646 message->set_elements(Heap::empty_fixed_array(), SKIP_WRITE_BARRIER);
3647 message->set_type(type);
3648 message->set_arguments(arguments);
3649 message->set_start_position(start_position);
3650 message->set_end_position(end_position);
3651 message->set_script(script);
3652 message->set_stack_trace(stack_trace);
3653 message->set_stack_frames(stack_frames);
3659 // Returns true for a character in a range. Both limits are inclusive.
3660 static inline bool Between(uint32_t character, uint32_t from, uint32_t to) {
3661   // This makes use of the unsigned wraparound.
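  // If character < from, the unsigned subtraction wraps around to a large
  // value that is necessarily greater than (to - from), so the single
  // comparison also rejects characters below the range.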
3662 return character - from <= to - from;
3666 MUST_USE_RESULT static inline MaybeObject* MakeOrFindTwoCharacterString(
3671 // Numeric strings have a different hash algorithm not known by
3672 // LookupTwoCharsStringIfExists, so we skip this step for such strings.
3673 if ((!Between(c1, '0', '9') || !Between(c2, '0', '9')) &&
3674 heap->string_table()->LookupTwoCharsStringIfExists(c1, c2, &result)) {
3676   // Now that we know the length is 2, we might as well make use of that fact
3677 // when building the new string.
3678 } else if (static_cast<unsigned>(c1 | c2) <= String::kMaxOneByteCharCodeU) {
3680 ASSERT(IsPowerOf2(String::kMaxOneByteCharCodeU + 1)); // because of this.
3682 { MaybeObject* maybe_result = heap->AllocateRawOneByteString(2);
3683 if (!maybe_result->ToObject(&result)) return maybe_result;
3685 uint8_t* dest = SeqOneByteString::cast(result)->GetChars();
3686 dest[0] = static_cast<uint8_t>(c1);
3687 dest[1] = static_cast<uint8_t>(c2);
3691 { MaybeObject* maybe_result = heap->AllocateRawTwoByteString(2);
3692 if (!maybe_result->ToObject(&result)) return maybe_result;
3694 uc16* dest = SeqTwoByteString::cast(result)->GetChars();
3702 MaybeObject* Heap::AllocateConsString(String* first, String* second) {
3703 int first_length = first->length();
3704 if (first_length == 0) {
3708 int second_length = second->length();
3709 if (second_length == 0) {
3713 int length = first_length + second_length;
3715 // Optimization for 2-byte strings often used as keys in a decompression
3716 // dictionary. Check whether we already have the string in the string
3717   // table to prevent creation of many unnecessary strings.
3719 uint16_t c1 = first->Get(0);
3720 uint16_t c2 = second->Get(0);
3721 return MakeOrFindTwoCharacterString(this, c1, c2);
3724 bool first_is_one_byte = first->IsOneByteRepresentation();
3725 bool second_is_one_byte = second->IsOneByteRepresentation();
3726 bool is_one_byte = first_is_one_byte && second_is_one_byte;
3727 // Make sure that an out of memory exception is thrown if the length
3728 // of the new cons string is too large.
3729 if (length > String::kMaxLength || length < 0) {
3730 isolate()->context()->mark_out_of_memory();
3731 return Failure::OutOfMemoryException(0x4);
3734 bool is_one_byte_data_in_two_byte_string = false;
3736 // At least one of the strings uses two-byte representation so we
3737 // can't use the fast case code for short ASCII strings below, but
3738 // we can try to save memory if all chars actually fit in ASCII.
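  // Note: HasOnlyOneByteChars() can be true even for a string stored in a
  // two-byte representation, when its contents are known to fit in one byte.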
3739 is_one_byte_data_in_two_byte_string =
3740 first->HasOnlyOneByteChars() && second->HasOnlyOneByteChars();
3741 if (is_one_byte_data_in_two_byte_string) {
3742 isolate_->counters()->string_add_runtime_ext_to_ascii()->Increment();
3746 // If the resulting string is small make a flat string.
3747 if (length < ConsString::kMinLength) {
3748 // Note that neither of the two inputs can be a slice because:
3749 STATIC_ASSERT(ConsString::kMinLength <= SlicedString::kMinLength);
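    // Both inputs are shorter than ConsString::kMinLength and therefore
    // shorter than SlicedString::kMinLength, so neither can have been
    // allocated as a slice.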
3750 ASSERT(first->IsFlat());
3751 ASSERT(second->IsFlat());
3754 { MaybeObject* maybe_result = AllocateRawOneByteString(length);
3755 if (!maybe_result->ToObject(&result)) return maybe_result;
3757 // Copy the characters into the new object.
3758 uint8_t* dest = SeqOneByteString::cast(result)->GetChars();
3761 if (first->IsExternalString()) {
3762 src = ExternalAsciiString::cast(first)->GetChars();
3764 src = SeqOneByteString::cast(first)->GetChars();
3766 for (int i = 0; i < first_length; i++) *dest++ = src[i];
3767 // Copy second part.
3768 if (second->IsExternalString()) {
3769 src = ExternalAsciiString::cast(second)->GetChars();
3771 src = SeqOneByteString::cast(second)->GetChars();
3773 for (int i = 0; i < second_length; i++) *dest++ = src[i];
3776 if (is_one_byte_data_in_two_byte_string) {
3778 { MaybeObject* maybe_result = AllocateRawOneByteString(length);
3779 if (!maybe_result->ToObject(&result)) return maybe_result;
3781 // Copy the characters into the new object.
3782 uint8_t* dest = SeqOneByteString::cast(result)->GetChars();
3783 String::WriteToFlat(first, dest, 0, first_length);
3784 String::WriteToFlat(second, dest + first_length, 0, second_length);
3785 isolate_->counters()->string_add_runtime_ext_to_ascii()->Increment();
3790 { MaybeObject* maybe_result = AllocateRawTwoByteString(length);
3791 if (!maybe_result->ToObject(&result)) return maybe_result;
3793 // Copy the characters into the new object.
3794 uc16* dest = SeqTwoByteString::cast(result)->GetChars();
3795 String::WriteToFlat(first, dest, 0, first_length);
3796 String::WriteToFlat(second, dest + first_length, 0, second_length);
3801 Map* map = (is_one_byte || is_one_byte_data_in_two_byte_string) ?
3802 cons_ascii_string_map() : cons_string_map();
3805 { MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
3806 if (!maybe_result->ToObject(&result)) return maybe_result;
3809 DisallowHeapAllocation no_gc;
3810 ConsString* cons_string = ConsString::cast(result);
3811 WriteBarrierMode mode = cons_string->GetWriteBarrierMode(no_gc);
3812 cons_string->set_length(length);
3813 cons_string->set_hash_field(String::kEmptyHashField);
3814 cons_string->set_first(first, mode);
3815 cons_string->set_second(second, mode);
3820 MaybeObject* Heap::AllocateSubString(String* buffer,
3823 PretenureFlag pretenure) {
3824 int length = end - start;
3826 return empty_string();
3827 } else if (length == 1) {
3828 return LookupSingleCharacterStringFromCode(buffer->Get(start));
3829 } else if (length == 2) {
3830 // Optimization for 2-byte strings often used as keys in a decompression
3831 // dictionary. Check whether we already have the string in the string
3832 // table to prevent creation of many unnecessary strings.
3833 uint16_t c1 = buffer->Get(start);
3834 uint16_t c2 = buffer->Get(start + 1);
3835 return MakeOrFindTwoCharacterString(this, c1, c2);
3838 // Make an attempt to flatten the buffer to reduce access time.
3839 buffer = buffer->TryFlattenGetString();
3841 if (!FLAG_string_slices ||
3842 !buffer->IsFlat() ||
3843 length < SlicedString::kMinLength ||
3844 pretenure == TENURED) {
3846 // WriteToFlat takes care of the case when an indirect string has a
3847 // different encoding from its underlying string. These encodings may
3848 // differ because of externalization.
3849 bool is_one_byte = buffer->IsOneByteRepresentation();
3850 { MaybeObject* maybe_result = is_one_byte
3851 ? AllocateRawOneByteString(length, pretenure)
3852 : AllocateRawTwoByteString(length, pretenure);
3853 if (!maybe_result->ToObject(&result)) return maybe_result;
3855 String* string_result = String::cast(result);
3856 // Copy the characters into the new object.
3858 ASSERT(string_result->IsOneByteRepresentation());
3859 uint8_t* dest = SeqOneByteString::cast(string_result)->GetChars();
3860 String::WriteToFlat(buffer, dest, start, end);
3862 ASSERT(string_result->IsTwoByteRepresentation());
3863 uc16* dest = SeqTwoByteString::cast(string_result)->GetChars();
3864 String::WriteToFlat(buffer, dest, start, end);
3869 ASSERT(buffer->IsFlat());
3871 if (FLAG_verify_heap) {
3872 buffer->StringVerify();
3877 // When slicing an indirect string we use its encoding for a newly created
3878 // slice and don't check the encoding of the underlying string. This is safe
3879 // even if the encodings are different because of externalization. If an
3880 // indirect ASCII string is pointing to a two-byte string, the two-byte char
3881 // codes of the underlying string must still fit into ASCII (because
3882 // externalization must not change char codes).
3883 { Map* map = buffer->IsOneByteRepresentation()
3884 ? sliced_ascii_string_map()
3885 : sliced_string_map();
3886 MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
3887 if (!maybe_result->ToObject(&result)) return maybe_result;
3890 DisallowHeapAllocation no_gc;
3891 SlicedString* sliced_string = SlicedString::cast(result);
3892 sliced_string->set_length(length);
3893 sliced_string->set_hash_field(String::kEmptyHashField);
3894 if (buffer->IsConsString()) {
3895 ConsString* cons = ConsString::cast(buffer);
3896 ASSERT(cons->second()->length() == 0);
3897 sliced_string->set_parent(cons->first());
3898 sliced_string->set_offset(start);
3899 } else if (buffer->IsSlicedString()) {
3900 // Prevent nesting sliced strings.
3901 SlicedString* parent_slice = SlicedString::cast(buffer);
3902 sliced_string->set_parent(parent_slice->parent());
3903 sliced_string->set_offset(start + parent_slice->offset());
3905 sliced_string->set_parent(buffer);
3906 sliced_string->set_offset(start);
3908 ASSERT(sliced_string->parent()->IsSeqString() ||
3909 sliced_string->parent()->IsExternalString());
3914 MaybeObject* Heap::AllocateExternalStringFromAscii(
3915 const ExternalAsciiString::Resource* resource) {
3916 size_t length = resource->length();
3917 if (length > static_cast<size_t>(String::kMaxLength)) {
3918 isolate()->context()->mark_out_of_memory();
3919 return Failure::OutOfMemoryException(0x5);
3922 Map* map = external_ascii_string_map();
3924 { MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
3925 if (!maybe_result->ToObject(&result)) return maybe_result;
3928 ExternalAsciiString* external_string = ExternalAsciiString::cast(result);
3929 external_string->set_length(static_cast<int>(length));
3930 external_string->set_hash_field(String::kEmptyHashField);
3931 external_string->set_resource(resource);
3937 MaybeObject* Heap::AllocateExternalStringFromTwoByte(
3938 const ExternalTwoByteString::Resource* resource) {
3939 size_t length = resource->length();
3940 if (length > static_cast<size_t>(String::kMaxLength)) {
3941 isolate()->context()->mark_out_of_memory();
3942 return Failure::OutOfMemoryException(0x6);
3945 // For small strings we check whether the resource contains only
3946 // one byte characters. If yes, we use a different string map.
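  // (Scanning arbitrarily long resources up front would make every external
  // allocation O(length), so only short strings are considered here.)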
3947 static const size_t kOneByteCheckLengthLimit = 32;
3948 bool is_one_byte = length <= kOneByteCheckLengthLimit &&
3949 String::IsOneByte(resource->data(), static_cast<int>(length));
3950 Map* map = is_one_byte ?
3951 external_string_with_one_byte_data_map() : external_string_map();
3953 { MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
3954 if (!maybe_result->ToObject(&result)) return maybe_result;
3957 ExternalTwoByteString* external_string = ExternalTwoByteString::cast(result);
3958 external_string->set_length(static_cast<int>(length));
3959 external_string->set_hash_field(String::kEmptyHashField);
3960 external_string->set_resource(resource);
3966 MaybeObject* Heap::LookupSingleCharacterStringFromCode(uint16_t code) {
3967 if (code <= String::kMaxOneByteCharCode) {
3968 Object* value = single_character_string_cache()->get(code);
3969 if (value != undefined_value()) return value;
3972 buffer[0] = static_cast<uint8_t>(code);
3974 MaybeObject* maybe_result =
3975 InternalizeOneByteString(Vector<const uint8_t>(buffer, 1));
3977 if (!maybe_result->ToObject(&result)) return maybe_result;
3978 single_character_string_cache()->set(code, result);
3983 { MaybeObject* maybe_result = AllocateRawTwoByteString(1);
3984 if (!maybe_result->ToObject(&result)) return maybe_result;
3986 String* answer = String::cast(result);
3987 answer->Set(0, code);
3992 MaybeObject* Heap::AllocateByteArray(int length, PretenureFlag pretenure) {
3993 if (length < 0 || length > ByteArray::kMaxLength) {
3994 return Failure::OutOfMemoryException(0x7);
3996 if (pretenure == NOT_TENURED) {
3997 return AllocateByteArray(length);
3999 int size = ByteArray::SizeFor(length);
4001 { MaybeObject* maybe_result = (size <= Page::kMaxNonCodeHeapObjectSize)
4002 ? old_data_space_->AllocateRaw(size)
4003 : lo_space_->AllocateRaw(size, NOT_EXECUTABLE);
4004 if (!maybe_result->ToObject(&result)) return maybe_result;
4007 reinterpret_cast<ByteArray*>(result)->set_map_no_write_barrier(
4009 reinterpret_cast<ByteArray*>(result)->set_length(length);
4014 MaybeObject* Heap::AllocateByteArray(int length) {
4015 if (length < 0 || length > ByteArray::kMaxLength) {
4016 return Failure::OutOfMemoryException(0x8);
4018 int size = ByteArray::SizeFor(length);
4019 AllocationSpace space =
4020 (size > Page::kMaxNonCodeHeapObjectSize) ? LO_SPACE : NEW_SPACE;
4022 { MaybeObject* maybe_result = AllocateRaw(size, space, OLD_DATA_SPACE);
4023 if (!maybe_result->ToObject(&result)) return maybe_result;
4026 reinterpret_cast<ByteArray*>(result)->set_map_no_write_barrier(
4028 reinterpret_cast<ByteArray*>(result)->set_length(length);
4033 void Heap::CreateFillerObjectAt(Address addr, int size) {
4034 if (size == 0) return;
4035 HeapObject* filler = HeapObject::FromAddress(addr);
4036 if (size == kPointerSize) {
4037 filler->set_map_no_write_barrier(one_pointer_filler_map());
4038 } else if (size == 2 * kPointerSize) {
4039 filler->set_map_no_write_barrier(two_pointer_filler_map());
4041 filler->set_map_no_write_barrier(free_space_map());
4042 FreeSpace::cast(filler)->set_size(size);
4047 MaybeObject* Heap::AllocateExternalArray(int length,
4048 ExternalArrayType array_type,
4049 void* external_pointer,
4050 PretenureFlag pretenure) {
4051 AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
4053 { MaybeObject* maybe_result = AllocateRaw(ExternalArray::kAlignedSize,
4056 if (!maybe_result->ToObject(&result)) return maybe_result;
4059 reinterpret_cast<ExternalArray*>(result)->set_map_no_write_barrier(
4060 MapForExternalArrayType(array_type));
4061 reinterpret_cast<ExternalArray*>(result)->set_length(length);
4062 reinterpret_cast<ExternalArray*>(result)->set_external_pointer(
4069 MaybeObject* Heap::CreateCode(const CodeDesc& desc,
4071 Handle<Object> self_reference,
4073 bool crankshafted) {
4074 // Allocate ByteArray before the Code object, so that we do not risk
4075   // leaving an uninitialized Code object (and breaking the heap).
4076 ByteArray* reloc_info;
4077 MaybeObject* maybe_reloc_info = AllocateByteArray(desc.reloc_size, TENURED);
4078 if (!maybe_reloc_info->To(&reloc_info)) return maybe_reloc_info;
4081 int body_size = RoundUp(desc.instr_size, kObjectAlignment);
4082 int obj_size = Code::SizeFor(body_size);
4083 ASSERT(IsAligned(static_cast<intptr_t>(obj_size), kCodeAlignment));
4084 MaybeObject* maybe_result;
4085 // Large code objects and code objects which should stay at a fixed address
4086 // are allocated in large object space.
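  // (An object larger than the usable area of a code-space page cannot fit in
  // the regular code space at all.)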
4088 bool force_lo_space = obj_size > code_space()->AreaSize();
4089 if (force_lo_space) {
4090 maybe_result = lo_space_->AllocateRaw(obj_size, EXECUTABLE);
4092 maybe_result = code_space_->AllocateRaw(obj_size);
4094 if (!maybe_result->To<HeapObject>(&result)) return maybe_result;
4096 if (immovable && !force_lo_space &&
4097 // Objects on the first page of each space are never moved.
4098 !code_space_->FirstPage()->Contains(result->address())) {
4099     // Discard the first code allocation, which was on a page where it could be
4100     // moved.
4101 CreateFillerObjectAt(result->address(), obj_size);
4102 maybe_result = lo_space_->AllocateRaw(obj_size, EXECUTABLE);
4103 if (!maybe_result->To<HeapObject>(&result)) return maybe_result;
4106 // Initialize the object
4107 result->set_map_no_write_barrier(code_map());
4108 Code* code = Code::cast(result);
4109 ASSERT(!isolate_->code_range()->exists() ||
4110 isolate_->code_range()->contains(code->address()));
4111 code->set_instruction_size(desc.instr_size);
4112 code->set_relocation_info(reloc_info);
4113 code->set_flags(flags);
4114 if (code->is_call_stub() || code->is_keyed_call_stub()) {
4115 code->set_check_type(RECEIVER_MAP_CHECK);
4117 code->set_is_crankshafted(crankshafted);
4118 code->set_deoptimization_data(empty_fixed_array(), SKIP_WRITE_BARRIER);
4119 code->InitializeTypeFeedbackInfoNoWriteBarrier(undefined_value());
4120 code->set_handler_table(empty_fixed_array(), SKIP_WRITE_BARRIER);
4121 code->set_gc_metadata(Smi::FromInt(0));
4122 code->set_ic_age(global_ic_age_);
4123 code->set_prologue_offset(kPrologueOffsetNotSet);
4124 if (code->kind() == Code::OPTIMIZED_FUNCTION) {
4125 code->set_marked_for_deoptimization(false);
4127   // Allow self references to the created code object by patching the handle to
4128 // point to the newly allocated Code object.
4129 if (!self_reference.is_null()) {
4130 *(self_reference.location()) = code;
4132 // Migrate generated code.
4133 // The generated code can contain Object** values (typically from handles)
4134 // that are dereferenced during the copy to point directly to the actual heap
4135 // objects. These pointers can include references to the code object itself,
4136 // through the self_reference parameter.
4137 code->CopyFrom(desc);
4140 if (FLAG_verify_heap) {
4148 MaybeObject* Heap::CopyCode(Code* code) {
4149 // Allocate an object the same size as the code object.
4150 int obj_size = code->Size();
4151 MaybeObject* maybe_result;
4152 if (obj_size > code_space()->AreaSize()) {
4153 maybe_result = lo_space_->AllocateRaw(obj_size, EXECUTABLE);
4155 maybe_result = code_space_->AllocateRaw(obj_size);
4159 if (!maybe_result->ToObject(&result)) return maybe_result;
4161 // Copy code object.
4162 Address old_addr = code->address();
4163 Address new_addr = reinterpret_cast<HeapObject*>(result)->address();
4164 CopyBlock(new_addr, old_addr, obj_size);
4165 // Relocate the copy.
4166 Code* new_code = Code::cast(result);
4167 ASSERT(!isolate_->code_range()->exists() ||
4168 isolate_->code_range()->contains(code->address()));
4169 new_code->Relocate(new_addr - old_addr);
4174 MaybeObject* Heap::CopyCode(Code* code, Vector<byte> reloc_info) {
4175 // Allocate ByteArray before the Code object, so that we do not risk
4176   // leaving an uninitialized Code object (and breaking the heap).
4177 Object* reloc_info_array;
4178 { MaybeObject* maybe_reloc_info_array =
4179 AllocateByteArray(reloc_info.length(), TENURED);
4180 if (!maybe_reloc_info_array->ToObject(&reloc_info_array)) {
4181 return maybe_reloc_info_array;
4185 int new_body_size = RoundUp(code->instruction_size(), kObjectAlignment);
4187 int new_obj_size = Code::SizeFor(new_body_size);
4189 Address old_addr = code->address();
4191 size_t relocation_offset =
4192 static_cast<size_t>(code->instruction_end() - old_addr);
4194 MaybeObject* maybe_result;
4195 if (new_obj_size > code_space()->AreaSize()) {
4196 maybe_result = lo_space_->AllocateRaw(new_obj_size, EXECUTABLE);
4198 maybe_result = code_space_->AllocateRaw(new_obj_size);
4202 if (!maybe_result->ToObject(&result)) return maybe_result;
4204 // Copy code object.
4205 Address new_addr = reinterpret_cast<HeapObject*>(result)->address();
4207 // Copy header and instructions.
4208 CopyBytes(new_addr, old_addr, relocation_offset);
4210 Code* new_code = Code::cast(result);
4211 new_code->set_relocation_info(ByteArray::cast(reloc_info_array));
4213 // Copy patched rinfo.
4214 CopyBytes(new_code->relocation_start(),
4216 static_cast<size_t>(reloc_info.length()));
4218 // Relocate the copy.
4219 ASSERT(!isolate_->code_range()->exists() ||
4220 isolate_->code_range()->contains(code->address()));
4221 new_code->Relocate(new_addr - old_addr);
4224 if (FLAG_verify_heap) {
4232 MaybeObject* Heap::AllocateWithAllocationSite(Map* map, AllocationSpace space,
4233 Handle<AllocationSite> allocation_site) {
4234 ASSERT(gc_state_ == NOT_IN_GC);
4235 ASSERT(map->instance_type() != MAP_TYPE);
4236 // If allocation failures are disallowed, we may allocate in a different
4237 // space when new space is full and the object is not a large object.
4238 AllocationSpace retry_space =
4239 (space != NEW_SPACE) ? space : TargetSpaceId(map->instance_type());
4240 int size = map->instance_size() + AllocationMemento::kSize;
4242 MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
4243 if (!maybe_result->ToObject(&result)) return maybe_result;
4244 // No need for write barrier since object is white and map is in old space.
4245 HeapObject::cast(result)->set_map_no_write_barrier(map);
4246 AllocationMemento* alloc_memento = reinterpret_cast<AllocationMemento*>(
4247 reinterpret_cast<Address>(result) + map->instance_size());
4248 alloc_memento->set_map_no_write_barrier(allocation_memento_map());
4249 alloc_memento->set_allocation_site(*allocation_site, SKIP_WRITE_BARRIER);
4254 MaybeObject* Heap::Allocate(Map* map, AllocationSpace space) {
4255 ASSERT(gc_state_ == NOT_IN_GC);
4256 ASSERT(map->instance_type() != MAP_TYPE);
4257 // If allocation failures are disallowed, we may allocate in a different
4258 // space when new space is full and the object is not a large object.
4259 AllocationSpace retry_space =
4260 (space != NEW_SPACE) ? space : TargetSpaceId(map->instance_type());
4261 int size = map->instance_size();
4263 MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
4264 if (!maybe_result->ToObject(&result)) return maybe_result;
4265 // No need for write barrier since object is white and map is in old space.
4266 HeapObject::cast(result)->set_map_no_write_barrier(map);
4271 void Heap::InitializeFunction(JSFunction* function,
4272 SharedFunctionInfo* shared,
4273 Object* prototype) {
4274 ASSERT(!prototype->IsMap());
4275 function->initialize_properties();
4276 function->initialize_elements();
4277 function->set_shared(shared);
4278 function->set_code(shared->code());
4279 function->set_prototype_or_initial_map(prototype);
4280 function->set_context(undefined_value());
4281 function->set_literals_or_bindings(empty_fixed_array());
4282 function->set_next_function_link(undefined_value());
4286 MaybeObject* Heap::AllocateFunctionPrototype(JSFunction* function) {
4287 // Make sure to use globals from the function's context, since the function
4288 // can be from a different context.
4289 Context* native_context = function->context()->native_context();
4291 if (function->shared()->is_generator()) {
4292     // Generator prototypes can share maps since they don't have "constructor"
4293     // properties.
4294 new_map = native_context->generator_object_prototype_map();
4296 // Each function prototype gets a fresh map to avoid unwanted sharing of
4297 // maps between prototypes of different constructors.
4298 JSFunction* object_function = native_context->object_function();
4299 ASSERT(object_function->has_initial_map());
4300 MaybeObject* maybe_map = object_function->initial_map()->Copy();
4301 if (!maybe_map->To(&new_map)) return maybe_map;
4305 MaybeObject* maybe_prototype = AllocateJSObjectFromMap(new_map);
4306 if (!maybe_prototype->ToObject(&prototype)) return maybe_prototype;
4308 if (!function->shared()->is_generator()) {
4309 MaybeObject* maybe_failure =
4310 JSObject::cast(prototype)->SetLocalPropertyIgnoreAttributes(
4311 constructor_string(), function, DONT_ENUM);
4312 if (maybe_failure->IsFailure()) return maybe_failure;
4319 MaybeObject* Heap::AllocateFunction(Map* function_map,
4320 SharedFunctionInfo* shared,
4322 PretenureFlag pretenure) {
4323 AllocationSpace space =
4324 (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
4326 { MaybeObject* maybe_result = Allocate(function_map, space);
4327 if (!maybe_result->ToObject(&result)) return maybe_result;
4329 InitializeFunction(JSFunction::cast(result), shared, prototype);
4334 MaybeObject* Heap::AllocateArgumentsObject(Object* callee, int length) {
4335 // To get fast allocation and map sharing for arguments objects we
4336 // allocate them based on an arguments boilerplate.
4338 JSObject* boilerplate;
4339 int arguments_object_size;
4340 bool strict_mode_callee = callee->IsJSFunction() &&
4341 !JSFunction::cast(callee)->shared()->is_classic_mode();
4342 if (strict_mode_callee) {
4344 isolate()->context()->native_context()->
4345 strict_mode_arguments_boilerplate();
4346 arguments_object_size = kArgumentsObjectSizeStrict;
4349 isolate()->context()->native_context()->arguments_boilerplate();
4350 arguments_object_size = kArgumentsObjectSize;
4353 // This calls Copy directly rather than using Heap::AllocateRaw so we
4354 // duplicate the check here.
4355 ASSERT(AllowHeapAllocation::IsAllowed() && gc_state_ == NOT_IN_GC);
4357 // Check that the size of the boilerplate matches our
4358 // expectations. The ArgumentsAccessStub::GenerateNewObject relies
4359 // on the size being a known constant.
4360 ASSERT(arguments_object_size == boilerplate->map()->instance_size());
4362 // Do the allocation.
4364 { MaybeObject* maybe_result =
4365 AllocateRaw(arguments_object_size, NEW_SPACE, OLD_POINTER_SPACE);
4366 if (!maybe_result->ToObject(&result)) return maybe_result;
4369 // Copy the content. The arguments boilerplate doesn't have any
4370   // fields that point to new space, so it's safe to skip the write barrier.
4372 CopyBlock(HeapObject::cast(result)->address(),
4373 boilerplate->address(),
4374 JSObject::kHeaderSize);
4376 // Set the length property.
4377 JSObject::cast(result)->InObjectPropertyAtPut(kArgumentsLengthIndex,
4378 Smi::FromInt(length),
4379 SKIP_WRITE_BARRIER);
4380 // Set the callee property for non-strict mode arguments object only.
4381 if (!strict_mode_callee) {
4382 JSObject::cast(result)->InObjectPropertyAtPut(kArgumentsCalleeIndex,
4386 // Check the state of the object
4387 ASSERT(JSObject::cast(result)->HasFastProperties());
4388 ASSERT(JSObject::cast(result)->HasFastObjectElements());
4394 MaybeObject* Heap::AllocateInitialMap(JSFunction* fun) {
4395 ASSERT(!fun->has_initial_map());
4397 // First create a new map with the size and number of in-object properties
4398 // suggested by the function.
4399 InstanceType instance_type;
4401 int in_object_properties;
4402 if (fun->shared()->is_generator()) {
4403 instance_type = JS_GENERATOR_OBJECT_TYPE;
4404 instance_size = JSGeneratorObject::kSize;
4405 in_object_properties = 0;
4407 instance_type = JS_OBJECT_TYPE;
4408 instance_size = fun->shared()->CalculateInstanceSize();
4409 in_object_properties = fun->shared()->CalculateInObjectProperties();
4412 MaybeObject* maybe_map = AllocateMap(instance_type, instance_size);
4413 if (!maybe_map->To(&map)) return maybe_map;
4415 // Fetch or allocate prototype.
4417 if (fun->has_instance_prototype()) {
4418 prototype = fun->instance_prototype();
4420 MaybeObject* maybe_prototype = AllocateFunctionPrototype(fun);
4421 if (!maybe_prototype->To(&prototype)) return maybe_prototype;
4423 map->set_inobject_properties(in_object_properties);
4424 map->set_unused_property_fields(in_object_properties);
4425 map->set_prototype(prototype);
4426 ASSERT(map->has_fast_object_elements());
4428 if (!fun->shared()->is_generator()) {
4429 fun->shared()->StartInobjectSlackTracking(map);
4436 void Heap::InitializeJSObjectFromMap(JSObject* obj,
4437 FixedArray* properties,
4439 obj->set_properties(properties);
4440 obj->initialize_elements();
4441 // TODO(1240798): Initialize the object's body using valid initial values
4442 // according to the object's initial map. For example, if the map's
4443 // instance type is JS_ARRAY_TYPE, the length field should be initialized
4444 // to a number (e.g. Smi::FromInt(0)) and the elements initialized to a
4445 // fixed array (e.g. Heap::empty_fixed_array()). Currently, the object
4446 // verification code has to cope with (temporarily) invalid objects. See
4447   // for example, JSArray::JSArrayVerify().
4449 // We cannot always fill with one_pointer_filler_map because objects
4450 // created from API functions expect their internal fields to be initialized
4451 // with undefined_value.
4452 // Pre-allocated fields need to be initialized with undefined_value as well
4453 // so that object accesses before the constructor completes (e.g. in the
4454 // debugger) will not cause a crash.
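  // While in-object slack tracking is in progress the unused tail of the
  // object is filled with one_pointer_filler_map words instead (presumably so
  // the tail can be trimmed once tracking finishes); the ASSERT below
  // guarantees that such objects have no internal fields that would need
  // undefined_value.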
4455 if (map->constructor()->IsJSFunction() &&
4456 JSFunction::cast(map->constructor())->shared()->
4457 IsInobjectSlackTrackingInProgress()) {
4458 // We might want to shrink the object later.
4459 ASSERT(obj->GetInternalFieldCount() == 0);
4460 filler = Heap::one_pointer_filler_map();
4462 filler = Heap::undefined_value();
4464 obj->InitializeBody(map, Heap::undefined_value(), filler);
4468 MaybeObject* Heap::AllocateJSObjectFromMap(Map* map, PretenureFlag pretenure) {
4469 // JSFunctions should be allocated using AllocateFunction to be
4470 // properly initialized.
4471 ASSERT(map->instance_type() != JS_FUNCTION_TYPE);
4473 // Both types of global objects should be allocated using
4474 // AllocateGlobalObject to be properly initialized.
4475 ASSERT(map->instance_type() != JS_GLOBAL_OBJECT_TYPE);
4476 ASSERT(map->instance_type() != JS_BUILTINS_OBJECT_TYPE);
4478 // Allocate the backing storage for the properties.
4479 int prop_size = map->InitialPropertiesLength();
4480 ASSERT(prop_size >= 0);
4482 { MaybeObject* maybe_properties = AllocateFixedArray(prop_size, pretenure);
4483 if (!maybe_properties->ToObject(&properties)) return maybe_properties;
4486 // Allocate the JSObject.
4487 AllocationSpace space =
4488 (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
4489 if (map->instance_size() > Page::kMaxNonCodeHeapObjectSize) space = LO_SPACE;
4491 MaybeObject* maybe_obj = Allocate(map, space);
4492 if (!maybe_obj->To(&obj)) return maybe_obj;
4494 // Initialize the JSObject.
4495 InitializeJSObjectFromMap(JSObject::cast(obj),
4496 FixedArray::cast(properties),
4498 ASSERT(JSObject::cast(obj)->HasFastElements() ||
4499 JSObject::cast(obj)->HasExternalArrayElements());
4504 MaybeObject* Heap::AllocateJSObjectFromMapWithAllocationSite(Map* map,
4505 Handle<AllocationSite> allocation_site) {
4506 // JSFunctions should be allocated using AllocateFunction to be
4507 // properly initialized.
4508 ASSERT(map->instance_type() != JS_FUNCTION_TYPE);
4510 // Both types of global objects should be allocated using
4511 // AllocateGlobalObject to be properly initialized.
4512 ASSERT(map->instance_type() != JS_GLOBAL_OBJECT_TYPE);
4513 ASSERT(map->instance_type() != JS_BUILTINS_OBJECT_TYPE);
4515 // Allocate the backing storage for the properties.
4516 int prop_size = map->InitialPropertiesLength();
4517 ASSERT(prop_size >= 0);
4519 { MaybeObject* maybe_properties = AllocateFixedArray(prop_size);
4520 if (!maybe_properties->ToObject(&properties)) return maybe_properties;
4523 // Allocate the JSObject.
4524 AllocationSpace space = NEW_SPACE;
4525 if (map->instance_size() > Page::kMaxNonCodeHeapObjectSize) space = LO_SPACE;
4527 MaybeObject* maybe_obj =
4528 AllocateWithAllocationSite(map, space, allocation_site);
4529 if (!maybe_obj->To(&obj)) return maybe_obj;
4531 // Initialize the JSObject.
4532 InitializeJSObjectFromMap(JSObject::cast(obj),
4533 FixedArray::cast(properties),
4535 ASSERT(JSObject::cast(obj)->HasFastElements());
4540 MaybeObject* Heap::AllocateJSObject(JSFunction* constructor,
4541 PretenureFlag pretenure) {
4542 // Allocate the initial map if absent.
4543 if (!constructor->has_initial_map()) {
4544 Object* initial_map;
4545 { MaybeObject* maybe_initial_map = AllocateInitialMap(constructor);
4546 if (!maybe_initial_map->ToObject(&initial_map)) return maybe_initial_map;
4548 constructor->set_initial_map(Map::cast(initial_map));
4549 Map::cast(initial_map)->set_constructor(constructor);
4551   // Allocate the object based on the constructor's initial map.
4552 MaybeObject* result = AllocateJSObjectFromMap(
4553 constructor->initial_map(), pretenure);
4555 // Make sure result is NOT a global object if valid.
4556 Object* non_failure;
4557 ASSERT(!result->ToObject(&non_failure) || !non_failure->IsGlobalObject());
4563 MaybeObject* Heap::AllocateJSObjectWithAllocationSite(JSFunction* constructor,
4564 Handle<AllocationSite> allocation_site) {
4565 // Allocate the initial map if absent.
4566 if (!constructor->has_initial_map()) {
4567 Object* initial_map;
4568 { MaybeObject* maybe_initial_map = AllocateInitialMap(constructor);
4569 if (!maybe_initial_map->ToObject(&initial_map)) return maybe_initial_map;
4571 constructor->set_initial_map(Map::cast(initial_map));
4572 Map::cast(initial_map)->set_constructor(constructor);
4574   // Allocate the object based on the constructor's initial map, or the payload
4575   // advice recorded in the allocation site.
4576 Map* initial_map = constructor->initial_map();
4578 Smi* smi = Smi::cast(allocation_site->transition_info());
4579 ElementsKind to_kind = static_cast<ElementsKind>(smi->value());
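  // The allocation site stores, as a Smi, the elements kind advice gathered
  // for this site; if it differs from the initial map's kind, the map is
  // transitioned up front.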
4580 AllocationSiteMode mode = TRACK_ALLOCATION_SITE;
4581 if (to_kind != initial_map->elements_kind()) {
4582 MaybeObject* maybe_new_map = initial_map->AsElementsKind(to_kind);
4583 if (!maybe_new_map->To(&initial_map)) return maybe_new_map;
4584 // Possibly alter the mode, since we found an updated elements kind
4585 // in the type info cell.
4586 mode = AllocationSite::GetMode(to_kind);
4589 MaybeObject* result;
4590 if (mode == TRACK_ALLOCATION_SITE) {
4591 result = AllocateJSObjectFromMapWithAllocationSite(initial_map,
4594 result = AllocateJSObjectFromMap(initial_map, NOT_TENURED);
4597 // Make sure result is NOT a global object if valid.
4598 Object* non_failure;
4599 ASSERT(!result->ToObject(&non_failure) || !non_failure->IsGlobalObject());
4605 MaybeObject* Heap::AllocateJSGeneratorObject(JSFunction *function) {
4606 ASSERT(function->shared()->is_generator());
4608 if (function->has_initial_map()) {
4609 map = function->initial_map();
4611 // Allocate the initial map if absent.
4612 MaybeObject* maybe_map = AllocateInitialMap(function);
4613 if (!maybe_map->To(&map)) return maybe_map;
4614 function->set_initial_map(map);
4615 map->set_constructor(function);
4617 ASSERT(map->instance_type() == JS_GENERATOR_OBJECT_TYPE);
4618 return AllocateJSObjectFromMap(map);
4622 MaybeObject* Heap::AllocateJSModule(Context* context, ScopeInfo* scope_info) {
4623 // Allocate a fresh map. Modules do not have a prototype.
4625 MaybeObject* maybe_map = AllocateMap(JS_MODULE_TYPE, JSModule::kSize);
4626 if (!maybe_map->To(&map)) return maybe_map;
4627 // Allocate the object based on the map.
4629 MaybeObject* maybe_module = AllocateJSObjectFromMap(map, TENURED);
4630 if (!maybe_module->To(&module)) return maybe_module;
4631 module->set_context(context);
4632 module->set_scope_info(scope_info);
4637 MaybeObject* Heap::AllocateJSArrayAndStorage(
4638 ElementsKind elements_kind,
4641 ArrayStorageAllocationMode mode,
4642 PretenureFlag pretenure) {
4643 MaybeObject* maybe_array = AllocateJSArray(elements_kind, pretenure);
4645 if (!maybe_array->To(&array)) return maybe_array;
4647   // TODO(mvstanton): this body of code duplicates AllocateJSArrayStorage
4648   // for performance reasons.
4649 ASSERT(capacity >= length);
4651 if (capacity == 0) {
4652 array->set_length(Smi::FromInt(0));
4653 array->set_elements(empty_fixed_array());
4657 FixedArrayBase* elms;
4658 MaybeObject* maybe_elms = NULL;
4659 if (IsFastDoubleElementsKind(elements_kind)) {
4660 if (mode == DONT_INITIALIZE_ARRAY_ELEMENTS) {
4661 maybe_elms = AllocateUninitializedFixedDoubleArray(capacity);
4663 ASSERT(mode == INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
4664 maybe_elms = AllocateFixedDoubleArrayWithHoles(capacity);
4667 ASSERT(IsFastSmiOrObjectElementsKind(elements_kind));
4668 if (mode == DONT_INITIALIZE_ARRAY_ELEMENTS) {
4669 maybe_elms = AllocateUninitializedFixedArray(capacity);
4671 ASSERT(mode == INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
4672 maybe_elms = AllocateFixedArrayWithHoles(capacity);
4675 if (!maybe_elms->To(&elms)) return maybe_elms;
4677 array->set_elements(elms);
4678 array->set_length(Smi::FromInt(length));
4683 MaybeObject* Heap::AllocateJSArrayAndStorageWithAllocationSite(
4684 ElementsKind elements_kind,
4687 Handle<AllocationSite> allocation_site,
4688 ArrayStorageAllocationMode mode) {
4689 MaybeObject* maybe_array = AllocateJSArrayWithAllocationSite(elements_kind,
4692 if (!maybe_array->To(&array)) return maybe_array;
4693 return AllocateJSArrayStorage(array, length, capacity, mode);
4697 MaybeObject* Heap::AllocateJSArrayStorage(
4701 ArrayStorageAllocationMode mode) {
4702 ASSERT(capacity >= length);
4704 if (capacity == 0) {
4705 array->set_length(Smi::FromInt(0));
4706 array->set_elements(empty_fixed_array());
4710 FixedArrayBase* elms;
4711 MaybeObject* maybe_elms = NULL;
4712 ElementsKind elements_kind = array->GetElementsKind();
4713 if (IsFastDoubleElementsKind(elements_kind)) {
4714 if (mode == DONT_INITIALIZE_ARRAY_ELEMENTS) {
4715 maybe_elms = AllocateUninitializedFixedDoubleArray(capacity);
4717 ASSERT(mode == INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
4718 maybe_elms = AllocateFixedDoubleArrayWithHoles(capacity);
4721 ASSERT(IsFastSmiOrObjectElementsKind(elements_kind));
4722 if (mode == DONT_INITIALIZE_ARRAY_ELEMENTS) {
4723 maybe_elms = AllocateUninitializedFixedArray(capacity);
4725 ASSERT(mode == INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
4726 maybe_elms = AllocateFixedArrayWithHoles(capacity);
4729 if (!maybe_elms->To(&elms)) return maybe_elms;
4731 array->set_elements(elms);
4732 array->set_length(Smi::FromInt(length));
4737 MaybeObject* Heap::AllocateJSArrayWithElements(
4738 FixedArrayBase* elements,
4739 ElementsKind elements_kind,
4741 PretenureFlag pretenure) {
4742 MaybeObject* maybe_array = AllocateJSArray(elements_kind, pretenure);
4744 if (!maybe_array->To(&array)) return maybe_array;
4746 array->set_elements(elements);
4747 array->set_length(Smi::FromInt(length));
4748 array->ValidateElements();
4753 MaybeObject* Heap::AllocateJSProxy(Object* handler, Object* prototype) {
4755 // TODO(rossberg): Once we optimize proxies, think about a scheme to share
4756 // maps. Will probably depend on the identity of the handler object, too.
4758 MaybeObject* maybe_map_obj = AllocateMap(JS_PROXY_TYPE, JSProxy::kSize);
4759 if (!maybe_map_obj->To<Map>(&map)) return maybe_map_obj;
4760 map->set_prototype(prototype);
4762 // Allocate the proxy object.
4764 MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
4765 if (!maybe_result->To<JSProxy>(&result)) return maybe_result;
4766 result->InitializeBody(map->instance_size(), Smi::FromInt(0));
4767 result->set_handler(handler);
4768 result->set_hash(undefined_value(), SKIP_WRITE_BARRIER);
4773 MaybeObject* Heap::AllocateJSFunctionProxy(Object* handler,
4775 Object* construct_trap,
4776 Object* prototype) {
4778 // TODO(rossberg): Once we optimize proxies, think about a scheme to share
4779 // maps. Will probably depend on the identity of the handler object, too.
4781 MaybeObject* maybe_map_obj =
4782 AllocateMap(JS_FUNCTION_PROXY_TYPE, JSFunctionProxy::kSize);
4783 if (!maybe_map_obj->To<Map>(&map)) return maybe_map_obj;
4784 map->set_prototype(prototype);
4786 // Allocate the proxy object.
4787 JSFunctionProxy* result;
4788 MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
4789 if (!maybe_result->To<JSFunctionProxy>(&result)) return maybe_result;
4790 result->InitializeBody(map->instance_size(), Smi::FromInt(0));
4791 result->set_handler(handler);
4792 result->set_hash(undefined_value(), SKIP_WRITE_BARRIER);
4793 result->set_call_trap(call_trap);
4794 result->set_construct_trap(construct_trap);
4799 MaybeObject* Heap::AllocateGlobalObject(JSFunction* constructor) {
4800 ASSERT(constructor->has_initial_map());
4801 Map* map = constructor->initial_map();
4802 ASSERT(map->is_dictionary_map());
4804 // Make sure no field properties are described in the initial map.
4805 // This guarantees us that normalizing the properties does not
4806 // require us to change property values to PropertyCells.
4807 ASSERT(map->NextFreePropertyIndex() == 0);
4809 // Make sure we don't have a ton of pre-allocated slots in the
4810 // global objects. They will be unused once we normalize the object.
4811 ASSERT(map->unused_property_fields() == 0);
4812 ASSERT(map->inobject_properties() == 0);
4814 // Initial size of the backing store to avoid resize of the storage during
4815   // bootstrapping. The size differs between the JS global object and the
4816   // builtins object.
4817 int initial_size = map->instance_type() == JS_GLOBAL_OBJECT_TYPE ? 64 : 512;
4819 // Allocate a dictionary object for backing storage.
4820 NameDictionary* dictionary;
4821 MaybeObject* maybe_dictionary =
4822 NameDictionary::Allocate(
4824 map->NumberOfOwnDescriptors() * 2 + initial_size);
4825 if (!maybe_dictionary->To(&dictionary)) return maybe_dictionary;
4827 // The global object might be created from an object template with accessors.
4828 // Fill these accessors into the dictionary.
4829 DescriptorArray* descs = map->instance_descriptors();
4830 for (int i = 0; i < map->NumberOfOwnDescriptors(); i++) {
4831 PropertyDetails details = descs->GetDetails(i);
4832 ASSERT(details.type() == CALLBACKS); // Only accessors are expected.
4833 PropertyDetails d = PropertyDetails(details.attributes(), CALLBACKS, i + 1);
4834 Object* value = descs->GetCallbacksObject(i);
4835 MaybeObject* maybe_value = AllocatePropertyCell(value);
4836 if (!maybe_value->ToObject(&value)) return maybe_value;
4838 MaybeObject* maybe_added = dictionary->Add(descs->GetKey(i), value, d);
4839 if (!maybe_added->To(&dictionary)) return maybe_added;
4842 // Allocate the global object and initialize it with the backing store.
4844 MaybeObject* maybe_global = Allocate(map, OLD_POINTER_SPACE);
4845 if (!maybe_global->To(&global)) return maybe_global;
4847 InitializeJSObjectFromMap(global, dictionary, map);
4849 // Create a new map for the global object.
4851 MaybeObject* maybe_map = map->CopyDropDescriptors();
4852 if (!maybe_map->To(&new_map)) return maybe_map;
4853 new_map->set_dictionary_map(true);
4855 // Set up the global object as a normalized object.
4856 global->set_map(new_map);
4857 global->set_properties(dictionary);
4859 // Make sure result is a global object with properties in dictionary.
4860 ASSERT(global->IsGlobalObject());
4861 ASSERT(!global->HasFastProperties());
4866 MaybeObject* Heap::CopyJSObject(JSObject* source) {
4867 // Never used to copy functions. If functions need to be copied we
4868 // have to be careful to clear the literals array.
4869 SLOW_ASSERT(!source->IsJSFunction());
4872 Map* map = source->map();
4873 int object_size = map->instance_size();
4876 WriteBarrierMode wb_mode = UPDATE_WRITE_BARRIER;
4878 // If we're forced to always allocate, we use the general allocation
4879 // functions which may leave us with an object in old space.
4880 if (always_allocate()) {
4881 { MaybeObject* maybe_clone =
4882 AllocateRaw(object_size, NEW_SPACE, OLD_POINTER_SPACE);
4883 if (!maybe_clone->ToObject(&clone)) return maybe_clone;
4885 Address clone_address = HeapObject::cast(clone)->address();
4886 CopyBlock(clone_address,
4889 // Update write barrier for all fields that lie beyond the header.
4890 RecordWrites(clone_address,
4891 JSObject::kHeaderSize,
4892 (object_size - JSObject::kHeaderSize) / kPointerSize);
4894 wb_mode = SKIP_WRITE_BARRIER;
4896 { MaybeObject* maybe_clone = new_space_.AllocateRaw(object_size);
4897 if (!maybe_clone->ToObject(&clone)) return maybe_clone;
4899 SLOW_ASSERT(InNewSpace(clone));
4900 // Since we know the clone is allocated in new space, we can copy
4901 // the contents without worrying about updating the write barrier.
4902 CopyBlock(HeapObject::cast(clone)->address(),
4908 JSObject::cast(clone)->GetElementsKind() == source->GetElementsKind());
4909 FixedArrayBase* elements = FixedArrayBase::cast(source->elements());
4910 FixedArray* properties = FixedArray::cast(source->properties());
4911 // Update elements if necessary.
4912 if (elements->length() > 0) {
4914 { MaybeObject* maybe_elem;
4915 if (elements->map() == fixed_cow_array_map()) {
4916 maybe_elem = FixedArray::cast(elements);
4917 } else if (source->HasFastDoubleElements()) {
4918 maybe_elem = CopyFixedDoubleArray(FixedDoubleArray::cast(elements));
4920 maybe_elem = CopyFixedArray(FixedArray::cast(elements));
4922 if (!maybe_elem->ToObject(&elem)) return maybe_elem;
4924 JSObject::cast(clone)->set_elements(FixedArrayBase::cast(elem), wb_mode);
4926 // Update properties if necessary.
4927 if (properties->length() > 0) {
4929 { MaybeObject* maybe_prop = CopyFixedArray(properties);
4930 if (!maybe_prop->ToObject(&prop)) return maybe_prop;
4932 JSObject::cast(clone)->set_properties(FixedArray::cast(prop), wb_mode);
4934 // Return the new clone.
4939 MaybeObject* Heap::CopyJSObjectWithAllocationSite(
4941 AllocationSite* site) {
4942 // Never used to copy functions. If functions need to be copied we
4943 // have to be careful to clear the literals array.
4944 SLOW_ASSERT(!source->IsJSFunction());
4947 Map* map = source->map();
4948 int object_size = map->instance_size();
4951 ASSERT(map->CanTrackAllocationSite());
4952 ASSERT(map->instance_type() == JS_ARRAY_TYPE);
4953 WriteBarrierMode wb_mode = UPDATE_WRITE_BARRIER;
4955 // If we're forced to always allocate, we use the general allocation
4956 // functions which may leave us with an object in old space.
4957 int adjusted_object_size = object_size;
4958 if (always_allocate()) {
4959 // We'll only track origin if we are certain to allocate in new space
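    // (A memento placed behind an old-space clone would presumably never be
    // looked at, so it is only emitted when the clone is certain to land in
    // new space.)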
4960 const int kMinFreeNewSpaceAfterGC = InitialSemiSpaceSize() * 3/4;
4961 if ((object_size + AllocationMemento::kSize) < kMinFreeNewSpaceAfterGC) {
4962 adjusted_object_size += AllocationMemento::kSize;
4965 { MaybeObject* maybe_clone =
4966 AllocateRaw(adjusted_object_size, NEW_SPACE, OLD_POINTER_SPACE);
4967 if (!maybe_clone->ToObject(&clone)) return maybe_clone;
4969 Address clone_address = HeapObject::cast(clone)->address();
4970 CopyBlock(clone_address,
4973 // Update write barrier for all fields that lie beyond the header.
4974 int write_barrier_offset = adjusted_object_size > object_size
4975 ? JSArray::kSize + AllocationMemento::kSize
4976 : JSObject::kHeaderSize;
4977 if (((object_size - write_barrier_offset) / kPointerSize) > 0) {
4978 RecordWrites(clone_address,
4979 write_barrier_offset,
4980 (object_size - write_barrier_offset) / kPointerSize);
4983 // Track allocation site information, if we failed to allocate it inline.
4984 if (InNewSpace(clone) &&
4985 adjusted_object_size == object_size) {
4986 MaybeObject* maybe_alloc_memento =
4987 AllocateStruct(ALLOCATION_MEMENTO_TYPE);
4988 AllocationMemento* alloc_memento;
4989 if (maybe_alloc_memento->To(&alloc_memento)) {
4990 alloc_memento->set_map_no_write_barrier(allocation_memento_map());
4991 alloc_memento->set_allocation_site(site, SKIP_WRITE_BARRIER);
4995 wb_mode = SKIP_WRITE_BARRIER;
4996 adjusted_object_size += AllocationMemento::kSize;
4998 { MaybeObject* maybe_clone = new_space_.AllocateRaw(adjusted_object_size);
4999 if (!maybe_clone->ToObject(&clone)) return maybe_clone;
5001 SLOW_ASSERT(InNewSpace(clone));
5002 // Since we know the clone is allocated in new space, we can copy
5003 // the contents without worrying about updating the write barrier.
5004 CopyBlock(HeapObject::cast(clone)->address(),
5009 if (adjusted_object_size > object_size) {
5010 AllocationMemento* alloc_memento = reinterpret_cast<AllocationMemento*>(
5011 reinterpret_cast<Address>(clone) + object_size);
5012 alloc_memento->set_map_no_write_barrier(allocation_memento_map());
5013 alloc_memento->set_allocation_site(site, SKIP_WRITE_BARRIER);
5017 JSObject::cast(clone)->GetElementsKind() == source->GetElementsKind());
5018 FixedArrayBase* elements = FixedArrayBase::cast(source->elements());
5019 FixedArray* properties = FixedArray::cast(source->properties());
5020 // Update elements if necessary.
5021 if (elements->length() > 0) {
5023 { MaybeObject* maybe_elem;
5024 if (elements->map() == fixed_cow_array_map()) {
5025 maybe_elem = FixedArray::cast(elements);
5026 } else if (source->HasFastDoubleElements()) {
5027 maybe_elem = CopyFixedDoubleArray(FixedDoubleArray::cast(elements));
5029 maybe_elem = CopyFixedArray(FixedArray::cast(elements));
5031 if (!maybe_elem->ToObject(&elem)) return maybe_elem;
5033 JSObject::cast(clone)->set_elements(FixedArrayBase::cast(elem), wb_mode);
5035 // Update properties if necessary.
5036 if (properties->length() > 0) {
5038 { MaybeObject* maybe_prop = CopyFixedArray(properties);
5039 if (!maybe_prop->ToObject(&prop)) return maybe_prop;
5041 JSObject::cast(clone)->set_properties(FixedArray::cast(prop), wb_mode);
5043 // Return the new clone.
5048 MaybeObject* Heap::ReinitializeJSReceiver(
5049 JSReceiver* object, InstanceType type, int size) {
5050 ASSERT(type >= FIRST_JS_OBJECT_TYPE);
5052 // Allocate fresh map.
5053 // TODO(rossberg): Once we optimize proxies, cache these maps.
5055 MaybeObject* maybe = AllocateMap(type, size);
5056 if (!maybe->To<Map>(&map)) return maybe;
5058 // Check that the receiver has at least the size of the fresh object.
5059 int size_difference = object->map()->instance_size() - map->instance_size();
5060 ASSERT(size_difference >= 0);
5062 map->set_prototype(object->map()->prototype());
5064 // Allocate the backing storage for the properties.
5065 int prop_size = map->unused_property_fields() - map->inobject_properties();
5067 maybe = AllocateFixedArray(prop_size, TENURED);
5068 if (!maybe->ToObject(&properties)) return maybe;
5070 // Functions require some allocation, which might fail here.
5071 SharedFunctionInfo* shared = NULL;
5072 if (type == JS_FUNCTION_TYPE) {
5075 InternalizeOneByteString(STATIC_ASCII_VECTOR("<freezing call trap>"));
5076 if (!maybe->To<String>(&name)) return maybe;
5077 maybe = AllocateSharedFunctionInfo(name);
5078 if (!maybe->To<SharedFunctionInfo>(&shared)) return maybe;
5081 // Because of possible retries of this function after failure,
5082 // we must NOT fail after this point, where we have changed the type!
5084 // Reset the map for the object.
5085 object->set_map(map);
5086 JSObject* jsobj = JSObject::cast(object);
5088 // Reinitialize the object from the constructor map.
5089 InitializeJSObjectFromMap(jsobj, FixedArray::cast(properties), map);
5091 // Functions require some minimal initialization.
5092 if (type == JS_FUNCTION_TYPE) {
5093 map->set_function_with_prototype(true);
5094 InitializeFunction(JSFunction::cast(object), shared, the_hole_value());
5095 JSFunction::cast(object)->set_context(
5096 isolate()->context()->native_context());
5099 // Put in filler if the new object is smaller than the old.
5100 if (size_difference > 0) {
5101 CreateFillerObjectAt(
5102 object->address() + map->instance_size(), size_difference);
5109 MaybeObject* Heap::ReinitializeJSGlobalProxy(JSFunction* constructor,
5110 JSGlobalProxy* object) {
5111 ASSERT(constructor->has_initial_map());
5112 Map* map = constructor->initial_map();
5114 // Check that the already allocated object has the same size and type as
5115 // objects allocated using the constructor.
5116 ASSERT(map->instance_size() == object->map()->instance_size());
5117 ASSERT(map->instance_type() == object->map()->instance_type());
5119 // Allocate the backing storage for the properties.
5120 int prop_size = map->unused_property_fields() - map->inobject_properties();
5122 { MaybeObject* maybe_properties = AllocateFixedArray(prop_size, TENURED);
5123 if (!maybe_properties->ToObject(&properties)) return maybe_properties;
5126 // Reset the map for the object.
5127 object->set_map(constructor->initial_map());
5129 // Reinitialize the object from the constructor map.
5130 InitializeJSObjectFromMap(object, FixedArray::cast(properties), map);
5135 MaybeObject* Heap::AllocateStringFromOneByte(Vector<const uint8_t> string,
5136 PretenureFlag pretenure) {
5137 int length = string.length();
5139 return Heap::LookupSingleCharacterStringFromCode(string[0]);
5142 { MaybeObject* maybe_result =
5143 AllocateRawOneByteString(string.length(), pretenure);
5144 if (!maybe_result->ToObject(&result)) return maybe_result;
5147 // Copy the characters into the new object.
5148 CopyChars(SeqOneByteString::cast(result)->GetChars(),
5155 MaybeObject* Heap::AllocateStringFromUtf8Slow(Vector<const char> string,
5156 int non_ascii_start,
5157 PretenureFlag pretenure) {
5158 // Continue counting the number of characters in the UTF-8 string, starting
5159 // from the first non-ascii character or word.
5160 Access<UnicodeCache::Utf8Decoder>
5161 decoder(isolate_->unicode_cache()->utf8_decoder());
5162 decoder->Reset(string.start() + non_ascii_start,
5163 string.length() - non_ascii_start);
5164 int utf16_length = decoder->Utf16Length();
5165 ASSERT(utf16_length > 0);
5169 int chars = non_ascii_start + utf16_length;
5170 MaybeObject* maybe_result = AllocateRawTwoByteString(chars, pretenure);
5171 if (!maybe_result->ToObject(&result)) return maybe_result;
5173 // Convert and copy the characters into the new object.
5174 SeqTwoByteString* twobyte = SeqTwoByteString::cast(result);
5175 // Copy ascii portion.
5176 uint16_t* data = twobyte->GetChars();
5177 if (non_ascii_start != 0) {
5178 const char* ascii_data = string.start();
5179 for (int i = 0; i < non_ascii_start; i++) {
5180 *data++ = *ascii_data++;
5183 // Now write the remainder.
5184 decoder->WriteUtf16(data, utf16_length);
5189 MaybeObject* Heap::AllocateStringFromTwoByte(Vector<const uc16> string,
5190 PretenureFlag pretenure) {
5191 // Check if the string is an ASCII string.
5193 int length = string.length();
5194 const uc16* start = string.start();
5196 if (String::IsOneByte(start, length)) {
5197 MaybeObject* maybe_result = AllocateRawOneByteString(length, pretenure);
5198 if (!maybe_result->ToObject(&result)) return maybe_result;
5199 CopyChars(SeqOneByteString::cast(result)->GetChars(), start, length);
5200 } else { // It's not a one byte string.
5201 MaybeObject* maybe_result = AllocateRawTwoByteString(length, pretenure);
5202 if (!maybe_result->ToObject(&result)) return maybe_result;
5203 CopyChars(SeqTwoByteString::cast(result)->GetChars(), start, length);
5209 Map* Heap::InternalizedStringMapForString(String* string) {
5210   // If the string is in new space it cannot be used as an internalized string.
5211 if (InNewSpace(string)) return NULL;
5213 // Find the corresponding internalized string map for strings.
5214 switch (string->map()->instance_type()) {
5215 case STRING_TYPE: return internalized_string_map();
5216 case ASCII_STRING_TYPE: return ascii_internalized_string_map();
5217 case CONS_STRING_TYPE: return cons_internalized_string_map();
5218 case CONS_ASCII_STRING_TYPE: return cons_ascii_internalized_string_map();
5219 case EXTERNAL_STRING_TYPE: return external_internalized_string_map();
5220 case EXTERNAL_ASCII_STRING_TYPE:
5221 return external_ascii_internalized_string_map();
5222 case EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE:
5223 return external_internalized_string_with_one_byte_data_map();
5224 case SHORT_EXTERNAL_STRING_TYPE:
5225 return short_external_internalized_string_map();
5226 case SHORT_EXTERNAL_ASCII_STRING_TYPE:
5227 return short_external_ascii_internalized_string_map();
5228 case SHORT_EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE:
5229 return short_external_internalized_string_with_one_byte_data_map();
5230 default: return NULL; // No match found.
5235 static inline void WriteOneByteData(Vector<const char> vector,
5238 // Only works for ascii.
5239 ASSERT(vector.length() == len);
5240 OS::MemCopy(chars, vector.start(), len);
5243 static inline void WriteTwoByteData(Vector<const char> vector,
5246 const uint8_t* stream = reinterpret_cast<const uint8_t*>(vector.start());
5247 unsigned stream_length = vector.length();
5248 while (stream_length != 0) {
5249 unsigned consumed = 0;
5250 uint32_t c = unibrow::Utf8::ValueOf(stream, stream_length, &consumed);
5251 ASSERT(c != unibrow::Utf8::kBadChar);
5252 ASSERT(consumed <= stream_length);
5253 stream_length -= consumed;
5255 if (c > unibrow::Utf16::kMaxNonSurrogateCharCode) {
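      // Code points outside the BMP take two UTF-16 code units (a surrogate
      // pair).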
5258 *chars++ = unibrow::Utf16::LeadSurrogate(c);
5259 *chars++ = unibrow::Utf16::TrailSurrogate(c);
5266 ASSERT(stream_length == 0);
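// Illustrative sketch (not part of V8): code points above
// Utf16::kMaxNonSurrogateCharCode are emitted as a surrogate pair, as in the
// loop above. The standard UTF-16 arithmetic, written with hypothetical helper
// names and plain unsigned types:
namespace heap_doc_sketches {

// Splits a supplementary-plane code point (0x10000..0x10FFFF) into its UTF-16
// lead and trail surrogates.
inline void EncodeSurrogatePair(unsigned code_point,
                                unsigned short* lead,
                                unsigned short* trail) {
  unsigned v = code_point - 0x10000u;                            // 20 payload bits
  *lead = static_cast<unsigned short>(0xD800u + (v >> 10));      // top 10 bits
  *trail = static_cast<unsigned short>(0xDC00u + (v & 0x3FFu));  // low 10 bits
}

}  // namespace heap_doc_sketches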
5271 static inline void WriteOneByteData(String* s, uint8_t* chars, int len) {
5272 ASSERT(s->length() == len);
5273 String::WriteToFlat(s, chars, 0, len);
5277 static inline void WriteTwoByteData(String* s, uint16_t* chars, int len) {
5278 ASSERT(s->length() == len);
5279 String::WriteToFlat(s, chars, 0, len);
5283 template<bool is_one_byte, typename T>
5284 MaybeObject* Heap::AllocateInternalizedStringImpl(
5285 T t, int chars, uint32_t hash_field) {
5287 // Compute map and object size.
5292 if (chars > SeqOneByteString::kMaxLength) {
5293 return Failure::OutOfMemoryException(0x9);
5295 map = ascii_internalized_string_map();
5296 size = SeqOneByteString::SizeFor(chars);
5298 if (chars > SeqTwoByteString::kMaxLength) {
5299 return Failure::OutOfMemoryException(0xa);
5301 map = internalized_string_map();
5302 size = SeqTwoByteString::SizeFor(chars);
5307 { MaybeObject* maybe_result = (size > Page::kMaxNonCodeHeapObjectSize)
5308 ? lo_space_->AllocateRaw(size, NOT_EXECUTABLE)
5309 : old_data_space_->AllocateRaw(size);
5310 if (!maybe_result->ToObject(&result)) return maybe_result;
5313 reinterpret_cast<HeapObject*>(result)->set_map_no_write_barrier(map);
5314 // Set length and hash fields of the allocated string.
5315 String* answer = String::cast(result);
5316 answer->set_length(chars);
5317 answer->set_hash_field(hash_field);
5319 ASSERT_EQ(size, answer->Size());
5322 WriteOneByteData(t, SeqOneByteString::cast(answer)->GetChars(), chars);
5324 WriteTwoByteData(t, SeqTwoByteString::cast(answer)->GetChars(), chars);
5330 // Need explicit instantiations.
5332 MaybeObject* Heap::AllocateInternalizedStringImpl<true>(String*, int, uint32_t);
5334 MaybeObject* Heap::AllocateInternalizedStringImpl<false>(
5335 String*, int, uint32_t);
5337 MaybeObject* Heap::AllocateInternalizedStringImpl<false>(
5338 Vector<const char>, int, uint32_t);
5341 MaybeObject* Heap::AllocateRawOneByteString(int length,
5342 PretenureFlag pretenure) {
5343 if (length < 0 || length > SeqOneByteString::kMaxLength) {
5344 return Failure::OutOfMemoryException(0xb);
5346 int size = SeqOneByteString::SizeFor(length);
5347 ASSERT(size <= SeqOneByteString::kMaxSize);
5348 AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
5349 AllocationSpace retry_space = OLD_DATA_SPACE;
5351 if (size > Page::kMaxNonCodeHeapObjectSize) {
5352 // Allocate in large object space; the retry space will be ignored.
5357 { MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
5358 if (!maybe_result->ToObject(&result)) return maybe_result;
5361 // Partially initialize the object.
5362 HeapObject::cast(result)->set_map_no_write_barrier(ascii_string_map());
5363 String::cast(result)->set_length(length);
5364 String::cast(result)->set_hash_field(String::kEmptyHashField);
5365 ASSERT_EQ(size, HeapObject::cast(result)->Size());
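// Illustrative sketch (not part of V8): the raw string allocators above pick
// the target space from the pretenure flag and the object size. The decision
// condensed into a standalone helper, with hypothetical enum and parameter
// names:
namespace heap_doc_sketches {

enum SketchSpace { kSketchNewSpace, kSketchOldDataSpace, kSketchLargeObjectSpace };

// Objects larger than the regular-page limit go to the large object space;
// otherwise tenured allocations go to old data space and the rest start out
// in new space.
inline SketchSpace ChooseStringSpace(bool tenured, int size,
                                     int max_regular_object_size) {
  if (size > max_regular_object_size) return kSketchLargeObjectSpace;
  return tenured ? kSketchOldDataSpace : kSketchNewSpace;
}

}  // namespace heap_doc_sketches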
5371 MaybeObject* Heap::AllocateRawTwoByteString(int length,
5372 PretenureFlag pretenure) {
5373 if (length < 0 || length > SeqTwoByteString::kMaxLength) {
5374 return Failure::OutOfMemoryException(0xc);
5376 int size = SeqTwoByteString::SizeFor(length);
5377 ASSERT(size <= SeqTwoByteString::kMaxSize);
5378 AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
5379 AllocationSpace retry_space = OLD_DATA_SPACE;
5381 if (size > Page::kMaxNonCodeHeapObjectSize) {
5382 // Allocate in large object space; the retry space will be ignored.
5387 { MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
5388 if (!maybe_result->ToObject(&result)) return maybe_result;
5391 // Partially initialize the object.
5392 HeapObject::cast(result)->set_map_no_write_barrier(string_map());
5393 String::cast(result)->set_length(length);
5394 String::cast(result)->set_hash_field(String::kEmptyHashField);
5395 ASSERT_EQ(size, HeapObject::cast(result)->Size());
5400 MaybeObject* Heap::AllocateJSArray(
5401 ElementsKind elements_kind,
5402 PretenureFlag pretenure) {
5403 Context* native_context = isolate()->context()->native_context();
5404 JSFunction* array_function = native_context->array_function();
5405 Map* map = array_function->initial_map();
5406 Map* transition_map = isolate()->get_initial_js_array_map(elements_kind);
5407 if (transition_map != NULL) map = transition_map;
5408 return AllocateJSObjectFromMap(map, pretenure);
5412 MaybeObject* Heap::AllocateJSArrayWithAllocationSite(
5413 ElementsKind elements_kind,
5414 Handle<AllocationSite> allocation_site) {
5415 Context* native_context = isolate()->context()->native_context();
5416 JSFunction* array_function = native_context->array_function();
5417 Map* map = array_function->initial_map();
5418 Object* maybe_map_array = native_context->js_array_maps();
5419 if (!maybe_map_array->IsUndefined()) {
5420 Object* maybe_transitioned_map =
5421 FixedArray::cast(maybe_map_array)->get(elements_kind);
5422 if (!maybe_transitioned_map->IsUndefined()) {
5423 map = Map::cast(maybe_transitioned_map);
5426 return AllocateJSObjectFromMapWithAllocationSite(map, allocation_site);
5430 MaybeObject* Heap::AllocateEmptyFixedArray() {
5431 int size = FixedArray::SizeFor(0);
5433 { MaybeObject* maybe_result =
5434 AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE);
5435 if (!maybe_result->ToObject(&result)) return maybe_result;
5437 // Initialize the object.
5438 reinterpret_cast<FixedArray*>(result)->set_map_no_write_barrier(
5440 reinterpret_cast<FixedArray*>(result)->set_length(0);
5445 MaybeObject* Heap::AllocateEmptyExternalArray(ExternalArrayType array_type) {
5446 return AllocateExternalArray(0, array_type, NULL, TENURED);
5450 MaybeObject* Heap::AllocateRawFixedArray(int length) {
5451 if (length < 0 || length > FixedArray::kMaxLength) {
5452 return Failure::OutOfMemoryException(0xd);
5455 // Use the general function if we're forced to always allocate.
5456 if (always_allocate()) return AllocateFixedArray(length, TENURED);
5457 // Allocate the raw data for a fixed array.
5458 int size = FixedArray::SizeFor(length);
5459 return size <= Page::kMaxNonCodeHeapObjectSize
5460 ? new_space_.AllocateRaw(size)
5461 : lo_space_->AllocateRaw(size, NOT_EXECUTABLE);
5465 MaybeObject* Heap::CopyFixedArrayWithMap(FixedArray* src, Map* map) {
5466 int len = src->length();
5468 { MaybeObject* maybe_obj = AllocateRawFixedArray(len);
5469 if (!maybe_obj->ToObject(&obj)) return maybe_obj;
5471 if (InNewSpace(obj)) {
5472 HeapObject* dst = HeapObject::cast(obj);
5473 dst->set_map_no_write_barrier(map);
5474 CopyBlock(dst->address() + kPointerSize,
5475 src->address() + kPointerSize,
5476 FixedArray::SizeFor(len) - kPointerSize);
5479 HeapObject::cast(obj)->set_map_no_write_barrier(map);
5480 FixedArray* result = FixedArray::cast(obj);
5481 result->set_length(len);
5484 DisallowHeapAllocation no_gc;
5485 WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
5486 for (int i = 0; i < len; i++) result->set(i, src->get(i), mode);
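// Illustrative sketch (not part of V8): when the copy above lands in new
// space, the body can be block-copied right after the map word is written,
// because stores into new space need no write barrier; the tenured branch
// instead copies element by element using the write barrier mode computed
// above. The block copy amounts to:
namespace heap_doc_sketches {

// Copies an object's body, i.e. everything after the first (map) word.
inline void CopyBodyAfterMapWord(char* dst, const char* src,
                                 int object_size, int pointer_size) {
  for (int i = pointer_size; i < object_size; i++) dst[i] = src[i];
}

}  // namespace heap_doc_sketches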
5491 MaybeObject* Heap::CopyFixedDoubleArrayWithMap(FixedDoubleArray* src,
5493 int len = src->length();
5495 { MaybeObject* maybe_obj = AllocateRawFixedDoubleArray(len, NOT_TENURED);
5496 if (!maybe_obj->ToObject(&obj)) return maybe_obj;
5498 HeapObject* dst = HeapObject::cast(obj);
5499 dst->set_map_no_write_barrier(map);
5501 dst->address() + FixedDoubleArray::kLengthOffset,
5502 src->address() + FixedDoubleArray::kLengthOffset,
5503 FixedDoubleArray::SizeFor(len) - FixedDoubleArray::kLengthOffset);
5508 MaybeObject* Heap::AllocateFixedArray(int length) {
5509 ASSERT(length >= 0);
5510 if (length == 0) return empty_fixed_array();
5512 { MaybeObject* maybe_result = AllocateRawFixedArray(length);
5513 if (!maybe_result->ToObject(&result)) return maybe_result;
5515 // Initialize header.
5516 FixedArray* array = reinterpret_cast<FixedArray*>(result);
5517 array->set_map_no_write_barrier(fixed_array_map());
5518 array->set_length(length);
5520 ASSERT(!InNewSpace(undefined_value()));
5521 MemsetPointer(array->data_start(), undefined_value(), length);
5526 MaybeObject* Heap::AllocateRawFixedArray(int length, PretenureFlag pretenure) {
5527 if (length < 0 || length > FixedArray::kMaxLength) {
5528 return Failure::OutOfMemoryException(0xe);
5530 int size = FixedArray::SizeFor(length);
5531 AllocationSpace space =
5532 (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
5533 AllocationSpace retry_space = OLD_POINTER_SPACE;
5535 if (size > Page::kMaxNonCodeHeapObjectSize) {
5536 // Allocate in large object space; the retry space will be ignored.
5540 return AllocateRaw(size, space, retry_space);
5544 MUST_USE_RESULT static MaybeObject* AllocateFixedArrayWithFiller(
5547 PretenureFlag pretenure,
5549 ASSERT(length >= 0);
5550 ASSERT(heap->empty_fixed_array()->IsFixedArray());
5551 if (length == 0) return heap->empty_fixed_array();
5553 ASSERT(!heap->InNewSpace(filler));
5555 { MaybeObject* maybe_result = heap->AllocateRawFixedArray(length, pretenure);
5556 if (!maybe_result->ToObject(&result)) return maybe_result;
5559 HeapObject::cast(result)->set_map_no_write_barrier(heap->fixed_array_map());
5560 FixedArray* array = FixedArray::cast(result);
5561 array->set_length(length);
5562 MemsetPointer(array->data_start(), filler, length);
5567 MaybeObject* Heap::AllocateFixedArray(int length, PretenureFlag pretenure) {
5568 return AllocateFixedArrayWithFiller(this,
5575 MaybeObject* Heap::AllocateFixedArrayWithHoles(int length,
5576 PretenureFlag pretenure) {
5577 return AllocateFixedArrayWithFiller(this,
5584 MaybeObject* Heap::AllocateUninitializedFixedArray(int length) {
5585 if (length == 0) return empty_fixed_array();
5588 { MaybeObject* maybe_obj = AllocateRawFixedArray(length);
5589 if (!maybe_obj->ToObject(&obj)) return maybe_obj;
5592 reinterpret_cast<FixedArray*>(obj)->set_map_no_write_barrier(
5594 FixedArray::cast(obj)->set_length(length);
5599 MaybeObject* Heap::AllocateEmptyFixedDoubleArray() {
5600 int size = FixedDoubleArray::SizeFor(0);
5602 { MaybeObject* maybe_result =
5603 AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE);
5604 if (!maybe_result->ToObject(&result)) return maybe_result;
5606 // Initialize the object.
5607 reinterpret_cast<FixedDoubleArray*>(result)->set_map_no_write_barrier(
5608 fixed_double_array_map());
5609 reinterpret_cast<FixedDoubleArray*>(result)->set_length(0);
5614 MaybeObject* Heap::AllocateUninitializedFixedDoubleArray(
5616 PretenureFlag pretenure) {
5617 if (length == 0) return empty_fixed_array();
5619 Object* elements_object;
5620 MaybeObject* maybe_obj = AllocateRawFixedDoubleArray(length, pretenure);
5621 if (!maybe_obj->ToObject(&elements_object)) return maybe_obj;
5622 FixedDoubleArray* elements =
5623 reinterpret_cast<FixedDoubleArray*>(elements_object);
5625 elements->set_map_no_write_barrier(fixed_double_array_map());
5626 elements->set_length(length);
5631 MaybeObject* Heap::AllocateFixedDoubleArrayWithHoles(
5633 PretenureFlag pretenure) {
5634 if (length == 0) return empty_fixed_array();
5636 Object* elements_object;
5637 MaybeObject* maybe_obj = AllocateRawFixedDoubleArray(length, pretenure);
5638 if (!maybe_obj->ToObject(&elements_object)) return maybe_obj;
5639 FixedDoubleArray* elements =
5640 reinterpret_cast<FixedDoubleArray*>(elements_object);
5642 for (int i = 0; i < length; ++i) {
5643 elements->set_the_hole(i);
5646 elements->set_map_no_write_barrier(fixed_double_array_map());
5647 elements->set_length(length);
5652 MaybeObject* Heap::AllocateRawFixedDoubleArray(int length,
5653 PretenureFlag pretenure) {
5654 if (length < 0 || length > FixedDoubleArray::kMaxLength) {
5655 return Failure::OutOfMemoryException(0xf);
5657 int size = FixedDoubleArray::SizeFor(length);
5658 AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
5659 AllocationSpace retry_space = OLD_DATA_SPACE;
5661 #ifndef V8_HOST_ARCH_64_BIT
5662 size += kPointerSize;
5665 if (size > Page::kMaxNonCodeHeapObjectSize) {
5666 // Allocate in large object space; the retry space will be ignored.
5671 { MaybeObject* maybe_object = AllocateRaw(size, space, retry_space);
5672 if (!maybe_object->To<HeapObject>(&object)) return maybe_object;
5675 return EnsureDoubleAligned(this, object, size);
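// Illustrative sketch (not part of V8): on 32-bit hosts the allocator above
// reserves one extra pointer word so that EnsureDoubleAligned can move the
// payload onto an 8-byte boundary. The underlying address arithmetic is just
// rounding up to a multiple of 8, e.g.:
namespace heap_doc_sketches {

// Rounds an address-sized value up to the next multiple of 8 (the alignment
// required for unboxed doubles).
inline unsigned long AlignUpTo8(unsigned long value) {
  return (value + 7ul) & ~7ul;
}

}  // namespace heap_doc_sketches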
5679 MaybeObject* Heap::AllocateHashTable(int length, PretenureFlag pretenure) {
5681 { MaybeObject* maybe_result = AllocateFixedArray(length, pretenure);
5682 if (!maybe_result->ToObject(&result)) return maybe_result;
5684 reinterpret_cast<HeapObject*>(result)->set_map_no_write_barrier(
5686 ASSERT(result->IsHashTable());
5691 MaybeObject* Heap::AllocateSymbol() {
5692 // Statically ensure that it is safe to allocate symbols in paged spaces.
5693 STATIC_ASSERT(Symbol::kSize <= Page::kNonCodeObjectAreaSize);
5696 MaybeObject* maybe =
5697 AllocateRaw(Symbol::kSize, OLD_POINTER_SPACE, OLD_POINTER_SPACE);
5698 if (!maybe->ToObject(&result)) return maybe;
5700 HeapObject::cast(result)->set_map_no_write_barrier(symbol_map());
5702 // Generate a random hash value.
5706 hash = V8::RandomPrivate(isolate()) & Name::kHashBitMask;
5708 } while (hash == 0 && attempts < 30);
5709 if (hash == 0) hash = 1; // never return 0
5711 Symbol::cast(result)->set_hash_field(
5712 Name::kIsNotArrayIndexMask | (hash << Name::kHashShift));
5713 Symbol::cast(result)->set_name(undefined_value());
5715 ASSERT(result->IsSymbol());
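// Illustrative sketch (not part of V8): the hash loop above keeps drawing
// random bits until it gets a non-zero value, with a bounded number of
// attempts and a fallback of 1 (see the "never return 0" comment above).
// With a caller-supplied random source, the retry logic is:
namespace heap_doc_sketches {

// random_fn is any source of 32-bit random values; mask keeps only the bits
// that fit in the hash field.
inline unsigned GenerateNonZeroHash(unsigned (*random_fn)(), unsigned mask) {
  unsigned hash = 0;
  for (int attempts = 0; hash == 0 && attempts < 30; attempts++) {
    hash = random_fn() & mask;
  }
  return hash == 0 ? 1u : hash;  // Never return 0.
}

}  // namespace heap_doc_sketches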
5720 MaybeObject* Heap::AllocateNativeContext() {
5722 { MaybeObject* maybe_result =
5723 AllocateFixedArray(Context::NATIVE_CONTEXT_SLOTS);
5724 if (!maybe_result->ToObject(&result)) return maybe_result;
5726 Context* context = reinterpret_cast<Context*>(result);
5727 context->set_map_no_write_barrier(native_context_map());
5728 context->set_js_array_maps(undefined_value());
5729 ASSERT(context->IsNativeContext());
5730 ASSERT(result->IsContext());
5735 MaybeObject* Heap::AllocateGlobalContext(JSFunction* function,
5736 ScopeInfo* scope_info) {
5738 { MaybeObject* maybe_result =
5739 AllocateFixedArray(scope_info->ContextLength(), TENURED);
5740 if (!maybe_result->ToObject(&result)) return maybe_result;
5742 Context* context = reinterpret_cast<Context*>(result);
5743 context->set_map_no_write_barrier(global_context_map());
5744 context->set_closure(function);
5745 context->set_previous(function->context());
5746 context->set_extension(scope_info);
5747 context->set_global_object(function->context()->global_object());
5748 ASSERT(context->IsGlobalContext());
5749 ASSERT(result->IsContext());
5754 MaybeObject* Heap::AllocateModuleContext(ScopeInfo* scope_info) {
5756 { MaybeObject* maybe_result =
5757 AllocateFixedArray(scope_info->ContextLength(), TENURED);
5758 if (!maybe_result->ToObject(&result)) return maybe_result;
5760 Context* context = reinterpret_cast<Context*>(result);
5761 context->set_map_no_write_barrier(module_context_map());
5762 // Instance link will be set later.
5763 context->set_extension(Smi::FromInt(0));
5768 MaybeObject* Heap::AllocateFunctionContext(int length, JSFunction* function) {
5769 ASSERT(length >= Context::MIN_CONTEXT_SLOTS);
5771 { MaybeObject* maybe_result = AllocateFixedArray(length);
5772 if (!maybe_result->ToObject(&result)) return maybe_result;
5774 Context* context = reinterpret_cast<Context*>(result);
5775 context->set_map_no_write_barrier(function_context_map());
5776 context->set_closure(function);
5777 context->set_previous(function->context());
5778 context->set_extension(Smi::FromInt(0));
5779 context->set_global_object(function->context()->global_object());
5784 MaybeObject* Heap::AllocateCatchContext(JSFunction* function,
5787 Object* thrown_object) {
5788 STATIC_ASSERT(Context::MIN_CONTEXT_SLOTS == Context::THROWN_OBJECT_INDEX);
5790 { MaybeObject* maybe_result =
5791 AllocateFixedArray(Context::MIN_CONTEXT_SLOTS + 1);
5792 if (!maybe_result->ToObject(&result)) return maybe_result;
5794 Context* context = reinterpret_cast<Context*>(result);
5795 context->set_map_no_write_barrier(catch_context_map());
5796 context->set_closure(function);
5797 context->set_previous(previous);
5798 context->set_extension(name);
5799 context->set_global_object(previous->global_object());
5800 context->set(Context::THROWN_OBJECT_INDEX, thrown_object);
5805 MaybeObject* Heap::AllocateWithContext(JSFunction* function,
5807 JSReceiver* extension) {
5809 { MaybeObject* maybe_result = AllocateFixedArray(Context::MIN_CONTEXT_SLOTS);
5810 if (!maybe_result->ToObject(&result)) return maybe_result;
5812 Context* context = reinterpret_cast<Context*>(result);
5813 context->set_map_no_write_barrier(with_context_map());
5814 context->set_closure(function);
5815 context->set_previous(previous);
5816 context->set_extension(extension);
5817 context->set_global_object(previous->global_object());
5822 MaybeObject* Heap::AllocateBlockContext(JSFunction* function,
5824 ScopeInfo* scope_info) {
5826 { MaybeObject* maybe_result =
5827 AllocateFixedArrayWithHoles(scope_info->ContextLength());
5828 if (!maybe_result->ToObject(&result)) return maybe_result;
5830 Context* context = reinterpret_cast<Context*>(result);
5831 context->set_map_no_write_barrier(block_context_map());
5832 context->set_closure(function);
5833 context->set_previous(previous);
5834 context->set_extension(scope_info);
5835 context->set_global_object(previous->global_object());
5840 MaybeObject* Heap::AllocateScopeInfo(int length) {
5841 FixedArray* scope_info;
5842 MaybeObject* maybe_scope_info = AllocateFixedArray(length, TENURED);
5843 if (!maybe_scope_info->To(&scope_info)) return maybe_scope_info;
5844 scope_info->set_map_no_write_barrier(scope_info_map());
5849 MaybeObject* Heap::AllocateExternal(void* value) {
5851 { MaybeObject* maybe_result = AllocateForeign(static_cast<Address>(value));
5852 if (!maybe_result->To(&foreign)) return maybe_result;
5855 { MaybeObject* maybe_result = AllocateJSObjectFromMap(external_map());
5856 if (!maybe_result->To(&external)) return maybe_result;
5858 external->SetInternalField(0, foreign);
5863 MaybeObject* Heap::AllocateStruct(InstanceType type) {
5866 #define MAKE_CASE(NAME, Name, name) \
5867 case NAME##_TYPE: map = name##_map(); break;
5868 STRUCT_LIST(MAKE_CASE)
5872 return Failure::InternalError();
5874 int size = map->instance_size();
5875 AllocationSpace space =
5876 (size > Page::kMaxNonCodeHeapObjectSize) ? LO_SPACE : OLD_POINTER_SPACE;
5878 { MaybeObject* maybe_result = Allocate(map, space);
5879 if (!maybe_result->ToObject(&result)) return maybe_result;
5881 Struct::cast(result)->InitializeBody(size);
5886 bool Heap::IsHeapIterable() {
5887 return (!old_pointer_space()->was_swept_conservatively() &&
5888 !old_data_space()->was_swept_conservatively());
5892 void Heap::EnsureHeapIsIterable() {
5893 ASSERT(AllowHeapAllocation::IsAllowed());
5894 if (!IsHeapIterable()) {
5895 CollectAllGarbage(kMakeHeapIterableMask, "Heap::EnsureHeapIsIterable");
5897 ASSERT(IsHeapIterable());
5901 void Heap::AdvanceIdleIncrementalMarking(intptr_t step_size) {
5902 incremental_marking()->Step(step_size,
5903 IncrementalMarking::NO_GC_VIA_STACK_GUARD);
5905 if (incremental_marking()->IsComplete()) {
5906 bool uncommit = false;
5907 if (gc_count_at_last_idle_gc_ == gc_count_) {
5908 // No GC since the last full GC; the mutator is probably not active.
5909 isolate_->compilation_cache()->Clear();
5912 CollectAllGarbage(kNoGCFlags, "idle notification: finalize incremental");
5913 mark_sweeps_since_idle_round_started_++;
5914 gc_count_at_last_idle_gc_ = gc_count_;
5916 new_space_.Shrink();
5917 UncommitFromSpace();
5923 bool Heap::IdleNotification(int hint) {
5924 // Hints greater than this value indicate that
5925 // the embedder is requesting a lot of GC work.
5926 const int kMaxHint = 1000;
5927 const int kMinHintForIncrementalMarking = 10;
5929 // Minimal hint that allows a full GC.
5929 const int kMinHintForFullGC = 100;
5930 intptr_t size_factor = Min(Max(hint, 20), kMaxHint) / 4;
5931 // The size factor is in range [5..250]. The numbers here are chosen from
5932 // experiments. If you change them, make sure to test with
5933 // chrome/performance_ui_tests --gtest_filter="GeneralMixMemoryTest.*
5934 intptr_t step_size =
5935 size_factor * IncrementalMarking::kAllocatedThreshold;
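  // For example, a hint of 100 gives size_factor = Min(Max(100, 20), 1000) / 4
  // = 25, so step_size is 25 * IncrementalMarking::kAllocatedThreshold; hints
  // of 20 or less clamp the factor to 5 and hints of 1000 or more clamp it to
  // 250, matching the [5..250] range noted above.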
5937 if (contexts_disposed_ > 0) {
5938 if (hint >= kMaxHint) {
5939 // The embedder is requesting a lot of GC work after context disposal,
5940 // so we age inline caches so that they don't keep objects from
5941 // the old context alive.
5944 int mark_sweep_time = Min(TimeMarkSweepWouldTakeInMs(), 1000);
5945 if (hint >= mark_sweep_time && !FLAG_expose_gc &&
5946 incremental_marking()->IsStopped()) {
5947 HistogramTimerScope scope(isolate_->counters()->gc_context());
5948 CollectAllGarbage(kReduceMemoryFootprintMask,
5949 "idle notification: contexts disposed");
5951 AdvanceIdleIncrementalMarking(step_size);
5952 contexts_disposed_ = 0;
5954 // After context disposal there is likely a lot of garbage remaining; reset
5955 // the idle notification counters in order to trigger more incremental GCs
5956 // on subsequent idle notifications.
5961 if (!FLAG_incremental_marking || FLAG_expose_gc || Serializer::enabled()) {
5962 return IdleGlobalGC();
5965 // By doing small chunks of GC work in each IdleNotification,
5966 // we perform a round of incremental GCs and after that wait until
5967 // the mutator creates enough garbage to justify a new round.
5968 // An incremental GC progresses as follows:
5969 // 1. many incremental marking steps,
5970 // 2. one old space mark-sweep-compact,
5971 // 3. many lazy sweep steps.
5972 // Use mark-sweep-compact events to count incremental GCs in a round.
5974 if (incremental_marking()->IsStopped()) {
5975 if (!mark_compact_collector()->AreSweeperThreadsActivated() &&
5976 !IsSweepingComplete() &&
5977 !AdvanceSweepers(static_cast<int>(step_size))) {
5982 if (mark_sweeps_since_idle_round_started_ >= kMaxMarkSweepsInIdleRound) {
5983 if (EnoughGarbageSinceLastIdleRound()) {
5990 int remaining_mark_sweeps = kMaxMarkSweepsInIdleRound -
5991 mark_sweeps_since_idle_round_started_;
5993 if (incremental_marking()->IsStopped()) {
5994 // If there are no more than two GCs left in this idle round and we are
5995 // allowed to do a full GC, then make those GCs full in order to compact
5997 // TODO(ulan): Once we enable code compaction for incremental marking,
5998 // we can get rid of this special case and always start incremental marking.
5999 if (remaining_mark_sweeps <= 2 && hint >= kMinHintForFullGC) {
6000 CollectAllGarbage(kReduceMemoryFootprintMask,
6001 "idle notification: finalize idle round");
6002 mark_sweeps_since_idle_round_started_++;
6003 } else if (hint > kMinHintForIncrementalMarking) {
6004 incremental_marking()->Start();
6007 if (!incremental_marking()->IsStopped() &&
6008 hint > kMinHintForIncrementalMarking) {
6009 AdvanceIdleIncrementalMarking(step_size);
6012 if (mark_sweeps_since_idle_round_started_ >= kMaxMarkSweepsInIdleRound) {
6021 bool Heap::IdleGlobalGC() {
6022 static const int kIdlesBeforeScavenge = 4;
6023 static const int kIdlesBeforeMarkSweep = 7;
6024 static const int kIdlesBeforeMarkCompact = 8;
6025 static const int kMaxIdleCount = kIdlesBeforeMarkCompact + 1;
6026 static const unsigned int kGCsBetweenCleanup = 4;
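  // With these constants, the 4th consecutive idle notification triggers a
  // new-space scavenge, the 7th a mark-sweep that also clears the compilation
  // cache, and the 8th a final mark-compact after which the counter resets;
  // kGCsBetweenCleanup or more ordinary GCs since the last idle cleanup also
  // reset the counter (see below).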
6028 if (!last_idle_notification_gc_count_init_) {
6029 last_idle_notification_gc_count_ = gc_count_;
6030 last_idle_notification_gc_count_init_ = true;
6033 bool uncommit = true;
6034 bool finished = false;
6036 // Reset the number of idle notifications received when a number of
6037 // GCs have taken place. This allows another round of cleanup based
6038 // on idle notifications if enough work has been carried out to
6039 // provoke a number of garbage collections.
6040 if (gc_count_ - last_idle_notification_gc_count_ < kGCsBetweenCleanup) {
6041 number_idle_notifications_ =
6042 Min(number_idle_notifications_ + 1, kMaxIdleCount);
6044 number_idle_notifications_ = 0;
6045 last_idle_notification_gc_count_ = gc_count_;
6048 if (number_idle_notifications_ == kIdlesBeforeScavenge) {
6049 CollectGarbage(NEW_SPACE, "idle notification");
6050 new_space_.Shrink();
6051 last_idle_notification_gc_count_ = gc_count_;
6052 } else if (number_idle_notifications_ == kIdlesBeforeMarkSweep) {
6053 // Before doing the mark-sweep collections we clear the
6054 // compilation cache to avoid hanging on to source code and
6055 // generated code for cached functions.
6056 isolate_->compilation_cache()->Clear();
6058 CollectAllGarbage(kReduceMemoryFootprintMask, "idle notification");
6059 new_space_.Shrink();
6060 last_idle_notification_gc_count_ = gc_count_;
6062 } else if (number_idle_notifications_ == kIdlesBeforeMarkCompact) {
6063 CollectAllGarbage(kReduceMemoryFootprintMask, "idle notification");
6064 new_space_.Shrink();
6065 last_idle_notification_gc_count_ = gc_count_;
6066 number_idle_notifications_ = 0;
6068 } else if (number_idle_notifications_ > kIdlesBeforeMarkCompact) {
6069 // If we have received more than kIdlesBeforeMarkCompact idle
6070 // notifications we do not perform any cleanup because we don't
6071 // expect to gain much by doing so.
6075 if (uncommit) UncommitFromSpace();
6083 void Heap::Print() {
6084 if (!HasBeenSetUp()) return;
6085 isolate()->PrintStack(stdout);
6086 AllSpaces spaces(this);
6087 for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
6093 void Heap::ReportCodeStatistics(const char* title) {
6094 PrintF(">>>>>> Code Stats (%s) >>>>>>\n", title);
6095 PagedSpace::ResetCodeStatistics();
6096 // We do not look for code in new space, map space, or old space. If code
6097 // somehow ends up in those spaces, we would miss it here.
6098 code_space_->CollectCodeStatistics();
6099 lo_space_->CollectCodeStatistics();
6100 PagedSpace::ReportCodeStatistics();
6104 // This function expects that NewSpace's allocated objects histogram is
6105 // populated (via a call to CollectStatistics or else as a side effect of a
6106 // just-completed scavenge collection).
6107 void Heap::ReportHeapStatistics(const char* title) {
6109 PrintF(">>>>>> =============== %s (%d) =============== >>>>>>\n",
6111 PrintF("old_generation_allocation_limit_ %" V8_PTR_PREFIX "d\n",
6112 old_generation_allocation_limit_);
6115 PrintF("Number of handles : %d\n", HandleScope::NumberOfHandles(isolate_));
6116 isolate_->global_handles()->PrintStats();
6119 PrintF("Heap statistics : ");
6120 isolate_->memory_allocator()->ReportStatistics();
6121 PrintF("To space : ");
6122 new_space_.ReportStatistics();
6123 PrintF("Old pointer space : ");
6124 old_pointer_space_->ReportStatistics();
6125 PrintF("Old data space : ");
6126 old_data_space_->ReportStatistics();
6127 PrintF("Code space : ");
6128 code_space_->ReportStatistics();
6129 PrintF("Map space : ");
6130 map_space_->ReportStatistics();
6131 PrintF("Cell space : ");
6132 cell_space_->ReportStatistics();
6133 PrintF("PropertyCell space : ");
6134 property_cell_space_->ReportStatistics();
6135 PrintF("Large object space : ");
6136 lo_space_->ReportStatistics();
6137 PrintF(">>>>>> ========================================= >>>>>>\n");
6142 bool Heap::Contains(HeapObject* value) {
6143 return Contains(value->address());
6147 bool Heap::Contains(Address addr) {
6148 if (OS::IsOutsideAllocatedSpace(addr)) return false;
6149 return HasBeenSetUp() &&
6150 (new_space_.ToSpaceContains(addr) ||
6151 old_pointer_space_->Contains(addr) ||
6152 old_data_space_->Contains(addr) ||
6153 code_space_->Contains(addr) ||
6154 map_space_->Contains(addr) ||
6155 cell_space_->Contains(addr) ||
6156 property_cell_space_->Contains(addr) ||
6157 lo_space_->SlowContains(addr));
6161 bool Heap::InSpace(HeapObject* value, AllocationSpace space) {
6162 return InSpace(value->address(), space);
6166 bool Heap::InSpace(Address addr, AllocationSpace space) {
6167 if (OS::IsOutsideAllocatedSpace(addr)) return false;
6168 if (!HasBeenSetUp()) return false;
6172 return new_space_.ToSpaceContains(addr);
6173 case OLD_POINTER_SPACE:
6174 return old_pointer_space_->Contains(addr);
6175 case OLD_DATA_SPACE:
6176 return old_data_space_->Contains(addr);
6178 return code_space_->Contains(addr);
6180 return map_space_->Contains(addr);
6182 return cell_space_->Contains(addr);
6183 case PROPERTY_CELL_SPACE:
6184 return property_cell_space_->Contains(addr);
6186 return lo_space_->SlowContains(addr);
6194 void Heap::Verify() {
6195 CHECK(HasBeenSetUp());
6197 store_buffer()->Verify();
6199 VerifyPointersVisitor visitor;
6200 IterateRoots(&visitor, VISIT_ONLY_STRONG);
6202 new_space_.Verify();
6204 old_pointer_space_->Verify(&visitor);
6205 map_space_->Verify(&visitor);
6207 VerifyPointersVisitor no_dirty_regions_visitor;
6208 old_data_space_->Verify(&no_dirty_regions_visitor);
6209 code_space_->Verify(&no_dirty_regions_visitor);
6210 cell_space_->Verify(&no_dirty_regions_visitor);
6211 property_cell_space_->Verify(&no_dirty_regions_visitor);
6213 lo_space_->Verify();
6218 MaybeObject* Heap::InternalizeUtf8String(Vector<const char> string) {
6219 Object* result = NULL;
6221 { MaybeObject* maybe_new_table =
6222 string_table()->LookupUtf8String(string, &result);
6223 if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
6225 // Can't use set_string_table because StringTable::cast knows that
6226 // StringTable is a singleton and checks for identity.
6227 roots_[kStringTableRootIndex] = new_table;
6228 ASSERT(result != NULL);
6233 MaybeObject* Heap::InternalizeOneByteString(Vector<const uint8_t> string) {
6234 Object* result = NULL;
6236 { MaybeObject* maybe_new_table =
6237 string_table()->LookupOneByteString(string, &result);
6238 if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
6240 // Can't use set_string_table because StringTable::cast knows that
6241 // StringTable is a singleton and checks for identity.
6242 roots_[kStringTableRootIndex] = new_table;
6243 ASSERT(result != NULL);
6248 MaybeObject* Heap::InternalizeOneByteString(Handle<SeqOneByteString> string,
6251 Object* result = NULL;
6253 { MaybeObject* maybe_new_table =
6254 string_table()->LookupSubStringOneByteString(string,
6258 if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
6260 // Can't use set_string_table because StringTable::cast knows that
6261 // StringTable is a singleton and checks for identity.
6262 roots_[kStringTableRootIndex] = new_table;
6263 ASSERT(result != NULL);
6268 MaybeObject* Heap::InternalizeTwoByteString(Vector<const uc16> string) {
6269 Object* result = NULL;
6271 { MaybeObject* maybe_new_table =
6272 string_table()->LookupTwoByteString(string, &result);
6273 if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
6275 // Can't use set_string_table because StringTable::cast knows that
6276 // StringTable is a singleton and checks for identity.
6277 roots_[kStringTableRootIndex] = new_table;
6278 ASSERT(result != NULL);
6283 MaybeObject* Heap::InternalizeString(String* string) {
6284 if (string->IsInternalizedString()) return string;
6285 Object* result = NULL;
6287 { MaybeObject* maybe_new_table =
6288 string_table()->LookupString(string, &result);
6289 if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
6291 // Can't use set_string_table because StringTable::cast knows that
6292 // StringTable is a singleton and checks for identity.
6293 roots_[kStringTableRootIndex] = new_table;
6294 ASSERT(result != NULL);
6299 bool Heap::InternalizeStringIfExists(String* string, String** result) {
6300 if (string->IsInternalizedString()) {
6304 return string_table()->LookupStringIfExists(string, result);
6308 void Heap::ZapFromSpace() {
6309 NewSpacePageIterator it(new_space_.FromSpaceStart(),
6310 new_space_.FromSpaceEnd());
6311 while (it.has_next()) {
6312 NewSpacePage* page = it.next();
6313 for (Address cursor = page->area_start(), limit = page->area_end();
6315 cursor += kPointerSize) {
6316 Memory::Address_at(cursor) = kFromSpaceZapValue;
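      // With kFromSpaceZapValue as the marker, any stale pointer into
      // from-space that is dereferenced later reads this recognizable pattern
      // instead of plausible object data, which makes such bugs fail loudly.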
6322 void Heap::IterateAndMarkPointersToFromSpace(Address start,
6324 ObjectSlotCallback callback) {
6325 Address slot_address = start;
6327 // We do not collect slots on new space objects during mutation, so we
6328 // have to scan for pointers to evacuation candidates when we promote
6329 // objects. But we should not record any slots in non-black objects:
6330 // a grey object's slots will be rescanned anyway, and a white object
6331 // might not survive until the end of the collection, so recording its
6332 // slots would violate the invariant.
6333 bool record_slots = false;
6334 if (incremental_marking()->IsCompacting()) {
6335 MarkBit mark_bit = Marking::MarkBitFrom(HeapObject::FromAddress(start));
6336 record_slots = Marking::IsBlack(mark_bit);
6339 while (slot_address < end) {
6340 Object** slot = reinterpret_cast<Object**>(slot_address);
6341 Object* object = *slot;
6342 // If the store buffer becomes overfull we mark pages as being exempt from
6343 // the store buffer. These pages are scanned to find pointers that point
6344 // to the new space. In that case we may hit newly promoted objects and
6345 // fix the pointers before the promotion queue gets to them. Thus the 'if'.
6346 if (object->IsHeapObject()) {
6347 if (Heap::InFromSpace(object)) {
6348 callback(reinterpret_cast<HeapObject**>(slot),
6349 HeapObject::cast(object));
6350 Object* new_object = *slot;
6351 if (InNewSpace(new_object)) {
6352 SLOW_ASSERT(Heap::InToSpace(new_object));
6353 SLOW_ASSERT(new_object->IsHeapObject());
6354 store_buffer_.EnterDirectlyIntoStoreBuffer(
6355 reinterpret_cast<Address>(slot));
6357 SLOW_ASSERT(!MarkCompactCollector::IsOnEvacuationCandidate(new_object));
6358 } else if (record_slots &&
6359 MarkCompactCollector::IsOnEvacuationCandidate(object)) {
6360 mark_compact_collector()->RecordSlot(slot, slot, object);
6363 slot_address += kPointerSize;
6369 typedef bool (*CheckStoreBufferFilter)(Object** addr);
6372 bool IsAMapPointerAddress(Object** addr) {
6373 uintptr_t a = reinterpret_cast<uintptr_t>(addr);
6374 int mod = a % Map::kSize;
6375 return mod >= Map::kPointerFieldsBeginOffset &&
6376 mod < Map::kPointerFieldsEndOffset;
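// Illustrative sketch (not part of V8): the check above assumes maps in the
// map space are packed at Map::kSize intervals, so an address's offset within
// its map is simply the address modulo Map::kSize. The same pattern for any
// packed array of fixed-size records, with hypothetical parameter names:
namespace heap_doc_sketches {

// Returns true if address falls inside the [fields_begin, fields_end) byte
// range of its enclosing record, assuming records of record_size bytes laid
// out back to back from a record-aligned base.
inline bool IsInsideRecordFieldRange(unsigned long address, unsigned record_size,
                                     unsigned fields_begin, unsigned fields_end) {
  unsigned offset = static_cast<unsigned>(address % record_size);
  return offset >= fields_begin && offset < fields_end;
}

}  // namespace heap_doc_sketches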
6380 bool EverythingsAPointer(Object** addr) {
6385 static void CheckStoreBuffer(Heap* heap,
6388 Object**** store_buffer_position,
6389 Object*** store_buffer_top,
6390 CheckStoreBufferFilter filter,
6391 Address special_garbage_start,
6392 Address special_garbage_end) {
6393 Map* free_space_map = heap->free_space_map();
6394 for ( ; current < limit; current++) {
6395 Object* o = *current;
6396 Address current_address = reinterpret_cast<Address>(current);
6398 if (o == free_space_map) {
6399 Address current_address = reinterpret_cast<Address>(current);
6400 FreeSpace* free_space =
6401 FreeSpace::cast(HeapObject::FromAddress(current_address));
6402 int skip = free_space->Size();
6403 ASSERT(current_address + skip <= reinterpret_cast<Address>(limit));
6405 current_address += skip - kPointerSize;
6406 current = reinterpret_cast<Object**>(current_address);
6409 // Skip the current linear allocation space between top and limit which is
6410 // not marked with the free space map but which can contain junk.
6411 if (current_address == special_garbage_start &&
6412 special_garbage_end != special_garbage_start) {
6413 current_address = special_garbage_end - kPointerSize;
6414 current = reinterpret_cast<Object**>(current_address);
6417 if (!(*filter)(current)) continue;
6418 ASSERT(current_address < special_garbage_start ||
6419 current_address >= special_garbage_end);
6420 ASSERT(reinterpret_cast<uintptr_t>(o) != kFreeListZapValue);
6421 // We have to check that the pointer does not point into new space
6422 // without trying to cast it to a heap object since the hash field of
6423 // a string can contain values like 1 and 3 which are tagged null
6424 // pointers.
6425 if (!heap->InNewSpace(o)) continue;
6426 while (**store_buffer_position < current &&
6427 *store_buffer_position < store_buffer_top) {
6428 (*store_buffer_position)++;
6430 if (**store_buffer_position != current ||
6431 *store_buffer_position == store_buffer_top) {
6432 Object** obj_start = current;
6433 while (!(*obj_start)->IsMap()) obj_start--;
6440 // Check that the store buffer contains all intergenerational pointers by
6441 // scanning a page and ensuring that all pointers to young space are in the
6442 // store buffer.
6443 void Heap::OldPointerSpaceCheckStoreBuffer() {
6444 OldSpace* space = old_pointer_space();
6445 PageIterator pages(space);
6447 store_buffer()->SortUniq();
6449 while (pages.has_next()) {
6450 Page* page = pages.next();
6451 Object** current = reinterpret_cast<Object**>(page->area_start());
6453 Address end = page->area_end();
6455 Object*** store_buffer_position = store_buffer()->Start();
6456 Object*** store_buffer_top = store_buffer()->Top();
6458 Object** limit = reinterpret_cast<Object**>(end);
6459 CheckStoreBuffer(this,
6462 &store_buffer_position,
6464 &EverythingsAPointer,
6471 void Heap::MapSpaceCheckStoreBuffer() {
6472 MapSpace* space = map_space();
6473 PageIterator pages(space);
6475 store_buffer()->SortUniq();
6477 while (pages.has_next()) {
6478 Page* page = pages.next();
6479 Object** current = reinterpret_cast<Object**>(page->area_start());
6481 Address end = page->area_end();
6483 Object*** store_buffer_position = store_buffer()->Start();
6484 Object*** store_buffer_top = store_buffer()->Top();
6486 Object** limit = reinterpret_cast<Object**>(end);
6487 CheckStoreBuffer(this,
6490 &store_buffer_position,
6492 &IsAMapPointerAddress,
6499 void Heap::LargeObjectSpaceCheckStoreBuffer() {
6500 LargeObjectIterator it(lo_space());
6501 for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
6502 // We only have code, sequential strings, or fixed arrays in large
6503 // object space, and only fixed arrays can possibly contain pointers to
6504 // the young generation.
6505 if (object->IsFixedArray()) {
6506 Object*** store_buffer_position = store_buffer()->Start();
6507 Object*** store_buffer_top = store_buffer()->Top();
6508 Object** current = reinterpret_cast<Object**>(object->address());
6510 reinterpret_cast<Object**>(object->address() + object->Size());
6511 CheckStoreBuffer(this,
6514 &store_buffer_position,
6516 &EverythingsAPointer,
6525 void Heap::IterateRoots(ObjectVisitor* v, VisitMode mode) {
6526 IterateStrongRoots(v, mode);
6527 IterateWeakRoots(v, mode);
6531 void Heap::IterateWeakRoots(ObjectVisitor* v, VisitMode mode) {
6532 v->VisitPointer(reinterpret_cast<Object**>(&roots_[kStringTableRootIndex]));
6533 v->Synchronize(VisitorSynchronization::kStringTable);
6534 if (mode != VISIT_ALL_IN_SCAVENGE &&
6535 mode != VISIT_ALL_IN_SWEEP_NEWSPACE) {
6536 // Scavenge collections have special processing for this.
6537 external_string_table_.Iterate(v);
6539 v->Synchronize(VisitorSynchronization::kExternalStringsTable);
6543 void Heap::IterateStrongRoots(ObjectVisitor* v, VisitMode mode) {
6544 v->VisitPointers(&roots_[0], &roots_[kStrongRootListLength]);
6545 v->Synchronize(VisitorSynchronization::kStrongRootList);
6547 v->VisitPointer(BitCast<Object**>(&hidden_string_));
6548 v->Synchronize(VisitorSynchronization::kInternalizedString);
6550 isolate_->bootstrapper()->Iterate(v);
6551 v->Synchronize(VisitorSynchronization::kBootstrapper);
6552 isolate_->Iterate(v);
6553 v->Synchronize(VisitorSynchronization::kTop);
6554 Relocatable::Iterate(v);
6555 v->Synchronize(VisitorSynchronization::kRelocatable);
6557 #ifdef ENABLE_DEBUGGER_SUPPORT
6558 isolate_->debug()->Iterate(v);
6559 if (isolate_->deoptimizer_data() != NULL) {
6560 isolate_->deoptimizer_data()->Iterate(v);
6563 v->Synchronize(VisitorSynchronization::kDebug);
6564 isolate_->compilation_cache()->Iterate(v);
6565 v->Synchronize(VisitorSynchronization::kCompilationCache);
6567 // Iterate over local handles in handle scopes.
6568 isolate_->handle_scope_implementer()->Iterate(v);
6569 isolate_->IterateDeferredHandles(v);
6570 v->Synchronize(VisitorSynchronization::kHandleScope);
6572 // Iterate over the builtin code objects and code stubs in the
6573 // heap. Note that it is not necessary to iterate over code objects
6574 // on scavenge collections.
6575 if (mode != VISIT_ALL_IN_SCAVENGE) {
6576 isolate_->builtins()->IterateBuiltins(v);
6578 v->Synchronize(VisitorSynchronization::kBuiltins);
6580 // Iterate over global handles.
6582 case VISIT_ONLY_STRONG:
6583 isolate_->global_handles()->IterateStrongRoots(v);
6585 case VISIT_ALL_IN_SCAVENGE:
6586 isolate_->global_handles()->IterateNewSpaceStrongAndDependentRoots(v);
6588 case VISIT_ALL_IN_SWEEP_NEWSPACE:
6590 isolate_->global_handles()->IterateAllRoots(v);
6593 v->Synchronize(VisitorSynchronization::kGlobalHandles);
6595 // Iterate over eternal handles.
6596 if (mode == VISIT_ALL_IN_SCAVENGE) {
6597 isolate_->eternal_handles()->IterateNewSpaceRoots(v);
6599 isolate_->eternal_handles()->IterateAllRoots(v);
6601 v->Synchronize(VisitorSynchronization::kEternalHandles);
6603 // Iterate over pointers being held by inactive threads.
6604 isolate_->thread_manager()->Iterate(v);
6605 v->Synchronize(VisitorSynchronization::kThreadManager);
6607 // Iterate over the pointers the Serialization/Deserialization code is
6608 // holding.
6609 // During garbage collection this keeps the partial snapshot cache alive.
6610 // During deserialization of the startup snapshot this creates the partial
6611 // snapshot cache and deserializes the objects it refers to. During
6612 // serialization this does nothing, since the partial snapshot cache is
6613 // empty. However the next thing we do is create the partial snapshot,
6614 // filling up the partial snapshot cache with objects it needs as we go.
6615 SerializerDeserializer::Iterate(v);
6616 // We don't do a v->Synchronize call here, because in debug mode that will
6617 // output a flag to the snapshot. However at this point the serializer and
6618 // deserializer are deliberately a little unsynchronized (see above) so the
6619 // checking of the sync flag in the snapshot would fail.
6623 // TODO(1236194): Since the heap size is configurable on the command line
6624 // and through the API, we should gracefully handle the case that the heap
6625 // size is not big enough to fit all the initial objects.
6626 bool Heap::ConfigureHeap(int max_semispace_size,
6627 intptr_t max_old_gen_size,
6628 intptr_t max_executable_size) {
6629 if (HasBeenSetUp()) return false;
6631 if (FLAG_stress_compaction) {
6632 // This will cause more frequent GCs when stressing.
6633 max_semispace_size_ = Page::kPageSize;
6636 if (max_semispace_size > 0) {
6637 if (max_semispace_size < Page::kPageSize) {
6638 max_semispace_size = Page::kPageSize;
6639 if (FLAG_trace_gc) {
6640 PrintPID("Max semispace size cannot be less than %dkbytes\n",
6641 Page::kPageSize >> 10);
6644 max_semispace_size_ = max_semispace_size;
6647 if (Snapshot::IsEnabled()) {
6648 // If we are using a snapshot we always reserve the default amount
6649 // of memory for each semispace because code in the snapshot has
6650 // write-barrier code that relies on the size and alignment of new
6651 // space. We therefore cannot use a larger max semispace size
6652 // than the default reserved semispace size.
6653 if (max_semispace_size_ > reserved_semispace_size_) {
6654 max_semispace_size_ = reserved_semispace_size_;
6655 if (FLAG_trace_gc) {
6656 PrintPID("Max semispace size cannot be more than %dkbytes\n",
6657 reserved_semispace_size_ >> 10);
6661 // If we are not using snapshots we reserve space for the actual
6662 // max semispace size.
6663 reserved_semispace_size_ = max_semispace_size_;
6666 if (max_old_gen_size > 0) max_old_generation_size_ = max_old_gen_size;
6667 if (max_executable_size > 0) {
6668 max_executable_size_ = RoundUp(max_executable_size, Page::kPageSize);
6671 // The max executable size must be less than or equal to the max old
6672 // generation size.
6673 if (max_executable_size_ > max_old_generation_size_) {
6674 max_executable_size_ = max_old_generation_size_;
6677 // The new space size must be a power of two to support single-bit testing
6678 // for containment.
6679 max_semispace_size_ = RoundUpToPowerOf2(max_semispace_size_);
6680 reserved_semispace_size_ = RoundUpToPowerOf2(reserved_semispace_size_);
6681 initial_semispace_size_ = Min(initial_semispace_size_, max_semispace_size_);
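  // For example, a requested max semispace size of 5 MB is rounded up to 8 MB
  // here, so that new-space containment can be checked with the single-bit
  // test mentioned above.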
6683 // The external allocation limit should be below 256 MB on all architectures
6684 // to avoid unnecessary low memory notifications, as that is the threshold
6685 // for some embedders.
6686 external_allocation_limit_ = 12 * max_semispace_size_;
6687 ASSERT(external_allocation_limit_ <= 256 * MB);
6689 // The old generation is paged and needs at least one page for each space.
6690 int paged_space_count = LAST_PAGED_SPACE - FIRST_PAGED_SPACE + 1;
6691 max_old_generation_size_ = Max(static_cast<intptr_t>(paged_space_count *
6693 RoundUp(max_old_generation_size_,
6701 bool Heap::ConfigureHeapDefault() {
6702 return ConfigureHeap(static_cast<intptr_t>(FLAG_max_new_space_size / 2) * KB,
6703 static_cast<intptr_t>(FLAG_max_old_space_size) * MB,
6704 static_cast<intptr_t>(FLAG_max_executable_size) * MB);
6708 void Heap::RecordStats(HeapStats* stats, bool take_snapshot) {
6709 *stats->start_marker = HeapStats::kStartMarker;
6710 *stats->end_marker = HeapStats::kEndMarker;
6711 *stats->new_space_size = new_space_.SizeAsInt();
6712 *stats->new_space_capacity = static_cast<int>(new_space_.Capacity());
6713 *stats->old_pointer_space_size = old_pointer_space_->SizeOfObjects();
6714 *stats->old_pointer_space_capacity = old_pointer_space_->Capacity();
6715 *stats->old_data_space_size = old_data_space_->SizeOfObjects();
6716 *stats->old_data_space_capacity = old_data_space_->Capacity();
6717 *stats->code_space_size = code_space_->SizeOfObjects();
6718 *stats->code_space_capacity = code_space_->Capacity();
6719 *stats->map_space_size = map_space_->SizeOfObjects();
6720 *stats->map_space_capacity = map_space_->Capacity();
6721 *stats->cell_space_size = cell_space_->SizeOfObjects();
6722 *stats->cell_space_capacity = cell_space_->Capacity();
6723 *stats->property_cell_space_size = property_cell_space_->SizeOfObjects();
6724 *stats->property_cell_space_capacity = property_cell_space_->Capacity();
6725 *stats->lo_space_size = lo_space_->Size();
6726 isolate_->global_handles()->RecordStats(stats);
6727 *stats->memory_allocator_size = isolate()->memory_allocator()->Size();
6728 *stats->memory_allocator_capacity =
6729 isolate()->memory_allocator()->Size() +
6730 isolate()->memory_allocator()->Available();
6731 *stats->os_error = OS::GetLastError();
6733 if (take_snapshot) {
6734 HeapIterator iterator(this);
6735 for (HeapObject* obj = iterator.next();
6737 obj = iterator.next()) {
6738 InstanceType type = obj->map()->instance_type();
6739 ASSERT(0 <= type && type <= LAST_TYPE);
6740 stats->objects_per_type[type]++;
6741 stats->size_per_type[type] += obj->Size();
6747 intptr_t Heap::PromotedSpaceSizeOfObjects() {
6748 return old_pointer_space_->SizeOfObjects()
6749 + old_data_space_->SizeOfObjects()
6750 + code_space_->SizeOfObjects()
6751 + map_space_->SizeOfObjects()
6752 + cell_space_->SizeOfObjects()
6753 + property_cell_space_->SizeOfObjects()
6754 + lo_space_->SizeOfObjects();
6758 intptr_t Heap::PromotedExternalMemorySize() {
6759 if (amount_of_external_allocated_memory_
6760 <= amount_of_external_allocated_memory_at_last_global_gc_) return 0;
6761 return amount_of_external_allocated_memory_
6762 - amount_of_external_allocated_memory_at_last_global_gc_;
6766 V8_DECLARE_ONCE(initialize_gc_once);
6768 static void InitializeGCOnce() {
6769 InitializeScavengingVisitorsTables();
6770 NewSpaceScavenger::Initialize();
6771 MarkCompactCollector::Initialize();
6775 bool Heap::SetUp() {
6777 allocation_timeout_ = FLAG_gc_interval;
6780 // Initialize heap spaces and initial maps and objects. Whenever something
6781 // goes wrong, just return false. The caller should check the results and
6782 // call Heap::TearDown() to release allocated memory.
6784 // If the heap is not yet configured (e.g. through the API), configure it.
6785 // Configuration is based on the flags new-space-size (really the semispace
6786 // size) and old-space-size if set or the initial values of semispace_size_
6787 // and old_generation_size_ otherwise.
6789 if (!ConfigureHeapDefault()) return false;
6792 CallOnce(&initialize_gc_once, &InitializeGCOnce);
6794 MarkMapPointersAsEncoded(false);
6796 // Set up memory allocator.
6797 if (!isolate_->memory_allocator()->SetUp(MaxReserved(), MaxExecutableSize()))
6800 // Set up new space.
6801 if (!new_space_.SetUp(reserved_semispace_size_, max_semispace_size_)) {
6805 // Initialize old pointer space.
6806 old_pointer_space_ =
6808 max_old_generation_size_,
6811 if (old_pointer_space_ == NULL) return false;
6812 if (!old_pointer_space_->SetUp()) return false;
6814 // Initialize old data space.
6817 max_old_generation_size_,
6820 if (old_data_space_ == NULL) return false;
6821 if (!old_data_space_->SetUp()) return false;
6823 // Initialize the code space and set its maximum capacity to the old
6824 // generation size. It needs executable memory.
6825 // On 64-bit platform(s), we put all code objects in a 2 GB range of
6826 // virtual address space, so that they can call each other with near calls.
6827 if (code_range_size_ > 0) {
6828 if (!isolate_->code_range()->SetUp(code_range_size_)) {
6834 new OldSpace(this, max_old_generation_size_, CODE_SPACE, EXECUTABLE);
6835 if (code_space_ == NULL) return false;
6836 if (!code_space_->SetUp()) return false;
6838 // Initialize map space.
6839 map_space_ = new MapSpace(this, max_old_generation_size_, MAP_SPACE);
6840 if (map_space_ == NULL) return false;
6841 if (!map_space_->SetUp()) return false;
6843 // Initialize simple cell space.
6844 cell_space_ = new CellSpace(this, max_old_generation_size_, CELL_SPACE);
6845 if (cell_space_ == NULL) return false;
6846 if (!cell_space_->SetUp()) return false;
6848 // Initialize global property cell space.
6849 property_cell_space_ = new PropertyCellSpace(this, max_old_generation_size_,
6850 PROPERTY_CELL_SPACE);
6851 if (property_cell_space_ == NULL) return false;
6852 if (!property_cell_space_->SetUp()) return false;
6854 // The large object code space may contain code or data. We set the memory
6855 // to be non-executable here for safety, but this means we need to enable it
6856 // explicitly when allocating large code objects.
6857 lo_space_ = new LargeObjectSpace(this, max_old_generation_size_, LO_SPACE);
6858 if (lo_space_ == NULL) return false;
6859 if (!lo_space_->SetUp()) return false;
6861 // Set up the seed that is used to randomize the string hash function.
6862 ASSERT(hash_seed() == 0);
6863 if (FLAG_randomize_hashes) {
6864 if (FLAG_hash_seed == 0) {
6866 Smi::FromInt(V8::RandomPrivate(isolate()) & 0x3fffffff));
6868 set_hash_seed(Smi::FromInt(FLAG_hash_seed));
6872 LOG(isolate_, IntPtrTEvent("heap-capacity", Capacity()));
6873 LOG(isolate_, IntPtrTEvent("heap-available", Available()));
6875 store_buffer()->SetUp();
6877 if (FLAG_parallel_recompilation) relocation_mutex_ = OS::CreateMutex();
6879 relocation_mutex_locked_by_optimizer_thread_ = false;
6886 bool Heap::CreateHeapObjects() {
6887 // Create initial maps.
6888 if (!CreateInitialMaps()) return false;
6889 if (!CreateApiObjects()) return false;
6891 // Create initial objects
6892 if (!CreateInitialObjects()) return false;
6894 native_contexts_list_ = undefined_value();
6895 array_buffers_list_ = undefined_value();
6896 allocation_sites_list_ = undefined_value();
6901 void Heap::SetStackLimits() {
6902 ASSERT(isolate_ != NULL);
6903 ASSERT(isolate_ == isolate());
6904 // On 64 bit machines, pointers are generally out of range of Smis. We write
6905 // something that looks like an out of range Smi to the GC.
6907 // Set up the special root array entries containing the stack limits.
6908 // These are actually addresses, but the tag makes the GC ignore it.
6909 roots_[kStackLimitRootIndex] =
6910 reinterpret_cast<Object*>(
6911 (isolate_->stack_guard()->jslimit() & ~kSmiTagMask) | kSmiTag);
6912 roots_[kRealStackLimitRootIndex] =
6913 reinterpret_cast<Object*>(
6914 (isolate_->stack_guard()->real_jslimit() & ~kSmiTagMask) | kSmiTag);
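// Illustrative sketch (not part of V8): the expressions above clear the low
// tag bits of the stack limit addresses and install the Smi tag, so the GC
// sees values that parse as (possibly out-of-range) Smis rather than as heap
// pointers. Assuming the usual one-bit tagging scheme, the transformation is:
namespace heap_doc_sketches {

// Makes an arbitrary machine word look like a tagged Smi by forcing its tag
// bits to the Smi tag.
inline unsigned long DisguiseAsSmi(unsigned long value,
                                   unsigned long smi_tag_mask,
                                   unsigned long smi_tag) {
  return (value & ~smi_tag_mask) | smi_tag;
}

}  // namespace heap_doc_sketches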
6918 void Heap::TearDown() {
6920 if (FLAG_verify_heap) {
6925 if (FLAG_print_cumulative_gc_stat) {
6927 PrintF("gc_count=%d ", gc_count_);
6928 PrintF("mark_sweep_count=%d ", ms_count_);
6929 PrintF("max_gc_pause=%.1f ", get_max_gc_pause());
6930 PrintF("total_gc_time=%.1f ", total_gc_time_ms_);
6931 PrintF("min_in_mutator=%.1f ", get_min_in_mutator());
6932 PrintF("max_alive_after_gc=%" V8_PTR_PREFIX "d ",
6933 get_max_alive_after_gc());
6934 PrintF("total_marking_time=%.1f ", marking_time());
6935 PrintF("total_sweeping_time=%.1f ", sweeping_time());
6939 TearDownArrayBuffers();
6941 isolate_->global_handles()->TearDown();
6943 external_string_table_.TearDown();
6945 mark_compact_collector()->TearDown();
6947 new_space_.TearDown();
6949 if (old_pointer_space_ != NULL) {
6950 old_pointer_space_->TearDown();
6951 delete old_pointer_space_;
6952 old_pointer_space_ = NULL;
6955 if (old_data_space_ != NULL) {
6956 old_data_space_->TearDown();
6957 delete old_data_space_;
6958 old_data_space_ = NULL;
6961 if (code_space_ != NULL) {
6962 code_space_->TearDown();
6967 if (map_space_ != NULL) {
6968 map_space_->TearDown();
6973 if (cell_space_ != NULL) {
6974 cell_space_->TearDown();
6979 if (property_cell_space_ != NULL) {
6980 property_cell_space_->TearDown();
6981 delete property_cell_space_;
6982 property_cell_space_ = NULL;
6985 if (lo_space_ != NULL) {
6986 lo_space_->TearDown();
6991 store_buffer()->TearDown();
6992 incremental_marking()->TearDown();
6994 isolate_->memory_allocator()->TearDown();
6996 delete relocation_mutex_;
7000 void Heap::AddGCPrologueCallback(GCPrologueCallback callback, GCType gc_type) {
7001 ASSERT(callback != NULL);
7002 GCPrologueCallbackPair pair(callback, gc_type);
7003 ASSERT(!gc_prologue_callbacks_.Contains(pair));
7004 return gc_prologue_callbacks_.Add(pair);
7008 void Heap::RemoveGCPrologueCallback(GCPrologueCallback callback) {
7009 ASSERT(callback != NULL);
7010 for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
7011 if (gc_prologue_callbacks_[i].callback == callback) {
7012 gc_prologue_callbacks_.Remove(i);
7020 void Heap::AddGCEpilogueCallback(GCEpilogueCallback callback, GCType gc_type) {
7021 ASSERT(callback != NULL);
7022 GCEpilogueCallbackPair pair(callback, gc_type);
7023 ASSERT(!gc_epilogue_callbacks_.Contains(pair));
7024 return gc_epilogue_callbacks_.Add(pair);
7028 void Heap::RemoveGCEpilogueCallback(GCEpilogueCallback callback) {
7029 ASSERT(callback != NULL);
7030 for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
7031 if (gc_epilogue_callbacks_[i].callback == callback) {
7032 gc_epilogue_callbacks_.Remove(i);
7042 class PrintHandleVisitor: public ObjectVisitor {
7044 void VisitPointers(Object** start, Object** end) {
7045 for (Object** p = start; p < end; p++)
7046 PrintF(" handle %p to %p\n",
7047 reinterpret_cast<void*>(p),
7048 reinterpret_cast<void*>(*p));
7053 void Heap::PrintHandles() {
7054 PrintF("Handles:\n");
7055 PrintHandleVisitor v;
7056 isolate_->handle_scope_implementer()->Iterate(&v);
7062 Space* AllSpaces::next() {
7063 switch (counter_++) {
7065 return heap_->new_space();
7066 case OLD_POINTER_SPACE:
7067 return heap_->old_pointer_space();
7068 case OLD_DATA_SPACE:
7069 return heap_->old_data_space();
7071 return heap_->code_space();
7073 return heap_->map_space();
7075 return heap_->cell_space();
7076 case PROPERTY_CELL_SPACE:
7077 return heap_->property_cell_space();
7079 return heap_->lo_space();
7086 PagedSpace* PagedSpaces::next() {
7087 switch (counter_++) {
7088 case OLD_POINTER_SPACE:
7089 return heap_->old_pointer_space();
7090 case OLD_DATA_SPACE:
7091 return heap_->old_data_space();
7093 return heap_->code_space();
7095 return heap_->map_space();
7097 return heap_->cell_space();
7098 case PROPERTY_CELL_SPACE:
7099 return heap_->property_cell_space();
7107 OldSpace* OldSpaces::next() {
7108 switch (counter_++) {
7109 case OLD_POINTER_SPACE:
7110 return heap_->old_pointer_space();
7111 case OLD_DATA_SPACE:
7112 return heap_->old_data_space();
7114 return heap_->code_space();
7121 SpaceIterator::SpaceIterator(Heap* heap)
7123 current_space_(FIRST_SPACE),
7129 SpaceIterator::SpaceIterator(Heap* heap, HeapObjectCallback size_func)
7131 current_space_(FIRST_SPACE),
7133 size_func_(size_func) {
7137 SpaceIterator::~SpaceIterator() {
7138 // Delete active iterator if any.
7143 bool SpaceIterator::has_next() {
7144 // Iterate until no more spaces.
7145 return current_space_ != LAST_SPACE;
7149 ObjectIterator* SpaceIterator::next() {
7150 if (iterator_ != NULL) {
7153 // Move to the next space
7155 if (current_space_ > LAST_SPACE) {
7160 // Return iterator for the new current space.
7161 return CreateIterator();
7165 // Create an iterator for the space to iterate.
7166 ObjectIterator* SpaceIterator::CreateIterator() {
7167 ASSERT(iterator_ == NULL);
7169 switch (current_space_) {
7171 iterator_ = new SemiSpaceIterator(heap_->new_space(), size_func_);
7173 case OLD_POINTER_SPACE:
7175 new HeapObjectIterator(heap_->old_pointer_space(), size_func_);
7177 case OLD_DATA_SPACE:
7178 iterator_ = new HeapObjectIterator(heap_->old_data_space(), size_func_);
7181 iterator_ = new HeapObjectIterator(heap_->code_space(), size_func_);
7184 iterator_ = new HeapObjectIterator(heap_->map_space(), size_func_);
7187 iterator_ = new HeapObjectIterator(heap_->cell_space(), size_func_);
7189 case PROPERTY_CELL_SPACE:
7190 iterator_ = new HeapObjectIterator(heap_->property_cell_space(),
7194 iterator_ = new LargeObjectIterator(heap_->lo_space(), size_func_);
7198   // Return the newly allocated iterator.
7199 ASSERT(iterator_ != NULL);
7204 class HeapObjectsFilter {
7206 virtual ~HeapObjectsFilter() {}
7207 virtual bool SkipObject(HeapObject* object) = 0;
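// UnreachableObjectsFilter marks everything transitively reachable from the
// roots when it is constructed and then treats any unmarked object as
// skippable; its destructor clears the mark bits again so the next GC starts
// from a clean state.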
7211 class UnreachableObjectsFilter : public HeapObjectsFilter {
7213 UnreachableObjectsFilter() {
7214 MarkReachableObjects();
7217 ~UnreachableObjectsFilter() {
7218 Isolate::Current()->heap()->mark_compact_collector()->ClearMarkbits();
7221 bool SkipObject(HeapObject* object) {
7222 MarkBit mark_bit = Marking::MarkBitFrom(object);
7223 return !mark_bit.Get();
7227 class MarkingVisitor : public ObjectVisitor {
7229 MarkingVisitor() : marking_stack_(10) {}
7231 void VisitPointers(Object** start, Object** end) {
7232 for (Object** p = start; p < end; p++) {
7233 if (!(*p)->IsHeapObject()) continue;
7234 HeapObject* obj = HeapObject::cast(*p);
7235 MarkBit mark_bit = Marking::MarkBitFrom(obj);
7236 if (!mark_bit.Get()) {
7238 marking_stack_.Add(obj);
7243 void TransitiveClosure() {
7244 while (!marking_stack_.is_empty()) {
7245 HeapObject* obj = marking_stack_.RemoveLast();
7251 List<HeapObject*> marking_stack_;
7254 void MarkReachableObjects() {
7255 Heap* heap = Isolate::Current()->heap();
7256 MarkingVisitor visitor;
7257 heap->IterateRoots(&visitor, VISIT_ALL);
7258 visitor.TransitiveClosure();
7261 DisallowHeapAllocation no_allocation_;
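// HeapIterator visits every object in the heap by combining a SpaceIterator
// with the per-space object iterators it produces; with kFilterUnreachable it
// additionally skips objects that the UnreachableObjectsFilter above reports
// as unmarked.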
7265 HeapIterator::HeapIterator(Heap* heap)
7267 filtering_(HeapIterator::kNoFiltering),
7273 HeapIterator::HeapIterator(Heap* heap,
7274 HeapIterator::HeapObjectsFiltering filtering)
7276 filtering_(filtering),
7282 HeapIterator::~HeapIterator() {
7287 void HeapIterator::Init() {
7288 // Start the iteration.
7289 space_iterator_ = new SpaceIterator(heap_);
7290 switch (filtering_) {
7291 case kFilterUnreachable:
7292 filter_ = new UnreachableObjectsFilter;
7297 object_iterator_ = space_iterator_->next();
7301 void HeapIterator::Shutdown() {
7303 // Assert that in filtering mode we have iterated through all
7304   // objects. Otherwise, the heap will be left in an inconsistent state.
7305 if (filtering_ != kNoFiltering) {
7306 ASSERT(object_iterator_ == NULL);
7309 // Make sure the last iterator is deallocated.
7310 delete space_iterator_;
7311 space_iterator_ = NULL;
7312 object_iterator_ = NULL;
7318 HeapObject* HeapIterator::next() {
7319 if (filter_ == NULL) return NextObject();
7321 HeapObject* obj = NextObject();
7322 while (obj != NULL && filter_->SkipObject(obj)) obj = NextObject();
7327 HeapObject* HeapIterator::NextObject() {
7328 // No iterator means we are done.
7329 if (object_iterator_ == NULL) return NULL;
7331 if (HeapObject* obj = object_iterator_->next_object()) {
7332 // If the current iterator has more objects we are fine.
7335   // Go through the spaces looking for one that has objects.
7336 while (space_iterator_->has_next()) {
7337 object_iterator_ = space_iterator_->next();
7338 if (HeapObject* obj = object_iterator_->next_object()) {
7343 // Done with the last space.
7344 object_iterator_ = NULL;
7349 void HeapIterator::reset() {
7350 // Restart the iterator.
7358 Object* const PathTracer::kAnyGlobalObject = NULL;
7360 class PathTracer::MarkVisitor: public ObjectVisitor {
7362 explicit MarkVisitor(PathTracer* tracer) : tracer_(tracer) {}
7363 void VisitPointers(Object** start, Object** end) {
7364 // Scan all HeapObject pointers in [start, end)
7365 for (Object** p = start; !tracer_->found() && (p < end); p++) {
7366 if ((*p)->IsHeapObject())
7367 tracer_->MarkRecursively(p, this);
7372 PathTracer* tracer_;
7376 class PathTracer::UnmarkVisitor: public ObjectVisitor {
7378 explicit UnmarkVisitor(PathTracer* tracer) : tracer_(tracer) {}
7379 void VisitPointers(Object** start, Object** end) {
7380 // Scan all HeapObject pointers in [start, end)
7381 for (Object** p = start; p < end; p++) {
7382 if ((*p)->IsHeapObject())
7383 tracer_->UnmarkRecursively(p, this);
7388 PathTracer* tracer_;
7392 void PathTracer::VisitPointers(Object** start, Object** end) {
7393 bool done = ((what_to_find_ == FIND_FIRST) && found_target_);
7394 // Visit all HeapObject pointers in [start, end)
7395 for (Object** p = start; !done && (p < end); p++) {
7396 if ((*p)->IsHeapObject()) {
7398 done = ((what_to_find_ == FIND_FIRST) && found_target_);
7404 void PathTracer::Reset() {
7405 found_target_ = false;
7406 object_stack_.Clear();
7410 void PathTracer::TracePathFrom(Object** root) {
7411 ASSERT((search_target_ == kAnyGlobalObject) ||
7412 search_target_->IsHeapObject());
7413 found_target_in_trace_ = false;
7416 MarkVisitor mark_visitor(this);
7417 MarkRecursively(root, &mark_visitor);
7419 UnmarkVisitor unmark_visitor(this);
7420 UnmarkRecursively(root, &unmark_visitor);
7426 static bool SafeIsNativeContext(HeapObject* obj) {
7427 return obj->map() == obj->GetHeap()->raw_unchecked_native_context_map();
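// PathTracer marks an object as visited by adding kMarkTag to the map word
// (via set_map_no_write_barrier): a tagged map no longer looks like a
// HeapObject, which is what MarkRecursively checks for, and UnmarkRecursively
// subtracts the tag again to restore the original map.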
7431 void PathTracer::MarkRecursively(Object** p, MarkVisitor* mark_visitor) {
7432 if (!(*p)->IsHeapObject()) return;
7434 HeapObject* obj = HeapObject::cast(*p);
7436 Object* map = obj->map();
7438 if (!map->IsHeapObject()) return; // visited before
7440 if (found_target_in_trace_) return; // stop if target found
7441 object_stack_.Add(obj);
7442 if (((search_target_ == kAnyGlobalObject) && obj->IsJSGlobalObject()) ||
7443 (obj == search_target_)) {
7444 found_target_in_trace_ = true;
7445 found_target_ = true;
7449 bool is_native_context = SafeIsNativeContext(obj);
7452 Map* map_p = reinterpret_cast<Map*>(HeapObject::cast(map));
7454 Address map_addr = map_p->address();
7456 obj->set_map_no_write_barrier(reinterpret_cast<Map*>(map_addr + kMarkTag));
7458 // Scan the object body.
7459 if (is_native_context && (visit_mode_ == VISIT_ONLY_STRONG)) {
7460     // This is specialized to scan Contexts properly.
7461 Object** start = reinterpret_cast<Object**>(obj->address() +
7462 Context::kHeaderSize);
7463 Object** end = reinterpret_cast<Object**>(obj->address() +
7464 Context::kHeaderSize + Context::FIRST_WEAK_SLOT * kPointerSize);
7465 mark_visitor->VisitPointers(start, end);
7467 obj->IterateBody(map_p->instance_type(),
7468 obj->SizeFromMap(map_p),
7472 // Scan the map after the body because the body is a lot more interesting
7473 // when doing leak detection.
7474 MarkRecursively(&map, mark_visitor);
7476   if (!found_target_in_trace_)  // don't pop if the target was found
7477 object_stack_.RemoveLast();
7481 void PathTracer::UnmarkRecursively(Object** p, UnmarkVisitor* unmark_visitor) {
7482 if (!(*p)->IsHeapObject()) return;
7484 HeapObject* obj = HeapObject::cast(*p);
7486 Object* map = obj->map();
7488 if (map->IsHeapObject()) return; // unmarked already
7490 Address map_addr = reinterpret_cast<Address>(map);
7492 map_addr -= kMarkTag;
7494 ASSERT_TAG_ALIGNED(map_addr);
7496 HeapObject* map_p = HeapObject::FromAddress(map_addr);
7498 obj->set_map_no_write_barrier(reinterpret_cast<Map*>(map_p));
7500 UnmarkRecursively(reinterpret_cast<Object**>(&map_p), unmark_visitor);
7502 obj->IterateBody(Map::cast(map_p)->instance_type(),
7503 obj->SizeFromMap(Map::cast(map_p)),
7508 void PathTracer::ProcessResults() {
7509 if (found_target_) {
7510 PrintF("=====================================\n");
7511 PrintF("==== Path to object ====\n");
7512 PrintF("=====================================\n\n");
7514 ASSERT(!object_stack_.is_empty());
7515 for (int i = 0; i < object_stack_.length(); i++) {
7516 if (i > 0) PrintF("\n |\n |\n V\n\n");
7517 Object* obj = object_stack_[i];
7520 PrintF("=====================================\n");
7525 // Triggers a depth-first traversal of reachable objects from one
7526 // given root object, finds a path to a specific heap object, and prints it.
7528 void Heap::TracePathToObjectFrom(Object* target, Object* root) {
7529 PathTracer tracer(target, PathTracer::FIND_ALL, VISIT_ALL);
7530 tracer.VisitPointer(&root);
7534 // Triggers a depth-first traversal of reachable objects from roots
7535 // and finds a path to a specific heap object and prints it.
7536 void Heap::TracePathToObject(Object* target) {
7537 PathTracer tracer(target, PathTracer::FIND_ALL, VISIT_ALL);
7538 IterateRoots(&tracer, VISIT_ONLY_STRONG);
7542 // Triggers a depth-first traversal of reachable objects from roots
7543 // and finds a path to any global object and prints it. Useful for
7544 // determining the source for leaks of global objects.
7545 void Heap::TracePathToGlobal() {
7546 PathTracer tracer(PathTracer::kAnyGlobalObject,
7547 PathTracer::FIND_ALL,
7549 IterateRoots(&tracer, VISIT_ONLY_STRONG);
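// Adds up each old space's Waste() and Available() bytes, i.e. memory inside
// the space that currently holds no live objects; GCTracer records this as
// in_free_list_or_wasted_before_gc_.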
7554 static intptr_t CountTotalHolesSize(Heap* heap) {
7555 intptr_t holes_size = 0;
7556 OldSpaces spaces(heap);
7557 for (OldSpace* space = spaces.next();
7559 space = spaces.next()) {
7560 holes_size += space->Waste() + space->Available();
7566 GCTracer::GCTracer(Heap* heap,
7567 const char* gc_reason,
7568 const char* collector_reason)
7570 start_object_size_(0),
7571 start_memory_size_(0),
7574 allocated_since_last_gc_(0),
7575 spent_in_mutator_(0),
7576 promoted_objects_size_(0),
7577 nodes_died_in_new_space_(0),
7578 nodes_copied_in_new_space_(0),
7581 gc_reason_(gc_reason),
7582 collector_reason_(collector_reason) {
7583 if (!FLAG_trace_gc && !FLAG_print_cumulative_gc_stat) return;
7584 start_time_ = OS::TimeCurrentMillis();
7585 start_object_size_ = heap_->SizeOfObjects();
7586 start_memory_size_ = heap_->isolate()->memory_allocator()->Size();
7588 for (int i = 0; i < Scope::kNumberOfScopes; i++) {
7592 in_free_list_or_wasted_before_gc_ = CountTotalHolesSize(heap);
7594 allocated_since_last_gc_ =
7595 heap_->SizeOfObjects() - heap_->alive_after_last_gc_;
7597 if (heap_->last_gc_end_timestamp_ > 0) {
7598 spent_in_mutator_ = Max(start_time_ - heap_->last_gc_end_timestamp_, 0.0);
7601 steps_count_ = heap_->incremental_marking()->steps_count();
7602 steps_took_ = heap_->incremental_marking()->steps_took();
7603 longest_step_ = heap_->incremental_marking()->longest_step();
7604 steps_count_since_last_gc_ =
7605 heap_->incremental_marking()->steps_count_since_last_gc();
7606 steps_took_since_last_gc_ =
7607 heap_->incremental_marking()->steps_took_since_last_gc();
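// The destructor emits the actual trace output: a short human-readable
// summary line by default, or one "name=value" record per GC when
// --trace-gc-nvp is given.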
7611 GCTracer::~GCTracer() {
7612   // Print ONE line iff a tracing flag is set.
7613 if (!FLAG_trace_gc && !FLAG_print_cumulative_gc_stat) return;
7615 bool first_gc = (heap_->last_gc_end_timestamp_ == 0);
7617 heap_->alive_after_last_gc_ = heap_->SizeOfObjects();
7618 heap_->last_gc_end_timestamp_ = OS::TimeCurrentMillis();
7620 double time = heap_->last_gc_end_timestamp_ - start_time_;
7622 // Update cumulative GC statistics if required.
7623 if (FLAG_print_cumulative_gc_stat) {
7624 heap_->total_gc_time_ms_ += time;
7625 heap_->max_gc_pause_ = Max(heap_->max_gc_pause_, time);
7626 heap_->max_alive_after_gc_ = Max(heap_->max_alive_after_gc_,
7627 heap_->alive_after_last_gc_);
7629 heap_->min_in_mutator_ = Min(heap_->min_in_mutator_,
7632 } else if (FLAG_trace_gc_verbose) {
7633 heap_->total_gc_time_ms_ += time;
7636 if (collector_ == SCAVENGER && FLAG_trace_gc_ignore_scavenger) return;
7638 heap_->AddMarkingTime(scopes_[Scope::MC_MARK]);
7640 if (FLAG_print_cumulative_gc_stat && !FLAG_trace_gc) return;
7641 PrintPID("%8.0f ms: ", heap_->isolate()->time_millis_since_init());
7643 if (!FLAG_trace_gc_nvp) {
7644 int external_time = static_cast<int>(scopes_[Scope::EXTERNAL]);
7646 double end_memory_size_mb =
7647 static_cast<double>(heap_->isolate()->memory_allocator()->Size()) / MB;
7649 PrintF("%s %.1f (%.1f) -> %.1f (%.1f) MB, ",
7651 static_cast<double>(start_object_size_) / MB,
7652 static_cast<double>(start_memory_size_) / MB,
7653 SizeOfHeapObjects(),
7654 end_memory_size_mb);
7656 if (external_time > 0) PrintF("%d / ", external_time);
7657 PrintF("%.1f ms", time);
7658 if (steps_count_ > 0) {
7659 if (collector_ == SCAVENGER) {
7660 PrintF(" (+ %.1f ms in %d steps since last GC)",
7661 steps_took_since_last_gc_,
7662 steps_count_since_last_gc_);
7664 PrintF(" (+ %.1f ms in %d steps since start of marking, "
7665 "biggest step %.1f ms)",
7672 if (gc_reason_ != NULL) {
7673 PrintF(" [%s]", gc_reason_);
7676 if (collector_reason_ != NULL) {
7677 PrintF(" [%s]", collector_reason_);
7682 PrintF("pause=%.1f ", time);
7683 PrintF("mutator=%.1f ", spent_in_mutator_);
7685 switch (collector_) {
7689 case MARK_COMPACTOR:
7697 PrintF("external=%.1f ", scopes_[Scope::EXTERNAL]);
7698 PrintF("mark=%.1f ", scopes_[Scope::MC_MARK]);
7699 PrintF("sweep=%.1f ", scopes_[Scope::MC_SWEEP]);
7700 PrintF("sweepns=%.1f ", scopes_[Scope::MC_SWEEP_NEWSPACE]);
7701 PrintF("evacuate=%.1f ", scopes_[Scope::MC_EVACUATE_PAGES]);
7702 PrintF("new_new=%.1f ", scopes_[Scope::MC_UPDATE_NEW_TO_NEW_POINTERS]);
7703 PrintF("root_new=%.1f ", scopes_[Scope::MC_UPDATE_ROOT_TO_NEW_POINTERS]);
7704 PrintF("old_new=%.1f ", scopes_[Scope::MC_UPDATE_OLD_TO_NEW_POINTERS]);
7705 PrintF("compaction_ptrs=%.1f ",
7706 scopes_[Scope::MC_UPDATE_POINTERS_TO_EVACUATED]);
7707 PrintF("intracompaction_ptrs=%.1f ",
7708 scopes_[Scope::MC_UPDATE_POINTERS_BETWEEN_EVACUATED]);
7709 PrintF("misc_compaction=%.1f ", scopes_[Scope::MC_UPDATE_MISC_POINTERS]);
7710 PrintF("weakcollection_process=%.1f ",
7711 scopes_[Scope::MC_WEAKCOLLECTION_PROCESS]);
7712 PrintF("weakcollection_clear=%.1f ",
7713 scopes_[Scope::MC_WEAKCOLLECTION_CLEAR]);
7715 PrintF("total_size_before=%" V8_PTR_PREFIX "d ", start_object_size_);
7716 PrintF("total_size_after=%" V8_PTR_PREFIX "d ", heap_->SizeOfObjects());
7717 PrintF("holes_size_before=%" V8_PTR_PREFIX "d ",
7718 in_free_list_or_wasted_before_gc_);
7719 PrintF("holes_size_after=%" V8_PTR_PREFIX "d ", CountTotalHolesSize(heap_));
7721 PrintF("allocated=%" V8_PTR_PREFIX "d ", allocated_since_last_gc_);
7722 PrintF("promoted=%" V8_PTR_PREFIX "d ", promoted_objects_size_);
7723 PrintF("nodes_died_in_new=%d ", nodes_died_in_new_space_);
7724 PrintF("nodes_copied_in_new=%d ", nodes_copied_in_new_space_);
7725 PrintF("nodes_promoted=%d ", nodes_promoted_);
7727 if (collector_ == SCAVENGER) {
7728 PrintF("stepscount=%d ", steps_count_since_last_gc_);
7729 PrintF("stepstook=%.1f ", steps_took_since_last_gc_);
7731 PrintF("stepscount=%d ", steps_count_);
7732 PrintF("stepstook=%.1f ", steps_took_);
7733 PrintF("longeststep=%.1f ", longest_step_);
7739 heap_->PrintShortHeapStatistics();
7743 const char* GCTracer::CollectorString() {
7744 switch (collector_) {
7747 case MARK_COMPACTOR:
7748 return "Mark-sweep";
7750 return "Unknown GC";
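// KeyedLookupCache caches (map, property name) -> field offset so repeated
// keyed loads can avoid a full descriptor lookup.  The hash mixes the map
// pointer with the name's hash and is masked to the cache capacity.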
7754 int KeyedLookupCache::Hash(Map* map, Name* name) {
7755   // Uses only the lower 32 bits if pointers are larger.
7756 uintptr_t addr_hash =
7757 static_cast<uint32_t>(reinterpret_cast<uintptr_t>(map)) >> kMapHashShift;
7758 return static_cast<uint32_t>((addr_hash ^ name->Hash()) & kCapacityMask);
7762 int KeyedLookupCache::Lookup(Map* map, Name* name) {
7763 int index = (Hash(map, name) & kHashMask);
7764 for (int i = 0; i < kEntriesPerBucket; i++) {
7765 Key& key = keys_[index + i];
7766 if ((key.map == map) && key.name->Equals(name)) {
7767 return field_offsets_[index + i];
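// Update prefers a free slot inside the bucket; if the bucket is full, every
// entry is shifted down one position and the new (map, name) pair is written
// at position zero, so the most recently inserted entry is probed first.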
7774 void KeyedLookupCache::Update(Map* map, Name* name, int field_offset) {
7775 if (!name->IsUniqueName()) {
7776 String* internalized_string;
7777 if (!HEAP->InternalizeStringIfExists(
7778 String::cast(name), &internalized_string)) {
7781 name = internalized_string;
7783 // This cache is cleared only between mark compact passes, so we expect the
7784 // cache to only contain old space names.
7785 ASSERT(!HEAP->InNewSpace(name));
7787 int index = (Hash(map, name) & kHashMask);
7788 // After a GC there will be free slots, so we use them in order (this may
7789 // help to get the most frequently used one in position 0).
7790   for (int i = 0; i < kEntriesPerBucket; i++) {
7791 Key& key = keys_[index];
7792 Object* free_entry_indicator = NULL;
7793 if (key.map == free_entry_indicator) {
7796 field_offsets_[index + i] = field_offset;
7800 // No free entry found in this bucket, so we move them all down one and
7801 // put the new entry at position zero.
7802 for (int i = kEntriesPerBucket - 1; i > 0; i--) {
7803 Key& key = keys_[index + i];
7804 Key& key2 = keys_[index + i - 1];
7806 field_offsets_[index + i] = field_offsets_[index + i - 1];
7809 // Write the new first entry.
7810 Key& key = keys_[index];
7813 field_offsets_[index] = field_offset;
7817 void KeyedLookupCache::Clear() {
7818 for (int index = 0; index < kLength; index++) keys_[index].map = NULL;
7822 void DescriptorLookupCache::Clear() {
7823 for (int index = 0; index < kLength; index++) keys_[index].source = NULL;
7828 void Heap::GarbageCollectionGreedyCheck() {
7829 ASSERT(FLAG_gc_greedy);
7830 if (isolate_->bootstrapper()->IsActive()) return;
7831 if (disallow_allocation_failure()) return;
7832 CollectGarbage(NEW_SPACE);
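// Every SubCache entry is primed with 0xffffffff in both input words, a NaN
// bit pattern the FPU never produces, so a lookup cannot accidentally hit an
// entry that has not been written yet.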
7837 TranscendentalCache::SubCache::SubCache(Type t)
7839 isolate_(Isolate::Current()) {
7840 uint32_t in0 = 0xffffffffu; // Bit-pattern for a NaN that isn't
7841 uint32_t in1 = 0xffffffffu; // generated by the FPU.
7842 for (int i = 0; i < kCacheSize; i++) {
7843 elements_[i].in[0] = in0;
7844 elements_[i].in[1] = in1;
7845 elements_[i].output = NULL;
7850 void TranscendentalCache::Clear() {
7851 for (int i = 0; i < kNumberOfCaches; i++) {
7852 if (caches_[i] != NULL) {
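// CleanUp compacts both string lists in place: entries overwritten with
// the_hole are dropped, and new-space strings that are no longer in new
// space are moved over to the old-space list.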
7860 void ExternalStringTable::CleanUp() {
7862 for (int i = 0; i < new_space_strings_.length(); ++i) {
7863 if (new_space_strings_[i] == heap_->the_hole_value()) {
7866 if (heap_->InNewSpace(new_space_strings_[i])) {
7867 new_space_strings_[last++] = new_space_strings_[i];
7869 old_space_strings_.Add(new_space_strings_[i]);
7872 new_space_strings_.Rewind(last);
7873 new_space_strings_.Trim();
7876 for (int i = 0; i < old_space_strings_.length(); ++i) {
7877 if (old_space_strings_[i] == heap_->the_hole_value()) {
7880 ASSERT(!heap_->InNewSpace(old_space_strings_[i]));
7881 old_space_strings_[last++] = old_space_strings_[i];
7883 old_space_strings_.Rewind(last);
7884 old_space_strings_.Trim();
7886 if (FLAG_verify_heap) {
7893 void ExternalStringTable::TearDown() {
7894 new_space_strings_.Free();
7895 old_space_strings_.Free();
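// Unmapping of memory chunks is deferred: chunks are pushed onto a singly
// linked queue here and only released in FreeQueuedChunks(), after the store
// buffer has been compacted and filtered so it no longer references them.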
7899 void Heap::QueueMemoryChunkForFree(MemoryChunk* chunk) {
7900 chunk->set_next_chunk(chunks_queued_for_free_);
7901 chunks_queued_for_free_ = chunk;
7905 void Heap::FreeQueuedChunks() {
7906 if (chunks_queued_for_free_ == NULL) return;
7909 for (chunk = chunks_queued_for_free_; chunk != NULL; chunk = next) {
7910 next = chunk->next_chunk();
7911 chunk->SetFlag(MemoryChunk::ABOUT_TO_BE_FREED);
7913 if (chunk->owner()->identity() == LO_SPACE) {
7914 // StoreBuffer::Filter relies on MemoryChunk::FromAnyPointerAddress.
7915 // If FromAnyPointerAddress encounters a slot that belongs to a large
7916 // chunk queued for deletion it will fail to find the chunk because
7917       // it tries to perform a search in the list of pages owned by the large
7918       // object space, but queued chunks have been detached from that list.
7919       // To work around this we split the large chunk into normal, kPageSize-
7920       // aligned pieces and initialize the size, owner and flags of each piece.
7921 // If FromAnyPointerAddress encounters a slot that belongs to one of
7922 // these smaller pieces it will treat it as a slot on a normal Page.
7923 Address chunk_end = chunk->address() + chunk->size();
7924 MemoryChunk* inner = MemoryChunk::FromAddress(
7925 chunk->address() + Page::kPageSize);
7926 MemoryChunk* inner_last = MemoryChunk::FromAddress(chunk_end - 1);
7927 while (inner <= inner_last) {
7928 // Size of a large chunk is always a multiple of
7929 // OS::AllocateAlignment() so there is always
7930 // enough space for a fake MemoryChunk header.
7931 Address area_end = Min(inner->address() + Page::kPageSize, chunk_end);
7932 // Guard against overflow.
7933 if (area_end < inner->address()) area_end = chunk_end;
7934 inner->SetArea(inner->address(), area_end);
7935 inner->set_size(Page::kPageSize);
7936 inner->set_owner(lo_space());
7937 inner->SetFlag(MemoryChunk::ABOUT_TO_BE_FREED);
7938 inner = MemoryChunk::FromAddress(
7939 inner->address() + Page::kPageSize);
7943 isolate_->heap()->store_buffer()->Compact();
7944 isolate_->heap()->store_buffer()->Filter(MemoryChunk::ABOUT_TO_BE_FREED);
7945 for (chunk = chunks_queued_for_free_; chunk != NULL; chunk = next) {
7946 next = chunk->next_chunk();
7947 isolate_->memory_allocator()->Free(chunk);
7949 chunks_queued_for_free_ = NULL;
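// Stores the addresses of recently unmapped pages in a small ring buffer,
// XOR-tagging the low bits (0xc1ead "cleared" / 0x1d1ed "I died") so that the
// entries are easy to spot in a crash dump.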
7953 void Heap::RememberUnmappedPage(Address page, bool compacted) {
7954 uintptr_t p = reinterpret_cast<uintptr_t>(page);
7955 // Tag the page pointer to make it findable in the dump file.
7957 p ^= 0xc1ead & (Page::kPageSize - 1); // Cleared.
7959 p ^= 0x1d1ed & (Page::kPageSize - 1); // I died.
7961 remembered_unmapped_pages_[remembered_unmapped_pages_index_] =
7962 reinterpret_cast<Address>(p);
7963 remembered_unmapped_pages_index_++;
7964 remembered_unmapped_pages_index_ %= kRememberedUnmappedPages;
7968 void Heap::ClearObjectStats(bool clear_last_time_stats) {
7969 memset(object_counts_, 0, sizeof(object_counts_));
7970 memset(object_sizes_, 0, sizeof(object_sizes_));
7971 if (clear_last_time_stats) {
7972 memset(object_counts_last_time_, 0, sizeof(object_counts_last_time_));
7973 memset(object_sizes_last_time_, 0, sizeof(object_sizes_last_time_));
7978 static LazyMutex checkpoint_object_stats_mutex = LAZY_MUTEX_INITIALIZER;
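// CheckpointObjectStats publishes per-type object counts and sizes to the
// counters as deltas against the previous checkpoint, then snapshots the
// current arrays into the *_last_time_ copies for the next round.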
7981 void Heap::CheckpointObjectStats() {
7982 ScopedLock lock(checkpoint_object_stats_mutex.Pointer());
7983 Counters* counters = isolate()->counters();
7984 #define ADJUST_LAST_TIME_OBJECT_COUNT(name) \
7985 counters->count_of_##name()->Increment( \
7986 static_cast<int>(object_counts_[name])); \
7987 counters->count_of_##name()->Decrement( \
7988 static_cast<int>(object_counts_last_time_[name])); \
7989 counters->size_of_##name()->Increment( \
7990 static_cast<int>(object_sizes_[name])); \
7991 counters->size_of_##name()->Decrement( \
7992 static_cast<int>(object_sizes_last_time_[name]));
7993 INSTANCE_TYPE_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
7994 #undef ADJUST_LAST_TIME_OBJECT_COUNT
7996 #define ADJUST_LAST_TIME_OBJECT_COUNT(name) \
7997 index = FIRST_CODE_KIND_SUB_TYPE + Code::name; \
7998 counters->count_of_CODE_TYPE_##name()->Increment( \
7999 static_cast<int>(object_counts_[index])); \
8000 counters->count_of_CODE_TYPE_##name()->Decrement( \
8001 static_cast<int>(object_counts_last_time_[index])); \
8002 counters->size_of_CODE_TYPE_##name()->Increment( \
8003 static_cast<int>(object_sizes_[index])); \
8004 counters->size_of_CODE_TYPE_##name()->Decrement( \
8005 static_cast<int>(object_sizes_last_time_[index]));
8006 CODE_KIND_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
8007 #undef ADJUST_LAST_TIME_OBJECT_COUNT
8008 #define ADJUST_LAST_TIME_OBJECT_COUNT(name) \
8009 index = FIRST_FIXED_ARRAY_SUB_TYPE + name; \
8010 counters->count_of_FIXED_ARRAY_##name()->Increment( \
8011 static_cast<int>(object_counts_[index])); \
8012 counters->count_of_FIXED_ARRAY_##name()->Decrement( \
8013 static_cast<int>(object_counts_last_time_[index])); \
8014 counters->size_of_FIXED_ARRAY_##name()->Increment( \
8015 static_cast<int>(object_sizes_[index])); \
8016 counters->size_of_FIXED_ARRAY_##name()->Decrement( \
8017 static_cast<int>(object_sizes_last_time_[index]));
8018 FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
8019 #undef ADJUST_LAST_TIME_OBJECT_COUNT
8021 OS::MemCopy(object_counts_last_time_, object_counts_, sizeof(object_counts_));
8022 OS::MemCopy(object_sizes_last_time_, object_sizes_, sizeof(object_sizes_));
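// The relocation lock is only taken when parallel recompilation is enabled;
// the constructor also remembers whether the thread acquiring it is the
// optimizer thread.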
8027 Heap::RelocationLock::RelocationLock(Heap* heap) : heap_(heap) {
8028 if (FLAG_parallel_recompilation) {
8029 heap_->relocation_mutex_->Lock();
8031 heap_->relocation_mutex_locked_by_optimizer_thread_ =
8032 heap_->isolate()->optimizing_compiler_thread()->IsOptimizerThread();
8037 } } // namespace v8::internal