// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "accessors.h"
#include "bootstrapper.h"
#include "compilation-cache.h"
#include "cpu-profiler.h"
#include "deoptimizer.h"
#include "global-handles.h"
#include "heap-profiler.h"
#include "incremental-marking.h"
#include "mark-compact.h"
#include "objects-visiting.h"
#include "objects-visiting-inl.h"
#include "runtime-profiler.h"
#include "scopeinfo.h"
#include "store-buffer.h"
#include "v8threads.h"
#include "vm-state-inl.h"

#if V8_TARGET_ARCH_ARM && !V8_INTERPRETED_REGEXP
#include "regexp-macro-assembler.h"
#include "arm/regexp-macro-assembler-arm.h"
#endif
#if V8_TARGET_ARCH_MIPS && !V8_INTERPRETED_REGEXP
#include "regexp-macro-assembler.h"
#include "mips/regexp-macro-assembler-mips.h"
#endif

namespace v8 {
namespace internal {


Heap::Heap()
    : isolate_(NULL),
// semispace_size_ should be a power of 2 and old_generation_size_ should be
// a multiple of Page::kPageSize.
#if V8_TARGET_ARCH_X64
#define LUMP_OF_MEMORY (2 * MB)
      code_range_size_(512*MB),
#else
#define LUMP_OF_MEMORY MB
      code_range_size_(0),
#endif
#if defined(ANDROID) || V8_TARGET_ARCH_MIPS
      reserved_semispace_size_(4 * Max(LUMP_OF_MEMORY, Page::kPageSize)),
      max_semispace_size_(4 * Max(LUMP_OF_MEMORY, Page::kPageSize)),
      initial_semispace_size_(Page::kPageSize),
      max_old_generation_size_(192*MB),
      max_executable_size_(max_old_generation_size_),
#else
      reserved_semispace_size_(8 * Max(LUMP_OF_MEMORY, Page::kPageSize)),
      max_semispace_size_(8 * Max(LUMP_OF_MEMORY, Page::kPageSize)),
      initial_semispace_size_(Page::kPageSize),
      max_old_generation_size_(700ul * LUMP_OF_MEMORY),
      max_executable_size_(256l * LUMP_OF_MEMORY),
#endif
// Variables set based on semispace_size_ and old_generation_size_ in
// ConfigureHeap (survived_since_last_expansion_, external_allocation_limit_).
// Will be 4 * reserved_semispace_size_ to ensure that young
// generation can be aligned to its size.
      survived_since_last_expansion_(0),
      always_allocate_scope_depth_(0),
      linear_allocation_scope_depth_(0),
      contexts_disposed_(0),
      flush_monomorphic_ics_(false),
      scan_on_scavenge_pages_(0),
      old_pointer_space_(NULL),
      old_data_space_(NULL),
      property_cell_space_(NULL),
      gc_state_(NOT_IN_GC),
      gc_post_processing_depth_(0),
      remembered_unmapped_pages_index_(0),
      unflattened_strings_length_(0),
      allocation_timeout_(0),
      disallow_allocation_failure_(false),
      new_space_high_promotion_mode_active_(false),
      old_generation_allocation_limit_(kMinimumOldGenerationAllocationLimit),
      size_of_old_gen_at_last_old_space_gc_(0),
      external_allocation_limit_(0),
      amount_of_external_allocated_memory_(0),
      amount_of_external_allocated_memory_at_last_global_gc_(0),
      old_gen_exhausted_(false),
      store_buffer_rebuilder_(store_buffer()),
      hidden_string_(NULL),
      global_gc_prologue_callback_(NULL),
      global_gc_epilogue_callback_(NULL),
      gc_safe_size_of_old_object_(NULL),
      total_regexp_code_generated_(0),
      young_survivors_after_last_gc_(0),
      high_survival_rate_period_length_(0),
      low_survival_rate_period_length_(0),
      previous_survival_rate_trend_(Heap::STABLE),
      survival_rate_trend_(Heap::STABLE),
      total_gc_time_ms_(0.0),
      max_alive_after_gc_(0),
      min_in_mutator_(kMaxInt),
      alive_after_last_gc_(0),
      last_gc_end_timestamp_(0.0),
      incremental_marking_(this),
      number_idle_notifications_(0),
      last_idle_notification_gc_count_(0),
      last_idle_notification_gc_count_init_(false),
      mark_sweeps_since_idle_round_started_(0),
      gc_count_at_last_idle_gc_(0),
      scavenges_since_last_idle_round_(kIdleScavengeThreshold),
      gcs_since_last_deopt_(0),
      no_weak_embedded_maps_verification_scope_depth_(0),
      promotion_queue_(this),
      chunks_queued_for_free_(NULL),
      relocation_mutex_(NULL) {
  // Allow build-time customization of the max semispace size. Building
  // V8 with snapshots and a non-default max semispace size is much
  // easier if you can define it as part of the build environment.
#if defined(V8_MAX_SEMISPACE_SIZE)
  max_semispace_size_ = reserved_semispace_size_ = V8_MAX_SEMISPACE_SIZE;
#endif

  intptr_t max_virtual = OS::MaxVirtualMemory();

  if (max_virtual > 0) {
    if (code_range_size_ > 0) {
      // Reserve no more than 1/8 of the memory for the code range.
      code_range_size_ = Min(code_range_size_, max_virtual >> 3);
    }
  }

  memset(roots_, 0, sizeof(roots_[0]) * kRootListLength);
  native_contexts_list_ = NULL;
  array_buffers_list_ = Smi::FromInt(0);
  allocation_sites_list_ = Smi::FromInt(0);
  mark_compact_collector_.heap_ = this;
  external_string_table_.heap_ = this;
  // Put a dummy entry in the remembered pages so we can find the list in
  // the minidump even if there are no real unmapped pages.
  RememberUnmappedPage(NULL, false);

  ClearObjectStats(true);
}

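
// Total capacity of the young generation plus all paged spaces; the large
// object space is not included in this number.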
intptr_t Heap::Capacity() {
  if (!HasBeenSetUp()) return 0;

  return new_space_.Capacity() +
      old_pointer_space_->Capacity() +
      old_data_space_->Capacity() +
      code_space_->Capacity() +
      map_space_->Capacity() +
      cell_space_->Capacity() +
      property_cell_space_->Capacity();
}


intptr_t Heap::CommittedMemory() {
  if (!HasBeenSetUp()) return 0;

  return new_space_.CommittedMemory() +
      old_pointer_space_->CommittedMemory() +
      old_data_space_->CommittedMemory() +
      code_space_->CommittedMemory() +
      map_space_->CommittedMemory() +
      cell_space_->CommittedMemory() +
      property_cell_space_->CommittedMemory() +
      lo_space_->CommittedMemory();
}


size_t Heap::CommittedPhysicalMemory() {
  if (!HasBeenSetUp()) return 0;

  return new_space_.CommittedPhysicalMemory() +
      old_pointer_space_->CommittedPhysicalMemory() +
      old_data_space_->CommittedPhysicalMemory() +
      code_space_->CommittedPhysicalMemory() +
      map_space_->CommittedPhysicalMemory() +
      cell_space_->CommittedPhysicalMemory() +
      property_cell_space_->CommittedPhysicalMemory() +
      lo_space_->CommittedPhysicalMemory();
}


intptr_t Heap::CommittedMemoryExecutable() {
  if (!HasBeenSetUp()) return 0;

  return isolate()->memory_allocator()->SizeExecutable();
}


intptr_t Heap::Available() {
  if (!HasBeenSetUp()) return 0;

  return new_space_.Available() +
      old_pointer_space_->Available() +
      old_data_space_->Available() +
      code_space_->Available() +
      map_space_->Available() +
      cell_space_->Available() +
      property_cell_space_->Available();
}

bool Heap::HasBeenSetUp() {
  return old_pointer_space_ != NULL &&
         old_data_space_ != NULL &&
         code_space_ != NULL &&
         map_space_ != NULL &&
         cell_space_ != NULL &&
         property_cell_space_ != NULL &&
         lo_space_ != NULL;
}

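
// GC-safe variant of object size lookup: if the object has been intrusively
// marked its map word no longer points at a map, so the size is taken from
// the marking bookkeeping instead.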
int Heap::GcSafeSizeOfOldObject(HeapObject* object) {
  if (IntrusiveMarking::IsMarked(object)) {
    return IntrusiveMarking::SizeOfMarkedObject(object);
  }
  return object->SizeFromMap(object->map());
}

GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space,
                                              const char** reason) {
  // Is global GC requested?
  if (space != NEW_SPACE) {
    isolate_->counters()->gc_compactor_caused_by_request()->Increment();
    *reason = "GC in old space requested";
    return MARK_COMPACTOR;
  }

  if (FLAG_gc_global || (FLAG_stress_compaction && (gc_count_ & 1) != 0)) {
    *reason = "GC in old space forced by flags";
    return MARK_COMPACTOR;
  }

  // Is enough data promoted to justify a global GC?
  if (OldGenerationAllocationLimitReached()) {
    isolate_->counters()->gc_compactor_caused_by_promoted_data()->Increment();
    *reason = "promotion limit reached";
    return MARK_COMPACTOR;
  }

  // Have allocation in OLD and LO failed?
  if (old_gen_exhausted_) {
    isolate_->counters()->
        gc_compactor_caused_by_oldspace_exhaustion()->Increment();
    *reason = "old generations exhausted";
    return MARK_COMPACTOR;
  }

  // Is there enough space left in OLD to guarantee that a scavenge can
  // succeed?
  //
  // Note that MemoryAllocator->MaxAvailable() undercounts the memory available
  // for object promotion. It counts only the bytes that the memory
  // allocator has not yet allocated from the OS and assigned to any space,
  // and does not count available bytes already in the old space or code
  // space. Undercounting is safe---we may get an unrequested full GC when
  // a scavenge would have succeeded.
  if (isolate_->memory_allocator()->MaxAvailable() <= new_space_.Size()) {
    isolate_->counters()->
        gc_compactor_caused_by_oldspace_exhaustion()->Increment();
    *reason = "scavenge might not succeed";
    return MARK_COMPACTOR;
  }

  // Default
  *reason = NULL;
  return SCAVENGER;
}

// TODO(1238405): Combine the infrastructure for --heap-stats and
// --log-gc to avoid the complicated preprocessor and flag testing.
void Heap::ReportStatisticsBeforeGC() {
  // Heap::ReportHeapStatistics will also log NewSpace statistics when
  // --log-gc is set. The following logic is used to avoid double logging.
#ifdef DEBUG
  if (FLAG_heap_stats || FLAG_log_gc) new_space_.CollectStatistics();
  if (FLAG_heap_stats) {
    ReportHeapStatistics("Before GC");
  } else if (FLAG_log_gc) {
    new_space_.ReportStatistics();
  }
  if (FLAG_heap_stats || FLAG_log_gc) new_space_.ClearHistograms();
#else
  if (FLAG_log_gc) {
    new_space_.CollectStatistics();
    new_space_.ReportStatistics();
    new_space_.ClearHistograms();
  }
#endif
}

void Heap::PrintShortHeapStatistics() {
  if (!FLAG_trace_gc_verbose) return;
  PrintPID("Memory allocator, used: %6" V8_PTR_PREFIX "d KB"
           ", available: %6" V8_PTR_PREFIX "d KB\n",
           isolate_->memory_allocator()->Size() / KB,
           isolate_->memory_allocator()->Available() / KB);
  PrintPID("New space, used: %6" V8_PTR_PREFIX "d KB"
           ", available: %6" V8_PTR_PREFIX "d KB"
           ", committed: %6" V8_PTR_PREFIX "d KB\n",
           new_space_.Size() / KB,
           new_space_.Available() / KB,
           new_space_.CommittedMemory() / KB);
  PrintPID("Old pointers, used: %6" V8_PTR_PREFIX "d KB"
           ", available: %6" V8_PTR_PREFIX "d KB"
           ", committed: %6" V8_PTR_PREFIX "d KB\n",
           old_pointer_space_->SizeOfObjects() / KB,
           old_pointer_space_->Available() / KB,
           old_pointer_space_->CommittedMemory() / KB);
  PrintPID("Old data space, used: %6" V8_PTR_PREFIX "d KB"
           ", available: %6" V8_PTR_PREFIX "d KB"
           ", committed: %6" V8_PTR_PREFIX "d KB\n",
           old_data_space_->SizeOfObjects() / KB,
           old_data_space_->Available() / KB,
           old_data_space_->CommittedMemory() / KB);
  PrintPID("Code space, used: %6" V8_PTR_PREFIX "d KB"
           ", available: %6" V8_PTR_PREFIX "d KB"
           ", committed: %6" V8_PTR_PREFIX "d KB\n",
           code_space_->SizeOfObjects() / KB,
           code_space_->Available() / KB,
           code_space_->CommittedMemory() / KB);
  PrintPID("Map space, used: %6" V8_PTR_PREFIX "d KB"
           ", available: %6" V8_PTR_PREFIX "d KB"
           ", committed: %6" V8_PTR_PREFIX "d KB\n",
           map_space_->SizeOfObjects() / KB,
           map_space_->Available() / KB,
           map_space_->CommittedMemory() / KB);
  PrintPID("Cell space, used: %6" V8_PTR_PREFIX "d KB"
           ", available: %6" V8_PTR_PREFIX "d KB"
           ", committed: %6" V8_PTR_PREFIX "d KB\n",
           cell_space_->SizeOfObjects() / KB,
           cell_space_->Available() / KB,
           cell_space_->CommittedMemory() / KB);
  PrintPID("PropertyCell space, used: %6" V8_PTR_PREFIX "d KB"
           ", available: %6" V8_PTR_PREFIX "d KB"
           ", committed: %6" V8_PTR_PREFIX "d KB\n",
           property_cell_space_->SizeOfObjects() / KB,
           property_cell_space_->Available() / KB,
           property_cell_space_->CommittedMemory() / KB);
  PrintPID("Large object space, used: %6" V8_PTR_PREFIX "d KB"
           ", available: %6" V8_PTR_PREFIX "d KB"
           ", committed: %6" V8_PTR_PREFIX "d KB\n",
           lo_space_->SizeOfObjects() / KB,
           lo_space_->Available() / KB,
           lo_space_->CommittedMemory() / KB);
  PrintPID("All spaces, used: %6" V8_PTR_PREFIX "d KB"
           ", available: %6" V8_PTR_PREFIX "d KB"
           ", committed: %6" V8_PTR_PREFIX "d KB\n",
           this->SizeOfObjects() / KB,
           this->Available() / KB,
           this->CommittedMemory() / KB);
  PrintPID("External memory reported: %6" V8_PTR_PREFIX "d KB\n",
           amount_of_external_allocated_memory_ / KB);
  PrintPID("Total time spent in GC : %.1f ms\n", total_gc_time_ms_);
}

// TODO(1238405): Combine the infrastructure for --heap-stats and
// --log-gc to avoid the complicated preprocessor and flag testing.
void Heap::ReportStatisticsAfterGC() {
  // As with the statistics before GC, we use some complicated logic to ensure
  // that NewSpace statistics are logged exactly once when --log-gc is turned
  // on.
#ifdef DEBUG
  if (FLAG_heap_stats) {
    new_space_.CollectStatistics();
    ReportHeapStatistics("After GC");
  } else if (FLAG_log_gc) {
    new_space_.ReportStatistics();
  }
#else
  if (FLAG_log_gc) new_space_.ReportStatistics();
#endif
}

void Heap::GarbageCollectionPrologue() {
  { AllowHeapAllocation for_the_first_part_of_prologue;
    isolate_->transcendental_cache()->Clear();
    ClearJSFunctionResultCaches();
    gc_count_++;
    unflattened_strings_length_ = 0;

    if (FLAG_flush_code && FLAG_flush_code_incrementally) {
      mark_compact_collector()->EnableCodeFlushing(true);
    }

#ifdef VERIFY_HEAP
    if (FLAG_verify_heap) {
      Verify();
    }
#endif
  }

#ifdef DEBUG
  ASSERT(!AllowHeapAllocation::IsAllowed() && gc_state_ == NOT_IN_GC);

  if (FLAG_gc_verbose) Print();

  ReportStatisticsBeforeGC();
#endif  // DEBUG

  store_buffer()->GCPrologue();
}

intptr_t Heap::SizeOfObjects() {
  intptr_t total = 0;
  AllSpaces spaces(this);
  for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
    total += space->SizeOfObjects();
  }
  return total;
}


void Heap::RepairFreeListsAfterBoot() {
  PagedSpaces spaces(this);
  for (PagedSpace* space = spaces.next();
       space != NULL;
       space = spaces.next()) {
    space->RepairFreeListsAfterBoot();
  }
}

void Heap::GarbageCollectionEpilogue() {
  store_buffer()->GCEpilogue();

  // In release mode, we only zap the from space under heap verification.
  if (Heap::ShouldZapGarbage()) {
    ZapFromSpace();
  }

#ifdef VERIFY_HEAP
  if (FLAG_verify_heap) {
    Verify();
  }
#endif

  AllowHeapAllocation for_the_rest_of_the_epilogue;

#ifdef DEBUG
  if (FLAG_print_global_handles) isolate_->global_handles()->Print();
  if (FLAG_print_handles) PrintHandles();
  if (FLAG_gc_verbose) Print();
  if (FLAG_code_stats) ReportCodeStatistics("After GC");
#endif

  if (FLAG_deopt_every_n_garbage_collections > 0) {
    if (++gcs_since_last_deopt_ == FLAG_deopt_every_n_garbage_collections) {
      Deoptimizer::DeoptimizeAll(isolate());
      gcs_since_last_deopt_ = 0;
    }
  }

  isolate_->counters()->alive_after_last_gc()->Set(
      static_cast<int>(SizeOfObjects()));

  isolate_->counters()->string_table_capacity()->Set(
      string_table()->Capacity());
  isolate_->counters()->number_of_symbols()->Set(
      string_table()->NumberOfElements());

  if (CommittedMemory() > 0) {
    isolate_->counters()->external_fragmentation_total()->AddSample(
        static_cast<int>(100 - (SizeOfObjects() * 100.0) / CommittedMemory()));

    isolate_->counters()->heap_fraction_map_space()->AddSample(
        static_cast<int>(
            (map_space()->CommittedMemory() * 100.0) / CommittedMemory()));
    isolate_->counters()->heap_fraction_cell_space()->AddSample(
        static_cast<int>(
            (cell_space()->CommittedMemory() * 100.0) / CommittedMemory()));
    isolate_->counters()->heap_fraction_property_cell_space()->
        AddSample(static_cast<int>(
            (property_cell_space()->CommittedMemory() * 100.0) /
            CommittedMemory()));

    isolate_->counters()->heap_sample_total_committed()->AddSample(
        static_cast<int>(CommittedMemory() / KB));
    isolate_->counters()->heap_sample_total_used()->AddSample(
        static_cast<int>(SizeOfObjects() / KB));
    isolate_->counters()->heap_sample_map_space_committed()->AddSample(
        static_cast<int>(map_space()->CommittedMemory() / KB));
    isolate_->counters()->heap_sample_cell_space_committed()->AddSample(
        static_cast<int>(cell_space()->CommittedMemory() / KB));
    isolate_->counters()->
        heap_sample_property_cell_space_committed()->
            AddSample(static_cast<int>(
                property_cell_space()->CommittedMemory() / KB));
  }

#define UPDATE_COUNTERS_FOR_SPACE(space) \
  isolate_->counters()->space##_bytes_available()->Set( \
      static_cast<int>(space()->Available())); \
  isolate_->counters()->space##_bytes_committed()->Set( \
      static_cast<int>(space()->CommittedMemory())); \
  isolate_->counters()->space##_bytes_used()->Set( \
      static_cast<int>(space()->SizeOfObjects()));
#define UPDATE_FRAGMENTATION_FOR_SPACE(space) \
  if (space()->CommittedMemory() > 0) { \
    isolate_->counters()->external_fragmentation_##space()->AddSample( \
        static_cast<int>(100 - \
            (space()->SizeOfObjects() * 100.0) / space()->CommittedMemory())); \
  }
#define UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(space) \
  UPDATE_COUNTERS_FOR_SPACE(space) \
  UPDATE_FRAGMENTATION_FOR_SPACE(space)

  UPDATE_COUNTERS_FOR_SPACE(new_space)
  UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(old_pointer_space)
  UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(old_data_space)
  UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(code_space)
  UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(map_space)
  UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(cell_space)
  UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(property_cell_space)
  UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(lo_space)
#undef UPDATE_COUNTERS_FOR_SPACE
#undef UPDATE_FRAGMENTATION_FOR_SPACE
#undef UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE

  ReportStatisticsAfterGC();

#ifdef ENABLE_DEBUGGER_SUPPORT
  isolate_->debug()->AfterGarbageCollection();
#endif  // ENABLE_DEBUGGER_SUPPORT

  error_object_list_.DeferredFormatStackTrace(isolate());
}

void Heap::CollectAllGarbage(int flags, const char* gc_reason) {
  // Since we are ignoring the return value, the exact choice of space does
  // not matter, so long as we do not specify NEW_SPACE, which would not
  // cause a full GC.
  mark_compact_collector_.SetFlags(flags);
  CollectGarbage(OLD_POINTER_SPACE, gc_reason);
  mark_compact_collector_.SetFlags(kNoGCFlags);
}


void Heap::CollectAllAvailableGarbage(const char* gc_reason) {
  // Since we are ignoring the return value, the exact choice of space does
  // not matter, so long as we do not specify NEW_SPACE, which would not
  // cause a full GC.
  // Major GC would invoke weak handle callbacks on weakly reachable
  // handles, but won't collect weakly reachable objects until the next
  // major GC. Therefore, if we collect aggressively and a weak handle callback
  // has been invoked, we rerun major GC to release objects which become
  // garbage.
  // Note: as weak callbacks can execute arbitrary code, we cannot
  // hope that eventually there will be no weak callback invocations.
  // Therefore stop recollecting after several attempts.
  mark_compact_collector()->SetFlags(kMakeHeapIterableMask |
                                     kReduceMemoryFootprintMask);
  isolate_->compilation_cache()->Clear();
  const int kMaxNumberOfAttempts = 7;
  const int kMinNumberOfAttempts = 2;
  for (int attempt = 0; attempt < kMaxNumberOfAttempts; attempt++) {
    if (!CollectGarbage(OLD_POINTER_SPACE, MARK_COMPACTOR, gc_reason, NULL) &&
        attempt + 1 >= kMinNumberOfAttempts) {
      break;
    }
  }
  mark_compact_collector()->SetFlags(kNoGCFlags);
  incremental_marking()->UncommitMarkingDeque();
}

bool Heap::CollectGarbage(AllocationSpace space,
                          GarbageCollector collector,
                          const char* gc_reason,
                          const char* collector_reason) {
  // The VM is in the GC state until exiting this function.
  VMState<GC> state(isolate_);

#ifdef DEBUG
  // Reset the allocation timeout to the GC interval, but make sure to
  // allow at least a few allocations after a collection. The reason
  // for this is that we have a lot of allocation sequences and we
  // assume that a garbage collection will allow the subsequent
  // allocation attempts to go through.
  allocation_timeout_ = Max(6, FLAG_gc_interval);
#endif

  if (collector == SCAVENGER && !incremental_marking()->IsStopped()) {
    if (FLAG_trace_incremental_marking) {
      PrintF("[IncrementalMarking] Scavenge during marking.\n");
    }
  }

  if (collector == MARK_COMPACTOR &&
      !mark_compact_collector()->abort_incremental_marking() &&
      !incremental_marking()->IsStopped() &&
      !incremental_marking()->should_hurry() &&
      FLAG_incremental_marking_steps) {
    // Make progress in incremental marking.
    const intptr_t kStepSizeWhenDelayedByScavenge = 1 * MB;
    incremental_marking()->Step(kStepSizeWhenDelayedByScavenge,
                                IncrementalMarking::NO_GC_VIA_STACK_GUARD);
    if (!incremental_marking()->IsComplete()) {
      if (FLAG_trace_incremental_marking) {
        PrintF("[IncrementalMarking] Delaying MarkSweep.\n");
      }
      collector = SCAVENGER;
      collector_reason = "incremental marking delaying mark-sweep";
    }
  }

  bool next_gc_likely_to_collect_more = false;

  { GCTracer tracer(this, gc_reason, collector_reason);
    ASSERT(AllowHeapAllocation::IsAllowed());
    DisallowHeapAllocation no_allocation_during_gc;
    GarbageCollectionPrologue();
    // The GC count was incremented in the prologue. Tell the tracer about
    // it.
    tracer.set_gc_count(gc_count_);

    // Tell the tracer which collector we've selected.
    tracer.set_collector(collector);

    {
      HistogramTimerScope histogram_timer_scope(
          (collector == SCAVENGER) ? isolate_->counters()->gc_scavenger()
                                   : isolate_->counters()->gc_compactor());
      next_gc_likely_to_collect_more =
          PerformGarbageCollection(collector, &tracer);
    }

    GarbageCollectionEpilogue();
  }

  // Start incremental marking for the next cycle. The heap snapshot
  // generator needs incremental marking to stay off after it aborted.
  if (!mark_compact_collector()->abort_incremental_marking() &&
      incremental_marking()->IsStopped() &&
      incremental_marking()->WorthActivating() &&
      NextGCIsLikelyToBeFull()) {
    incremental_marking()->Start();
  }

  return next_gc_likely_to_collect_more;
}

void Heap::PerformScavenge() {
  GCTracer tracer(this, NULL, NULL);
  if (incremental_marking()->IsStopped()) {
    PerformGarbageCollection(SCAVENGER, &tracer);
  } else {
    PerformGarbageCollection(MARK_COMPACTOR, &tracer);
  }
}

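
// Moves a range of elements inside a FixedArray. For arrays outside new
// space this also refreshes the store buffer and incremental marking state
// for any new-space values that were moved.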
void Heap::MoveElements(FixedArray* array,
                        int dst_index,
                        int src_index,
                        int len) {
  if (len == 0) return;

  ASSERT(array->map() != HEAP->fixed_cow_array_map());
  Object** dst_objects = array->data_start() + dst_index;
  OS::MemMove(dst_objects,
              array->data_start() + src_index,
              len * kPointerSize);
  if (!InNewSpace(array)) {
    for (int i = 0; i < len; i++) {
      // TODO(hpayer): check store buffer for entries
      if (InNewSpace(dst_objects[i])) {
        RecordWrite(array->address(), array->OffsetOfElementAt(dst_index + i));
      }
    }
  }
  incremental_marking()->RecordWrites(array);
}

#ifdef VERIFY_HEAP
// Helper class for verifying the string table.
class StringTableVerifier : public ObjectVisitor {
 public:
  void VisitPointers(Object** start, Object** end) {
    // Visit all HeapObject pointers in [start, end).
    for (Object** p = start; p < end; p++) {
      if ((*p)->IsHeapObject()) {
        // Check that the string is actually internalized.
        CHECK((*p)->IsTheHole() || (*p)->IsUndefined() ||
              (*p)->IsInternalizedString());
      }
    }
  }
};


static void VerifyStringTable() {
  StringTableVerifier verifier;
  HEAP->string_table()->IterateElements(&verifier);
}
#endif  // VERIFY_HEAP


static bool AbortIncrementalMarkingAndCollectGarbage(
    Heap* heap,
    AllocationSpace space,
    const char* gc_reason = NULL) {
  heap->mark_compact_collector()->SetFlags(Heap::kAbortIncrementalMarkingMask);
  bool result = heap->CollectGarbage(space, gc_reason);
  heap->mark_compact_collector()->SetFlags(Heap::kNoGCFlags);
  return result;
}

void Heap::ReserveSpace(
    int *sizes,
    Address *locations_out) {
  bool gc_performed = true;
  int counter = 0;
  static const int kThreshold = 20;
  while (gc_performed && counter++ < kThreshold) {
    gc_performed = false;
    ASSERT(NEW_SPACE == FIRST_PAGED_SPACE - 1);
    for (int space = NEW_SPACE; space <= LAST_PAGED_SPACE; space++) {
      if (sizes[space] != 0) {
        MaybeObject* allocation;
        if (space == NEW_SPACE) {
          allocation = new_space()->AllocateRaw(sizes[space]);
        } else {
          allocation = paged_space(space)->AllocateRaw(sizes[space]);
        }
        FreeListNode* node;
        if (!allocation->To<FreeListNode>(&node)) {
          if (space == NEW_SPACE) {
            Heap::CollectGarbage(NEW_SPACE,
                                 "failed to reserve space in the new space");
          } else {
            AbortIncrementalMarkingAndCollectGarbage(
                this,
                static_cast<AllocationSpace>(space),
                "failed to reserve space in paged space");
          }
          gc_performed = true;
          break;
        } else {
          // Mark with a free list node, in case we have a GC before
          // allocating memory from it.
          node->set_size(this, sizes[space]);
          locations_out[space] = node->address();
        }
      }
    }
  }

  if (gc_performed) {
    // Failed to reserve the space after several attempts.
    V8::FatalProcessOutOfMemory("Heap::ReserveSpace");
  }
}

void Heap::EnsureFromSpaceIsCommitted() {
  if (new_space_.CommitFromSpaceIfNeeded()) return;

  // Committing memory to from space failed.
  // Memory is exhausted and we will die.
  V8::FatalProcessOutOfMemory("Committing semi space failed.");
}


void Heap::ClearJSFunctionResultCaches() {
  if (isolate_->bootstrapper()->IsActive()) return;

  Object* context = native_contexts_list_;
  while (!context->IsUndefined()) {
    // Get the caches for this context. GC can happen when the context
    // is not fully initialized, so the caches can be undefined.
    Object* caches_or_undefined =
        Context::cast(context)->get(Context::JSFUNCTION_RESULT_CACHES_INDEX);
    if (!caches_or_undefined->IsUndefined()) {
      FixedArray* caches = FixedArray::cast(caches_or_undefined);
      // Clear the caches:
      int length = caches->length();
      for (int i = 0; i < length; i++) {
        JSFunctionResultCache::cast(caches->get(i))->Clear();
      }
    }
    // Get the next context:
    context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
  }
}

void Heap::ClearNormalizedMapCaches() {
  if (isolate_->bootstrapper()->IsActive() &&
      !incremental_marking()->IsMarking()) {
    return;
  }

  Object* context = native_contexts_list_;
  while (!context->IsUndefined()) {
    // GC can happen when the context is not fully initialized,
    // so the cache can be undefined.
    Object* cache =
        Context::cast(context)->get(Context::NORMALIZED_MAP_CACHE_INDEX);
    if (!cache->IsUndefined()) {
      NormalizedMapCache::cast(cache)->Clear();
    }
    context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
  }
}

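
// The survival rate is the percentage of the new space contents that survived
// the last scavenge; sustained high or low rates drive the new-space
// promotion heuristics.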
void Heap::UpdateSurvivalRateTrend(int start_new_space_size) {
  double survival_rate =
      (static_cast<double>(young_survivors_after_last_gc_) * 100) /
      start_new_space_size;

  if (survival_rate > kYoungSurvivalRateHighThreshold) {
    high_survival_rate_period_length_++;
  } else {
    high_survival_rate_period_length_ = 0;
  }

  if (survival_rate < kYoungSurvivalRateLowThreshold) {
    low_survival_rate_period_length_++;
  } else {
    low_survival_rate_period_length_ = 0;
  }

  double survival_rate_diff = survival_rate_ - survival_rate;

  if (survival_rate_diff > kYoungSurvivalRateAllowedDeviation) {
    set_survival_rate_trend(DECREASING);
  } else if (survival_rate_diff < -kYoungSurvivalRateAllowedDeviation) {
    set_survival_rate_trend(INCREASING);
  } else {
    set_survival_rate_trend(STABLE);
  }

  survival_rate_ = survival_rate;
}

bool Heap::PerformGarbageCollection(GarbageCollector collector,
                                    GCTracer* tracer) {
  bool next_gc_likely_to_collect_more = false;

  if (collector != SCAVENGER) {
    PROFILE(isolate_, CodeMovingGCEvent());
  }

#ifdef VERIFY_HEAP
  if (FLAG_verify_heap) {
    VerifyStringTable();
  }
#endif

  GCType gc_type =
      collector == MARK_COMPACTOR ? kGCTypeMarkSweepCompact : kGCTypeScavenge;

  {
    GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
    VMState<EXTERNAL> state(isolate_);
    CallGCPrologueCallbacks(gc_type, kNoGCCallbackFlags);
  }

  EnsureFromSpaceIsCommitted();

  int start_new_space_size = Heap::new_space()->SizeAsInt();

  if (IsHighSurvivalRate()) {
    // We speed up the incremental marker if it is running so that it
    // does not fall behind the rate of promotion, which would cause a
    // constantly growing old space.
    incremental_marking()->NotifyOfHighPromotionRate();
  }

  if (collector == MARK_COMPACTOR) {
    // Perform mark-sweep with optional compaction.
    MarkCompact(tracer);

    UpdateSurvivalRateTrend(start_new_space_size);

    size_of_old_gen_at_last_old_space_gc_ = PromotedSpaceSizeOfObjects();

    old_generation_allocation_limit_ =
        OldGenerationAllocationLimit(size_of_old_gen_at_last_old_space_gc_);

    old_gen_exhausted_ = false;
  } else {
    Scavenge();

    UpdateSurvivalRateTrend(start_new_space_size);
  }

  if (!new_space_high_promotion_mode_active_ &&
      new_space_.Capacity() == new_space_.MaximumCapacity() &&
      IsStableOrIncreasingSurvivalTrend() &&
      IsHighSurvivalRate()) {
    // Stable high survival rates even though young generation is at
    // maximum capacity indicates that most objects will be promoted.
    // To decrease scavenger pauses and final mark-sweep pauses, we
    // have to limit maximal capacity of the young generation.
    SetNewSpaceHighPromotionModeActive(true);
    if (FLAG_trace_gc) {
      PrintPID("Limited new space size due to high promotion rate: %d MB\n",
               new_space_.InitialCapacity() / MB);
    }
    // Support for global pre-tenuring uses the high promotion mode as a
    // heuristic indicator of whether to pretenure or not. We trigger
    // deoptimization here to take advantage of pre-tenuring as soon as
    // possible.
    if (FLAG_pretenuring) {
      isolate_->stack_guard()->FullDeopt();
    }
  } else if (new_space_high_promotion_mode_active_ &&
             IsStableOrDecreasingSurvivalTrend() &&
             IsLowSurvivalRate()) {
    // Decreasing low survival rates might indicate that the above high
    // promotion mode is over and we should allow the young generation
    // to grow freely.
    SetNewSpaceHighPromotionModeActive(false);
    if (FLAG_trace_gc) {
      PrintPID("Unlimited new space size due to low promotion rate: %d MB\n",
               new_space_.MaximumCapacity() / MB);
    }
    // Trigger deoptimization here to turn off pre-tenuring as soon as
    // possible.
    if (FLAG_pretenuring) {
      isolate_->stack_guard()->FullDeopt();
    }
  }

  if (new_space_high_promotion_mode_active_ &&
      new_space_.Capacity() > new_space_.InitialCapacity()) {
    new_space_.Shrink();
  }

  isolate_->counters()->objs_since_last_young()->Set(0);

  // Callbacks that fire after this point might trigger nested GCs and
  // restart incremental marking, so the assertion can't be moved down.
  ASSERT(collector == SCAVENGER || incremental_marking()->IsStopped());

  gc_post_processing_depth_++;
  { AllowHeapAllocation allow_allocation;
    GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
    next_gc_likely_to_collect_more =
        isolate_->global_handles()->PostGarbageCollectionProcessing(
            collector, tracer);
  }
  gc_post_processing_depth_--;

  // Update relocatables.
  Relocatable::PostGarbageCollectionProcessing();

  if (collector == MARK_COMPACTOR) {
    // Register the amount of external allocated memory.
    amount_of_external_allocated_memory_at_last_global_gc_ =
        amount_of_external_allocated_memory_;
  }

  {
    GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
    VMState<EXTERNAL> state(isolate_);
    CallGCEpilogueCallbacks(gc_type);
  }

#ifdef VERIFY_HEAP
  if (FLAG_verify_heap) {
    VerifyStringTable();
  }
#endif

  return next_gc_likely_to_collect_more;
}

void Heap::CallGCPrologueCallbacks(GCType gc_type, GCCallbackFlags flags) {
  if (gc_type == kGCTypeMarkSweepCompact && global_gc_prologue_callback_) {
    global_gc_prologue_callback_();
  }
  for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
    if (gc_type & gc_prologue_callbacks_[i].gc_type) {
      gc_prologue_callbacks_[i].callback(gc_type, flags);
    }
  }
}


void Heap::CallGCEpilogueCallbacks(GCType gc_type) {
  for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
    if (gc_type & gc_epilogue_callbacks_[i].gc_type) {
      gc_epilogue_callbacks_[i].callback(gc_type, kNoGCCallbackFlags);
    }
  }
  if (gc_type == kGCTypeMarkSweepCompact && global_gc_epilogue_callback_) {
    global_gc_epilogue_callback_();
  }
}


void Heap::MarkCompact(GCTracer* tracer) {
  gc_state_ = MARK_COMPACT;
  LOG(isolate_, ResourceEvent("markcompact", "begin"));

  mark_compact_collector_.Prepare(tracer);

  ms_count_++;
  tracer->set_full_gc_count(ms_count_);

  MarkCompactPrologue();

  mark_compact_collector_.CollectGarbage();

  LOG(isolate_, ResourceEvent("markcompact", "end"));

  gc_state_ = NOT_IN_GC;

  isolate_->counters()->objs_since_last_full()->Set(0);

  contexts_disposed_ = 0;

  flush_monomorphic_ics_ = false;
}

void Heap::MarkCompactPrologue() {
  // At any old GC clear the keyed lookup cache to enable collection of unused
  // maps.
  isolate_->keyed_lookup_cache()->Clear();
  isolate_->context_slot_cache()->Clear();
  isolate_->descriptor_lookup_cache()->Clear();
  RegExpResultsCache::Clear(string_split_cache());
  RegExpResultsCache::Clear(regexp_multiple_cache());

  isolate_->compilation_cache()->MarkCompactPrologue();

  CompletelyClearInstanceofCache();

  FlushNumberStringCache();
  if (FLAG_cleanup_code_caches_at_gc) {
    polymorphic_code_cache()->set_cache(undefined_value());
  }

  ClearNormalizedMapCaches();
}


// Helper class for copying HeapObjects.
class ScavengeVisitor: public ObjectVisitor {
 public:
  explicit ScavengeVisitor(Heap* heap) : heap_(heap) {}

  void VisitPointer(Object** p) { ScavengePointer(p); }

  void VisitPointers(Object** start, Object** end) {
    // Copy all HeapObject pointers in [start, end).
    for (Object** p = start; p < end; p++) ScavengePointer(p);
  }

 private:
  void ScavengePointer(Object** p) {
    Object* object = *p;
    if (!heap_->InNewSpace(object)) return;
    Heap::ScavengeObject(reinterpret_cast<HeapObject**>(p),
                         reinterpret_cast<HeapObject*>(object));
  }

  Heap* heap_;
};

#ifdef VERIFY_HEAP
// Visitor class to verify pointers in code or data space do not point into
// new space.
class VerifyNonPointerSpacePointersVisitor: public ObjectVisitor {
 public:
  void VisitPointers(Object** start, Object** end) {
    for (Object** current = start; current < end; current++) {
      if ((*current)->IsHeapObject()) {
        CHECK(!HEAP->InNewSpace(HeapObject::cast(*current)));
      }
    }
  }
};


static void VerifyNonPointerSpacePointers() {
  // Verify that there are no pointers to new space in spaces where we
  // do not expect them.
  VerifyNonPointerSpacePointersVisitor v;
  HeapObjectIterator code_it(HEAP->code_space());
  for (HeapObject* object = code_it.Next();
       object != NULL; object = code_it.Next())
    object->Iterate(&v);

  // The old data space was normally swept conservatively so that the iterator
  // doesn't work, so we normally skip the next bit.
  if (!HEAP->old_data_space()->was_swept_conservatively()) {
    HeapObjectIterator data_it(HEAP->old_data_space());
    for (HeapObject* object = data_it.Next();
         object != NULL; object = data_it.Next())
      object->Iterate(&v);
  }
}
#endif  // VERIFY_HEAP


void Heap::CheckNewSpaceExpansionCriteria() {
  if (new_space_.Capacity() < new_space_.MaximumCapacity() &&
      survived_since_last_expansion_ > new_space_.Capacity() &&
      !new_space_high_promotion_mode_active_) {
    // Grow the size of new space if there is room to grow, enough data
    // has survived scavenge since the last expansion and we are not in
    // high promotion mode.
    new_space_.Grow();
    survived_since_last_expansion_ = 0;
  }
}

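
// A new-space object counts as unscavenged if it has not been copied yet,
// i.e. its map word does not hold a forwarding address.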
static bool IsUnscavengedHeapObject(Heap* heap, Object** p) {
  return heap->InNewSpace(*p) &&
         !HeapObject::cast(*p)->map_word().IsForwardingAddress();
}


void Heap::ScavengeStoreBufferCallback(
    Heap* heap,
    MemoryChunk* page,
    StoreBufferEvent event) {
  heap->store_buffer_rebuilder_.Callback(page, event);
}

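
// Rebuilds the store buffer while it is scanned during a scavenge: pages that
// contribute too many slots are switched to scan-on-scavenge mode and their
// individual entries are dropped from the buffer.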
void StoreBufferRebuilder::Callback(MemoryChunk* page, StoreBufferEvent event) {
  if (event == kStoreBufferStartScanningPagesEvent) {
    start_of_current_page_ = NULL;
    current_page_ = NULL;
  } else if (event == kStoreBufferScanningPageEvent) {
    if (current_page_ != NULL) {
      // If this page already overflowed the store buffer during this
      // iteration.
      if (current_page_->scan_on_scavenge()) {
        // Then we should wipe out the entries that have been added for it.
        store_buffer_->SetTop(start_of_current_page_);
      } else if (store_buffer_->Top() - start_of_current_page_ >=
                 (store_buffer_->Limit() - store_buffer_->Top()) >> 2) {
        // Did we find too many pointers in the previous page? The heuristic is
        // that no page can take more than 1/5 of the remaining slots in the
        // store buffer.
        current_page_->set_scan_on_scavenge(true);
        store_buffer_->SetTop(start_of_current_page_);
      } else {
        // In this case the page we scanned took a reasonable number of slots
        // in the store buffer. It has now been rehabilitated and is no longer
        // marked scan_on_scavenge.
        ASSERT(!current_page_->scan_on_scavenge());
      }
    }
    start_of_current_page_ = store_buffer_->Top();
    current_page_ = page;
  } else if (event == kStoreBufferFullEvent) {
    // The current page overflowed the store buffer again. Wipe out its entries
    // in the store buffer and mark it scan-on-scavenge again. This may happen
    // several times while scanning.
    if (current_page_ == NULL) {
      // Store Buffer overflowed while scanning promoted objects. These are not
      // in any particular page, though they are likely to be clustered by the
      // allocation routines.
      store_buffer_->EnsureSpace(StoreBuffer::kStoreBufferSize / 2);
    } else {
      // Store Buffer overflowed while scanning a particular old space page for
      // pointers to new space.
      ASSERT(current_page_ == page);
      ASSERT(page != NULL);
      current_page_->set_scan_on_scavenge(true);
      ASSERT(start_of_current_page_ != store_buffer_->Top());
      store_buffer_->SetTop(start_of_current_page_);
    }
  }
}

void PromotionQueue::Initialize() {
  // Assumes that a NewSpacePage exactly fits a number of promotion queue
  // entries (where each is a pair of intptr_t). This allows us to simplify
  // the test for when to switch pages.
  ASSERT((Page::kPageSize - MemoryChunk::kBodyOffset) % (2 * kPointerSize)
         == 0);
  limit_ = reinterpret_cast<intptr_t*>(heap_->new_space()->ToSpaceStart());
  front_ = rear_ =
      reinterpret_cast<intptr_t*>(heap_->new_space()->ToSpaceEnd());
  emergency_stack_ = NULL;
}

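
// The promotion queue is laid out at the top of to-space. RelocateQueueHead
// copies the entries at the current head onto a separate emergency stack on
// the C++ heap, presumably so that scavenge allocation cannot overwrite them.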
void PromotionQueue::RelocateQueueHead() {
  ASSERT(emergency_stack_ == NULL);

  Page* p = Page::FromAllocationTop(reinterpret_cast<Address>(rear_));
  intptr_t* head_start = rear_;
  intptr_t* head_end =
      Min(front_, reinterpret_cast<intptr_t*>(p->area_end()));

  int entries_count =
      static_cast<int>(head_end - head_start) / kEntrySizeInWords;

  emergency_stack_ = new List<Entry>(2 * entries_count);

  while (head_start != head_end) {
    int size = static_cast<int>(*(head_start++));
    HeapObject* obj = reinterpret_cast<HeapObject*>(*(head_start++));
    emergency_stack_->Add(Entry(obj, size));
  }
}

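
// Weak object retainer used while scavenging: objects outside from-space are
// kept as they are, objects that have already been copied are replaced by
// their forwarding address, and everything else is dropped.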
class ScavengeWeakObjectRetainer : public WeakObjectRetainer {
 public:
  explicit ScavengeWeakObjectRetainer(Heap* heap) : heap_(heap) { }

  virtual Object* RetainAs(Object* object) {
    if (!heap_->InFromSpace(object)) {
      return object;
    }

    MapWord map_word = HeapObject::cast(object)->map_word();
    if (map_word.IsForwardingAddress()) {
      return map_word.ToForwardingAddress();
    }
    return NULL;
  }

 private:
  Heap* heap_;
};

void Heap::Scavenge() {
  RelocationLock relocation_lock(this);

#ifdef VERIFY_HEAP
  if (FLAG_verify_heap) VerifyNonPointerSpacePointers();
#endif

  gc_state_ = SCAVENGE;

  // Implements Cheney's copying algorithm.
  LOG(isolate_, ResourceEvent("scavenge", "begin"));

  // Clear descriptor cache.
  isolate_->descriptor_lookup_cache()->Clear();

  // Used for updating survived_since_last_expansion_ at function end.
  intptr_t survived_watermark = PromotedSpaceSizeOfObjects();

  CheckNewSpaceExpansionCriteria();

  SelectScavengingVisitorsTable();

  incremental_marking()->PrepareForScavenge();

  paged_space(OLD_DATA_SPACE)->EnsureSweeperProgress(new_space_.Size());
  paged_space(OLD_POINTER_SPACE)->EnsureSweeperProgress(new_space_.Size());

  // Flip the semispaces. After flipping, to space is empty, from space has
  // live objects.
  new_space_.Flip();
  new_space_.ResetAllocationInfo();

  // We need to sweep newly copied objects which can be either in the
  // to space or promoted to the old generation. For to-space
  // objects, we treat the bottom of the to space as a queue. Newly
  // copied and unswept objects lie between a 'front' mark and the
  // allocation pointer.
  //
  // Promoted objects can go into various old-generation spaces, and
  // can be allocated internally in the spaces (from the free list).
  // We treat the top of the to space as a queue of addresses of
  // promoted objects. The addresses of newly promoted and unswept
  // objects lie between a 'front' mark and a 'rear' mark that is
  // updated as a side effect of promoting an object.
  //
  // There is guaranteed to be enough room at the top of the to space
  // for the addresses of promoted objects: every object promoted
  // frees up its size in bytes from the top of the new space, and
  // objects are at least one pointer in size.
  Address new_space_front = new_space_.ToSpaceStart();
  promotion_queue_.Initialize();

#ifdef DEBUG
  store_buffer()->Clean();
#endif

  ScavengeVisitor scavenge_visitor(this);
  // Copy roots.
  IterateRoots(&scavenge_visitor, VISIT_ALL_IN_SCAVENGE);

  // Copy objects reachable from the old generation.
  {
    StoreBufferRebuildScope scope(this,
                                  store_buffer(),
                                  &ScavengeStoreBufferCallback);
    store_buffer()->IteratePointersToNewSpace(&ScavengeObject);
  }

  // Copy objects reachable from simple cells by scavenging cell values
  // directly.
  HeapObjectIterator cell_iterator(cell_space_);
  for (HeapObject* heap_object = cell_iterator.Next();
       heap_object != NULL;
       heap_object = cell_iterator.Next()) {
    if (heap_object->IsCell()) {
      Cell* cell = Cell::cast(heap_object);
      Address value_address = cell->ValueAddress();
      scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(value_address));
    }
  }

  // Copy objects reachable from global property cells by scavenging global
  // property cell values directly.
  HeapObjectIterator js_global_property_cell_iterator(property_cell_space_);
  for (HeapObject* heap_object = js_global_property_cell_iterator.Next();
       heap_object != NULL;
       heap_object = js_global_property_cell_iterator.Next()) {
    if (heap_object->IsPropertyCell()) {
      PropertyCell* cell = PropertyCell::cast(heap_object);
      Address value_address = cell->ValueAddress();
      scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(value_address));
      Address type_address = cell->TypeAddress();
      scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(type_address));
    }
  }

  // Copy objects reachable from the code flushing candidates list.
  MarkCompactCollector* collector = mark_compact_collector();
  if (collector->is_code_flushing_enabled()) {
    collector->code_flusher()->IteratePointersToFromSpace(&scavenge_visitor);
  }

  // Scavenge objects reachable from the native contexts list directly.
  scavenge_visitor.VisitPointer(BitCast<Object**>(&native_contexts_list_));

  new_space_front = DoScavenge(&scavenge_visitor, new_space_front);

  while (isolate()->global_handles()->IterateObjectGroups(
      &scavenge_visitor, &IsUnscavengedHeapObject)) {
    new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
  }
  isolate()->global_handles()->RemoveObjectGroups();
  isolate()->global_handles()->RemoveImplicitRefGroups();

  isolate_->global_handles()->IdentifyNewSpaceWeakIndependentHandles(
      &IsUnscavengedHeapObject);
  isolate_->global_handles()->IterateNewSpaceWeakIndependentRoots(
      &scavenge_visitor);
  new_space_front = DoScavenge(&scavenge_visitor, new_space_front);

  UpdateNewSpaceReferencesInExternalStringTable(
      &UpdateNewSpaceReferenceInExternalStringTableEntry);

  error_object_list_.UpdateReferencesInNewSpace(this);

  promotion_queue_.Destroy();

  if (!FLAG_watch_ic_patching) {
    isolate()->runtime_profiler()->UpdateSamplesAfterScavenge();
  }
  incremental_marking()->UpdateMarkingDequeAfterScavenge();

  ScavengeWeakObjectRetainer weak_object_retainer(this);
  ProcessWeakReferences(&weak_object_retainer);

  ASSERT(new_space_front == new_space_.top());

  // Set age mark.
  new_space_.set_age_mark(new_space_.top());

  new_space_.LowerInlineAllocationLimit(
      new_space_.inline_allocation_limit_step());

  // Update how much has survived scavenge.
  IncrementYoungSurvivorsCounter(static_cast<int>(
      (PromotedSpaceSizeOfObjects() - survived_watermark) + new_space_.Size()));

  LOG(isolate_, ResourceEvent("scavenge", "end"));

  gc_state_ = NOT_IN_GC;

  scavenges_since_last_idle_round_++;
}

String* Heap::UpdateNewSpaceReferenceInExternalStringTableEntry(Heap* heap,
                                                                Object** p) {
  MapWord first_word = HeapObject::cast(*p)->map_word();

  if (!first_word.IsForwardingAddress()) {
    // Unreachable external string can be finalized.
    heap->FinalizeExternalString(String::cast(*p));
    return NULL;
  }

  // String is still reachable.
  return String::cast(first_word.ToForwardingAddress());
}


void Heap::UpdateNewSpaceReferencesInExternalStringTable(
    ExternalStringTableUpdaterCallback updater_func) {
#ifdef VERIFY_HEAP
  if (FLAG_verify_heap) {
    external_string_table_.Verify();
  }
#endif

  if (external_string_table_.new_space_strings_.is_empty()) return;

  Object** start = &external_string_table_.new_space_strings_[0];
  Object** end = start + external_string_table_.new_space_strings_.length();
  Object** last = start;

  for (Object** p = start; p < end; ++p) {
    ASSERT(InFromSpace(*p));
    String* target = updater_func(this, p);

    if (target == NULL) continue;

    ASSERT(target->IsExternalString());

    if (InNewSpace(target)) {
      // String is still in new space. Update the table entry.
      *last = target;
      ++last;
    } else {
      // String got promoted. Move it to the old string list.
      external_string_table_.AddOldString(target);
    }
  }

  ASSERT(last <= end);
  external_string_table_.ShrinkNewStrings(static_cast<int>(last - start));
}

void Heap::UpdateReferencesInExternalStringTable(
    ExternalStringTableUpdaterCallback updater_func) {
  // Update old space string references.
  if (external_string_table_.old_space_strings_.length() > 0) {
    Object** start = &external_string_table_.old_space_strings_[0];
    Object** end = start + external_string_table_.old_space_strings_.length();
    for (Object** p = start; p < end; ++p) *p = updater_func(this, p);
  }

  UpdateNewSpaceReferencesInExternalStringTable(updater_func);
}

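
// Several heap lists (native contexts, array buffers, allocation sites, ...)
// are weakly linked through fields on the objects themselves.
// WeakListVisitor<T> describes how to read and write the link for a given
// type, and VisitWeakList walks such a list, dropping dead elements via the
// retainer and recording slots for the write barrier when requested.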
template <class T>
struct WeakListVisitor;


template <class T>
static Object* VisitWeakList(Heap* heap,
                             Object* list,
                             WeakObjectRetainer* retainer,
                             bool record_slots) {
  Object* undefined = heap->undefined_value();
  Object* head = undefined;
  T* tail = NULL;
  MarkCompactCollector* collector = heap->mark_compact_collector();
  while (list != undefined) {
    // Check whether to keep the candidate in the list.
    T* candidate = reinterpret_cast<T*>(list);
    Object* retained = retainer->RetainAs(list);
    if (retained != NULL) {
      if (head == undefined) {
        // First element in the list.
        head = retained;
      } else {
        // Subsequent elements in the list.
        ASSERT(tail != NULL);
        WeakListVisitor<T>::SetWeakNext(tail, retained);
        if (record_slots) {
          Object** next_slot =
              HeapObject::RawField(tail, WeakListVisitor<T>::WeakNextOffset());
          collector->RecordSlot(next_slot, next_slot, retained);
        }
      }
      // Retained object is new tail.
      ASSERT(!retained->IsUndefined());
      candidate = reinterpret_cast<T*>(retained);
      tail = candidate;

      // tail is a live object, visit it.
      WeakListVisitor<T>::VisitLiveObject(
          heap, tail, retainer, record_slots);
    } else {
      WeakListVisitor<T>::VisitPhantomObject(heap, candidate);
    }

    // Move to next element in the list.
    list = WeakListVisitor<T>::WeakNext(candidate);
  }

  // Terminate the list if there are one or more elements.
  if (tail != NULL) {
    WeakListVisitor<T>::SetWeakNext(tail, undefined);
  }
  return head;
}

template<>
struct WeakListVisitor<JSFunction> {
  static void SetWeakNext(JSFunction* function, Object* next) {
    function->set_next_function_link(next);
  }

  static Object* WeakNext(JSFunction* function) {
    return function->next_function_link();
  }

  static int WeakNextOffset() {
    return JSFunction::kNextFunctionLinkOffset;
  }

  static void VisitLiveObject(Heap*, JSFunction*,
                              WeakObjectRetainer*, bool) {
  }

  static void VisitPhantomObject(Heap*, JSFunction*) {
  }
};


template<>
struct WeakListVisitor<Context> {
  static void SetWeakNext(Context* context, Object* next) {
    context->set(Context::NEXT_CONTEXT_LINK,
                 next,
                 UPDATE_WRITE_BARRIER);
  }

  static Object* WeakNext(Context* context) {
    return context->get(Context::NEXT_CONTEXT_LINK);
  }

  static void VisitLiveObject(Heap* heap,
                              Context* context,
                              WeakObjectRetainer* retainer,
                              bool record_slots) {
    // Process the weak list of optimized functions for the context.
    Object* function_list_head =
        VisitWeakList<JSFunction>(
            heap,
            context->get(Context::OPTIMIZED_FUNCTIONS_LIST),
            retainer,
            record_slots);
    context->set(Context::OPTIMIZED_FUNCTIONS_LIST,
                 function_list_head,
                 UPDATE_WRITE_BARRIER);
    if (record_slots) {
      Object** optimized_functions =
          HeapObject::RawField(
              context, FixedArray::SizeFor(Context::OPTIMIZED_FUNCTIONS_LIST));
      heap->mark_compact_collector()->RecordSlot(
          optimized_functions, optimized_functions, function_list_head);
    }
  }

  static void VisitPhantomObject(Heap*, Context*) {
  }

  static int WeakNextOffset() {
    return FixedArray::SizeFor(Context::NEXT_CONTEXT_LINK);
  }
};

void Heap::ProcessWeakReferences(WeakObjectRetainer* retainer) {
  // We don't record weak slots during marking or scavenges.
  // Instead we do it once when we complete a mark-compact cycle.
  // Note that the write barrier has no effect if we are already in the middle
  // of a compacting mark-sweep cycle and we have to record slots manually.
  bool record_slots =
      gc_state() == MARK_COMPACT &&
      mark_compact_collector()->is_compacting();
  ProcessArrayBuffers(retainer, record_slots);
  ProcessNativeContexts(retainer, record_slots);
  ProcessAllocationSites(retainer, record_slots);
}


void Heap::ProcessNativeContexts(WeakObjectRetainer* retainer,
                                 bool record_slots) {
  Object* head =
      VisitWeakList<Context>(
          this, native_contexts_list(), retainer, record_slots);
  // Update the head of the list of contexts.
  native_contexts_list_ = head;
}

template<>
struct WeakListVisitor<JSArrayBufferView> {
  static void SetWeakNext(JSArrayBufferView* obj, Object* next) {
    obj->set_weak_next(next);
  }

  static Object* WeakNext(JSArrayBufferView* obj) {
    return obj->weak_next();
  }

  static void VisitLiveObject(Heap*,
                              JSArrayBufferView* obj,
                              WeakObjectRetainer* retainer,
                              bool record_slots) {}

  static void VisitPhantomObject(Heap*, JSArrayBufferView*) {}

  static int WeakNextOffset() {
    return JSArrayBufferView::kWeakNextOffset;
  }
};


template<>
struct WeakListVisitor<JSArrayBuffer> {
  static void SetWeakNext(JSArrayBuffer* obj, Object* next) {
    obj->set_weak_next(next);
  }

  static Object* WeakNext(JSArrayBuffer* obj) {
    return obj->weak_next();
  }

  static void VisitLiveObject(Heap* heap,
                              JSArrayBuffer* array_buffer,
                              WeakObjectRetainer* retainer,
                              bool record_slots) {
    Object* typed_array_obj =
        VisitWeakList<JSArrayBufferView>(
            heap,
            array_buffer->weak_first_view(),
            retainer, record_slots);
    array_buffer->set_weak_first_view(typed_array_obj);
    if (typed_array_obj != heap->undefined_value() && record_slots) {
      Object** slot = HeapObject::RawField(
          array_buffer, JSArrayBuffer::kWeakFirstViewOffset);
      heap->mark_compact_collector()->RecordSlot(slot, slot, typed_array_obj);
    }
  }

  static void VisitPhantomObject(Heap* heap, JSArrayBuffer* phantom) {
    Runtime::FreeArrayBuffer(heap->isolate(), phantom);
  }

  static int WeakNextOffset() {
    return JSArrayBuffer::kWeakNextOffset;
  }
};


void Heap::ProcessArrayBuffers(WeakObjectRetainer* retainer,
                               bool record_slots) {
  Object* array_buffer_obj =
      VisitWeakList<JSArrayBuffer>(this,
                                   array_buffers_list(),
                                   retainer, record_slots);
  set_array_buffers_list(array_buffer_obj);
}


void Heap::TearDownArrayBuffers() {
  Object* undefined = undefined_value();
  for (Object* o = array_buffers_list(); o != undefined;) {
    JSArrayBuffer* buffer = JSArrayBuffer::cast(o);
    Runtime::FreeArrayBuffer(isolate(), buffer);
    o = buffer->weak_next();
  }
  array_buffers_list_ = undefined;
}

template<>
struct WeakListVisitor<AllocationSite> {
  static void SetWeakNext(AllocationSite* obj, Object* next) {
    obj->set_weak_next(next);
  }

  static Object* WeakNext(AllocationSite* obj) {
    return obj->weak_next();
  }

  static void VisitLiveObject(Heap* heap,
                              AllocationSite* array_buffer,
                              WeakObjectRetainer* retainer,
                              bool record_slots) {}

  static void VisitPhantomObject(Heap* heap, AllocationSite* phantom) {}

  static int WeakNextOffset() {
    return AllocationSite::kWeakNextOffset;
  }
};


void Heap::ProcessAllocationSites(WeakObjectRetainer* retainer,
                                  bool record_slots) {
  Object* allocation_site_obj =
      VisitWeakList<AllocationSite>(this,
                                    allocation_sites_list(),
                                    retainer, record_slots);
  set_allocation_sites_list(allocation_site_obj);
}

void Heap::VisitExternalResources(v8::ExternalResourceVisitor* visitor) {
  DisallowHeapAllocation no_allocation;

  // Both the external string table and the string table may contain
  // external strings, but neither lists them exhaustively, nor is the
  // intersection set empty. Therefore we iterate over the external string
  // table first, ignoring internalized strings, and then over the
  // internalized string table.

  class ExternalStringTableVisitorAdapter : public ObjectVisitor {
   public:
    explicit ExternalStringTableVisitorAdapter(
        v8::ExternalResourceVisitor* visitor) : visitor_(visitor) {}
    virtual void VisitPointers(Object** start, Object** end) {
      for (Object** p = start; p < end; p++) {
        // Visit non-internalized external strings,
        // since internalized strings are listed in the string table.
        if (!(*p)->IsInternalizedString()) {
          ASSERT((*p)->IsExternalString());
          visitor_->VisitExternalString(Utils::ToLocal(
              Handle<String>(String::cast(*p))));
        }
      }
    }
   private:
    v8::ExternalResourceVisitor* visitor_;
  } external_string_table_visitor(visitor);

  external_string_table_.Iterate(&external_string_table_visitor);

  class StringTableVisitorAdapter : public ObjectVisitor {
   public:
    explicit StringTableVisitorAdapter(
        v8::ExternalResourceVisitor* visitor) : visitor_(visitor) {}
    virtual void VisitPointers(Object** start, Object** end) {
      for (Object** p = start; p < end; p++) {
        if ((*p)->IsExternalString()) {
          ASSERT((*p)->IsInternalizedString());
          visitor_->VisitExternalString(Utils::ToLocal(
              Handle<String>(String::cast(*p))));
        }
      }
    }
   private:
    v8::ExternalResourceVisitor* visitor_;
  } string_table_visitor(visitor);

  string_table()->IterateElements(&string_table_visitor);
}

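
// Static visitor that scavenges a single pointer: if the referenced object is
// still in new space it is copied or promoted via Heap::ScavengeObject.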
1840 class NewSpaceScavenger : public StaticNewSpaceVisitor<NewSpaceScavenger> {
1842 static inline void VisitPointer(Heap* heap, Object** p) {
1843 Object* object = *p;
1844 if (!heap->InNewSpace(object)) return;
1845 Heap::ScavengeObject(reinterpret_cast<HeapObject**>(p),
1846 reinterpret_cast<HeapObject*>(object));
1851 Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
1852 Address new_space_front) {
1854 SemiSpace::AssertValidRange(new_space_front, new_space_.top());
1855 // The addresses new_space_front and new_space_.top() define a
1856 // queue of unprocessed copied objects. Process them until the
1858 while (new_space_front != new_space_.top()) {
1859 if (!NewSpacePage::IsAtEnd(new_space_front)) {
1860 HeapObject* object = HeapObject::FromAddress(new_space_front);
1862 NewSpaceScavenger::IterateBody(object->map(), object);
1865 NewSpacePage::FromLimit(new_space_front)->next_page()->area_start();
1869 // Promote and process all the to-be-promoted objects.
1871 StoreBufferRebuildScope scope(this,
1873 &ScavengeStoreBufferCallback);
1874 while (!promotion_queue()->is_empty()) {
1877 promotion_queue()->remove(&target, &size);
1879         // The promoted object might already have been partially visited
1880         // during old space pointer iteration. Thus we search specifically
1881         // for pointers to the from-semispace instead of looking for pointers to new space.
1883 ASSERT(!target->IsMap());
1884 IterateAndMarkPointersToFromSpace(target->address(),
1885 target->address() + size,
1890 // Take another spin if there are now unswept objects in new space
1891 // (there are currently no more unswept promoted objects).
1892 } while (new_space_front != new_space_.top());
1894 return new_space_front;
1898 STATIC_ASSERT((FixedDoubleArray::kHeaderSize & kDoubleAlignmentMask) == 0);
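// If the allocation is not already double aligned, the object is shifted by
// one word and the unused word (at the front or the back of the allocation)
// becomes a one-pointer filler so the heap stays iterable.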
1901 INLINE(static HeapObject* EnsureDoubleAligned(Heap* heap,
1905 static HeapObject* EnsureDoubleAligned(Heap* heap,
1908 if ((OffsetFrom(object->address()) & kDoubleAlignmentMask) != 0) {
1909 heap->CreateFillerObjectAt(object->address(), kPointerSize);
1910 return HeapObject::FromAddress(object->address() + kPointerSize);
1912 heap->CreateFillerObjectAt(object->address() + size - kPointerSize,
1919 enum LoggingAndProfiling {
1920 LOGGING_AND_PROFILING_ENABLED,
1921 LOGGING_AND_PROFILING_DISABLED
1925 enum MarksHandling { TRANSFER_MARKS, IGNORE_MARKS };
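// Scavenging dispatches through a table indexed by visitor id. The visitor is
// specialized on whether incremental-marking mark bits have to be transferred
// to the new copy and on whether logging/profiling hooks need to run, so the
// common case pays for neither.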
1928 template<MarksHandling marks_handling,
1929 LoggingAndProfiling logging_and_profiling_mode>
1930 class ScavengingVisitor : public StaticVisitorBase {
1932 static void Initialize() {
1933 table_.Register(kVisitSeqOneByteString, &EvacuateSeqOneByteString);
1934 table_.Register(kVisitSeqTwoByteString, &EvacuateSeqTwoByteString);
1935 table_.Register(kVisitShortcutCandidate, &EvacuateShortcutCandidate);
1936 table_.Register(kVisitByteArray, &EvacuateByteArray);
1937 table_.Register(kVisitFixedArray, &EvacuateFixedArray);
1938 table_.Register(kVisitFixedDoubleArray, &EvacuateFixedDoubleArray);
1940 table_.Register(kVisitNativeContext,
1941 &ObjectEvacuationStrategy<POINTER_OBJECT>::
1942 template VisitSpecialized<Context::kSize>);
1944 table_.Register(kVisitConsString,
1945 &ObjectEvacuationStrategy<POINTER_OBJECT>::
1946 template VisitSpecialized<ConsString::kSize>);
1948 table_.Register(kVisitSlicedString,
1949 &ObjectEvacuationStrategy<POINTER_OBJECT>::
1950 template VisitSpecialized<SlicedString::kSize>);
1952 table_.Register(kVisitSymbol,
1953 &ObjectEvacuationStrategy<POINTER_OBJECT>::
1954 template VisitSpecialized<Symbol::kSize>);
1956 table_.Register(kVisitSharedFunctionInfo,
1957 &ObjectEvacuationStrategy<POINTER_OBJECT>::
1958 template VisitSpecialized<SharedFunctionInfo::kSize>);
1960 table_.Register(kVisitJSWeakMap,
1961 &ObjectEvacuationStrategy<POINTER_OBJECT>::
1964 table_.Register(kVisitJSWeakSet,
1965 &ObjectEvacuationStrategy<POINTER_OBJECT>::
1968 table_.Register(kVisitJSArrayBuffer,
1969 &ObjectEvacuationStrategy<POINTER_OBJECT>::
1972 table_.Register(kVisitJSTypedArray,
1973 &ObjectEvacuationStrategy<POINTER_OBJECT>::
1976 table_.Register(kVisitJSDataView,
1977 &ObjectEvacuationStrategy<POINTER_OBJECT>::
1980 table_.Register(kVisitJSRegExp,
1981 &ObjectEvacuationStrategy<POINTER_OBJECT>::
1984 if (marks_handling == IGNORE_MARKS) {
1985 table_.Register(kVisitJSFunction,
1986 &ObjectEvacuationStrategy<POINTER_OBJECT>::
1987 template VisitSpecialized<JSFunction::kSize>);
1989 table_.Register(kVisitJSFunction, &EvacuateJSFunction);
1992 table_.RegisterSpecializations<ObjectEvacuationStrategy<DATA_OBJECT>,
1994 kVisitDataObjectGeneric>();
1996 table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>,
1998 kVisitJSObjectGeneric>();
2000 table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>,
2002 kVisitStructGeneric>();
2005 static VisitorDispatchTable<ScavengingCallback>* GetTable() {
2010 enum ObjectContents { DATA_OBJECT, POINTER_OBJECT };
2011 enum SizeRestriction { SMALL, UNKNOWN_SIZE };
2013 static void RecordCopiedObject(Heap* heap, HeapObject* obj) {
2014 bool should_record = false;
2016 should_record = FLAG_heap_stats;
2018 should_record = should_record || FLAG_log_gc;
2019 if (should_record) {
2020 if (heap->new_space()->Contains(obj)) {
2021 heap->new_space()->RecordAllocation(obj);
2023 heap->new_space()->RecordPromotion(obj);
2028   // Helper function used by EvacuateObject to copy a source object to a
2029   // freshly allocated target object and to update the forwarding pointer in
2030   // the source object.
2031 INLINE(static void MigrateObject(Heap* heap,
2035 // Copy the content of source to target.
2036 heap->CopyBlock(target->address(), source->address(), size);
2038 // Set the forwarding address.
2039 source->set_map_word(MapWord::FromForwardingAddress(target));
2041 if (logging_and_profiling_mode == LOGGING_AND_PROFILING_ENABLED) {
2042 // Update NewSpace stats if necessary.
2043 RecordCopiedObject(heap, target);
2044 HEAP_PROFILE(heap, ObjectMoveEvent(source->address(), target->address()));
2045 Isolate* isolate = heap->isolate();
2046 if (isolate->logger()->is_logging_code_events() ||
2047 isolate->cpu_profiler()->is_profiling()) {
2048 if (target->IsSharedFunctionInfo()) {
2049 PROFILE(isolate, SharedFunctionInfoMoveEvent(
2050 source->address(), target->address()));
2055 if (marks_handling == TRANSFER_MARKS) {
2056 if (Marking::TransferColor(source, target)) {
2057 MemoryChunk::IncrementLiveBytesFromGC(target->address(), size);
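// Allocates a target for 'object' either in old space (if the object should be
// promoted) or in new space, migrates the object there and, for promoted
// pointer-containing objects, queues the target so that its slots are later
// rescanned for pointers into from-space.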
2063 template<ObjectContents object_contents,
2064 SizeRestriction size_restriction,
2066 static inline void EvacuateObject(Map* map,
2070 SLOW_ASSERT((size_restriction != SMALL) ||
2071 (object_size <= Page::kMaxNonCodeHeapObjectSize));
2072 SLOW_ASSERT(object->Size() == object_size);
2074 int allocation_size = object_size;
2075 if (alignment != kObjectAlignment) {
2076 ASSERT(alignment == kDoubleAlignment);
2077 allocation_size += kPointerSize;
2080 Heap* heap = map->GetHeap();
2081 if (heap->ShouldBePromoted(object->address(), object_size)) {
2082 MaybeObject* maybe_result;
2084 if ((size_restriction != SMALL) &&
2085 (allocation_size > Page::kMaxNonCodeHeapObjectSize)) {
2086 maybe_result = heap->lo_space()->AllocateRaw(allocation_size,
2089 if (object_contents == DATA_OBJECT) {
2090 maybe_result = heap->old_data_space()->AllocateRaw(allocation_size);
2093 heap->old_pointer_space()->AllocateRaw(allocation_size);
2097 Object* result = NULL; // Initialization to please compiler.
2098 if (maybe_result->ToObject(&result)) {
2099 HeapObject* target = HeapObject::cast(result);
2101 if (alignment != kObjectAlignment) {
2102 target = EnsureDoubleAligned(heap, target, allocation_size);
2105 // Order is important: slot might be inside of the target if target
2106         // was allocated over a dead object and slot comes from the store buffer.
2109 MigrateObject(heap, object, target, object_size);
2111 if (object_contents == POINTER_OBJECT) {
2112 if (map->instance_type() == JS_FUNCTION_TYPE) {
2113 heap->promotion_queue()->insert(
2114 target, JSFunction::kNonWeakFieldsEndOffset);
2116 heap->promotion_queue()->insert(target, object_size);
2120 heap->tracer()->increment_promoted_objects_size(object_size);
2124 MaybeObject* allocation = heap->new_space()->AllocateRaw(allocation_size);
2125 heap->promotion_queue()->SetNewLimit(heap->new_space()->top());
2126 Object* result = allocation->ToObjectUnchecked();
2127 HeapObject* target = HeapObject::cast(result);
2129 if (alignment != kObjectAlignment) {
2130 target = EnsureDoubleAligned(heap, target, allocation_size);
2133 // Order is important: slot might be inside of the target if target
2134     // was allocated over a dead object and slot comes from the store buffer.
2137 MigrateObject(heap, object, target, object_size);
2142 static inline void EvacuateJSFunction(Map* map,
2144 HeapObject* object) {
2145 ObjectEvacuationStrategy<POINTER_OBJECT>::
2146 template VisitSpecialized<JSFunction::kSize>(map, slot, object);
2148 HeapObject* target = *slot;
2149 MarkBit mark_bit = Marking::MarkBitFrom(target);
2150 if (Marking::IsBlack(mark_bit)) {
2151       // This object is black and it might not be rescanned by the marker.
2152       // We should explicitly record the code entry slot for compaction because
2153       // promotion queue processing (IterateAndMarkPointersToFromSpace) will
2154       // miss it, as it is not HeapObject-tagged.
2155 Address code_entry_slot =
2156 target->address() + JSFunction::kCodeEntryOffset;
2157 Code* code = Code::cast(Code::GetObjectFromEntryAddress(code_entry_slot));
2158 map->GetHeap()->mark_compact_collector()->
2159 RecordCodeEntrySlot(code_entry_slot, code);
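// The Evacuate* helpers below compute the instance size for their particular
// object type and then defer to EvacuateObject with the matching
// contents/size/alignment template arguments.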
2164 static inline void EvacuateFixedArray(Map* map,
2166 HeapObject* object) {
2167 int object_size = FixedArray::BodyDescriptor::SizeOf(map, object);
2168 EvacuateObject<POINTER_OBJECT, UNKNOWN_SIZE, kObjectAlignment>(map,
2175 static inline void EvacuateFixedDoubleArray(Map* map,
2177 HeapObject* object) {
2178 int length = reinterpret_cast<FixedDoubleArray*>(object)->length();
2179 int object_size = FixedDoubleArray::SizeFor(length);
2180 EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE, kDoubleAlignment>(
2188 static inline void EvacuateByteArray(Map* map,
2190 HeapObject* object) {
2191 int object_size = reinterpret_cast<ByteArray*>(object)->ByteArraySize();
2192 EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE, kObjectAlignment>(
2193 map, slot, object, object_size);
2197 static inline void EvacuateSeqOneByteString(Map* map,
2199 HeapObject* object) {
2200 int object_size = SeqOneByteString::cast(object)->
2201 SeqOneByteStringSize(map->instance_type());
2202 EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE, kObjectAlignment>(
2203 map, slot, object, object_size);
2207 static inline void EvacuateSeqTwoByteString(Map* map,
2209 HeapObject* object) {
2210 int object_size = SeqTwoByteString::cast(object)->
2211 SeqTwoByteStringSize(map->instance_type());
2212 EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE, kObjectAlignment>(
2213 map, slot, object, object_size);
2217 static inline bool IsShortcutCandidate(int type) {
2218 return ((type & kShortcutTypeMask) == kShortcutTypeTag);
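// Cons strings whose second part is the empty string are short-circuited to
// their first part instead of being copied; the shortcut is only taken while
// marks are ignored (see also SelectScavengingVisitorsTable).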
2221 static inline void EvacuateShortcutCandidate(Map* map,
2223 HeapObject* object) {
2224 ASSERT(IsShortcutCandidate(map->instance_type()));
2226 Heap* heap = map->GetHeap();
2228 if (marks_handling == IGNORE_MARKS &&
2229 ConsString::cast(object)->unchecked_second() ==
2230 heap->empty_string()) {
2232 HeapObject::cast(ConsString::cast(object)->unchecked_first());
2236 if (!heap->InNewSpace(first)) {
2237 object->set_map_word(MapWord::FromForwardingAddress(first));
2241 MapWord first_word = first->map_word();
2242 if (first_word.IsForwardingAddress()) {
2243 HeapObject* target = first_word.ToForwardingAddress();
2246 object->set_map_word(MapWord::FromForwardingAddress(target));
2250 heap->DoScavengeObject(first->map(), slot, first);
2251 object->set_map_word(MapWord::FromForwardingAddress(*slot));
2255 int object_size = ConsString::kSize;
2256 EvacuateObject<POINTER_OBJECT, SMALL, kObjectAlignment>(
2257 map, slot, object, object_size);
2260 template<ObjectContents object_contents>
2261 class ObjectEvacuationStrategy {
2263 template<int object_size>
2264 static inline void VisitSpecialized(Map* map,
2266 HeapObject* object) {
2267 EvacuateObject<object_contents, SMALL, kObjectAlignment>(
2268 map, slot, object, object_size);
2271 static inline void Visit(Map* map,
2273 HeapObject* object) {
2274 int object_size = map->instance_size();
2275 EvacuateObject<object_contents, SMALL, kObjectAlignment>(
2276 map, slot, object, object_size);
2280 static VisitorDispatchTable<ScavengingCallback> table_;
2284 template<MarksHandling marks_handling,
2285 LoggingAndProfiling logging_and_profiling_mode>
2286 VisitorDispatchTable<ScavengingCallback>
2287 ScavengingVisitor<marks_handling, logging_and_profiling_mode>::table_;
2290 static void InitializeScavengingVisitorsTables() {
2291 ScavengingVisitor<TRANSFER_MARKS,
2292 LOGGING_AND_PROFILING_DISABLED>::Initialize();
2293 ScavengingVisitor<IGNORE_MARKS, LOGGING_AND_PROFILING_DISABLED>::Initialize();
2294 ScavengingVisitor<TRANSFER_MARKS,
2295 LOGGING_AND_PROFILING_ENABLED>::Initialize();
2296 ScavengingVisitor<IGNORE_MARKS, LOGGING_AND_PROFILING_ENABLED>::Initialize();
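// Picks the scavenging dispatch table that matches the current
// incremental-marking and logging/profiling state.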
2300 void Heap::SelectScavengingVisitorsTable() {
2301 bool logging_and_profiling =
2302 isolate()->logger()->is_logging() ||
2303 isolate()->cpu_profiler()->is_profiling() ||
2304 (isolate()->heap_profiler() != NULL &&
2305 isolate()->heap_profiler()->is_profiling());
2307 if (!incremental_marking()->IsMarking()) {
2308 if (!logging_and_profiling) {
2309 scavenging_visitors_table_.CopyFrom(
2310 ScavengingVisitor<IGNORE_MARKS,
2311 LOGGING_AND_PROFILING_DISABLED>::GetTable());
2313 scavenging_visitors_table_.CopyFrom(
2314 ScavengingVisitor<IGNORE_MARKS,
2315 LOGGING_AND_PROFILING_ENABLED>::GetTable());
2318 if (!logging_and_profiling) {
2319 scavenging_visitors_table_.CopyFrom(
2320 ScavengingVisitor<TRANSFER_MARKS,
2321 LOGGING_AND_PROFILING_DISABLED>::GetTable());
2323 scavenging_visitors_table_.CopyFrom(
2324 ScavengingVisitor<TRANSFER_MARKS,
2325 LOGGING_AND_PROFILING_ENABLED>::GetTable());
2328 if (incremental_marking()->IsCompacting()) {
2329     // When compacting, forbid short-circuiting of cons strings.
2330     // Scavenging code relies on the fact that new space objects
2331     // can't be evacuated into an evacuation candidate, but
2332     // short-circuiting violates this assumption.
2333 scavenging_visitors_table_.Register(
2334 StaticVisitorBase::kVisitShortcutCandidate,
2335 scavenging_visitors_table_.GetVisitorById(
2336 StaticVisitorBase::kVisitConsString));
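// Slow path of ScavengeObject: the object has not been forwarded yet, so
// dispatch to the scavenging visitor registered for its map.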
2342 void Heap::ScavengeObjectSlow(HeapObject** p, HeapObject* object) {
2343 SLOW_ASSERT(HEAP->InFromSpace(object));
2344 MapWord first_word = object->map_word();
2345 SLOW_ASSERT(!first_word.IsForwardingAddress());
2346 Map* map = first_word.ToMap();
2347 map->GetHeap()->DoScavengeObject(map, p, object);
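// Allocates a map whose descriptor array, code cache, back pointer and
// prototype are not yet initialized; used only while bootstrapping the first
// maps in CreateInitialMaps, which patches these fields afterwards.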
2351 MaybeObject* Heap::AllocatePartialMap(InstanceType instance_type,
2352 int instance_size) {
2354 MaybeObject* maybe_result = AllocateRawMap();
2355 if (!maybe_result->ToObject(&result)) return maybe_result;
2357 // Map::cast cannot be used due to uninitialized map field.
2358 reinterpret_cast<Map*>(result)->set_map(raw_unchecked_meta_map());
2359 reinterpret_cast<Map*>(result)->set_instance_type(instance_type);
2360 reinterpret_cast<Map*>(result)->set_instance_size(instance_size);
2361 reinterpret_cast<Map*>(result)->set_visitor_id(
2362 StaticVisitorBase::GetVisitorId(instance_type, instance_size));
2363 reinterpret_cast<Map*>(result)->set_inobject_properties(0);
2364 reinterpret_cast<Map*>(result)->set_pre_allocated_property_fields(0);
2365 reinterpret_cast<Map*>(result)->set_unused_property_fields(0);
2366 reinterpret_cast<Map*>(result)->set_bit_field(0);
2367 reinterpret_cast<Map*>(result)->set_bit_field2(0);
2368 int bit_field3 = Map::EnumLengthBits::encode(Map::kInvalidEnumCache) |
2369 Map::OwnsDescriptors::encode(true);
2370 reinterpret_cast<Map*>(result)->set_bit_field3(bit_field3);
2375 MaybeObject* Heap::AllocateMap(InstanceType instance_type,
2377 ElementsKind elements_kind) {
2379 MaybeObject* maybe_result = AllocateRawMap();
2380 if (!maybe_result->To(&result)) return maybe_result;
2382 Map* map = reinterpret_cast<Map*>(result);
2383 map->set_map_no_write_barrier(meta_map());
2384 map->set_instance_type(instance_type);
2385 map->set_visitor_id(
2386 StaticVisitorBase::GetVisitorId(instance_type, instance_size));
2387 map->set_prototype(null_value(), SKIP_WRITE_BARRIER);
2388 map->set_constructor(null_value(), SKIP_WRITE_BARRIER);
2389 map->set_instance_size(instance_size);
2390 map->set_inobject_properties(0);
2391 map->set_pre_allocated_property_fields(0);
2392 map->set_code_cache(empty_fixed_array(), SKIP_WRITE_BARRIER);
2393 map->set_dependent_code(DependentCode::cast(empty_fixed_array()),
2394 SKIP_WRITE_BARRIER);
2395 map->init_back_pointer(undefined_value());
2396 map->set_unused_property_fields(0);
2397 map->set_instance_descriptors(empty_descriptor_array());
2398 map->set_bit_field(0);
2399 map->set_bit_field2(1 << Map::kIsExtensible);
2400 int bit_field3 = Map::EnumLengthBits::encode(Map::kInvalidEnumCache) |
2401 Map::OwnsDescriptors::encode(true);
2402 map->set_bit_field3(bit_field3);
2403 map->set_elements_kind(elements_kind);
2409 MaybeObject* Heap::AllocateCodeCache() {
2410 CodeCache* code_cache;
2411 { MaybeObject* maybe_code_cache = AllocateStruct(CODE_CACHE_TYPE);
2412 if (!maybe_code_cache->To(&code_cache)) return maybe_code_cache;
2414 code_cache->set_default_cache(empty_fixed_array(), SKIP_WRITE_BARRIER);
2415 code_cache->set_normal_type_cache(undefined_value(), SKIP_WRITE_BARRIER);
2420 MaybeObject* Heap::AllocatePolymorphicCodeCache() {
2421 return AllocateStruct(POLYMORPHIC_CODE_CACHE_TYPE);
2425 MaybeObject* Heap::AllocateAccessorPair() {
2426 AccessorPair* accessors;
2427 { MaybeObject* maybe_accessors = AllocateStruct(ACCESSOR_PAIR_TYPE);
2428 if (!maybe_accessors->To(&accessors)) return maybe_accessors;
2430 accessors->set_getter(the_hole_value(), SKIP_WRITE_BARRIER);
2431 accessors->set_setter(the_hole_value(), SKIP_WRITE_BARRIER);
2436 MaybeObject* Heap::AllocateTypeFeedbackInfo() {
2437 TypeFeedbackInfo* info;
2438 { MaybeObject* maybe_info = AllocateStruct(TYPE_FEEDBACK_INFO_TYPE);
2439 if (!maybe_info->To(&info)) return maybe_info;
2441 info->initialize_storage();
2442 info->set_type_feedback_cells(TypeFeedbackCells::cast(empty_fixed_array()),
2443 SKIP_WRITE_BARRIER);
2448 MaybeObject* Heap::AllocateAliasedArgumentsEntry(int aliased_context_slot) {
2449 AliasedArgumentsEntry* entry;
2450 { MaybeObject* maybe_entry = AllocateStruct(ALIASED_ARGUMENTS_ENTRY_TYPE);
2451 if (!maybe_entry->To(&entry)) return maybe_entry;
2453 entry->set_aliased_context_slot(aliased_context_slot);
2458 const Heap::StringTypeTable Heap::string_type_table[] = {
2459 #define STRING_TYPE_ELEMENT(type, size, name, camel_name) \
2460 {type, size, k##camel_name##MapRootIndex},
2461 STRING_TYPE_LIST(STRING_TYPE_ELEMENT)
2462 #undef STRING_TYPE_ELEMENT
2466 const Heap::ConstantStringTable Heap::constant_string_table[] = {
2467 #define CONSTANT_STRING_ELEMENT(name, contents) \
2468 {contents, k##name##RootIndex},
2469 INTERNALIZED_STRING_LIST(CONSTANT_STRING_ELEMENT)
2470 #undef CONSTANT_STRING_ELEMENT
2474 const Heap::StructTable Heap::struct_table[] = {
2475 #define STRUCT_TABLE_ELEMENT(NAME, Name, name) \
2476 { NAME##_TYPE, Name::kSize, k##Name##MapRootIndex },
2477 STRUCT_LIST(STRUCT_TABLE_ELEMENT)
2478 #undef STRUCT_TABLE_ELEMENT
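// Bootstraps the root maps. The meta map, fixed array map and oddball map are
// created as partial maps first, because allocating a regular map already
// depends on them; their remaining fields are fixed up once the empty fixed
// array and the empty descriptor array exist.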
2482 bool Heap::CreateInitialMaps() {
2484 { MaybeObject* maybe_obj = AllocatePartialMap(MAP_TYPE, Map::kSize);
2485 if (!maybe_obj->ToObject(&obj)) return false;
2487 // Map::cast cannot be used due to uninitialized map field.
2488 Map* new_meta_map = reinterpret_cast<Map*>(obj);
2489 set_meta_map(new_meta_map);
2490 new_meta_map->set_map(new_meta_map);
2492 { MaybeObject* maybe_obj =
2493 AllocatePartialMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2494 if (!maybe_obj->ToObject(&obj)) return false;
2496 set_fixed_array_map(Map::cast(obj));
2498 { MaybeObject* maybe_obj = AllocatePartialMap(ODDBALL_TYPE, Oddball::kSize);
2499 if (!maybe_obj->ToObject(&obj)) return false;
2501 set_oddball_map(Map::cast(obj));
2503 // Allocate the empty array.
2504 { MaybeObject* maybe_obj = AllocateEmptyFixedArray();
2505 if (!maybe_obj->ToObject(&obj)) return false;
2507 set_empty_fixed_array(FixedArray::cast(obj));
2509 { MaybeObject* maybe_obj = Allocate(oddball_map(), OLD_POINTER_SPACE);
2510 if (!maybe_obj->ToObject(&obj)) return false;
2512 set_null_value(Oddball::cast(obj));
2513 Oddball::cast(obj)->set_kind(Oddball::kNull);
2515 { MaybeObject* maybe_obj = Allocate(oddball_map(), OLD_POINTER_SPACE);
2516 if (!maybe_obj->ToObject(&obj)) return false;
2518 set_undefined_value(Oddball::cast(obj));
2519 Oddball::cast(obj)->set_kind(Oddball::kUndefined);
2520 ASSERT(!InNewSpace(undefined_value()));
2522 // Allocate the empty descriptor array.
2523 { MaybeObject* maybe_obj = AllocateEmptyFixedArray();
2524 if (!maybe_obj->ToObject(&obj)) return false;
2526 set_empty_descriptor_array(DescriptorArray::cast(obj));
2528 // Fix the instance_descriptors for the existing maps.
2529 meta_map()->set_code_cache(empty_fixed_array());
2530 meta_map()->set_dependent_code(DependentCode::cast(empty_fixed_array()));
2531 meta_map()->init_back_pointer(undefined_value());
2532 meta_map()->set_instance_descriptors(empty_descriptor_array());
2534 fixed_array_map()->set_code_cache(empty_fixed_array());
2535 fixed_array_map()->set_dependent_code(
2536 DependentCode::cast(empty_fixed_array()));
2537 fixed_array_map()->init_back_pointer(undefined_value());
2538 fixed_array_map()->set_instance_descriptors(empty_descriptor_array());
2540 oddball_map()->set_code_cache(empty_fixed_array());
2541 oddball_map()->set_dependent_code(DependentCode::cast(empty_fixed_array()));
2542 oddball_map()->init_back_pointer(undefined_value());
2543 oddball_map()->set_instance_descriptors(empty_descriptor_array());
2545 // Fix prototype object for existing maps.
2546 meta_map()->set_prototype(null_value());
2547 meta_map()->set_constructor(null_value());
2549 fixed_array_map()->set_prototype(null_value());
2550 fixed_array_map()->set_constructor(null_value());
2552 oddball_map()->set_prototype(null_value());
2553 oddball_map()->set_constructor(null_value());
2555 { MaybeObject* maybe_obj =
2556 AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2557 if (!maybe_obj->ToObject(&obj)) return false;
2559 set_fixed_cow_array_map(Map::cast(obj));
2560 ASSERT(fixed_array_map() != fixed_cow_array_map());
2562 { MaybeObject* maybe_obj =
2563 AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2564 if (!maybe_obj->ToObject(&obj)) return false;
2566 set_scope_info_map(Map::cast(obj));
2568 { MaybeObject* maybe_obj = AllocateMap(HEAP_NUMBER_TYPE, HeapNumber::kSize);
2569 if (!maybe_obj->ToObject(&obj)) return false;
2571 set_heap_number_map(Map::cast(obj));
2573 { MaybeObject* maybe_obj = AllocateMap(SYMBOL_TYPE, Symbol::kSize);
2574 if (!maybe_obj->ToObject(&obj)) return false;
2576 set_symbol_map(Map::cast(obj));
2578 { MaybeObject* maybe_obj = AllocateMap(FOREIGN_TYPE, Foreign::kSize);
2579 if (!maybe_obj->ToObject(&obj)) return false;
2581 set_foreign_map(Map::cast(obj));
2583 for (unsigned i = 0; i < ARRAY_SIZE(string_type_table); i++) {
2584 const StringTypeTable& entry = string_type_table[i];
2585 { MaybeObject* maybe_obj = AllocateMap(entry.type, entry.size);
2586 if (!maybe_obj->ToObject(&obj)) return false;
2588 roots_[entry.index] = Map::cast(obj);
2591 { MaybeObject* maybe_obj = AllocateMap(STRING_TYPE, kVariableSizeSentinel);
2592 if (!maybe_obj->ToObject(&obj)) return false;
2594 set_undetectable_string_map(Map::cast(obj));
2595 Map::cast(obj)->set_is_undetectable();
2597 { MaybeObject* maybe_obj =
2598 AllocateMap(ASCII_STRING_TYPE, kVariableSizeSentinel);
2599 if (!maybe_obj->ToObject(&obj)) return false;
2601 set_undetectable_ascii_string_map(Map::cast(obj));
2602 Map::cast(obj)->set_is_undetectable();
2604 { MaybeObject* maybe_obj =
2605 AllocateMap(FIXED_DOUBLE_ARRAY_TYPE, kVariableSizeSentinel);
2606 if (!maybe_obj->ToObject(&obj)) return false;
2608 set_fixed_double_array_map(Map::cast(obj));
2610 { MaybeObject* maybe_obj =
2611 AllocateMap(BYTE_ARRAY_TYPE, kVariableSizeSentinel);
2612 if (!maybe_obj->ToObject(&obj)) return false;
2614 set_byte_array_map(Map::cast(obj));
2616 { MaybeObject* maybe_obj =
2617 AllocateMap(FREE_SPACE_TYPE, kVariableSizeSentinel);
2618 if (!maybe_obj->ToObject(&obj)) return false;
2620 set_free_space_map(Map::cast(obj));
2622 { MaybeObject* maybe_obj = AllocateByteArray(0, TENURED);
2623 if (!maybe_obj->ToObject(&obj)) return false;
2625 set_empty_byte_array(ByteArray::cast(obj));
2627 { MaybeObject* maybe_obj =
2628 AllocateMap(EXTERNAL_PIXEL_ARRAY_TYPE, ExternalArray::kAlignedSize);
2629 if (!maybe_obj->ToObject(&obj)) return false;
2631 set_external_pixel_array_map(Map::cast(obj));
2633 { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_BYTE_ARRAY_TYPE,
2634 ExternalArray::kAlignedSize);
2635 if (!maybe_obj->ToObject(&obj)) return false;
2637 set_external_byte_array_map(Map::cast(obj));
2639 { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE,
2640 ExternalArray::kAlignedSize);
2641 if (!maybe_obj->ToObject(&obj)) return false;
2643 set_external_unsigned_byte_array_map(Map::cast(obj));
2645 { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_SHORT_ARRAY_TYPE,
2646 ExternalArray::kAlignedSize);
2647 if (!maybe_obj->ToObject(&obj)) return false;
2649 set_external_short_array_map(Map::cast(obj));
2651 { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE,
2652 ExternalArray::kAlignedSize);
2653 if (!maybe_obj->ToObject(&obj)) return false;
2655 set_external_unsigned_short_array_map(Map::cast(obj));
2657 { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_INT_ARRAY_TYPE,
2658 ExternalArray::kAlignedSize);
2659 if (!maybe_obj->ToObject(&obj)) return false;
2661 set_external_int_array_map(Map::cast(obj));
2663 { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_UNSIGNED_INT_ARRAY_TYPE,
2664 ExternalArray::kAlignedSize);
2665 if (!maybe_obj->ToObject(&obj)) return false;
2667 set_external_unsigned_int_array_map(Map::cast(obj));
2669 { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_FLOAT_ARRAY_TYPE,
2670 ExternalArray::kAlignedSize);
2671 if (!maybe_obj->ToObject(&obj)) return false;
2673 set_external_float_array_map(Map::cast(obj));
2675 { MaybeObject* maybe_obj =
2676 AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2677 if (!maybe_obj->ToObject(&obj)) return false;
2679 set_non_strict_arguments_elements_map(Map::cast(obj));
2681 { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_DOUBLE_ARRAY_TYPE,
2682 ExternalArray::kAlignedSize);
2683 if (!maybe_obj->ToObject(&obj)) return false;
2685 set_external_double_array_map(Map::cast(obj));
2687 { MaybeObject* maybe_obj = AllocateEmptyExternalArray(kExternalByteArray);
2688 if (!maybe_obj->ToObject(&obj)) return false;
2690 set_empty_external_byte_array(ExternalArray::cast(obj));
2692 { MaybeObject* maybe_obj =
2693 AllocateEmptyExternalArray(kExternalUnsignedByteArray);
2694 if (!maybe_obj->ToObject(&obj)) return false;
2696 set_empty_external_unsigned_byte_array(ExternalArray::cast(obj));
2698 { MaybeObject* maybe_obj = AllocateEmptyExternalArray(kExternalShortArray);
2699 if (!maybe_obj->ToObject(&obj)) return false;
2701 set_empty_external_short_array(ExternalArray::cast(obj));
2703 { MaybeObject* maybe_obj = AllocateEmptyExternalArray(
2704 kExternalUnsignedShortArray);
2705 if (!maybe_obj->ToObject(&obj)) return false;
2707 set_empty_external_unsigned_short_array(ExternalArray::cast(obj));
2709 { MaybeObject* maybe_obj = AllocateEmptyExternalArray(kExternalIntArray);
2710 if (!maybe_obj->ToObject(&obj)) return false;
2712 set_empty_external_int_array(ExternalArray::cast(obj));
2714 { MaybeObject* maybe_obj =
2715 AllocateEmptyExternalArray(kExternalUnsignedIntArray);
2716 if (!maybe_obj->ToObject(&obj)) return false;
2718 set_empty_external_unsigned_int_array(ExternalArray::cast(obj));
2720 { MaybeObject* maybe_obj = AllocateEmptyExternalArray(kExternalFloatArray);
2721 if (!maybe_obj->ToObject(&obj)) return false;
2723 set_empty_external_float_array(ExternalArray::cast(obj));
2725 { MaybeObject* maybe_obj = AllocateEmptyExternalArray(kExternalDoubleArray);
2726 if (!maybe_obj->ToObject(&obj)) return false;
2728 set_empty_external_double_array(ExternalArray::cast(obj));
2730 { MaybeObject* maybe_obj = AllocateEmptyExternalArray(kExternalPixelArray);
2731 if (!maybe_obj->ToObject(&obj)) return false;
2733 set_empty_external_pixel_array(ExternalArray::cast(obj));
2735 { MaybeObject* maybe_obj = AllocateMap(CODE_TYPE, kVariableSizeSentinel);
2736 if (!maybe_obj->ToObject(&obj)) return false;
2738 set_code_map(Map::cast(obj));
2740 { MaybeObject* maybe_obj = AllocateMap(CELL_TYPE, Cell::kSize);
2741 if (!maybe_obj->ToObject(&obj)) return false;
2743 set_cell_map(Map::cast(obj));
2745 { MaybeObject* maybe_obj = AllocateMap(PROPERTY_CELL_TYPE,
2746 PropertyCell::kSize);
2747 if (!maybe_obj->ToObject(&obj)) return false;
2749 set_global_property_cell_map(Map::cast(obj));
2751 { MaybeObject* maybe_obj = AllocateMap(FILLER_TYPE, kPointerSize);
2752 if (!maybe_obj->ToObject(&obj)) return false;
2754 set_one_pointer_filler_map(Map::cast(obj));
2756 { MaybeObject* maybe_obj = AllocateMap(FILLER_TYPE, 2 * kPointerSize);
2757 if (!maybe_obj->ToObject(&obj)) return false;
2759 set_two_pointer_filler_map(Map::cast(obj));
2761 for (unsigned i = 0; i < ARRAY_SIZE(struct_table); i++) {
2762 const StructTable& entry = struct_table[i];
2763 { MaybeObject* maybe_obj = AllocateMap(entry.type, entry.size);
2764 if (!maybe_obj->ToObject(&obj)) return false;
2766 roots_[entry.index] = Map::cast(obj);
2769 { MaybeObject* maybe_obj =
2770 AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2771 if (!maybe_obj->ToObject(&obj)) return false;
2773 set_hash_table_map(Map::cast(obj));
2775 { MaybeObject* maybe_obj =
2776 AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2777 if (!maybe_obj->ToObject(&obj)) return false;
2779 set_function_context_map(Map::cast(obj));
2781 { MaybeObject* maybe_obj =
2782 AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2783 if (!maybe_obj->ToObject(&obj)) return false;
2785 set_catch_context_map(Map::cast(obj));
2787 { MaybeObject* maybe_obj =
2788 AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2789 if (!maybe_obj->ToObject(&obj)) return false;
2791 set_with_context_map(Map::cast(obj));
2793 { MaybeObject* maybe_obj =
2794 AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2795 if (!maybe_obj->ToObject(&obj)) return false;
2797 set_block_context_map(Map::cast(obj));
2799 { MaybeObject* maybe_obj =
2800 AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2801 if (!maybe_obj->ToObject(&obj)) return false;
2803 set_module_context_map(Map::cast(obj));
2805 { MaybeObject* maybe_obj =
2806 AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2807 if (!maybe_obj->ToObject(&obj)) return false;
2809 set_global_context_map(Map::cast(obj));
2811 { MaybeObject* maybe_obj =
2812 AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2813 if (!maybe_obj->ToObject(&obj)) return false;
2815 Map* native_context_map = Map::cast(obj);
2816 native_context_map->set_dictionary_map(true);
2817 native_context_map->set_visitor_id(StaticVisitorBase::kVisitNativeContext);
2818 set_native_context_map(native_context_map);
2820 { MaybeObject* maybe_obj = AllocateMap(SHARED_FUNCTION_INFO_TYPE,
2821 SharedFunctionInfo::kAlignedSize);
2822 if (!maybe_obj->ToObject(&obj)) return false;
2824 set_shared_function_info_map(Map::cast(obj));
2826 { MaybeObject* maybe_obj = AllocateMap(JS_MESSAGE_OBJECT_TYPE,
2827 JSMessageObject::kSize);
2828 if (!maybe_obj->ToObject(&obj)) return false;
2830 set_message_object_map(Map::cast(obj));
2833 { MaybeObject* maybe_obj =
2834 AllocateMap(JS_OBJECT_TYPE, JSObject::kHeaderSize + kPointerSize);
2835 if (!maybe_obj->To(&external_map)) return false;
2837 external_map->set_is_extensible(false);
2838 set_external_map(external_map);
2840 ASSERT(!InNewSpace(empty_fixed_array()));
2845 MaybeObject* Heap::AllocateHeapNumber(double value, PretenureFlag pretenure) {
2846   // Statically ensure that it is safe to allocate heap numbers in paged spaces.
2848 STATIC_ASSERT(HeapNumber::kSize <= Page::kNonCodeObjectAreaSize);
2849 AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
2852 { MaybeObject* maybe_result =
2853 AllocateRaw(HeapNumber::kSize, space, OLD_DATA_SPACE);
2854 if (!maybe_result->ToObject(&result)) return maybe_result;
2857 HeapObject::cast(result)->set_map_no_write_barrier(heap_number_map());
2858 HeapNumber::cast(result)->set_value(value);
2863 MaybeObject* Heap::AllocateHeapNumber(double value) {
2864   // Use the general version if we're forced to always allocate.
2865 if (always_allocate()) return AllocateHeapNumber(value, TENURED);
2867 // This version of AllocateHeapNumber is optimized for
2868 // allocation in new space.
2869 STATIC_ASSERT(HeapNumber::kSize <= Page::kMaxNonCodeHeapObjectSize);
2871 { MaybeObject* maybe_result = new_space_.AllocateRaw(HeapNumber::kSize);
2872 if (!maybe_result->ToObject(&result)) return maybe_result;
2874 HeapObject::cast(result)->set_map_no_write_barrier(heap_number_map());
2875 HeapNumber::cast(result)->set_value(value);
2880 MaybeObject* Heap::AllocateCell(Object* value) {
2882 { MaybeObject* maybe_result = AllocateRawCell();
2883 if (!maybe_result->ToObject(&result)) return maybe_result;
2885 HeapObject::cast(result)->set_map_no_write_barrier(cell_map());
2886 Cell::cast(result)->set_value(value);
2891 MaybeObject* Heap::AllocatePropertyCell(Object* value) {
2893 MaybeObject* maybe_result = AllocateRawPropertyCell();
2894 if (!maybe_result->ToObject(&result)) return maybe_result;
2896 HeapObject::cast(result)->set_map_no_write_barrier(
2897 global_property_cell_map());
2898 PropertyCell* cell = PropertyCell::cast(result);
2899 cell->set_dependent_code(DependentCode::cast(empty_fixed_array()),
2900 SKIP_WRITE_BARRIER);
2901 cell->set_value(value);
2902 cell->set_type(Type::None());
2903 maybe_result = cell->SetValueInferType(value);
2904 if (maybe_result->IsFailure()) return maybe_result;
2909 MaybeObject* Heap::AllocateBox(Object* value, PretenureFlag pretenure) {
2911 MaybeObject* maybe_result = AllocateStruct(BOX_TYPE);
2912 if (!maybe_result->To(&result)) return maybe_result;
2913 result->set_value(value);
2918 MaybeObject* Heap::AllocateAllocationSite() {
2920 MaybeObject* maybe_result = Allocate(allocation_site_map(),
2922 if (!maybe_result->ToObject(&result)) return maybe_result;
2923 AllocationSite* site = AllocationSite::cast(result);
2927 site->set_weak_next(allocation_sites_list());
2928 set_allocation_sites_list(site);
2933 MaybeObject* Heap::CreateOddball(const char* to_string,
2937 { MaybeObject* maybe_result = Allocate(oddball_map(), OLD_POINTER_SPACE);
2938 if (!maybe_result->ToObject(&result)) return maybe_result;
2940 return Oddball::cast(result)->Initialize(to_string, to_number, kind);
2944 bool Heap::CreateApiObjects() {
2947 { MaybeObject* maybe_obj = AllocateMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
2948 if (!maybe_obj->ToObject(&obj)) return false;
2950 // Don't use Smi-only elements optimizations for objects with the neander
2951 // map. There are too many cases where element values are set directly with a
2952 // bottleneck to trap the Smi-only -> fast elements transition, and there
2953   // appears to be no benefit in optimizing this case.
2954 Map* new_neander_map = Map::cast(obj);
2955 new_neander_map->set_elements_kind(TERMINAL_FAST_ELEMENTS_KIND);
2956 set_neander_map(new_neander_map);
2958 { MaybeObject* maybe_obj = AllocateJSObjectFromMap(neander_map());
2959 if (!maybe_obj->ToObject(&obj)) return false;
2962 { MaybeObject* maybe_elements = AllocateFixedArray(2);
2963 if (!maybe_elements->ToObject(&elements)) return false;
2965 FixedArray::cast(elements)->set(0, Smi::FromInt(0));
2966 JSObject::cast(obj)->set_elements(FixedArray::cast(elements));
2967 set_message_listeners(JSObject::cast(obj));
2973 void Heap::CreateJSEntryStub() {
2975 set_js_entry_code(*stub.GetCode(isolate()));
2979 void Heap::CreateJSConstructEntryStub() {
2980 JSConstructEntryStub stub;
2981 set_js_construct_entry_code(*stub.GetCode(isolate()));
2985 void Heap::CreateFixedStubs() {
2986 // Here we create roots for fixed stubs. They are needed at GC
2987 // for cooking and uncooking (check out frames.cc).
2988   // This eliminates the need for doing dictionary lookups in the
2989 // stub cache for these stubs.
2990 HandleScope scope(isolate());
2991   // gcc-4.4 has a problem generating correct code for the following snippet:
2992 // { JSEntryStub stub;
2993 // js_entry_code_ = *stub.GetCode();
2995 // { JSConstructEntryStub stub;
2996 // js_construct_entry_code_ = *stub.GetCode();
2998   // To work around the problem, make separate functions without inlining.
2999 Heap::CreateJSEntryStub();
3000 Heap::CreateJSConstructEntryStub();
3002 // Create stubs that should be there, so we don't unexpectedly have to
3003 // create them if we need them during the creation of another stub.
3004 // Stub creation mixes raw pointers and handles in an unsafe manner so
3005 // we cannot create stubs while we are creating stubs.
3006 CodeStub::GenerateStubsAheadOfTime(isolate());
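// Creates the remaining non-map roots: the canonical heap numbers, the
// oddballs, the string table, the bootstrap caches and the sentinel symbols.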
3010 bool Heap::CreateInitialObjects() {
3013 // The -0 value must be set before NumberFromDouble works.
3014 { MaybeObject* maybe_obj = AllocateHeapNumber(-0.0, TENURED);
3015 if (!maybe_obj->ToObject(&obj)) return false;
3017 set_minus_zero_value(HeapNumber::cast(obj));
3018 ASSERT(std::signbit(minus_zero_value()->Number()) != 0);
3020 { MaybeObject* maybe_obj = AllocateHeapNumber(OS::nan_value(), TENURED);
3021 if (!maybe_obj->ToObject(&obj)) return false;
3023 set_nan_value(HeapNumber::cast(obj));
3025 { MaybeObject* maybe_obj = AllocateHeapNumber(V8_INFINITY, TENURED);
3026 if (!maybe_obj->ToObject(&obj)) return false;
3028 set_infinity_value(HeapNumber::cast(obj));
3030 // The hole has not been created yet, but we want to put something
3031   // predictable in the gaps in the string table, so let's make that Smi zero.
3032 set_the_hole_value(reinterpret_cast<Oddball*>(Smi::FromInt(0)));
3034 // Allocate initial string table.
3035 { MaybeObject* maybe_obj =
3036 StringTable::Allocate(this, kInitialStringTableSize);
3037 if (!maybe_obj->ToObject(&obj)) return false;
3039 // Don't use set_string_table() due to asserts.
3040 roots_[kStringTableRootIndex] = obj;
3042 // Finish initializing oddballs after creating the string table.
3043 { MaybeObject* maybe_obj =
3044 undefined_value()->Initialize("undefined",
3046 Oddball::kUndefined);
3047 if (!maybe_obj->ToObject(&obj)) return false;
3050 // Initialize the null_value.
3051 { MaybeObject* maybe_obj =
3052 null_value()->Initialize("null", Smi::FromInt(0), Oddball::kNull);
3053 if (!maybe_obj->ToObject(&obj)) return false;
3056 { MaybeObject* maybe_obj = CreateOddball("true",
3059 if (!maybe_obj->ToObject(&obj)) return false;
3061 set_true_value(Oddball::cast(obj));
3063 { MaybeObject* maybe_obj = CreateOddball("false",
3066 if (!maybe_obj->ToObject(&obj)) return false;
3068 set_false_value(Oddball::cast(obj));
3070 { MaybeObject* maybe_obj = CreateOddball("hole",
3073 if (!maybe_obj->ToObject(&obj)) return false;
3075 set_the_hole_value(Oddball::cast(obj));
3077 { MaybeObject* maybe_obj = CreateOddball("uninitialized",
3079 Oddball::kUninitialized);
3080 if (!maybe_obj->ToObject(&obj)) return false;
3082 set_uninitialized_value(Oddball::cast(obj));
3084 { MaybeObject* maybe_obj = CreateOddball("arguments_marker",
3086 Oddball::kArgumentMarker);
3087 if (!maybe_obj->ToObject(&obj)) return false;
3089 set_arguments_marker(Oddball::cast(obj));
3091 { MaybeObject* maybe_obj = CreateOddball("no_interceptor_result_sentinel",
3094 if (!maybe_obj->ToObject(&obj)) return false;
3096 set_no_interceptor_result_sentinel(obj);
3098 { MaybeObject* maybe_obj = CreateOddball("termination_exception",
3101 if (!maybe_obj->ToObject(&obj)) return false;
3103 set_termination_exception(obj);
3105 for (unsigned i = 0; i < ARRAY_SIZE(constant_string_table); i++) {
3106 { MaybeObject* maybe_obj =
3107 InternalizeUtf8String(constant_string_table[i].contents);
3108 if (!maybe_obj->ToObject(&obj)) return false;
3110 roots_[constant_string_table[i].index] = String::cast(obj);
3113 // Allocate the hidden string which is used to identify the hidden properties
3114 // in JSObjects. The hash code has a special value so that it will not match
3115 // the empty string when searching for the property. It cannot be part of the
3116 // loop above because it needs to be allocated manually with the special
3117 // hash code in place. The hash code for the hidden_string is zero to ensure
3118 // that it will always be at the first entry in property descriptors.
3119 { MaybeObject* maybe_obj = AllocateOneByteInternalizedString(
3120 OneByteVector("", 0), String::kEmptyStringHash);
3121 if (!maybe_obj->ToObject(&obj)) return false;
3123 hidden_string_ = String::cast(obj);
3125 // Allocate the code_stubs dictionary. The initial size is set to avoid
3126 // expanding the dictionary during bootstrapping.
3127 { MaybeObject* maybe_obj = UnseededNumberDictionary::Allocate(this, 128);
3128 if (!maybe_obj->ToObject(&obj)) return false;
3130 set_code_stubs(UnseededNumberDictionary::cast(obj));
3133 // Allocate the non_monomorphic_cache used in stub-cache.cc. The initial size
3134 // is set to avoid expanding the dictionary during bootstrapping.
3135 { MaybeObject* maybe_obj = UnseededNumberDictionary::Allocate(this, 64);
3136 if (!maybe_obj->ToObject(&obj)) return false;
3138 set_non_monomorphic_cache(UnseededNumberDictionary::cast(obj));
3140 { MaybeObject* maybe_obj = AllocatePolymorphicCodeCache();
3141 if (!maybe_obj->ToObject(&obj)) return false;
3143 set_polymorphic_code_cache(PolymorphicCodeCache::cast(obj));
3145 set_instanceof_cache_function(Smi::FromInt(0));
3146 set_instanceof_cache_map(Smi::FromInt(0));
3147 set_instanceof_cache_answer(Smi::FromInt(0));
3151 // Allocate the dictionary of intrinsic function names.
3152 { MaybeObject* maybe_obj =
3153 NameDictionary::Allocate(this, Runtime::kNumFunctions);
3154 if (!maybe_obj->ToObject(&obj)) return false;
3156 { MaybeObject* maybe_obj = Runtime::InitializeIntrinsicFunctionNames(this,
3158 if (!maybe_obj->ToObject(&obj)) return false;
3160 set_intrinsic_function_names(NameDictionary::cast(obj));
3162 { MaybeObject* maybe_obj = AllocateInitialNumberStringCache();
3163 if (!maybe_obj->ToObject(&obj)) return false;
3165 set_number_string_cache(FixedArray::cast(obj));
3167 // Allocate cache for single character one byte strings.
3168 { MaybeObject* maybe_obj =
3169 AllocateFixedArray(String::kMaxOneByteCharCode + 1, TENURED);
3170 if (!maybe_obj->ToObject(&obj)) return false;
3172 set_single_character_string_cache(FixedArray::cast(obj));
3174 // Allocate cache for string split.
3175 { MaybeObject* maybe_obj = AllocateFixedArray(
3176 RegExpResultsCache::kRegExpResultsCacheSize, TENURED);
3177 if (!maybe_obj->ToObject(&obj)) return false;
3179 set_string_split_cache(FixedArray::cast(obj));
3181 { MaybeObject* maybe_obj = AllocateFixedArray(
3182 RegExpResultsCache::kRegExpResultsCacheSize, TENURED);
3183 if (!maybe_obj->ToObject(&obj)) return false;
3185 set_regexp_multiple_cache(FixedArray::cast(obj));
3187 // Allocate cache for external strings pointing to native source code.
3188 { MaybeObject* maybe_obj = AllocateFixedArray(Natives::GetBuiltinsCount());
3189 if (!maybe_obj->ToObject(&obj)) return false;
3191 set_natives_source_cache(FixedArray::cast(obj));
3193 // Allocate object to hold object observation state.
3194 { MaybeObject* maybe_obj = AllocateMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
3195 if (!maybe_obj->ToObject(&obj)) return false;
3197 { MaybeObject* maybe_obj = AllocateJSObjectFromMap(Map::cast(obj));
3198 if (!maybe_obj->ToObject(&obj)) return false;
3200 set_observation_state(JSObject::cast(obj));
3202 { MaybeObject* maybe_obj = AllocateSymbol();
3203 if (!maybe_obj->ToObject(&obj)) return false;
3205 set_frozen_symbol(Symbol::cast(obj));
3207 { MaybeObject* maybe_obj = SeededNumberDictionary::Allocate(this, 0, TENURED);
3208 if (!maybe_obj->ToObject(&obj)) return false;
3210 SeededNumberDictionary::cast(obj)->set_requires_slow_elements();
3211 set_empty_slow_element_dictionary(SeededNumberDictionary::cast(obj));
3213 { MaybeObject* maybe_obj = AllocateSymbol();
3214 if (!maybe_obj->ToObject(&obj)) return false;
3216 set_observed_symbol(Symbol::cast(obj));
3218 // Handling of script id generation is in Factory::NewScript.
3219 set_last_script_id(Smi::FromInt(v8::Script::kNoScriptId));
3221 // Initialize keyed lookup cache.
3222 isolate_->keyed_lookup_cache()->Clear();
3224 // Initialize context slot cache.
3225 isolate_->context_slot_cache()->Clear();
3227 // Initialize descriptor cache.
3228 isolate_->descriptor_lookup_cache()->Clear();
3230 // Initialize compilation cache.
3231 isolate_->compilation_cache()->Clear();
3237 bool Heap::RootCanBeWrittenAfterInitialization(Heap::RootListIndex root_index) {
3238 RootListIndex writable_roots[] = {
3239 kStoreBufferTopRootIndex,
3240 kStackLimitRootIndex,
3241 kNumberStringCacheRootIndex,
3242 kInstanceofCacheFunctionRootIndex,
3243 kInstanceofCacheMapRootIndex,
3244 kInstanceofCacheAnswerRootIndex,
3245 kCodeStubsRootIndex,
3246 kNonMonomorphicCacheRootIndex,
3247 kPolymorphicCodeCacheRootIndex,
3248 kLastScriptIdRootIndex,
3249 kEmptyScriptRootIndex,
3250 kRealStackLimitRootIndex,
3251 kArgumentsAdaptorDeoptPCOffsetRootIndex,
3252 kConstructStubDeoptPCOffsetRootIndex,
3253 kGetterStubDeoptPCOffsetRootIndex,
3254 kSetterStubDeoptPCOffsetRootIndex,
3255 kStringTableRootIndex,
3258 for (unsigned int i = 0; i < ARRAY_SIZE(writable_roots); i++) {
3259 if (root_index == writable_roots[i])
3266 Object* RegExpResultsCache::Lookup(Heap* heap,
3268 Object* key_pattern,
3269 ResultsCacheType type) {
3271 if (!key_string->IsInternalizedString()) return Smi::FromInt(0);
3272 if (type == STRING_SPLIT_SUBSTRINGS) {
3273 ASSERT(key_pattern->IsString());
3274 if (!key_pattern->IsInternalizedString()) return Smi::FromInt(0);
3275 cache = heap->string_split_cache();
3277 ASSERT(type == REGEXP_MULTIPLE_INDICES);
3278 ASSERT(key_pattern->IsFixedArray());
3279 cache = heap->regexp_multiple_cache();
3282 uint32_t hash = key_string->Hash();
3283 uint32_t index = ((hash & (kRegExpResultsCacheSize - 1)) &
3284 ~(kArrayEntriesPerCacheEntry - 1));
3285 if (cache->get(index + kStringOffset) == key_string &&
3286 cache->get(index + kPatternOffset) == key_pattern) {
3287 return cache->get(index + kArrayOffset);
3290 ((index + kArrayEntriesPerCacheEntry) & (kRegExpResultsCacheSize - 1));
3291 if (cache->get(index + kStringOffset) == key_string &&
3292 cache->get(index + kPatternOffset) == key_pattern) {
3293 return cache->get(index + kArrayOffset);
3295 return Smi::FromInt(0);
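// Inserts value_array into the results cache. Each key hashes to a primary and
// a secondary slot; an occupied primary slot falls back to the secondary one,
// and a double collision overwrites the primary entry. Short substring results
// are internalized and the backing store is switched to the copy-on-write map
// so the cached array can be shared.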
3299 void RegExpResultsCache::Enter(Heap* heap,
3301 Object* key_pattern,
3302 FixedArray* value_array,
3303 ResultsCacheType type) {
3305 if (!key_string->IsInternalizedString()) return;
3306 if (type == STRING_SPLIT_SUBSTRINGS) {
3307 ASSERT(key_pattern->IsString());
3308 if (!key_pattern->IsInternalizedString()) return;
3309 cache = heap->string_split_cache();
3311 ASSERT(type == REGEXP_MULTIPLE_INDICES);
3312 ASSERT(key_pattern->IsFixedArray());
3313 cache = heap->regexp_multiple_cache();
3316 uint32_t hash = key_string->Hash();
3317 uint32_t index = ((hash & (kRegExpResultsCacheSize - 1)) &
3318 ~(kArrayEntriesPerCacheEntry - 1));
3319 if (cache->get(index + kStringOffset) == Smi::FromInt(0)) {
3320 cache->set(index + kStringOffset, key_string);
3321 cache->set(index + kPatternOffset, key_pattern);
3322 cache->set(index + kArrayOffset, value_array);
3325 ((index + kArrayEntriesPerCacheEntry) & (kRegExpResultsCacheSize - 1));
3326 if (cache->get(index2 + kStringOffset) == Smi::FromInt(0)) {
3327 cache->set(index2 + kStringOffset, key_string);
3328 cache->set(index2 + kPatternOffset, key_pattern);
3329 cache->set(index2 + kArrayOffset, value_array);
3331 cache->set(index2 + kStringOffset, Smi::FromInt(0));
3332 cache->set(index2 + kPatternOffset, Smi::FromInt(0));
3333 cache->set(index2 + kArrayOffset, Smi::FromInt(0));
3334 cache->set(index + kStringOffset, key_string);
3335 cache->set(index + kPatternOffset, key_pattern);
3336 cache->set(index + kArrayOffset, value_array);
3339 // If the array is a reasonably short list of substrings, convert it into a
3340 // list of internalized strings.
3341 if (type == STRING_SPLIT_SUBSTRINGS && value_array->length() < 100) {
3342 for (int i = 0; i < value_array->length(); i++) {
3343 String* str = String::cast(value_array->get(i));
3344 Object* internalized_str;
3345 MaybeObject* maybe_string = heap->InternalizeString(str);
3346 if (maybe_string->ToObject(&internalized_str)) {
3347 value_array->set(i, internalized_str);
3351 // Convert backing store to a copy-on-write array.
3352 value_array->set_map_no_write_barrier(heap->fixed_cow_array_map());
3356 void RegExpResultsCache::Clear(FixedArray* cache) {
3357 for (int i = 0; i < kRegExpResultsCacheSize; i++) {
3358 cache->set(i, Smi::FromInt(0));
3363 MaybeObject* Heap::AllocateInitialNumberStringCache() {
3364 MaybeObject* maybe_obj =
3365 AllocateFixedArray(kInitialNumberStringCacheSize * 2, TENURED);
3370 int Heap::FullSizeNumberStringCacheLength() {
3371 // Compute the size of the number string cache based on the max newspace size.
3372 // The number string cache has a minimum size based on twice the initial cache
3373 // size to ensure that it is bigger after being made 'full size'.
3374 int number_string_cache_size = max_semispace_size_ / 512;
3375 number_string_cache_size = Max(kInitialNumberStringCacheSize * 2,
3376 Min(0x4000, number_string_cache_size));
3377   // There is a string and a number per entry, so the length is twice the number of entries.
3379 return number_string_cache_size * 2;
3383 void Heap::AllocateFullSizeNumberStringCache() {
3384 // The idea is to have a small number string cache in the snapshot to keep
3385   // boot-time memory usage down. If the number string cache has already been
3386   // expanded while the snapshot is being created, that defeats the purpose.
3387 ASSERT(!Serializer::enabled() || FLAG_extra_code != NULL);
3388 MaybeObject* maybe_obj =
3389 AllocateFixedArray(FullSizeNumberStringCacheLength(), TENURED);
3391 if (maybe_obj->ToObject(&new_cache)) {
3392 // We don't bother to repopulate the cache with entries from the old cache.
3393 // It will be repopulated soon enough with new strings.
3394 set_number_string_cache(FixedArray::cast(new_cache));
3396 // If allocation fails then we just return without doing anything. It is only
3397 // a cache, so best effort is OK here.
3401 void Heap::FlushNumberStringCache() {
3402 // Flush the number to string cache.
3403 int len = number_string_cache()->length();
3404 for (int i = 0; i < len; i++) {
3405 number_string_cache()->set_undefined(this, i);
3410 static inline int double_get_hash(double d) {
3411 DoubleRepresentation rep(d);
3412 return static_cast<int>(rep.bits) ^ static_cast<int>(rep.bits >> 32);
3416 static inline int smi_get_hash(Smi* smi) {
3417 return smi->value();
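// The number-string cache is a flat FixedArray of (number, string) pairs
// indexed by a hash of the number; a miss returns the undefined value.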
3421 Object* Heap::GetNumberStringCache(Object* number) {
3423 int mask = (number_string_cache()->length() >> 1) - 1;
3424 if (number->IsSmi()) {
3425 hash = smi_get_hash(Smi::cast(number)) & mask;
3427 hash = double_get_hash(number->Number()) & mask;
3429 Object* key = number_string_cache()->get(hash * 2);
3430 if (key == number) {
3431 return String::cast(number_string_cache()->get(hash * 2 + 1));
3432 } else if (key->IsHeapNumber() &&
3433 number->IsHeapNumber() &&
3434 key->Number() == number->Number()) {
3435 return String::cast(number_string_cache()->get(hash * 2 + 1));
3437 return undefined_value();
3441 void Heap::SetNumberStringCache(Object* number, String* string) {
3443 int mask = (number_string_cache()->length() >> 1) - 1;
3444 if (number->IsSmi()) {
3445 hash = smi_get_hash(Smi::cast(number)) & mask;
3447 hash = double_get_hash(number->Number()) & mask;
3449 if (number_string_cache()->get(hash * 2) != undefined_value() &&
3450 number_string_cache()->length() != FullSizeNumberStringCacheLength()) {
3451 // The first time we have a hash collision, we move to the full sized
3452 // number string cache.
3453 AllocateFullSizeNumberStringCache();
3456 number_string_cache()->set(hash * 2, number);
3457 number_string_cache()->set(hash * 2 + 1, string);
3461 MaybeObject* Heap::NumberToString(Object* number,
3462 bool check_number_string_cache,
3463 PretenureFlag pretenure) {
3464 isolate_->counters()->number_to_string_runtime()->Increment();
3465 if (check_number_string_cache) {
3466 Object* cached = GetNumberStringCache(number);
3467 if (cached != undefined_value()) {
3473 Vector<char> buffer(arr, ARRAY_SIZE(arr));
3475 if (number->IsSmi()) {
3476 int num = Smi::cast(number)->value();
3477 str = IntToCString(num, buffer);
3479 double num = HeapNumber::cast(number)->value();
3480 str = DoubleToCString(num, buffer);
3484 MaybeObject* maybe_js_string =
3485 AllocateStringFromOneByte(CStrVector(str), pretenure);
3486 if (maybe_js_string->ToObject(&js_string)) {
3487 SetNumberStringCache(number, String::cast(js_string));
3489 return maybe_js_string;
3493 MaybeObject* Heap::Uint32ToString(uint32_t value,
3494 bool check_number_string_cache) {
3496 MaybeObject* maybe = NumberFromUint32(value);
3497 if (!maybe->To<Object>(&number)) return maybe;
3498 return NumberToString(number, check_number_string_cache);
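// The helpers below translate ExternalArrayType and ElementsKind values into
// the root-list indices of the corresponding external array maps and their
// canonical empty external arrays.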
3502 Map* Heap::MapForExternalArrayType(ExternalArrayType array_type) {
3503 return Map::cast(roots_[RootIndexForExternalArrayType(array_type)]);
3507 Heap::RootListIndex Heap::RootIndexForExternalArrayType(
3508 ExternalArrayType array_type) {
3509 switch (array_type) {
3510 case kExternalByteArray:
3511 return kExternalByteArrayMapRootIndex;
3512 case kExternalUnsignedByteArray:
3513 return kExternalUnsignedByteArrayMapRootIndex;
3514 case kExternalShortArray:
3515 return kExternalShortArrayMapRootIndex;
3516 case kExternalUnsignedShortArray:
3517 return kExternalUnsignedShortArrayMapRootIndex;
3518 case kExternalIntArray:
3519 return kExternalIntArrayMapRootIndex;
3520 case kExternalUnsignedIntArray:
3521 return kExternalUnsignedIntArrayMapRootIndex;
3522 case kExternalFloatArray:
3523 return kExternalFloatArrayMapRootIndex;
3524 case kExternalDoubleArray:
3525 return kExternalDoubleArrayMapRootIndex;
3526 case kExternalPixelArray:
3527 return kExternalPixelArrayMapRootIndex;
3530 return kUndefinedValueRootIndex;
3534 Heap::RootListIndex Heap::RootIndexForEmptyExternalArray(
3535 ElementsKind elementsKind) {
3536 switch (elementsKind) {
3537 case EXTERNAL_BYTE_ELEMENTS:
3538 return kEmptyExternalByteArrayRootIndex;
3539 case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
3540 return kEmptyExternalUnsignedByteArrayRootIndex;
3541 case EXTERNAL_SHORT_ELEMENTS:
3542 return kEmptyExternalShortArrayRootIndex;
3543 case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
3544 return kEmptyExternalUnsignedShortArrayRootIndex;
3545 case EXTERNAL_INT_ELEMENTS:
3546 return kEmptyExternalIntArrayRootIndex;
3547 case EXTERNAL_UNSIGNED_INT_ELEMENTS:
3548 return kEmptyExternalUnsignedIntArrayRootIndex;
3549 case EXTERNAL_FLOAT_ELEMENTS:
3550 return kEmptyExternalFloatArrayRootIndex;
3551 case EXTERNAL_DOUBLE_ELEMENTS:
3552 return kEmptyExternalDoubleArrayRootIndex;
3553 case EXTERNAL_PIXEL_ELEMENTS:
3554 return kEmptyExternalPixelArrayRootIndex;
3557 return kUndefinedValueRootIndex;
3562 ExternalArray* Heap::EmptyExternalArrayForMap(Map* map) {
3563 return ExternalArray::cast(
3564 roots_[RootIndexForEmptyExternalArray(map->elements_kind())]);
3570 MaybeObject* Heap::NumberFromDouble(double value, PretenureFlag pretenure) {
3571 // We need to distinguish the minus zero value and this cannot be
3572 // done after conversion to int. Doing this by comparing bit
3573 // patterns is faster than using fpclassify() et al.
3574 static const DoubleRepresentation minus_zero(-0.0);
3576 DoubleRepresentation rep(value);
3577 if (rep.bits == minus_zero.bits) {
3578 return AllocateHeapNumber(-0.0, pretenure);
3581 int int_value = FastD2I(value);
3582 if (value == int_value && Smi::IsValid(int_value)) {
3583 return Smi::FromInt(int_value);
3586 // Materialize the value in the heap.
3587 return AllocateHeapNumber(value, pretenure);
3591 MaybeObject* Heap::AllocateForeign(Address address, PretenureFlag pretenure) {
3592 // Statically ensure that it is safe to allocate foreigns in paged spaces.
3593 STATIC_ASSERT(Foreign::kSize <= Page::kMaxNonCodeHeapObjectSize);
3594 AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
3596 MaybeObject* maybe_result = Allocate(foreign_map(), space);
3597 if (!maybe_result->To(&result)) return maybe_result;
3598 result->set_foreign_address(address);
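// Creates a SharedFunctionInfo whose code is the kIllegal builtin and whose
// remaining fields hold neutral defaults; callers are expected to fill in the
// real data afterwards.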
3603 MaybeObject* Heap::AllocateSharedFunctionInfo(Object* name) {
3604 SharedFunctionInfo* share;
3605 MaybeObject* maybe = Allocate(shared_function_info_map(), OLD_POINTER_SPACE);
3606 if (!maybe->To<SharedFunctionInfo>(&share)) return maybe;
3608 // Set pointer fields.
3609 share->set_name(name);
3610 Code* illegal = isolate_->builtins()->builtin(Builtins::kIllegal);
3611 share->set_code(illegal);
3612 share->set_optimized_code_map(Smi::FromInt(0));
3613 share->set_scope_info(ScopeInfo::Empty(isolate_));
3614 Code* construct_stub =
3615 isolate_->builtins()->builtin(Builtins::kJSConstructStubGeneric);
3616 share->set_construct_stub(construct_stub);
3617 share->set_instance_class_name(Object_string());
3618 share->set_function_data(undefined_value(), SKIP_WRITE_BARRIER);
3619 share->set_script(undefined_value(), SKIP_WRITE_BARRIER);
3620 share->set_debug_info(undefined_value(), SKIP_WRITE_BARRIER);
3621 share->set_inferred_name(empty_string(), SKIP_WRITE_BARRIER);
3622 share->set_initial_map(undefined_value(), SKIP_WRITE_BARRIER);
3623 share->set_ast_node_count(0);
3624 share->set_counters(0);
3626 // Set integer fields (smi or int, depending on the architecture).
3627 share->set_length(0);
3628 share->set_formal_parameter_count(0);
3629 share->set_expected_nof_properties(0);
3630 share->set_num_literals(0);
3631 share->set_start_position_and_type(0);
3632 share->set_end_position(0);
3633 share->set_function_token_position(0);
3634 // All compiler hints default to false or 0.
3635 share->set_compiler_hints(0);
3636 share->set_opt_count(0);
3642 MaybeObject* Heap::AllocateJSMessageObject(String* type,
3647 Object* stack_trace,
3648 Object* stack_frames) {
3650 { MaybeObject* maybe_result = Allocate(message_object_map(), NEW_SPACE);
3651 if (!maybe_result->ToObject(&result)) return maybe_result;
3653 JSMessageObject* message = JSMessageObject::cast(result);
3654 message->set_properties(Heap::empty_fixed_array(), SKIP_WRITE_BARRIER);
3655 message->initialize_elements();
3656 message->set_elements(Heap::empty_fixed_array(), SKIP_WRITE_BARRIER);
3657 message->set_type(type);
3658 message->set_arguments(arguments);
3659 message->set_start_position(start_position);
3660 message->set_end_position(end_position);
3661 message->set_script(script);
3662 message->set_stack_trace(stack_trace);
3663 message->set_stack_frames(stack_frames);
3669 // Returns true for a character in a range. Both limits are inclusive.
3670 static inline bool Between(uint32_t character, uint32_t from, uint32_t to) {
3671 // This makes use of the unsigned wraparound.
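// (Illustration: Between('5', '0', '9') computes '5' - '0' = 5 <= '9' - '0' = 9
// and returns true. For a character below 'from' the unsigned subtraction
// wraps around to a huge value and the comparison fails, which is what lets a
// single compare implement the two-sided range check.)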
3672 return character - from <= to - from;
3676 MUST_USE_RESULT static inline MaybeObject* MakeOrFindTwoCharacterString(
3681 // Numeric strings have a different hash algorithm not known by
3682 // LookupTwoCharsStringIfExists, so we skip this step for such strings.
3683 if ((!Between(c1, '0', '9') || !Between(c2, '0', '9')) &&
3684 heap->string_table()->LookupTwoCharsStringIfExists(c1, c2, &result)) {
3686 // Now that we know the length is 2, we might as well make use of that fact
3687 // when building the new string.
3688 } else if (static_cast<unsigned>(c1 | c2) <= String::kMaxOneByteCharCodeU) {
3690 ASSERT(IsPowerOf2(String::kMaxOneByteCharCodeU + 1)); // because of this.
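// (The OR trick above is only valid because kMaxOneByteCharCodeU + 1 is a
// power of two: the limit is then a contiguous bit mask, so c1 | c2 <= mask
// holds exactly when both c1 <= mask and c2 <= mask.)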
3692 { MaybeObject* maybe_result = heap->AllocateRawOneByteString(2);
3693 if (!maybe_result->ToObject(&result)) return maybe_result;
3695 uint8_t* dest = SeqOneByteString::cast(result)->GetChars();
3696 dest[0] = static_cast<uint8_t>(c1);
3697 dest[1] = static_cast<uint8_t>(c2);
3701 { MaybeObject* maybe_result = heap->AllocateRawTwoByteString(2);
3702 if (!maybe_result->ToObject(&result)) return maybe_result;
3704 uc16* dest = SeqTwoByteString::cast(result)->GetChars();
3712 MaybeObject* Heap::AllocateConsString(String* first, String* second) {
3713 int first_length = first->length();
3714 if (first_length == 0) {
3718 int second_length = second->length();
3719 if (second_length == 0) {
3723 int length = first_length + second_length;
3725 // Optimization for 2-byte strings often used as keys in a decompression
3726 // dictionary. Check whether we already have the string in the string
3727 // table to prevent creation of many unnecessary strings.
3729 uint16_t c1 = first->Get(0);
3730 uint16_t c2 = second->Get(0);
3731 return MakeOrFindTwoCharacterString(this, c1, c2);
3734 bool first_is_one_byte = first->IsOneByteRepresentation();
3735 bool second_is_one_byte = second->IsOneByteRepresentation();
3736 bool is_one_byte = first_is_one_byte && second_is_one_byte;
3737 // Make sure that an out of memory exception is thrown if the length
3738 // of the new cons string is too large.
3739 if (length > String::kMaxLength || length < 0) {
3740 isolate()->context()->mark_out_of_memory();
3741 return Failure::OutOfMemoryException(0x4);
3744 bool is_one_byte_data_in_two_byte_string = false;
3746 // At least one of the strings uses two-byte representation so we
3747 // can't use the fast case code for short ASCII strings below, but
3748 // we can try to save memory if all chars actually fit in ASCII.
3749 is_one_byte_data_in_two_byte_string =
3750 first->HasOnlyOneByteChars() && second->HasOnlyOneByteChars();
3751 if (is_one_byte_data_in_two_byte_string) {
3752 isolate_->counters()->string_add_runtime_ext_to_ascii()->Increment();
3756 // If the resulting string is small make a flat string.
3757 if (length < ConsString::kMinLength) {
3758 // Note that neither of the two inputs can be a slice because:
3759 STATIC_ASSERT(ConsString::kMinLength <= SlicedString::kMinLength);
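// (Each input is strictly shorter than length < ConsString::kMinLength, and
// sliced strings are only created for substrings of at least
// SlicedString::kMinLength characters (see AllocateSubString below), so
// neither input can be a slice and the flat-copy fast path is safe.)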
3760 ASSERT(first->IsFlat());
3761 ASSERT(second->IsFlat());
3764 { MaybeObject* maybe_result = AllocateRawOneByteString(length);
3765 if (!maybe_result->ToObject(&result)) return maybe_result;
3767 // Copy the characters into the new object.
3768 uint8_t* dest = SeqOneByteString::cast(result)->GetChars();
3771 if (first->IsExternalString()) {
3772 src = ExternalAsciiString::cast(first)->GetChars();
3774 src = SeqOneByteString::cast(first)->GetChars();
3776 for (int i = 0; i < first_length; i++) *dest++ = src[i];
3777 // Copy second part.
3778 if (second->IsExternalString()) {
3779 src = ExternalAsciiString::cast(second)->GetChars();
3781 src = SeqOneByteString::cast(second)->GetChars();
3783 for (int i = 0; i < second_length; i++) *dest++ = src[i];
3786 if (is_one_byte_data_in_two_byte_string) {
3788 { MaybeObject* maybe_result = AllocateRawOneByteString(length);
3789 if (!maybe_result->ToObject(&result)) return maybe_result;
3791 // Copy the characters into the new object.
3792 uint8_t* dest = SeqOneByteString::cast(result)->GetChars();
3793 String::WriteToFlat(first, dest, 0, first_length);
3794 String::WriteToFlat(second, dest + first_length, 0, second_length);
3795 isolate_->counters()->string_add_runtime_ext_to_ascii()->Increment();
3800 { MaybeObject* maybe_result = AllocateRawTwoByteString(length);
3801 if (!maybe_result->ToObject(&result)) return maybe_result;
3803 // Copy the characters into the new object.
3804 uc16* dest = SeqTwoByteString::cast(result)->GetChars();
3805 String::WriteToFlat(first, dest, 0, first_length);
3806 String::WriteToFlat(second, dest + first_length, 0, second_length);
3811 Map* map = (is_one_byte || is_one_byte_data_in_two_byte_string) ?
3812 cons_ascii_string_map() : cons_string_map();
3815 { MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
3816 if (!maybe_result->ToObject(&result)) return maybe_result;
3819 DisallowHeapAllocation no_gc;
3820 ConsString* cons_string = ConsString::cast(result);
3821 WriteBarrierMode mode = cons_string->GetWriteBarrierMode(no_gc);
3822 cons_string->set_length(length);
3823 cons_string->set_hash_field(String::kEmptyHashField);
3824 cons_string->set_first(first, mode);
3825 cons_string->set_second(second, mode);
3830 MaybeObject* Heap::AllocateSubString(String* buffer,
3833 PretenureFlag pretenure) {
3834 int length = end - start;
3836 return empty_string();
3837 } else if (length == 1) {
3838 return LookupSingleCharacterStringFromCode(buffer->Get(start));
3839 } else if (length == 2) {
3840 // Optimization for 2-byte strings often used as keys in a decompression
3841 // dictionary. Check whether we already have the string in the string
3842 // table to prevent creation of many unnecessary strings.
3843 uint16_t c1 = buffer->Get(start);
3844 uint16_t c2 = buffer->Get(start + 1);
3845 return MakeOrFindTwoCharacterString(this, c1, c2);
3848 // Make an attempt to flatten the buffer to reduce access time.
3849 buffer = buffer->TryFlattenGetString();
3851 if (!FLAG_string_slices ||
3852 !buffer->IsFlat() ||
3853 length < SlicedString::kMinLength ||
3854 pretenure == TENURED) {
3856 // WriteToFlat takes care of the case when an indirect string has a
3857 // different encoding from its underlying string. These encodings may
3858 // differ because of externalization.
3859 bool is_one_byte = buffer->IsOneByteRepresentation();
3860 { MaybeObject* maybe_result = is_one_byte
3861 ? AllocateRawOneByteString(length, pretenure)
3862 : AllocateRawTwoByteString(length, pretenure);
3863 if (!maybe_result->ToObject(&result)) return maybe_result;
3865 String* string_result = String::cast(result);
3866 // Copy the characters into the new object.
3868 ASSERT(string_result->IsOneByteRepresentation());
3869 uint8_t* dest = SeqOneByteString::cast(string_result)->GetChars();
3870 String::WriteToFlat(buffer, dest, start, end);
3872 ASSERT(string_result->IsTwoByteRepresentation());
3873 uc16* dest = SeqTwoByteString::cast(string_result)->GetChars();
3874 String::WriteToFlat(buffer, dest, start, end);
3879 ASSERT(buffer->IsFlat());
3881 if (FLAG_verify_heap) {
3882 buffer->StringVerify();
3887 // When slicing an indirect string we use its encoding for a newly created
3888 // slice and don't check the encoding of the underlying string. This is safe
3889 // even if the encodings are different because of externalization. If an
3890 // indirect ASCII string is pointing to a two-byte string, the two-byte char
3891 // codes of the underlying string must still fit into ASCII (because
3892 // externalization must not change char codes).
3893 { Map* map = buffer->IsOneByteRepresentation()
3894 ? sliced_ascii_string_map()
3895 : sliced_string_map();
3896 MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
3897 if (!maybe_result->ToObject(&result)) return maybe_result;
3900 DisallowHeapAllocation no_gc;
3901 SlicedString* sliced_string = SlicedString::cast(result);
3902 sliced_string->set_length(length);
3903 sliced_string->set_hash_field(String::kEmptyHashField);
3904 if (buffer->IsConsString()) {
3905 ConsString* cons = ConsString::cast(buffer);
3906 ASSERT(cons->second()->length() == 0);
3907 sliced_string->set_parent(cons->first());
3908 sliced_string->set_offset(start);
3909 } else if (buffer->IsSlicedString()) {
3910 // Prevent nesting sliced strings.
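// (A slice of a slice points directly at the ultimate backing string and adds
// the offsets, so slice chains never grow deeper than one level.)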
3911 SlicedString* parent_slice = SlicedString::cast(buffer);
3912 sliced_string->set_parent(parent_slice->parent());
3913 sliced_string->set_offset(start + parent_slice->offset());
3915 sliced_string->set_parent(buffer);
3916 sliced_string->set_offset(start);
3918 ASSERT(sliced_string->parent()->IsSeqString() ||
3919 sliced_string->parent()->IsExternalString());
3924 MaybeObject* Heap::AllocateExternalStringFromAscii(
3925 const ExternalAsciiString::Resource* resource) {
3926 size_t length = resource->length();
3927 if (length > static_cast<size_t>(String::kMaxLength)) {
3928 isolate()->context()->mark_out_of_memory();
3929 return Failure::OutOfMemoryException(0x5);
3932 Map* map = external_ascii_string_map();
3934 { MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
3935 if (!maybe_result->ToObject(&result)) return maybe_result;
3938 ExternalAsciiString* external_string = ExternalAsciiString::cast(result);
3939 external_string->set_length(static_cast<int>(length));
3940 external_string->set_hash_field(String::kEmptyHashField);
3941 external_string->set_resource(resource);
3947 MaybeObject* Heap::AllocateExternalStringFromTwoByte(
3948 const ExternalTwoByteString::Resource* resource) {
3949 size_t length = resource->length();
3950 if (length > static_cast<size_t>(String::kMaxLength)) {
3951 isolate()->context()->mark_out_of_memory();
3952 return Failure::OutOfMemoryException(0x6);
3955 // For small strings we check whether the resource contains only
3956 // one byte characters. If yes, we use a different string map.
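// (The check is limited to short resources, presumably so the O(length) scan
// stays cheap; longer external strings simply keep the generic two-byte map.)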
3957 static const size_t kOneByteCheckLengthLimit = 32;
3958 bool is_one_byte = length <= kOneByteCheckLengthLimit &&
3959 String::IsOneByte(resource->data(), static_cast<int>(length));
3960 Map* map = is_one_byte ?
3961 external_string_with_one_byte_data_map() : external_string_map();
3963 { MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
3964 if (!maybe_result->ToObject(&result)) return maybe_result;
3967 ExternalTwoByteString* external_string = ExternalTwoByteString::cast(result);
3968 external_string->set_length(static_cast<int>(length));
3969 external_string->set_hash_field(String::kEmptyHashField);
3970 external_string->set_resource(resource);
3976 MaybeObject* Heap::LookupSingleCharacterStringFromCode(uint16_t code) {
3977 if (code <= String::kMaxOneByteCharCode) {
3978 Object* value = single_character_string_cache()->get(code);
3979 if (value != undefined_value()) return value;
3982 buffer[0] = static_cast<uint8_t>(code);
3984 MaybeObject* maybe_result =
3985 InternalizeOneByteString(Vector<const uint8_t>(buffer, 1));
3987 if (!maybe_result->ToObject(&result)) return maybe_result;
3988 single_character_string_cache()->set(code, result);
3993 { MaybeObject* maybe_result = AllocateRawTwoByteString(1);
3994 if (!maybe_result->ToObject(&result)) return maybe_result;
3996 String* answer = String::cast(result);
3997 answer->Set(0, code);
4002 MaybeObject* Heap::AllocateByteArray(int length, PretenureFlag pretenure) {
4003 if (length < 0 || length > ByteArray::kMaxLength) {
4004 return Failure::OutOfMemoryException(0x7);
4006 if (pretenure == NOT_TENURED) {
4007 return AllocateByteArray(length);
4009 int size = ByteArray::SizeFor(length);
4011 { MaybeObject* maybe_result = (size <= Page::kMaxNonCodeHeapObjectSize)
4012 ? old_data_space_->AllocateRaw(size)
4013 : lo_space_->AllocateRaw(size, NOT_EXECUTABLE);
4014 if (!maybe_result->ToObject(&result)) return maybe_result;
4017 reinterpret_cast<ByteArray*>(result)->set_map_no_write_barrier(
4019 reinterpret_cast<ByteArray*>(result)->set_length(length);
4024 MaybeObject* Heap::AllocateByteArray(int length) {
4025 if (length < 0 || length > ByteArray::kMaxLength) {
4026 return Failure::OutOfMemoryException(0x8);
4028 int size = ByteArray::SizeFor(length);
4029 AllocationSpace space =
4030 (size > Page::kMaxNonCodeHeapObjectSize) ? LO_SPACE : NEW_SPACE;
4032 { MaybeObject* maybe_result = AllocateRaw(size, space, OLD_DATA_SPACE);
4033 if (!maybe_result->ToObject(&result)) return maybe_result;
4036 reinterpret_cast<ByteArray*>(result)->set_map_no_write_barrier(
4038 reinterpret_cast<ByteArray*>(result)->set_length(length);
4043 void Heap::CreateFillerObjectAt(Address addr, int size) {
4044 if (size == 0) return;
4045 HeapObject* filler = HeapObject::FromAddress(addr);
4046 if (size == kPointerSize) {
4047 filler->set_map_no_write_barrier(one_pointer_filler_map());
4048 } else if (size == 2 * kPointerSize) {
4049 filler->set_map_no_write_barrier(two_pointer_filler_map());
4051 filler->set_map_no_write_barrier(free_space_map());
4052 FreeSpace::cast(filler)->set_size(size);
4057 MaybeObject* Heap::AllocateExternalArray(int length,
4058 ExternalArrayType array_type,
4059 void* external_pointer,
4060 PretenureFlag pretenure) {
4061 AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
4063 { MaybeObject* maybe_result = AllocateRaw(ExternalArray::kAlignedSize,
4066 if (!maybe_result->ToObject(&result)) return maybe_result;
4069 reinterpret_cast<ExternalArray*>(result)->set_map_no_write_barrier(
4070 MapForExternalArrayType(array_type));
4071 reinterpret_cast<ExternalArray*>(result)->set_length(length);
4072 reinterpret_cast<ExternalArray*>(result)->set_external_pointer(
4079 MaybeObject* Heap::CreateCode(const CodeDesc& desc,
4081 Handle<Object> self_reference,
4083 bool crankshafted) {
4084 // Allocate ByteArray before the Code object, so that we do not risk
4085 // leaving an uninitialized Code object (and breaking the heap).
4086 ByteArray* reloc_info;
4087 MaybeObject* maybe_reloc_info = AllocateByteArray(desc.reloc_size, TENURED);
4088 if (!maybe_reloc_info->To(&reloc_info)) return maybe_reloc_info;
4091 int body_size = RoundUp(desc.instr_size, kObjectAlignment);
4092 int obj_size = Code::SizeFor(body_size);
4093 ASSERT(IsAligned(static_cast<intptr_t>(obj_size), kCodeAlignment));
4094 MaybeObject* maybe_result;
4095 // Large code objects and code objects which should stay at a fixed address
4096 // are allocated in large object space.
4098 bool force_lo_space = obj_size > code_space()->AreaSize();
4099 if (force_lo_space) {
4100 maybe_result = lo_space_->AllocateRaw(obj_size, EXECUTABLE);
4102 maybe_result = code_space_->AllocateRaw(obj_size);
4104 if (!maybe_result->To<HeapObject>(&result)) return maybe_result;
4106 if (immovable && !force_lo_space &&
4107 // Objects on the first page of each space are never moved.
4108 !code_space_->FirstPage()->Contains(result->address())) {
4109 // Discard the first code allocation, which was on a page where it could be moved.
4111 CreateFillerObjectAt(result->address(), obj_size);
4112 maybe_result = lo_space_->AllocateRaw(obj_size, EXECUTABLE);
4113 if (!maybe_result->To<HeapObject>(&result)) return maybe_result;
4116 // Initialize the object
4117 result->set_map_no_write_barrier(code_map());
4118 Code* code = Code::cast(result);
4119 ASSERT(!isolate_->code_range()->exists() ||
4120 isolate_->code_range()->contains(code->address()));
4121 code->set_instruction_size(desc.instr_size);
4122 code->set_relocation_info(reloc_info);
4123 code->set_flags(flags);
4124 if (code->is_call_stub() || code->is_keyed_call_stub()) {
4125 code->set_check_type(RECEIVER_MAP_CHECK);
4127 code->set_is_crankshafted(crankshafted);
4128 code->set_deoptimization_data(empty_fixed_array(), SKIP_WRITE_BARRIER);
4129 code->InitializeTypeFeedbackInfoNoWriteBarrier(undefined_value());
4130 code->set_handler_table(empty_fixed_array(), SKIP_WRITE_BARRIER);
4131 code->set_gc_metadata(Smi::FromInt(0));
4132 code->set_ic_age(global_ic_age_);
4133 code->set_prologue_offset(kPrologueOffsetNotSet);
4134 if (code->kind() == Code::OPTIMIZED_FUNCTION) {
4135 code->set_marked_for_deoptimization(false);
4137 // Allow self-references to the created code object by patching the handle to
4138 // point to the newly allocated Code object.
4139 if (!self_reference.is_null()) {
4140 *(self_reference.location()) = code;
4142 // Migrate generated code.
4143 // The generated code can contain Object** values (typically from handles)
4144 // that are dereferenced during the copy to point directly to the actual heap
4145 // objects. These pointers can include references to the code object itself,
4146 // through the self_reference parameter.
4147 code->CopyFrom(desc);
4150 if (FLAG_verify_heap) {
4158 MaybeObject* Heap::CopyCode(Code* code) {
4159 // Allocate an object the same size as the code object.
4160 int obj_size = code->Size();
4161 MaybeObject* maybe_result;
4162 if (obj_size > code_space()->AreaSize()) {
4163 maybe_result = lo_space_->AllocateRaw(obj_size, EXECUTABLE);
4165 maybe_result = code_space_->AllocateRaw(obj_size);
4169 if (!maybe_result->ToObject(&result)) return maybe_result;
4171 // Copy code object.
4172 Address old_addr = code->address();
4173 Address new_addr = reinterpret_cast<HeapObject*>(result)->address();
4174 CopyBlock(new_addr, old_addr, obj_size);
4175 // Relocate the copy.
4176 Code* new_code = Code::cast(result);
4177 ASSERT(!isolate_->code_range()->exists() ||
4178 isolate_->code_range()->contains(code->address()));
4179 new_code->Relocate(new_addr - old_addr);
4184 MaybeObject* Heap::CopyCode(Code* code, Vector<byte> reloc_info) {
4185 // Allocate ByteArray before the Code object, so that we do not risk
4186 // leaving an uninitialized Code object (and breaking the heap).
4187 Object* reloc_info_array;
4188 { MaybeObject* maybe_reloc_info_array =
4189 AllocateByteArray(reloc_info.length(), TENURED);
4190 if (!maybe_reloc_info_array->ToObject(&reloc_info_array)) {
4191 return maybe_reloc_info_array;
4195 int new_body_size = RoundUp(code->instruction_size(), kObjectAlignment);
4197 int new_obj_size = Code::SizeFor(new_body_size);
4199 Address old_addr = code->address();
4201 size_t relocation_offset =
4202 static_cast<size_t>(code->instruction_end() - old_addr);
4204 MaybeObject* maybe_result;
4205 if (new_obj_size > code_space()->AreaSize()) {
4206 maybe_result = lo_space_->AllocateRaw(new_obj_size, EXECUTABLE);
4208 maybe_result = code_space_->AllocateRaw(new_obj_size);
4212 if (!maybe_result->ToObject(&result)) return maybe_result;
4214 // Copy code object.
4215 Address new_addr = reinterpret_cast<HeapObject*>(result)->address();
4217 // Copy header and instructions.
4218 CopyBytes(new_addr, old_addr, relocation_offset);
4220 Code* new_code = Code::cast(result);
4221 new_code->set_relocation_info(ByteArray::cast(reloc_info_array));
4223 // Copy patched rinfo.
4224 CopyBytes(new_code->relocation_start(),
4226 static_cast<size_t>(reloc_info.length()));
4228 // Relocate the copy.
4229 ASSERT(!isolate_->code_range()->exists() ||
4230 isolate_->code_range()->contains(code->address()));
4231 new_code->Relocate(new_addr - old_addr);
4234 if (FLAG_verify_heap) {
4242 MaybeObject* Heap::AllocateWithAllocationSite(Map* map, AllocationSpace space,
4243 Handle<AllocationSite> allocation_site) {
4244 ASSERT(gc_state_ == NOT_IN_GC);
4245 ASSERT(map->instance_type() != MAP_TYPE);
4246 // If allocation failures are disallowed, we may allocate in a different
4247 // space when new space is full and the object is not a large object.
4248 AllocationSpace retry_space =
4249 (space != NEW_SPACE) ? space : TargetSpaceId(map->instance_type());
4250 int size = map->instance_size() + AllocationMemento::kSize;
4252 MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
4253 if (!maybe_result->ToObject(&result)) return maybe_result;
4254 // No need for write barrier since object is white and map is in old space.
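// (A freshly allocated object is still white for the incremental marker, and
// storing an old-space map cannot create an old-to-new pointer, so neither
// the marking barrier nor the store buffer needs to see this write.)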
4255 HeapObject::cast(result)->set_map_no_write_barrier(map);
4256 AllocationMemento* alloc_memento = reinterpret_cast<AllocationMemento*>(
4257 reinterpret_cast<Address>(result) + map->instance_size());
4258 alloc_memento->set_map_no_write_barrier(allocation_memento_map());
4259 alloc_memento->set_allocation_site(*allocation_site, SKIP_WRITE_BARRIER);
4264 MaybeObject* Heap::Allocate(Map* map, AllocationSpace space) {
4265 ASSERT(gc_state_ == NOT_IN_GC);
4266 ASSERT(map->instance_type() != MAP_TYPE);
4267 // If allocation failures are disallowed, we may allocate in a different
4268 // space when new space is full and the object is not a large object.
4269 AllocationSpace retry_space =
4270 (space != NEW_SPACE) ? space : TargetSpaceId(map->instance_type());
4271 int size = map->instance_size();
4273 MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
4274 if (!maybe_result->ToObject(&result)) return maybe_result;
4275 // No need for write barrier since object is white and map is in old space.
4276 HeapObject::cast(result)->set_map_no_write_barrier(map);
4281 void Heap::InitializeFunction(JSFunction* function,
4282 SharedFunctionInfo* shared,
4283 Object* prototype) {
4284 ASSERT(!prototype->IsMap());
4285 function->initialize_properties();
4286 function->initialize_elements();
4287 function->set_shared(shared);
4288 function->set_code(shared->code());
4289 function->set_prototype_or_initial_map(prototype);
4290 function->set_context(undefined_value());
4291 function->set_literals_or_bindings(empty_fixed_array());
4292 function->set_next_function_link(undefined_value());
4296 MaybeObject* Heap::AllocateFunctionPrototype(JSFunction* function) {
4297 // Make sure to use globals from the function's context, since the function
4298 // can be from a different context.
4299 Context* native_context = function->context()->native_context();
4301 if (function->shared()->is_generator()) {
4302 // Generator prototypes can share maps since they don't have "constructor"
// properties.
4304 new_map = native_context->generator_object_prototype_map();
4306 // Each function prototype gets a fresh map to avoid unwanted sharing of
4307 // maps between prototypes of different constructors.
4308 JSFunction* object_function = native_context->object_function();
4309 ASSERT(object_function->has_initial_map());
4310 MaybeObject* maybe_map = object_function->initial_map()->Copy();
4311 if (!maybe_map->To(&new_map)) return maybe_map;
4315 MaybeObject* maybe_prototype = AllocateJSObjectFromMap(new_map);
4316 if (!maybe_prototype->ToObject(&prototype)) return maybe_prototype;
4318 if (!function->shared()->is_generator()) {
4319 MaybeObject* maybe_failure =
4320 JSObject::cast(prototype)->SetLocalPropertyIgnoreAttributes(
4321 constructor_string(), function, DONT_ENUM);
4322 if (maybe_failure->IsFailure()) return maybe_failure;
4329 MaybeObject* Heap::AllocateFunction(Map* function_map,
4330 SharedFunctionInfo* shared,
4332 PretenureFlag pretenure) {
4333 AllocationSpace space =
4334 (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
4336 { MaybeObject* maybe_result = Allocate(function_map, space);
4337 if (!maybe_result->ToObject(&result)) return maybe_result;
4339 InitializeFunction(JSFunction::cast(result), shared, prototype);
4344 MaybeObject* Heap::AllocateArgumentsObject(Object* callee, int length) {
4345 // To get fast allocation and map sharing for arguments objects we
4346 // allocate them based on an arguments boilerplate.
4348 JSObject* boilerplate;
4349 int arguments_object_size;
4350 bool strict_mode_callee = callee->IsJSFunction() &&
4351 !JSFunction::cast(callee)->shared()->is_classic_mode();
4352 if (strict_mode_callee) {
4354 isolate()->context()->native_context()->
4355 strict_mode_arguments_boilerplate();
4356 arguments_object_size = kArgumentsObjectSizeStrict;
4359 isolate()->context()->native_context()->arguments_boilerplate();
4360 arguments_object_size = kArgumentsObjectSize;
4363 // This calls Copy directly rather than using Heap::AllocateRaw so we
4364 // duplicate the check here.
4365 ASSERT(AllowHeapAllocation::IsAllowed() && gc_state_ == NOT_IN_GC);
4367 // Check that the size of the boilerplate matches our
4368 // expectations. The ArgumentsAccessStub::GenerateNewObject relies
4369 // on the size being a known constant.
4370 ASSERT(arguments_object_size == boilerplate->map()->instance_size());
4372 // Do the allocation.
4374 { MaybeObject* maybe_result =
4375 AllocateRaw(arguments_object_size, NEW_SPACE, OLD_POINTER_SPACE);
4376 if (!maybe_result->ToObject(&result)) return maybe_result;
4379 // Copy the content. The arguments boilerplate doesn't have any
4380 // fields that point to new space, so it's safe to skip the write barrier.
4382 CopyBlock(HeapObject::cast(result)->address(),
4383 boilerplate->address(),
4384 JSObject::kHeaderSize);
4386 // Set the length property.
4387 JSObject::cast(result)->InObjectPropertyAtPut(kArgumentsLengthIndex,
4388 Smi::FromInt(length),
4389 SKIP_WRITE_BARRIER);
4390 // Set the callee property for non-strict mode arguments object only.
4391 if (!strict_mode_callee) {
4392 JSObject::cast(result)->InObjectPropertyAtPut(kArgumentsCalleeIndex,
4396 // Check the state of the object
4397 ASSERT(JSObject::cast(result)->HasFastProperties());
4398 ASSERT(JSObject::cast(result)->HasFastObjectElements());
4404 MaybeObject* Heap::AllocateInitialMap(JSFunction* fun) {
4405 ASSERT(!fun->has_initial_map());
4407 // First create a new map with the size and number of in-object properties
4408 // suggested by the function.
4409 InstanceType instance_type;
4411 int in_object_properties;
4412 if (fun->shared()->is_generator()) {
4413 instance_type = JS_GENERATOR_OBJECT_TYPE;
4414 instance_size = JSGeneratorObject::kSize;
4415 in_object_properties = 0;
4417 instance_type = JS_OBJECT_TYPE;
4418 instance_size = fun->shared()->CalculateInstanceSize();
4419 in_object_properties = fun->shared()->CalculateInObjectProperties();
4422 MaybeObject* maybe_map = AllocateMap(instance_type, instance_size);
4423 if (!maybe_map->To(&map)) return maybe_map;
4425 // Fetch or allocate prototype.
4427 if (fun->has_instance_prototype()) {
4428 prototype = fun->instance_prototype();
4430 MaybeObject* maybe_prototype = AllocateFunctionPrototype(fun);
4431 if (!maybe_prototype->To(&prototype)) return maybe_prototype;
4433 map->set_inobject_properties(in_object_properties);
4434 map->set_unused_property_fields(in_object_properties);
4435 map->set_prototype(prototype);
4436 ASSERT(map->has_fast_object_elements());
4438 if (!fun->shared()->is_generator()) {
4439 fun->shared()->StartInobjectSlackTracking(map);
4446 void Heap::InitializeJSObjectFromMap(JSObject* obj,
4447 FixedArray* properties,
4449 obj->set_properties(properties);
4450 obj->initialize_elements();
4451 // TODO(1240798): Initialize the object's body using valid initial values
4452 // according to the object's initial map. For example, if the map's
4453 // instance type is JS_ARRAY_TYPE, the length field should be initialized
4454 // to a number (e.g. Smi::FromInt(0)) and the elements initialized to a
4455 // fixed array (e.g. Heap::empty_fixed_array()). Currently, the object
4456 // verification code has to cope with (temporarily) invalid objects. See
4457 // for example, JSArray::JSArrayVerify().
4459 // We cannot always fill with one_pointer_filler_map because objects
4460 // created from API functions expect their internal fields to be initialized
4461 // with undefined_value.
4462 // Pre-allocated fields need to be initialized with undefined_value as well
4463 // so that object accesses before the constructor completes (e.g. in the
4464 // debugger) will not cause a crash.
4465 if (map->constructor()->IsJSFunction() &&
4466 JSFunction::cast(map->constructor())->shared()->
4467 IsInobjectSlackTrackingInProgress()) {
4468 // We might want to shrink the object later.
4469 ASSERT(obj->GetInternalFieldCount() == 0);
4470 filler = Heap::one_pointer_filler_map();
4472 filler = Heap::undefined_value();
4474 obj->InitializeBody(map, Heap::undefined_value(), filler);
4478 MaybeObject* Heap::AllocateJSObjectFromMap(Map* map, PretenureFlag pretenure) {
4479 // JSFunctions should be allocated using AllocateFunction to be
4480 // properly initialized.
4481 ASSERT(map->instance_type() != JS_FUNCTION_TYPE);
4483 // Both types of global objects should be allocated using
4484 // AllocateGlobalObject to be properly initialized.
4485 ASSERT(map->instance_type() != JS_GLOBAL_OBJECT_TYPE);
4486 ASSERT(map->instance_type() != JS_BUILTINS_OBJECT_TYPE);
4488 // Allocate the backing storage for the properties.
4489 int prop_size = map->InitialPropertiesLength();
4490 ASSERT(prop_size >= 0);
4492 { MaybeObject* maybe_properties = AllocateFixedArray(prop_size, pretenure);
4493 if (!maybe_properties->ToObject(&properties)) return maybe_properties;
4496 // Allocate the JSObject.
4497 AllocationSpace space =
4498 (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
4499 if (map->instance_size() > Page::kMaxNonCodeHeapObjectSize) space = LO_SPACE;
4501 MaybeObject* maybe_obj = Allocate(map, space);
4502 if (!maybe_obj->To(&obj)) return maybe_obj;
4504 // Initialize the JSObject.
4505 InitializeJSObjectFromMap(JSObject::cast(obj),
4506 FixedArray::cast(properties),
4508 ASSERT(JSObject::cast(obj)->HasFastElements() ||
4509 JSObject::cast(obj)->HasExternalArrayElements());
4514 MaybeObject* Heap::AllocateJSObjectFromMapWithAllocationSite(Map* map,
4515 Handle<AllocationSite> allocation_site) {
4516 // JSFunctions should be allocated using AllocateFunction to be
4517 // properly initialized.
4518 ASSERT(map->instance_type() != JS_FUNCTION_TYPE);
4520 // Both types of global objects should be allocated using
4521 // AllocateGlobalObject to be properly initialized.
4522 ASSERT(map->instance_type() != JS_GLOBAL_OBJECT_TYPE);
4523 ASSERT(map->instance_type() != JS_BUILTINS_OBJECT_TYPE);
4525 // Allocate the backing storage for the properties.
4526 int prop_size = map->InitialPropertiesLength();
4527 ASSERT(prop_size >= 0);
4529 { MaybeObject* maybe_properties = AllocateFixedArray(prop_size);
4530 if (!maybe_properties->ToObject(&properties)) return maybe_properties;
4533 // Allocate the JSObject.
4534 AllocationSpace space = NEW_SPACE;
4535 if (map->instance_size() > Page::kMaxNonCodeHeapObjectSize) space = LO_SPACE;
4537 MaybeObject* maybe_obj =
4538 AllocateWithAllocationSite(map, space, allocation_site);
4539 if (!maybe_obj->To(&obj)) return maybe_obj;
4541 // Initialize the JSObject.
4542 InitializeJSObjectFromMap(JSObject::cast(obj),
4543 FixedArray::cast(properties),
4545 ASSERT(JSObject::cast(obj)->HasFastElements());
4550 MaybeObject* Heap::AllocateJSObject(JSFunction* constructor,
4551 PretenureFlag pretenure) {
4552 // Allocate the initial map if absent.
4553 if (!constructor->has_initial_map()) {
4554 Object* initial_map;
4555 { MaybeObject* maybe_initial_map = AllocateInitialMap(constructor);
4556 if (!maybe_initial_map->ToObject(&initial_map)) return maybe_initial_map;
4558 constructor->set_initial_map(Map::cast(initial_map));
4559 Map::cast(initial_map)->set_constructor(constructor);
4561 // Allocate the object based on the constructor's initial map.
4562 MaybeObject* result = AllocateJSObjectFromMap(
4563 constructor->initial_map(), pretenure);
4565 // Make sure result is NOT a global object if valid.
4566 Object* non_failure;
4567 ASSERT(!result->ToObject(&non_failure) || !non_failure->IsGlobalObject());
4573 MaybeObject* Heap::AllocateJSObjectWithAllocationSite(JSFunction* constructor,
4574 Handle<AllocationSite> allocation_site) {
4575 // Allocate the initial map if absent.
4576 if (!constructor->has_initial_map()) {
4577 Object* initial_map;
4578 { MaybeObject* maybe_initial_map = AllocateInitialMap(constructor);
4579 if (!maybe_initial_map->ToObject(&initial_map)) return maybe_initial_map;
4581 constructor->set_initial_map(Map::cast(initial_map));
4582 Map::cast(initial_map)->set_constructor(constructor);
4584 // Allocate the object based on the constructor's initial map, or on the
// elements kind recorded in the allocation site's transition_info.
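// (If the transition_info on the allocation site records an elements kind
// different from the initial map's, the map is first switched to that kind so
// the new object starts out with the observed kind; GetMode(to_kind) then
// decides whether the site still needs to be tracked.)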
4586 Map* initial_map = constructor->initial_map();
4588 Smi* smi = Smi::cast(allocation_site->transition_info());
4589 ElementsKind to_kind = static_cast<ElementsKind>(smi->value());
4590 AllocationSiteMode mode = TRACK_ALLOCATION_SITE;
4591 if (to_kind != initial_map->elements_kind()) {
4592 MaybeObject* maybe_new_map = initial_map->AsElementsKind(to_kind);
4593 if (!maybe_new_map->To(&initial_map)) return maybe_new_map;
4594 // Possibly alter the mode, since we found an updated elements kind
4595 // in the type info cell.
4596 mode = AllocationSite::GetMode(to_kind);
4599 MaybeObject* result;
4600 if (mode == TRACK_ALLOCATION_SITE) {
4601 result = AllocateJSObjectFromMapWithAllocationSite(initial_map,
4604 result = AllocateJSObjectFromMap(initial_map, NOT_TENURED);
4607 // Make sure result is NOT a global object if valid.
4608 Object* non_failure;
4609 ASSERT(!result->ToObject(&non_failure) || !non_failure->IsGlobalObject());
4615 MaybeObject* Heap::AllocateJSGeneratorObject(JSFunction *function) {
4616 ASSERT(function->shared()->is_generator());
4618 if (function->has_initial_map()) {
4619 map = function->initial_map();
4621 // Allocate the initial map if absent.
4622 MaybeObject* maybe_map = AllocateInitialMap(function);
4623 if (!maybe_map->To(&map)) return maybe_map;
4624 function->set_initial_map(map);
4625 map->set_constructor(function);
4627 ASSERT(map->instance_type() == JS_GENERATOR_OBJECT_TYPE);
4628 return AllocateJSObjectFromMap(map);
4632 MaybeObject* Heap::AllocateJSModule(Context* context, ScopeInfo* scope_info) {
4633 // Allocate a fresh map. Modules do not have a prototype.
4635 MaybeObject* maybe_map = AllocateMap(JS_MODULE_TYPE, JSModule::kSize);
4636 if (!maybe_map->To(&map)) return maybe_map;
4637 // Allocate the object based on the map.
4639 MaybeObject* maybe_module = AllocateJSObjectFromMap(map, TENURED);
4640 if (!maybe_module->To(&module)) return maybe_module;
4641 module->set_context(context);
4642 module->set_scope_info(scope_info);
4647 MaybeObject* Heap::AllocateJSArrayAndStorage(
4648 ElementsKind elements_kind,
4651 ArrayStorageAllocationMode mode,
4652 PretenureFlag pretenure) {
4653 MaybeObject* maybe_array = AllocateJSArray(elements_kind, pretenure);
4655 if (!maybe_array->To(&array)) return maybe_array;
4657 // TODO(mvstanton): this body of code is duplicated in AllocateJSArrayStorage
4658 // for performance reasons.
4659 ASSERT(capacity >= length);
4661 if (capacity == 0) {
4662 array->set_length(Smi::FromInt(0));
4663 array->set_elements(empty_fixed_array());
4667 FixedArrayBase* elms;
4668 MaybeObject* maybe_elms = NULL;
4669 if (IsFastDoubleElementsKind(elements_kind)) {
4670 if (mode == DONT_INITIALIZE_ARRAY_ELEMENTS) {
4671 maybe_elms = AllocateUninitializedFixedDoubleArray(capacity);
4673 ASSERT(mode == INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
4674 maybe_elms = AllocateFixedDoubleArrayWithHoles(capacity);
4677 ASSERT(IsFastSmiOrObjectElementsKind(elements_kind));
4678 if (mode == DONT_INITIALIZE_ARRAY_ELEMENTS) {
4679 maybe_elms = AllocateUninitializedFixedArray(capacity);
4681 ASSERT(mode == INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
4682 maybe_elms = AllocateFixedArrayWithHoles(capacity);
4685 if (!maybe_elms->To(&elms)) return maybe_elms;
4687 array->set_elements(elms);
4688 array->set_length(Smi::FromInt(length));
4693 MaybeObject* Heap::AllocateJSArrayAndStorageWithAllocationSite(
4694 ElementsKind elements_kind,
4697 Handle<AllocationSite> allocation_site,
4698 ArrayStorageAllocationMode mode) {
4699 MaybeObject* maybe_array = AllocateJSArrayWithAllocationSite(elements_kind,
4702 if (!maybe_array->To(&array)) return maybe_array;
4703 return AllocateJSArrayStorage(array, length, capacity, mode);
4707 MaybeObject* Heap::AllocateJSArrayStorage(
4711 ArrayStorageAllocationMode mode) {
4712 ASSERT(capacity >= length);
4714 if (capacity == 0) {
4715 array->set_length(Smi::FromInt(0));
4716 array->set_elements(empty_fixed_array());
4720 FixedArrayBase* elms;
4721 MaybeObject* maybe_elms = NULL;
4722 ElementsKind elements_kind = array->GetElementsKind();
4723 if (IsFastDoubleElementsKind(elements_kind)) {
4724 if (mode == DONT_INITIALIZE_ARRAY_ELEMENTS) {
4725 maybe_elms = AllocateUninitializedFixedDoubleArray(capacity);
4727 ASSERT(mode == INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
4728 maybe_elms = AllocateFixedDoubleArrayWithHoles(capacity);
4731 ASSERT(IsFastSmiOrObjectElementsKind(elements_kind));
4732 if (mode == DONT_INITIALIZE_ARRAY_ELEMENTS) {
4733 maybe_elms = AllocateUninitializedFixedArray(capacity);
4735 ASSERT(mode == INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
4736 maybe_elms = AllocateFixedArrayWithHoles(capacity);
4739 if (!maybe_elms->To(&elms)) return maybe_elms;
4741 array->set_elements(elms);
4742 array->set_length(Smi::FromInt(length));
4747 MaybeObject* Heap::AllocateJSArrayWithElements(
4748 FixedArrayBase* elements,
4749 ElementsKind elements_kind,
4751 PretenureFlag pretenure) {
4752 MaybeObject* maybe_array = AllocateJSArray(elements_kind, pretenure);
4754 if (!maybe_array->To(&array)) return maybe_array;
4756 array->set_elements(elements);
4757 array->set_length(Smi::FromInt(length));
4758 array->ValidateElements();
4763 MaybeObject* Heap::AllocateJSProxy(Object* handler, Object* prototype) {
4765 // TODO(rossberg): Once we optimize proxies, think about a scheme to share
4766 // maps. Will probably depend on the identity of the handler object, too.
4768 MaybeObject* maybe_map_obj = AllocateMap(JS_PROXY_TYPE, JSProxy::kSize);
4769 if (!maybe_map_obj->To<Map>(&map)) return maybe_map_obj;
4770 map->set_prototype(prototype);
4772 // Allocate the proxy object.
4774 MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
4775 if (!maybe_result->To<JSProxy>(&result)) return maybe_result;
4776 result->InitializeBody(map->instance_size(), Smi::FromInt(0));
4777 result->set_handler(handler);
4778 result->set_hash(undefined_value(), SKIP_WRITE_BARRIER);
4783 MaybeObject* Heap::AllocateJSFunctionProxy(Object* handler,
4785 Object* construct_trap,
4786 Object* prototype) {
4788 // TODO(rossberg): Once we optimize proxies, think about a scheme to share
4789 // maps. Will probably depend on the identity of the handler object, too.
4791 MaybeObject* maybe_map_obj =
4792 AllocateMap(JS_FUNCTION_PROXY_TYPE, JSFunctionProxy::kSize);
4793 if (!maybe_map_obj->To<Map>(&map)) return maybe_map_obj;
4794 map->set_prototype(prototype);
4796 // Allocate the proxy object.
4797 JSFunctionProxy* result;
4798 MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
4799 if (!maybe_result->To<JSFunctionProxy>(&result)) return maybe_result;
4800 result->InitializeBody(map->instance_size(), Smi::FromInt(0));
4801 result->set_handler(handler);
4802 result->set_hash(undefined_value(), SKIP_WRITE_BARRIER);
4803 result->set_call_trap(call_trap);
4804 result->set_construct_trap(construct_trap);
4809 MaybeObject* Heap::AllocateGlobalObject(JSFunction* constructor) {
4810 ASSERT(constructor->has_initial_map());
4811 Map* map = constructor->initial_map();
4812 ASSERT(map->is_dictionary_map());
4814 // Make sure no field properties are described in the initial map.
4815 // This guarantees us that normalizing the properties does not
4816 // require us to change property values to PropertyCells.
4817 ASSERT(map->NextFreePropertyIndex() == 0);
4819 // Make sure we don't have a ton of pre-allocated slots in the
4820 // global objects. They will be unused once we normalize the object.
4821 ASSERT(map->unused_property_fields() == 0);
4822 ASSERT(map->inobject_properties() == 0);
4824 // Initial size of the backing store to avoid resize of the storage during
4825 // bootstrapping. The size differs between the JS global object and the
// builtins object.
4827 int initial_size = map->instance_type() == JS_GLOBAL_OBJECT_TYPE ? 64 : 512;
4829 // Allocate a dictionary object for backing storage.
4830 NameDictionary* dictionary;
4831 MaybeObject* maybe_dictionary =
4832 NameDictionary::Allocate(
4834 map->NumberOfOwnDescriptors() * 2 + initial_size);
4835 if (!maybe_dictionary->To(&dictionary)) return maybe_dictionary;
4837 // The global object might be created from an object template with accessors.
4838 // Fill these accessors into the dictionary.
4839 DescriptorArray* descs = map->instance_descriptors();
4840 for (int i = 0; i < map->NumberOfOwnDescriptors(); i++) {
4841 PropertyDetails details = descs->GetDetails(i);
4842 ASSERT(details.type() == CALLBACKS); // Only accessors are expected.
4843 PropertyDetails d = PropertyDetails(details.attributes(), CALLBACKS, i + 1);
4844 Object* value = descs->GetCallbacksObject(i);
4845 MaybeObject* maybe_value = AllocatePropertyCell(value);
4846 if (!maybe_value->ToObject(&value)) return maybe_value;
4848 MaybeObject* maybe_added = dictionary->Add(descs->GetKey(i), value, d);
4849 if (!maybe_added->To(&dictionary)) return maybe_added;
4852 // Allocate the global object and initialize it with the backing store.
4854 MaybeObject* maybe_global = Allocate(map, OLD_POINTER_SPACE);
4855 if (!maybe_global->To(&global)) return maybe_global;
4857 InitializeJSObjectFromMap(global, dictionary, map);
4859 // Create a new map for the global object.
4861 MaybeObject* maybe_map = map->CopyDropDescriptors();
4862 if (!maybe_map->To(&new_map)) return maybe_map;
4863 new_map->set_dictionary_map(true);
4865 // Set up the global object as a normalized object.
4866 global->set_map(new_map);
4867 global->set_properties(dictionary);
4869 // Make sure result is a global object with properties in dictionary.
4870 ASSERT(global->IsGlobalObject());
4871 ASSERT(!global->HasFastProperties());
4876 MaybeObject* Heap::CopyJSObject(JSObject* source) {
4877 // Never used to copy functions. If functions need to be copied we
4878 // have to be careful to clear the literals array.
4879 SLOW_ASSERT(!source->IsJSFunction());
4882 Map* map = source->map();
4883 int object_size = map->instance_size();
4886 WriteBarrierMode wb_mode = UPDATE_WRITE_BARRIER;
4888 // If we're forced to always allocate, we use the general allocation
4889 // functions which may leave us with an object in old space.
4890 if (always_allocate()) {
4891 { MaybeObject* maybe_clone =
4892 AllocateRaw(object_size, NEW_SPACE, OLD_POINTER_SPACE);
4893 if (!maybe_clone->ToObject(&clone)) return maybe_clone;
4895 Address clone_address = HeapObject::cast(clone)->address();
4896 CopyBlock(clone_address,
4899 // Update write barrier for all fields that lie beyond the header.
4900 RecordWrites(clone_address,
4901 JSObject::kHeaderSize,
4902 (object_size - JSObject::kHeaderSize) / kPointerSize);
4904 wb_mode = SKIP_WRITE_BARRIER;
4906 { MaybeObject* maybe_clone = new_space_.AllocateRaw(object_size);
4907 if (!maybe_clone->ToObject(&clone)) return maybe_clone;
4909 SLOW_ASSERT(InNewSpace(clone));
4910 // Since we know the clone is allocated in new space, we can copy
4911 // the contents without worrying about updating the write barrier.
4912 CopyBlock(HeapObject::cast(clone)->address(),
4918 JSObject::cast(clone)->GetElementsKind() == source->GetElementsKind());
4919 FixedArrayBase* elements = FixedArrayBase::cast(source->elements());
4920 FixedArray* properties = FixedArray::cast(source->properties());
4921 // Update elements if necessary.
4922 if (elements->length() > 0) {
4924 { MaybeObject* maybe_elem;
4925 if (elements->map() == fixed_cow_array_map()) {
4926 maybe_elem = FixedArray::cast(elements);
4927 } else if (source->HasFastDoubleElements()) {
4928 maybe_elem = CopyFixedDoubleArray(FixedDoubleArray::cast(elements));
4930 maybe_elem = CopyFixedArray(FixedArray::cast(elements));
4932 if (!maybe_elem->ToObject(&elem)) return maybe_elem;
4934 JSObject::cast(clone)->set_elements(FixedArrayBase::cast(elem), wb_mode);
4936 // Update properties if necessary.
4937 if (properties->length() > 0) {
4939 { MaybeObject* maybe_prop = CopyFixedArray(properties);
4940 if (!maybe_prop->ToObject(&prop)) return maybe_prop;
4942 JSObject::cast(clone)->set_properties(FixedArray::cast(prop), wb_mode);
4944 // Return the new clone.
4949 MaybeObject* Heap::CopyJSObjectWithAllocationSite(
4951 AllocationSite* site) {
4952 // Never used to copy functions. If functions need to be copied we
4953 // have to be careful to clear the literals array.
4954 SLOW_ASSERT(!source->IsJSFunction());
4957 Map* map = source->map();
4958 int object_size = map->instance_size();
4961 ASSERT(map->CanTrackAllocationSite());
4962 ASSERT(map->instance_type() == JS_ARRAY_TYPE);
4963 WriteBarrierMode wb_mode = UPDATE_WRITE_BARRIER;
4965 // If we're forced to always allocate, we use the general allocation
4966 // functions which may leave us with an object in old space.
4967 int adjusted_object_size = object_size;
4968 if (always_allocate()) {
4969 // We'll only track origin if we are certain to allocate in new space
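// (The memento is only appended inline when the combined allocation is
// certain to fit in new space; otherwise the code further below falls back to
// allocating the memento separately once the clone is known to be in new
// space.)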
4970 const int kMinFreeNewSpaceAfterGC = InitialSemiSpaceSize() * 3/4;
4971 if ((object_size + AllocationMemento::kSize) < kMinFreeNewSpaceAfterGC) {
4972 adjusted_object_size += AllocationMemento::kSize;
4975 { MaybeObject* maybe_clone =
4976 AllocateRaw(adjusted_object_size, NEW_SPACE, OLD_POINTER_SPACE);
4977 if (!maybe_clone->ToObject(&clone)) return maybe_clone;
4979 Address clone_address = HeapObject::cast(clone)->address();
4980 CopyBlock(clone_address,
4983 // Update write barrier for all fields that lie beyond the header.
4984 int write_barrier_offset = adjusted_object_size > object_size
4985 ? JSArray::kSize + AllocationMemento::kSize
4986 : JSObject::kHeaderSize;
4987 if (((object_size - write_barrier_offset) / kPointerSize) > 0) {
4988 RecordWrites(clone_address,
4989 write_barrier_offset,
4990 (object_size - write_barrier_offset) / kPointerSize);
4993 // Track allocation site information if we failed to allocate it inline.
4994 if (InNewSpace(clone) &&
4995 adjusted_object_size == object_size) {
4996 MaybeObject* maybe_alloc_memento =
4997 AllocateStruct(ALLOCATION_MEMENTO_TYPE);
4998 AllocationMemento* alloc_memento;
4999 if (maybe_alloc_memento->To(&alloc_memento)) {
5000 alloc_memento->set_map_no_write_barrier(allocation_memento_map());
5001 alloc_memento->set_allocation_site(site, SKIP_WRITE_BARRIER);
5005 wb_mode = SKIP_WRITE_BARRIER;
5006 adjusted_object_size += AllocationMemento::kSize;
5008 { MaybeObject* maybe_clone = new_space_.AllocateRaw(adjusted_object_size);
5009 if (!maybe_clone->ToObject(&clone)) return maybe_clone;
5011 SLOW_ASSERT(InNewSpace(clone));
5012 // Since we know the clone is allocated in new space, we can copy
5013 // the contents without worrying about updating the write barrier.
5014 CopyBlock(HeapObject::cast(clone)->address(),
5019 if (adjusted_object_size > object_size) {
5020 AllocationMemento* alloc_memento = reinterpret_cast<AllocationMemento*>(
5021 reinterpret_cast<Address>(clone) + object_size);
5022 alloc_memento->set_map_no_write_barrier(allocation_memento_map());
5023 alloc_memento->set_allocation_site(site, SKIP_WRITE_BARRIER);
5027 JSObject::cast(clone)->GetElementsKind() == source->GetElementsKind());
5028 FixedArrayBase* elements = FixedArrayBase::cast(source->elements());
5029 FixedArray* properties = FixedArray::cast(source->properties());
5030 // Update elements if necessary.
5031 if (elements->length() > 0) {
5033 { MaybeObject* maybe_elem;
5034 if (elements->map() == fixed_cow_array_map()) {
5035 maybe_elem = FixedArray::cast(elements);
5036 } else if (source->HasFastDoubleElements()) {
5037 maybe_elem = CopyFixedDoubleArray(FixedDoubleArray::cast(elements));
5039 maybe_elem = CopyFixedArray(FixedArray::cast(elements));
5041 if (!maybe_elem->ToObject(&elem)) return maybe_elem;
5043 JSObject::cast(clone)->set_elements(FixedArrayBase::cast(elem), wb_mode);
5045 // Update properties if necessary.
5046 if (properties->length() > 0) {
5048 { MaybeObject* maybe_prop = CopyFixedArray(properties);
5049 if (!maybe_prop->ToObject(&prop)) return maybe_prop;
5051 JSObject::cast(clone)->set_properties(FixedArray::cast(prop), wb_mode);
5053 // Return the new clone.
5058 MaybeObject* Heap::ReinitializeJSReceiver(
5059 JSReceiver* object, InstanceType type, int size) {
5060 ASSERT(type >= FIRST_JS_OBJECT_TYPE);
5062 // Allocate fresh map.
5063 // TODO(rossberg): Once we optimize proxies, cache these maps.
5065 MaybeObject* maybe = AllocateMap(type, size);
5066 if (!maybe->To<Map>(&map)) return maybe;
5068 // Check that the receiver has at least the size of the fresh object.
5069 int size_difference = object->map()->instance_size() - map->instance_size();
5070 ASSERT(size_difference >= 0);
5072 map->set_prototype(object->map()->prototype());
5074 // Allocate the backing storage for the properties.
5075 int prop_size = map->unused_property_fields() - map->inobject_properties();
5077 maybe = AllocateFixedArray(prop_size, TENURED);
5078 if (!maybe->ToObject(&properties)) return maybe;
5080 // Functions require some allocation, which might fail here.
5081 SharedFunctionInfo* shared = NULL;
5082 if (type == JS_FUNCTION_TYPE) {
5085 InternalizeOneByteString(STATIC_ASCII_VECTOR("<freezing call trap>"));
5086 if (!maybe->To<String>(&name)) return maybe;
5087 maybe = AllocateSharedFunctionInfo(name);
5088 if (!maybe->To<SharedFunctionInfo>(&shared)) return maybe;
5091 // Because of possible retries of this function after failure,
5092 // we must NOT fail after this point, where we have changed the type!
5094 // Reset the map for the object.
5095 object->set_map(map);
5096 JSObject* jsobj = JSObject::cast(object);
5098 // Reinitialize the object from the constructor map.
5099 InitializeJSObjectFromMap(jsobj, FixedArray::cast(properties), map);
5101 // Functions require some minimal initialization.
5102 if (type == JS_FUNCTION_TYPE) {
5103 map->set_function_with_prototype(true);
5104 InitializeFunction(JSFunction::cast(object), shared, the_hole_value());
5105 JSFunction::cast(object)->set_context(
5106 isolate()->context()->native_context());
5109 // Put in filler if the new object is smaller than the old.
5110 if (size_difference > 0) {
5111 CreateFillerObjectAt(
5112 object->address() + map->instance_size(), size_difference);
5119 MaybeObject* Heap::ReinitializeJSGlobalProxy(JSFunction* constructor,
5120 JSGlobalProxy* object) {
5121 ASSERT(constructor->has_initial_map());
5122 Map* map = constructor->initial_map();
5124 // Check that the already allocated object has the same size and type as
5125 // objects allocated using the constructor.
5126 ASSERT(map->instance_size() == object->map()->instance_size());
5127 ASSERT(map->instance_type() == object->map()->instance_type());
5129 // Allocate the backing storage for the properties.
5130 int prop_size = map->unused_property_fields() - map->inobject_properties();
5132 { MaybeObject* maybe_properties = AllocateFixedArray(prop_size, TENURED);
5133 if (!maybe_properties->ToObject(&properties)) return maybe_properties;
5136 // Reset the map for the object.
5137 object->set_map(constructor->initial_map());
5139 // Reinitialize the object from the constructor map.
5140 InitializeJSObjectFromMap(object, FixedArray::cast(properties), map);
5145 MaybeObject* Heap::AllocateStringFromOneByte(Vector<const uint8_t> string,
5146 PretenureFlag pretenure) {
5147 int length = string.length();
5149 return Heap::LookupSingleCharacterStringFromCode(string[0]);
5152 { MaybeObject* maybe_result =
5153 AllocateRawOneByteString(string.length(), pretenure);
5154 if (!maybe_result->ToObject(&result)) return maybe_result;
5157 // Copy the characters into the new object.
5158 CopyChars(SeqOneByteString::cast(result)->GetChars(),
5165 MaybeObject* Heap::AllocateStringFromUtf8Slow(Vector<const char> string,
5166 int non_ascii_start,
5167 PretenureFlag pretenure) {
5168 // Continue counting the number of characters in the UTF-8 string, starting
5169 // from the first non-ascii character or word.
5170 Access<UnicodeCache::Utf8Decoder>
5171 decoder(isolate_->unicode_cache()->utf8_decoder());
5172 decoder->Reset(string.start() + non_ascii_start,
5173 string.length() - non_ascii_start);
5174 int utf16_length = decoder->Utf16Length();
5175 ASSERT(utf16_length > 0);
5179 int chars = non_ascii_start + utf16_length;
5180 MaybeObject* maybe_result = AllocateRawTwoByteString(chars, pretenure);
5181 if (!maybe_result->ToObject(&result)) return maybe_result;
5183 // Convert and copy the characters into the new object.
5184 SeqTwoByteString* twobyte = SeqTwoByteString::cast(result);
5185 // Copy the ASCII portion.
5186 uint16_t* data = twobyte->GetChars();
5187 if (non_ascii_start != 0) {
5188 const char* ascii_data = string.start();
5189 for (int i = 0; i < non_ascii_start; i++) {
5190 *data++ = *ascii_data++;
5193 // Now write the remainder.
5194 decoder->WriteUtf16(data, utf16_length);
5199 MaybeObject* Heap::AllocateStringFromTwoByte(Vector<const uc16> string,
5200 PretenureFlag pretenure) {
5201 // Check if the string is an ASCII string.
5203 int length = string.length();
5204 const uc16* start = string.start();
5206 if (String::IsOneByte(start, length)) {
5207 MaybeObject* maybe_result = AllocateRawOneByteString(length, pretenure);
5208 if (!maybe_result->ToObject(&result)) return maybe_result;
5209 CopyChars(SeqOneByteString::cast(result)->GetChars(), start, length);
5210 } else { // It's not a one byte string.
5211 MaybeObject* maybe_result = AllocateRawTwoByteString(length, pretenure);
5212 if (!maybe_result->ToObject(&result)) return maybe_result;
5213 CopyChars(SeqTwoByteString::cast(result)->GetChars(), start, length);
5219 Map* Heap::InternalizedStringMapForString(String* string) {
5220 // If the string is in new space it cannot be used as internalized.
5221 if (InNewSpace(string)) return NULL;
5223 // Find the corresponding internalized string map for strings.
5224 switch (string->map()->instance_type()) {
5225 case STRING_TYPE: return internalized_string_map();
5226 case ASCII_STRING_TYPE: return ascii_internalized_string_map();
5227 case CONS_STRING_TYPE: return cons_internalized_string_map();
5228 case CONS_ASCII_STRING_TYPE: return cons_ascii_internalized_string_map();
5229 case EXTERNAL_STRING_TYPE: return external_internalized_string_map();
5230 case EXTERNAL_ASCII_STRING_TYPE:
5231 return external_ascii_internalized_string_map();
5232 case EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE:
5233 return external_internalized_string_with_one_byte_data_map();
5234 case SHORT_EXTERNAL_STRING_TYPE:
5235 return short_external_internalized_string_map();
5236 case SHORT_EXTERNAL_ASCII_STRING_TYPE:
5237 return short_external_ascii_internalized_string_map();
5238 case SHORT_EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE:
5239 return short_external_internalized_string_with_one_byte_data_map();
5240 default: return NULL; // No match found.
5245 static inline void WriteOneByteData(Vector<const char> vector,
5248 // Only works for ASCII.
5249 ASSERT(vector.length() == len);
5250 OS::MemCopy(chars, vector.start(), len);
5253 static inline void WriteTwoByteData(Vector<const char> vector,
5256 const uint8_t* stream = reinterpret_cast<const uint8_t*>(vector.start());
5257 unsigned stream_length = vector.length();
5258 while (stream_length != 0) {
5259 unsigned consumed = 0;
5260 uint32_t c = unibrow::Utf8::ValueOf(stream, stream_length, &consumed);
5261 ASSERT(c != unibrow::Utf8::kBadChar);
5262 ASSERT(consumed <= stream_length);
5263 stream_length -= consumed;
5265 if (c > unibrow::Utf16::kMaxNonSurrogateCharCode) {
// Characters outside the BMP need two UTF-16 code units.
5268 *chars++ = unibrow::Utf16::LeadSurrogate(c);
5269 *chars++ = unibrow::Utf16::TrailSurrogate(c);
} else {
*chars++ = c;
}
}
5276 ASSERT(stream_length == 0);
}
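// For illustration (not V8 code): the standard surrogate-pair encoding that
// LeadSurrogate()/TrailSurrogate() implement for code points above U+FFFF.
//
//   static void EncodeSurrogatePair(uint32_t c, uint16_t* lead, uint16_t* trail) {
//     // Valid only for c in [0x10000, 0x10FFFF].
//     uint32_t v = c - 0x10000;
//     *lead  = static_cast<uint16_t>(0xD800 + (v >> 10));    // high 10 bits
//     *trail = static_cast<uint16_t>(0xDC00 + (v & 0x3FF));  // low 10 bits
//   }
//
// For example, U+1F600 encodes as the pair 0xD83D 0xDE00.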
5281 static inline void WriteOneByteData(String* s, uint8_t* chars, int len) {
5282 ASSERT(s->length() == len);
5283 String::WriteToFlat(s, chars, 0, len);
5287 static inline void WriteTwoByteData(String* s, uint16_t* chars, int len) {
5288 ASSERT(s->length() == len);
5289 String::WriteToFlat(s, chars, 0, len);
5293 template<bool is_one_byte, typename T>
5294 MaybeObject* Heap::AllocateInternalizedStringImpl(
5295 T t, int chars, uint32_t hash_field) {
5297 // Compute map and object size.
Map* map;
int size;
if (is_one_byte) {
5302 if (chars > SeqOneByteString::kMaxLength) {
5303 return Failure::OutOfMemoryException(0x9);
}
5305 map = ascii_internalized_string_map();
5306 size = SeqOneByteString::SizeFor(chars);
} else {
5308 if (chars > SeqTwoByteString::kMaxLength) {
5309 return Failure::OutOfMemoryException(0xa);
}
5311 map = internalized_string_map();
5312 size = SeqTwoByteString::SizeFor(chars);
}
// Allocate string.
Object* result;
5317 { MaybeObject* maybe_result = (size > Page::kMaxNonCodeHeapObjectSize)
5318 ? lo_space_->AllocateRaw(size, NOT_EXECUTABLE)
5319 : old_data_space_->AllocateRaw(size);
5320 if (!maybe_result->ToObject(&result)) return maybe_result;
5323 reinterpret_cast<HeapObject*>(result)->set_map_no_write_barrier(map);
5324 // Set length and hash fields of the allocated string.
5325 String* answer = String::cast(result);
5326 answer->set_length(chars);
5327 answer->set_hash_field(hash_field);
5329 ASSERT_EQ(size, answer->Size());
if (is_one_byte) {
5332 WriteOneByteData(t, SeqOneByteString::cast(answer)->GetChars(), chars);
} else {
5334 WriteTwoByteData(t, SeqTwoByteString::cast(answer)->GetChars(), chars);
}
return answer;
}
5340 // Need explicit instantiations.
template
5342 MaybeObject* Heap::AllocateInternalizedStringImpl<true>(String*, int, uint32_t);
template
5344 MaybeObject* Heap::AllocateInternalizedStringImpl<false>(
5345 String*, int, uint32_t);
template
5347 MaybeObject* Heap::AllocateInternalizedStringImpl<false>(
5348 Vector<const char>, int, uint32_t);
5351 MaybeObject* Heap::AllocateRawOneByteString(int length,
5352 PretenureFlag pretenure) {
5353 if (length < 0 || length > SeqOneByteString::kMaxLength) {
5354 return Failure::OutOfMemoryException(0xb);
5357 int size = SeqOneByteString::SizeFor(length);
5358 ASSERT(size <= SeqOneByteString::kMaxSize);
5360 AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
5361 AllocationSpace retry_space = OLD_DATA_SPACE;
5363 if (space == NEW_SPACE) {
5364 if (size > kMaxObjectSizeInNewSpace) {
5365 // Allocate in large object space, retry space will be ignored.
space = LO_SPACE;
5367 } else if (size > Page::kMaxNonCodeHeapObjectSize) {
5368 // Allocate in new space, retry in large object space.
5369 retry_space = LO_SPACE;
}
5371 } else if (space == OLD_DATA_SPACE &&
5372 size > Page::kMaxNonCodeHeapObjectSize) {
space = LO_SPACE;
}
Object* result;
5376 { MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
5377 if (!maybe_result->ToObject(&result)) return maybe_result;
5380 // Partially initialize the object.
5381 HeapObject::cast(result)->set_map_no_write_barrier(ascii_string_map());
5382 String::cast(result)->set_length(length);
5383 String::cast(result)->set_hash_field(String::kEmptyHashField);
5384 ASSERT_EQ(size, HeapObject::cast(result)->Size());
return result;
}
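// Illustration only (simplified, not the V8 implementation): the
// pretenure/size based space selection used by the raw string allocators
// above, written as a standalone decision function. The parameter names are
// stand-ins for kMaxObjectSizeInNewSpace and Page::kMaxNonCodeHeapObjectSize.
//
//   enum IllustrativeSpace { NEW, OLD_DATA, LO };
//   static IllustrativeSpace PickSpace(bool tenured, int size,
//                                      int max_new_space_object,
//                                      int max_page_object) {
//     if (!tenured) {
//       if (size > max_new_space_object) return LO;  // too big for new space
//       return NEW;  // small enough; large sizes use LO as the retry space
//     }
//     return (size > max_page_object) ? LO : OLD_DATA;  // tenured allocation
//   }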
5390 MaybeObject* Heap::AllocateRawTwoByteString(int length,
5391 PretenureFlag pretenure) {
5392 if (length < 0 || length > SeqTwoByteString::kMaxLength) {
5393 return Failure::OutOfMemoryException(0xc);
5395 int size = SeqTwoByteString::SizeFor(length);
5396 ASSERT(size <= SeqTwoByteString::kMaxSize);
5397 AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
5398 AllocationSpace retry_space = OLD_DATA_SPACE;
5400 if (space == NEW_SPACE) {
5401 if (size > kMaxObjectSizeInNewSpace) {
5402 // Allocate in large object space, retry space will be ignored.
space = LO_SPACE;
5404 } else if (size > Page::kMaxNonCodeHeapObjectSize) {
5405 // Allocate in new space, retry in large object space.
5406 retry_space = LO_SPACE;
}
5408 } else if (space == OLD_DATA_SPACE &&
5409 size > Page::kMaxNonCodeHeapObjectSize) {
space = LO_SPACE;
}
Object* result;
5413 { MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
5414 if (!maybe_result->ToObject(&result)) return maybe_result;
5417 // Partially initialize the object.
5418 HeapObject::cast(result)->set_map_no_write_barrier(string_map());
5419 String::cast(result)->set_length(length);
5420 String::cast(result)->set_hash_field(String::kEmptyHashField);
5421 ASSERT_EQ(size, HeapObject::cast(result)->Size());
return result;
}
5426 MaybeObject* Heap::AllocateJSArray(
5427 ElementsKind elements_kind,
5428 PretenureFlag pretenure) {
5429 Context* native_context = isolate()->context()->native_context();
5430 JSFunction* array_function = native_context->array_function();
5431 Map* map = array_function->initial_map();
5432 Map* transition_map = isolate()->get_initial_js_array_map(elements_kind);
5433 if (transition_map != NULL) map = transition_map;
5434 return AllocateJSObjectFromMap(map, pretenure);
5438 MaybeObject* Heap::AllocateJSArrayWithAllocationSite(
5439 ElementsKind elements_kind,
5440 Handle<AllocationSite> allocation_site) {
5441 Context* native_context = isolate()->context()->native_context();
5442 JSFunction* array_function = native_context->array_function();
5443 Map* map = array_function->initial_map();
5444 Object* maybe_map_array = native_context->js_array_maps();
5445 if (!maybe_map_array->IsUndefined()) {
5446 Object* maybe_transitioned_map =
5447 FixedArray::cast(maybe_map_array)->get(elements_kind);
5448 if (!maybe_transitioned_map->IsUndefined()) {
5449 map = Map::cast(maybe_transitioned_map);
5452 return AllocateJSObjectFromMapWithAllocationSite(map, allocation_site);
5456 MaybeObject* Heap::AllocateEmptyFixedArray() {
5457 int size = FixedArray::SizeFor(0);
5459 { MaybeObject* maybe_result =
5460 AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE);
5461 if (!maybe_result->ToObject(&result)) return maybe_result;
5463 // Initialize the object.
5464 reinterpret_cast<FixedArray*>(result)->set_map_no_write_barrier(
5466 reinterpret_cast<FixedArray*>(result)->set_length(0);
5471 MaybeObject* Heap::AllocateEmptyExternalArray(ExternalArrayType array_type) {
5472 return AllocateExternalArray(0, array_type, NULL, TENURED);
5476 MaybeObject* Heap::AllocateRawFixedArray(int length) {
5477 if (length < 0 || length > FixedArray::kMaxLength) {
5478 return Failure::OutOfMemoryException(0xd);
5481 // Use the general function if we're forced to always allocate.
5482 if (always_allocate()) return AllocateFixedArray(length, TENURED);
5483 // Allocate the raw data for a fixed array.
5484 int size = FixedArray::SizeFor(length);
5485 return size <= kMaxObjectSizeInNewSpace
5486 ? new_space_.AllocateRaw(size)
5487 : lo_space_->AllocateRaw(size, NOT_EXECUTABLE);
5491 MaybeObject* Heap::CopyFixedArrayWithMap(FixedArray* src, Map* map) {
5492 int len = src->length();
5494 { MaybeObject* maybe_obj = AllocateRawFixedArray(len);
5495 if (!maybe_obj->ToObject(&obj)) return maybe_obj;
5497 if (InNewSpace(obj)) {
5498 HeapObject* dst = HeapObject::cast(obj);
5499 dst->set_map_no_write_barrier(map);
5500 CopyBlock(dst->address() + kPointerSize,
5501 src->address() + kPointerSize,
5502 FixedArray::SizeFor(len) - kPointerSize);
return obj;
}
5505 HeapObject::cast(obj)->set_map_no_write_barrier(map);
5506 FixedArray* result = FixedArray::cast(obj);
5507 result->set_length(len);
5510 DisallowHeapAllocation no_gc;
5511 WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
5512 for (int i = 0; i < len; i++) result->set(i, src->get(i), mode);
return result;
}
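// Note on CopyFixedArrayWithMap above: the two branches differ mainly in
// write-barrier cost. A copy whose destination is still in new space can
// CopyBlock() the payload without any barrier, while a copy into old space
// stores the elements one by one with the WriteBarrierMode returned by
// GetWriteBarrierMode(), so that old-to-new pointers are recorded.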
5517 MaybeObject* Heap::CopyFixedDoubleArrayWithMap(FixedDoubleArray* src,
Map* map) {
5519 int len = src->length();
Object* obj;
5521 { MaybeObject* maybe_obj = AllocateRawFixedDoubleArray(len, NOT_TENURED);
5522 if (!maybe_obj->ToObject(&obj)) return maybe_obj;
5524 HeapObject* dst = HeapObject::cast(obj);
5525 dst->set_map_no_write_barrier(map);
5527 dst->address() + FixedDoubleArray::kLengthOffset,
5528 src->address() + FixedDoubleArray::kLengthOffset,
5529 FixedDoubleArray::SizeFor(len) - FixedDoubleArray::kLengthOffset);
return obj;
}
5534 MaybeObject* Heap::AllocateFixedArray(int length) {
5535 ASSERT(length >= 0);
5536 if (length == 0) return empty_fixed_array();
5538 { MaybeObject* maybe_result = AllocateRawFixedArray(length);
5539 if (!maybe_result->ToObject(&result)) return maybe_result;
5541 // Initialize header.
5542 FixedArray* array = reinterpret_cast<FixedArray*>(result);
5543 array->set_map_no_write_barrier(fixed_array_map());
5544 array->set_length(length);
5546 ASSERT(!InNewSpace(undefined_value()));
5547 MemsetPointer(array->data_start(), undefined_value(), length);
return result;
}
5552 MaybeObject* Heap::AllocateRawFixedArray(int length, PretenureFlag pretenure) {
5553 if (length < 0 || length > FixedArray::kMaxLength) {
5554 return Failure::OutOfMemoryException(0xe);
5557 AllocationSpace space =
5558 (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
5559 int size = FixedArray::SizeFor(length);
5560 if (space == NEW_SPACE && size > kMaxObjectSizeInNewSpace) {
5561 // Too big for new space.
space = LO_SPACE;
5563 } else if (space == OLD_POINTER_SPACE &&
5564 size > Page::kMaxNonCodeHeapObjectSize) {
5565 // Too big for old pointer space.
space = LO_SPACE;
}
5569 AllocationSpace retry_space =
5570 (size <= Page::kMaxNonCodeHeapObjectSize) ? OLD_POINTER_SPACE : LO_SPACE;
5572 return AllocateRaw(size, space, retry_space);
5576 MUST_USE_RESULT static MaybeObject* AllocateFixedArrayWithFiller(
Heap* heap,
int length,
5579 PretenureFlag pretenure,
Object* filler) {
5581 ASSERT(length >= 0);
5582 ASSERT(heap->empty_fixed_array()->IsFixedArray());
5583 if (length == 0) return heap->empty_fixed_array();
5585 ASSERT(!heap->InNewSpace(filler));
5587 { MaybeObject* maybe_result = heap->AllocateRawFixedArray(length, pretenure);
5588 if (!maybe_result->ToObject(&result)) return maybe_result;
5591 HeapObject::cast(result)->set_map_no_write_barrier(heap->fixed_array_map());
5592 FixedArray* array = FixedArray::cast(result);
5593 array->set_length(length);
5594 MemsetPointer(array->data_start(), filler, length);
return array;
}
5599 MaybeObject* Heap::AllocateFixedArray(int length, PretenureFlag pretenure) {
5600 return AllocateFixedArrayWithFiller(this,
length,
pretenure,
undefined_value());
}
5607 MaybeObject* Heap::AllocateFixedArrayWithHoles(int length,
5608 PretenureFlag pretenure) {
5609 return AllocateFixedArrayWithFiller(this,
length,
pretenure,
the_hole_value());
}
5616 MaybeObject* Heap::AllocateUninitializedFixedArray(int length) {
5617 if (length == 0) return empty_fixed_array();
5620 { MaybeObject* maybe_obj = AllocateRawFixedArray(length);
5621 if (!maybe_obj->ToObject(&obj)) return maybe_obj;
5624 reinterpret_cast<FixedArray*>(obj)->set_map_no_write_barrier(
5626 FixedArray::cast(obj)->set_length(length);
5631 MaybeObject* Heap::AllocateEmptyFixedDoubleArray() {
5632 int size = FixedDoubleArray::SizeFor(0);
5634 { MaybeObject* maybe_result =
5635 AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE);
5636 if (!maybe_result->ToObject(&result)) return maybe_result;
5638 // Initialize the object.
5639 reinterpret_cast<FixedDoubleArray*>(result)->set_map_no_write_barrier(
5640 fixed_double_array_map());
5641 reinterpret_cast<FixedDoubleArray*>(result)->set_length(0);
5646 MaybeObject* Heap::AllocateUninitializedFixedDoubleArray(
5648 PretenureFlag pretenure) {
5649 if (length == 0) return empty_fixed_array();
5651 Object* elements_object;
5652 MaybeObject* maybe_obj = AllocateRawFixedDoubleArray(length, pretenure);
5653 if (!maybe_obj->ToObject(&elements_object)) return maybe_obj;
5654 FixedDoubleArray* elements =
5655 reinterpret_cast<FixedDoubleArray*>(elements_object);
5657 elements->set_map_no_write_barrier(fixed_double_array_map());
5658 elements->set_length(length);
5663 MaybeObject* Heap::AllocateFixedDoubleArrayWithHoles(
5665 PretenureFlag pretenure) {
5666 if (length == 0) return empty_fixed_array();
5668 Object* elements_object;
5669 MaybeObject* maybe_obj = AllocateRawFixedDoubleArray(length, pretenure);
5670 if (!maybe_obj->ToObject(&elements_object)) return maybe_obj;
5671 FixedDoubleArray* elements =
5672 reinterpret_cast<FixedDoubleArray*>(elements_object);
5674 for (int i = 0; i < length; ++i) {
5675 elements->set_the_hole(i);
5678 elements->set_map_no_write_barrier(fixed_double_array_map());
5679 elements->set_length(length);
5684 MaybeObject* Heap::AllocateRawFixedDoubleArray(int length,
5685 PretenureFlag pretenure) {
5686 if (length < 0 || length > FixedDoubleArray::kMaxLength) {
5687 return Failure::OutOfMemoryException(0xf);
5690 AllocationSpace space =
5691 (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
5692 int size = FixedDoubleArray::SizeFor(length);
5694 #ifndef V8_HOST_ARCH_64_BIT
5695 size += kPointerSize;
5698 if (space == NEW_SPACE && size > kMaxObjectSizeInNewSpace) {
5699 // Too big for new space.
space = LO_SPACE;
5701 } else if (space == OLD_DATA_SPACE &&
5702 size > Page::kMaxNonCodeHeapObjectSize) {
5703 // Too big for old data space.
space = LO_SPACE;
}
5707 AllocationSpace retry_space =
5708 (size <= Page::kMaxNonCodeHeapObjectSize) ? OLD_DATA_SPACE : LO_SPACE;
HeapObject* object;
5711 { MaybeObject* maybe_object = AllocateRaw(size, space, retry_space);
5712 if (!maybe_object->To<HeapObject>(&object)) return maybe_object;
5715 return EnsureDoubleAligned(this, object, size);
}
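// Why kPointerSize is added on 32-bit targets above (illustrative sketch
// with a hypothetical helper name): a 32-bit heap only guarantees 4-byte
// alignment, so one extra word is requested and the alignment step can skip
// a word, leaving a one-word filler, whenever the double payload would
// otherwise start on an unaligned address.
//
//   static char* AlignTo8Bytes(char* addr, int* usable_size) {
//     if ((reinterpret_cast<uintptr_t>(addr) & 7) != 0) {
//       addr += 4;          // the skipped word becomes a filler object
//       *usable_size -= 4;
//     }
//     return addr;
//   }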
5719 MaybeObject* Heap::AllocateHashTable(int length, PretenureFlag pretenure) {
5721 { MaybeObject* maybe_result = AllocateFixedArray(length, pretenure);
5722 if (!maybe_result->ToObject(&result)) return maybe_result;
5724 reinterpret_cast<HeapObject*>(result)->set_map_no_write_barrier(
5726 ASSERT(result->IsHashTable());
5731 MaybeObject* Heap::AllocateSymbol() {
5732 // Statically ensure that it is safe to allocate symbols in paged spaces.
5733 STATIC_ASSERT(Symbol::kSize <= Page::kNonCodeObjectAreaSize);
5736 MaybeObject* maybe =
5737 AllocateRaw(Symbol::kSize, OLD_POINTER_SPACE, OLD_POINTER_SPACE);
5738 if (!maybe->ToObject(&result)) return maybe;
5740 HeapObject::cast(result)->set_map_no_write_barrier(symbol_map());
5742 // Generate a random hash value.
int hash;
int attempts = 0;
do {
5746 hash = V8::RandomPrivate(isolate()) & Name::kHashBitMask;
attempts++;
5748 } while (hash == 0 && attempts < 30);
5749 if (hash == 0) hash = 1; // never return 0
5751 Symbol::cast(result)->set_hash_field(
5752 Name::kIsNotArrayIndexMask | (hash << Name::kHashShift));
5753 Symbol::cast(result)->set_name(undefined_value());
5755 ASSERT(result->IsSymbol());
return result;
}
5760 MaybeObject* Heap::AllocateNativeContext() {
5762 { MaybeObject* maybe_result =
5763 AllocateFixedArray(Context::NATIVE_CONTEXT_SLOTS);
5764 if (!maybe_result->ToObject(&result)) return maybe_result;
5766 Context* context = reinterpret_cast<Context*>(result);
5767 context->set_map_no_write_barrier(native_context_map());
5768 context->set_js_array_maps(undefined_value());
5769 ASSERT(context->IsNativeContext());
5770 ASSERT(result->IsContext());
5775 MaybeObject* Heap::AllocateGlobalContext(JSFunction* function,
5776 ScopeInfo* scope_info) {
5778 { MaybeObject* maybe_result =
5779 AllocateFixedArray(scope_info->ContextLength(), TENURED);
5780 if (!maybe_result->ToObject(&result)) return maybe_result;
5782 Context* context = reinterpret_cast<Context*>(result);
5783 context->set_map_no_write_barrier(global_context_map());
5784 context->set_closure(function);
5785 context->set_previous(function->context());
5786 context->set_extension(scope_info);
5787 context->set_global_object(function->context()->global_object());
5788 ASSERT(context->IsGlobalContext());
5789 ASSERT(result->IsContext());
5794 MaybeObject* Heap::AllocateModuleContext(ScopeInfo* scope_info) {
5796 { MaybeObject* maybe_result =
5797 AllocateFixedArray(scope_info->ContextLength(), TENURED);
5798 if (!maybe_result->ToObject(&result)) return maybe_result;
5800 Context* context = reinterpret_cast<Context*>(result);
5801 context->set_map_no_write_barrier(module_context_map());
5802 // Instance link will be set later.
5803 context->set_extension(Smi::FromInt(0));
5808 MaybeObject* Heap::AllocateFunctionContext(int length, JSFunction* function) {
5809 ASSERT(length >= Context::MIN_CONTEXT_SLOTS);
5811 { MaybeObject* maybe_result = AllocateFixedArray(length);
5812 if (!maybe_result->ToObject(&result)) return maybe_result;
5814 Context* context = reinterpret_cast<Context*>(result);
5815 context->set_map_no_write_barrier(function_context_map());
5816 context->set_closure(function);
5817 context->set_previous(function->context());
5818 context->set_extension(Smi::FromInt(0));
5819 context->set_global_object(function->context()->global_object());
5824 MaybeObject* Heap::AllocateCatchContext(JSFunction* function,
5827 Object* thrown_object) {
5828 STATIC_ASSERT(Context::MIN_CONTEXT_SLOTS == Context::THROWN_OBJECT_INDEX);
5830 { MaybeObject* maybe_result =
5831 AllocateFixedArray(Context::MIN_CONTEXT_SLOTS + 1);
5832 if (!maybe_result->ToObject(&result)) return maybe_result;
5834 Context* context = reinterpret_cast<Context*>(result);
5835 context->set_map_no_write_barrier(catch_context_map());
5836 context->set_closure(function);
5837 context->set_previous(previous);
5838 context->set_extension(name);
5839 context->set_global_object(previous->global_object());
5840 context->set(Context::THROWN_OBJECT_INDEX, thrown_object);
5845 MaybeObject* Heap::AllocateWithContext(JSFunction* function,
5847 JSReceiver* extension) {
5849 { MaybeObject* maybe_result = AllocateFixedArray(Context::MIN_CONTEXT_SLOTS);
5850 if (!maybe_result->ToObject(&result)) return maybe_result;
5852 Context* context = reinterpret_cast<Context*>(result);
5853 context->set_map_no_write_barrier(with_context_map());
5854 context->set_closure(function);
5855 context->set_previous(previous);
5856 context->set_extension(extension);
5857 context->set_global_object(previous->global_object());
5862 MaybeObject* Heap::AllocateBlockContext(JSFunction* function,
5864 ScopeInfo* scope_info) {
5866 { MaybeObject* maybe_result =
5867 AllocateFixedArrayWithHoles(scope_info->ContextLength());
5868 if (!maybe_result->ToObject(&result)) return maybe_result;
5870 Context* context = reinterpret_cast<Context*>(result);
5871 context->set_map_no_write_barrier(block_context_map());
5872 context->set_closure(function);
5873 context->set_previous(previous);
5874 context->set_extension(scope_info);
5875 context->set_global_object(previous->global_object());
5880 MaybeObject* Heap::AllocateScopeInfo(int length) {
5881 FixedArray* scope_info;
5882 MaybeObject* maybe_scope_info = AllocateFixedArray(length, TENURED);
5883 if (!maybe_scope_info->To(&scope_info)) return maybe_scope_info;
5884 scope_info->set_map_no_write_barrier(scope_info_map());
5889 MaybeObject* Heap::AllocateExternal(void* value) {
5891 { MaybeObject* maybe_result = AllocateForeign(static_cast<Address>(value));
5892 if (!maybe_result->To(&foreign)) return maybe_result;
5895 { MaybeObject* maybe_result = AllocateJSObjectFromMap(external_map());
5896 if (!maybe_result->To(&external)) return maybe_result;
5898 external->SetInternalField(0, foreign);
5903 MaybeObject* Heap::AllocateStruct(InstanceType type) {
5906 #define MAKE_CASE(NAME, Name, name) \
5907 case NAME##_TYPE: map = name##_map(); break;
5908 STRUCT_LIST(MAKE_CASE)
5912 return Failure::InternalError();
5914 int size = map->instance_size();
5915 AllocationSpace space =
5916 (size > Page::kMaxNonCodeHeapObjectSize) ? LO_SPACE : OLD_POINTER_SPACE;
5918 { MaybeObject* maybe_result = Allocate(map, space);
5919 if (!maybe_result->ToObject(&result)) return maybe_result;
5921 Struct::cast(result)->InitializeBody(size);
5926 bool Heap::IsHeapIterable() {
5927 return (!old_pointer_space()->was_swept_conservatively() &&
5928 !old_data_space()->was_swept_conservatively());
5932 void Heap::EnsureHeapIsIterable() {
5933 ASSERT(AllowHeapAllocation::IsAllowed());
5934 if (!IsHeapIterable()) {
5935 CollectAllGarbage(kMakeHeapIterableMask, "Heap::EnsureHeapIsIterable");
5937 ASSERT(IsHeapIterable());
5941 void Heap::AdvanceIdleIncrementalMarking(intptr_t step_size) {
5942 incremental_marking()->Step(step_size,
5943 IncrementalMarking::NO_GC_VIA_STACK_GUARD);
5945 if (incremental_marking()->IsComplete()) {
5946 bool uncommit = false;
5947 if (gc_count_at_last_idle_gc_ == gc_count_) {
5948 // No GC since the last full GC, the mutator is probably not active.
5949 isolate_->compilation_cache()->Clear();
uncommit = true;
}
5952 CollectAllGarbage(kNoGCFlags, "idle notification: finalize incremental");
5953 mark_sweeps_since_idle_round_started_++;
5954 gc_count_at_last_idle_gc_ = gc_count_;
if (uncommit) {
5956 new_space_.Shrink();
5957 UncommitFromSpace();
}
}
}
5963 bool Heap::IdleNotification(int hint) {
5964 // Hints greater than this value indicate that
5965 // the embedder is requesting a lot of GC work.
5966 const int kMaxHint = 1000;
5967 const int kMinHintForIncrementalMarking = 10;
5969 // Minimal hint that allows a full GC.
5969 const int kMinHintForFullGC = 100;
5970 intptr_t size_factor = Min(Max(hint, 20), kMaxHint) / 4;
5971 // The size factor is in range [5..250]. The numbers here are chosen from
5972 // experiments. If you change them, make sure to test with
5973 // chrome/performance_ui_tests --gtest_filter="GeneralMixMemoryTest.*
5974 intptr_t step_size =
5975 size_factor * IncrementalMarking::kAllocatedThreshold;
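// Worked example (illustrative): a hint of 1000 gives
// Min(Max(1000, 20), 1000) / 4 = 250 and a hint of 20 gives 20 / 4 = 5, so
// step_size scales linearly with the hint between those two bounds.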
5977 if (contexts_disposed_ > 0) {
5978 if (hint >= kMaxHint) {
5979 // The embedder is requesting a lot of GC work after context disposal;
5980 // age inline caches so that they do not keep objects from the old
5981 // context alive.
5984 int mark_sweep_time = Min(TimeMarkSweepWouldTakeInMs(), 1000);
5985 if (hint >= mark_sweep_time && !FLAG_expose_gc &&
5986 incremental_marking()->IsStopped()) {
5987 HistogramTimerScope scope(isolate_->counters()->gc_context());
5988 CollectAllGarbage(kReduceMemoryFootprintMask,
5989 "idle notification: contexts disposed");
5991 AdvanceIdleIncrementalMarking(step_size);
5992 contexts_disposed_ = 0;
5994 // After context disposal there is likely a lot of garbage remaining; reset
5995 // the idle notification counters in order to trigger more incremental GCs
5996 // on subsequent idle notifications.
6001 if (!FLAG_incremental_marking || FLAG_expose_gc || Serializer::enabled()) {
6002 return IdleGlobalGC();
6005 // Perform a round of incremental GCs by doing small chunks of GC work in
6006 // each IdleNotification, and after that wait until the mutator creates
6007 // enough garbage to justify a new round.
6008 // An incremental GC progresses as follows:
6009 // 1. many incremental marking steps,
6010 // 2. one old space mark-sweep-compact,
6011 // 3. many lazy sweep steps.
6012 // Use mark-sweep-compact events to count incremental GCs in a round.
6014 if (incremental_marking()->IsStopped()) {
6015 if (!mark_compact_collector()->AreSweeperThreadsActivated() &&
6016 !IsSweepingComplete() &&
6017 !AdvanceSweepers(static_cast<int>(step_size))) {
6022 if (mark_sweeps_since_idle_round_started_ >= kMaxMarkSweepsInIdleRound) {
6023 if (EnoughGarbageSinceLastIdleRound()) {
6030 int remaining_mark_sweeps = kMaxMarkSweepsInIdleRound -
6031 mark_sweeps_since_idle_round_started_;
6033 if (incremental_marking()->IsStopped()) {
6034 // If there are no more than two GCs left in this idle round and we are
6035 // allowed to do a full GC, then make those GCs full in order to compact
// the code space.
6037 // TODO(ulan): Once we enable code compaction for incremental marking,
6038 // we can get rid of this special case and always start incremental marking.
6039 if (remaining_mark_sweeps <= 2 && hint >= kMinHintForFullGC) {
6040 CollectAllGarbage(kReduceMemoryFootprintMask,
6041 "idle notification: finalize idle round");
6042 mark_sweeps_since_idle_round_started_++;
6043 } else if (hint > kMinHintForIncrementalMarking) {
6044 incremental_marking()->Start();
6047 if (!incremental_marking()->IsStopped() &&
6048 hint > kMinHintForIncrementalMarking) {
6049 AdvanceIdleIncrementalMarking(step_size);
6052 if (mark_sweeps_since_idle_round_started_ >= kMaxMarkSweepsInIdleRound) {
FinishIdleRound();
return true;
}
return false;
}
6061 bool Heap::IdleGlobalGC() {
6062 static const int kIdlesBeforeScavenge = 4;
6063 static const int kIdlesBeforeMarkSweep = 7;
6064 static const int kIdlesBeforeMarkCompact = 8;
6065 static const int kMaxIdleCount = kIdlesBeforeMarkCompact + 1;
6066 static const unsigned int kGCsBetweenCleanup = 4;
6068 if (!last_idle_notification_gc_count_init_) {
6069 last_idle_notification_gc_count_ = gc_count_;
6070 last_idle_notification_gc_count_init_ = true;
6073 bool uncommit = true;
6074 bool finished = false;
6076 // Reset the number of idle notifications received when a number of
6077 // GCs have taken place. This allows another round of cleanup based
6078 // on idle notifications if enough work has been carried out to
6079 // provoke a number of garbage collections.
6080 if (gc_count_ - last_idle_notification_gc_count_ < kGCsBetweenCleanup) {
6081 number_idle_notifications_ =
6082 Min(number_idle_notifications_ + 1, kMaxIdleCount);
6084 number_idle_notifications_ = 0;
6085 last_idle_notification_gc_count_ = gc_count_;
6088 if (number_idle_notifications_ == kIdlesBeforeScavenge) {
6089 CollectGarbage(NEW_SPACE, "idle notification");
6090 new_space_.Shrink();
6091 last_idle_notification_gc_count_ = gc_count_;
6092 } else if (number_idle_notifications_ == kIdlesBeforeMarkSweep) {
6093 // Before doing the mark-sweep collections we clear the
6094 // compilation cache to avoid hanging on to source code and
6095 // generated code for cached functions.
6096 isolate_->compilation_cache()->Clear();
6098 CollectAllGarbage(kReduceMemoryFootprintMask, "idle notification");
6099 new_space_.Shrink();
6100 last_idle_notification_gc_count_ = gc_count_;
6102 } else if (number_idle_notifications_ == kIdlesBeforeMarkCompact) {
6103 CollectAllGarbage(kReduceMemoryFootprintMask, "idle notification");
6104 new_space_.Shrink();
6105 last_idle_notification_gc_count_ = gc_count_;
6106 number_idle_notifications_ = 0;
6108 } else if (number_idle_notifications_ > kIdlesBeforeMarkCompact) {
6109 // If we have received more than kIdlesBeforeMarkCompact idle
6110 // notifications we do not perform any cleanup because we don't
6111 // expect to gain much by doing so.
6115 if (uncommit) UncommitFromSpace();
return finished;
}
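// Illustrative timeline for the constants above: the 4th consecutive idle
// notification triggers a new-space scavenge, the 7th a full collection that
// also clears the compilation cache, and the 8th a final full collection
// after which the counter resets; if at least kGCsBetweenCleanup (4) GCs
// happen in between for other reasons, the idle counter is reset as well.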
6123 void Heap::Print() {
6124 if (!HasBeenSetUp()) return;
6125 isolate()->PrintStack(stdout);
6126 AllSpaces spaces(this);
6127 for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
6133 void Heap::ReportCodeStatistics(const char* title) {
6134 PrintF(">>>>>> Code Stats (%s) >>>>>>\n", title);
6135 PagedSpace::ResetCodeStatistics();
6136 // We do not look for code in new space, map space, or old space. If code
6137 // somehow ends up in those spaces, we would miss it here.
6138 code_space_->CollectCodeStatistics();
6139 lo_space_->CollectCodeStatistics();
6140 PagedSpace::ReportCodeStatistics();
6144 // This function expects that NewSpace's allocated objects histogram is
6145 // populated (via a call to CollectStatistics or else as a side effect of a
6146 // just-completed scavenge collection).
6147 void Heap::ReportHeapStatistics(const char* title) {
6149 PrintF(">>>>>> =============== %s (%d) =============== >>>>>>\n",
6151 PrintF("old_generation_allocation_limit_ %" V8_PTR_PREFIX "d\n",
6152 old_generation_allocation_limit_);
6155 PrintF("Number of handles : %d\n", HandleScope::NumberOfHandles(isolate_));
6156 isolate_->global_handles()->PrintStats();
6159 PrintF("Heap statistics : ");
6160 isolate_->memory_allocator()->ReportStatistics();
6161 PrintF("To space : ");
6162 new_space_.ReportStatistics();
6163 PrintF("Old pointer space : ");
6164 old_pointer_space_->ReportStatistics();
6165 PrintF("Old data space : ");
6166 old_data_space_->ReportStatistics();
6167 PrintF("Code space : ");
6168 code_space_->ReportStatistics();
6169 PrintF("Map space : ");
6170 map_space_->ReportStatistics();
6171 PrintF("Cell space : ");
6172 cell_space_->ReportStatistics();
6173 PrintF("PropertyCell space : ");
6174 property_cell_space_->ReportStatistics();
6175 PrintF("Large object space : ");
6176 lo_space_->ReportStatistics();
6177 PrintF(">>>>>> ========================================= >>>>>>\n");
6182 bool Heap::Contains(HeapObject* value) {
6183 return Contains(value->address());
6187 bool Heap::Contains(Address addr) {
6188 if (OS::IsOutsideAllocatedSpace(addr)) return false;
6189 return HasBeenSetUp() &&
6190 (new_space_.ToSpaceContains(addr) ||
6191 old_pointer_space_->Contains(addr) ||
6192 old_data_space_->Contains(addr) ||
6193 code_space_->Contains(addr) ||
6194 map_space_->Contains(addr) ||
6195 cell_space_->Contains(addr) ||
6196 property_cell_space_->Contains(addr) ||
6197 lo_space_->SlowContains(addr));
6201 bool Heap::InSpace(HeapObject* value, AllocationSpace space) {
6202 return InSpace(value->address(), space);
6206 bool Heap::InSpace(Address addr, AllocationSpace space) {
6207 if (OS::IsOutsideAllocatedSpace(addr)) return false;
6208 if (!HasBeenSetUp()) return false;
6212 return new_space_.ToSpaceContains(addr);
6213 case OLD_POINTER_SPACE:
6214 return old_pointer_space_->Contains(addr);
6215 case OLD_DATA_SPACE:
6216 return old_data_space_->Contains(addr);
6218 return code_space_->Contains(addr);
6220 return map_space_->Contains(addr);
6222 return cell_space_->Contains(addr);
6223 case PROPERTY_CELL_SPACE:
6224 return property_cell_space_->Contains(addr);
6226 return lo_space_->SlowContains(addr);
6234 void Heap::Verify() {
6235 CHECK(HasBeenSetUp());
6237 store_buffer()->Verify();
6239 VerifyPointersVisitor visitor;
6240 IterateRoots(&visitor, VISIT_ONLY_STRONG);
6242 new_space_.Verify();
6244 old_pointer_space_->Verify(&visitor);
6245 map_space_->Verify(&visitor);
6247 VerifyPointersVisitor no_dirty_regions_visitor;
6248 old_data_space_->Verify(&no_dirty_regions_visitor);
6249 code_space_->Verify(&no_dirty_regions_visitor);
6250 cell_space_->Verify(&no_dirty_regions_visitor);
6251 property_cell_space_->Verify(&no_dirty_regions_visitor);
6253 lo_space_->Verify();
6258 MaybeObject* Heap::InternalizeUtf8String(Vector<const char> string) {
6259 Object* result = NULL;
6261 { MaybeObject* maybe_new_table =
6262 string_table()->LookupUtf8String(string, &result);
6263 if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
6265 // Can't use set_string_table because StringTable::cast knows that
6266 // StringTable is a singleton and checks for identity.
6267 roots_[kStringTableRootIndex] = new_table;
6268 ASSERT(result != NULL);
6273 MaybeObject* Heap::InternalizeOneByteString(Vector<const uint8_t> string) {
6274 Object* result = NULL;
6276 { MaybeObject* maybe_new_table =
6277 string_table()->LookupOneByteString(string, &result);
6278 if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
6280 // Can't use set_string_table because StringTable::cast knows that
6281 // StringTable is a singleton and checks for identity.
6282 roots_[kStringTableRootIndex] = new_table;
6283 ASSERT(result != NULL);
6288 MaybeObject* Heap::InternalizeOneByteString(Handle<SeqOneByteString> string,
6291 Object* result = NULL;
6293 { MaybeObject* maybe_new_table =
6294 string_table()->LookupSubStringOneByteString(string,
6298 if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
6300 // Can't use set_string_table because StringTable::cast knows that
6301 // StringTable is a singleton and checks for identity.
6302 roots_[kStringTableRootIndex] = new_table;
6303 ASSERT(result != NULL);
6308 MaybeObject* Heap::InternalizeTwoByteString(Vector<const uc16> string) {
6309 Object* result = NULL;
6311 { MaybeObject* maybe_new_table =
6312 string_table()->LookupTwoByteString(string, &result);
6313 if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
6315 // Can't use set_string_table because StringTable::cast knows that
6316 // StringTable is a singleton and checks for identity.
6317 roots_[kStringTableRootIndex] = new_table;
6318 ASSERT(result != NULL);
6323 MaybeObject* Heap::InternalizeString(String* string) {
6324 if (string->IsInternalizedString()) return string;
6325 Object* result = NULL;
6327 { MaybeObject* maybe_new_table =
6328 string_table()->LookupString(string, &result);
6329 if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
6331 // Can't use set_string_table because StringTable::cast knows that
6332 // StringTable is a singleton and checks for identity.
6333 roots_[kStringTableRootIndex] = new_table;
6334 ASSERT(result != NULL);
6339 bool Heap::InternalizeStringIfExists(String* string, String** result) {
6340 if (string->IsInternalizedString()) {
6344 return string_table()->LookupStringIfExists(string, result);
6348 void Heap::ZapFromSpace() {
6349 NewSpacePageIterator it(new_space_.FromSpaceStart(),
6350 new_space_.FromSpaceEnd());
6351 while (it.has_next()) {
6352 NewSpacePage* page = it.next();
6353 for (Address cursor = page->area_start(), limit = page->area_end();
6355 cursor += kPointerSize) {
6356 Memory::Address_at(cursor) = kFromSpaceZapValue;
6362 void Heap::IterateAndMarkPointersToFromSpace(Address start,
6364 ObjectSlotCallback callback) {
6365 Address slot_address = start;
6367 // We are not collecting slots on new space objects during mutation,
6368 // thus we have to scan for pointers to evacuation candidates when we
6369 // promote objects. But we should not record any slots in non-black
6370 // objects. Grey objects' slots would be rescanned. White objects might
6371 // not survive until the end of the collection, so it would be a
6372 // violation of the invariant to record their slots.
6373 bool record_slots = false;
6374 if (incremental_marking()->IsCompacting()) {
6375 MarkBit mark_bit = Marking::MarkBitFrom(HeapObject::FromAddress(start));
6376 record_slots = Marking::IsBlack(mark_bit);
6379 while (slot_address < end) {
6380 Object** slot = reinterpret_cast<Object**>(slot_address);
6381 Object* object = *slot;
6382 // If the store buffer becomes overfull we mark pages as being exempt from
6383 // the store buffer. These pages are scanned to find pointers that point
6384 // to the new space. In that case we may hit newly promoted objects and
6385 // fix the pointers before the promotion queue gets to them. Thus the 'if'.
6386 if (object->IsHeapObject()) {
6387 if (Heap::InFromSpace(object)) {
6388 callback(reinterpret_cast<HeapObject**>(slot),
6389 HeapObject::cast(object));
6390 Object* new_object = *slot;
6391 if (InNewSpace(new_object)) {
6392 SLOW_ASSERT(Heap::InToSpace(new_object));
6393 SLOW_ASSERT(new_object->IsHeapObject());
6394 store_buffer_.EnterDirectlyIntoStoreBuffer(
6395 reinterpret_cast<Address>(slot));
6397 SLOW_ASSERT(!MarkCompactCollector::IsOnEvacuationCandidate(new_object));
6398 } else if (record_slots &&
6399 MarkCompactCollector::IsOnEvacuationCandidate(object)) {
6400 mark_compact_collector()->RecordSlot(slot, slot, object);
}
}
6403 slot_address += kPointerSize;
}
}
6409 typedef bool (*CheckStoreBufferFilter)(Object** addr);
6412 bool IsAMapPointerAddress(Object** addr) {
6413 uintptr_t a = reinterpret_cast<uintptr_t>(addr);
6414 int mod = a % Map::kSize;
6415 return mod >= Map::kPointerFieldsBeginOffset &&
6416 mod < Map::kPointerFieldsEndOffset;
}
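// Worked example (illustrative, with hypothetical values Map::kSize == 88 and
// pointer fields in [kPointerFieldsBeginOffset, kPointerFieldsEndOffset) ==
// [8, 24)): an address at offset 88 * k + 12 inside a Map yields mod == 12,
// which falls in the range and is treated as a possible pointer field, while
// offset 88 * k + 4 yields mod == 4 and is skipped.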
6420 bool EverythingsAPointer(Object** addr) {
return true;
}
6425 static void CheckStoreBuffer(Heap* heap,
6428 Object**** store_buffer_position,
6429 Object*** store_buffer_top,
6430 CheckStoreBufferFilter filter,
6431 Address special_garbage_start,
6432 Address special_garbage_end) {
6433 Map* free_space_map = heap->free_space_map();
6434 for ( ; current < limit; current++) {
6435 Object* o = *current;
6436 Address current_address = reinterpret_cast<Address>(current);
6438 if (o == free_space_map) {
6439 Address current_address = reinterpret_cast<Address>(current);
6440 FreeSpace* free_space =
6441 FreeSpace::cast(HeapObject::FromAddress(current_address));
6442 int skip = free_space->Size();
6443 ASSERT(current_address + skip <= reinterpret_cast<Address>(limit));
6445 current_address += skip - kPointerSize;
6446 current = reinterpret_cast<Object**>(current_address);
6449 // Skip the current linear allocation space between top and limit which is
6450 // unmarked with the free space map, but can contain junk.
6451 if (current_address == special_garbage_start &&
6452 special_garbage_end != special_garbage_start) {
6453 current_address = special_garbage_end - kPointerSize;
6454 current = reinterpret_cast<Object**>(current_address);
6457 if (!(*filter)(current)) continue;
6458 ASSERT(current_address < special_garbage_start ||
6459 current_address >= special_garbage_end);
6460 ASSERT(reinterpret_cast<uintptr_t>(o) != kFreeListZapValue);
6461 // We have to check that the pointer does not point into new space
6462 // without trying to cast it to a heap object since the hash field of
6463 // a string can contain values like 1 and 3 which are tagged null
// pointers.
6465 if (!heap->InNewSpace(o)) continue;
6466 while (**store_buffer_position < current &&
6467 *store_buffer_position < store_buffer_top) {
6468 (*store_buffer_position)++;
6470 if (**store_buffer_position != current ||
6471 *store_buffer_position == store_buffer_top) {
6472 Object** obj_start = current;
6473 while (!(*obj_start)->IsMap()) obj_start--;
6480 // Check that the store buffer contains all intergenerational pointers by
6481 // scanning a page and ensuring that all pointers to young space are in the
// store buffer.
6483 void Heap::OldPointerSpaceCheckStoreBuffer() {
6484 OldSpace* space = old_pointer_space();
6485 PageIterator pages(space);
6487 store_buffer()->SortUniq();
6489 while (pages.has_next()) {
6490 Page* page = pages.next();
6491 Object** current = reinterpret_cast<Object**>(page->area_start());
6493 Address end = page->area_end();
6495 Object*** store_buffer_position = store_buffer()->Start();
6496 Object*** store_buffer_top = store_buffer()->Top();
6498 Object** limit = reinterpret_cast<Object**>(end);
6499 CheckStoreBuffer(this,
6502 &store_buffer_position,
6504 &EverythingsAPointer,
6511 void Heap::MapSpaceCheckStoreBuffer() {
6512 MapSpace* space = map_space();
6513 PageIterator pages(space);
6515 store_buffer()->SortUniq();
6517 while (pages.has_next()) {
6518 Page* page = pages.next();
6519 Object** current = reinterpret_cast<Object**>(page->area_start());
6521 Address end = page->area_end();
6523 Object*** store_buffer_position = store_buffer()->Start();
6524 Object*** store_buffer_top = store_buffer()->Top();
6526 Object** limit = reinterpret_cast<Object**>(end);
6527 CheckStoreBuffer(this,
6530 &store_buffer_position,
6532 &IsAMapPointerAddress,
6539 void Heap::LargeObjectSpaceCheckStoreBuffer() {
6540 LargeObjectIterator it(lo_space());
6541 for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
6542 // We only have code, sequential strings, or fixed arrays in large
6543 // object space, and only fixed arrays can possibly contain pointers to
6544 // the young generation.
6545 if (object->IsFixedArray()) {
6546 Object*** store_buffer_position = store_buffer()->Start();
6547 Object*** store_buffer_top = store_buffer()->Top();
6548 Object** current = reinterpret_cast<Object**>(object->address());
6550 reinterpret_cast<Object**>(object->address() + object->Size());
6551 CheckStoreBuffer(this,
6554 &store_buffer_position,
6556 &EverythingsAPointer,
6565 void Heap::IterateRoots(ObjectVisitor* v, VisitMode mode) {
6566 IterateStrongRoots(v, mode);
6567 IterateWeakRoots(v, mode);
6571 void Heap::IterateWeakRoots(ObjectVisitor* v, VisitMode mode) {
6572 v->VisitPointer(reinterpret_cast<Object**>(&roots_[kStringTableRootIndex]));
6573 v->Synchronize(VisitorSynchronization::kStringTable);
6574 if (mode != VISIT_ALL_IN_SCAVENGE &&
6575 mode != VISIT_ALL_IN_SWEEP_NEWSPACE) {
6576 // Scavenge collections have special processing for this.
6577 external_string_table_.Iterate(v);
6578 error_object_list_.Iterate(v);
6580 v->Synchronize(VisitorSynchronization::kExternalStringsTable);
6584 void Heap::IterateStrongRoots(ObjectVisitor* v, VisitMode mode) {
6585 v->VisitPointers(&roots_[0], &roots_[kStrongRootListLength]);
6586 v->Synchronize(VisitorSynchronization::kStrongRootList);
6588 v->VisitPointer(BitCast<Object**>(&hidden_string_));
6589 v->Synchronize(VisitorSynchronization::kInternalizedString);
6591 isolate_->bootstrapper()->Iterate(v);
6592 v->Synchronize(VisitorSynchronization::kBootstrapper);
6593 isolate_->Iterate(v);
6594 v->Synchronize(VisitorSynchronization::kTop);
6595 Relocatable::Iterate(v);
6596 v->Synchronize(VisitorSynchronization::kRelocatable);
6598 #ifdef ENABLE_DEBUGGER_SUPPORT
6599 isolate_->debug()->Iterate(v);
6600 if (isolate_->deoptimizer_data() != NULL) {
6601 isolate_->deoptimizer_data()->Iterate(v);
6604 v->Synchronize(VisitorSynchronization::kDebug);
6605 isolate_->compilation_cache()->Iterate(v);
6606 v->Synchronize(VisitorSynchronization::kCompilationCache);
6608 // Iterate over local handles in handle scopes.
6609 isolate_->handle_scope_implementer()->Iterate(v);
6610 isolate_->IterateDeferredHandles(v);
6611 v->Synchronize(VisitorSynchronization::kHandleScope);
6613 // Iterate over the builtin code objects and code stubs in the
6614 // heap. Note that it is not necessary to iterate over code objects
6615 // on scavenge collections.
6616 if (mode != VISIT_ALL_IN_SCAVENGE) {
6617 isolate_->builtins()->IterateBuiltins(v);
6619 v->Synchronize(VisitorSynchronization::kBuiltins);
6621 // Iterate over global handles.
6623 case VISIT_ONLY_STRONG:
6624 isolate_->global_handles()->IterateStrongRoots(v);
6626 case VISIT_ALL_IN_SCAVENGE:
6627 isolate_->global_handles()->IterateNewSpaceStrongAndDependentRoots(v);
6629 case VISIT_ALL_IN_SWEEP_NEWSPACE:
6631 isolate_->global_handles()->IterateAllRoots(v);
6634 v->Synchronize(VisitorSynchronization::kGlobalHandles);
6636 // Iterate over pointers being held by inactive threads.
6637 isolate_->thread_manager()->Iterate(v);
6638 v->Synchronize(VisitorSynchronization::kThreadManager);
6640 // Iterate over the pointers the Serialization/Deserialization code is
// holding.
6642 // During garbage collection this keeps the partial snapshot cache alive.
6643 // During deserialization of the startup snapshot this creates the partial
6644 // snapshot cache and deserializes the objects it refers to. During
6645 // serialization this does nothing, since the partial snapshot cache is
6646 // empty. However the next thing we do is create the partial snapshot,
6647 // filling up the partial snapshot cache with objects it needs as we go.
6648 SerializerDeserializer::Iterate(v);
6649 // We don't do a v->Synchronize call here, because in debug mode that will
6650 // output a flag to the snapshot. However at this point the serializer and
6651 // deserializer are deliberately a little unsynchronized (see above) so the
6652 // checking of the sync flag in the snapshot would fail.
6656 // TODO(1236194): Since the heap size is configurable on the command line
6657 // and through the API, we should gracefully handle the case that the heap
6658 // size is not big enough to fit all the initial objects.
6659 bool Heap::ConfigureHeap(int max_semispace_size,
6660 intptr_t max_old_gen_size,
6661 intptr_t max_executable_size) {
6662 if (HasBeenSetUp()) return false;
6664 if (FLAG_stress_compaction) {
6665 // This will cause more frequent GCs when stressing.
6666 max_semispace_size_ = Page::kPageSize;
6669 if (max_semispace_size > 0) {
6670 if (max_semispace_size < Page::kPageSize) {
6671 max_semispace_size = Page::kPageSize;
6672 if (FLAG_trace_gc) {
6673 PrintPID("Max semispace size cannot be less than %dkbytes\n",
6674 Page::kPageSize >> 10);
6677 max_semispace_size_ = max_semispace_size;
6680 if (Snapshot::IsEnabled()) {
6681 // If we are using a snapshot we always reserve the default amount
6682 // of memory for each semispace because code in the snapshot has
6683 // write-barrier code that relies on the size and alignment of new
6684 // space. We therefore cannot use a larger max semispace size
6685 // than the default reserved semispace size.
6686 if (max_semispace_size_ > reserved_semispace_size_) {
6687 max_semispace_size_ = reserved_semispace_size_;
6688 if (FLAG_trace_gc) {
6689 PrintPID("Max semispace size cannot be more than %dkbytes\n",
6690 reserved_semispace_size_ >> 10);
6694 // If we are not using snapshots we reserve space for the actual
6695 // max semispace size.
6696 reserved_semispace_size_ = max_semispace_size_;
6699 if (max_old_gen_size > 0) max_old_generation_size_ = max_old_gen_size;
6700 if (max_executable_size > 0) {
6701 max_executable_size_ = RoundUp(max_executable_size, Page::kPageSize);
6704 // The max executable size must be less than or equal to the max old
// generation size.
6706 if (max_executable_size_ > max_old_generation_size_) {
6707 max_executable_size_ = max_old_generation_size_;
6710 // The new space size must be a power of two to support single-bit testing
// for containment.
6712 max_semispace_size_ = RoundUpToPowerOf2(max_semispace_size_);
6713 reserved_semispace_size_ = RoundUpToPowerOf2(reserved_semispace_size_);
6714 initial_semispace_size_ = Min(initial_semispace_size_, max_semispace_size_);
6716 // The external allocation limit should be below 256 MB on all architectures
6717 // to avoid unnecessary low memory notifications, as that is the threshold
6718 // for some embedders.
6719 external_allocation_limit_ = 12 * max_semispace_size_;
6720 ASSERT(external_allocation_limit_ <= 256 * MB);
6722 // The old generation is paged and needs at least one page for each space.
6723 int paged_space_count = LAST_PAGED_SPACE - FIRST_PAGED_SPACE + 1;
6724 max_old_generation_size_ = Max(static_cast<intptr_t>(paged_space_count *
Page::kPageSize),
6726 RoundUp(max_old_generation_size_,
Page::kPageSize));
configured_ = true;
return true;
}
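// Worked example (illustrative): a requested max semispace size smaller than
// Page::kPageSize is bumped up to one page (with a warning under --trace-gc),
// and the result is rounded up to a power of two; e.g. a 3 MB request rounds
// up to 4 MB, which makes external_allocation_limit_ 12 * 4 MB = 48 MB, well
// below the 256 MB ceiling asserted above.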
6734 bool Heap::ConfigureHeapDefault() {
6735 return ConfigureHeap(static_cast<intptr_t>(FLAG_max_new_space_size / 2) * KB,
6736 static_cast<intptr_t>(FLAG_max_old_space_size) * MB,
6737 static_cast<intptr_t>(FLAG_max_executable_size) * MB);
6741 void Heap::RecordStats(HeapStats* stats, bool take_snapshot) {
6742 *stats->start_marker = HeapStats::kStartMarker;
6743 *stats->end_marker = HeapStats::kEndMarker;
6744 *stats->new_space_size = new_space_.SizeAsInt();
6745 *stats->new_space_capacity = static_cast<int>(new_space_.Capacity());
6746 *stats->old_pointer_space_size = old_pointer_space_->SizeOfObjects();
6747 *stats->old_pointer_space_capacity = old_pointer_space_->Capacity();
6748 *stats->old_data_space_size = old_data_space_->SizeOfObjects();
6749 *stats->old_data_space_capacity = old_data_space_->Capacity();
6750 *stats->code_space_size = code_space_->SizeOfObjects();
6751 *stats->code_space_capacity = code_space_->Capacity();
6752 *stats->map_space_size = map_space_->SizeOfObjects();
6753 *stats->map_space_capacity = map_space_->Capacity();
6754 *stats->cell_space_size = cell_space_->SizeOfObjects();
6755 *stats->cell_space_capacity = cell_space_->Capacity();
6756 *stats->property_cell_space_size = property_cell_space_->SizeOfObjects();
6757 *stats->property_cell_space_capacity = property_cell_space_->Capacity();
6758 *stats->lo_space_size = lo_space_->Size();
6759 isolate_->global_handles()->RecordStats(stats);
6760 *stats->memory_allocator_size = isolate()->memory_allocator()->Size();
6761 *stats->memory_allocator_capacity =
6762 isolate()->memory_allocator()->Size() +
6763 isolate()->memory_allocator()->Available();
6764 *stats->os_error = OS::GetLastError();
6765 isolate()->memory_allocator()->Available();
6766 if (take_snapshot) {
6767 HeapIterator iterator(this);
6768 for (HeapObject* obj = iterator.next();
6770 obj = iterator.next()) {
6771 InstanceType type = obj->map()->instance_type();
6772 ASSERT(0 <= type && type <= LAST_TYPE);
6773 stats->objects_per_type[type]++;
6774 stats->size_per_type[type] += obj->Size();
6780 intptr_t Heap::PromotedSpaceSizeOfObjects() {
6781 return old_pointer_space_->SizeOfObjects()
6782 + old_data_space_->SizeOfObjects()
6783 + code_space_->SizeOfObjects()
6784 + map_space_->SizeOfObjects()
6785 + cell_space_->SizeOfObjects()
6786 + property_cell_space_->SizeOfObjects()
6787 + lo_space_->SizeOfObjects();
6791 intptr_t Heap::PromotedExternalMemorySize() {
6792 if (amount_of_external_allocated_memory_
6793 <= amount_of_external_allocated_memory_at_last_global_gc_) return 0;
6794 return amount_of_external_allocated_memory_
6795 - amount_of_external_allocated_memory_at_last_global_gc_;
6799 V8_DECLARE_ONCE(initialize_gc_once);
6801 static void InitializeGCOnce() {
6802 InitializeScavengingVisitorsTables();
6803 NewSpaceScavenger::Initialize();
6804 MarkCompactCollector::Initialize();
6808 bool Heap::SetUp() {
6810 allocation_timeout_ = FLAG_gc_interval;
6813 // Initialize heap spaces and initial maps and objects. Whenever something
6814 // goes wrong, just return false. The caller should check the results and
6815 // call Heap::TearDown() to release allocated memory.
6817 // If the heap is not yet configured (e.g. through the API), configure it.
6818 // Configuration is based on the flags new-space-size (really the semispace
6819 // size) and old-space-size if set or the initial values of semispace_size_
6820 // and old_generation_size_ otherwise.
6822 if (!ConfigureHeapDefault()) return false;
6825 CallOnce(&initialize_gc_once, &InitializeGCOnce);
6827 MarkMapPointersAsEncoded(false);
6829 // Set up memory allocator.
6830 if (!isolate_->memory_allocator()->SetUp(MaxReserved(), MaxExecutableSize()))
6833 // Set up new space.
6834 if (!new_space_.SetUp(reserved_semispace_size_, max_semispace_size_)) {
6838 // Initialize old pointer space.
6839 old_pointer_space_ =
6841 max_old_generation_size_,
6844 if (old_pointer_space_ == NULL) return false;
6845 if (!old_pointer_space_->SetUp()) return false;
6847 // Initialize old data space.
6850 max_old_generation_size_,
6853 if (old_data_space_ == NULL) return false;
6854 if (!old_data_space_->SetUp()) return false;
6856 // Initialize the code space, set its maximum capacity to the old
6857 // generation size. It needs executable memory.
6858 // On 64-bit platform(s), we put all code objects in a 2 GB range of
6859 // virtual address space, so that they can call each other with near calls.
6860 if (code_range_size_ > 0) {
6861 if (!isolate_->code_range()->SetUp(code_range_size_)) {
6867 new OldSpace(this, max_old_generation_size_, CODE_SPACE, EXECUTABLE);
6868 if (code_space_ == NULL) return false;
6869 if (!code_space_->SetUp()) return false;
6871 // Initialize map space.
6872 map_space_ = new MapSpace(this, max_old_generation_size_, MAP_SPACE);
6873 if (map_space_ == NULL) return false;
6874 if (!map_space_->SetUp()) return false;
6876 // Initialize simple cell space.
6877 cell_space_ = new CellSpace(this, max_old_generation_size_, CELL_SPACE);
6878 if (cell_space_ == NULL) return false;
6879 if (!cell_space_->SetUp()) return false;
6881 // Initialize global property cell space.
6882 property_cell_space_ = new PropertyCellSpace(this, max_old_generation_size_,
6883 PROPERTY_CELL_SPACE);
6884 if (property_cell_space_ == NULL) return false;
6885 if (!property_cell_space_->SetUp()) return false;
6887 // The large object code space may contain code or data. We set the memory
6888 // to be non-executable here for safety, but this means we need to enable it
6889 // explicitly when allocating large code objects.
6890 lo_space_ = new LargeObjectSpace(this, max_old_generation_size_, LO_SPACE);
6891 if (lo_space_ == NULL) return false;
6892 if (!lo_space_->SetUp()) return false;
6894 // Set up the seed that is used to randomize the string hash function.
6895 ASSERT(hash_seed() == 0);
6896 if (FLAG_randomize_hashes) {
6897 if (FLAG_hash_seed == 0) {
6899 Smi::FromInt(V8::RandomPrivate(isolate()) & 0x3fffffff));
6901 set_hash_seed(Smi::FromInt(FLAG_hash_seed));
6905 LOG(isolate_, IntPtrTEvent("heap-capacity", Capacity()));
6906 LOG(isolate_, IntPtrTEvent("heap-available", Available()));
6908 store_buffer()->SetUp();
6910 if (FLAG_parallel_recompilation) relocation_mutex_ = OS::CreateMutex();
6912 relocation_mutex_locked_by_optimizer_thread_ = false;
return true;
}
6919 bool Heap::CreateHeapObjects() {
6920 // Create initial maps.
6921 if (!CreateInitialMaps()) return false;
6922 if (!CreateApiObjects()) return false;
6924 // Create initial objects
6925 if (!CreateInitialObjects()) return false;
6927 native_contexts_list_ = undefined_value();
6928 array_buffers_list_ = undefined_value();
6929 allocation_sites_list_ = undefined_value();
6934 void Heap::SetStackLimits() {
6935 ASSERT(isolate_ != NULL);
6936 ASSERT(isolate_ == isolate());
6937 // On 64 bit machines, pointers are generally out of range of Smis. We write
6938 // something that looks like an out of range Smi to the GC.
6940 // Set up the special root array entries containing the stack limits.
6941 // These are actually addresses, but the tag makes the GC ignore it.
6942 roots_[kStackLimitRootIndex] =
6943 reinterpret_cast<Object*>(
6944 (isolate_->stack_guard()->jslimit() & ~kSmiTagMask) | kSmiTag);
6945 roots_[kRealStackLimitRootIndex] =
6946 reinterpret_cast<Object*>(
6947 (isolate_->stack_guard()->real_jslimit() & ~kSmiTagMask) | kSmiTag);
}
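// Illustration only (hypothetical helper): how an arbitrary address is made
// to look like a smi so the GC ignores it. Clearing the tag bits and or-ing
// in kSmiTag keeps the high bits of the address while guaranteeing the value
// is never mistaken for a heap pointer.
//
//   static Object* AddressAsFakeSmi(uintptr_t address) {
//     return reinterpret_cast<Object*>((address & ~kSmiTagMask) | kSmiTag);
//   }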
6951 void Heap::TearDown() {
6953 if (FLAG_verify_heap) {
6958 if (FLAG_print_cumulative_gc_stat) {
6960 PrintF("gc_count=%d ", gc_count_);
6961 PrintF("mark_sweep_count=%d ", ms_count_);
6962 PrintF("max_gc_pause=%.1f ", get_max_gc_pause());
6963 PrintF("total_gc_time=%.1f ", total_gc_time_ms_);
6964 PrintF("min_in_mutator=%.1f ", get_min_in_mutator());
6965 PrintF("max_alive_after_gc=%" V8_PTR_PREFIX "d ",
6966 get_max_alive_after_gc());
6967 PrintF("total_marking_time=%.1f ", marking_time());
6968 PrintF("total_sweeping_time=%.1f ", sweeping_time());
6972 TearDownArrayBuffers();
6974 isolate_->global_handles()->TearDown();
6976 external_string_table_.TearDown();
6978 error_object_list_.TearDown();
6980 new_space_.TearDown();
6982 if (old_pointer_space_ != NULL) {
6983 old_pointer_space_->TearDown();
6984 delete old_pointer_space_;
6985 old_pointer_space_ = NULL;
6988 if (old_data_space_ != NULL) {
6989 old_data_space_->TearDown();
6990 delete old_data_space_;
6991 old_data_space_ = NULL;
6994 if (code_space_ != NULL) {
6995 code_space_->TearDown();
7000 if (map_space_ != NULL) {
7001 map_space_->TearDown();
7006 if (cell_space_ != NULL) {
7007 cell_space_->TearDown();
7012 if (property_cell_space_ != NULL) {
7013 property_cell_space_->TearDown();
7014 delete property_cell_space_;
7015 property_cell_space_ = NULL;
7018 if (lo_space_ != NULL) {
7019 lo_space_->TearDown();
7024 store_buffer()->TearDown();
7025 incremental_marking()->TearDown();
7027 isolate_->memory_allocator()->TearDown();
7029 delete relocation_mutex_;
7033 void Heap::AddGCPrologueCallback(GCPrologueCallback callback, GCType gc_type) {
7034 ASSERT(callback != NULL);
7035 GCPrologueCallbackPair pair(callback, gc_type);
7036 ASSERT(!gc_prologue_callbacks_.Contains(pair));
7037 return gc_prologue_callbacks_.Add(pair);
7041 void Heap::RemoveGCPrologueCallback(GCPrologueCallback callback) {
7042 ASSERT(callback != NULL);
7043 for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
7044 if (gc_prologue_callbacks_[i].callback == callback) {
7045 gc_prologue_callbacks_.Remove(i);
7053 void Heap::AddGCEpilogueCallback(GCEpilogueCallback callback, GCType gc_type) {
7054 ASSERT(callback != NULL);
7055 GCEpilogueCallbackPair pair(callback, gc_type);
7056 ASSERT(!gc_epilogue_callbacks_.Contains(pair));
7057 return gc_epilogue_callbacks_.Add(pair);
7061 void Heap::RemoveGCEpilogueCallback(GCEpilogueCallback callback) {
7062 ASSERT(callback != NULL);
7063 for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
7064 if (gc_epilogue_callbacks_[i].callback == callback) {
7065 gc_epilogue_callbacks_.Remove(i);
7075 class PrintHandleVisitor: public ObjectVisitor {
7077 void VisitPointers(Object** start, Object** end) {
7078 for (Object** p = start; p < end; p++)
7079 PrintF(" handle %p to %p\n",
7080 reinterpret_cast<void*>(p),
7081 reinterpret_cast<void*>(*p));
7086 void Heap::PrintHandles() {
7087 PrintF("Handles:\n");
7088 PrintHandleVisitor v;
7089 isolate_->handle_scope_implementer()->Iterate(&v);
7095 Space* AllSpaces::next() {
7096 switch (counter_++) {
7098 return heap_->new_space();
7099 case OLD_POINTER_SPACE:
7100 return heap_->old_pointer_space();
7101 case OLD_DATA_SPACE:
7102 return heap_->old_data_space();
7104 return heap_->code_space();
7106 return heap_->map_space();
7108 return heap_->cell_space();
7109 case PROPERTY_CELL_SPACE:
7110 return heap_->property_cell_space();
7112 return heap_->lo_space();
7119 PagedSpace* PagedSpaces::next() {
7120 switch (counter_++) {
7121 case OLD_POINTER_SPACE:
7122 return heap_->old_pointer_space();
7123 case OLD_DATA_SPACE:
7124 return heap_->old_data_space();
7126 return heap_->code_space();
7128 return heap_->map_space();
7130 return heap_->cell_space();
7131 case PROPERTY_CELL_SPACE:
7132 return heap_->property_cell_space();
7140 OldSpace* OldSpaces::next() {
7141 switch (counter_++) {
7142 case OLD_POINTER_SPACE:
7143 return heap_->old_pointer_space();
7144 case OLD_DATA_SPACE:
7145 return heap_->old_data_space();
7147 return heap_->code_space();
7154 SpaceIterator::SpaceIterator(Heap* heap)
7155 : heap_(heap),
7156 current_space_(FIRST_SPACE),
7157 iterator_(NULL),
7158 size_func_(NULL) {
7159 }
7162 SpaceIterator::SpaceIterator(Heap* heap, HeapObjectCallback size_func)
7163 : heap_(heap),
7164 current_space_(FIRST_SPACE),
7165 iterator_(NULL),
7166 size_func_(size_func) {
7167 }
7170 SpaceIterator::~SpaceIterator() {
7171 // Delete active iterator if any.
7172 delete iterator_;
7173 }
7176 bool SpaceIterator::has_next() {
7177 // Iterate until no more spaces.
7178 return current_space_ != LAST_SPACE;
7182 ObjectIterator* SpaceIterator::next() {
7183 if (iterator_ != NULL) {
7184 delete iterator_;
7185 iterator_ = NULL;
7186 // Move to the next space
7187 current_space_++;
7188 if (current_space_ > LAST_SPACE) {
7189 return NULL;
7190 }
7191 }
7193 // Return iterator for the new current space.
7194 return CreateIterator();
7195 }
7198 // Create an iterator for the space to iterate.
7199 ObjectIterator* SpaceIterator::CreateIterator() {
7200 ASSERT(iterator_ == NULL);
7202 switch (current_space_) {
7203 case NEW_SPACE:
7204 iterator_ = new SemiSpaceIterator(heap_->new_space(), size_func_);
7205 break;
7206 case OLD_POINTER_SPACE:
7207 iterator_ =
7208 new HeapObjectIterator(heap_->old_pointer_space(), size_func_);
7209 break;
7210 case OLD_DATA_SPACE:
7211 iterator_ = new HeapObjectIterator(heap_->old_data_space(), size_func_);
7212 break;
7213 case CODE_SPACE:
7214 iterator_ = new HeapObjectIterator(heap_->code_space(), size_func_);
7215 break;
7216 case MAP_SPACE:
7217 iterator_ = new HeapObjectIterator(heap_->map_space(), size_func_);
7218 break;
7219 case CELL_SPACE:
7220 iterator_ = new HeapObjectIterator(heap_->cell_space(), size_func_);
7221 break;
7222 case PROPERTY_CELL_SPACE:
7223 iterator_ = new HeapObjectIterator(heap_->property_cell_space(),
7224 size_func_);
7225 break;
7226 case LO_SPACE:
7227 iterator_ = new LargeObjectIterator(heap_->lo_space(), size_func_);
7228 break;
7229 }
7231 // Return the newly allocated iterator.
7232 ASSERT(iterator_ != NULL);
7233 return iterator_;
7234 }
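// HeapObjectsFilter is the interface HeapIterator uses to skip objects.
// UnreachableObjectsFilter marks everything reachable from the roots when it
// is constructed and then reports unmarked objects as skippable, i.e.
// unreachable. The mark bits are cleared again in its destructor.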
7237 class HeapObjectsFilter {
7238 public:
7239 virtual ~HeapObjectsFilter() {}
7240 virtual bool SkipObject(HeapObject* object) = 0;
7241 };
7244 class UnreachableObjectsFilter : public HeapObjectsFilter {
7245 public:
7246 UnreachableObjectsFilter() {
7247 MarkReachableObjects();
7248 }
7250 ~UnreachableObjectsFilter() {
7251 Isolate::Current()->heap()->mark_compact_collector()->ClearMarkbits();
7252 }
7254 bool SkipObject(HeapObject* object) {
7255 MarkBit mark_bit = Marking::MarkBitFrom(object);
7256 return !mark_bit.Get();
7257 }
7259 private:
7260 class MarkingVisitor : public ObjectVisitor {
7261 public:
7262 MarkingVisitor() : marking_stack_(10) {}
7264 void VisitPointers(Object** start, Object** end) {
7265 for (Object** p = start; p < end; p++) {
7266 if (!(*p)->IsHeapObject()) continue;
7267 HeapObject* obj = HeapObject::cast(*p);
7268 MarkBit mark_bit = Marking::MarkBitFrom(obj);
7269 if (!mark_bit.Get()) {
7270 mark_bit.Set();
7271 marking_stack_.Add(obj);
7272 }
7273 }
7274 }
7276 void TransitiveClosure() {
7277 while (!marking_stack_.is_empty()) {
7278 HeapObject* obj = marking_stack_.RemoveLast();
7279 obj->Iterate(this);
7280 }
7281 }
7283 private:
7284 List<HeapObject*> marking_stack_;
7285 };
7287 void MarkReachableObjects() {
7288 Heap* heap = Isolate::Current()->heap();
7289 MarkingVisitor visitor;
7290 heap->IterateRoots(&visitor, VISIT_ALL);
7291 visitor.TransitiveClosure();
7292 }
7294 DisallowHeapAllocation no_allocation_;
7295 };
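// HeapIterator visits every heap object in every space. When constructed
// with kFilterUnreachable it additionally builds an UnreachableObjectsFilter
// so that only objects reachable from the roots are returned.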
7298 HeapIterator::HeapIterator(Heap* heap)
7299 : heap_(heap),
7300 filtering_(HeapIterator::kNoFiltering),
7301 filter_(NULL) {
7302 Init();
7303 }
7306 HeapIterator::HeapIterator(Heap* heap,
7307 HeapIterator::HeapObjectsFiltering filtering)
7308 : heap_(heap),
7309 filtering_(filtering),
7310 filter_(NULL) {
7311 Init();
7312 }
7315 HeapIterator::~HeapIterator() {
7316 Shutdown();
7317 }
7320 void HeapIterator::Init() {
7321 // Start the iteration.
7322 space_iterator_ = new SpaceIterator(heap_);
7323 switch (filtering_) {
7324 case kFilterUnreachable:
7325 filter_ = new UnreachableObjectsFilter;
7326 break;
7327 default:
7328 break;
7329 }
7330 object_iterator_ = space_iterator_->next();
7334 void HeapIterator::Shutdown() {
7336 // Assert that in filtering mode we have iterated through all
7337 // objects. Otherwise, heap will be left in an inconsistent state.
7338 if (filtering_ != kNoFiltering) {
7339 ASSERT(object_iterator_ == NULL);
7340 }
7342 // Make sure the last iterator is deallocated.
7343 delete space_iterator_;
7344 space_iterator_ = NULL;
7345 object_iterator_ = NULL;
7346 delete filter_;
7347 filter_ = NULL;
7348 }
7351 HeapObject* HeapIterator::next() {
7352 if (filter_ == NULL) return NextObject();
7354 HeapObject* obj = NextObject();
7355 while (obj != NULL && filter_->SkipObject(obj)) obj = NextObject();
7356 return obj;
7357 }
7360 HeapObject* HeapIterator::NextObject() {
7361 // No iterator means we are done.
7362 if (object_iterator_ == NULL) return NULL;
7364 if (HeapObject* obj = object_iterator_->next_object()) {
7365 // If the current iterator has more objects we are fine.
7366 return obj;
7367 } else {
7368 // Go through the spaces looking for one that has objects.
7369 while (space_iterator_->has_next()) {
7370 object_iterator_ = space_iterator_->next();
7371 if (HeapObject* obj = object_iterator_->next_object()) {
7372 return obj;
7373 }
7374 }
7375 }
7376 // Done with the last space.
7377 object_iterator_ = NULL;
7378 return NULL;
7379 }
7382 void HeapIterator::reset() {
7383 // Restart the iterator.
7384 Shutdown();
7385 Init();
7386 }
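// PathTracer performs a depth-first search from the given roots and records
// the chain of objects leading to a target object (or to any global object).
// Visited objects are marked by tagging their map pointer with kMarkTag and
// are unmarked again once the trace is complete.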
7391 Object* const PathTracer::kAnyGlobalObject = NULL;
7393 class PathTracer::MarkVisitor: public ObjectVisitor {
7394 public:
7395 explicit MarkVisitor(PathTracer* tracer) : tracer_(tracer) {}
7396 void VisitPointers(Object** start, Object** end) {
7397 // Scan all HeapObject pointers in [start, end)
7398 for (Object** p = start; !tracer_->found() && (p < end); p++) {
7399 if ((*p)->IsHeapObject())
7400 tracer_->MarkRecursively(p, this);
7401 }
7402 }
7404 private:
7405 PathTracer* tracer_;
7406 };
7409 class PathTracer::UnmarkVisitor: public ObjectVisitor {
7410 public:
7411 explicit UnmarkVisitor(PathTracer* tracer) : tracer_(tracer) {}
7412 void VisitPointers(Object** start, Object** end) {
7413 // Scan all HeapObject pointers in [start, end)
7414 for (Object** p = start; p < end; p++) {
7415 if ((*p)->IsHeapObject())
7416 tracer_->UnmarkRecursively(p, this);
7417 }
7418 }
7420 private:
7421 PathTracer* tracer_;
7422 };
7425 void PathTracer::VisitPointers(Object** start, Object** end) {
7426 bool done = ((what_to_find_ == FIND_FIRST) && found_target_);
7427 // Visit all HeapObject pointers in [start, end)
7428 for (Object** p = start; !done && (p < end); p++) {
7429 if ((*p)->IsHeapObject()) {
7430 TracePathFrom(p);
7431 done = ((what_to_find_ == FIND_FIRST) && found_target_);
7432 }
7433 }
7434 }
7437 void PathTracer::Reset() {
7438 found_target_ = false;
7439 object_stack_.Clear();
7443 void PathTracer::TracePathFrom(Object** root) {
7444 ASSERT((search_target_ == kAnyGlobalObject) ||
7445 search_target_->IsHeapObject());
7446 found_target_in_trace_ = false;
7447 Reset();
7449 MarkVisitor mark_visitor(this);
7450 MarkRecursively(root, &mark_visitor);
7452 UnmarkVisitor unmark_visitor(this);
7453 UnmarkRecursively(root, &unmark_visitor);
7455 ProcessResults();
7456 }
7459 static bool SafeIsNativeContext(HeapObject* obj) {
7460 return obj->map() == obj->GetHeap()->raw_unchecked_native_context_map();
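// An object is considered visited when its map word carries kMarkTag. The
// body is scanned before the map so that the reported path stays focused on
// application objects rather than on maps.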
7464 void PathTracer::MarkRecursively(Object** p, MarkVisitor* mark_visitor) {
7465 if (!(*p)->IsHeapObject()) return;
7467 HeapObject* obj = HeapObject::cast(*p);
7469 Object* map = obj->map();
7471 if (!map->IsHeapObject()) return; // visited before
7473 if (found_target_in_trace_) return; // stop if target found
7474 object_stack_.Add(obj);
7475 if (((search_target_ == kAnyGlobalObject) && obj->IsJSGlobalObject()) ||
7476 (obj == search_target_)) {
7477 found_target_in_trace_ = true;
7478 found_target_ = true;
7479 return;
7480 }
7482 bool is_native_context = SafeIsNativeContext(obj);
7485 Map* map_p = reinterpret_cast<Map*>(HeapObject::cast(map));
7487 Address map_addr = map_p->address();
7489 obj->set_map_no_write_barrier(reinterpret_cast<Map*>(map_addr + kMarkTag));
7491 // Scan the object body.
7492 if (is_native_context && (visit_mode_ == VISIT_ONLY_STRONG)) {
7493 // This is specialized to scan Context's properly.
7494 Object** start = reinterpret_cast<Object**>(obj->address() +
7495 Context::kHeaderSize);
7496 Object** end = reinterpret_cast<Object**>(obj->address() +
7497 Context::kHeaderSize + Context::FIRST_WEAK_SLOT * kPointerSize);
7498 mark_visitor->VisitPointers(start, end);
7499 } else {
7500 obj->IterateBody(map_p->instance_type(),
7501 obj->SizeFromMap(map_p),
7502 mark_visitor);
7503 }
7505 // Scan the map after the body because the body is a lot more interesting
7506 // when doing leak detection.
7507 MarkRecursively(&map, mark_visitor);
7509 if (!found_target_in_trace_) // don't pop if found the target
7510 object_stack_.RemoveLast();
7511 }
7514 void PathTracer::UnmarkRecursively(Object** p, UnmarkVisitor* unmark_visitor) {
7515 if (!(*p)->IsHeapObject()) return;
7517 HeapObject* obj = HeapObject::cast(*p);
7519 Object* map = obj->map();
7521 if (map->IsHeapObject()) return; // unmarked already
7523 Address map_addr = reinterpret_cast<Address>(map);
7525 map_addr -= kMarkTag;
7527 ASSERT_TAG_ALIGNED(map_addr);
7529 HeapObject* map_p = HeapObject::FromAddress(map_addr);
7531 obj->set_map_no_write_barrier(reinterpret_cast<Map*>(map_p));
7533 UnmarkRecursively(reinterpret_cast<Object**>(&map_p), unmark_visitor);
7535 obj->IterateBody(Map::cast(map_p)->instance_type(),
7536 obj->SizeFromMap(Map::cast(map_p)),
7537 unmark_visitor);
7538 }
7541 void PathTracer::ProcessResults() {
7542 if (found_target_) {
7543 PrintF("=====================================\n");
7544 PrintF("==== Path to object ====\n");
7545 PrintF("=====================================\n\n");
7547 ASSERT(!object_stack_.is_empty());
7548 for (int i = 0; i < object_stack_.length(); i++) {
7549 if (i > 0) PrintF("\n |\n |\n V\n\n");
7550 Object* obj = object_stack_[i];
7551 obj->Print();
7552 }
7553 PrintF("=====================================\n");
7554 }
7555 }
7558 // Triggers a depth-first traversal of reachable objects from one
7559 // given root object and finds a path to a specific heap object and
7560 // prints it.
7561 void Heap::TracePathToObjectFrom(Object* target, Object* root) {
7562 PathTracer tracer(target, PathTracer::FIND_ALL, VISIT_ALL);
7563 tracer.VisitPointer(&root);
7567 // Triggers a depth-first traversal of reachable objects from roots
7568 // and finds a path to a specific heap object and prints it.
7569 void Heap::TracePathToObject(Object* target) {
7570 PathTracer tracer(target, PathTracer::FIND_ALL, VISIT_ALL);
7571 IterateRoots(&tracer, VISIT_ONLY_STRONG);
7575 // Triggers a depth-first traversal of reachable objects from roots
7576 // and finds a path to any global object and prints it. Useful for
7577 // determining the source for leaks of global objects.
7578 void Heap::TracePathToGlobal() {
7579 PathTracer tracer(PathTracer::kAnyGlobalObject,
7580 PathTracer::FIND_ALL,
7582 IterateRoots(&tracer, VISIT_ONLY_STRONG);
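// GCTracer gathers timing and size statistics for one garbage collection:
// the constructor samples the heap state before the collection and the
// destructor emits the --trace-gc one-liner (or the --trace-gc-nvp key=value
// form) and updates the cumulative statistics kept on the heap.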
7587 static intptr_t CountTotalHolesSize(Heap* heap) {
7588 intptr_t holes_size = 0;
7589 OldSpaces spaces(heap);
7590 for (OldSpace* space = spaces.next();
7591 space != NULL;
7592 space = spaces.next()) {
7593 holes_size += space->Waste() + space->Available();
7594 }
7595 return holes_size;
7596 }
7599 GCTracer::GCTracer(Heap* heap,
7600 const char* gc_reason,
7601 const char* collector_reason)
7602 : start_time_(0.0),
7603 start_object_size_(0),
7604 start_memory_size_(0),
7607 allocated_since_last_gc_(0),
7608 spent_in_mutator_(0),
7609 promoted_objects_size_(0),
7610 nodes_died_in_new_space_(0),
7611 nodes_copied_in_new_space_(0),
7612 nodes_promoted_(0),
7613 heap_(heap),
7614 gc_reason_(gc_reason),
7615 collector_reason_(collector_reason) {
7616 if (!FLAG_trace_gc && !FLAG_print_cumulative_gc_stat) return;
7617 start_time_ = OS::TimeCurrentMillis();
7618 start_object_size_ = heap_->SizeOfObjects();
7619 start_memory_size_ = heap_->isolate()->memory_allocator()->Size();
7621 for (int i = 0; i < Scope::kNumberOfScopes; i++) {
7622 scopes_[i] = 0;
7623 }
7625 in_free_list_or_wasted_before_gc_ = CountTotalHolesSize(heap);
7627 allocated_since_last_gc_ =
7628 heap_->SizeOfObjects() - heap_->alive_after_last_gc_;
7630 if (heap_->last_gc_end_timestamp_ > 0) {
7631 spent_in_mutator_ = Max(start_time_ - heap_->last_gc_end_timestamp_, 0.0);
7634 steps_count_ = heap_->incremental_marking()->steps_count();
7635 steps_took_ = heap_->incremental_marking()->steps_took();
7636 longest_step_ = heap_->incremental_marking()->longest_step();
7637 steps_count_since_last_gc_ =
7638 heap_->incremental_marking()->steps_count_since_last_gc();
7639 steps_took_since_last_gc_ =
7640 heap_->incremental_marking()->steps_took_since_last_gc();
7644 GCTracer::~GCTracer() {
7645 // Printf ONE line iff flag is set.
7646 if (!FLAG_trace_gc && !FLAG_print_cumulative_gc_stat) return;
7648 bool first_gc = (heap_->last_gc_end_timestamp_ == 0);
7650 heap_->alive_after_last_gc_ = heap_->SizeOfObjects();
7651 heap_->last_gc_end_timestamp_ = OS::TimeCurrentMillis();
7653 double time = heap_->last_gc_end_timestamp_ - start_time_;
7655 // Update cumulative GC statistics if required.
7656 if (FLAG_print_cumulative_gc_stat) {
7657 heap_->total_gc_time_ms_ += time;
7658 heap_->max_gc_pause_ = Max(heap_->max_gc_pause_, time);
7659 heap_->max_alive_after_gc_ = Max(heap_->max_alive_after_gc_,
7660 heap_->alive_after_last_gc_);
7661 if (!first_gc) {
7662 heap_->min_in_mutator_ = Min(heap_->min_in_mutator_,
7663 spent_in_mutator_);
7664 }
7665 } else if (FLAG_trace_gc_verbose) {
7666 heap_->total_gc_time_ms_ += time;
7669 if (collector_ == SCAVENGER && FLAG_trace_gc_ignore_scavenger) return;
7671 heap_->AddMarkingTime(scopes_[Scope::MC_MARK]);
7673 if (FLAG_print_cumulative_gc_stat && !FLAG_trace_gc) return;
7674 PrintPID("%8.0f ms: ", heap_->isolate()->time_millis_since_init());
7676 if (!FLAG_trace_gc_nvp) {
7677 int external_time = static_cast<int>(scopes_[Scope::EXTERNAL]);
7679 double end_memory_size_mb =
7680 static_cast<double>(heap_->isolate()->memory_allocator()->Size()) / MB;
7682 PrintF("%s %.1f (%.1f) -> %.1f (%.1f) MB, ",
7683 CollectorString(),
7684 static_cast<double>(start_object_size_) / MB,
7685 static_cast<double>(start_memory_size_) / MB,
7686 SizeOfHeapObjects(),
7687 end_memory_size_mb);
7689 if (external_time > 0) PrintF("%d / ", external_time);
7690 PrintF("%.1f ms", time);
7691 if (steps_count_ > 0) {
7692 if (collector_ == SCAVENGER) {
7693 PrintF(" (+ %.1f ms in %d steps since last GC)",
7694 steps_took_since_last_gc_,
7695 steps_count_since_last_gc_);
7696 } else {
7697 PrintF(" (+ %.1f ms in %d steps since start of marking, "
7698 "biggest step %.1f ms)",
7699 steps_took_,
7700 steps_count_,
7701 longest_step_);
7702 }
7703 }
7705 if (gc_reason_ != NULL) {
7706 PrintF(" [%s]", gc_reason_);
7709 if (collector_reason_ != NULL) {
7710 PrintF(" [%s]", collector_reason_);
7711 }
7713 PrintF(".\n");
7714 } else {
7715 PrintF("pause=%.1f ", time);
7716 PrintF("mutator=%.1f ", spent_in_mutator_);
7717 PrintF("gc=");
7718 switch (collector_) {
7719 case SCAVENGER:
7720 PrintF("s");
7721 break;
7722 case MARK_COMPACTOR:
7723 PrintF("ms");
7724 break;
7725 default:
7726 UNREACHABLE();
7727 }
7728 PrintF(" ");
7730 PrintF("external=%.1f ", scopes_[Scope::EXTERNAL]);
7731 PrintF("mark=%.1f ", scopes_[Scope::MC_MARK]);
7732 PrintF("sweep=%.1f ", scopes_[Scope::MC_SWEEP]);
7733 PrintF("sweepns=%.1f ", scopes_[Scope::MC_SWEEP_NEWSPACE]);
7734 PrintF("evacuate=%.1f ", scopes_[Scope::MC_EVACUATE_PAGES]);
7735 PrintF("new_new=%.1f ", scopes_[Scope::MC_UPDATE_NEW_TO_NEW_POINTERS]);
7736 PrintF("root_new=%.1f ", scopes_[Scope::MC_UPDATE_ROOT_TO_NEW_POINTERS]);
7737 PrintF("old_new=%.1f ", scopes_[Scope::MC_UPDATE_OLD_TO_NEW_POINTERS]);
7738 PrintF("compaction_ptrs=%.1f ",
7739 scopes_[Scope::MC_UPDATE_POINTERS_TO_EVACUATED]);
7740 PrintF("intracompaction_ptrs=%.1f ",
7741 scopes_[Scope::MC_UPDATE_POINTERS_BETWEEN_EVACUATED]);
7742 PrintF("misc_compaction=%.1f ", scopes_[Scope::MC_UPDATE_MISC_POINTERS]);
7743 PrintF("weakcollection_process=%.1f ",
7744 scopes_[Scope::MC_WEAKCOLLECTION_PROCESS]);
7745 PrintF("weakcollection_clear=%.1f ",
7746 scopes_[Scope::MC_WEAKCOLLECTION_CLEAR]);
7748 PrintF("total_size_before=%" V8_PTR_PREFIX "d ", start_object_size_);
7749 PrintF("total_size_after=%" V8_PTR_PREFIX "d ", heap_->SizeOfObjects());
7750 PrintF("holes_size_before=%" V8_PTR_PREFIX "d ",
7751 in_free_list_or_wasted_before_gc_);
7752 PrintF("holes_size_after=%" V8_PTR_PREFIX "d ", CountTotalHolesSize(heap_));
7754 PrintF("allocated=%" V8_PTR_PREFIX "d ", allocated_since_last_gc_);
7755 PrintF("promoted=%" V8_PTR_PREFIX "d ", promoted_objects_size_);
7756 PrintF("nodes_died_in_new=%d ", nodes_died_in_new_space_);
7757 PrintF("nodes_copied_in_new=%d ", nodes_copied_in_new_space_);
7758 PrintF("nodes_promoted=%d ", nodes_promoted_);
7760 if (collector_ == SCAVENGER) {
7761 PrintF("stepscount=%d ", steps_count_since_last_gc_);
7762 PrintF("stepstook=%.1f ", steps_took_since_last_gc_);
7763 } else {
7764 PrintF("stepscount=%d ", steps_count_);
7765 PrintF("stepstook=%.1f ", steps_took_);
7766 PrintF("longeststep=%.1f ", longest_step_);
7767 }
7769 PrintF("\n");
7770 }
7772 heap_->PrintShortHeapStatistics();
7773 }
7776 const char* GCTracer::CollectorString() {
7777 switch (collector_) {
7778 case SCAVENGER:
7779 return "Scavenge";
7780 case MARK_COMPACTOR:
7781 return "Mark-sweep";
7782 }
7783 return "Unknown GC";
7784 }
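// KeyedLookupCache maps (map, property name) pairs to field offsets. Entries
// live in small buckets of kEntriesPerBucket slots: Update() fills a free
// slot if one exists, otherwise it shifts the bucket down and writes the new
// entry at position zero.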
7787 int KeyedLookupCache::Hash(Map* map, Name* name) {
7788 // Uses only lower 32 bits if pointers are larger.
7789 uintptr_t addr_hash =
7790 static_cast<uint32_t>(reinterpret_cast<uintptr_t>(map)) >> kMapHashShift;
7791 return static_cast<uint32_t>((addr_hash ^ name->Hash()) & kCapacityMask);
7795 int KeyedLookupCache::Lookup(Map* map, Name* name) {
7796 int index = (Hash(map, name) & kHashMask);
7797 for (int i = 0; i < kEntriesPerBucket; i++) {
7798 Key& key = keys_[index + i];
7799 if ((key.map == map) && key.name->Equals(name)) {
7800 return field_offsets_[index + i];
7801 }
7802 }
7804 return kNotFound;
7805 }
7807 void KeyedLookupCache::Update(Map* map, Name* name, int field_offset) {
7808 if (!name->IsUniqueName()) {
7809 String* internalized_string;
7810 if (!HEAP->InternalizeStringIfExists(
7811 String::cast(name), &internalized_string)) {
7812 return;
7813 }
7814 name = internalized_string;
7815 }
7816 // This cache is cleared only between mark compact passes, so we expect the
7817 // cache to only contain old space names.
7818 ASSERT(!HEAP->InNewSpace(name));
7820 int index = (Hash(map, name) & kHashMask);
7821 // After a GC there will be free slots, so we use them in order (this may
7822 // help to get the most frequently used one in position 0).
7823 for (int i = 0; i < kEntriesPerBucket; i++) {
7824 Key& key = keys_[index];
7825 Object* free_entry_indicator = NULL;
7826 if (key.map == free_entry_indicator) {
7827 key.map = map;
7828 key.name = name;
7829 field_offsets_[index + i] = field_offset;
7830 return;
7831 }
7832 }
7833 // No free entry found in this bucket, so we move them all down one and
7834 // put the new entry at position zero.
7835 for (int i = kEntriesPerBucket - 1; i > 0; i--) {
7836 Key& key = keys_[index + i];
7837 Key& key2 = keys_[index + i - 1];
7838 key = key2;
7839 field_offsets_[index + i] = field_offsets_[index + i - 1];
7840 }
7842 // Write the new first entry.
7843 Key& key = keys_[index];
7844 key.map = map;
7845 key.name = name;
7846 field_offsets_[index] = field_offset;
7847 }
7850 void KeyedLookupCache::Clear() {
7851 for (int index = 0; index < kLength; index++) keys_[index].map = NULL;
7855 void DescriptorLookupCache::Clear() {
7856 for (int index = 0; index < kLength; index++) keys_[index].source = NULL;
7861 void Heap::GarbageCollectionGreedyCheck() {
7862 ASSERT(FLAG_gc_greedy);
7863 if (isolate_->bootstrapper()->IsActive()) return;
7864 if (disallow_allocation_failure()) return;
7865 CollectGarbage(NEW_SPACE);
7870 TranscendentalCache::SubCache::SubCache(Type t)
7871 : type_(t),
7872 isolate_(Isolate::Current()) {
7873 uint32_t in0 = 0xffffffffu; // Bit-pattern for a NaN that isn't
7874 uint32_t in1 = 0xffffffffu; // generated by the FPU.
7875 for (int i = 0; i < kCacheSize; i++) {
7876 elements_[i].in[0] = in0;
7877 elements_[i].in[1] = in1;
7878 elements_[i].output = NULL;
7883 void TranscendentalCache::Clear() {
7884 for (int i = 0; i < kNumberOfCaches; i++) {
7885 if (caches_[i] != NULL) {
7886 delete caches_[i];
7887 caches_[i] = NULL;
7888 }
7889 }
7890 }
7893 void ExternalStringTable::CleanUp() {
7894 int last = 0;
7895 for (int i = 0; i < new_space_strings_.length(); ++i) {
7896 if (new_space_strings_[i] == heap_->the_hole_value()) {
7897 continue;
7898 }
7899 if (heap_->InNewSpace(new_space_strings_[i])) {
7900 new_space_strings_[last++] = new_space_strings_[i];
7901 } else {
7902 old_space_strings_.Add(new_space_strings_[i]);
7903 }
7904 }
7905 new_space_strings_.Rewind(last);
7906 new_space_strings_.Trim();
7908 last = 0;
7909 for (int i = 0; i < old_space_strings_.length(); ++i) {
7910 if (old_space_strings_[i] == heap_->the_hole_value()) {
7911 continue;
7912 }
7913 ASSERT(!heap_->InNewSpace(old_space_strings_[i]));
7914 old_space_strings_[last++] = old_space_strings_[i];
7915 }
7916 old_space_strings_.Rewind(last);
7917 old_space_strings_.Trim();
7918 #ifdef VERIFY_HEAP
7919 if (FLAG_verify_heap) {
7920 Verify();
7921 }
7922 #endif
7923 }
7926 void ExternalStringTable::TearDown() {
7927 new_space_strings_.Free();
7928 old_space_strings_.Free();
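// ErrorObjectList tracks error objects whose stack traces are formatted
// lazily. The Update* methods keep the list in sync when the GC moves or
// frees objects, and DeferredFormatStackTrace formats at most kBudgetPerGC
// stack traces after a collection by invoking the original 'stack' getter.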
7932 // Update all references.
7933 void ErrorObjectList::UpdateReferences() {
7934 for (int i = 0; i < list_.length(); i++) {
7935 HeapObject* object = HeapObject::cast(list_[i]);
7936 MapWord first_word = object->map_word();
7937 if (first_word.IsForwardingAddress()) {
7938 list_[i] = first_word.ToForwardingAddress();
7944 // Unforwarded objects in new space are dead and removed from the list.
7945 void ErrorObjectList::UpdateReferencesInNewSpace(Heap* heap) {
7946 if (list_.is_empty()) return;
7947 if (!nested_) {
7948 int write_index = 0;
7949 for (int i = 0; i < list_.length(); i++) {
7950 MapWord first_word = HeapObject::cast(list_[i])->map_word();
7951 if (first_word.IsForwardingAddress()) {
7952 list_[write_index++] = first_word.ToForwardingAddress();
7955 list_.Rewind(write_index);
7956 } else {
7957 // If a GC is triggered during DeferredFormatStackTrace, we do not move
7958 // objects in the list, we just remove dead ones, so as not to confuse the
7959 // loop in DeferredFormatStackTrace.
7960 for (int i = 0; i < list_.length(); i++) {
7961 MapWord first_word = HeapObject::cast(list_[i])->map_word();
7962 list_[i] = first_word.IsForwardingAddress()
7963 ? first_word.ToForwardingAddress()
7964 : heap->the_hole_value();
7965 }
7966 }
7967 }
7970 void ErrorObjectList::DeferredFormatStackTrace(Isolate* isolate) {
7971 // If formatting the stack trace causes a GC, this method will be
7972 // recursively called. In that case, skip the recursive call, since
7973 // the loop modifies the list while iterating over it.
7974 if (nested_ || list_.is_empty() || isolate->has_pending_exception()) return;
7975 nested_ = true;
7976 HandleScope scope(isolate);
7977 Handle<String> stack_key = isolate->factory()->stack_string();
7978 int write_index = 0;
7979 int budget = kBudgetPerGC;
7980 for (int i = 0; i < list_.length(); i++) {
7981 Object* object = list_[i];
7982 JSFunction* getter_fun;
7984 { DisallowHeapAllocation no_gc;
7985 // Skip possible holes in the list.
7986 if (object->IsTheHole()) continue;
7987 if (isolate->heap()->InNewSpace(object) || budget == 0) {
7988 list_[write_index++] = object;
7989 continue;
7990 }
7992 // Check whether the stack property is backed by the original getter.
7993 LookupResult lookup(isolate);
7994 JSObject::cast(object)->LocalLookupRealNamedProperty(*stack_key, &lookup);
7995 if (!lookup.IsFound() || lookup.type() != CALLBACKS) continue;
7996 Object* callback = lookup.GetCallbackObject();
7997 if (!callback->IsAccessorPair()) continue;
7998 Object* getter_obj = AccessorPair::cast(callback)->getter();
7999 if (!getter_obj->IsJSFunction()) continue;
8000 getter_fun = JSFunction::cast(getter_obj);
8001 String* key = isolate->heap()->hidden_stack_trace_string();
8002 Object* value = getter_fun->GetHiddenProperty(key);
8003 if (key != value) continue;
8004 }
8006 budget--;
8007 HandleScope scope(isolate);
8008 bool has_exception = false;
8010 Handle<Map> map(HeapObject::cast(object)->map(), isolate);
8012 Handle<Object> object_handle(object, isolate);
8013 Handle<Object> getter_handle(getter_fun, isolate);
8014 Execution::Call(getter_handle, object_handle, 0, NULL, &has_exception);
8015 ASSERT(*map == HeapObject::cast(*object_handle)->map());
8016 if (has_exception) {
8017 // Hit an exception (most likely a stack overflow).
8018 // Wrap up this pass and retry after another GC.
8019 isolate->clear_pending_exception();
8020 // We use the handle since calling the getter might have caused a GC.
8021 list_[write_index++] = *object_handle;
8022 budget = 0;
8023 }
8024 }
8025 list_.Rewind(write_index);
8026 list_.Trim();
8027 nested_ = false;
8028 }
8031 void ErrorObjectList::RemoveUnmarked(Heap* heap) {
8032 for (int i = 0; i < list_.length(); i++) {
8033 HeapObject* object = HeapObject::cast(list_[i]);
8034 if (!Marking::MarkBitFrom(object).Get()) {
8035 list_[i] = heap->the_hole_value();
8036 }
8037 }
8038 }
8041 void ErrorObjectList::TearDown() {
8042 list_.Free();
8043 }
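// Memory chunks released by the collector are not unmapped immediately; they
// are queued here and freed in FreeQueuedChunks() once the store buffer has
// been compacted and filtered, so that no stale slots point into them.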
8046 void Heap::QueueMemoryChunkForFree(MemoryChunk* chunk) {
8047 chunk->set_next_chunk(chunks_queued_for_free_);
8048 chunks_queued_for_free_ = chunk;
8052 void Heap::FreeQueuedChunks() {
8053 if (chunks_queued_for_free_ == NULL) return;
8054 MemoryChunk* next;
8055 MemoryChunk* chunk;
8056 for (chunk = chunks_queued_for_free_; chunk != NULL; chunk = next) {
8057 next = chunk->next_chunk();
8058 chunk->SetFlag(MemoryChunk::ABOUT_TO_BE_FREED);
8060 if (chunk->owner()->identity() == LO_SPACE) {
8061 // StoreBuffer::Filter relies on MemoryChunk::FromAnyPointerAddress.
8062 // If FromAnyPointerAddress encounters a slot that belongs to a large
8063 // chunk queued for deletion it will fail to find the chunk, because
8064 // it tries to search the list of pages owned by the large object
8065 // space, and queued chunks were detached from that list.
8066 // To work around this we split the large chunk into normal kPageSize
8067 // aligned pieces and initialize the size, owner and flags of every piece.
8068 // If FromAnyPointerAddress encounters a slot that belongs to one of
8069 // these smaller pieces it will treat it as a slot on a normal Page.
8070 Address chunk_end = chunk->address() + chunk->size();
8071 MemoryChunk* inner = MemoryChunk::FromAddress(
8072 chunk->address() + Page::kPageSize);
8073 MemoryChunk* inner_last = MemoryChunk::FromAddress(chunk_end - 1);
8074 while (inner <= inner_last) {
8075 // Size of a large chunk is always a multiple of
8076 // OS::AllocateAlignment() so there is always
8077 // enough space for a fake MemoryChunk header.
8078 Address area_end = Min(inner->address() + Page::kPageSize, chunk_end);
8079 // Guard against overflow.
8080 if (area_end < inner->address()) area_end = chunk_end;
8081 inner->SetArea(inner->address(), area_end);
8082 inner->set_size(Page::kPageSize);
8083 inner->set_owner(lo_space());
8084 inner->SetFlag(MemoryChunk::ABOUT_TO_BE_FREED);
8085 inner = MemoryChunk::FromAddress(
8086 inner->address() + Page::kPageSize);
8087 }
8088 }
8089 }
8090 isolate_->heap()->store_buffer()->Compact();
8091 isolate_->heap()->store_buffer()->Filter(MemoryChunk::ABOUT_TO_BE_FREED);
8092 for (chunk = chunks_queued_for_free_; chunk != NULL; chunk = next) {
8093 next = chunk->next_chunk();
8094 isolate_->memory_allocator()->Free(chunk);
8095 }
8096 chunks_queued_for_free_ = NULL;
8097 }
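// Keep a small ring buffer of recently unmapped page addresses, tagged with
// a recognizable bit pattern, so they can be identified in crash dumps.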
8100 void Heap::RememberUnmappedPage(Address page, bool compacted) {
8101 uintptr_t p = reinterpret_cast<uintptr_t>(page);
8102 // Tag the page pointer to make it findable in the dump file.
8103 if (compacted) {
8104 p ^= 0xc1ead & (Page::kPageSize - 1); // Cleared.
8105 } else {
8106 p ^= 0x1d1ed & (Page::kPageSize - 1); // I died.
8107 }
8108 remembered_unmapped_pages_[remembered_unmapped_pages_index_] =
8109 reinterpret_cast<Address>(p);
8110 remembered_unmapped_pages_index_++;
8111 remembered_unmapped_pages_index_ %= kRememberedUnmappedPages;
8115 void Heap::ClearObjectStats(bool clear_last_time_stats) {
8116 memset(object_counts_, 0, sizeof(object_counts_));
8117 memset(object_sizes_, 0, sizeof(object_sizes_));
8118 if (clear_last_time_stats) {
8119 memset(object_counts_last_time_, 0, sizeof(object_counts_last_time_));
8120 memset(object_sizes_last_time_, 0, sizeof(object_sizes_last_time_));
8125 static LazyMutex checkpoint_object_stats_mutex = LAZY_MUTEX_INITIALIZER;
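// CheckpointObjectStats publishes the object counts and sizes gathered
// during the last GC to the isolate's counters as deltas against the
// previous checkpoint, then remembers the current values for the next one.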
8128 void Heap::CheckpointObjectStats() {
8129 ScopedLock lock(checkpoint_object_stats_mutex.Pointer());
8130 Counters* counters = isolate()->counters();
8131 #define ADJUST_LAST_TIME_OBJECT_COUNT(name) \
8132 counters->count_of_##name()->Increment( \
8133 static_cast<int>(object_counts_[name])); \
8134 counters->count_of_##name()->Decrement( \
8135 static_cast<int>(object_counts_last_time_[name])); \
8136 counters->size_of_##name()->Increment( \
8137 static_cast<int>(object_sizes_[name])); \
8138 counters->size_of_##name()->Decrement( \
8139 static_cast<int>(object_sizes_last_time_[name]));
8140 INSTANCE_TYPE_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
8141 #undef ADJUST_LAST_TIME_OBJECT_COUNT
8142 int index;
8143 #define ADJUST_LAST_TIME_OBJECT_COUNT(name) \
8144 index = FIRST_CODE_KIND_SUB_TYPE + Code::name; \
8145 counters->count_of_CODE_TYPE_##name()->Increment( \
8146 static_cast<int>(object_counts_[index])); \
8147 counters->count_of_CODE_TYPE_##name()->Decrement( \
8148 static_cast<int>(object_counts_last_time_[index])); \
8149 counters->size_of_CODE_TYPE_##name()->Increment( \
8150 static_cast<int>(object_sizes_[index])); \
8151 counters->size_of_CODE_TYPE_##name()->Decrement( \
8152 static_cast<int>(object_sizes_last_time_[index]));
8153 CODE_KIND_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
8154 #undef ADJUST_LAST_TIME_OBJECT_COUNT
8155 #define ADJUST_LAST_TIME_OBJECT_COUNT(name) \
8156 index = FIRST_FIXED_ARRAY_SUB_TYPE + name; \
8157 counters->count_of_FIXED_ARRAY_##name()->Increment( \
8158 static_cast<int>(object_counts_[index])); \
8159 counters->count_of_FIXED_ARRAY_##name()->Decrement( \
8160 static_cast<int>(object_counts_last_time_[index])); \
8161 counters->size_of_FIXED_ARRAY_##name()->Increment( \
8162 static_cast<int>(object_sizes_[index])); \
8163 counters->size_of_FIXED_ARRAY_##name()->Decrement( \
8164 static_cast<int>(object_sizes_last_time_[index]));
8165 FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
8166 #undef ADJUST_LAST_TIME_OBJECT_COUNT
8168 OS::MemCopy(object_counts_last_time_, object_counts_, sizeof(object_counts_));
8169 OS::MemCopy(object_sizes_last_time_, object_sizes_, sizeof(object_sizes_));
8170 ClearObjectStats();
8171 }
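// RelocationLock keeps the parallel recompilation (optimizer) thread from
// observing the heap while objects are being relocated; it is only taken
// when FLAG_parallel_recompilation is enabled.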
8174 Heap::RelocationLock::RelocationLock(Heap* heap) : heap_(heap) {
8175 if (FLAG_parallel_recompilation) {
8176 heap_->relocation_mutex_->Lock();
8177 #ifdef DEBUG
8178 heap_->relocation_mutex_locked_by_optimizer_thread_ =
8179 heap_->isolate()->optimizing_compiler_thread()->IsOptimizerThread();
8180 #endif // DEBUG
8181 }
8182 }
8184 } } // namespace v8::internal