1 // Copyright 2011 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission.
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 #include "accessors.h"
32 #include "bootstrapper.h"
34 #include "compilation-cache.h"
36 #include "deoptimizer.h"
37 #include "global-handles.h"
38 #include "heap-profiler.h"
39 #include "incremental-marking.h"
40 #include "liveobjectlist-inl.h"
41 #include "mark-compact.h"
43 #include "objects-visiting.h"
44 #include "objects-visiting-inl.h"
45 #include "runtime-profiler.h"
46 #include "scopeinfo.h"
48 #include "store-buffer.h"
49 #include "v8threads.h"
50 #include "vm-state-inl.h"
51 #if V8_TARGET_ARCH_ARM && !V8_INTERPRETED_REGEXP
52 #include "regexp-macro-assembler.h"
53 #include "arm/regexp-macro-assembler-arm.h"
54 #endif
55 #if V8_TARGET_ARCH_MIPS && !V8_INTERPRETED_REGEXP
56 #include "regexp-macro-assembler.h"
57 #include "mips/regexp-macro-assembler-mips.h"
58 #endif
64 static Mutex* gc_initializer_mutex = OS::CreateMutex();
69 // semispace_size_ should be a power of 2 and old_generation_size_ should be
70 // a multiple of Page::kPageSize.
72 #define LUMP_OF_MEMORY (128 * KB)
74 #elif defined(V8_TARGET_ARCH_X64)
75 #define LUMP_OF_MEMORY (2 * MB)
76 code_range_size_(512*MB),
78 #define LUMP_OF_MEMORY MB
81 reserved_semispace_size_(8 * Max(LUMP_OF_MEMORY, Page::kPageSize)),
82 max_semispace_size_(8 * Max(LUMP_OF_MEMORY, Page::kPageSize)),
83 initial_semispace_size_(Max(LUMP_OF_MEMORY, Page::kPageSize)),
84 max_old_generation_size_(700ul * LUMP_OF_MEMORY),
85 max_executable_size_(128l * LUMP_OF_MEMORY),
87 // Variables set based on semispace_size_ and old_generation_size_ in
88 // ConfigureHeap (survived_since_last_expansion_, external_allocation_limit_)
89 // Will be 4 * reserved_semispace_size_ to ensure that young
90 // generation can be aligned to its size.
91 survived_since_last_expansion_(0),
93 always_allocate_scope_depth_(0),
94 linear_allocation_scope_depth_(0),
95 contexts_disposed_(0),
96 scan_on_scavenge_pages_(0),
98 old_pointer_space_(NULL),
99 old_data_space_(NULL),
104 gc_state_(NOT_IN_GC),
105 gc_post_processing_depth_(0),
108 unflattened_strings_length_(0),
110 allocation_allowed_(true),
111 allocation_timeout_(0),
112 disallow_allocation_failure_(false),
115 old_gen_promotion_limit_(kMinimumPromotionLimit),
116 old_gen_allocation_limit_(kMinimumAllocationLimit),
117 old_gen_limit_factor_(1),
118 size_of_old_gen_at_last_old_space_gc_(0),
119 external_allocation_limit_(0),
120 amount_of_external_allocated_memory_(0),
121 amount_of_external_allocated_memory_at_last_global_gc_(0),
122 old_gen_exhausted_(false),
123 store_buffer_rebuilder_(store_buffer()),
124 hidden_symbol_(NULL),
125 global_gc_prologue_callback_(NULL),
126 global_gc_epilogue_callback_(NULL),
127 gc_safe_size_of_old_object_(NULL),
128 total_regexp_code_generated_(0),
130 young_survivors_after_last_gc_(0),
131 high_survival_rate_period_length_(0),
133 previous_survival_rate_trend_(Heap::STABLE),
134 survival_rate_trend_(Heap::STABLE),
136 max_alive_after_gc_(0),
137 min_in_mutator_(kMaxInt),
138 alive_after_last_gc_(0),
139 last_gc_end_timestamp_(0.0),
142 incremental_marking_(this),
143 number_idle_notifications_(0),
144 last_idle_notification_gc_count_(0),
145 last_idle_notification_gc_count_init_(false),
147 chunks_queued_for_free_(NULL) {
148 // Allow build-time customization of the max semispace size. Building
149 // V8 with snapshots and a non-default max semispace size is much
150 // easier if you can define it as part of the build environment.
151 #if defined(V8_MAX_SEMISPACE_SIZE)
152 max_semispace_size_ = reserved_semispace_size_ = V8_MAX_SEMISPACE_SIZE;
155 intptr_t max_virtual = OS::MaxVirtualMemory();
157 if (max_virtual > 0) {
158 if (code_range_size_ > 0) {
159 // Reserve no more than 1/8 of the memory for the code range.
160 code_range_size_ = Min(code_range_size_, max_virtual >> 3);
164 memset(roots_, 0, sizeof(roots_[0]) * kRootListLength);
165 global_contexts_list_ = NULL;
166 mark_compact_collector_.heap_ = this;
167 external_string_table_.heap_ = this;
171 intptr_t Heap::Capacity() {
172 if (!HasBeenSetup()) return 0;
174 return new_space_.Capacity() +
175 old_pointer_space_->Capacity() +
176 old_data_space_->Capacity() +
177 code_space_->Capacity() +
178 map_space_->Capacity() +
179 cell_space_->Capacity();
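// Total memory committed for the heap; unlike Capacity() above, this also
// counts the large object space.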
183 intptr_t Heap::CommittedMemory() {
184 if (!HasBeenSetup()) return 0;
186 return new_space_.CommittedMemory() +
187 old_pointer_space_->CommittedMemory() +
188 old_data_space_->CommittedMemory() +
189 code_space_->CommittedMemory() +
190 map_space_->CommittedMemory() +
191 cell_space_->CommittedMemory() + lo_space_->Size();
195 intptr_t Heap::CommittedMemoryExecutable() {
196 if (!HasBeenSetup()) return 0;
198 return isolate()->memory_allocator()->SizeExecutable();
202 intptr_t Heap::Available() {
203 if (!HasBeenSetup()) return 0;
205 return new_space_.Available() +
206 old_pointer_space_->Available() +
207 old_data_space_->Available() +
208 code_space_->Available() +
209 map_space_->Available() +
210 cell_space_->Available();
214 bool Heap::HasBeenSetup() {
215 return old_pointer_space_ != NULL &&
216 old_data_space_ != NULL &&
217 code_space_ != NULL &&
218 map_space_ != NULL &&
219 cell_space_ != NULL && lo_space_ != NULL;
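// Returns the size of an old-space object in a way that is safe to use while
// a GC is in progress: intrusively marked objects report their size via
// IntrusiveMarking::SizeOfMarkedObject() instead of computing it from the map.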
224 int Heap::GcSafeSizeOfOldObject(HeapObject* object) {
225 if (IntrusiveMarking::IsMarked(object)) {
226 return IntrusiveMarking::SizeOfMarkedObject(object);
228 return object->SizeFromMap(object->map());
232 GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space) {
233 // Is global GC requested?
234 if (space != NEW_SPACE || FLAG_gc_global) {
235 isolate_->counters()->gc_compactor_caused_by_request()->Increment();
236 return MARK_COMPACTOR;
239 // Is enough data promoted to justify a global GC?
240 if (OldGenerationPromotionLimitReached()) {
241 isolate_->counters()->gc_compactor_caused_by_promoted_data()->Increment();
242 return MARK_COMPACTOR;
245 // Have allocation in OLD and LO failed?
246 if (old_gen_exhausted_) {
247 isolate_->counters()->
248 gc_compactor_caused_by_oldspace_exhaustion()->Increment();
249 return MARK_COMPACTOR;
252 // Is there enough space left in OLD to guarantee that a scavenge can succeed?
255 // Note that MemoryAllocator->MaxAvailable() undercounts the memory available
256 // for object promotion. It counts only the bytes that the memory
257 // allocator has not yet allocated from the OS and assigned to any space,
258 // and does not count available bytes already in the old space or code
259 // space. Undercounting is safe---we may get an unrequested full GC when
260 // a scavenge would have succeeded.
261 if (isolate_->memory_allocator()->MaxAvailable() <= new_space_.Size()) {
262 isolate_->counters()->
263 gc_compactor_caused_by_oldspace_exhaustion()->Increment();
264 return MARK_COMPACTOR;
272 // TODO(1238405): Combine the infrastructure for --heap-stats and
273 // --log-gc to avoid the complicated preprocessor and flag testing.
274 void Heap::ReportStatisticsBeforeGC() {
275 // Heap::ReportHeapStatistics will also log NewSpace statistics when
276 // --log-gc is set. The following logic is used to avoid double logging.
279 if (FLAG_heap_stats || FLAG_log_gc) new_space_.CollectStatistics();
280 if (FLAG_heap_stats) {
281 ReportHeapStatistics("Before GC");
282 } else if (FLAG_log_gc) {
283 new_space_.ReportStatistics();
285 if (FLAG_heap_stats || FLAG_log_gc) new_space_.ClearHistograms();
288 new_space_.CollectStatistics();
289 new_space_.ReportStatistics();
290 new_space_.ClearHistograms();
296 void Heap::PrintShortHeapStatistics() {
297 if (!FLAG_trace_gc_verbose) return;
298 PrintF("Memory allocator, used: %8" V8_PTR_PREFIX "d"
299 ", available: %8" V8_PTR_PREFIX "d\n",
300 isolate_->memory_allocator()->Size(),
301 isolate_->memory_allocator()->Available());
302 PrintF("New space, used: %8" V8_PTR_PREFIX "d"
303 ", available: %8" V8_PTR_PREFIX "d\n",
304 Heap::new_space_.Size(),
305 new_space_.Available());
306 PrintF("Old pointers, used: %8" V8_PTR_PREFIX "d"
307 ", available: %8" V8_PTR_PREFIX "d"
308 ", waste: %8" V8_PTR_PREFIX "d\n",
309 old_pointer_space_->Size(),
310 old_pointer_space_->Available(),
311 old_pointer_space_->Waste());
312 PrintF("Old data space, used: %8" V8_PTR_PREFIX "d"
313 ", available: %8" V8_PTR_PREFIX "d"
314 ", waste: %8" V8_PTR_PREFIX "d\n",
315 old_data_space_->Size(),
316 old_data_space_->Available(),
317 old_data_space_->Waste());
318 PrintF("Code space, used: %8" V8_PTR_PREFIX "d"
319 ", available: %8" V8_PTR_PREFIX "d"
320 ", waste: %8" V8_PTR_PREFIX "d\n",
321 code_space_->Size(),
322 code_space_->Available(),
323 code_space_->Waste());
324 PrintF("Map space, used: %8" V8_PTR_PREFIX "d"
325 ", available: %8" V8_PTR_PREFIX "d"
326 ", waste: %8" V8_PTR_PREFIX "d\n",
327 map_space_->Size(),
328 map_space_->Available(),
329 map_space_->Waste());
330 PrintF("Cell space, used: %8" V8_PTR_PREFIX "d"
331 ", available: %8" V8_PTR_PREFIX "d"
332 ", waste: %8" V8_PTR_PREFIX "d\n",
333 cell_space_->Size(),
334 cell_space_->Available(),
335 cell_space_->Waste());
336 PrintF("Large object space, used: %8" V8_PTR_PREFIX "d"
337 ", available: %8" V8_PTR_PREFIX "d\n",
338 lo_space_->Size(),
339 lo_space_->Available());
343 // TODO(1238405): Combine the infrastructure for --heap-stats and
344 // --log-gc to avoid the complicated preprocessor and flag testing.
345 void Heap::ReportStatisticsAfterGC() {
346 // As with the statistics reported before GC, we use some complicated logic to ensure that
347 // NewSpace statistics are logged exactly once when --log-gc is turned on.
349 if (FLAG_heap_stats) {
350 new_space_.CollectStatistics();
351 ReportHeapStatistics("After GC");
352 } else if (FLAG_log_gc) {
353 new_space_.ReportStatistics();
356 if (FLAG_log_gc) new_space_.ReportStatistics();
361 void Heap::GarbageCollectionPrologue() {
362 isolate_->transcendental_cache()->Clear();
363 ClearJSFunctionResultCaches();
365 unflattened_strings_length_ = 0;
367 ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
368 allow_allocation(false);
370 if (FLAG_verify_heap) {
374 if (FLAG_gc_verbose) Print();
378 ReportStatisticsBeforeGC();
381 LiveObjectList::GCPrologue();
382 store_buffer()->GCPrologue();
385 intptr_t Heap::SizeOfObjects() {
388 for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
389 total += space->SizeOfObjects();
394 void Heap::GarbageCollectionEpilogue() {
395 store_buffer()->GCEpilogue();
396 LiveObjectList::GCEpilogue();
398 allow_allocation(true);
401 if (FLAG_verify_heap) {
405 if (FLAG_print_global_handles) isolate_->global_handles()->Print();
406 if (FLAG_print_handles) PrintHandles();
407 if (FLAG_gc_verbose) Print();
408 if (FLAG_code_stats) ReportCodeStatistics("After GC");
411 isolate_->counters()->alive_after_last_gc()->Set(
412 static_cast<int>(SizeOfObjects()));
414 isolate_->counters()->symbol_table_capacity()->Set(
415 symbol_table()->Capacity());
416 isolate_->counters()->number_of_symbols()->Set(
417 symbol_table()->NumberOfElements());
419 ReportStatisticsAfterGC();
421 #ifdef ENABLE_DEBUGGER_SUPPORT
422 isolate_->debug()->AfterGarbageCollection();
423 #endif // ENABLE_DEBUGGER_SUPPORT
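// Requests a collection of the whole heap (normally a mark-compact). The
// flags are installed on the mark-compact collector only for the duration of
// this call; kNoGCFlags is restored afterwards. kMakeHeapIterableMask, used
// below, is one example flag.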
427 void Heap::CollectAllGarbage(int flags) {
428 // Since we are ignoring the return value, the exact choice of space does
429 // not matter, so long as we do not specify NEW_SPACE, which would not cause a full GC.
431 mark_compact_collector_.SetFlags(flags);
432 CollectGarbage(OLD_POINTER_SPACE);
433 mark_compact_collector_.SetFlags(kNoGCFlags);
437 void Heap::CollectAllAvailableGarbage() {
438 // Since we are ignoring the return value, the exact choice of space does
439 // not matter, so long as we do not specify NEW_SPACE, which would not cause a full GC.
441 // A major GC would invoke weak handle callbacks on weakly reachable
442 // handles, but won't collect weakly reachable objects until the next
443 // major GC. Therefore, if we collect aggressively and a weak handle callback
444 // has been invoked, we rerun the major GC to release objects which become
445 // garbage.
446 // Note: as weak callbacks can execute arbitrary code, we cannot
447 // hope that eventually there will be no more weak callback invocations.
448 // Therefore stop recollecting after several attempts.
449 mark_compact_collector()->SetFlags(kMakeHeapIterableMask);
450 const int kMaxNumberOfAttempts = 7;
451 for (int attempt = 0; attempt < kMaxNumberOfAttempts; attempt++) {
452 if (!CollectGarbage(OLD_POINTER_SPACE, MARK_COMPACTOR)) {
456 mark_compact_collector()->SetFlags(kNoGCFlags);
460 bool Heap::CollectGarbage(AllocationSpace space, GarbageCollector collector) {
461 // The VM is in the GC state until exiting this function.
462 VMState state(isolate_, GC);
465 // Reset the allocation timeout to the GC interval, but make sure to
466 // allow at least a few allocations after a collection. The reason
467 // for this is that we have a lot of allocation sequences and we
468 // assume that a garbage collection will allow the subsequent
469 // allocation attempts to go through.
470 allocation_timeout_ = Max(6, FLAG_gc_interval);
473 if (collector == SCAVENGER && !incremental_marking()->IsStopped()) {
474 if (FLAG_trace_incremental_marking) {
475 PrintF("[IncrementalMarking] Scavenge during marking.\n");
479 if (collector == MARK_COMPACTOR &&
480 !mark_compact_collector()->PreciseSweepingRequired() &&
481 !incremental_marking()->IsStopped() &&
482 !incremental_marking()->should_hurry() &&
483 FLAG_incremental_marking_steps) {
484 if (FLAG_trace_incremental_marking) {
485 PrintF("[IncrementalMarking] Delaying MarkSweep.\n");
487 collector = SCAVENGER;
490 bool next_gc_likely_to_collect_more = false;
492 { GCTracer tracer(this);
493 GarbageCollectionPrologue();
494 // The GC count was incremented in the prologue. Tell the tracer about it.
496 tracer.set_gc_count(gc_count_);
498 // Tell the tracer which collector we've selected.
499 tracer.set_collector(collector);
501 HistogramTimer* rate = (collector == SCAVENGER)
502 ? isolate_->counters()->gc_scavenger()
503 : isolate_->counters()->gc_compactor();
505 next_gc_likely_to_collect_more =
506 PerformGarbageCollection(collector, &tracer);
509 GarbageCollectionEpilogue();
512 ASSERT(collector == SCAVENGER || incremental_marking()->IsStopped());
513 if (incremental_marking()->IsStopped()) {
514 if (incremental_marking()->WorthActivating() && NextGCIsLikelyToBeFull()) {
515 incremental_marking()->Start();
519 return next_gc_likely_to_collect_more;
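// Runs a single scavenge. While incremental marking is in progress a full
// mark-compact collection is performed instead.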
523 void Heap::PerformScavenge() {
524 GCTracer tracer(this);
525 if (incremental_marking()->IsStopped()) {
526 PerformGarbageCollection(SCAVENGER, &tracer);
528 PerformGarbageCollection(MARK_COMPACTOR, &tracer);
534 // Helper class for verifying the symbol table.
535 class SymbolTableVerifier : public ObjectVisitor {
537 void VisitPointers(Object** start, Object** end) {
538 // Visit all HeapObject pointers in [start, end).
539 for (Object** p = start; p < end; p++) {
540 if ((*p)->IsHeapObject()) {
541 // Check that the symbol is actually a symbol.
542 ASSERT((*p)->IsNull() || (*p)->IsUndefined() || (*p)->IsSymbol());
550 static void VerifySymbolTable() {
552 SymbolTableVerifier verifier;
553 HEAP->symbol_table()->IterateElements(&verifier);
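// ReserveSpace keeps collecting garbage until every space can set aside the
// requested number of bytes. This is typically needed when deserializing a
// snapshot, which must be guaranteed room before it starts copying objects in
// (an assumption about the usual caller; the callers are not in this file).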
558 void Heap::ReserveSpace(
559 int new_space_size,
560 int pointer_space_size,
561 int data_space_size,
562 int code_space_size,
563 int map_space_size,
564 int cell_space_size,
565 int large_object_size) {
566 NewSpace* new_space = Heap::new_space();
567 PagedSpace* old_pointer_space = Heap::old_pointer_space();
568 PagedSpace* old_data_space = Heap::old_data_space();
569 PagedSpace* code_space = Heap::code_space();
570 PagedSpace* map_space = Heap::map_space();
571 PagedSpace* cell_space = Heap::cell_space();
572 LargeObjectSpace* lo_space = Heap::lo_space();
573 bool gc_performed = true;
574 while (gc_performed) {
575 gc_performed = false;
576 if (!new_space->ReserveSpace(new_space_size)) {
577 Heap::CollectGarbage(NEW_SPACE);
580 if (!old_pointer_space->ReserveSpace(pointer_space_size)) {
581 Heap::CollectGarbage(OLD_POINTER_SPACE);
584 if (!(old_data_space->ReserveSpace(data_space_size))) {
585 Heap::CollectGarbage(OLD_DATA_SPACE);
588 if (!(code_space->ReserveSpace(code_space_size))) {
589 Heap::CollectGarbage(CODE_SPACE);
592 if (!(map_space->ReserveSpace(map_space_size))) {
593 Heap::CollectGarbage(MAP_SPACE);
596 if (!(cell_space->ReserveSpace(cell_space_size))) {
597 Heap::CollectGarbage(CELL_SPACE);
600 // We add a slack-factor of 2 in order to have space for a series of
601 // large-object allocations that are only just larger than the page size.
602 large_object_size *= 2;
603 // The ReserveSpace method on the large object space checks how much
604 // we can expand the old generation. This includes expansion caused by
605 // allocation in the other spaces.
606 large_object_size += cell_space_size + map_space_size + code_space_size +
607 data_space_size + pointer_space_size;
608 if (!(lo_space->ReserveSpace(large_object_size))) {
609 Heap::CollectGarbage(LO_SPACE);
616 void Heap::EnsureFromSpaceIsCommitted() {
617 if (new_space_.CommitFromSpaceIfNeeded()) return;
619 // Committing memory to from space failed.
620 // Try shrinking and try again.
622 if (new_space_.CommitFromSpaceIfNeeded()) return;
624 // Committing memory to from space failed again.
625 // Memory is exhausted and we will die.
626 V8::FatalProcessOutOfMemory("Committing semi space failed.");
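// Walks the weak list of global contexts and clears the JSFunction result
// caches of each context. Skipped while the bootstrapper is active.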
630 void Heap::ClearJSFunctionResultCaches() {
631 if (isolate_->bootstrapper()->IsActive()) return;
633 Object* context = global_contexts_list_;
634 while (!context->IsUndefined()) {
635 // Get the caches for this context:
636 FixedArray* caches =
637 Context::cast(context)->jsfunction_result_caches();
639 int length = caches->length();
640 for (int i = 0; i < length; i++) {
641 JSFunctionResultCache::cast(caches->get(i))->Clear();
643 // Get the next context:
644 context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
650 void Heap::ClearNormalizedMapCaches() {
651 if (isolate_->bootstrapper()->IsActive() &&
652 !incremental_marking()->IsMarking()) {
656 Object* context = global_contexts_list_;
657 while (!context->IsUndefined()) {
658 Context::cast(context)->normalized_map_cache()->Clear();
659 context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
664 void Heap::UpdateSurvivalRateTrend(int start_new_space_size) {
665 double survival_rate =
666 (static_cast<double>(young_survivors_after_last_gc_) * 100) /
667 start_new_space_size;
669 if (survival_rate > kYoungSurvivalRateThreshold) {
670 high_survival_rate_period_length_++;
672 high_survival_rate_period_length_ = 0;
675 double survival_rate_diff = survival_rate_ - survival_rate;
677 if (survival_rate_diff > kYoungSurvivalRateAllowedDeviation) {
678 set_survival_rate_trend(DECREASING);
679 } else if (survival_rate_diff < -kYoungSurvivalRateAllowedDeviation) {
680 set_survival_rate_trend(INCREASING);
682 set_survival_rate_trend(STABLE);
685 survival_rate_ = survival_rate;
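// The survival rate is the percentage of the new space (measured at the
// start of the collection) that survived the last scavenge. The trend
// computed here and IsHighSurvivalRate() feed the old generation limit
// heuristics in PerformGarbageCollection() below.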
688 bool Heap::PerformGarbageCollection(GarbageCollector collector,
690 bool next_gc_likely_to_collect_more = false;
692 if (collector != SCAVENGER) {
693 PROFILE(isolate_, CodeMovingGCEvent());
696 if (FLAG_verify_heap) {
699 if (collector == MARK_COMPACTOR && global_gc_prologue_callback_) {
700 ASSERT(!allocation_allowed_);
701 GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
702 global_gc_prologue_callback_();
705 GCType gc_type =
706 collector == MARK_COMPACTOR ? kGCTypeMarkSweepCompact : kGCTypeScavenge;
708 for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
709 if (gc_type & gc_prologue_callbacks_[i].gc_type) {
710 gc_prologue_callbacks_[i].callback(gc_type, kNoGCCallbackFlags);
714 EnsureFromSpaceIsCommitted();
716 int start_new_space_size = Heap::new_space()->SizeAsInt();
718 if (IsHighSurvivalRate()) {
719 // We speed up the incremental marker if it is running so that it
720 // does not fall behind the rate of promotion, which would cause a
721 // constantly growing old space.
722 incremental_marking()->NotifyOfHighPromotionRate();
725 if (collector == MARK_COMPACTOR) {
726 // Perform mark-sweep with optional compaction.
729 bool high_survival_rate_during_scavenges = IsHighSurvivalRate() &&
730 IsStableOrIncreasingSurvivalTrend();
732 UpdateSurvivalRateTrend(start_new_space_size);
734 size_of_old_gen_at_last_old_space_gc_ = PromotedSpaceSize();
736 if (high_survival_rate_during_scavenges &&
737 IsStableOrIncreasingSurvivalTrend()) {
738 // Stable high survival rates of young objects both during partial and
739 // full collections indicate that the mutator is either building or modifying
740 // a structure with a long lifetime.
741 // In this case we aggressively raise old generation memory limits to
742 // postpone the subsequent mark-sweep collection and thus trade memory
743 // space for mutation speed.
744 old_gen_limit_factor_ = 2;
746 old_gen_limit_factor_ = 1;
749 old_gen_promotion_limit_ =
750 OldGenPromotionLimit(size_of_old_gen_at_last_old_space_gc_);
751 old_gen_allocation_limit_ =
752 OldGenAllocationLimit(size_of_old_gen_at_last_old_space_gc_);
754 old_gen_exhausted_ = false;
760 UpdateSurvivalRateTrend(start_new_space_size);
763 isolate_->counters()->objs_since_last_young()->Set(0);
765 gc_post_processing_depth_++;
766 { DisableAssertNoAllocation allow_allocation;
767 GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
768 next_gc_likely_to_collect_more =
769 isolate_->global_handles()->PostGarbageCollectionProcessing(collector);
771 gc_post_processing_depth_--;
773 // Update relocatables.
774 Relocatable::PostGarbageCollectionProcessing();
776 if (collector == MARK_COMPACTOR) {
777 // Register the amount of external allocated memory.
778 amount_of_external_allocated_memory_at_last_global_gc_ =
779 amount_of_external_allocated_memory_;
782 GCCallbackFlags callback_flags = kNoGCCallbackFlags;
783 for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
784 if (gc_type & gc_epilogue_callbacks_[i].gc_type) {
785 gc_epilogue_callbacks_[i].callback(gc_type, callback_flags);
789 if (collector == MARK_COMPACTOR && global_gc_epilogue_callback_) {
790 ASSERT(!allocation_allowed_);
791 GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
792 global_gc_epilogue_callback_();
794 if (FLAG_verify_heap) {
798 return next_gc_likely_to_collect_more;
802 void Heap::MarkCompact(GCTracer* tracer) {
803 gc_state_ = MARK_COMPACT;
804 LOG(isolate_, ResourceEvent("markcompact", "begin"));
806 mark_compact_collector_.Prepare(tracer);
809 tracer->set_full_gc_count(ms_count_);
811 MarkCompactPrologue();
813 mark_compact_collector_.CollectGarbage();
815 LOG(isolate_, ResourceEvent("markcompact", "end"));
817 gc_state_ = NOT_IN_GC;
819 isolate_->counters()->objs_since_last_full()->Set(0);
821 contexts_disposed_ = 0;
825 void Heap::MarkCompactPrologue() {
826 // At any old-space GC, clear the keyed lookup cache to enable collection of
827 // unused maps.
828 isolate_->keyed_lookup_cache()->Clear();
829 isolate_->context_slot_cache()->Clear();
830 isolate_->descriptor_lookup_cache()->Clear();
831 StringSplitCache::Clear(string_split_cache());
833 isolate_->compilation_cache()->MarkCompactPrologue();
835 CompletelyClearInstanceofCache();
837 // TODO(1605) select heuristic for flushing NumberString cache with
838 // FlushNumberStringCache
839 if (FLAG_cleanup_code_caches_at_gc) {
840 polymorphic_code_cache()->set_cache(undefined_value());
843 ClearNormalizedMapCaches();
847 Object* Heap::FindCodeObject(Address a) {
848 return isolate()->inner_pointer_to_code_cache()->
849 GcSafeFindCodeForInnerPointer(a);
853 // Helper class for copying HeapObjects
854 class ScavengeVisitor: public ObjectVisitor {
856 explicit ScavengeVisitor(Heap* heap) : heap_(heap) {}
858 void VisitPointer(Object** p) { ScavengePointer(p); }
860 void VisitPointers(Object** start, Object** end) {
861 // Copy all HeapObject pointers in [start, end)
862 for (Object** p = start; p < end; p++) ScavengePointer(p);
866 void ScavengePointer(Object** p) {
867 Object* object = *p;
868 if (!heap_->InNewSpace(object)) return;
869 Heap::ScavengeObject(reinterpret_cast<HeapObject**>(p),
870 reinterpret_cast<HeapObject*>(object));
878 // Visitor class to verify pointers in code or data space do not point into
880 class VerifyNonPointerSpacePointersVisitor: public ObjectVisitor {
882 void VisitPointers(Object** start, Object**end) {
883 for (Object** current = start; current < end; current++) {
884 if ((*current)->IsHeapObject()) {
885 ASSERT(!HEAP->InNewSpace(HeapObject::cast(*current)));
892 static void VerifyNonPointerSpacePointers() {
893 // Verify that there are no pointers to new space in spaces where we
894 // do not expect them.
895 VerifyNonPointerSpacePointersVisitor v;
896 HeapObjectIterator code_it(HEAP->code_space());
897 for (HeapObject* object = code_it.Next();
898 object != NULL; object = code_it.Next())
901 // The old data space is normally swept conservatively, in which case the
902 // iterator doesn't work, so we normally skip the next bit.
903 if (!HEAP->old_data_space()->was_swept_conservatively()) {
904 HeapObjectIterator data_it(HEAP->old_data_space());
905 for (HeapObject* object = data_it.Next();
906 object != NULL; object = data_it.Next())
913 void Heap::CheckNewSpaceExpansionCriteria() {
914 if (new_space_.Capacity() < new_space_.MaximumCapacity() &&
915 survived_since_last_expansion_ > new_space_.Capacity()) {
916 // Grow the size of new space if there is room to grow and enough
917 // data has survived scavenge since the last expansion.
919 survived_since_last_expansion_ = 0;
924 static bool IsUnscavengedHeapObject(Heap* heap, Object** p) {
925 return heap->InNewSpace(*p) &&
926 !HeapObject::cast(*p)->map_word().IsForwardingAddress();
930 void Heap::ScavengeStoreBufferCallback(
933 StoreBufferEvent event) {
934 heap->store_buffer_rebuilder_.Callback(page, event);
938 void StoreBufferRebuilder::Callback(MemoryChunk* page, StoreBufferEvent event) {
939 if (event == kStoreBufferStartScanningPagesEvent) {
940 start_of_current_page_ = NULL;
941 current_page_ = NULL;
942 } else if (event == kStoreBufferScanningPageEvent) {
943 if (current_page_ != NULL) {
944 // Check whether this page already overflowed the store buffer during this iteration.
945 if (current_page_->scan_on_scavenge()) {
946 // Then we should wipe out the entries that have been added for it.
947 store_buffer_->SetTop(start_of_current_page_);
948 } else if (store_buffer_->Top() - start_of_current_page_ >=
949 (store_buffer_->Limit() - store_buffer_->Top()) >> 2) {
950 // Did we find too many pointers in the previous page? The heuristic is
951 // that no page can take more than 1/5 of the remaining slots in the
952 // store buffer.
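// The shift by two compares the slots this page consumed against a quarter
// of the slots still free; consuming a quarter of what remains is the same
// as consuming a fifth of what was free when the scan of this page started.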
953 current_page_->set_scan_on_scavenge(true);
954 store_buffer_->SetTop(start_of_current_page_);
956 // In this case the page we scanned took a reasonable number of slots in
957 // the store buffer. It has now been rehabilitated and is no longer
958 // marked scan_on_scavenge.
959 ASSERT(!current_page_->scan_on_scavenge());
962 start_of_current_page_ = store_buffer_->Top();
963 current_page_ = page;
964 } else if (event == kStoreBufferFullEvent) {
965 // The current page overflowed the store buffer again. Wipe out its entries
966 // in the store buffer and mark it scan-on-scavenge again. This may happen
967 // several times while scanning.
968 if (current_page_ == NULL) {
969 // Store Buffer overflowed while scanning promoted objects. These are not
970 // in any particular page, though they are likely to be clustered by the
971 // allocation routines.
972 store_buffer_->HandleFullness();
974 // Store Buffer overflowed while scanning a particular old space page for
975 // pointers to new space.
976 ASSERT(current_page_ == page);
977 ASSERT(page != NULL);
978 current_page_->set_scan_on_scavenge(true);
979 ASSERT(start_of_current_page_ != store_buffer_->Top());
980 store_buffer_->SetTop(start_of_current_page_);
988 void Heap::Scavenge() {
990 if (FLAG_verify_heap) VerifyNonPointerSpacePointers();
993 gc_state_ = SCAVENGE;
995 // Implements Cheney's copying algorithm
996 LOG(isolate_, ResourceEvent("scavenge", "begin"));
998 // Clear descriptor cache.
999 isolate_->descriptor_lookup_cache()->Clear();
1001 // Used for updating survived_since_last_expansion_ at function end.
1002 intptr_t survived_watermark = PromotedSpaceSize();
1004 CheckNewSpaceExpansionCriteria();
1006 SelectScavengingVisitorsTable();
1008 incremental_marking()->PrepareForScavenge();
1010 old_pointer_space()->AdvanceSweeper(new_space_.Size());
1011 old_data_space()->AdvanceSweeper(new_space_.Size());
1013 // Flip the semispaces. After flipping, to space is empty and from space
1014 // has live objects.
1015 new_space_.Flip();
1016 new_space_.ResetAllocationInfo();
1018 // We need to sweep newly copied objects which can be either in the
1019 // to space or promoted to the old generation. For to-space
1020 // objects, we treat the bottom of the to space as a queue. Newly
1021 // copied and unswept objects lie between a 'front' mark and the
1022 // allocation pointer.
1024 // Promoted objects can go into various old-generation spaces, and
1025 // can be allocated internally in the spaces (from the free list).
1026 // We treat the top of the to space as a queue of addresses of
1027 // promoted objects. The addresses of newly promoted and unswept
1028 // objects lie between a 'front' mark and a 'rear' mark that is
1029 // updated as a side effect of promoting an object.
1031 // There is guaranteed to be enough room at the top of the to space
1032 // for the addresses of promoted objects: every object promoted
1033 // frees up its size in bytes from the top of the new space, and
1034 // objects are at least one pointer in size.
1035 Address new_space_front = new_space_.ToSpaceStart();
1036 promotion_queue_.Initialize(new_space_.ToSpaceEnd());
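// In other words, during the scavenge the to space looks like
// [processed objects | unprocessed copied objects | free | promoted-object
// addresses], with new_space_front and the allocation top bounding the
// unprocessed region and the promotion queue occupying the top of the to
// space (initialized at ToSpaceEnd() above).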
1039 store_buffer()->Clean();
1042 ScavengeVisitor scavenge_visitor(this);
1044 IterateRoots(&scavenge_visitor, VISIT_ALL_IN_SCAVENGE);
1046 // Copy objects reachable from the old generation.
1048 StoreBufferRebuildScope scope(this,
1050 &ScavengeStoreBufferCallback);
1051 store_buffer()->IteratePointersToNewSpace(&ScavengeObject);
1054 // Copy objects reachable from cells by scavenging cell values directly.
1055 HeapObjectIterator cell_iterator(cell_space_);
1056 for (HeapObject* cell = cell_iterator.Next();
1057 cell != NULL; cell = cell_iterator.Next()) {
1058 if (cell->IsJSGlobalPropertyCell()) {
1059 Address value_address =
1060 reinterpret_cast<Address>(cell) +
1061 (JSGlobalPropertyCell::kValueOffset - kHeapObjectTag);
1062 scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(value_address));
1066 // Scavenge object reachable from the global contexts list directly.
1067 scavenge_visitor.VisitPointer(BitCast<Object**>(&global_contexts_list_));
1069 new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
1070 isolate_->global_handles()->IdentifyNewSpaceWeakIndependentHandles(
1071 &IsUnscavengedHeapObject);
1072 isolate_->global_handles()->IterateNewSpaceWeakIndependentRoots(
1074 new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
1077 UpdateNewSpaceReferencesInExternalStringTable(
1078 &UpdateNewSpaceReferenceInExternalStringTableEntry);
1080 LiveObjectList::UpdateReferencesForScavengeGC();
1081 isolate()->runtime_profiler()->UpdateSamplesAfterScavenge();
1082 incremental_marking()->UpdateMarkingDequeAfterScavenge();
1084 ASSERT(new_space_front == new_space_.top());
1087 new_space_.set_age_mark(new_space_.top());
1089 new_space_.LowerInlineAllocationLimit(
1090 new_space_.inline_allocation_limit_step());
1092 // Update how much has survived scavenge.
1093 IncrementYoungSurvivorsCounter(static_cast<int>(
1094 (PromotedSpaceSize() - survived_watermark) + new_space_.Size()));
1096 LOG(isolate_, ResourceEvent("scavenge", "end"));
1098 gc_state_ = NOT_IN_GC;
1102 String* Heap::UpdateNewSpaceReferenceInExternalStringTableEntry(Heap* heap,
1104 MapWord first_word = HeapObject::cast(*p)->map_word();
1106 if (!first_word.IsForwardingAddress()) {
1107 // Unreachable external string can be finalized.
1108 heap->FinalizeExternalString(String::cast(*p));
1112 // String is still reachable.
1113 return String::cast(first_word.ToForwardingAddress());
1117 void Heap::UpdateNewSpaceReferencesInExternalStringTable(
1118 ExternalStringTableUpdaterCallback updater_func) {
1119 if (FLAG_verify_heap) {
1120 external_string_table_.Verify();
1123 if (external_string_table_.new_space_strings_.is_empty()) return;
1125 Object** start = &external_string_table_.new_space_strings_[0];
1126 Object** end = start + external_string_table_.new_space_strings_.length();
1127 Object** last = start;
1129 for (Object** p = start; p < end; ++p) {
1130 ASSERT(InFromSpace(*p));
1131 String* target = updater_func(this, p);
1133 if (target == NULL) continue;
1135 ASSERT(target->IsExternalString());
1137 if (InNewSpace(target)) {
1138 // String is still in new space. Update the table entry.
1142 // String got promoted. Move it to the old string list.
1143 external_string_table_.AddOldString(target);
1147 ASSERT(last <= end);
1148 external_string_table_.ShrinkNewStrings(static_cast<int>(last - start));
1152 void Heap::UpdateReferencesInExternalStringTable(
1153 ExternalStringTableUpdaterCallback updater_func) {
1155 // Update old space string references.
1156 if (external_string_table_.old_space_strings_.length() > 0) {
1157 Object** start = &external_string_table_.old_space_strings_[0];
1158 Object** end = start + external_string_table_.old_space_strings_.length();
1159 for (Object** p = start; p < end; ++p) *p = updater_func(this, p);
1162 UpdateNewSpaceReferencesInExternalStringTable(updater_func);
1166 static Object* ProcessFunctionWeakReferences(Heap* heap,
1168 WeakObjectRetainer* retainer) {
1169 Object* undefined = heap->undefined_value();
1170 Object* head = undefined;
1171 JSFunction* tail = NULL;
1172 Object* candidate = function;
1173 while (candidate != undefined) {
1174 // Check whether to keep the candidate in the list.
1175 JSFunction* candidate_function = reinterpret_cast<JSFunction*>(candidate);
1176 Object* retain = retainer->RetainAs(candidate);
1177 if (retain != NULL) {
1178 if (head == undefined) {
1179 // First element in the list.
1182 // Subsequent elements in the list.
1183 ASSERT(tail != NULL);
1184 tail->set_next_function_link(retain);
1186 // Retained function is new tail.
1187 candidate_function = reinterpret_cast<JSFunction*>(retain);
1188 tail = candidate_function;
1190 ASSERT(retain->IsUndefined() || retain->IsJSFunction());
1192 if (retain == undefined) break;
1195 // Move to next element in the list.
1196 candidate = candidate_function->next_function_link();
1199 // Terminate the list if there are one or more elements.
1201 tail->set_next_function_link(undefined);
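// Prunes the weak list of global contexts using the given retainer and, for
// each surviving context, also prunes its weak list of optimized functions
// via ProcessFunctionWeakReferences() above.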
1208 void Heap::ProcessWeakReferences(WeakObjectRetainer* retainer) {
1209 Object* undefined = undefined_value();
1210 Object* head = undefined;
1211 Context* tail = NULL;
1212 Object* candidate = global_contexts_list_;
1213 while (candidate != undefined) {
1214 // Check whether to keep the candidate in the list.
1215 Context* candidate_context = reinterpret_cast<Context*>(candidate);
1216 Object* retain = retainer->RetainAs(candidate);
1217 if (retain != NULL) {
1218 if (head == undefined) {
1219 // First element in the list.
1222 // Subsequent elements in the list.
1223 ASSERT(tail != NULL);
1224 tail->set_unchecked(this,
1225 Context::NEXT_CONTEXT_LINK,
1226 retain,
1227 UPDATE_WRITE_BARRIER);
1229 // Retained context is new tail.
1230 candidate_context = reinterpret_cast<Context*>(retain);
1231 tail = candidate_context;
1233 if (retain == undefined) break;
1235 // Process the weak list of optimized functions for the context.
1236 Object* function_list_head =
1237 ProcessFunctionWeakReferences(
1238 this,
1239 candidate_context->get(Context::OPTIMIZED_FUNCTIONS_LIST),
1240 retainer);
1241 candidate_context->set_unchecked(this,
1242 Context::OPTIMIZED_FUNCTIONS_LIST,
1243 function_list_head,
1244 UPDATE_WRITE_BARRIER);
1247 // Move to next element in the list.
1248 candidate = candidate_context->get(Context::NEXT_CONTEXT_LINK);
1251 // Terminate the list if there are one or more elements.
1253 tail->set_unchecked(this,
1254 Context::NEXT_CONTEXT_LINK,
1255 Heap::undefined_value(),
1256 UPDATE_WRITE_BARRIER);
1259 // Update the head of the list of contexts.
1260 global_contexts_list_ = head;
1264 class NewSpaceScavenger : public StaticNewSpaceVisitor<NewSpaceScavenger> {
1266 static inline void VisitPointer(Heap* heap, Object** p) {
1267 Object* object = *p;
1268 if (!heap->InNewSpace(object)) return;
1269 Heap::ScavengeObject(reinterpret_cast<HeapObject**>(p),
1270 reinterpret_cast<HeapObject*>(object));
1275 Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
1276 Address new_space_front) {
1278 SemiSpace::AssertValidRange(new_space_front, new_space_.top());
1279 // The addresses new_space_front and new_space_.top() define a
1280 // queue of unprocessed copied objects. Process them until the queue is empty.
1282 while (new_space_front != new_space_.top()) {
1283 if (!NewSpacePage::IsAtEnd(new_space_front)) {
1284 HeapObject* object = HeapObject::FromAddress(new_space_front);
1286 NewSpaceScavenger::IterateBody(object->map(), object);
1289 NewSpacePage::FromLimit(new_space_front)->next_page()->body();
1293 // Promote and process all the to-be-promoted objects.
1295 StoreBufferRebuildScope scope(this,
1297 &ScavengeStoreBufferCallback);
1298 while (!promotion_queue()->is_empty()) {
1301 promotion_queue()->remove(&target, &size);
1303 // A promoted object might already have been partially visited
1304 // during old space pointer iteration. Thus we search specifically
1305 // for pointers into the from semispace instead of looking for pointers
1306 // to new space.
1307 ASSERT(!target->IsMap());
1308 IterateAndMarkPointersToFromSpace(target->address(),
1309 target->address() + size,
1314 // Take another spin if there are now unswept objects in new space
1315 // (there are currently no more unswept promoted objects).
1316 } while (new_space_front != new_space_.top());
1318 return new_space_front;
1322 enum LoggingAndProfiling {
1323 LOGGING_AND_PROFILING_ENABLED,
1324 LOGGING_AND_PROFILING_DISABLED
1328 enum MarksHandling { TRANSFER_MARKS, IGNORE_MARKS };
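// The scavenging visitor is specialized along two axes: marks are
// transferred to the new copy only when incremental marking is active, and
// per-object logging/profiling events are recorded only when somebody is
// listening. SelectScavengingVisitorsTable() picks one of the four resulting
// tables for each scavenge.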
1331 template<MarksHandling marks_handling,
1332 LoggingAndProfiling logging_and_profiling_mode>
1333 class ScavengingVisitor : public StaticVisitorBase {
1335 static void Initialize() {
1336 table_.Register(kVisitSeqAsciiString, &EvacuateSeqAsciiString);
1337 table_.Register(kVisitSeqTwoByteString, &EvacuateSeqTwoByteString);
1338 table_.Register(kVisitShortcutCandidate, &EvacuateShortcutCandidate);
1339 table_.Register(kVisitByteArray, &EvacuateByteArray);
1340 table_.Register(kVisitFixedArray, &EvacuateFixedArray);
1341 table_.Register(kVisitFixedDoubleArray, &EvacuateFixedDoubleArray);
1343 table_.Register(kVisitGlobalContext,
1344 &ObjectEvacuationStrategy<POINTER_OBJECT>::
1345 template VisitSpecialized<Context::kSize>);
1347 table_.Register(kVisitConsString,
1348 &ObjectEvacuationStrategy<POINTER_OBJECT>::
1349 template VisitSpecialized<ConsString::kSize>);
1351 table_.Register(kVisitSlicedString,
1352 &ObjectEvacuationStrategy<POINTER_OBJECT>::
1353 template VisitSpecialized<SlicedString::kSize>);
1355 table_.Register(kVisitSharedFunctionInfo,
1356 &ObjectEvacuationStrategy<POINTER_OBJECT>::
1357 template VisitSpecialized<SharedFunctionInfo::kSize>);
1359 table_.Register(kVisitJSWeakMap,
1360 &ObjectEvacuationStrategy<POINTER_OBJECT>::
1363 table_.Register(kVisitJSRegExp,
1364 &ObjectEvacuationStrategy<POINTER_OBJECT>::
1367 if (marks_handling == IGNORE_MARKS) {
1368 table_.Register(kVisitJSFunction,
1369 &ObjectEvacuationStrategy<POINTER_OBJECT>::
1370 template VisitSpecialized<JSFunction::kSize>);
1372 table_.Register(kVisitJSFunction, &EvacuateJSFunction);
1375 table_.RegisterSpecializations<ObjectEvacuationStrategy<DATA_OBJECT>,
1377 kVisitDataObjectGeneric>();
1379 table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>,
1381 kVisitJSObjectGeneric>();
1383 table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>,
1385 kVisitStructGeneric>();
1388 static VisitorDispatchTable<ScavengingCallback>* GetTable() {
1393 enum ObjectContents { DATA_OBJECT, POINTER_OBJECT };
1394 enum SizeRestriction { SMALL, UNKNOWN_SIZE };
1396 static void RecordCopiedObject(Heap* heap, HeapObject* obj) {
1397 bool should_record = false;
1399 should_record = FLAG_heap_stats;
1401 should_record = should_record || FLAG_log_gc;
1402 if (should_record) {
1403 if (heap->new_space()->Contains(obj)) {
1404 heap->new_space()->RecordAllocation(obj);
1406 heap->new_space()->RecordPromotion(obj);
1411 // Helper function used by CopyObject to copy a source object to an
1412 // allocated target object and update the forwarding pointer in the source
1413 // object. Returns the target object.
1414 INLINE(static HeapObject* MigrateObject(Heap* heap,
1418 // Copy the content of source to target.
1419 heap->CopyBlock(target->address(), source->address(), size);
1421 // Set the forwarding address.
1422 source->set_map_word(MapWord::FromForwardingAddress(target));
1424 if (logging_and_profiling_mode == LOGGING_AND_PROFILING_ENABLED) {
1425 // Update NewSpace stats if necessary.
1426 RecordCopiedObject(heap, target);
1427 HEAP_PROFILE(heap, ObjectMoveEvent(source->address(), target->address()));
1428 Isolate* isolate = heap->isolate();
1429 if (isolate->logger()->is_logging() ||
1430 CpuProfiler::is_profiling(isolate)) {
1431 if (target->IsSharedFunctionInfo()) {
1432 PROFILE(isolate, SharedFunctionInfoMoveEvent(
1433 source->address(), target->address()));
1438 if (marks_handling == TRANSFER_MARKS) {
1439 if (Marking::TransferColor(source, target)) {
1440 MemoryChunk::IncrementLiveBytes(target->address(), size);
1447 template<ObjectContents object_contents, SizeRestriction size_restriction>
1448 static inline void EvacuateObject(Map* map,
1452 SLOW_ASSERT((size_restriction != SMALL) ||
1453 (object_size <= Page::kMaxHeapObjectSize));
1454 SLOW_ASSERT(object->Size() == object_size);
1456 Heap* heap = map->GetHeap();
1457 if (heap->ShouldBePromoted(object->address(), object_size)) {
1458 MaybeObject* maybe_result;
1460 if ((size_restriction != SMALL) &&
1461 (object_size > Page::kMaxHeapObjectSize)) {
1462 maybe_result = heap->lo_space()->AllocateRaw(object_size,
1465 if (object_contents == DATA_OBJECT) {
1466 maybe_result = heap->old_data_space()->AllocateRaw(object_size);
1468 maybe_result = heap->old_pointer_space()->AllocateRaw(object_size);
1472 Object* result = NULL; // Initialization to please compiler.
1473 if (maybe_result->ToObject(&result)) {
1474 HeapObject* target = HeapObject::cast(result);
1475 *slot = MigrateObject(heap, object, target, object_size);
1477 if (object_contents == POINTER_OBJECT) {
1478 heap->promotion_queue()->insert(target, object_size);
1481 heap->tracer()->increment_promoted_objects_size(object_size);
1485 MaybeObject* allocation = heap->new_space()->AllocateRaw(object_size);
1486 Object* result = allocation->ToObjectUnchecked();
1488 *slot = MigrateObject(heap, object, HeapObject::cast(result), object_size);
1493 static inline void EvacuateJSFunction(Map* map,
1495 HeapObject* object) {
1496 ObjectEvacuationStrategy<POINTER_OBJECT>::
1497 template VisitSpecialized<JSFunction::kSize>(map, slot, object);
1499 HeapObject* target = *slot;
1500 MarkBit mark_bit = Marking::MarkBitFrom(target);
1501 if (Marking::IsBlack(mark_bit)) {
1502 // This object is black and it might not be rescanned by the marker.
1503 // We should explicitly record the code entry slot for compaction because
1504 // promotion queue processing (IterateAndMarkPointersToFromSpace) will
1505 // miss it, as it is not HeapObject-tagged.
1506 Address code_entry_slot =
1507 target->address() + JSFunction::kCodeEntryOffset;
1508 Code* code = Code::cast(Code::GetObjectFromEntryAddress(code_entry_slot));
1509 map->GetHeap()->mark_compact_collector()->
1510 RecordCodeEntrySlot(code_entry_slot, code);
1515 static inline void EvacuateFixedArray(Map* map,
1517 HeapObject* object) {
1518 int object_size = FixedArray::BodyDescriptor::SizeOf(map, object);
1519 EvacuateObject<POINTER_OBJECT, UNKNOWN_SIZE>(map,
1526 static inline void EvacuateFixedDoubleArray(Map* map,
1528 HeapObject* object) {
1529 int length = reinterpret_cast<FixedDoubleArray*>(object)->length();
1530 int object_size = FixedDoubleArray::SizeFor(length);
1531 EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE>(map,
1538 static inline void EvacuateByteArray(Map* map,
1540 HeapObject* object) {
1541 int object_size = reinterpret_cast<ByteArray*>(object)->ByteArraySize();
1542 EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE>(map, slot, object, object_size);
1546 static inline void EvacuateSeqAsciiString(Map* map,
1548 HeapObject* object) {
1549 int object_size = SeqAsciiString::cast(object)->
1550 SeqAsciiStringSize(map->instance_type());
1551 EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE>(map, slot, object, object_size);
1555 static inline void EvacuateSeqTwoByteString(Map* map,
1557 HeapObject* object) {
1558 int object_size = SeqTwoByteString::cast(object)->
1559 SeqTwoByteStringSize(map->instance_type());
1560 EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE>(map, slot, object, object_size);
1564 static inline bool IsShortcutCandidate(int type) {
1565 return ((type & kShortcutTypeMask) == kShortcutTypeTag);
1568 static inline void EvacuateShortcutCandidate(Map* map,
1570 HeapObject* object) {
1571 ASSERT(IsShortcutCandidate(map->instance_type()));
1573 Heap* heap = map->GetHeap();
1575 if (marks_handling == IGNORE_MARKS &&
1576 ConsString::cast(object)->unchecked_second() ==
1577 heap->empty_string()) {
1579 HeapObject::cast(ConsString::cast(object)->unchecked_first());
1583 if (!heap->InNewSpace(first)) {
1584 object->set_map_word(MapWord::FromForwardingAddress(first));
1588 MapWord first_word = first->map_word();
1589 if (first_word.IsForwardingAddress()) {
1590 HeapObject* target = first_word.ToForwardingAddress();
1593 object->set_map_word(MapWord::FromForwardingAddress(target));
1597 heap->DoScavengeObject(first->map(), slot, first);
1598 object->set_map_word(MapWord::FromForwardingAddress(*slot));
1602 int object_size = ConsString::kSize;
1603 EvacuateObject<POINTER_OBJECT, SMALL>(map, slot, object, object_size);
1606 template<ObjectContents object_contents>
1607 class ObjectEvacuationStrategy {
1609 template<int object_size>
1610 static inline void VisitSpecialized(Map* map,
1612 HeapObject* object) {
1613 EvacuateObject<object_contents, SMALL>(map, slot, object, object_size);
1616 static inline void Visit(Map* map,
1618 HeapObject* object) {
1619 int object_size = map->instance_size();
1620 EvacuateObject<object_contents, SMALL>(map, slot, object, object_size);
1624 static VisitorDispatchTable<ScavengingCallback> table_;
1628 template<MarksHandling marks_handling,
1629 LoggingAndProfiling logging_and_profiling_mode>
1630 VisitorDispatchTable<ScavengingCallback>
1631 ScavengingVisitor<marks_handling, logging_and_profiling_mode>::table_;
1634 static void InitializeScavengingVisitorsTables() {
1635 ScavengingVisitor<TRANSFER_MARKS,
1636 LOGGING_AND_PROFILING_DISABLED>::Initialize();
1637 ScavengingVisitor<IGNORE_MARKS, LOGGING_AND_PROFILING_DISABLED>::Initialize();
1638 ScavengingVisitor<TRANSFER_MARKS,
1639 LOGGING_AND_PROFILING_ENABLED>::Initialize();
1640 ScavengingVisitor<IGNORE_MARKS, LOGGING_AND_PROFILING_ENABLED>::Initialize();
1644 void Heap::SelectScavengingVisitorsTable() {
1645 bool logging_and_profiling =
1646 isolate()->logger()->is_logging() ||
1647 CpuProfiler::is_profiling(isolate()) ||
1648 (isolate()->heap_profiler() != NULL &&
1649 isolate()->heap_profiler()->is_profiling());
1651 if (!incremental_marking()->IsMarking()) {
1652 if (!logging_and_profiling) {
1653 scavenging_visitors_table_.CopyFrom(
1654 ScavengingVisitor<IGNORE_MARKS,
1655 LOGGING_AND_PROFILING_DISABLED>::GetTable());
1657 scavenging_visitors_table_.CopyFrom(
1658 ScavengingVisitor<IGNORE_MARKS,
1659 LOGGING_AND_PROFILING_ENABLED>::GetTable());
1662 if (!logging_and_profiling) {
1663 scavenging_visitors_table_.CopyFrom(
1664 ScavengingVisitor<TRANSFER_MARKS,
1665 LOGGING_AND_PROFILING_DISABLED>::GetTable());
1667 scavenging_visitors_table_.CopyFrom(
1668 ScavengingVisitor<TRANSFER_MARKS,
1669 LOGGING_AND_PROFILING_ENABLED>::GetTable());
1672 if (incremental_marking()->IsCompacting()) {
1673 // When compacting, forbid short-circuiting of cons-strings.
1674 // Scavenging code relies on the fact that a new space object
1675 // can't be evacuated into an evacuation candidate, but
1676 // short-circuiting violates this assumption.
1677 scavenging_visitors_table_.Register(
1678 StaticVisitorBase::kVisitShortcutCandidate,
1679 scavenging_visitors_table_.GetVisitorById(
1680 StaticVisitorBase::kVisitConsString));
1686 void Heap::ScavengeObjectSlow(HeapObject** p, HeapObject* object) {
1687 SLOW_ASSERT(HEAP->InFromSpace(object));
1688 MapWord first_word = object->map_word();
1689 SLOW_ASSERT(!first_word.IsForwardingAddress());
1690 Map* map = first_word.ToMap();
1691 map->GetHeap()->DoScavengeObject(map, p, object);
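// Allocates a map with only the fields needed during bootstrapping filled
// in; CreateInitialMaps() below patches in the descriptor array, code cache
// and prototype once those objects exist.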
1695 MaybeObject* Heap::AllocatePartialMap(InstanceType instance_type,
1696 int instance_size) {
1698 { MaybeObject* maybe_result = AllocateRawMap();
1699 if (!maybe_result->ToObject(&result)) return maybe_result;
1702 // Map::cast cannot be used due to uninitialized map field.
1703 reinterpret_cast<Map*>(result)->set_map(raw_unchecked_meta_map());
1704 reinterpret_cast<Map*>(result)->set_instance_type(instance_type);
1705 reinterpret_cast<Map*>(result)->set_instance_size(instance_size);
1706 reinterpret_cast<Map*>(result)->set_visitor_id(
1707 StaticVisitorBase::GetVisitorId(instance_type, instance_size));
1708 reinterpret_cast<Map*>(result)->set_inobject_properties(0);
1709 reinterpret_cast<Map*>(result)->set_pre_allocated_property_fields(0);
1710 reinterpret_cast<Map*>(result)->set_unused_property_fields(0);
1711 reinterpret_cast<Map*>(result)->set_bit_field(0);
1712 reinterpret_cast<Map*>(result)->set_bit_field2(0);
1717 MaybeObject* Heap::AllocateMap(InstanceType instance_type,
1719 ElementsKind elements_kind) {
1721 { MaybeObject* maybe_result = AllocateRawMap();
1722 if (!maybe_result->ToObject(&result)) return maybe_result;
1725 Map* map = reinterpret_cast<Map*>(result);
1726 map->set_map(meta_map());
1727 map->set_instance_type(instance_type);
1728 map->set_visitor_id(
1729 StaticVisitorBase::GetVisitorId(instance_type, instance_size));
1730 map->set_prototype(null_value());
1731 map->set_constructor(null_value());
1732 map->set_instance_size(instance_size);
1733 map->set_inobject_properties(0);
1734 map->set_pre_allocated_property_fields(0);
1735 map->init_instance_descriptors();
1736 map->set_code_cache(empty_fixed_array());
1737 map->set_prototype_transitions(empty_fixed_array());
1738 map->set_unused_property_fields(0);
1739 map->set_bit_field(0);
1740 map->set_bit_field2(1 << Map::kIsExtensible);
1741 map->set_elements_kind(elements_kind);
1743 // If the map object is aligned fill the padding area with Smi 0 objects.
1744 if (Map::kPadStart < Map::kSize) {
1745 memset(reinterpret_cast<byte*>(map) + Map::kPadStart - kHeapObjectTag,
1747 Map::kSize - Map::kPadStart);
1753 MaybeObject* Heap::AllocateCodeCache() {
1755 { MaybeObject* maybe_result = AllocateStruct(CODE_CACHE_TYPE);
1756 if (!maybe_result->ToObject(&result)) return maybe_result;
1758 CodeCache* code_cache = CodeCache::cast(result);
1759 code_cache->set_default_cache(empty_fixed_array());
1760 code_cache->set_normal_type_cache(undefined_value());
1765 MaybeObject* Heap::AllocatePolymorphicCodeCache() {
1766 return AllocateStruct(POLYMORPHIC_CODE_CACHE_TYPE);
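// The tables below drive CreateInitialMaps() and the symbol setup: each
// entry maps an instance type or symbol literal to the root-list index at
// which the freshly created map or symbol is stored (the string and struct
// tables also carry the instance size).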
1770 const Heap::StringTypeTable Heap::string_type_table[] = {
1771 #define STRING_TYPE_ELEMENT(type, size, name, camel_name) \
1772 {type, size, k##camel_name##MapRootIndex},
1773 STRING_TYPE_LIST(STRING_TYPE_ELEMENT)
1774 #undef STRING_TYPE_ELEMENT
1778 const Heap::ConstantSymbolTable Heap::constant_symbol_table[] = {
1779 #define CONSTANT_SYMBOL_ELEMENT(name, contents) \
1780 {contents, k##name##RootIndex},
1781 SYMBOL_LIST(CONSTANT_SYMBOL_ELEMENT)
1782 #undef CONSTANT_SYMBOL_ELEMENT
1786 const Heap::StructTable Heap::struct_table[] = {
1787 #define STRUCT_TABLE_ELEMENT(NAME, Name, name) \
1788 { NAME##_TYPE, Name::kSize, k##Name##MapRootIndex },
1789 STRUCT_LIST(STRUCT_TABLE_ELEMENT)
1790 #undef STRUCT_TABLE_ELEMENT
1794 bool Heap::CreateInitialMaps() {
1796 { MaybeObject* maybe_obj = AllocatePartialMap(MAP_TYPE, Map::kSize);
1797 if (!maybe_obj->ToObject(&obj)) return false;
1799 // Map::cast cannot be used due to uninitialized map field.
1800 Map* new_meta_map = reinterpret_cast<Map*>(obj);
1801 set_meta_map(new_meta_map);
1802 new_meta_map->set_map(new_meta_map);
1804 { MaybeObject* maybe_obj =
1805 AllocatePartialMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
1806 if (!maybe_obj->ToObject(&obj)) return false;
1808 set_fixed_array_map(Map::cast(obj));
1810 { MaybeObject* maybe_obj = AllocatePartialMap(ODDBALL_TYPE, Oddball::kSize);
1811 if (!maybe_obj->ToObject(&obj)) return false;
1813 set_oddball_map(Map::cast(obj));
1815 // Allocate the empty array.
1816 { MaybeObject* maybe_obj = AllocateEmptyFixedArray();
1817 if (!maybe_obj->ToObject(&obj)) return false;
1819 set_empty_fixed_array(FixedArray::cast(obj));
1821 { MaybeObject* maybe_obj = Allocate(oddball_map(), OLD_POINTER_SPACE);
1822 if (!maybe_obj->ToObject(&obj)) return false;
1824 set_null_value(Oddball::cast(obj));
1825 Oddball::cast(obj)->set_kind(Oddball::kNull);
1827 { MaybeObject* maybe_obj = Allocate(oddball_map(), OLD_POINTER_SPACE);
1828 if (!maybe_obj->ToObject(&obj)) return false;
1830 set_undefined_value(Oddball::cast(obj));
1831 Oddball::cast(obj)->set_kind(Oddball::kUndefined);
1832 ASSERT(!InNewSpace(undefined_value()));
1834 // Allocate the empty descriptor array.
1835 { MaybeObject* maybe_obj = AllocateEmptyFixedArray();
1836 if (!maybe_obj->ToObject(&obj)) return false;
1838 set_empty_descriptor_array(DescriptorArray::cast(obj));
1840 // Fix the instance_descriptors for the existing maps.
1841 meta_map()->init_instance_descriptors();
1842 meta_map()->set_code_cache(empty_fixed_array());
1843 meta_map()->set_prototype_transitions(empty_fixed_array());
1845 fixed_array_map()->init_instance_descriptors();
1846 fixed_array_map()->set_code_cache(empty_fixed_array());
1847 fixed_array_map()->set_prototype_transitions(empty_fixed_array());
1849 oddball_map()->init_instance_descriptors();
1850 oddball_map()->set_code_cache(empty_fixed_array());
1851 oddball_map()->set_prototype_transitions(empty_fixed_array());
1853 // Fix prototype object for existing maps.
1854 meta_map()->set_prototype(null_value());
1855 meta_map()->set_constructor(null_value());
1857 fixed_array_map()->set_prototype(null_value());
1858 fixed_array_map()->set_constructor(null_value());
1860 oddball_map()->set_prototype(null_value());
1861 oddball_map()->set_constructor(null_value());
1863 { MaybeObject* maybe_obj =
1864 AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
1865 if (!maybe_obj->ToObject(&obj)) return false;
1867 set_fixed_cow_array_map(Map::cast(obj));
1868 ASSERT(fixed_array_map() != fixed_cow_array_map());
1870 { MaybeObject* maybe_obj =
1871 AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
1872 if (!maybe_obj->ToObject(&obj)) return false;
1874 set_serialized_scope_info_map(Map::cast(obj));
1876 { MaybeObject* maybe_obj = AllocateMap(HEAP_NUMBER_TYPE, HeapNumber::kSize);
1877 if (!maybe_obj->ToObject(&obj)) return false;
1879 set_heap_number_map(Map::cast(obj));
1881 { MaybeObject* maybe_obj = AllocateMap(FOREIGN_TYPE, Foreign::kSize);
1882 if (!maybe_obj->ToObject(&obj)) return false;
1884 set_foreign_map(Map::cast(obj));
1886 for (unsigned i = 0; i < ARRAY_SIZE(string_type_table); i++) {
1887 const StringTypeTable& entry = string_type_table[i];
1888 { MaybeObject* maybe_obj = AllocateMap(entry.type, entry.size);
1889 if (!maybe_obj->ToObject(&obj)) return false;
1891 roots_[entry.index] = Map::cast(obj);
1894 { MaybeObject* maybe_obj = AllocateMap(STRING_TYPE, kVariableSizeSentinel);
1895 if (!maybe_obj->ToObject(&obj)) return false;
1897 set_undetectable_string_map(Map::cast(obj));
1898 Map::cast(obj)->set_is_undetectable();
1900 { MaybeObject* maybe_obj =
1901 AllocateMap(ASCII_STRING_TYPE, kVariableSizeSentinel);
1902 if (!maybe_obj->ToObject(&obj)) return false;
1904 set_undetectable_ascii_string_map(Map::cast(obj));
1905 Map::cast(obj)->set_is_undetectable();
1907 { MaybeObject* maybe_obj =
1908 AllocateMap(FIXED_DOUBLE_ARRAY_TYPE, kVariableSizeSentinel);
1909 if (!maybe_obj->ToObject(&obj)) return false;
1911 set_fixed_double_array_map(Map::cast(obj));
1913 { MaybeObject* maybe_obj =
1914 AllocateMap(BYTE_ARRAY_TYPE, kVariableSizeSentinel);
1915 if (!maybe_obj->ToObject(&obj)) return false;
1917 set_byte_array_map(Map::cast(obj));
1919 { MaybeObject* maybe_obj =
1920 AllocateMap(FREE_SPACE_TYPE, kVariableSizeSentinel);
1921 if (!maybe_obj->ToObject(&obj)) return false;
1923 set_free_space_map(Map::cast(obj));
1925 { MaybeObject* maybe_obj = AllocateByteArray(0, TENURED);
1926 if (!maybe_obj->ToObject(&obj)) return false;
1928 set_empty_byte_array(ByteArray::cast(obj));
1930 { MaybeObject* maybe_obj =
1931 AllocateMap(EXTERNAL_PIXEL_ARRAY_TYPE, ExternalArray::kAlignedSize);
1932 if (!maybe_obj->ToObject(&obj)) return false;
1934 set_external_pixel_array_map(Map::cast(obj));
1936 { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_BYTE_ARRAY_TYPE,
1937 ExternalArray::kAlignedSize);
1938 if (!maybe_obj->ToObject(&obj)) return false;
1940 set_external_byte_array_map(Map::cast(obj));
1942 { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE,
1943 ExternalArray::kAlignedSize);
1944 if (!maybe_obj->ToObject(&obj)) return false;
1946 set_external_unsigned_byte_array_map(Map::cast(obj));
1948 { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_SHORT_ARRAY_TYPE,
1949 ExternalArray::kAlignedSize);
1950 if (!maybe_obj->ToObject(&obj)) return false;
1952 set_external_short_array_map(Map::cast(obj));
1954 { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE,
1955 ExternalArray::kAlignedSize);
1956 if (!maybe_obj->ToObject(&obj)) return false;
1958 set_external_unsigned_short_array_map(Map::cast(obj));
1960 { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_INT_ARRAY_TYPE,
1961 ExternalArray::kAlignedSize);
1962 if (!maybe_obj->ToObject(&obj)) return false;
1964 set_external_int_array_map(Map::cast(obj));
1966 { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_UNSIGNED_INT_ARRAY_TYPE,
1967 ExternalArray::kAlignedSize);
1968 if (!maybe_obj->ToObject(&obj)) return false;
1970 set_external_unsigned_int_array_map(Map::cast(obj));
1972 { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_FLOAT_ARRAY_TYPE,
1973 ExternalArray::kAlignedSize);
1974 if (!maybe_obj->ToObject(&obj)) return false;
1976 set_external_float_array_map(Map::cast(obj));
1978 { MaybeObject* maybe_obj =
1979 AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
1980 if (!maybe_obj->ToObject(&obj)) return false;
1982 set_non_strict_arguments_elements_map(Map::cast(obj));
1984 { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_DOUBLE_ARRAY_TYPE,
1985 ExternalArray::kAlignedSize);
1986 if (!maybe_obj->ToObject(&obj)) return false;
1988 set_external_double_array_map(Map::cast(obj));
1990 { MaybeObject* maybe_obj = AllocateMap(CODE_TYPE, kVariableSizeSentinel);
1991 if (!maybe_obj->ToObject(&obj)) return false;
1993 set_code_map(Map::cast(obj));
1995 { MaybeObject* maybe_obj = AllocateMap(JS_GLOBAL_PROPERTY_CELL_TYPE,
1996 JSGlobalPropertyCell::kSize);
1997 if (!maybe_obj->ToObject(&obj)) return false;
1999 set_global_property_cell_map(Map::cast(obj));
2001 { MaybeObject* maybe_obj = AllocateMap(FILLER_TYPE, kPointerSize);
2002 if (!maybe_obj->ToObject(&obj)) return false;
2004 set_one_pointer_filler_map(Map::cast(obj));
2006 { MaybeObject* maybe_obj = AllocateMap(FILLER_TYPE, 2 * kPointerSize);
2007 if (!maybe_obj->ToObject(&obj)) return false;
2009 set_two_pointer_filler_map(Map::cast(obj));
2011 for (unsigned i = 0; i < ARRAY_SIZE(struct_table); i++) {
2012 const StructTable& entry = struct_table[i];
2013 { MaybeObject* maybe_obj = AllocateMap(entry.type, entry.size);
2014 if (!maybe_obj->ToObject(&obj)) return false;
2016 roots_[entry.index] = Map::cast(obj);
2019 { MaybeObject* maybe_obj =
2020 AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2021 if (!maybe_obj->ToObject(&obj)) return false;
2023 set_hash_table_map(Map::cast(obj));
2025 { MaybeObject* maybe_obj =
2026 AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2027 if (!maybe_obj->ToObject(&obj)) return false;
2029 set_function_context_map(Map::cast(obj));
2031 { MaybeObject* maybe_obj =
2032 AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2033 if (!maybe_obj->ToObject(&obj)) return false;
2035 set_catch_context_map(Map::cast(obj));
2037 { MaybeObject* maybe_obj =
2038 AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2039 if (!maybe_obj->ToObject(&obj)) return false;
2041 set_with_context_map(Map::cast(obj));
2043 { MaybeObject* maybe_obj =
2044 AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2045 if (!maybe_obj->ToObject(&obj)) return false;
2047 set_block_context_map(Map::cast(obj));
2049 { MaybeObject* maybe_obj =
2050 AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2051 if (!maybe_obj->ToObject(&obj)) return false;
2053 Map* global_context_map = Map::cast(obj);
2054 global_context_map->set_visitor_id(StaticVisitorBase::kVisitGlobalContext);
2055 set_global_context_map(global_context_map);
2057 { MaybeObject* maybe_obj = AllocateMap(SHARED_FUNCTION_INFO_TYPE,
2058 SharedFunctionInfo::kAlignedSize);
2059 if (!maybe_obj->ToObject(&obj)) return false;
2061 set_shared_function_info_map(Map::cast(obj));
2063 { MaybeObject* maybe_obj = AllocateMap(JS_MESSAGE_OBJECT_TYPE,
2064 JSMessageObject::kSize);
2065 if (!maybe_obj->ToObject(&obj)) return false;
2067 set_message_object_map(Map::cast(obj));
2069 ASSERT(!InNewSpace(empty_fixed_array()));
2074 MaybeObject* Heap::AllocateHeapNumber(double value, PretenureFlag pretenure) {
2075   // Statically ensure that it is safe to allocate heap numbers in paged spaces.
2077 STATIC_ASSERT(HeapNumber::kSize <= Page::kMaxHeapObjectSize);
2078 AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
2081 { MaybeObject* maybe_result =
2082 AllocateRaw(HeapNumber::kSize, space, OLD_DATA_SPACE);
2083 if (!maybe_result->ToObject(&result)) return maybe_result;
2086 HeapObject::cast(result)->set_map(heap_number_map());
2087 HeapNumber::cast(result)->set_value(value);
2092 MaybeObject* Heap::AllocateHeapNumber(double value) {
2093 // Use general version, if we're forced to always allocate.
2094 if (always_allocate()) return AllocateHeapNumber(value, TENURED);
2096 // This version of AllocateHeapNumber is optimized for
2097 // allocation in new space.
2098 STATIC_ASSERT(HeapNumber::kSize <= Page::kMaxHeapObjectSize);
2099 ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
2101 { MaybeObject* maybe_result = new_space_.AllocateRaw(HeapNumber::kSize);
2102 if (!maybe_result->ToObject(&result)) return maybe_result;
2104 HeapObject::cast(result)->set_map(heap_number_map());
2105 HeapNumber::cast(result)->set_value(value);
2110 MaybeObject* Heap::AllocateJSGlobalPropertyCell(Object* value) {
2112 { MaybeObject* maybe_result = AllocateRawCell();
2113 if (!maybe_result->ToObject(&result)) return maybe_result;
2115 HeapObject::cast(result)->set_map(global_property_cell_map());
2116 JSGlobalPropertyCell::cast(result)->set_value(value);
2121 MaybeObject* Heap::CreateOddball(const char* to_string,
2125 { MaybeObject* maybe_result = Allocate(oddball_map(), OLD_POINTER_SPACE);
2126 if (!maybe_result->ToObject(&result)) return maybe_result;
2128 return Oddball::cast(result)->Initialize(to_string, to_number, kind);
2132 bool Heap::CreateApiObjects() {
2135 { MaybeObject* maybe_obj = AllocateMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
2136 if (!maybe_obj->ToObject(&obj)) return false;
2138   // Don't use Smi-only elements optimizations for objects with the neander
2139   // map. There are too many cases where element values are set directly,
2140   // with no bottleneck at which to trap the Smi-only -> fast elements
2141   // transition, and there appears to be no benefit in optimizing this case.
2142 Map* new_neander_map = Map::cast(obj);
2143 new_neander_map->set_elements_kind(FAST_ELEMENTS);
2144 set_neander_map(new_neander_map);
2146 { MaybeObject* maybe_obj = AllocateJSObjectFromMap(neander_map());
2147 if (!maybe_obj->ToObject(&obj)) return false;
2150 { MaybeObject* maybe_elements = AllocateFixedArray(2);
2151 if (!maybe_elements->ToObject(&elements)) return false;
2153 FixedArray::cast(elements)->set(0, Smi::FromInt(0));
2154 JSObject::cast(obj)->set_elements(FixedArray::cast(elements));
2155 set_message_listeners(JSObject::cast(obj));
2161 void Heap::CreateJSEntryStub() {
2163 set_js_entry_code(*stub.GetCode());
2167 void Heap::CreateJSConstructEntryStub() {
2168 JSConstructEntryStub stub;
2169 set_js_construct_entry_code(*stub.GetCode());
2173 void Heap::CreateFixedStubs() {
2174 // Here we create roots for fixed stubs. They are needed at GC
2175 // for cooking and uncooking (check out frames.cc).
2176   // This eliminates the need for doing a dictionary lookup in the
2177 // stub cache for these stubs.
2179   // gcc-4.4 has problems generating correct code for the following snippet:
2180 // { JSEntryStub stub;
2181 // js_entry_code_ = *stub.GetCode();
2183 // { JSConstructEntryStub stub;
2184 // js_construct_entry_code_ = *stub.GetCode();
2186   // To work around the problem, make separate functions without inlining.
2187 Heap::CreateJSEntryStub();
2188 Heap::CreateJSConstructEntryStub();
2190 // Create stubs that should be there, so we don't unexpectedly have to
2191 // create them if we need them during the creation of another stub.
2192 // Stub creation mixes raw pointers and handles in an unsafe manner so
2193 // we cannot create stubs while we are creating stubs.
2194 CodeStub::GenerateStubsAheadOfTime();
2198 bool Heap::CreateInitialObjects() {
2201 // The -0 value must be set before NumberFromDouble works.
2202 { MaybeObject* maybe_obj = AllocateHeapNumber(-0.0, TENURED);
2203 if (!maybe_obj->ToObject(&obj)) return false;
2205 set_minus_zero_value(HeapNumber::cast(obj));
2206 ASSERT(signbit(minus_zero_value()->Number()) != 0);
2208 { MaybeObject* maybe_obj = AllocateHeapNumber(OS::nan_value(), TENURED);
2209 if (!maybe_obj->ToObject(&obj)) return false;
2211 set_nan_value(HeapNumber::cast(obj));
2213 { MaybeObject* maybe_obj = AllocateHeapNumber(V8_INFINITY, TENURED);
2214 if (!maybe_obj->ToObject(&obj)) return false;
2216 set_infinity_value(HeapNumber::cast(obj));
2218 // Allocate initial symbol table.
2219 { MaybeObject* maybe_obj = SymbolTable::Allocate(kInitialSymbolTableSize);
2220 if (!maybe_obj->ToObject(&obj)) return false;
2222 // Don't use set_symbol_table() due to asserts.
2223 roots_[kSymbolTableRootIndex] = obj;
2225   // Finish initializing oddballs after creating the symbol table.
2226 { MaybeObject* maybe_obj =
2227 undefined_value()->Initialize("undefined",
2229 Oddball::kUndefined);
2230 if (!maybe_obj->ToObject(&obj)) return false;
2233 // Initialize the null_value.
2234 { MaybeObject* maybe_obj =
2235 null_value()->Initialize("null", Smi::FromInt(0), Oddball::kNull);
2236 if (!maybe_obj->ToObject(&obj)) return false;
2239 { MaybeObject* maybe_obj = CreateOddball("true",
2242 if (!maybe_obj->ToObject(&obj)) return false;
2244 set_true_value(Oddball::cast(obj));
2246 { MaybeObject* maybe_obj = CreateOddball("false",
2249 if (!maybe_obj->ToObject(&obj)) return false;
2251 set_false_value(Oddball::cast(obj));
2253 { MaybeObject* maybe_obj = CreateOddball("hole",
2256 if (!maybe_obj->ToObject(&obj)) return false;
2258 set_the_hole_value(Oddball::cast(obj));
2260 { MaybeObject* maybe_obj = CreateOddball("arguments_marker",
2262 Oddball::kArgumentMarker);
2263 if (!maybe_obj->ToObject(&obj)) return false;
2265 set_arguments_marker(Oddball::cast(obj));
2267 { MaybeObject* maybe_obj = CreateOddball("no_interceptor_result_sentinel",
2270 if (!maybe_obj->ToObject(&obj)) return false;
2272 set_no_interceptor_result_sentinel(obj);
2274 { MaybeObject* maybe_obj = CreateOddball("termination_exception",
2277 if (!maybe_obj->ToObject(&obj)) return false;
2279 set_termination_exception(obj);
2281 { MaybeObject* maybe_obj = CreateOddball("frame_alignment_marker",
2284 if (!maybe_obj->ToObject(&obj)) return false;
2286 set_frame_alignment_marker(Oddball::cast(obj));
2287 STATIC_ASSERT(Oddball::kLeastHiddenOddballNumber == -5);
2289 // Allocate the empty string.
2290 { MaybeObject* maybe_obj = AllocateRawAsciiString(0, TENURED);
2291 if (!maybe_obj->ToObject(&obj)) return false;
2293 set_empty_string(String::cast(obj));
2295 for (unsigned i = 0; i < ARRAY_SIZE(constant_symbol_table); i++) {
2296 { MaybeObject* maybe_obj =
2297 LookupAsciiSymbol(constant_symbol_table[i].contents);
2298 if (!maybe_obj->ToObject(&obj)) return false;
2300 roots_[constant_symbol_table[i].index] = String::cast(obj);
2303 // Allocate the hidden symbol which is used to identify the hidden properties
2304 // in JSObjects. The hash code has a special value so that it will not match
2305 // the empty string when searching for the property. It cannot be part of the
2306 // loop above because it needs to be allocated manually with the special
2307 // hash code in place. The hash code for the hidden_symbol is zero to ensure
2308 // that it will always be at the first entry in property descriptors.
2309 { MaybeObject* maybe_obj =
2310 AllocateSymbol(CStrVector(""), 0, String::kZeroHash);
2311 if (!maybe_obj->ToObject(&obj)) return false;
2313 hidden_symbol_ = String::cast(obj);
2315 // Allocate the foreign for __proto__.
2316 { MaybeObject* maybe_obj =
2317 AllocateForeign((Address) &Accessors::ObjectPrototype);
2318 if (!maybe_obj->ToObject(&obj)) return false;
2320 set_prototype_accessors(Foreign::cast(obj));
2322 // Allocate the code_stubs dictionary. The initial size is set to avoid
2323 // expanding the dictionary during bootstrapping.
2324 { MaybeObject* maybe_obj = NumberDictionary::Allocate(128);
2325 if (!maybe_obj->ToObject(&obj)) return false;
2327 set_code_stubs(NumberDictionary::cast(obj));
2329 // Allocate the non_monomorphic_cache used in stub-cache.cc. The initial size
2330 // is set to avoid expanding the dictionary during bootstrapping.
2331 { MaybeObject* maybe_obj = NumberDictionary::Allocate(64);
2332 if (!maybe_obj->ToObject(&obj)) return false;
2334 set_non_monomorphic_cache(NumberDictionary::cast(obj));
2336 { MaybeObject* maybe_obj = AllocatePolymorphicCodeCache();
2337 if (!maybe_obj->ToObject(&obj)) return false;
2339 set_polymorphic_code_cache(PolymorphicCodeCache::cast(obj));
2341 set_instanceof_cache_function(Smi::FromInt(0));
2342 set_instanceof_cache_map(Smi::FromInt(0));
2343 set_instanceof_cache_answer(Smi::FromInt(0));
2347 // Allocate the dictionary of intrinsic function names.
2348 { MaybeObject* maybe_obj = StringDictionary::Allocate(Runtime::kNumFunctions);
2349 if (!maybe_obj->ToObject(&obj)) return false;
2351 { MaybeObject* maybe_obj = Runtime::InitializeIntrinsicFunctionNames(this,
2353 if (!maybe_obj->ToObject(&obj)) return false;
2355 set_intrinsic_function_names(StringDictionary::cast(obj));
2357 if (InitializeNumberStringCache()->IsFailure()) return false;
2359 // Allocate cache for single character ASCII strings.
2360 { MaybeObject* maybe_obj =
2361 AllocateFixedArray(String::kMaxAsciiCharCode + 1, TENURED);
2362 if (!maybe_obj->ToObject(&obj)) return false;
2364 set_single_character_string_cache(FixedArray::cast(obj));
2366 // Allocate cache for string split.
2367 { MaybeObject* maybe_obj =
2368 AllocateFixedArray(StringSplitCache::kStringSplitCacheSize, TENURED);
2369 if (!maybe_obj->ToObject(&obj)) return false;
2371 set_string_split_cache(FixedArray::cast(obj));
2373 // Allocate cache for external strings pointing to native source code.
2374 { MaybeObject* maybe_obj = AllocateFixedArray(Natives::GetBuiltinsCount());
2375 if (!maybe_obj->ToObject(&obj)) return false;
2377 set_natives_source_cache(FixedArray::cast(obj));
2379 // Handling of script id generation is in FACTORY->NewScript.
2380 set_last_script_id(undefined_value());
2382 // Initialize keyed lookup cache.
2383 isolate_->keyed_lookup_cache()->Clear();
2385 // Initialize context slot cache.
2386 isolate_->context_slot_cache()->Clear();
2388 // Initialize descriptor cache.
2389 isolate_->descriptor_lookup_cache()->Clear();
2391 // Initialize compilation cache.
2392 isolate_->compilation_cache()->Clear();
2398 Object* StringSplitCache::Lookup(
2399 FixedArray* cache, String* string, String* pattern) {
2400 if (!string->IsSymbol() || !pattern->IsSymbol()) return Smi::FromInt(0);
2401 uint32_t hash = string->Hash();
2402 uint32_t index = ((hash & (kStringSplitCacheSize - 1)) &
2403 ~(kArrayEntriesPerCacheEntry - 1));
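  // The hash is masked into the cache and then aligned down to a
  // kArrayEntriesPerCacheEntry boundary, so each logical entry occupies a
  // group of consecutive slots (string, pattern, result array). If this
  // primary entry does not match, a second probe below checks the entry
  // immediately following it.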
2404 if (cache->get(index + kStringOffset) == string &&
2405 cache->get(index + kPatternOffset) == pattern) {
2406 return cache->get(index + kArrayOffset);
2408 index = ((index + kArrayEntriesPerCacheEntry) & (kStringSplitCacheSize - 1));
2409 if (cache->get(index + kStringOffset) == string &&
2410 cache->get(index + kPatternOffset) == pattern) {
2411 return cache->get(index + kArrayOffset);
2413 return Smi::FromInt(0);
2417 void StringSplitCache::Enter(Heap* heap,
2421 FixedArray* array) {
2422 if (!string->IsSymbol() || !pattern->IsSymbol()) return;
2423 uint32_t hash = string->Hash();
2424 uint32_t index = ((hash & (kStringSplitCacheSize - 1)) &
2425 ~(kArrayEntriesPerCacheEntry - 1));
2426 if (cache->get(index + kStringOffset) == Smi::FromInt(0)) {
2427 cache->set(index + kStringOffset, string);
2428 cache->set(index + kPatternOffset, pattern);
2429 cache->set(index + kArrayOffset, array);
2432     uint32_t index2 = ((index + kArrayEntriesPerCacheEntry) & (kStringSplitCacheSize - 1));
2433 if (cache->get(index2 + kStringOffset) == Smi::FromInt(0)) {
2434 cache->set(index2 + kStringOffset, string);
2435 cache->set(index2 + kPatternOffset, pattern);
2436 cache->set(index2 + kArrayOffset, array);
2438 cache->set(index2 + kStringOffset, Smi::FromInt(0));
2439 cache->set(index2 + kPatternOffset, Smi::FromInt(0));
2440 cache->set(index2 + kArrayOffset, Smi::FromInt(0));
2441 cache->set(index + kStringOffset, string);
2442 cache->set(index + kPatternOffset, pattern);
2443 cache->set(index + kArrayOffset, array);
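      // Replacement policy used above: take the primary entry if it is empty,
      // otherwise the secondary entry; if both are occupied, the secondary
      // entry is cleared and the new result overwrites the primary entry.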
2446 if (array->length() < 100) { // Limit how many new symbols we want to make.
2447 for (int i = 0; i < array->length(); i++) {
2448 String* str = String::cast(array->get(i));
2450 MaybeObject* maybe_symbol = heap->LookupSymbol(str);
2451 if (maybe_symbol->ToObject(&symbol)) {
2452 array->set(i, symbol);
2456 array->set_map(heap->fixed_cow_array_map());
2460 void StringSplitCache::Clear(FixedArray* cache) {
2461 for (int i = 0; i < kStringSplitCacheSize; i++) {
2462 cache->set(i, Smi::FromInt(0));
2467 MaybeObject* Heap::InitializeNumberStringCache() {
2468 // Compute the size of the number string cache based on the max heap size.
2469   // max_semispace_size_ == 512 KB => number_string_cache_size = 1K.
2470 // max_semispace_size_ == 8 MB => number_string_cache_size = 16KB.
2471 int number_string_cache_size = max_semispace_size_ / 512;
2472 number_string_cache_size = Max(32, Min(16*KB, number_string_cache_size));
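  // For example, with max_semispace_size_ == 8 * MB the division yields
  // 8 * MB / 512 == 16K entries, which the Max/Min clamp leaves unchanged;
  // the FixedArray allocated below is twice as long because each entry is a
  // (number, string) pair.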
2474 MaybeObject* maybe_obj =
2475 AllocateFixedArray(number_string_cache_size * 2, TENURED);
2476 if (maybe_obj->ToObject(&obj)) set_number_string_cache(FixedArray::cast(obj));
2481 void Heap::FlushNumberStringCache() {
2482 // Flush the number to string cache.
2483 int len = number_string_cache()->length();
2484 for (int i = 0; i < len; i++) {
2485 number_string_cache()->set_undefined(this, i);
2490 static inline int double_get_hash(double d) {
2491 DoubleRepresentation rep(d);
2492 return static_cast<int>(rep.bits) ^ static_cast<int>(rep.bits >> 32);
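// Worked example: for d == 1.0 the IEEE-754 bit pattern is
// 0x3FF0000000000000, so the hash is the XOR of its two 32-bit halves,
// 0x00000000 ^ 0x3FF00000 == 0x3FF00000 (callers mask this down to the
// cache size).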
2496 static inline int smi_get_hash(Smi* smi) {
2497 return smi->value();
2501 Object* Heap::GetNumberStringCache(Object* number) {
2503 int mask = (number_string_cache()->length() >> 1) - 1;
2504 if (number->IsSmi()) {
2505 hash = smi_get_hash(Smi::cast(number)) & mask;
2507 hash = double_get_hash(number->Number()) & mask;
2509 Object* key = number_string_cache()->get(hash * 2);
2510 if (key == number) {
2511 return String::cast(number_string_cache()->get(hash * 2 + 1));
2512 } else if (key->IsHeapNumber() &&
2513 number->IsHeapNumber() &&
2514 key->Number() == number->Number()) {
2515 return String::cast(number_string_cache()->get(hash * 2 + 1));
2517 return undefined_value();
2521 void Heap::SetNumberStringCache(Object* number, String* string) {
2523 int mask = (number_string_cache()->length() >> 1) - 1;
2524 if (number->IsSmi()) {
2525 hash = smi_get_hash(Smi::cast(number)) & mask;
2526 number_string_cache()->set(hash * 2, Smi::cast(number));
2528 hash = double_get_hash(number->Number()) & mask;
2529 number_string_cache()->set(hash * 2, number);
2531 number_string_cache()->set(hash * 2 + 1, string);
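// The number string cache is a flat FixedArray of (key, value) pairs: the
// key (a Smi or a HeapNumber) is stored at index hash * 2 and the cached
// string at hash * 2 + 1, which is why the mask above is computed from
// length() >> 1.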
2535 MaybeObject* Heap::NumberToString(Object* number,
2536 bool check_number_string_cache) {
2537 isolate_->counters()->number_to_string_runtime()->Increment();
2538 if (check_number_string_cache) {
2539 Object* cached = GetNumberStringCache(number);
2540 if (cached != undefined_value()) {
2546 Vector<char> buffer(arr, ARRAY_SIZE(arr));
2548 if (number->IsSmi()) {
2549 int num = Smi::cast(number)->value();
2550 str = IntToCString(num, buffer);
2552 double num = HeapNumber::cast(number)->value();
2553 str = DoubleToCString(num, buffer);
2557 MaybeObject* maybe_js_string = AllocateStringFromAscii(CStrVector(str));
2558 if (maybe_js_string->ToObject(&js_string)) {
2559 SetNumberStringCache(number, String::cast(js_string));
2561 return maybe_js_string;
2565 MaybeObject* Heap::Uint32ToString(uint32_t value,
2566 bool check_number_string_cache) {
2568 MaybeObject* maybe = NumberFromUint32(value);
2569 if (!maybe->To<Object>(&number)) return maybe;
2570 return NumberToString(number, check_number_string_cache);
2574 Map* Heap::MapForExternalArrayType(ExternalArrayType array_type) {
2575 return Map::cast(roots_[RootIndexForExternalArrayType(array_type)]);
2579 Heap::RootListIndex Heap::RootIndexForExternalArrayType(
2580 ExternalArrayType array_type) {
2581 switch (array_type) {
2582 case kExternalByteArray:
2583 return kExternalByteArrayMapRootIndex;
2584 case kExternalUnsignedByteArray:
2585 return kExternalUnsignedByteArrayMapRootIndex;
2586 case kExternalShortArray:
2587 return kExternalShortArrayMapRootIndex;
2588 case kExternalUnsignedShortArray:
2589 return kExternalUnsignedShortArrayMapRootIndex;
2590 case kExternalIntArray:
2591 return kExternalIntArrayMapRootIndex;
2592 case kExternalUnsignedIntArray:
2593 return kExternalUnsignedIntArrayMapRootIndex;
2594 case kExternalFloatArray:
2595 return kExternalFloatArrayMapRootIndex;
2596 case kExternalDoubleArray:
2597 return kExternalDoubleArrayMapRootIndex;
2598 case kExternalPixelArray:
2599 return kExternalPixelArrayMapRootIndex;
2602 return kUndefinedValueRootIndex;
2607 MaybeObject* Heap::NumberFromDouble(double value, PretenureFlag pretenure) {
2608 // We need to distinguish the minus zero value and this cannot be
2609 // done after conversion to int. Doing this by comparing bit
2610 // patterns is faster than using fpclassify() et al.
2611 static const DoubleRepresentation minus_zero(-0.0);
2613 DoubleRepresentation rep(value);
2614 if (rep.bits == minus_zero.bits) {
2615 return AllocateHeapNumber(-0.0, pretenure);
2618 int int_value = FastD2I(value);
2619 if (value == int_value && Smi::IsValid(int_value)) {
2620 return Smi::FromInt(int_value);
2623 // Materialize the value in the heap.
2624 return AllocateHeapNumber(value, pretenure);
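// Note on the minus zero check above: IEEE-754 compares -0.0 == 0.0 as
// equal and FastD2I(-0.0) == 0, so without the bit-pattern comparison the
// minus zero value would silently become the Smi 0.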
2628 MaybeObject* Heap::AllocateForeign(Address address, PretenureFlag pretenure) {
2629 // Statically ensure that it is safe to allocate foreigns in paged spaces.
2630 STATIC_ASSERT(Foreign::kSize <= Page::kMaxHeapObjectSize);
2631 AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
2633 MaybeObject* maybe_result = Allocate(foreign_map(), space);
2634 if (!maybe_result->To(&result)) return maybe_result;
2635 result->set_foreign_address(address);
2640 MaybeObject* Heap::AllocateSharedFunctionInfo(Object* name) {
2641 SharedFunctionInfo* share;
2642 MaybeObject* maybe = Allocate(shared_function_info_map(), OLD_POINTER_SPACE);
2643 if (!maybe->To<SharedFunctionInfo>(&share)) return maybe;
2645 // Set pointer fields.
2646 share->set_name(name);
2647 Code* illegal = isolate_->builtins()->builtin(Builtins::kIllegal);
2648 share->set_code(illegal);
2649 share->set_scope_info(SerializedScopeInfo::Empty());
2650 Code* construct_stub =
2651 isolate_->builtins()->builtin(Builtins::kJSConstructStubGeneric);
2652 share->set_construct_stub(construct_stub);
2653 share->set_instance_class_name(Object_symbol());
2654 share->set_function_data(undefined_value());
2655 share->set_script(undefined_value());
2656 share->set_debug_info(undefined_value());
2657 share->set_inferred_name(empty_string());
2658 share->set_initial_map(undefined_value());
2659 share->set_this_property_assignments(undefined_value());
2660 share->set_deopt_counter(Smi::FromInt(FLAG_deopt_every_n_times));
2662 // Set integer fields (smi or int, depending on the architecture).
2663 share->set_length(0);
2664 share->set_formal_parameter_count(0);
2665 share->set_expected_nof_properties(0);
2666 share->set_num_literals(0);
2667 share->set_start_position_and_type(0);
2668 share->set_end_position(0);
2669 share->set_function_token_position(0);
2670 // All compiler hints default to false or 0.
2671 share->set_compiler_hints(0);
2672 share->set_this_property_assignments_count(0);
2673 share->set_opt_count(0);
2679 MaybeObject* Heap::AllocateJSMessageObject(String* type,
2684 Object* stack_trace,
2685 Object* stack_frames) {
2687 { MaybeObject* maybe_result = Allocate(message_object_map(), NEW_SPACE);
2688 if (!maybe_result->ToObject(&result)) return maybe_result;
2690 JSMessageObject* message = JSMessageObject::cast(result);
2691 message->set_properties(Heap::empty_fixed_array());
2692 message->set_elements(Heap::empty_fixed_array());
2693 message->set_type(type);
2694 message->set_arguments(arguments);
2695 message->set_start_position(start_position);
2696 message->set_end_position(end_position);
2697 message->set_script(script);
2698 message->set_stack_trace(stack_trace);
2699 message->set_stack_frames(stack_frames);
2705 // Returns true for a character in a range. Both limits are inclusive.
2706 static inline bool Between(uint32_t character, uint32_t from, uint32_t to) {
2707   // This makes use of the unsigned wraparound.
2708 return character - from <= to - from;
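// Worked example of the wraparound: with from == '0' and to == '9', a
// character below '0' (say '/') makes character - from wrap around to a
// value near 2^32, which is larger than to - from == 9, so the test
// correctly fails using a single unsigned comparison.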
2712 MUST_USE_RESULT static inline MaybeObject* MakeOrFindTwoCharacterString(
2717 // Numeric strings have a different hash algorithm not known by
2718 // LookupTwoCharsSymbolIfExists, so we skip this step for such strings.
2719 if ((!Between(c1, '0', '9') || !Between(c2, '0', '9')) &&
2720 heap->symbol_table()->LookupTwoCharsSymbolIfExists(c1, c2, &symbol)) {
2722 // Now we know the length is 2, we might as well make use of that fact
2723 // when building the new string.
2724 } else if ((c1 | c2) <= String::kMaxAsciiCharCodeU) { // We can do this
2725 ASSERT(IsPowerOf2(String::kMaxAsciiCharCodeU + 1)); // because of this.
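    // The OR trick works because String::kMaxAsciiCharCodeU + 1 is a power
    // of two: any bit above the ASCII range that is set in either c1 or c2
    // survives the OR, so (c1 | c2) <= kMaxAsciiCharCodeU holds exactly when
    // both characters are ASCII.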
2727 { MaybeObject* maybe_result = heap->AllocateRawAsciiString(2);
2728 if (!maybe_result->ToObject(&result)) return maybe_result;
2730 char* dest = SeqAsciiString::cast(result)->GetChars();
2736 { MaybeObject* maybe_result = heap->AllocateRawTwoByteString(2);
2737 if (!maybe_result->ToObject(&result)) return maybe_result;
2739 uc16* dest = SeqTwoByteString::cast(result)->GetChars();
2747 MaybeObject* Heap::AllocateConsString(String* first, String* second) {
2748 int first_length = first->length();
2749 if (first_length == 0) {
2753 int second_length = second->length();
2754 if (second_length == 0) {
2758 int length = first_length + second_length;
2760 // Optimization for 2-byte strings often used as keys in a decompression
2761 // dictionary. Check whether we already have the string in the symbol
2762   // table to prevent creation of many unnecessary strings.
2764 unsigned c1 = first->Get(0);
2765 unsigned c2 = second->Get(0);
2766 return MakeOrFindTwoCharacterString(this, c1, c2);
2769 bool first_is_ascii = first->IsAsciiRepresentation();
2770 bool second_is_ascii = second->IsAsciiRepresentation();
2771 bool is_ascii = first_is_ascii && second_is_ascii;
2773 // Make sure that an out of memory exception is thrown if the length
2774 // of the new cons string is too large.
2775 if (length > String::kMaxLength || length < 0) {
2776 isolate()->context()->mark_out_of_memory();
2777 return Failure::OutOfMemoryException();
2780 bool is_ascii_data_in_two_byte_string = false;
2782 // At least one of the strings uses two-byte representation so we
2783 // can't use the fast case code for short ascii strings below, but
2784 // we can try to save memory if all chars actually fit in ascii.
2785 is_ascii_data_in_two_byte_string =
2786 first->HasOnlyAsciiChars() && second->HasOnlyAsciiChars();
2787 if (is_ascii_data_in_two_byte_string) {
2788 isolate_->counters()->string_add_runtime_ext_to_ascii()->Increment();
2792 // If the resulting string is small make a flat string.
2793 if (length < String::kMinNonFlatLength) {
2794 // Note that neither of the two inputs can be a slice because:
2795 STATIC_ASSERT(String::kMinNonFlatLength <= SlicedString::kMinLength);
2796 ASSERT(first->IsFlat());
2797 ASSERT(second->IsFlat());
2800 { MaybeObject* maybe_result = AllocateRawAsciiString(length);
2801 if (!maybe_result->ToObject(&result)) return maybe_result;
2803 // Copy the characters into the new object.
2804 char* dest = SeqAsciiString::cast(result)->GetChars();
2807 if (first->IsExternalString()) {
2808 src = ExternalAsciiString::cast(first)->resource()->data();
2810 src = SeqAsciiString::cast(first)->GetChars();
2812 for (int i = 0; i < first_length; i++) *dest++ = src[i];
2813 // Copy second part.
2814 if (second->IsExternalString()) {
2815 src = ExternalAsciiString::cast(second)->resource()->data();
2817 src = SeqAsciiString::cast(second)->GetChars();
2819 for (int i = 0; i < second_length; i++) *dest++ = src[i];
2822 if (is_ascii_data_in_two_byte_string) {
2824 { MaybeObject* maybe_result = AllocateRawAsciiString(length);
2825 if (!maybe_result->ToObject(&result)) return maybe_result;
2827 // Copy the characters into the new object.
2828 char* dest = SeqAsciiString::cast(result)->GetChars();
2829 String::WriteToFlat(first, dest, 0, first_length);
2830 String::WriteToFlat(second, dest + first_length, 0, second_length);
2831 isolate_->counters()->string_add_runtime_ext_to_ascii()->Increment();
2836 { MaybeObject* maybe_result = AllocateRawTwoByteString(length);
2837 if (!maybe_result->ToObject(&result)) return maybe_result;
2839 // Copy the characters into the new object.
2840 uc16* dest = SeqTwoByteString::cast(result)->GetChars();
2841 String::WriteToFlat(first, dest, 0, first_length);
2842 String::WriteToFlat(second, dest + first_length, 0, second_length);
2847 Map* map = (is_ascii || is_ascii_data_in_two_byte_string) ?
2848 cons_ascii_string_map() : cons_string_map();
2851 { MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
2852 if (!maybe_result->ToObject(&result)) return maybe_result;
2855 AssertNoAllocation no_gc;
2856 ConsString* cons_string = ConsString::cast(result);
2857 WriteBarrierMode mode = cons_string->GetWriteBarrierMode(no_gc);
2858 cons_string->set_length(length);
2859 cons_string->set_hash_field(String::kEmptyHashField);
2860 cons_string->set_first(first, mode);
2861 cons_string->set_second(second, mode);
2866 MaybeObject* Heap::AllocateSubString(String* buffer,
2869 PretenureFlag pretenure) {
2870 int length = end - start;
2872 return empty_string();
2873 } else if (length == 1) {
2874 return LookupSingleCharacterStringFromCode(buffer->Get(start));
2875 } else if (length == 2) {
2876 // Optimization for 2-byte strings often used as keys in a decompression
2877 // dictionary. Check whether we already have the string in the symbol
2878   // table to prevent creation of many unnecessary strings.
2879 unsigned c1 = buffer->Get(start);
2880 unsigned c2 = buffer->Get(start + 1);
2881 return MakeOrFindTwoCharacterString(this, c1, c2);
2884 // Make an attempt to flatten the buffer to reduce access time.
2885 buffer = buffer->TryFlattenGetString();
2887 if (!FLAG_string_slices ||
2888 !buffer->IsFlat() ||
2889 length < SlicedString::kMinLength ||
2890 pretenure == TENURED) {
2892 // WriteToFlat takes care of the case when an indirect string has a
2893 // different encoding from its underlying string. These encodings may
2894 // differ because of externalization.
2895 bool is_ascii = buffer->IsAsciiRepresentation();
2896 { MaybeObject* maybe_result = is_ascii
2897 ? AllocateRawAsciiString(length, pretenure)
2898 : AllocateRawTwoByteString(length, pretenure);
2899 if (!maybe_result->ToObject(&result)) return maybe_result;
2901 String* string_result = String::cast(result);
2902 // Copy the characters into the new object.
2904 ASSERT(string_result->IsAsciiRepresentation());
2905 char* dest = SeqAsciiString::cast(string_result)->GetChars();
2906 String::WriteToFlat(buffer, dest, start, end);
2908 ASSERT(string_result->IsTwoByteRepresentation());
2909 uc16* dest = SeqTwoByteString::cast(string_result)->GetChars();
2910 String::WriteToFlat(buffer, dest, start, end);
2915 ASSERT(buffer->IsFlat());
2917 if (FLAG_verify_heap) {
2918 buffer->StringVerify();
2923 // When slicing an indirect string we use its encoding for a newly created
2924 // slice and don't check the encoding of the underlying string. This is safe
2925 // even if the encodings are different because of externalization. If an
2926 // indirect ASCII string is pointing to a two-byte string, the two-byte char
2927 // codes of the underlying string must still fit into ASCII (because
2928 // externalization must not change char codes).
2929 { Map* map = buffer->IsAsciiRepresentation()
2930 ? sliced_ascii_string_map()
2931 : sliced_string_map();
2932 MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
2933 if (!maybe_result->ToObject(&result)) return maybe_result;
2936 AssertNoAllocation no_gc;
2937 SlicedString* sliced_string = SlicedString::cast(result);
2938 sliced_string->set_length(length);
2939 sliced_string->set_hash_field(String::kEmptyHashField);
2940 if (buffer->IsConsString()) {
2941 ConsString* cons = ConsString::cast(buffer);
2942 ASSERT(cons->second()->length() == 0);
2943 sliced_string->set_parent(cons->first());
2944 sliced_string->set_offset(start);
2945 } else if (buffer->IsSlicedString()) {
2946 // Prevent nesting sliced strings.
2947 SlicedString* parent_slice = SlicedString::cast(buffer);
2948 sliced_string->set_parent(parent_slice->parent());
2949 sliced_string->set_offset(start + parent_slice->offset());
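    // Example: slicing from start == 3 of a slice that itself begins at
    // offset 5 of its backing string produces a new slice with offset 8 into
    // that same backing string, so slices never nest more than one level deep.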
2951 sliced_string->set_parent(buffer);
2952 sliced_string->set_offset(start);
2954 ASSERT(sliced_string->parent()->IsSeqString() ||
2955 sliced_string->parent()->IsExternalString());
2960 MaybeObject* Heap::AllocateExternalStringFromAscii(
2961 const ExternalAsciiString::Resource* resource) {
2962 size_t length = resource->length();
2963 if (length > static_cast<size_t>(String::kMaxLength)) {
2964 isolate()->context()->mark_out_of_memory();
2965 return Failure::OutOfMemoryException();
2968 Map* map = external_ascii_string_map();
2970 { MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
2971 if (!maybe_result->ToObject(&result)) return maybe_result;
2974 ExternalAsciiString* external_string = ExternalAsciiString::cast(result);
2975 external_string->set_length(static_cast<int>(length));
2976 external_string->set_hash_field(String::kEmptyHashField);
2977 external_string->set_resource(resource);
2983 MaybeObject* Heap::AllocateExternalStringFromTwoByte(
2984 const ExternalTwoByteString::Resource* resource) {
2985 size_t length = resource->length();
2986 if (length > static_cast<size_t>(String::kMaxLength)) {
2987 isolate()->context()->mark_out_of_memory();
2988 return Failure::OutOfMemoryException();
2991 // For small strings we check whether the resource contains only
2992 // ASCII characters. If yes, we use a different string map.
2993 static const size_t kAsciiCheckLengthLimit = 32;
2994 bool is_ascii = length <= kAsciiCheckLengthLimit &&
2995 String::IsAscii(resource->data(), static_cast<int>(length));
2996 Map* map = is_ascii ?
2997 external_string_with_ascii_data_map() : external_string_map();
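  // Only short resources (at most kAsciiCheckLengthLimit characters) are
  // scanned, so a long two-byte resource that happens to hold only ASCII
  // data still gets the generic external_string_map(); the check is a
  // best-effort optimization rather than an exact classification.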
2999 { MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
3000 if (!maybe_result->ToObject(&result)) return maybe_result;
3003 ExternalTwoByteString* external_string = ExternalTwoByteString::cast(result);
3004 external_string->set_length(static_cast<int>(length));
3005 external_string->set_hash_field(String::kEmptyHashField);
3006 external_string->set_resource(resource);
3012 MaybeObject* Heap::LookupSingleCharacterStringFromCode(uint16_t code) {
3013 if (code <= String::kMaxAsciiCharCode) {
3014 Object* value = single_character_string_cache()->get(code);
3015 if (value != undefined_value()) return value;
3018 buffer[0] = static_cast<char>(code);
3020 MaybeObject* maybe_result = LookupSymbol(Vector<const char>(buffer, 1));
3022 if (!maybe_result->ToObject(&result)) return maybe_result;
3023 single_character_string_cache()->set(code, result);
3028 { MaybeObject* maybe_result = AllocateRawTwoByteString(1);
3029 if (!maybe_result->ToObject(&result)) return maybe_result;
3031 String* answer = String::cast(result);
3032 answer->Set(0, code);
3037 MaybeObject* Heap::AllocateByteArray(int length, PretenureFlag pretenure) {
3038 if (length < 0 || length > ByteArray::kMaxLength) {
3039 return Failure::OutOfMemoryException();
3041 if (pretenure == NOT_TENURED) {
3042 return AllocateByteArray(length);
3044 int size = ByteArray::SizeFor(length);
3046 { MaybeObject* maybe_result = (size <= MaxObjectSizeInPagedSpace())
3047 ? old_data_space_->AllocateRaw(size)
3048 : lo_space_->AllocateRaw(size, NOT_EXECUTABLE);
3049 if (!maybe_result->ToObject(&result)) return maybe_result;
3052 reinterpret_cast<ByteArray*>(result)->set_map(byte_array_map());
3053 reinterpret_cast<ByteArray*>(result)->set_length(length);
3058 MaybeObject* Heap::AllocateByteArray(int length) {
3059 if (length < 0 || length > ByteArray::kMaxLength) {
3060 return Failure::OutOfMemoryException();
3062 int size = ByteArray::SizeFor(length);
3063 AllocationSpace space =
3064 (size > MaxObjectSizeInPagedSpace()) ? LO_SPACE : NEW_SPACE;
3066 { MaybeObject* maybe_result = AllocateRaw(size, space, OLD_DATA_SPACE);
3067 if (!maybe_result->ToObject(&result)) return maybe_result;
3070 reinterpret_cast<ByteArray*>(result)->set_map(byte_array_map());
3071 reinterpret_cast<ByteArray*>(result)->set_length(length);
3076 void Heap::CreateFillerObjectAt(Address addr, int size) {
3077 if (size == 0) return;
3078 HeapObject* filler = HeapObject::FromAddress(addr);
3079 if (size == kPointerSize) {
3080 filler->set_map(one_pointer_filler_map());
3081 } else if (size == 2 * kPointerSize) {
3082 filler->set_map(two_pointer_filler_map());
3084 filler->set_map(free_space_map());
3085 FreeSpace::cast(filler)->set_size(size);
3090 MaybeObject* Heap::AllocateExternalArray(int length,
3091 ExternalArrayType array_type,
3092 void* external_pointer,
3093 PretenureFlag pretenure) {
3094 AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
3096 { MaybeObject* maybe_result = AllocateRaw(ExternalArray::kAlignedSize,
3099 if (!maybe_result->ToObject(&result)) return maybe_result;
3102 reinterpret_cast<ExternalArray*>(result)->set_map(
3103 MapForExternalArrayType(array_type));
3104 reinterpret_cast<ExternalArray*>(result)->set_length(length);
3105 reinterpret_cast<ExternalArray*>(result)->set_external_pointer(
3112 MaybeObject* Heap::CreateCode(const CodeDesc& desc,
3114 Handle<Object> self_reference,
3116 // Allocate ByteArray before the Code object, so that we do not risk
3117   // leaving an uninitialized Code object (and breaking the heap).
3119 { MaybeObject* maybe_reloc_info = AllocateByteArray(desc.reloc_size, TENURED);
3120 if (!maybe_reloc_info->ToObject(&reloc_info)) return maybe_reloc_info;
3124 int body_size = RoundUp(desc.instr_size, kObjectAlignment);
3125 int obj_size = Code::SizeFor(body_size);
3126 ASSERT(IsAligned(static_cast<intptr_t>(obj_size), kCodeAlignment));
3127 MaybeObject* maybe_result;
3128 // Large code objects and code objects which should stay at a fixed address
3129 // are allocated in large object space.
3130 if (obj_size > MaxObjectSizeInPagedSpace() || immovable) {
3131 maybe_result = lo_space_->AllocateRaw(obj_size, EXECUTABLE);
3133 maybe_result = code_space_->AllocateRaw(obj_size);
3137 if (!maybe_result->ToObject(&result)) return maybe_result;
3139 // Initialize the object
3140 HeapObject::cast(result)->set_map(code_map());
3141 Code* code = Code::cast(result);
3142 ASSERT(!isolate_->code_range()->exists() ||
3143 isolate_->code_range()->contains(code->address()));
3144 code->set_instruction_size(desc.instr_size);
3145 code->set_relocation_info(ByteArray::cast(reloc_info));
3146 code->set_flags(flags);
3147 if (code->is_call_stub() || code->is_keyed_call_stub()) {
3148 code->set_check_type(RECEIVER_MAP_CHECK);
3150 code->set_deoptimization_data(empty_fixed_array());
3151 code->set_next_code_flushing_candidate(undefined_value());
3152 // Allow self references to created code object by patching the handle to
3153 // point to the newly allocated Code object.
3154 if (!self_reference.is_null()) {
3155 *(self_reference.location()) = code;
3157 // Migrate generated code.
3158 // The generated code can contain Object** values (typically from handles)
3159 // that are dereferenced during the copy to point directly to the actual heap
3160 // objects. These pointers can include references to the code object itself,
3161 // through the self_reference parameter.
3162 code->CopyFrom(desc);
3165 if (FLAG_verify_heap) {
3173 MaybeObject* Heap::CopyCode(Code* code) {
3174 // Allocate an object the same size as the code object.
3175 int obj_size = code->Size();
3176 MaybeObject* maybe_result;
3177 if (obj_size > MaxObjectSizeInPagedSpace()) {
3178 maybe_result = lo_space_->AllocateRaw(obj_size, EXECUTABLE);
3180 maybe_result = code_space_->AllocateRaw(obj_size);
3184 if (!maybe_result->ToObject(&result)) return maybe_result;
3186 // Copy code object.
3187 Address old_addr = code->address();
3188 Address new_addr = reinterpret_cast<HeapObject*>(result)->address();
3189 CopyBlock(new_addr, old_addr, obj_size);
3190 // Relocate the copy.
3191 Code* new_code = Code::cast(result);
3192 ASSERT(!isolate_->code_range()->exists() ||
3193 isolate_->code_range()->contains(code->address()));
3194 new_code->Relocate(new_addr - old_addr);
3199 MaybeObject* Heap::CopyCode(Code* code, Vector<byte> reloc_info) {
3200 // Allocate ByteArray before the Code object, so that we do not risk
3201   // leaving an uninitialized Code object (and breaking the heap).
3202 Object* reloc_info_array;
3203 { MaybeObject* maybe_reloc_info_array =
3204 AllocateByteArray(reloc_info.length(), TENURED);
3205 if (!maybe_reloc_info_array->ToObject(&reloc_info_array)) {
3206 return maybe_reloc_info_array;
3210 int new_body_size = RoundUp(code->instruction_size(), kObjectAlignment);
3212 int new_obj_size = Code::SizeFor(new_body_size);
3214 Address old_addr = code->address();
3216 size_t relocation_offset =
3217 static_cast<size_t>(code->instruction_end() - old_addr);
3219 MaybeObject* maybe_result;
3220 if (new_obj_size > MaxObjectSizeInPagedSpace()) {
3221 maybe_result = lo_space_->AllocateRaw(new_obj_size, EXECUTABLE);
3223 maybe_result = code_space_->AllocateRaw(new_obj_size);
3227 if (!maybe_result->ToObject(&result)) return maybe_result;
3229 // Copy code object.
3230 Address new_addr = reinterpret_cast<HeapObject*>(result)->address();
3232 // Copy header and instructions.
3233 memcpy(new_addr, old_addr, relocation_offset);
3235 Code* new_code = Code::cast(result);
3236 new_code->set_relocation_info(ByteArray::cast(reloc_info_array));
3238 // Copy patched rinfo.
3239 memcpy(new_code->relocation_start(), reloc_info.start(), reloc_info.length());
3241 // Relocate the copy.
3242 ASSERT(!isolate_->code_range()->exists() ||
3243 isolate_->code_range()->contains(code->address()));
3244 new_code->Relocate(new_addr - old_addr);
3247 if (FLAG_verify_heap) {
3255 MaybeObject* Heap::Allocate(Map* map, AllocationSpace space) {
3256 ASSERT(gc_state_ == NOT_IN_GC);
3257 ASSERT(map->instance_type() != MAP_TYPE);
3258 // If allocation failures are disallowed, we may allocate in a different
3259 // space when new space is full and the object is not a large object.
3260 AllocationSpace retry_space =
3261 (space != NEW_SPACE) ? space : TargetSpaceId(map->instance_type());
3263 { MaybeObject* maybe_result =
3264 AllocateRaw(map->instance_size(), space, retry_space);
3265 if (!maybe_result->ToObject(&result)) return maybe_result;
3267 HeapObject::cast(result)->set_map(map);
3272 void Heap::InitializeFunction(JSFunction* function,
3273 SharedFunctionInfo* shared,
3274 Object* prototype) {
3275 ASSERT(!prototype->IsMap());
3276 function->initialize_properties();
3277 function->initialize_elements();
3278 function->set_shared(shared);
3279 function->set_code(shared->code());
3280 function->set_prototype_or_initial_map(prototype);
3281 function->set_context(undefined_value());
3282 function->set_literals_or_bindings(empty_fixed_array());
3283 function->set_next_function_link(undefined_value());
3287 MaybeObject* Heap::AllocateFunctionPrototype(JSFunction* function) {
3288 // Allocate the prototype. Make sure to use the object function
3289 // from the function's context, since the function can be from a
3290 // different context.
3291 JSFunction* object_function =
3292 function->context()->global_context()->object_function();
3294 // Each function prototype gets a copy of the object function map.
3295   // This avoids unwanted sharing of maps between prototypes of different constructors.
3298 ASSERT(object_function->has_initial_map());
3299 { MaybeObject* maybe_map =
3300 object_function->initial_map()->CopyDropTransitions();
3301 if (!maybe_map->To<Map>(&new_map)) return maybe_map;
3304 { MaybeObject* maybe_prototype = AllocateJSObjectFromMap(new_map);
3305 if (!maybe_prototype->ToObject(&prototype)) return maybe_prototype;
3307 // When creating the prototype for the function we must set its
3308 // constructor to the function.
3310 { MaybeObject* maybe_result =
3311 JSObject::cast(prototype)->SetLocalPropertyIgnoreAttributes(
3312 constructor_symbol(), function, DONT_ENUM);
3313 if (!maybe_result->ToObject(&result)) return maybe_result;
3319 MaybeObject* Heap::AllocateFunction(Map* function_map,
3320 SharedFunctionInfo* shared,
3322 PretenureFlag pretenure) {
3323 AllocationSpace space =
3324 (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
3326 { MaybeObject* maybe_result = Allocate(function_map, space);
3327 if (!maybe_result->ToObject(&result)) return maybe_result;
3329 InitializeFunction(JSFunction::cast(result), shared, prototype);
3334 MaybeObject* Heap::AllocateArgumentsObject(Object* callee, int length) {
3335 // To get fast allocation and map sharing for arguments objects we
3336 // allocate them based on an arguments boilerplate.
3338 JSObject* boilerplate;
3339 int arguments_object_size;
3340 bool strict_mode_callee = callee->IsJSFunction() &&
3341 JSFunction::cast(callee)->shared()->strict_mode();
3342 if (strict_mode_callee) {
3344 isolate()->context()->global_context()->
3345 strict_mode_arguments_boilerplate();
3346 arguments_object_size = kArgumentsObjectSizeStrict;
3349 isolate()->context()->global_context()->arguments_boilerplate();
3350 arguments_object_size = kArgumentsObjectSize;
3353 // This calls Copy directly rather than using Heap::AllocateRaw so we
3354 // duplicate the check here.
3355 ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
3357 // Check that the size of the boilerplate matches our
3358 // expectations. The ArgumentsAccessStub::GenerateNewObject relies
3359 // on the size being a known constant.
3360 ASSERT(arguments_object_size == boilerplate->map()->instance_size());
3362 // Do the allocation.
3364 { MaybeObject* maybe_result =
3365 AllocateRaw(arguments_object_size, NEW_SPACE, OLD_POINTER_SPACE);
3366 if (!maybe_result->ToObject(&result)) return maybe_result;
3369 // Copy the content. The arguments boilerplate doesn't have any
3370   // fields that point to new space, so it's safe to skip the write barrier.
3372 CopyBlock(HeapObject::cast(result)->address(),
3373 boilerplate->address(),
3374 JSObject::kHeaderSize);
3376 // Set the length property.
3377 JSObject::cast(result)->InObjectPropertyAtPut(kArgumentsLengthIndex,
3378 Smi::FromInt(length),
3379 SKIP_WRITE_BARRIER);
3380 // Set the callee property for non-strict mode arguments object only.
3381 if (!strict_mode_callee) {
3382 JSObject::cast(result)->InObjectPropertyAtPut(kArgumentsCalleeIndex,
3386 // Check the state of the object
3387 ASSERT(JSObject::cast(result)->HasFastProperties());
3388 ASSERT(JSObject::cast(result)->HasFastElements());
3394 static bool HasDuplicates(DescriptorArray* descriptors) {
3395 int count = descriptors->number_of_descriptors();
3397 String* prev_key = descriptors->GetKey(0);
3398 for (int i = 1; i != count; i++) {
3399 String* current_key = descriptors->GetKey(i);
3400 if (prev_key == current_key) return true;
3401 prev_key = current_key;
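// HasDuplicates assumes the descriptor array is sorted by key so that equal
// keys are adjacent; it is only called below after SortUnchecked() has run.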
3408 MaybeObject* Heap::AllocateInitialMap(JSFunction* fun) {
3409 ASSERT(!fun->has_initial_map());
3411 // First create a new map with the size and number of in-object properties
3412 // suggested by the function.
3413 int instance_size = fun->shared()->CalculateInstanceSize();
3414 int in_object_properties = fun->shared()->CalculateInObjectProperties();
3416 { MaybeObject* maybe_map_obj = AllocateMap(JS_OBJECT_TYPE, instance_size);
3417 if (!maybe_map_obj->ToObject(&map_obj)) return maybe_map_obj;
3420 // Fetch or allocate prototype.
3422 if (fun->has_instance_prototype()) {
3423 prototype = fun->instance_prototype();
3425 { MaybeObject* maybe_prototype = AllocateFunctionPrototype(fun);
3426 if (!maybe_prototype->ToObject(&prototype)) return maybe_prototype;
3429 Map* map = Map::cast(map_obj);
3430 map->set_inobject_properties(in_object_properties);
3431 map->set_unused_property_fields(in_object_properties);
3432 map->set_prototype(prototype);
3433 ASSERT(map->has_fast_elements());
3435 // If the function has only simple this property assignments add
3436 // field descriptors for these to the initial map as the object
3437 // cannot be constructed without having these properties. Guard by
3438 // the inline_new flag so we only change the map if we generate a
3439 // specialized construct stub.
3440 ASSERT(in_object_properties <= Map::kMaxPreAllocatedPropertyFields);
3441 if (fun->shared()->CanGenerateInlineConstructor(prototype)) {
3442 int count = fun->shared()->this_property_assignments_count();
3443 if (count > in_object_properties) {
3444 // Inline constructor can only handle inobject properties.
3445 fun->shared()->ForbidInlineConstructor();
3447 DescriptorArray* descriptors;
3448 { MaybeObject* maybe_descriptors_obj = DescriptorArray::Allocate(count);
3449 if (!maybe_descriptors_obj->To<DescriptorArray>(&descriptors)) {
3450 return maybe_descriptors_obj;
3453 DescriptorArray::WhitenessWitness witness(descriptors);
3454 for (int i = 0; i < count; i++) {
3455 String* name = fun->shared()->GetThisPropertyAssignmentName(i);
3456 ASSERT(name->IsSymbol());
3457 FieldDescriptor field(name, i, NONE);
3458 field.SetEnumerationIndex(i);
3459 descriptors->Set(i, &field, witness);
3461 descriptors->SetNextEnumerationIndex(count);
3462 descriptors->SortUnchecked(witness);
3464 // The descriptors may contain duplicates because the compiler does not
3465 // guarantee the uniqueness of property names (it would have required
3466 // quadratic time). Once the descriptors are sorted we can check for
3467 // duplicates in linear time.
3468 if (HasDuplicates(descriptors)) {
3469 fun->shared()->ForbidInlineConstructor();
3471 map->set_instance_descriptors(descriptors);
3472 map->set_pre_allocated_property_fields(count);
3473 map->set_unused_property_fields(in_object_properties - count);
3478 fun->shared()->StartInobjectSlackTracking(map);
3484 void Heap::InitializeJSObjectFromMap(JSObject* obj,
3485 FixedArray* properties,
3487 obj->set_properties(properties);
3488 obj->initialize_elements();
3489 // TODO(1240798): Initialize the object's body using valid initial values
3490 // according to the object's initial map. For example, if the map's
3491 // instance type is JS_ARRAY_TYPE, the length field should be initialized
3492 // to a number (eg, Smi::FromInt(0)) and the elements initialized to a
3493 // fixed array (eg, Heap::empty_fixed_array()). Currently, the object
3494 // verification code has to cope with (temporarily) invalid objects. See
3495 // for example, JSArray::JSArrayVerify).
3497 // We cannot always fill with one_pointer_filler_map because objects
3498 // created from API functions expect their internal fields to be initialized
3499 // with undefined_value.
3500 // Pre-allocated fields need to be initialized with undefined_value as well
3501 // so that object accesses before the constructor completes (e.g. in the
3502 // debugger) will not cause a crash.
3503 if (map->constructor()->IsJSFunction() &&
3504 JSFunction::cast(map->constructor())->shared()->
3505 IsInobjectSlackTrackingInProgress()) {
3506 // We might want to shrink the object later.
3507 ASSERT(obj->GetInternalFieldCount() == 0);
3508 filler = Heap::one_pointer_filler_map();
3510 filler = Heap::undefined_value();
3512 obj->InitializeBody(map, Heap::undefined_value(), filler);
3516 MaybeObject* Heap::AllocateJSObjectFromMap(Map* map, PretenureFlag pretenure) {
3517 // JSFunctions should be allocated using AllocateFunction to be
3518 // properly initialized.
3519 ASSERT(map->instance_type() != JS_FUNCTION_TYPE);
3521 // Both types of global objects should be allocated using
3522 // AllocateGlobalObject to be properly initialized.
3523 ASSERT(map->instance_type() != JS_GLOBAL_OBJECT_TYPE);
3524 ASSERT(map->instance_type() != JS_BUILTINS_OBJECT_TYPE);
3526 // Allocate the backing storage for the properties.
3528   int prop_size = map->pre_allocated_property_fields() +
3529 map->unused_property_fields() -
3530 map->inobject_properties();
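  // prop_size is the number of property slots that do not fit inside the
  // object itself: the expected field count (pre-allocated plus unused)
  // minus the in-object fields, so only the overflow needs a backing
  // FixedArray.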
3531 ASSERT(prop_size >= 0);
3533 { MaybeObject* maybe_properties = AllocateFixedArray(prop_size, pretenure);
3534 if (!maybe_properties->ToObject(&properties)) return maybe_properties;
3537 // Allocate the JSObject.
3538 AllocationSpace space =
3539 (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
3540 if (map->instance_size() > MaxObjectSizeInPagedSpace()) space = LO_SPACE;
3542 { MaybeObject* maybe_obj = Allocate(map, space);
3543 if (!maybe_obj->ToObject(&obj)) return maybe_obj;
3546 // Initialize the JSObject.
3547 InitializeJSObjectFromMap(JSObject::cast(obj),
3548 FixedArray::cast(properties),
3550 ASSERT(JSObject::cast(obj)->HasFastSmiOnlyElements() ||
3551 JSObject::cast(obj)->HasFastElements());
3556 MaybeObject* Heap::AllocateJSObject(JSFunction* constructor,
3557 PretenureFlag pretenure) {
3558 // Allocate the initial map if absent.
3559 if (!constructor->has_initial_map()) {
3560 Object* initial_map;
3561 { MaybeObject* maybe_initial_map = AllocateInitialMap(constructor);
3562 if (!maybe_initial_map->ToObject(&initial_map)) return maybe_initial_map;
3564 constructor->set_initial_map(Map::cast(initial_map));
3565 Map::cast(initial_map)->set_constructor(constructor);
3567 // Allocate the object based on the constructors initial map.
3568 MaybeObject* result =
3569 AllocateJSObjectFromMap(constructor->initial_map(), pretenure);
3571 // Make sure result is NOT a global object if valid.
3572 Object* non_failure;
3573 ASSERT(!result->ToObject(&non_failure) || !non_failure->IsGlobalObject());
3579 MaybeObject* Heap::AllocateJSProxy(Object* handler, Object* prototype) {
3581 // TODO(rossberg): Once we optimize proxies, think about a scheme to share
3582 // maps. Will probably depend on the identity of the handler object, too.
3584 MaybeObject* maybe_map_obj = AllocateMap(JS_PROXY_TYPE, JSProxy::kSize);
3585 if (!maybe_map_obj->To<Map>(&map)) return maybe_map_obj;
3586 map->set_prototype(prototype);
3588 // Allocate the proxy object.
3590 MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
3591 if (!maybe_result->To<JSProxy>(&result)) return maybe_result;
3592 result->InitializeBody(map->instance_size(), Smi::FromInt(0));
3593 result->set_handler(handler);
3594 result->set_hash(undefined_value());
3599 MaybeObject* Heap::AllocateJSFunctionProxy(Object* handler,
3601 Object* construct_trap,
3602 Object* prototype) {
3604 // TODO(rossberg): Once we optimize proxies, think about a scheme to share
3605 // maps. Will probably depend on the identity of the handler object, too.
3607 MaybeObject* maybe_map_obj =
3608 AllocateMap(JS_FUNCTION_PROXY_TYPE, JSFunctionProxy::kSize);
3609 if (!maybe_map_obj->To<Map>(&map)) return maybe_map_obj;
3610 map->set_prototype(prototype);
3612 // Allocate the proxy object.
3613 JSFunctionProxy* result;
3614 MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
3615 if (!maybe_result->To<JSFunctionProxy>(&result)) return maybe_result;
3616 result->InitializeBody(map->instance_size(), Smi::FromInt(0));
3617 result->set_handler(handler);
3618 result->set_hash(undefined_value());
3619 result->set_call_trap(call_trap);
3620 result->set_construct_trap(construct_trap);
3625 MaybeObject* Heap::AllocateGlobalObject(JSFunction* constructor) {
3626 ASSERT(constructor->has_initial_map());
3627 Map* map = constructor->initial_map();
3629 // Make sure no field properties are described in the initial map.
3630 // This guarantees us that normalizing the properties does not
3631 // require us to change property values to JSGlobalPropertyCells.
3632 ASSERT(map->NextFreePropertyIndex() == 0);
3634 // Make sure we don't have a ton of pre-allocated slots in the
3635 // global objects. They will be unused once we normalize the object.
3636 ASSERT(map->unused_property_fields() == 0);
3637 ASSERT(map->inobject_properties() == 0);
3639 // Initial size of the backing store to avoid resize of the storage during
3640  // bootstrapping. The size differs between the JS global object and the
3641  // builtins object.
3642 int initial_size = map->instance_type() == JS_GLOBAL_OBJECT_TYPE ? 64 : 512;
3644 // Allocate a dictionary object for backing storage.
3646 { MaybeObject* maybe_obj =
3647 StringDictionary::Allocate(
3648 map->NumberOfDescribedProperties() * 2 + initial_size);
3649 if (!maybe_obj->ToObject(&obj)) return maybe_obj;
3651 StringDictionary* dictionary = StringDictionary::cast(obj);
3653 // The global object might be created from an object template with accessors.
3654 // Fill these accessors into the dictionary.
3655 DescriptorArray* descs = map->instance_descriptors();
3656 for (int i = 0; i < descs->number_of_descriptors(); i++) {
3657 PropertyDetails details(descs->GetDetails(i));
3658 ASSERT(details.type() == CALLBACKS); // Only accessors are expected.
3660 PropertyDetails(details.attributes(), CALLBACKS, details.index());
3661 Object* value = descs->GetCallbacksObject(i);
3662 { MaybeObject* maybe_value = AllocateJSGlobalPropertyCell(value);
3663 if (!maybe_value->ToObject(&value)) return maybe_value;
3667 { MaybeObject* maybe_result = dictionary->Add(descs->GetKey(i), value, d);
3668 if (!maybe_result->ToObject(&result)) return maybe_result;
3670 dictionary = StringDictionary::cast(result);
3673 // Allocate the global object and initialize it with the backing store.
3674 { MaybeObject* maybe_obj = Allocate(map, OLD_POINTER_SPACE);
3675 if (!maybe_obj->ToObject(&obj)) return maybe_obj;
3677 JSObject* global = JSObject::cast(obj);
3678 InitializeJSObjectFromMap(global, dictionary, map);
3680 // Create a new map for the global object.
3681 { MaybeObject* maybe_obj = map->CopyDropDescriptors();
3682 if (!maybe_obj->ToObject(&obj)) return maybe_obj;
3684 Map* new_map = Map::cast(obj);
3686  // Set up the global object as a normalized object.
3687 global->set_map(new_map);
3688 global->map()->clear_instance_descriptors();
3689 global->set_properties(dictionary);
3691 // Make sure result is a global object with properties in dictionary.
3692 ASSERT(global->IsGlobalObject());
3693 ASSERT(!global->HasFastProperties());
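// Illustrative sizing (hypothetical descriptor count): a JS global object
// with 10 described properties gets a dictionary of 10 * 2 + 64 = 84 entries,
// while any other global object (presumably the builtins object) would get
// 10 * 2 + 512 = 532 entries.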
3698 MaybeObject* Heap::CopyJSObject(JSObject* source) {
3699 // Never used to copy functions. If functions need to be copied we
3700 // have to be careful to clear the literals array.
3701 SLOW_ASSERT(!source->IsJSFunction());
3704 Map* map = source->map();
3705 int object_size = map->instance_size();
3708 WriteBarrierMode wb_mode = UPDATE_WRITE_BARRIER;
3710 // If we're forced to always allocate, we use the general allocation
3711 // functions which may leave us with an object in old space.
3712 if (always_allocate()) {
3713 { MaybeObject* maybe_clone =
3714 AllocateRaw(object_size, NEW_SPACE, OLD_POINTER_SPACE);
3715 if (!maybe_clone->ToObject(&clone)) return maybe_clone;
3717 Address clone_address = HeapObject::cast(clone)->address();
3718 CopyBlock(clone_address,
3721 // Update write barrier for all fields that lie beyond the header.
3722 RecordWrites(clone_address,
3723 JSObject::kHeaderSize,
3724 (object_size - JSObject::kHeaderSize) / kPointerSize);
3726 wb_mode = SKIP_WRITE_BARRIER;
3727 { MaybeObject* maybe_clone = new_space_.AllocateRaw(object_size);
3728 if (!maybe_clone->ToObject(&clone)) return maybe_clone;
3730 SLOW_ASSERT(InNewSpace(clone));
3731 // Since we know the clone is allocated in new space, we can copy
3732 // the contents without worrying about updating the write barrier.
3733 CopyBlock(HeapObject::cast(clone)->address(),
3739 JSObject::cast(clone)->GetElementsKind() == source->GetElementsKind());
3740 FixedArrayBase* elements = FixedArrayBase::cast(source->elements());
3741 FixedArray* properties = FixedArray::cast(source->properties());
3742 // Update elements if necessary.
3743 if (elements->length() > 0) {
3745 { MaybeObject* maybe_elem;
3746 if (elements->map() == fixed_cow_array_map()) {
3747 maybe_elem = FixedArray::cast(elements);
3748 } else if (source->HasFastDoubleElements()) {
3749 maybe_elem = CopyFixedDoubleArray(FixedDoubleArray::cast(elements));
3751 maybe_elem = CopyFixedArray(FixedArray::cast(elements));
3753 if (!maybe_elem->ToObject(&elem)) return maybe_elem;
3755 JSObject::cast(clone)->set_elements(FixedArrayBase::cast(elem), wb_mode);
3757 // Update properties if necessary.
3758 if (properties->length() > 0) {
3760 { MaybeObject* maybe_prop = CopyFixedArray(properties);
3761 if (!maybe_prop->ToObject(&prop)) return maybe_prop;
3763 JSObject::cast(clone)->set_properties(FixedArray::cast(prop), wb_mode);
3765 // Return the new clone.
3770 MaybeObject* Heap::ReinitializeJSReceiver(
3771 JSReceiver* object, InstanceType type, int size) {
3772 ASSERT(type >= FIRST_JS_OBJECT_TYPE);
3774 // Allocate fresh map.
3775 // TODO(rossberg): Once we optimize proxies, cache these maps.
3777 MaybeObject* maybe = AllocateMap(type, size);
3778 if (!maybe->To<Map>(&map)) return maybe;
3780  // Check that the receiver is at least as large as the freshly allocated object.
3781 int size_difference = object->map()->instance_size() - map->instance_size();
3782 ASSERT(size_difference >= 0);
3784 map->set_prototype(object->map()->prototype());
3786 // Allocate the backing storage for the properties.
3787 int prop_size = map->unused_property_fields() - map->inobject_properties();
3789 maybe = AllocateFixedArray(prop_size, TENURED);
3790 if (!maybe->ToObject(&properties)) return maybe;
3792 // Functions require some allocation, which might fail here.
3793 SharedFunctionInfo* shared = NULL;
3794 if (type == JS_FUNCTION_TYPE) {
3796 maybe = LookupAsciiSymbol("<freezing call trap>");
3797 if (!maybe->To<String>(&name)) return maybe;
3798 maybe = AllocateSharedFunctionInfo(name);
3799 if (!maybe->To<SharedFunctionInfo>(&shared)) return maybe;
3802 // Because of possible retries of this function after failure,
3803 // we must NOT fail after this point, where we have changed the type!
3805 // Reset the map for the object.
3806 object->set_map(map);
3807 JSObject* jsobj = JSObject::cast(object);
3809 // Reinitialize the object from the constructor map.
3810 InitializeJSObjectFromMap(jsobj, FixedArray::cast(properties), map);
3812 // Functions require some minimal initialization.
3813 if (type == JS_FUNCTION_TYPE) {
3814 map->set_function_with_prototype(true);
3815 InitializeFunction(JSFunction::cast(object), shared, the_hole_value());
3816 JSFunction::cast(object)->set_context(
3817 isolate()->context()->global_context());
3820 // Put in filler if the new object is smaller than the old.
3821 if (size_difference > 0) {
3822 CreateFillerObjectAt(
3823 object->address() + map->instance_size(), size_difference);
3830 MaybeObject* Heap::ReinitializeJSGlobalProxy(JSFunction* constructor,
3831 JSGlobalProxy* object) {
3832 ASSERT(constructor->has_initial_map());
3833 Map* map = constructor->initial_map();
3835 // Check that the already allocated object has the same size and type as
3836 // objects allocated using the constructor.
3837 ASSERT(map->instance_size() == object->map()->instance_size());
3838 ASSERT(map->instance_type() == object->map()->instance_type());
3840 // Allocate the backing storage for the properties.
3841 int prop_size = map->unused_property_fields() - map->inobject_properties();
3843 { MaybeObject* maybe_properties = AllocateFixedArray(prop_size, TENURED);
3844 if (!maybe_properties->ToObject(&properties)) return maybe_properties;
3847 // Reset the map for the object.
3848 object->set_map(constructor->initial_map());
3850 // Reinitialize the object from the constructor map.
3851 InitializeJSObjectFromMap(object, FixedArray::cast(properties), map);
3856 MaybeObject* Heap::AllocateStringFromAscii(Vector<const char> string,
3857 PretenureFlag pretenure) {
3858 if (string.length() == 1) {
3859 return Heap::LookupSingleCharacterStringFromCode(string[0]);
3862 { MaybeObject* maybe_result =
3863 AllocateRawAsciiString(string.length(), pretenure);
3864 if (!maybe_result->ToObject(&result)) return maybe_result;
3867 // Copy the characters into the new object.
3868 SeqAsciiString* string_result = SeqAsciiString::cast(result);
3869 for (int i = 0; i < string.length(); i++) {
3870 string_result->SeqAsciiStringSet(i, string[i]);
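// Usage sketch (illustrative only; CStrVector is assumed to wrap a
// NUL-terminated C string into a Vector<const char>):
//
//   Object* str;
//   { MaybeObject* maybe_str =
//         heap->AllocateStringFromAscii(CStrVector("hello"));
//     if (!maybe_str->ToObject(&str)) return maybe_str;
//   }
//   ASSERT(String::cast(str)->length() == 5);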
3876 MaybeObject* Heap::AllocateStringFromUtf8Slow(Vector<const char> string,
3877 PretenureFlag pretenure) {
3878 // V8 only supports characters in the Basic Multilingual Plane.
3879 const uc32 kMaxSupportedChar = 0xFFFF;
3880 // Count the number of characters in the UTF-8 string and check if
3881 // it is an ASCII string.
3882 Access<UnicodeCache::Utf8Decoder>
3883 decoder(isolate_->unicode_cache()->utf8_decoder());
3884 decoder->Reset(string.start(), string.length());
3886 while (decoder->has_more()) {
3892 { MaybeObject* maybe_result = AllocateRawTwoByteString(chars, pretenure);
3893 if (!maybe_result->ToObject(&result)) return maybe_result;
3896 // Convert and copy the characters into the new object.
3897 String* string_result = String::cast(result);
3898 decoder->Reset(string.start(), string.length());
3899 for (int i = 0; i < chars; i++) {
3900 uc32 r = decoder->GetNext();
3901 if (r > kMaxSupportedChar) { r = unibrow::Utf8::kBadChar; }
3902 string_result->Set(i, r);
3908 MaybeObject* Heap::AllocateStringFromTwoByte(Vector<const uc16> string,
3909 PretenureFlag pretenure) {
3910 // Check if the string is an ASCII string.
3911 MaybeObject* maybe_result;
3912 if (String::IsAscii(string.start(), string.length())) {
3913 maybe_result = AllocateRawAsciiString(string.length(), pretenure);
3914 } else { // It's not an ASCII string.
3915 maybe_result = AllocateRawTwoByteString(string.length(), pretenure);
3918 if (!maybe_result->ToObject(&result)) return maybe_result;
3920  // Copy the characters into the new object, which may be either ASCII or two-byte.
3922 String* string_result = String::cast(result);
3923 for (int i = 0; i < string.length(); i++) {
3924 string_result->Set(i, string[i]);
3930 Map* Heap::SymbolMapForString(String* string) {
3931 // If the string is in new space it cannot be used as a symbol.
3932 if (InNewSpace(string)) return NULL;
3934 // Find the corresponding symbol map for strings.
3935 Map* map = string->map();
3936 if (map == ascii_string_map()) {
3937 return ascii_symbol_map();
3939 if (map == string_map()) {
3940 return symbol_map();
3942 if (map == cons_string_map()) {
3943 return cons_symbol_map();
3945 if (map == cons_ascii_string_map()) {
3946 return cons_ascii_symbol_map();
3948 if (map == external_string_map()) {
3949 return external_symbol_map();
3951 if (map == external_ascii_string_map()) {
3952 return external_ascii_symbol_map();
3954 if (map == external_string_with_ascii_data_map()) {
3955 return external_symbol_with_ascii_data_map();
3963 MaybeObject* Heap::AllocateInternalSymbol(unibrow::CharacterStream* buffer,
3965 uint32_t hash_field) {
3967  // Ensure that chars matches the number of characters in the buffer.
3968 ASSERT(static_cast<unsigned>(chars) == buffer->Length());
3969  // Determine whether the string is ASCII.
3970 bool is_ascii = true;
3971 while (buffer->has_more()) {
3972 if (buffer->GetNext() > unibrow::Utf8::kMaxOneByteChar) {
3979 // Compute map and object size.
3984 if (chars > SeqAsciiString::kMaxLength) {
3985 return Failure::OutOfMemoryException();
3987 map = ascii_symbol_map();
3988 size = SeqAsciiString::SizeFor(chars);
3990 if (chars > SeqTwoByteString::kMaxLength) {
3991 return Failure::OutOfMemoryException();
3994 size = SeqTwoByteString::SizeFor(chars);
3999 { MaybeObject* maybe_result = (size > MaxObjectSizeInPagedSpace())
4000 ? lo_space_->AllocateRaw(size, NOT_EXECUTABLE)
4001 : old_data_space_->AllocateRaw(size);
4002 if (!maybe_result->ToObject(&result)) return maybe_result;
4005 reinterpret_cast<HeapObject*>(result)->set_map(map);
4006 // Set length and hash fields of the allocated string.
4007 String* answer = String::cast(result);
4008 answer->set_length(chars);
4009 answer->set_hash_field(hash_field);
4010 SeqString::cast(answer)->set_symbol_id(0);
4012 ASSERT_EQ(size, answer->Size());
4014 // Fill in the characters.
4015 for (int i = 0; i < chars; i++) {
4016 answer->Set(i, buffer->GetNext());
4022 MaybeObject* Heap::AllocateRawAsciiString(int length, PretenureFlag pretenure) {
4023 if (length < 0 || length > SeqAsciiString::kMaxLength) {
4024 return Failure::OutOfMemoryException();
4027 int size = SeqAsciiString::SizeFor(length);
4028 ASSERT(size <= SeqAsciiString::kMaxSize);
4030 AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
4031 AllocationSpace retry_space = OLD_DATA_SPACE;
4033 if (space == NEW_SPACE) {
4034 if (size > kMaxObjectSizeInNewSpace) {
4035      // Allocate in large object space; the retry space will be ignored.
4037 } else if (size > MaxObjectSizeInPagedSpace()) {
4038 // Allocate in new space, retry in large object space.
4039 retry_space = LO_SPACE;
4041 } else if (space == OLD_DATA_SPACE && size > MaxObjectSizeInPagedSpace()) {
4045 { MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
4046 if (!maybe_result->ToObject(&result)) return maybe_result;
4049 // Partially initialize the object.
4050 HeapObject::cast(result)->set_map(ascii_string_map());
4051 String::cast(result)->set_length(length);
4052 String::cast(result)->set_hash_field(String::kEmptyHashField);
4053 SeqString::cast(result)->set_symbol_id(0);
4054 ASSERT_EQ(size, HeapObject::cast(result)->Size());
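// Space selection above, summarized (paraphrasing the code; the elided
// branches presumably set space = LO_SPACE):
//   TENURED:     OLD_DATA_SPACE, or LO_SPACE if the string exceeds the
//                paged-space object size limit.
//   NOT_TENURED: NEW_SPACE with OLD_DATA_SPACE as retry space; LO_SPACE
//                directly if too big for new space, or as retry space if the
//                string fits in new space but not in a paged space.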
4059 MaybeObject* Heap::AllocateRawTwoByteString(int length,
4060 PretenureFlag pretenure) {
4061 if (length < 0 || length > SeqTwoByteString::kMaxLength) {
4062 return Failure::OutOfMemoryException();
4064 int size = SeqTwoByteString::SizeFor(length);
4065 ASSERT(size <= SeqTwoByteString::kMaxSize);
4066 AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
4067 AllocationSpace retry_space = OLD_DATA_SPACE;
4069 if (space == NEW_SPACE) {
4070 if (size > kMaxObjectSizeInNewSpace) {
4071      // Allocate in large object space; the retry space will be ignored.
4073 } else if (size > MaxObjectSizeInPagedSpace()) {
4074 // Allocate in new space, retry in large object space.
4075 retry_space = LO_SPACE;
4077 } else if (space == OLD_DATA_SPACE && size > MaxObjectSizeInPagedSpace()) {
4081 { MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
4082 if (!maybe_result->ToObject(&result)) return maybe_result;
4085 // Partially initialize the object.
4086 HeapObject::cast(result)->set_map(string_map());
4087 String::cast(result)->set_length(length);
4088 String::cast(result)->set_hash_field(String::kEmptyHashField);
4089 SeqString::cast(result)->set_symbol_id(0);
4090 ASSERT_EQ(size, HeapObject::cast(result)->Size());
4095 MaybeObject* Heap::AllocateEmptyFixedArray() {
4096 int size = FixedArray::SizeFor(0);
4098 { MaybeObject* maybe_result =
4099 AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE);
4100 if (!maybe_result->ToObject(&result)) return maybe_result;
4102 // Initialize the object.
4103 reinterpret_cast<FixedArray*>(result)->set_map(fixed_array_map());
4104 reinterpret_cast<FixedArray*>(result)->set_length(0);
4109 MaybeObject* Heap::AllocateRawFixedArray(int length) {
4110 if (length < 0 || length > FixedArray::kMaxLength) {
4111 return Failure::OutOfMemoryException();
4114 // Use the general function if we're forced to always allocate.
4115 if (always_allocate()) return AllocateFixedArray(length, TENURED);
4116 // Allocate the raw data for a fixed array.
4117 int size = FixedArray::SizeFor(length);
4118 return size <= kMaxObjectSizeInNewSpace
4119 ? new_space_.AllocateRaw(size)
4120 : lo_space_->AllocateRaw(size, NOT_EXECUTABLE);
4124 MaybeObject* Heap::CopyFixedArrayWithMap(FixedArray* src, Map* map) {
4125 int len = src->length();
4127 { MaybeObject* maybe_obj = AllocateRawFixedArray(len);
4128 if (!maybe_obj->ToObject(&obj)) return maybe_obj;
4130 if (InNewSpace(obj)) {
4131 HeapObject* dst = HeapObject::cast(obj);
4133 CopyBlock(dst->address() + kPointerSize,
4134 src->address() + kPointerSize,
4135 FixedArray::SizeFor(len) - kPointerSize);
4138 HeapObject::cast(obj)->set_map(map);
4139 FixedArray* result = FixedArray::cast(obj);
4140 result->set_length(len);
4143 AssertNoAllocation no_gc;
4144 WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
4145 for (int i = 0; i < len; i++) result->set(i, src->get(i), mode);
4150 MaybeObject* Heap::CopyFixedDoubleArrayWithMap(FixedDoubleArray* src,
4152 int len = src->length();
4154 { MaybeObject* maybe_obj = AllocateRawFixedDoubleArray(len, NOT_TENURED);
4155 if (!maybe_obj->ToObject(&obj)) return maybe_obj;
4157 HeapObject* dst = HeapObject::cast(obj);
4160 dst->address() + FixedDoubleArray::kLengthOffset,
4161 src->address() + FixedDoubleArray::kLengthOffset,
4162 FixedDoubleArray::SizeFor(len) - FixedDoubleArray::kLengthOffset);
4167 MaybeObject* Heap::AllocateFixedArray(int length) {
4168 ASSERT(length >= 0);
4169 if (length == 0) return empty_fixed_array();
4171 { MaybeObject* maybe_result = AllocateRawFixedArray(length);
4172 if (!maybe_result->ToObject(&result)) return maybe_result;
4174 // Initialize header.
4175 FixedArray* array = reinterpret_cast<FixedArray*>(result);
4176 array->set_map(fixed_array_map());
4177 array->set_length(length);
4179 ASSERT(!InNewSpace(undefined_value()));
4180 MemsetPointer(array->data_start(), undefined_value(), length);
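// Usage sketch (illustrative only):
//
//   Object* obj;
//   { MaybeObject* maybe_obj = heap->AllocateFixedArray(16);
//     if (!maybe_obj->ToObject(&obj)) return maybe_obj;
//   }
//   FixedArray* array = FixedArray::cast(obj);
//   ASSERT(array->get(0)->IsUndefined());  // slots start out undefined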
4185 MaybeObject* Heap::AllocateRawFixedArray(int length, PretenureFlag pretenure) {
4186 if (length < 0 || length > FixedArray::kMaxLength) {
4187 return Failure::OutOfMemoryException();
4190 AllocationSpace space =
4191 (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
4192 int size = FixedArray::SizeFor(length);
4193 if (space == NEW_SPACE && size > kMaxObjectSizeInNewSpace) {
4194 // Too big for new space.
4196 } else if (space == OLD_POINTER_SPACE &&
4197 size > MaxObjectSizeInPagedSpace()) {
4198 // Too big for old pointer space.
4202 AllocationSpace retry_space =
4203 (size <= MaxObjectSizeInPagedSpace()) ? OLD_POINTER_SPACE : LO_SPACE;
4205 return AllocateRaw(size, space, retry_space);
4209 MUST_USE_RESULT static MaybeObject* AllocateFixedArrayWithFiller(
4212 PretenureFlag pretenure,
4214 ASSERT(length >= 0);
4215 ASSERT(heap->empty_fixed_array()->IsFixedArray());
4216 if (length == 0) return heap->empty_fixed_array();
4218 ASSERT(!heap->InNewSpace(filler));
4220 { MaybeObject* maybe_result = heap->AllocateRawFixedArray(length, pretenure);
4221 if (!maybe_result->ToObject(&result)) return maybe_result;
4224 HeapObject::cast(result)->set_map(heap->fixed_array_map());
4225 FixedArray* array = FixedArray::cast(result);
4226 array->set_length(length);
4227 MemsetPointer(array->data_start(), filler, length);
4232 MaybeObject* Heap::AllocateFixedArray(int length, PretenureFlag pretenure) {
4233 return AllocateFixedArrayWithFiller(this,
4240 MaybeObject* Heap::AllocateFixedArrayWithHoles(int length,
4241 PretenureFlag pretenure) {
4242 return AllocateFixedArrayWithFiller(this,
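// The two wrappers above presumably differ only in the filler they pass to
// AllocateFixedArrayWithFiller: AllocateFixedArray fills new slots with
// undefined_value(), whereas AllocateFixedArrayWithHoles fills them with
// the_hole_value().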
4249 MaybeObject* Heap::AllocateUninitializedFixedArray(int length) {
4250 if (length == 0) return empty_fixed_array();
4253 { MaybeObject* maybe_obj = AllocateRawFixedArray(length);
4254 if (!maybe_obj->ToObject(&obj)) return maybe_obj;
4257 reinterpret_cast<FixedArray*>(obj)->set_map(fixed_array_map());
4258 FixedArray::cast(obj)->set_length(length);
4263 MaybeObject* Heap::AllocateEmptyFixedDoubleArray() {
4264 int size = FixedDoubleArray::SizeFor(0);
4266 { MaybeObject* maybe_result =
4267 AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE);
4268 if (!maybe_result->ToObject(&result)) return maybe_result;
4270 // Initialize the object.
4271 reinterpret_cast<FixedDoubleArray*>(result)->set_map(
4272 fixed_double_array_map());
4273 reinterpret_cast<FixedDoubleArray*>(result)->set_length(0);
4278 MaybeObject* Heap::AllocateUninitializedFixedDoubleArray(
4280 PretenureFlag pretenure) {
4281 if (length == 0) return empty_fixed_double_array();
4284 { MaybeObject* maybe_obj = AllocateRawFixedDoubleArray(length, pretenure);
4285 if (!maybe_obj->ToObject(&obj)) return maybe_obj;
4288 reinterpret_cast<FixedDoubleArray*>(obj)->set_map(fixed_double_array_map());
4289 FixedDoubleArray::cast(obj)->set_length(length);
4294 MaybeObject* Heap::AllocateRawFixedDoubleArray(int length,
4295 PretenureFlag pretenure) {
4296 if (length < 0 || length > FixedDoubleArray::kMaxLength) {
4297 return Failure::OutOfMemoryException();
4300 AllocationSpace space =
4301 (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
4302 int size = FixedDoubleArray::SizeFor(length);
4303 if (space == NEW_SPACE && size > kMaxObjectSizeInNewSpace) {
4304 // Too big for new space.
4306 } else if (space == OLD_DATA_SPACE &&
4307 size > MaxObjectSizeInPagedSpace()) {
4308 // Too big for old data space.
4312 AllocationSpace retry_space =
4313 (size <= MaxObjectSizeInPagedSpace()) ? OLD_DATA_SPACE : LO_SPACE;
4315 return AllocateRaw(size, space, retry_space);
4319 MaybeObject* Heap::AllocateHashTable(int length, PretenureFlag pretenure) {
4321 { MaybeObject* maybe_result = AllocateFixedArray(length, pretenure);
4322 if (!maybe_result->ToObject(&result)) return maybe_result;
4324 reinterpret_cast<HeapObject*>(result)->set_map(hash_table_map());
4325 ASSERT(result->IsHashTable());
4330 MaybeObject* Heap::AllocateGlobalContext() {
4332 { MaybeObject* maybe_result =
4333 AllocateFixedArray(Context::GLOBAL_CONTEXT_SLOTS);
4334 if (!maybe_result->ToObject(&result)) return maybe_result;
4336 Context* context = reinterpret_cast<Context*>(result);
4337 context->set_map(global_context_map());
4338 ASSERT(context->IsGlobalContext());
4339 ASSERT(result->IsContext());
4344 MaybeObject* Heap::AllocateFunctionContext(int length, JSFunction* function) {
4345 ASSERT(length >= Context::MIN_CONTEXT_SLOTS);
4347 { MaybeObject* maybe_result = AllocateFixedArray(length);
4348 if (!maybe_result->ToObject(&result)) return maybe_result;
4350 Context* context = reinterpret_cast<Context*>(result);
4351 context->set_map(function_context_map());
4352 context->set_closure(function);
4353 context->set_previous(function->context());
4354 context->set_extension(NULL);
4355 context->set_global(function->context()->global());
4360 MaybeObject* Heap::AllocateCatchContext(JSFunction* function,
4363 Object* thrown_object) {
4364 STATIC_ASSERT(Context::MIN_CONTEXT_SLOTS == Context::THROWN_OBJECT_INDEX);
4366 { MaybeObject* maybe_result =
4367 AllocateFixedArray(Context::MIN_CONTEXT_SLOTS + 1);
4368 if (!maybe_result->ToObject(&result)) return maybe_result;
4370 Context* context = reinterpret_cast<Context*>(result);
4371 context->set_map(catch_context_map());
4372 context->set_closure(function);
4373 context->set_previous(previous);
4374 context->set_extension(name);
4375 context->set_global(previous->global());
4376 context->set(Context::THROWN_OBJECT_INDEX, thrown_object);
4381 MaybeObject* Heap::AllocateWithContext(JSFunction* function,
4383 JSObject* extension) {
4385 { MaybeObject* maybe_result = AllocateFixedArray(Context::MIN_CONTEXT_SLOTS);
4386 if (!maybe_result->ToObject(&result)) return maybe_result;
4388 Context* context = reinterpret_cast<Context*>(result);
4389 context->set_map(with_context_map());
4390 context->set_closure(function);
4391 context->set_previous(previous);
4392 context->set_extension(extension);
4393 context->set_global(previous->global());
4398 MaybeObject* Heap::AllocateBlockContext(JSFunction* function,
4400 SerializedScopeInfo* scope_info) {
4402 { MaybeObject* maybe_result =
4403 AllocateFixedArrayWithHoles(scope_info->NumberOfContextSlots());
4404 if (!maybe_result->ToObject(&result)) return maybe_result;
4406 Context* context = reinterpret_cast<Context*>(result);
4407 context->set_map(block_context_map());
4408 context->set_closure(function);
4409 context->set_previous(previous);
4410 context->set_extension(scope_info);
4411 context->set_global(previous->global());
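// Slot usage at a glance, derived from the context allocators above:
//   function context: extension == NULL, previous == function->context()
//   catch context:    extension == the catch variable name (presumably), plus
//                     one extra slot (THROWN_OBJECT_INDEX) for the thrown object
//   with context:     extension == the with-scope JSObject
//   block context:    extension == the block's SerializedScopeInfo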
4416 MaybeObject* Heap::AllocateSerializedScopeInfo(int length) {
4418 { MaybeObject* maybe_result = AllocateFixedArray(length, TENURED);
4419 if (!maybe_result->ToObject(&result)) return maybe_result;
4421 SerializedScopeInfo* scope_info =
4422 reinterpret_cast<SerializedScopeInfo*>(result);
4423 scope_info->set_map(serialized_scope_info_map());
4428 MaybeObject* Heap::AllocateStruct(InstanceType type) {
4431 #define MAKE_CASE(NAME, Name, name) \
4432 case NAME##_TYPE: map = name##_map(); break;
4433 STRUCT_LIST(MAKE_CASE)
4437 return Failure::InternalError();
4439 int size = map->instance_size();
4440 AllocationSpace space =
4441 (size > MaxObjectSizeInPagedSpace()) ? LO_SPACE : OLD_POINTER_SPACE;
4443 { MaybeObject* maybe_result = Allocate(map, space);
4444 if (!maybe_result->ToObject(&result)) return maybe_result;
4446 Struct::cast(result)->InitializeBody(size);
4451 bool Heap::IsHeapIterable() {
4452 return (!old_pointer_space()->was_swept_conservatively() &&
4453 !old_data_space()->was_swept_conservatively());
4457 void Heap::EnsureHeapIsIterable() {
4458 ASSERT(IsAllocationAllowed());
4459 if (!IsHeapIterable()) {
4460 CollectAllGarbage(kMakeHeapIterableMask);
4462 ASSERT(IsHeapIterable());
4466 bool Heap::IdleNotification() {
4467 static const int kIdlesBeforeScavenge = 4;
4468 static const int kIdlesBeforeMarkSweep = 7;
4469 static const int kIdlesBeforeMarkCompact = 8;
4470 static const int kMaxIdleCount = kIdlesBeforeMarkCompact + 1;
4471 static const unsigned int kGCsBetweenCleanup = 4;
4473 if (!last_idle_notification_gc_count_init_) {
4474 last_idle_notification_gc_count_ = gc_count_;
4475 last_idle_notification_gc_count_init_ = true;
4478 bool uncommit = true;
4479 bool finished = false;
4481 // Reset the number of idle notifications received when a number of
4482 // GCs have taken place. This allows another round of cleanup based
4483 // on idle notifications if enough work has been carried out to
4484 // provoke a number of garbage collections.
4485 if (gc_count_ - last_idle_notification_gc_count_ < kGCsBetweenCleanup) {
4486 number_idle_notifications_ =
4487 Min(number_idle_notifications_ + 1, kMaxIdleCount);
4489 number_idle_notifications_ = 0;
4490 last_idle_notification_gc_count_ = gc_count_;
4493 if (number_idle_notifications_ == kIdlesBeforeScavenge) {
4494 if (contexts_disposed_ > 0) {
4495 HistogramTimerScope scope(isolate_->counters()->gc_context());
4496 CollectAllGarbage(kNoGCFlags);
4498 CollectGarbage(NEW_SPACE);
4500 new_space_.Shrink();
4501 last_idle_notification_gc_count_ = gc_count_;
4502 } else if (number_idle_notifications_ == kIdlesBeforeMarkSweep) {
4503 // Before doing the mark-sweep collections we clear the
4504 // compilation cache to avoid hanging on to source code and
4505 // generated code for cached functions.
4506 isolate_->compilation_cache()->Clear();
4508 CollectAllGarbage(kNoGCFlags);
4509 new_space_.Shrink();
4510 last_idle_notification_gc_count_ = gc_count_;
4512 } else if (number_idle_notifications_ == kIdlesBeforeMarkCompact) {
4513 CollectAllGarbage(kNoGCFlags);
4514 new_space_.Shrink();
4515 last_idle_notification_gc_count_ = gc_count_;
4516 number_idle_notifications_ = 0;
4518 } else if (contexts_disposed_ > 0) {
4519 if (FLAG_expose_gc) {
4520 contexts_disposed_ = 0;
4522 HistogramTimerScope scope(isolate_->counters()->gc_context());
4523 CollectAllGarbage(kNoGCFlags);
4524 last_idle_notification_gc_count_ = gc_count_;
4526 // If this is the first idle notification, we reset the
4527 // notification count to avoid letting idle notifications for
4528 // context disposal garbage collections start a potentially too
4529 // aggressive idle GC cycle.
4530 if (number_idle_notifications_ <= 1) {
4531 number_idle_notifications_ = 0;
4534 } else if (number_idle_notifications_ > kIdlesBeforeMarkCompact) {
4535 // If we have received more than kIdlesBeforeMarkCompact idle
4536 // notifications we do not perform any cleanup because we don't
4537 // expect to gain much by doing so.
4541 // Make sure that we have no pending context disposals and
4542 // conditionally uncommit from space.
4543 // Take into account that we might have decided to delay full collection
4544 // because incremental marking is in progress.
4545 ASSERT((contexts_disposed_ == 0) || !incremental_marking()->IsStopped());
4546 if (uncommit) UncommitFromSpace();
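// Recap of the thresholds above: the 4th consecutive idle notification
// triggers a new-space scavenge (or a full GC if contexts were disposed),
// the 7th clears the compilation cache and runs a full GC, and the 8th runs
// a final full GC and resets the counter. A hypothetical embedder-side loop
// using the public API wrapper might look like:
//
//   while (!v8::V8::IdleNotification()) {
//     // keep notifying until V8 reports no further cleanup is expected
//   }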
4554 void Heap::Print() {
4555 if (!HasBeenSetup()) return;
4556 isolate()->PrintStack();
4558 for (Space* space = spaces.next(); space != NULL; space = spaces.next())
4563 void Heap::ReportCodeStatistics(const char* title) {
4564 PrintF(">>>>>> Code Stats (%s) >>>>>>\n", title);
4565 PagedSpace::ResetCodeStatistics();
4566 // We do not look for code in new space, map space, or old space. If code
4567 // somehow ends up in those spaces, we would miss it here.
4568 code_space_->CollectCodeStatistics();
4569 lo_space_->CollectCodeStatistics();
4570 PagedSpace::ReportCodeStatistics();
4574 // This function expects that NewSpace's allocated objects histogram is
4575 // populated (via a call to CollectStatistics or else as a side effect of a
4576 // just-completed scavenge collection).
4577 void Heap::ReportHeapStatistics(const char* title) {
4579 PrintF(">>>>>> =============== %s (%d) =============== >>>>>>\n",
4581 PrintF("old_gen_promotion_limit_ %" V8_PTR_PREFIX "d\n",
4582 old_gen_promotion_limit_);
4583 PrintF("old_gen_allocation_limit_ %" V8_PTR_PREFIX "d\n",
4584 old_gen_allocation_limit_);
4585 PrintF("old_gen_limit_factor_ %d\n", old_gen_limit_factor_);
4588 PrintF("Number of handles : %d\n", HandleScope::NumberOfHandles());
4589 isolate_->global_handles()->PrintStats();
4592 PrintF("Heap statistics : ");
4593 isolate_->memory_allocator()->ReportStatistics();
4594 PrintF("To space : ");
4595 new_space_.ReportStatistics();
4596 PrintF("Old pointer space : ");
4597 old_pointer_space_->ReportStatistics();
4598 PrintF("Old data space : ");
4599 old_data_space_->ReportStatistics();
4600 PrintF("Code space : ");
4601 code_space_->ReportStatistics();
4602 PrintF("Map space : ");
4603 map_space_->ReportStatistics();
4604 PrintF("Cell space : ");
4605 cell_space_->ReportStatistics();
4606 PrintF("Large object space : ");
4607 lo_space_->ReportStatistics();
4608 PrintF(">>>>>> ========================================= >>>>>>\n");
4613 bool Heap::Contains(HeapObject* value) {
4614 return Contains(value->address());
4618 bool Heap::Contains(Address addr) {
4619 if (OS::IsOutsideAllocatedSpace(addr)) return false;
4620 return HasBeenSetup() &&
4621 (new_space_.ToSpaceContains(addr) ||
4622 old_pointer_space_->Contains(addr) ||
4623 old_data_space_->Contains(addr) ||
4624 code_space_->Contains(addr) ||
4625 map_space_->Contains(addr) ||
4626 cell_space_->Contains(addr) ||
4627 lo_space_->SlowContains(addr));
4631 bool Heap::InSpace(HeapObject* value, AllocationSpace space) {
4632 return InSpace(value->address(), space);
4636 bool Heap::InSpace(Address addr, AllocationSpace space) {
4637 if (OS::IsOutsideAllocatedSpace(addr)) return false;
4638 if (!HasBeenSetup()) return false;
4642 return new_space_.ToSpaceContains(addr);
4643 case OLD_POINTER_SPACE:
4644 return old_pointer_space_->Contains(addr);
4645 case OLD_DATA_SPACE:
4646 return old_data_space_->Contains(addr);
4648 return code_space_->Contains(addr);
4650 return map_space_->Contains(addr);
4652 return cell_space_->Contains(addr);
4654 return lo_space_->SlowContains(addr);
4662 void Heap::Verify() {
4663 ASSERT(HasBeenSetup());
4665 store_buffer()->Verify();
4667 VerifyPointersVisitor visitor;
4668 IterateRoots(&visitor, VISIT_ONLY_STRONG);
4670 new_space_.Verify();
4672 old_pointer_space_->Verify(&visitor);
4673 map_space_->Verify(&visitor);
4675 VerifyPointersVisitor no_dirty_regions_visitor;
4676 old_data_space_->Verify(&no_dirty_regions_visitor);
4677 code_space_->Verify(&no_dirty_regions_visitor);
4678 cell_space_->Verify(&no_dirty_regions_visitor);
4680 lo_space_->Verify();
4686 MaybeObject* Heap::LookupSymbol(Vector<const char> string) {
4687 Object* symbol = NULL;
4689 { MaybeObject* maybe_new_table =
4690 symbol_table()->LookupSymbol(string, &symbol);
4691 if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
4693 // Can't use set_symbol_table because SymbolTable::cast knows that
4694 // SymbolTable is a singleton and checks for identity.
4695 roots_[kSymbolTableRootIndex] = new_table;
4696 ASSERT(symbol != NULL);
4701 MaybeObject* Heap::LookupAsciiSymbol(Vector<const char> string) {
4702 Object* symbol = NULL;
4704 { MaybeObject* maybe_new_table =
4705 symbol_table()->LookupAsciiSymbol(string, &symbol);
4706 if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
4708 // Can't use set_symbol_table because SymbolTable::cast knows that
4709 // SymbolTable is a singleton and checks for identity.
4710 roots_[kSymbolTableRootIndex] = new_table;
4711 ASSERT(symbol != NULL);
4716 MaybeObject* Heap::LookupAsciiSymbol(Handle<SeqAsciiString> string,
4719 Object* symbol = NULL;
4721 { MaybeObject* maybe_new_table =
4722 symbol_table()->LookupSubStringAsciiSymbol(string,
4726 if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
4728 // Can't use set_symbol_table because SymbolTable::cast knows that
4729 // SymbolTable is a singleton and checks for identity.
4730 roots_[kSymbolTableRootIndex] = new_table;
4731 ASSERT(symbol != NULL);
4736 MaybeObject* Heap::LookupTwoByteSymbol(Vector<const uc16> string) {
4737 Object* symbol = NULL;
4739 { MaybeObject* maybe_new_table =
4740 symbol_table()->LookupTwoByteSymbol(string, &symbol);
4741 if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
4743 // Can't use set_symbol_table because SymbolTable::cast knows that
4744 // SymbolTable is a singleton and checks for identity.
4745 roots_[kSymbolTableRootIndex] = new_table;
4746 ASSERT(symbol != NULL);
4751 MaybeObject* Heap::LookupSymbol(String* string) {
4752 if (string->IsSymbol()) return string;
4753 Object* symbol = NULL;
4755 { MaybeObject* maybe_new_table =
4756 symbol_table()->LookupString(string, &symbol);
4757 if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
4759 // Can't use set_symbol_table because SymbolTable::cast knows that
4760 // SymbolTable is a singleton and checks for identity.
4761 roots_[kSymbolTableRootIndex] = new_table;
4762 ASSERT(symbol != NULL);
4767 bool Heap::LookupSymbolIfExists(String* string, String** symbol) {
4768 if (string->IsSymbol()) {
4772 return symbol_table()->LookupSymbolIfExists(string, symbol);
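// Usage sketch for the symbol-table lookups above (illustrative only;
// CStrVector is assumed to produce a Vector<const char>):
//
//   Object* symbol;
//   { MaybeObject* maybe_symbol =
//         heap->LookupAsciiSymbol(CStrVector("length"));
//     if (!maybe_symbol->ToObject(&symbol)) return maybe_symbol;
//   }
//   ASSERT(String::cast(symbol)->IsSymbol());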
4777 void Heap::ZapFromSpace() {
4778 NewSpacePageIterator it(new_space_.FromSpaceStart(),
4779 new_space_.FromSpaceEnd());
4780 while (it.has_next()) {
4781 NewSpacePage* page = it.next();
4782 for (Address cursor = page->body(), limit = page->body_limit();
4784 cursor += kPointerSize) {
4785 Memory::Address_at(cursor) = kFromSpaceZapValue;
4792 void Heap::IterateAndMarkPointersToFromSpace(Address start,
4794 ObjectSlotCallback callback) {
4795 Address slot_address = start;
4797  // We are not collecting slots on new space objects during mutation,
4798  // thus we have to scan for pointers to evacuation candidates when we
4799  // promote objects. But we should not record any slots in non-black
4800  // objects. Grey objects' slots would be rescanned.
4801  // White objects might not survive until the end of the collection, so
4802  // it would be a violation of the invariant to record their slots.
4803 bool record_slots = false;
4804 if (incremental_marking()->IsCompacting()) {
4805 MarkBit mark_bit = Marking::MarkBitFrom(HeapObject::FromAddress(start));
4806 record_slots = Marking::IsBlack(mark_bit);
4809 while (slot_address < end) {
4810 Object** slot = reinterpret_cast<Object**>(slot_address);
4811 Object* object = *slot;
4812 // If the store buffer becomes overfull we mark pages as being exempt from
4813 // the store buffer. These pages are scanned to find pointers that point
4814 // to the new space. In that case we may hit newly promoted objects and
4815 // fix the pointers before the promotion queue gets to them. Thus the 'if'.
4816 if (object->IsHeapObject()) {
4817 if (Heap::InFromSpace(object)) {
4818 callback(reinterpret_cast<HeapObject**>(slot),
4819 HeapObject::cast(object));
4820 Object* new_object = *slot;
4821 if (InNewSpace(new_object)) {
4822 SLOW_ASSERT(Heap::InToSpace(new_object));
4823 SLOW_ASSERT(new_object->IsHeapObject());
4824 store_buffer_.EnterDirectlyIntoStoreBuffer(
4825 reinterpret_cast<Address>(slot));
4827 SLOW_ASSERT(!MarkCompactCollector::IsOnEvacuationCandidate(new_object));
4828 } else if (record_slots &&
4829 MarkCompactCollector::IsOnEvacuationCandidate(object)) {
4830 mark_compact_collector()->RecordSlot(slot, slot, object);
4833 slot_address += kPointerSize;
4839 typedef bool (*CheckStoreBufferFilter)(Object** addr);
4842 bool IsAMapPointerAddress(Object** addr) {
4843 uintptr_t a = reinterpret_cast<uintptr_t>(addr);
4844 int mod = a % Map::kSize;
4845 return mod >= Map::kPointerFieldsBeginOffset &&
4846 mod < Map::kPointerFieldsEndOffset;
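// Illustrative check (hypothetical layout): if Map::kSize were 88 and the
// pointer fields spanned offsets [8, 24), an address at offset 16 into its
// Map would pass this filter, while the map word at offset 0 would not.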
4850 bool EverythingsAPointer(Object** addr) {
4855 static void CheckStoreBuffer(Heap* heap,
4858 Object**** store_buffer_position,
4859 Object*** store_buffer_top,
4860 CheckStoreBufferFilter filter,
4861 Address special_garbage_start,
4862 Address special_garbage_end) {
4863 Map* free_space_map = heap->free_space_map();
4864 for ( ; current < limit; current++) {
4865 Object* o = *current;
4866 Address current_address = reinterpret_cast<Address>(current);
4868 if (o == free_space_map) {
4869 Address current_address = reinterpret_cast<Address>(current);
4870 FreeSpace* free_space =
4871 FreeSpace::cast(HeapObject::FromAddress(current_address));
4872 int skip = free_space->Size();
4873 ASSERT(current_address + skip <= reinterpret_cast<Address>(limit));
4875 current_address += skip - kPointerSize;
4876 current = reinterpret_cast<Object**>(current_address);
4879    // Skip the current linear allocation space between top and limit, which is
4880    // not marked with the free space map but can contain junk.
4881 if (current_address == special_garbage_start &&
4882 special_garbage_end != special_garbage_start) {
4883 current_address = special_garbage_end - kPointerSize;
4884 current = reinterpret_cast<Object**>(current_address);
4887 if (!(*filter)(current)) continue;
4888 ASSERT(current_address < special_garbage_start ||
4889 current_address >= special_garbage_end);
4890 ASSERT(reinterpret_cast<uintptr_t>(o) != kFreeListZapValue);
4891 // We have to check that the pointer does not point into new space
4892 // without trying to cast it to a heap object since the hash field of
4893    // a string can contain values like 1 and 3 which are tagged null pointers.
4895 if (!heap->InNewSpace(o)) continue;
4896 while (**store_buffer_position < current &&
4897 *store_buffer_position < store_buffer_top) {
4898 (*store_buffer_position)++;
4900 if (**store_buffer_position != current ||
4901 *store_buffer_position == store_buffer_top) {
4902 Object** obj_start = current;
4903 while (!(*obj_start)->IsMap()) obj_start--;
4910 // Check that the store buffer contains all intergenerational pointers by
4911 // scanning a page and ensuring that all pointers to young space are in the store buffer.
4913 void Heap::OldPointerSpaceCheckStoreBuffer() {
4914 OldSpace* space = old_pointer_space();
4915 PageIterator pages(space);
4917 store_buffer()->SortUniq();
4919 while (pages.has_next()) {
4920 Page* page = pages.next();
4921 Object** current = reinterpret_cast<Object**>(page->ObjectAreaStart());
4923 Address end = page->ObjectAreaEnd();
4925 Object*** store_buffer_position = store_buffer()->Start();
4926 Object*** store_buffer_top = store_buffer()->Top();
4928 Object** limit = reinterpret_cast<Object**>(end);
4929 CheckStoreBuffer(this,
4932 &store_buffer_position,
4934 &EverythingsAPointer,
4941 void Heap::MapSpaceCheckStoreBuffer() {
4942 MapSpace* space = map_space();
4943 PageIterator pages(space);
4945 store_buffer()->SortUniq();
4947 while (pages.has_next()) {
4948 Page* page = pages.next();
4949 Object** current = reinterpret_cast<Object**>(page->ObjectAreaStart());
4951 Address end = page->ObjectAreaEnd();
4953 Object*** store_buffer_position = store_buffer()->Start();
4954 Object*** store_buffer_top = store_buffer()->Top();
4956 Object** limit = reinterpret_cast<Object**>(end);
4957 CheckStoreBuffer(this,
4960 &store_buffer_position,
4962 &IsAMapPointerAddress,
4969 void Heap::LargeObjectSpaceCheckStoreBuffer() {
4970 LargeObjectIterator it(lo_space());
4971 for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
4972 // We only have code, sequential strings, or fixed arrays in large
4973 // object space, and only fixed arrays can possibly contain pointers to
4974 // the young generation.
4975 if (object->IsFixedArray()) {
4976 Object*** store_buffer_position = store_buffer()->Start();
4977 Object*** store_buffer_top = store_buffer()->Top();
4978 Object** current = reinterpret_cast<Object**>(object->address());
4980 reinterpret_cast<Object**>(object->address() + object->Size());
4981 CheckStoreBuffer(this,
4984 &store_buffer_position,
4986 &EverythingsAPointer,
4995 void Heap::IterateRoots(ObjectVisitor* v, VisitMode mode) {
4996 IterateStrongRoots(v, mode);
4997 IterateWeakRoots(v, mode);
5001 void Heap::IterateWeakRoots(ObjectVisitor* v, VisitMode mode) {
5002 v->VisitPointer(reinterpret_cast<Object**>(&roots_[kSymbolTableRootIndex]));
5003 v->Synchronize("symbol_table");
5004 if (mode != VISIT_ALL_IN_SCAVENGE &&
5005 mode != VISIT_ALL_IN_SWEEP_NEWSPACE) {
5006 // Scavenge collections have special processing for this.
5007 external_string_table_.Iterate(v);
5009 v->Synchronize("external_string_table");
5013 void Heap::IterateStrongRoots(ObjectVisitor* v, VisitMode mode) {
5014 v->VisitPointers(&roots_[0], &roots_[kStrongRootListLength]);
5015 v->Synchronize("strong_root_list");
5017 v->VisitPointer(BitCast<Object**>(&hidden_symbol_));
5018 v->Synchronize("symbol");
5020 isolate_->bootstrapper()->Iterate(v);
5021 v->Synchronize("bootstrapper");
5022 isolate_->Iterate(v);
5023 v->Synchronize("top");
5024 Relocatable::Iterate(v);
5025 v->Synchronize("relocatable");
5027 #ifdef ENABLE_DEBUGGER_SUPPORT
5028 isolate_->debug()->Iterate(v);
5029 if (isolate_->deoptimizer_data() != NULL) {
5030 isolate_->deoptimizer_data()->Iterate(v);
5033 v->Synchronize("debug");
5034 isolate_->compilation_cache()->Iterate(v);
5035 v->Synchronize("compilationcache");
5037 // Iterate over local handles in handle scopes.
5038 isolate_->handle_scope_implementer()->Iterate(v);
5039 v->Synchronize("handlescope");
5041 // Iterate over the builtin code objects and code stubs in the
5042 // heap. Note that it is not necessary to iterate over code objects
5043 // on scavenge collections.
5044 if (mode != VISIT_ALL_IN_SCAVENGE) {
5045 isolate_->builtins()->IterateBuiltins(v);
5047 v->Synchronize("builtins");
5049 // Iterate over global handles.
5051 case VISIT_ONLY_STRONG:
5052 isolate_->global_handles()->IterateStrongRoots(v);
5054 case VISIT_ALL_IN_SCAVENGE:
5055 isolate_->global_handles()->IterateNewSpaceStrongAndDependentRoots(v);
5057 case VISIT_ALL_IN_SWEEP_NEWSPACE:
5059 isolate_->global_handles()->IterateAllRoots(v);
5062 v->Synchronize("globalhandles");
5064 // Iterate over pointers being held by inactive threads.
5065 isolate_->thread_manager()->Iterate(v);
5066 v->Synchronize("threadmanager");
5068  // Iterate over the pointers the Serialization/Deserialization code is holding.
5070 // During garbage collection this keeps the partial snapshot cache alive.
5071 // During deserialization of the startup snapshot this creates the partial
5072 // snapshot cache and deserializes the objects it refers to. During
5073 // serialization this does nothing, since the partial snapshot cache is
5074 // empty. However the next thing we do is create the partial snapshot,
5075 // filling up the partial snapshot cache with objects it needs as we go.
5076 SerializerDeserializer::Iterate(v);
5077 // We don't do a v->Synchronize call here, because in debug mode that will
5078 // output a flag to the snapshot. However at this point the serializer and
5079 // deserializer are deliberately a little unsynchronized (see above) so the
5080 // checking of the sync flag in the snapshot would fail.
5084 // TODO(1236194): Since the heap size is configurable on the command line
5085 // and through the API, we should gracefully handle the case that the heap
5086 // size is not big enough to fit all the initial objects.
5087 bool Heap::ConfigureHeap(int max_semispace_size,
5088 intptr_t max_old_gen_size,
5089 intptr_t max_executable_size) {
5090 if (HasBeenSetup()) return false;
5092 if (max_semispace_size > 0) {
5093 if (max_semispace_size < Page::kPageSize) {
5094 max_semispace_size = Page::kPageSize;
5095 if (FLAG_trace_gc) {
5096 PrintF("Max semispace size cannot be less than %dkbytes\n",
5097 Page::kPageSize >> 10);
5100 max_semispace_size_ = max_semispace_size;
5103 if (Snapshot::IsEnabled()) {
5104 // If we are using a snapshot we always reserve the default amount
5105 // of memory for each semispace because code in the snapshot has
5106 // write-barrier code that relies on the size and alignment of new
5107 // space. We therefore cannot use a larger max semispace size
5108 // than the default reserved semispace size.
5109 if (max_semispace_size_ > reserved_semispace_size_) {
5110 max_semispace_size_ = reserved_semispace_size_;
5111 if (FLAG_trace_gc) {
5112 PrintF("Max semispace size cannot be more than %dkbytes\n",
5113 reserved_semispace_size_ >> 10);
5117 // If we are not using snapshots we reserve space for the actual
5118 // max semispace size.
5119 reserved_semispace_size_ = max_semispace_size_;
5122 if (max_old_gen_size > 0) max_old_generation_size_ = max_old_gen_size;
5123 if (max_executable_size > 0) {
5124 max_executable_size_ = RoundUp(max_executable_size, Page::kPageSize);
5127  // The max executable size must be less than or equal to the max old generation size.
5129 if (max_executable_size_ > max_old_generation_size_) {
5130 max_executable_size_ = max_old_generation_size_;
5133  // The new space size must be a power of two to support single-bit testing for containment.
5135 max_semispace_size_ = RoundUpToPowerOf2(max_semispace_size_);
5136 reserved_semispace_size_ = RoundUpToPowerOf2(reserved_semispace_size_);
5137 initial_semispace_size_ = Min(initial_semispace_size_, max_semispace_size_);
5138 external_allocation_limit_ = 10 * max_semispace_size_;
5140 // The old generation is paged and needs at least one page for each space.
5141 int paged_space_count = LAST_PAGED_SPACE - FIRST_PAGED_SPACE + 1;
5142 max_old_generation_size_ = Max(static_cast<intptr_t>(paged_space_count *
5144 RoundUp(max_old_generation_size_,
5152 bool Heap::ConfigureHeapDefault() {
5153 return ConfigureHeap(static_cast<intptr_t>(FLAG_max_new_space_size / 2) * KB,
5154 static_cast<intptr_t>(FLAG_max_old_space_size) * MB,
5155 static_cast<intptr_t>(FLAG_max_executable_size) * MB);
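// Illustrative explicit configuration (hypothetical sizes), matching the
// ConfigureHeap signature above:
//
//   // 8 MB max semispace, 700 MB old generation, 256 MB executable limit.
//   heap->ConfigureHeap(8 * MB, 700 * MB, 256 * MB);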
5159 void Heap::RecordStats(HeapStats* stats, bool take_snapshot) {
5160 *stats->start_marker = HeapStats::kStartMarker;
5161 *stats->end_marker = HeapStats::kEndMarker;
5162 *stats->new_space_size = new_space_.SizeAsInt();
5163 *stats->new_space_capacity = static_cast<int>(new_space_.Capacity());
5164 *stats->old_pointer_space_size = old_pointer_space_->Size();
5165 *stats->old_pointer_space_capacity = old_pointer_space_->Capacity();
5166 *stats->old_data_space_size = old_data_space_->Size();
5167 *stats->old_data_space_capacity = old_data_space_->Capacity();
5168 *stats->code_space_size = code_space_->Size();
5169 *stats->code_space_capacity = code_space_->Capacity();
5170 *stats->map_space_size = map_space_->Size();
5171 *stats->map_space_capacity = map_space_->Capacity();
5172 *stats->cell_space_size = cell_space_->Size();
5173 *stats->cell_space_capacity = cell_space_->Capacity();
5174 *stats->lo_space_size = lo_space_->Size();
5175 isolate_->global_handles()->RecordStats(stats);
5176 *stats->memory_allocator_size = isolate()->memory_allocator()->Size();
5177 *stats->memory_allocator_capacity =
5178 isolate()->memory_allocator()->Size() +
5179 isolate()->memory_allocator()->Available();
5180 *stats->os_error = OS::GetLastError();
5181 isolate()->memory_allocator()->Available();
5182 if (take_snapshot) {
5183 HeapIterator iterator;
5184 for (HeapObject* obj = iterator.next();
5186 obj = iterator.next()) {
5187 InstanceType type = obj->map()->instance_type();
5188 ASSERT(0 <= type && type <= LAST_TYPE);
5189 stats->objects_per_type[type]++;
5190 stats->size_per_type[type] += obj->Size();
5196 intptr_t Heap::PromotedSpaceSize() {
5197 return old_pointer_space_->Size()
5198 + old_data_space_->Size()
5199 + code_space_->Size()
5200 + map_space_->Size()
5201 + cell_space_->Size()
5202 + lo_space_->Size();
5206 int Heap::PromotedExternalMemorySize() {
5207 if (amount_of_external_allocated_memory_
5208 <= amount_of_external_allocated_memory_at_last_global_gc_) return 0;
5209 return amount_of_external_allocated_memory_
5210 - amount_of_external_allocated_memory_at_last_global_gc_;
5215 // Tags 0, 1, and 3 are used. Use 2 for marking visited HeapObject.
5216 static const int kMarkTag = 2;
5219 class HeapDebugUtils {
5221 explicit HeapDebugUtils(Heap* heap)
5222 : search_for_any_global_(false),
5223 search_target_(NULL),
5224 found_target_(false),
5229 class MarkObjectVisitor : public ObjectVisitor {
5231 explicit MarkObjectVisitor(HeapDebugUtils* utils) : utils_(utils) { }
5233 void VisitPointers(Object** start, Object** end) {
5234      // Mark all HeapObject pointers in [start, end)
5235 for (Object** p = start; p < end; p++) {
5236 if ((*p)->IsHeapObject())
5237 utils_->MarkObjectRecursively(p);
5241 HeapDebugUtils* utils_;
5244 void MarkObjectRecursively(Object** p) {
5245 if (!(*p)->IsHeapObject()) return;
5247 HeapObject* obj = HeapObject::cast(*p);
5249 Object* map = obj->map();
5251 if (!map->IsHeapObject()) return; // visited before
5253 if (found_target_) return; // stop if target found
5254 object_stack_.Add(obj);
5255 if ((search_for_any_global_ && obj->IsJSGlobalObject()) ||
5256 (!search_for_any_global_ && (obj == search_target_))) {
5257 found_target_ = true;
5262 Map* map_p = reinterpret_cast<Map*>(HeapObject::cast(map));
5264 Address map_addr = map_p->address();
5266 obj->set_map(reinterpret_cast<Map*>(map_addr + kMarkTag));
5268 MarkObjectRecursively(&map);
5270 MarkObjectVisitor mark_visitor(this);
5272 obj->IterateBody(map_p->instance_type(), obj->SizeFromMap(map_p),
5275 if (!found_target_) // don't pop if found the target
5276 object_stack_.RemoveLast();
5280 class UnmarkObjectVisitor : public ObjectVisitor {
5282 explicit UnmarkObjectVisitor(HeapDebugUtils* utils) : utils_(utils) { }
5284 void VisitPointers(Object** start, Object** end) {
5285      // Unmark all HeapObject pointers in [start, end)
5286 for (Object** p = start; p < end; p++) {
5287 if ((*p)->IsHeapObject())
5288 utils_->UnmarkObjectRecursively(p);
5292 HeapDebugUtils* utils_;
5296 void UnmarkObjectRecursively(Object** p) {
5297 if (!(*p)->IsHeapObject()) return;
5299 HeapObject* obj = HeapObject::cast(*p);
5301 Object* map = obj->map();
5303 if (map->IsHeapObject()) return; // unmarked already
5305 Address map_addr = reinterpret_cast<Address>(map);
5307 map_addr -= kMarkTag;
5309 ASSERT_TAG_ALIGNED(map_addr);
5311 HeapObject* map_p = HeapObject::FromAddress(map_addr);
5313 obj->set_map(reinterpret_cast<Map*>(map_p));
5315 UnmarkObjectRecursively(reinterpret_cast<Object**>(&map_p));
5317 UnmarkObjectVisitor unmark_visitor(this);
5319 obj->IterateBody(Map::cast(map_p)->instance_type(),
5320 obj->SizeFromMap(Map::cast(map_p)),
5325 void MarkRootObjectRecursively(Object** root) {
5326 if (search_for_any_global_) {
5327 ASSERT(search_target_ == NULL);
5329 ASSERT(search_target_->IsHeapObject());
5331 found_target_ = false;
5332 object_stack_.Clear();
5334 MarkObjectRecursively(root);
5335 UnmarkObjectRecursively(root);
5337 if (found_target_) {
5338 PrintF("=====================================\n");
5339 PrintF("==== Path to object ====\n");
5340 PrintF("=====================================\n\n");
5342 ASSERT(!object_stack_.is_empty());
5343 for (int i = 0; i < object_stack_.length(); i++) {
5344 if (i > 0) PrintF("\n |\n |\n V\n\n");
5345 Object* obj = object_stack_[i];
5348 PrintF("=====================================\n");
5352 // Helper class for visiting HeapObjects recursively.
5353 class MarkRootVisitor: public ObjectVisitor {
5355 explicit MarkRootVisitor(HeapDebugUtils* utils) : utils_(utils) { }
5357 void VisitPointers(Object** start, Object** end) {
5358 // Visit all HeapObject pointers in [start, end)
5359 for (Object** p = start; p < end; p++) {
5360 if ((*p)->IsHeapObject())
5361 utils_->MarkRootObjectRecursively(p);
5365 HeapDebugUtils* utils_;
5368 bool search_for_any_global_;
5369 Object* search_target_;
5371 List<Object*> object_stack_;
5379 bool Heap::Setup(bool create_heap_objects) {
5381 allocation_timeout_ = FLAG_gc_interval;
5382 debug_utils_ = new HeapDebugUtils(this);
5385 // Initialize heap spaces and initial maps and objects. Whenever something
5386 // goes wrong, just return false. The caller should check the results and
5387 // call Heap::TearDown() to release allocated memory.
5389  // If the heap is not yet configured (e.g. through the API), configure it.
5390 // Configuration is based on the flags new-space-size (really the semispace
5391 // size) and old-space-size if set or the initial values of semispace_size_
5392 // and old_generation_size_ otherwise.
5394 if (!ConfigureHeapDefault()) return false;
5397 gc_initializer_mutex->Lock();
5398 static bool initialized_gc = false;
5399 if (!initialized_gc) {
5400 initialized_gc = true;
5401 InitializeScavengingVisitorsTables();
5402 NewSpaceScavenger::Initialize();
5403 MarkCompactCollector::Initialize();
5405 gc_initializer_mutex->Unlock();
5407 MarkMapPointersAsEncoded(false);
5409  // Set up the memory allocator.
5410 if (!isolate_->memory_allocator()->Setup(MaxReserved(), MaxExecutableSize()))
5414 if (!new_space_.Setup(reserved_semispace_size_, max_semispace_size_)) {
5418 // Initialize old pointer space.
5419 old_pointer_space_ =
5421 max_old_generation_size_,
5424 if (old_pointer_space_ == NULL) return false;
5425 if (!old_pointer_space_->Setup()) return false;
5427 // Initialize old data space.
5430 max_old_generation_size_,
5433 if (old_data_space_ == NULL) return false;
5434 if (!old_data_space_->Setup()) return false;
5436  // Initialize the code space and set its maximum capacity to the old
5437  // generation size. It needs executable memory.
5438 // On 64-bit platform(s), we put all code objects in a 2 GB range of
5439 // virtual address space, so that they can call each other with near calls.
5440 if (code_range_size_ > 0) {
5441 if (!isolate_->code_range()->Setup(code_range_size_)) {
5447 new OldSpace(this, max_old_generation_size_, CODE_SPACE, EXECUTABLE);
5448 if (code_space_ == NULL) return false;
5449 if (!code_space_->Setup()) return false;
5451 // Initialize map space.
5452 map_space_ = new MapSpace(this,
5453 max_old_generation_size_,
5454 FLAG_max_map_space_pages,
5456 if (map_space_ == NULL) return false;
5457 if (!map_space_->Setup()) return false;
5459 // Initialize global property cell space.
5460 cell_space_ = new CellSpace(this, max_old_generation_size_, CELL_SPACE);
5461 if (cell_space_ == NULL) return false;
5462 if (!cell_space_->Setup()) return false;
5464 // The large object code space may contain code or data. We set the memory
5465 // to be non-executable here for safety, but this means we need to enable it
5466 // explicitly when allocating large code objects.
5467 lo_space_ = new LargeObjectSpace(this, max_old_generation_size_, LO_SPACE);
5468 if (lo_space_ == NULL) return false;
5469 if (!lo_space_->Setup()) return false;
5470 if (create_heap_objects) {
5471 // Create initial maps.
5472 if (!CreateInitialMaps()) return false;
5473 if (!CreateApiObjects()) return false;
5475 // Create initial objects
5476 if (!CreateInitialObjects()) return false;
5478 global_contexts_list_ = undefined_value();
5481 LOG(isolate_, IntPtrTEvent("heap-capacity", Capacity()));
5482 LOG(isolate_, IntPtrTEvent("heap-available", Available()));
5484 store_buffer()->Setup();
void Heap::SetStackLimits() {
  ASSERT(isolate_ != NULL);
  ASSERT(isolate_ == isolate());
  // On 64 bit machines, pointers are generally out of range of Smis. We write
  // something that looks like an out of range Smi to the GC.

  // Set up the special root array entries containing the stack limits.
  // These are actually addresses, but the tag makes the GC ignore them.
  roots_[kStackLimitRootIndex] =
      reinterpret_cast<Object*>(
          (isolate_->stack_guard()->jslimit() & ~kSmiTagMask) | kSmiTag);
  roots_[kRealStackLimitRootIndex] =
      reinterpret_cast<Object*>(
          (isolate_->stack_guard()->real_jslimit() & ~kSmiTagMask) | kSmiTag);
}

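// A minimal sketch (not part of V8 itself) of the disguise used above:
// clearing the low bit and applying kSmiTag makes a raw stack address look
// like a small integer, so the GC never tries to follow the root entry as a
// heap pointer. The helper name is hypothetical.
#if 0
static Object* DisguiseAddressAsSmi(uintptr_t addr) {
  return reinterpret_cast<Object*>((addr & ~kSmiTagMask) | kSmiTag);
}
#endif
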
void Heap::TearDown() {
  if (FLAG_print_cumulative_gc_stat) {
    PrintF("\n\n");
    PrintF("gc_count=%d ", gc_count_);
    PrintF("mark_sweep_count=%d ", ms_count_);
    PrintF("max_gc_pause=%d ", get_max_gc_pause());
    PrintF("min_in_mutator=%d ", get_min_in_mutator());
    PrintF("max_alive_after_gc=%" V8_PTR_PREFIX "d ",
           get_max_alive_after_gc());
    PrintF("\n\n");
  }

  isolate_->global_handles()->TearDown();

  external_string_table_.TearDown();

  new_space_.TearDown();

  if (old_pointer_space_ != NULL) {
    old_pointer_space_->TearDown();
    delete old_pointer_space_;
    old_pointer_space_ = NULL;
  }

  if (old_data_space_ != NULL) {
    old_data_space_->TearDown();
    delete old_data_space_;
    old_data_space_ = NULL;
  }

  if (code_space_ != NULL) {
    code_space_->TearDown();
    delete code_space_;
    code_space_ = NULL;
  }

  if (map_space_ != NULL) {
    map_space_->TearDown();
    delete map_space_;
    map_space_ = NULL;
  }

  if (cell_space_ != NULL) {
    cell_space_->TearDown();
    delete cell_space_;
    cell_space_ = NULL;
  }

  if (lo_space_ != NULL) {
    lo_space_->TearDown();
    delete lo_space_;
    lo_space_ = NULL;
  }

  store_buffer()->TearDown();
  incremental_marking()->TearDown();

  isolate_->memory_allocator()->TearDown();

  delete debug_utils_;
  debug_utils_ = NULL;
}

void Heap::Shrink() {
  // Try to shrink all paged spaces.
  PagedSpaces spaces;
  for (PagedSpace* space = spaces.next(); space != NULL; space = spaces.next())
    space->ReleaseAllUnusedPages();
}

void Heap::AddGCPrologueCallback(GCPrologueCallback callback, GCType gc_type) {
  ASSERT(callback != NULL);
  GCPrologueCallbackPair pair(callback, gc_type);
  ASSERT(!gc_prologue_callbacks_.Contains(pair));
  return gc_prologue_callbacks_.Add(pair);
}

void Heap::RemoveGCPrologueCallback(GCPrologueCallback callback) {
  ASSERT(callback != NULL);
  for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
    if (gc_prologue_callbacks_[i].callback == callback) {
      gc_prologue_callbacks_.Remove(i);
      return;
    }
  }
  UNREACHABLE();
}

void Heap::AddGCEpilogueCallback(GCEpilogueCallback callback, GCType gc_type) {
  ASSERT(callback != NULL);
  GCEpilogueCallbackPair pair(callback, gc_type);
  ASSERT(!gc_epilogue_callbacks_.Contains(pair));
  return gc_epilogue_callbacks_.Add(pair);
}

void Heap::RemoveGCEpilogueCallback(GCEpilogueCallback callback) {
  ASSERT(callback != NULL);
  for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
    if (gc_epilogue_callbacks_[i].callback == callback) {
      gc_epilogue_callbacks_.Remove(i);
      return;
    }
  }
  UNREACHABLE();
}

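// Illustrative sketch (not part of V8 itself): registering a prologue hook
// that runs before every full collection. The callback signature matches the
// GCPrologueCallback typedef used above; the function names are hypothetical.
#if 0
static void MyGCPrologueHook(GCType type, GCCallbackFlags flags) {
  PrintF("full GC about to start\n");
}

static void InstallGCHook(Heap* heap) {
  heap->AddGCPrologueCallback(MyGCPrologueHook, kGCTypeMarkSweepCompact);
}
#endif
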
class PrintHandleVisitor: public ObjectVisitor {
 public:
  void VisitPointers(Object** start, Object** end) {
    for (Object** p = start; p < end; p++)
      PrintF("  handle %p to %p\n",
             reinterpret_cast<void*>(p),
             reinterpret_cast<void*>(*p));
  }
};

void Heap::PrintHandles() {
  PrintF("Handles:\n");
  PrintHandleVisitor v;
  isolate_->handle_scope_implementer()->Iterate(&v);
}

Space* AllSpaces::next() {
  switch (counter_++) {
    case NEW_SPACE:
      return HEAP->new_space();
    case OLD_POINTER_SPACE:
      return HEAP->old_pointer_space();
    case OLD_DATA_SPACE:
      return HEAP->old_data_space();
    case CODE_SPACE:
      return HEAP->code_space();
    case MAP_SPACE:
      return HEAP->map_space();
    case CELL_SPACE:
      return HEAP->cell_space();
    case LO_SPACE:
      return HEAP->lo_space();
    default:
      return NULL;
  }
}

PagedSpace* PagedSpaces::next() {
  switch (counter_++) {
    case OLD_POINTER_SPACE:
      return HEAP->old_pointer_space();
    case OLD_DATA_SPACE:
      return HEAP->old_data_space();
    case CODE_SPACE:
      return HEAP->code_space();
    case MAP_SPACE:
      return HEAP->map_space();
    case CELL_SPACE:
      return HEAP->cell_space();
    default:
      return NULL;
  }
}

OldSpace* OldSpaces::next() {
  switch (counter_++) {
    case OLD_POINTER_SPACE:
      return HEAP->old_pointer_space();
    case OLD_DATA_SPACE:
      return HEAP->old_data_space();
    case CODE_SPACE:
      return HEAP->code_space();
    default:
      return NULL;
  }
}

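// Illustrative sketch (not part of V8 itself): the cursor-style space
// iterators above are normally consumed in a simple for-loop; next() yields
// NULL once every space has been visited. The helper name is hypothetical.
#if 0
static intptr_t TotalOldSpaceCapacity() {
  intptr_t total = 0;
  OldSpaces spaces;
  for (OldSpace* space = spaces.next(); space != NULL; space = spaces.next()) {
    total += space->Capacity();
  }
  return total;
}
#endif
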
SpaceIterator::SpaceIterator()
    : current_space_(FIRST_SPACE),
      iterator_(NULL),
      size_func_(NULL) {
}

SpaceIterator::SpaceIterator(HeapObjectCallback size_func)
    : current_space_(FIRST_SPACE),
      iterator_(NULL),
      size_func_(size_func) {
}

SpaceIterator::~SpaceIterator() {
  // Delete active iterator if any.
  delete iterator_;
}

bool SpaceIterator::has_next() {
  // Iterate until no more spaces.
  return current_space_ != LAST_SPACE;
}

ObjectIterator* SpaceIterator::next() {
  if (iterator_ != NULL) {
    delete iterator_;
    iterator_ = NULL;
    // Move to the next space.
    current_space_++;
    if (current_space_ > LAST_SPACE) {
      return NULL;
    }
  }
  // Return iterator for the new current space.
  return CreateIterator();
}

// Create an iterator for the space to iterate.
ObjectIterator* SpaceIterator::CreateIterator() {
  ASSERT(iterator_ == NULL);
  switch (current_space_) {
    case NEW_SPACE:
      iterator_ = new SemiSpaceIterator(HEAP->new_space(), size_func_);
      break;
    case OLD_POINTER_SPACE:
      iterator_ = new HeapObjectIterator(HEAP->old_pointer_space(), size_func_);
      break;
    case OLD_DATA_SPACE:
      iterator_ = new HeapObjectIterator(HEAP->old_data_space(), size_func_);
      break;
    case CODE_SPACE:
      iterator_ = new HeapObjectIterator(HEAP->code_space(), size_func_);
      break;
    case MAP_SPACE:
      iterator_ = new HeapObjectIterator(HEAP->map_space(), size_func_);
      break;
    case CELL_SPACE:
      iterator_ = new HeapObjectIterator(HEAP->cell_space(), size_func_);
      break;
    case LO_SPACE:
      iterator_ = new LargeObjectIterator(HEAP->lo_space(), size_func_);
      break;
  }
  // Return the newly allocated iterator.
  ASSERT(iterator_ != NULL);
  return iterator_;
}

class HeapObjectsFilter {
 public:
  virtual ~HeapObjectsFilter() {}
  virtual bool SkipObject(HeapObject* object) = 0;
};

class UnreachableObjectsFilter : public HeapObjectsFilter {
 public:
  UnreachableObjectsFilter() {
    MarkReachableObjects();
  }

  ~UnreachableObjectsFilter() {
    Isolate::Current()->heap()->mark_compact_collector()->ClearMarkbits();
  }

  bool SkipObject(HeapObject* object) {
    MarkBit mark_bit = Marking::MarkBitFrom(object);
    return !mark_bit.Get();
  }

 private:
  class MarkingVisitor : public ObjectVisitor {
   public:
    MarkingVisitor() : marking_stack_(10) {}

    void VisitPointers(Object** start, Object** end) {
      for (Object** p = start; p < end; p++) {
        if (!(*p)->IsHeapObject()) continue;
        HeapObject* obj = HeapObject::cast(*p);
        MarkBit mark_bit = Marking::MarkBitFrom(obj);
        if (!mark_bit.Get()) {
          mark_bit.Set();
          marking_stack_.Add(obj);
        }
      }
    }

    void TransitiveClosure() {
      while (!marking_stack_.is_empty()) {
        HeapObject* obj = marking_stack_.RemoveLast();
        obj->Iterate(this);
      }
    }

   private:
    List<HeapObject*> marking_stack_;
  };

  void MarkReachableObjects() {
    Heap* heap = Isolate::Current()->heap();
    MarkingVisitor visitor;
    heap->IterateRoots(&visitor, VISIT_ALL);
    visitor.TransitiveClosure();
  }

  AssertNoAllocation no_alloc;
};

HeapIterator::HeapIterator()
    : filtering_(HeapIterator::kNoFiltering),
      filter_(NULL) {
  Init();
}

HeapIterator::HeapIterator(HeapIterator::HeapObjectsFiltering filtering)
    : filtering_(filtering),
      filter_(NULL) {
  Init();
}

HeapIterator::~HeapIterator() {
  Shutdown();
}

void HeapIterator::Init() {
  // Start the iteration.
  space_iterator_ = new SpaceIterator;
  switch (filtering_) {
    case kFilterUnreachable:
      filter_ = new UnreachableObjectsFilter;
      break;
    default:
      break;
  }
  object_iterator_ = space_iterator_->next();
}

void HeapIterator::Shutdown() {
  // Assert that in filtering mode we have iterated through all
  // objects. Otherwise, heap will be left in an inconsistent state.
  if (filtering_ != kNoFiltering) {
    ASSERT(object_iterator_ == NULL);
  }
  // Make sure the last iterator is deallocated.
  delete space_iterator_;
  space_iterator_ = NULL;
  object_iterator_ = NULL;
  delete filter_;
  filter_ = NULL;
}

HeapObject* HeapIterator::next() {
  if (filter_ == NULL) return NextObject();

  HeapObject* obj = NextObject();
  while (obj != NULL && filter_->SkipObject(obj)) obj = NextObject();
  return obj;
}

HeapObject* HeapIterator::NextObject() {
  // No iterator means we are done.
  if (object_iterator_ == NULL) return NULL;

  if (HeapObject* obj = object_iterator_->next_object()) {
    // If the current iterator has more objects we are fine.
    return obj;
  } else {
    // Go through the spaces looking for one that has objects.
    while (space_iterator_->has_next()) {
      object_iterator_ = space_iterator_->next();
      if (HeapObject* obj = object_iterator_->next_object()) {
        return obj;
      }
    }
  }
  // Done with the last space.
  object_iterator_ = NULL;
  return NULL;
}

void HeapIterator::reset() {
  // Restart the iterator.
  Shutdown();
  Init();
}

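// Illustrative sketch (not part of V8 itself): walking every live object in
// the heap with the iterator above. kFilterUnreachable makes the iterator
// skip objects the reachability marker did not visit. The helper name is
// hypothetical.
#if 0
static int CountReachableObjects() {
  int count = 0;
  HeapIterator iterator(HeapIterator::kFilterUnreachable);
  for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
    count++;
  }
  return count;
}
#endif
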
#if defined(DEBUG) || defined(LIVE_OBJECT_LIST)

Object* const PathTracer::kAnyGlobalObject = reinterpret_cast<Object*>(NULL);

class PathTracer::MarkVisitor: public ObjectVisitor {
 public:
  explicit MarkVisitor(PathTracer* tracer) : tracer_(tracer) {}
  void VisitPointers(Object** start, Object** end) {
    // Scan all HeapObject pointers in [start, end)
    for (Object** p = start; !tracer_->found() && (p < end); p++) {
      if ((*p)->IsHeapObject())
        tracer_->MarkRecursively(p, this);
    }
  }

 private:
  PathTracer* tracer_;
};

class PathTracer::UnmarkVisitor: public ObjectVisitor {
 public:
  explicit UnmarkVisitor(PathTracer* tracer) : tracer_(tracer) {}
  void VisitPointers(Object** start, Object** end) {
    // Scan all HeapObject pointers in [start, end)
    for (Object** p = start; p < end; p++) {
      if ((*p)->IsHeapObject())
        tracer_->UnmarkRecursively(p, this);
    }
  }

 private:
  PathTracer* tracer_;
};

void PathTracer::VisitPointers(Object** start, Object** end) {
  bool done = ((what_to_find_ == FIND_FIRST) && found_target_);
  // Visit all HeapObject pointers in [start, end)
  for (Object** p = start; !done && (p < end); p++) {
    if ((*p)->IsHeapObject()) {
      TracePathFrom(p);
      done = ((what_to_find_ == FIND_FIRST) && found_target_);
    }
  }
}

void PathTracer::Reset() {
  found_target_ = false;
  object_stack_.Clear();
}

void PathTracer::TracePathFrom(Object** root) {
  ASSERT((search_target_ == kAnyGlobalObject) ||
         search_target_->IsHeapObject());
  found_target_in_trace_ = false;
  object_stack_.Clear();

  MarkVisitor mark_visitor(this);
  MarkRecursively(root, &mark_visitor);

  UnmarkVisitor unmark_visitor(this);
  UnmarkRecursively(root, &unmark_visitor);

  ProcessResults();
}

static bool SafeIsGlobalContext(HeapObject* obj) {
  return obj->map() == obj->GetHeap()->raw_unchecked_global_context_map();
}

void PathTracer::MarkRecursively(Object** p, MarkVisitor* mark_visitor) {
  if (!(*p)->IsHeapObject()) return;

  HeapObject* obj = HeapObject::cast(*p);

  Object* map = obj->map();

  if (!map->IsHeapObject()) return;  // visited before

  if (found_target_in_trace_) return;  // stop if target found
  object_stack_.Add(obj);
  if (((search_target_ == kAnyGlobalObject) && obj->IsJSGlobalObject()) ||
      (obj == search_target_)) {
    found_target_in_trace_ = true;
    found_target_ = true;
    return;
  }

  bool is_global_context = SafeIsGlobalContext(obj);

  // Not visited yet.
  Map* map_p = reinterpret_cast<Map*>(HeapObject::cast(map));

  Address map_addr = map_p->address();

  obj->set_map(reinterpret_cast<Map*>(map_addr + kMarkTag));

  // Scan the object body.
  if (is_global_context && (visit_mode_ == VISIT_ONLY_STRONG)) {
    // This is specialized to scan Context's properly.
    Object** start = reinterpret_cast<Object**>(obj->address() +
                                                Context::kHeaderSize);
    Object** end = reinterpret_cast<Object**>(obj->address() +
        Context::kHeaderSize + Context::FIRST_WEAK_SLOT * kPointerSize);
    mark_visitor->VisitPointers(start, end);
  } else {
    obj->IterateBody(map_p->instance_type(),
                     obj->SizeFromMap(map_p),
                     mark_visitor);
  }

  // Scan the map after the body because the body is a lot more interesting
  // when doing leak detection.
  MarkRecursively(&map, mark_visitor);

  if (!found_target_in_trace_)  // don't pop if found the target
    object_stack_.RemoveLast();
}

void PathTracer::UnmarkRecursively(Object** p, UnmarkVisitor* unmark_visitor) {
  if (!(*p)->IsHeapObject()) return;

  HeapObject* obj = HeapObject::cast(*p);

  Object* map = obj->map();

  if (map->IsHeapObject()) return;  // unmarked already

  Address map_addr = reinterpret_cast<Address>(map);

  map_addr -= kMarkTag;

  ASSERT_TAG_ALIGNED(map_addr);

  HeapObject* map_p = HeapObject::FromAddress(map_addr);

  obj->set_map(reinterpret_cast<Map*>(map_p));

  UnmarkRecursively(reinterpret_cast<Object**>(&map_p), unmark_visitor);

  obj->IterateBody(Map::cast(map_p)->instance_type(),
                   obj->SizeFromMap(Map::cast(map_p)),
                   unmark_visitor);
}

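// A minimal sketch (not part of V8 itself) of the marking trick shared by
// MarkRecursively and UnmarkRecursively above: a visited object is tagged by
// adding kMarkTag to its map pointer, so "the map is not a HeapObject"
// doubles as the visited test, and unmarking restores the untagged map.
#if 0
Address map_addr = obj->map()->address();
obj->set_map(reinterpret_cast<Map*>(map_addr + kMarkTag));              // mark
bool visited = !reinterpret_cast<Object*>(obj->map())->IsHeapObject();  // test
obj->set_map(reinterpret_cast<Map*>(map_addr));                         // unmark
#endif
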
void PathTracer::ProcessResults() {
  if (found_target_) {
    PrintF("=====================================\n");
    PrintF("====        Path to object       ====\n");
    PrintF("=====================================\n\n");

    ASSERT(!object_stack_.is_empty());
    for (int i = 0; i < object_stack_.length(); i++) {
      if (i > 0) PrintF("\n |\n |\n V\n\n");
      Object* obj = object_stack_[i];
      obj->Print();
    }
    PrintF("=====================================\n");
  }
}
#endif  // DEBUG || LIVE_OBJECT_LIST

// Triggers a depth-first traversal of reachable objects from roots
// and finds a path to a specific heap object and prints it.
void Heap::TracePathToObject(Object* target) {
  PathTracer tracer(target, PathTracer::FIND_ALL, VISIT_ALL);
  IterateRoots(&tracer, VISIT_ONLY_STRONG);
}

// Triggers a depth-first traversal of reachable objects from roots
// and finds a path to any global object and prints it. Useful for
// determining the source for leaks of global objects.
void Heap::TracePathToGlobal() {
  PathTracer tracer(PathTracer::kAnyGlobalObject,
                    PathTracer::FIND_ALL,
                    VISIT_ALL);
  IterateRoots(&tracer, VISIT_ONLY_STRONG);
}

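// Illustrative sketch (not part of V8 itself): these entry points are meant
// to be invoked from a debugger or a debug-only hook while hunting leaks; the
// wrapper below is hypothetical.
#if 0
static void DumpRetainingPaths(Heap* heap, Object* suspect) {
  heap->TracePathToObject(suspect);  // prints roots -> ... -> suspect
  heap->TracePathToGlobal();         // or: a path to any global object
}
#endif
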
static intptr_t CountTotalHolesSize() {
  intptr_t holes_size = 0;
  OldSpaces spaces;
  for (OldSpace* space = spaces.next();
       space != NULL;
       space = spaces.next()) {
    holes_size += space->Waste() + space->Available();
  }
  return holes_size;
}

GCTracer::GCTracer(Heap* heap)
    : start_time_(0.0),
      start_size_(0),
      gc_count_(0),
      full_gc_count_(0),
      allocated_since_last_gc_(0),
      spent_in_mutator_(0),
      promoted_objects_size_(0),
      heap_(heap) {
  if (!FLAG_trace_gc && !FLAG_print_cumulative_gc_stat) return;
  start_time_ = OS::TimeCurrentMillis();
  start_size_ = heap_->SizeOfObjects();

  for (int i = 0; i < Scope::kNumberOfScopes; i++) {
    scopes_[i] = 0;
  }

  in_free_list_or_wasted_before_gc_ = CountTotalHolesSize();

  allocated_since_last_gc_ =
      heap_->SizeOfObjects() - heap_->alive_after_last_gc_;

  if (heap_->last_gc_end_timestamp_ > 0) {
    spent_in_mutator_ = Max(start_time_ - heap_->last_gc_end_timestamp_, 0.0);
  }

  steps_count_ = heap_->incremental_marking()->steps_count();
  steps_took_ = heap_->incremental_marking()->steps_took();
  longest_step_ = heap_->incremental_marking()->longest_step();
  steps_count_since_last_gc_ =
      heap_->incremental_marking()->steps_count_since_last_gc();
  steps_took_since_last_gc_ =
      heap_->incremental_marking()->steps_took_since_last_gc();
}

GCTracer::~GCTracer() {
  // Print ONE line iff a tracing flag is set.
  if (!FLAG_trace_gc && !FLAG_print_cumulative_gc_stat) return;

  bool first_gc = (heap_->last_gc_end_timestamp_ == 0);

  heap_->alive_after_last_gc_ = heap_->SizeOfObjects();
  heap_->last_gc_end_timestamp_ = OS::TimeCurrentMillis();

  int time = static_cast<int>(heap_->last_gc_end_timestamp_ - start_time_);

  // Update cumulative GC statistics if required.
  if (FLAG_print_cumulative_gc_stat) {
    heap_->max_gc_pause_ = Max(heap_->max_gc_pause_, time);
    heap_->max_alive_after_gc_ = Max(heap_->max_alive_after_gc_,
                                     heap_->alive_after_last_gc_);
    if (!first_gc) {
      heap_->min_in_mutator_ = Min(heap_->min_in_mutator_,
                                   static_cast<int>(spent_in_mutator_));
    }
  }

  if (!FLAG_trace_gc_nvp) {
    int external_time = static_cast<int>(scopes_[Scope::EXTERNAL]);

    PrintF("%s %.1f -> %.1f MB, ",
           CollectorString(),
           static_cast<double>(start_size_) / MB,
           SizeOfHeapObjects());

    if (external_time > 0) PrintF("%d / ", external_time);
    PrintF("%d ms", time);
    if (steps_count_ > 0) {
      if (collector_ == SCAVENGER) {
        PrintF(" (+ %d ms in %d steps since last GC)",
               static_cast<int>(steps_took_since_last_gc_),
               steps_count_since_last_gc_);
      } else {
        PrintF(" (+ %d ms in %d steps since start of marking, "
               "biggest step %f ms)",
               static_cast<int>(steps_took_),
               steps_count_,
               longest_step_);
      }
    }
    PrintF(".\n");
  } else {
    PrintF("pause=%d ", time);
    PrintF("mutator=%d ",
           static_cast<int>(spent_in_mutator_));

    PrintF("gc=");
    switch (collector_) {
      case SCAVENGER:
        PrintF("s");
        break;
      case MARK_COMPACTOR:
        PrintF("ms");
        break;
      default:
        UNREACHABLE();
    }
    PrintF(" ");

    PrintF("external=%d ", static_cast<int>(scopes_[Scope::EXTERNAL]));
    PrintF("mark=%d ", static_cast<int>(scopes_[Scope::MC_MARK]));
    PrintF("sweep=%d ", static_cast<int>(scopes_[Scope::MC_SWEEP]));
    PrintF("sweepns=%d ", static_cast<int>(scopes_[Scope::MC_SWEEP_NEWSPACE]));
    PrintF("compact=%d ", static_cast<int>(scopes_[Scope::MC_COMPACT]));

    PrintF("total_size_before=%" V8_PTR_PREFIX "d ", start_size_);
    PrintF("total_size_after=%" V8_PTR_PREFIX "d ", heap_->SizeOfObjects());
    PrintF("holes_size_before=%" V8_PTR_PREFIX "d ",
           in_free_list_or_wasted_before_gc_);
    PrintF("holes_size_after=%" V8_PTR_PREFIX "d ", CountTotalHolesSize());

    PrintF("allocated=%" V8_PTR_PREFIX "d ", allocated_since_last_gc_);
    PrintF("promoted=%" V8_PTR_PREFIX "d ", promoted_objects_size_);

    if (collector_ == SCAVENGER) {
      PrintF("stepscount=%d ", steps_count_since_last_gc_);
      PrintF("stepstook=%d ", static_cast<int>(steps_took_since_last_gc_));
    } else {
      PrintF("stepscount=%d ", steps_count_);
      PrintF("stepstook=%d ", static_cast<int>(steps_took_));
    }

    PrintF("\n");
  }

  heap_->PrintShortHeapStatistics();
}

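// For reference, when only --trace-gc is set the destructor above prints one
// line per collection of roughly this shape (fields taken from the format
// strings above, values depend on the run):
//   <collector> <size-before> -> <size-after> MB, [<external> / ]<pause> ms.
// where <collector> is "Scavenge" or "Mark-sweep" and the sizes are the heap
// object sizes before and after the collection.
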
const char* GCTracer::CollectorString() {
  switch (collector_) {
    case SCAVENGER:
      return "Scavenge";
    case MARK_COMPACTOR:
      return "Mark-sweep";
  }
  return "Unknown GC";
}

int KeyedLookupCache::Hash(Map* map, String* name) {
  // Uses only lower 32 bits if pointers are larger.
  uintptr_t addr_hash =
      static_cast<uint32_t>(reinterpret_cast<uintptr_t>(map)) >> kMapHashShift;
  return static_cast<uint32_t>((addr_hash ^ name->Hash()) & kCapacityMask);
}

int KeyedLookupCache::Lookup(Map* map, String* name) {
  int index = Hash(map, name);
  Key& key = keys_[index];
  if ((key.map == map) && key.name->Equals(name)) {
    return field_offsets_[index];
  }
  return kNotFound;
}

void KeyedLookupCache::Update(Map* map, String* name, int field_offset) {
  String* symbol;
  if (HEAP->LookupSymbolIfExists(name, &symbol)) {
    int index = Hash(map, symbol);
    Key& key = keys_[index];
    key.map = map;
    key.name = symbol;
    field_offsets_[index] = field_offset;
  }
}

void KeyedLookupCache::Clear() {
  for (int index = 0; index < kLength; index++) keys_[index].map = NULL;
}

void DescriptorLookupCache::Clear() {
  for (int index = 0; index < kLength; index++) keys_[index].array = NULL;
}

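// Illustrative sketch (not part of V8 itself): the keyed lookup cache is a
// direct-mapped cache from (map, symbol) pairs to fast-property field
// offsets. A typical consumer probes it first and falls back to a slow lookup
// on a miss, then fills the slot via Update(). "isolate", "map", "name" and
// the slow path are assumed to be provided by the caller.
#if 0
int offset = isolate->keyed_lookup_cache()->Lookup(map, name);
if (offset == KeyedLookupCache::kNotFound) {
  offset = /* slow property lookup elided */ 0;
  isolate->keyed_lookup_cache()->Update(map, name, offset);
}
#endif
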
void Heap::GarbageCollectionGreedyCheck() {
  ASSERT(FLAG_gc_greedy);
  if (isolate_->bootstrapper()->IsActive()) return;
  if (disallow_allocation_failure()) return;
  CollectGarbage(NEW_SPACE);
}

TranscendentalCache::SubCache::SubCache(Type t)
    : type_(t),
      isolate_(Isolate::Current()) {
  uint32_t in0 = 0xffffffffu;  // Bit-pattern for a NaN that isn't
  uint32_t in1 = 0xffffffffu;  // generated by the FPU.
  for (int i = 0; i < kCacheSize; i++) {
    elements_[i].in[0] = in0;
    elements_[i].in[1] = in1;
    elements_[i].output = NULL;
  }
}

void TranscendentalCache::Clear() {
  for (int i = 0; i < kNumberOfCaches; i++) {
    if (caches_[i] != NULL) {
      delete caches_[i];
      caches_[i] = NULL;
    }
  }
}

void ExternalStringTable::CleanUp() {
  int last = 0;
  for (int i = 0; i < new_space_strings_.length(); ++i) {
    if (new_space_strings_[i] == heap_->raw_unchecked_null_value()) continue;
    if (heap_->InNewSpace(new_space_strings_[i])) {
      new_space_strings_[last++] = new_space_strings_[i];
    } else {
      old_space_strings_.Add(new_space_strings_[i]);
    }
  }
  new_space_strings_.Rewind(last);
  last = 0;
  for (int i = 0; i < old_space_strings_.length(); ++i) {
    if (old_space_strings_[i] == heap_->raw_unchecked_null_value()) continue;
    ASSERT(!heap_->InNewSpace(old_space_strings_[i]));
    old_space_strings_[last++] = old_space_strings_[i];
  }
  old_space_strings_.Rewind(last);
  if (FLAG_verify_heap) {
    Verify();
  }
}

void ExternalStringTable::TearDown() {
  new_space_strings_.Free();
  old_space_strings_.Free();
}

void Heap::QueueMemoryChunkForFree(MemoryChunk* chunk) {
  chunk->set_next_chunk(chunks_queued_for_free_);
  chunks_queued_for_free_ = chunk;
}

void Heap::FreeQueuedChunks() {
  if (chunks_queued_for_free_ == NULL) return;
  MemoryChunk* next;
  MemoryChunk* chunk;
  for (chunk = chunks_queued_for_free_; chunk != NULL; chunk = next) {
    next = chunk->next_chunk();
    chunk->SetFlag(MemoryChunk::ABOUT_TO_BE_FREED);

    if (chunk->owner()->identity() == LO_SPACE) {
      // StoreBuffer::Filter relies on MemoryChunk::FromAnyPointerAddress.
      // If FromAnyPointerAddress encounters a slot that belongs to a large
      // chunk queued for deletion it will fail to find the chunk because
      // it tries to perform a search in the list of pages owned by the large
      // object space, and queued chunks were detached from that list.
      // To work around this we split large chunks into normal kPageSize
      // aligned pieces and initialize the owner field and flags of every
      // piece. If FromAnyPointerAddress encounters a slot that belongs to one
      // of these smaller pieces it will treat it as a slot on a normal Page.
      MemoryChunk* inner = MemoryChunk::FromAddress(
          chunk->address() + Page::kPageSize);
      MemoryChunk* inner_last = MemoryChunk::FromAddress(
          chunk->address() + chunk->size() - 1);
      while (inner <= inner_last) {
        // Size of a large chunk is always a multiple of
        // OS::AllocationAlignment() so there is always
        // enough space for a fake MemoryChunk header.
        inner->set_owner(lo_space());
        inner->SetFlag(MemoryChunk::ABOUT_TO_BE_FREED);
        inner = MemoryChunk::FromAddress(
            inner->address() + Page::kPageSize);
      }
    }
  }
  isolate_->heap()->store_buffer()->Compact();
  isolate_->heap()->store_buffer()->Filter(MemoryChunk::ABOUT_TO_BE_FREED);
  for (chunk = chunks_queued_for_free_; chunk != NULL; chunk = next) {
    next = chunk->next_chunk();
    isolate_->memory_allocator()->Free(chunk);
  }
  chunks_queued_for_free_ = NULL;
}

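// Illustrative sketch (not part of V8 itself): chunks released during
// sweeping are only queued; the memory goes back to the allocator later,
// after the store buffer has been compacted and filtered, so no stale
// store-buffer slot can point into freed memory. "dead_chunk" is hypothetical.
#if 0
heap->QueueMemoryChunkForFree(dead_chunk);  // during sweeping
// ... later, at a safe point:
heap->FreeQueuedChunks();
#endif
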
} }  // namespace v8::internal