// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 #include "accessors.h"
32 #include "bootstrapper.h"
34 #include "compilation-cache.h"
36 #include "deoptimizer.h"
37 #include "global-handles.h"
38 #include "heap-profiler.h"
39 #include "incremental-marking.h"
40 #include "liveobjectlist-inl.h"
41 #include "mark-compact.h"
43 #include "objects-visiting.h"
44 #include "objects-visiting-inl.h"
46 #include "runtime-profiler.h"
47 #include "scopeinfo.h"
49 #include "store-buffer.h"
50 #include "v8threads.h"
51 #include "vm-state-inl.h"
#if V8_TARGET_ARCH_ARM && !V8_INTERPRETED_REGEXP
#include "regexp-macro-assembler.h"
#include "arm/regexp-macro-assembler-arm.h"
#endif
#if V8_TARGET_ARCH_MIPS && !V8_INTERPRETED_REGEXP
#include "regexp-macro-assembler.h"
#include "mips/regexp-macro-assembler-mips.h"
#endif

namespace v8 {
namespace internal {

Heap::Heap()
    : isolate_(NULL),
// semispace_size_ should be a power of 2 and old_generation_size_ should be
// a multiple of Page::kPageSize.
#if defined(ANDROID)
#define LUMP_OF_MEMORY (128 * KB)
      code_range_size_(0),
#elif defined(V8_TARGET_ARCH_X64)
#define LUMP_OF_MEMORY (2 * MB)
      code_range_size_(512 * MB),
#else
#define LUMP_OF_MEMORY MB
      code_range_size_(0),
#endif
      reserved_semispace_size_(8 * Max(LUMP_OF_MEMORY, Page::kPageSize)),
      max_semispace_size_(8 * Max(LUMP_OF_MEMORY, Page::kPageSize)),
      initial_semispace_size_(Page::kPageSize),
      max_old_generation_size_(700ul * LUMP_OF_MEMORY),
      max_executable_size_(256l * LUMP_OF_MEMORY),
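      // Worked example: on x64, LUMP_OF_MEMORY is 2 MB, so the values above
      // come out to 16 MB per semispace, a 1400 MB old-generation cap and a
      // 512 MB executable-code cap; on 32-bit targets the lump is 1 MB and
      // every limit shrinks accordingly.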

// Variables set based on semispace_size_ and old_generation_size_ in
// ConfigureHeap (survived_since_last_expansion_, external_allocation_limit_).
// Will be 4 * reserved_semispace_size_ to ensure that young
// generation can be aligned to its size.
      survived_since_last_expansion_(0),
      always_allocate_scope_depth_(0),
      linear_allocation_scope_depth_(0),
      contexts_disposed_(0),
      scan_on_scavenge_pages_(0),
      old_pointer_space_(NULL),
      old_data_space_(NULL),
      gc_state_(NOT_IN_GC),
      gc_post_processing_depth_(0),
      remembered_unmapped_pages_index_(0),
      unflattened_strings_length_(0),
#ifdef DEBUG
      allocation_allowed_(true),
      allocation_timeout_(0),
      disallow_allocation_failure_(false),
#endif  // DEBUG
      new_space_high_promotion_mode_active_(false),
      old_gen_promotion_limit_(kMinimumPromotionLimit),
      old_gen_allocation_limit_(kMinimumAllocationLimit),
      old_gen_limit_factor_(1),
      size_of_old_gen_at_last_old_space_gc_(0),
      external_allocation_limit_(0),
      amount_of_external_allocated_memory_(0),
      amount_of_external_allocated_memory_at_last_global_gc_(0),
      old_gen_exhausted_(false),
      store_buffer_rebuilder_(store_buffer()),
      hidden_symbol_(NULL),
      global_gc_prologue_callback_(NULL),
      global_gc_epilogue_callback_(NULL),
      gc_safe_size_of_old_object_(NULL),
      total_regexp_code_generated_(0),
      young_survivors_after_last_gc_(0),
      high_survival_rate_period_length_(0),
      previous_survival_rate_trend_(Heap::STABLE),
      survival_rate_trend_(Heap::STABLE),
      max_alive_after_gc_(0),
      min_in_mutator_(kMaxInt),
      alive_after_last_gc_(0),
      last_gc_end_timestamp_(0.0),
      incremental_marking_(this),
      number_idle_notifications_(0),
      last_idle_notification_gc_count_(0),
      last_idle_notification_gc_count_init_(false),
      mark_sweeps_since_idle_round_started_(0),
      ms_count_at_last_idle_notification_(0),
      gc_count_at_last_idle_gc_(0),
      scavenges_since_last_idle_round_(kIdleScavengeThreshold),
      promotion_queue_(this),
      chunks_queued_for_free_(NULL) {
  // Allow build-time customization of the max semispace size. Building
  // V8 with snapshots and a non-default max semispace size is much
  // easier if you can define it as part of the build environment.
#if defined(V8_MAX_SEMISPACE_SIZE)
  max_semispace_size_ = reserved_semispace_size_ = V8_MAX_SEMISPACE_SIZE;
#endif

  intptr_t max_virtual = OS::MaxVirtualMemory();

  if (max_virtual > 0) {
    if (code_range_size_ > 0) {
      // Reserve no more than 1/8 of the memory for the code range.
      code_range_size_ = Min(code_range_size_, max_virtual >> 3);
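      // Worked example: with 4 GB of addressable virtual memory,
      // max_virtual >> 3 caps the code range at 512 MB, leaving the default
      // x64 code_range_size_ unchanged; with 2 GB it would be clamped to
      // 256 MB.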
    }
  }

  memset(roots_, 0, sizeof(roots_[0]) * kRootListLength);
  global_contexts_list_ = NULL;
  mark_compact_collector_.heap_ = this;
  external_string_table_.heap_ = this;
  // Put a dummy entry in the remembered pages so we can find the list in
  // the minidump even if there are no real unmapped pages.
  RememberUnmappedPage(NULL, false);
}


intptr_t Heap::Capacity() {
  if (!HasBeenSetUp()) return 0;

  return new_space_.Capacity() +
      old_pointer_space_->Capacity() +
      old_data_space_->Capacity() +
      code_space_->Capacity() +
      map_space_->Capacity() +
      cell_space_->Capacity();
}


intptr_t Heap::CommittedMemory() {
  if (!HasBeenSetUp()) return 0;

  return new_space_.CommittedMemory() +
      old_pointer_space_->CommittedMemory() +
      old_data_space_->CommittedMemory() +
      code_space_->CommittedMemory() +
      map_space_->CommittedMemory() +
      cell_space_->CommittedMemory() +
      lo_space_->Size();
}


intptr_t Heap::CommittedMemoryExecutable() {
  if (!HasBeenSetUp()) return 0;

  return isolate()->memory_allocator()->SizeExecutable();
}


intptr_t Heap::Available() {
  if (!HasBeenSetUp()) return 0;

  return new_space_.Available() +
      old_pointer_space_->Available() +
      old_data_space_->Available() +
      code_space_->Available() +
      map_space_->Available() +
      cell_space_->Available();
}


bool Heap::HasBeenSetUp() {
  return old_pointer_space_ != NULL &&
         old_data_space_ != NULL &&
         code_space_ != NULL &&
         map_space_ != NULL &&
         cell_space_ != NULL &&
         lo_space_ != NULL;
}


int Heap::GcSafeSizeOfOldObject(HeapObject* object) {
  if (IntrusiveMarking::IsMarked(object)) {
    return IntrusiveMarking::SizeOfMarkedObject(object);
  }
  return object->SizeFromMap(object->map());
}


GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space,
                                              const char** reason) {
  // Is global GC requested?
  if (space != NEW_SPACE) {
    isolate_->counters()->gc_compactor_caused_by_request()->Increment();
    *reason = "GC in old space requested";
    return MARK_COMPACTOR;
  }

  if (FLAG_gc_global || (FLAG_stress_compaction && (gc_count_ & 1) != 0)) {
    *reason = "GC in old space forced by flags";
    return MARK_COMPACTOR;
  }

  // Is enough data promoted to justify a global GC?
  if (OldGenerationPromotionLimitReached()) {
    isolate_->counters()->gc_compactor_caused_by_promoted_data()->Increment();
    *reason = "promotion limit reached";
    return MARK_COMPACTOR;
  }

  // Have allocation in OLD and LO failed?
  if (old_gen_exhausted_) {
    isolate_->counters()->
        gc_compactor_caused_by_oldspace_exhaustion()->Increment();
    *reason = "old generations exhausted";
    return MARK_COMPACTOR;
  }

  // Is there enough space left in OLD to guarantee that a scavenge can
  // succeed?
  //
  // Note that MemoryAllocator->MaxAvailable() undercounts the memory available
  // for object promotion. It counts only the bytes that the memory
  // allocator has not yet allocated from the OS and assigned to any space,
  // and does not count available bytes already in the old space or code
  // space. Undercounting is safe---we may get an unrequested full GC when
  // a scavenge would have succeeded.
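  // For example, if the new space holds 8 MB of live objects but the
  // allocator has only 6 MB left to hand out, we conservatively fall back
  // to the mark-compactor even though old-space free lists might well have
  // absorbed the promoted objects.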
  if (isolate_->memory_allocator()->MaxAvailable() <= new_space_.Size()) {
    isolate_->counters()->
        gc_compactor_caused_by_oldspace_exhaustion()->Increment();
    *reason = "scavenge might not succeed";
    return MARK_COMPACTOR;
  }

  // Default
  *reason = NULL;
  return SCAVENGER;
}


// TODO(1238405): Combine the infrastructure for --heap-stats and
// --log-gc to avoid the complicated preprocessor and flag testing.
void Heap::ReportStatisticsBeforeGC() {
  // Heap::ReportHeapStatistics will also log NewSpace statistics when
  // --log-gc is set. The following logic is used to avoid double logging.
#ifdef DEBUG
  if (FLAG_heap_stats || FLAG_log_gc) new_space_.CollectStatistics();
  if (FLAG_heap_stats) {
    ReportHeapStatistics("Before GC");
  } else if (FLAG_log_gc) {
    new_space_.ReportStatistics();
  }
  if (FLAG_heap_stats || FLAG_log_gc) new_space_.ClearHistograms();
#else
  if (FLAG_log_gc) {
    new_space_.CollectStatistics();
    new_space_.ReportStatistics();
    new_space_.ClearHistograms();
  }
#endif  // DEBUG
}


void Heap::PrintShortHeapStatistics() {
  if (!FLAG_trace_gc_verbose) return;
  PrintF("Memory allocator, used: %8" V8_PTR_PREFIX "d"
         ", available: %8" V8_PTR_PREFIX "d\n",
         isolate_->memory_allocator()->Size(),
         isolate_->memory_allocator()->Available());
  PrintF("New space, used: %8" V8_PTR_PREFIX "d"
         ", available: %8" V8_PTR_PREFIX "d\n",
         Heap::new_space_.Size(),
         new_space_.Available());
  PrintF("Old pointers, used: %8" V8_PTR_PREFIX "d"
         ", available: %8" V8_PTR_PREFIX "d"
         ", waste: %8" V8_PTR_PREFIX "d\n",
         old_pointer_space_->Size(),
         old_pointer_space_->Available(),
         old_pointer_space_->Waste());
  PrintF("Old data space, used: %8" V8_PTR_PREFIX "d"
         ", available: %8" V8_PTR_PREFIX "d"
         ", waste: %8" V8_PTR_PREFIX "d\n",
         old_data_space_->Size(),
         old_data_space_->Available(),
         old_data_space_->Waste());
  PrintF("Code space, used: %8" V8_PTR_PREFIX "d"
         ", available: %8" V8_PTR_PREFIX "d"
         ", waste: %8" V8_PTR_PREFIX "d\n",
         code_space_->Size(),
         code_space_->Available(),
         code_space_->Waste());
  PrintF("Map space, used: %8" V8_PTR_PREFIX "d"
         ", available: %8" V8_PTR_PREFIX "d"
         ", waste: %8" V8_PTR_PREFIX "d\n",
         map_space_->Size(),
         map_space_->Available(),
         map_space_->Waste());
  PrintF("Cell space, used: %8" V8_PTR_PREFIX "d"
         ", available: %8" V8_PTR_PREFIX "d"
         ", waste: %8" V8_PTR_PREFIX "d\n",
         cell_space_->Size(),
         cell_space_->Available(),
         cell_space_->Waste());
  PrintF("Large object space, used: %8" V8_PTR_PREFIX "d"
         ", available: %8" V8_PTR_PREFIX "d\n",
         lo_space_->Size(),
         lo_space_->Available());
}


// TODO(1238405): Combine the infrastructure for --heap-stats and
// --log-gc to avoid the complicated preprocessor and flag testing.
void Heap::ReportStatisticsAfterGC() {
  // Similar to the before GC, we use some complicated logic to ensure that
  // NewSpace statistics are logged exactly once when --log-gc is turned on.
#ifdef DEBUG
  if (FLAG_heap_stats) {
    new_space_.CollectStatistics();
    ReportHeapStatistics("After GC");
  } else if (FLAG_log_gc) {
    new_space_.ReportStatistics();
  }
#else
  if (FLAG_log_gc) new_space_.ReportStatistics();
#endif  // DEBUG
}


void Heap::GarbageCollectionPrologue() {
  isolate_->transcendental_cache()->Clear();
  ClearJSFunctionResultCaches();
  gc_count_++;
  unflattened_strings_length_ = 0;
#ifdef DEBUG
  ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
  allow_allocation(false);

  if (FLAG_verify_heap) {
    Verify();
  }

  if (FLAG_gc_verbose) Print();

  ReportStatisticsBeforeGC();
#endif  // DEBUG

  LiveObjectList::GCPrologue();
  store_buffer()->GCPrologue();
}


intptr_t Heap::SizeOfObjects() {
  intptr_t total = 0;
  AllSpaces spaces;
  for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
    total += space->SizeOfObjects();
  }
  return total;
}


void Heap::GarbageCollectionEpilogue() {
  store_buffer()->GCEpilogue();
  LiveObjectList::GCEpilogue();
#ifdef DEBUG
  allow_allocation(true);

  if (FLAG_verify_heap) {
    Verify();
  }

  if (FLAG_print_global_handles) isolate_->global_handles()->Print();
  if (FLAG_print_handles) PrintHandles();
  if (FLAG_gc_verbose) Print();
  if (FLAG_code_stats) ReportCodeStatistics("After GC");
#endif  // DEBUG

  isolate_->counters()->alive_after_last_gc()->Set(
      static_cast<int>(SizeOfObjects()));

  isolate_->counters()->symbol_table_capacity()->Set(
      symbol_table()->Capacity());
  isolate_->counters()->number_of_symbols()->Set(
      symbol_table()->NumberOfElements());
#ifdef DEBUG
  ReportStatisticsAfterGC();
#endif  // DEBUG
#ifdef ENABLE_DEBUGGER_SUPPORT
  isolate_->debug()->AfterGarbageCollection();
#endif  // ENABLE_DEBUGGER_SUPPORT
}


void Heap::CollectAllGarbage(int flags, const char* gc_reason) {
  // Since we are ignoring the return value, the exact choice of space does
  // not matter, so long as we do not specify NEW_SPACE, which would not
  // cause a full GC.
  mark_compact_collector_.SetFlags(flags);
  CollectGarbage(OLD_POINTER_SPACE, gc_reason);
  mark_compact_collector_.SetFlags(kNoGCFlags);
}


void Heap::CollectAllAvailableGarbage(const char* gc_reason) {
  // Since we are ignoring the return value, the exact choice of space does
  // not matter, so long as we do not specify NEW_SPACE, which would not
  // cause a full GC.
  // A major GC would invoke weak handle callbacks on weakly reachable
  // handles, but won't collect weakly reachable objects until the next
  // major GC. Therefore, if we collect aggressively and a weak handle
  // callback has been invoked, we rerun the major GC to release objects
  // which become garbage.
  // Note: as weak callbacks can execute arbitrary code, we cannot
  // hope that eventually there will be no weak callback invocations.
  // Therefore stop recollecting after several attempts.
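  // For example, a weak handle whose callback drops the last strong
  // reference to a large cache: the collection that runs the callback
  // cannot also reclaim the cache, so a further mark-compact pass is
  // needed before that memory is actually released.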
  mark_compact_collector()->SetFlags(kMakeHeapIterableMask |
                                     kReduceMemoryFootprintMask);
  isolate_->compilation_cache()->Clear();
  const int kMaxNumberOfAttempts = 7;
  for (int attempt = 0; attempt < kMaxNumberOfAttempts; attempt++) {
    if (!CollectGarbage(OLD_POINTER_SPACE, MARK_COMPACTOR, gc_reason, NULL)) {
      break;
    }
  }
  mark_compact_collector()->SetFlags(kNoGCFlags);
  incremental_marking()->UncommitMarkingDeque();
}


bool Heap::CollectGarbage(AllocationSpace space,
                          GarbageCollector collector,
                          const char* gc_reason,
                          const char* collector_reason) {
  // The VM is in the GC state until exiting this function.
  VMState state(isolate_, GC);

#ifdef DEBUG
  // Reset the allocation timeout to the GC interval, but make sure to
  // allow at least a few allocations after a collection. The reason
  // for this is that we have a lot of allocation sequences and we
  // assume that a garbage collection will allow the subsequent
  // allocation attempts to go through.
  allocation_timeout_ = Max(6, FLAG_gc_interval);
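  // For example, with --gc-interval=3 this resets the timeout to
  // Max(6, 3) == 6, so at least six allocations succeed before the next
  // forced collection.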
#endif

  if (collector == SCAVENGER && !incremental_marking()->IsStopped()) {
    if (FLAG_trace_incremental_marking) {
      PrintF("[IncrementalMarking] Scavenge during marking.\n");
    }
  }

  if (collector == MARK_COMPACTOR &&
      !mark_compact_collector()->abort_incremental_marking_ &&
      !incremental_marking()->IsStopped() &&
      !incremental_marking()->should_hurry() &&
      FLAG_incremental_marking_steps) {
    // Make progress in incremental marking.
    const intptr_t kStepSizeWhenDelayedByScavenge = 1 * MB;
    incremental_marking()->Step(kStepSizeWhenDelayedByScavenge,
                                IncrementalMarking::NO_GC_VIA_STACK_GUARD);
    if (!incremental_marking()->IsComplete()) {
      if (FLAG_trace_incremental_marking) {
        PrintF("[IncrementalMarking] Delaying MarkSweep.\n");
      }
      collector = SCAVENGER;
      collector_reason = "incremental marking delaying mark-sweep";
    }
  }

  bool next_gc_likely_to_collect_more = false;

  { GCTracer tracer(this, gc_reason, collector_reason);
    GarbageCollectionPrologue();
    // The GC count was incremented in the prologue. Tell the tracer about
    // it.
    tracer.set_gc_count(gc_count_);

    // Tell the tracer which collector we've selected.
    tracer.set_collector(collector);

    HistogramTimer* rate = (collector == SCAVENGER)
        ? isolate_->counters()->gc_scavenger()
        : isolate_->counters()->gc_compactor();
    rate->Start();
    next_gc_likely_to_collect_more =
        PerformGarbageCollection(collector, &tracer);
    rate->Stop();

    GarbageCollectionEpilogue();
  }

  ASSERT(collector == SCAVENGER || incremental_marking()->IsStopped());
  if (incremental_marking()->IsStopped()) {
    if (incremental_marking()->WorthActivating() && NextGCIsLikelyToBeFull()) {
      incremental_marking()->Start();
    }
  }

  return next_gc_likely_to_collect_more;
}


void Heap::PerformScavenge() {
  GCTracer tracer(this, NULL, NULL);
  if (incremental_marking()->IsStopped()) {
    PerformGarbageCollection(SCAVENGER, &tracer);
  } else {
    PerformGarbageCollection(MARK_COMPACTOR, &tracer);
  }
}


#ifdef DEBUG
// Helper class for verifying the symbol table.
class SymbolTableVerifier : public ObjectVisitor {
 public:
  void VisitPointers(Object** start, Object** end) {
    // Visit all HeapObject pointers in [start, end).
    for (Object** p = start; p < end; p++) {
      if ((*p)->IsHeapObject()) {
        // Check that the symbol is actually a symbol.
        ASSERT((*p)->IsTheHole() || (*p)->IsUndefined() || (*p)->IsSymbol());
      }
    }
  }
};
#endif  // DEBUG


static void VerifySymbolTable() {
#ifdef DEBUG
  SymbolTableVerifier verifier;
  HEAP->symbol_table()->IterateElements(&verifier);
#endif  // DEBUG
}


static bool AbortIncrementalMarkingAndCollectGarbage(
    Heap* heap,
    AllocationSpace space,
    const char* gc_reason = NULL) {
  heap->mark_compact_collector()->SetFlags(Heap::kAbortIncrementalMarkingMask);
  bool result = heap->CollectGarbage(space, gc_reason);
  heap->mark_compact_collector()->SetFlags(Heap::kNoGCFlags);
  return result;
}


void Heap::ReserveSpace(
    int new_space_size,
    int pointer_space_size,
    int data_space_size,
    int code_space_size,
    int map_space_size,
    int cell_space_size,
    int large_object_size) {
  NewSpace* new_space = Heap::new_space();
  PagedSpace* old_pointer_space = Heap::old_pointer_space();
  PagedSpace* old_data_space = Heap::old_data_space();
  PagedSpace* code_space = Heap::code_space();
  PagedSpace* map_space = Heap::map_space();
  PagedSpace* cell_space = Heap::cell_space();
  LargeObjectSpace* lo_space = Heap::lo_space();
  bool gc_performed = true;
  int counter = 0;
  static const int kThreshold = 20;
  while (gc_performed && counter++ < kThreshold) {
    gc_performed = false;
    if (!new_space->ReserveSpace(new_space_size)) {
      Heap::CollectGarbage(NEW_SPACE,
                           "failed to reserve space in the new space");
      gc_performed = true;
    }
    if (!old_pointer_space->ReserveSpace(pointer_space_size)) {
      AbortIncrementalMarkingAndCollectGarbage(this, OLD_POINTER_SPACE,
          "failed to reserve space in the old pointer space");
      gc_performed = true;
    }
    if (!(old_data_space->ReserveSpace(data_space_size))) {
      AbortIncrementalMarkingAndCollectGarbage(this, OLD_DATA_SPACE,
          "failed to reserve space in the old data space");
      gc_performed = true;
    }
    if (!(code_space->ReserveSpace(code_space_size))) {
      AbortIncrementalMarkingAndCollectGarbage(this, CODE_SPACE,
          "failed to reserve space in the code space");
      gc_performed = true;
    }
    if (!(map_space->ReserveSpace(map_space_size))) {
      AbortIncrementalMarkingAndCollectGarbage(this, MAP_SPACE,
          "failed to reserve space in the map space");
      gc_performed = true;
    }
    if (!(cell_space->ReserveSpace(cell_space_size))) {
      AbortIncrementalMarkingAndCollectGarbage(this, CELL_SPACE,
          "failed to reserve space in the cell space");
      gc_performed = true;
    }
    // We add a slack-factor of 2 in order to have space for a series of
    // large-object allocations that are only just larger than the page size.
    large_object_size *= 2;
    // The ReserveSpace method on the large object space checks how much
    // we can expand the old generation. This includes expansion caused by
    // allocation in the other spaces.
    large_object_size += cell_space_size + map_space_size + code_space_size +
        data_space_size + pointer_space_size;
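    // Worked example (hypothetical sizes): reserving 512 KB in each of the
    // five paged spaces plus 1 MB of large objects asks the large object
    // space for 2 * 1 MB + 5 * 512 KB = 4.5 MB here.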
    if (!(lo_space->ReserveSpace(large_object_size))) {
      AbortIncrementalMarkingAndCollectGarbage(this, LO_SPACE,
          "failed to reserve space in the large object space");
      gc_performed = true;
    }
  }

  if (gc_performed) {
    // Failed to reserve the space after several attempts.
    V8::FatalProcessOutOfMemory("Heap::ReserveSpace");
  }
}


void Heap::EnsureFromSpaceIsCommitted() {
  if (new_space_.CommitFromSpaceIfNeeded()) return;

  // Committing memory to from space failed.
  // Try shrinking and try again.
  Shrink();
  if (new_space_.CommitFromSpaceIfNeeded()) return;

  // Committing memory to from space failed again.
  // Memory is exhausted and we will die.
  V8::FatalProcessOutOfMemory("Committing semi space failed.");
}


void Heap::ClearJSFunctionResultCaches() {
  if (isolate_->bootstrapper()->IsActive()) return;

  Object* context = global_contexts_list_;
  while (!context->IsUndefined()) {
    // Get the caches for this context. GC can happen when the context
    // is not fully initialized, so the caches can be undefined.
    Object* caches_or_undefined =
        Context::cast(context)->get(Context::JSFUNCTION_RESULT_CACHES_INDEX);
    if (!caches_or_undefined->IsUndefined()) {
      FixedArray* caches = FixedArray::cast(caches_or_undefined);
      // Clear the caches:
      int length = caches->length();
      for (int i = 0; i < length; i++) {
        JSFunctionResultCache::cast(caches->get(i))->Clear();
      }
    }
    // Get the next context:
    context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
  }
}


void Heap::ClearNormalizedMapCaches() {
  if (isolate_->bootstrapper()->IsActive() &&
      !incremental_marking()->IsMarking()) {
    return;
  }

  Object* context = global_contexts_list_;
  while (!context->IsUndefined()) {
    // GC can happen when the context is not fully initialized,
    // so the cache can be undefined.
    Object* cache =
        Context::cast(context)->get(Context::NORMALIZED_MAP_CACHE_INDEX);
    if (!cache->IsUndefined()) {
      NormalizedMapCache::cast(cache)->Clear();
    }
    context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
  }
}


void Heap::UpdateSurvivalRateTrend(int start_new_space_size) {
  double survival_rate =
      (static_cast<double>(young_survivors_after_last_gc_) * 100) /
      start_new_space_size;
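
  // Worked example: if 1 MB out of a 4 MB new space survived the last
  // scavenge, survival_rate is (1 * 100) / 4 = 25; the rate is kept as a
  // percentage, not as a fraction.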

  if (survival_rate > kYoungSurvivalRateHighThreshold) {
    high_survival_rate_period_length_++;
  } else {
    high_survival_rate_period_length_ = 0;
  }

  if (survival_rate < kYoungSurvivalRateLowThreshold) {
    low_survival_rate_period_length_++;
  } else {
    low_survival_rate_period_length_ = 0;
  }

  double survival_rate_diff = survival_rate_ - survival_rate;

  if (survival_rate_diff > kYoungSurvivalRateAllowedDeviation) {
    set_survival_rate_trend(DECREASING);
  } else if (survival_rate_diff < -kYoungSurvivalRateAllowedDeviation) {
    set_survival_rate_trend(INCREASING);
  } else {
    set_survival_rate_trend(STABLE);
  }

  survival_rate_ = survival_rate;
}


bool Heap::PerformGarbageCollection(GarbageCollector collector,
                                    GCTracer* tracer) {
  bool next_gc_likely_to_collect_more = false;

  if (collector != SCAVENGER) {
    PROFILE(isolate_, CodeMovingGCEvent());
  }

  if (FLAG_verify_heap) {
    VerifySymbolTable();
  }
  if (collector == MARK_COMPACTOR && global_gc_prologue_callback_) {
    ASSERT(!allocation_allowed_);
    GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
    global_gc_prologue_callback_();
  }

  GCType gc_type =
      collector == MARK_COMPACTOR ? kGCTypeMarkSweepCompact : kGCTypeScavenge;

  for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
    if (gc_type & gc_prologue_callbacks_[i].gc_type) {
      gc_prologue_callbacks_[i].callback(gc_type, kNoGCCallbackFlags);
    }
  }

  EnsureFromSpaceIsCommitted();

  int start_new_space_size = Heap::new_space()->SizeAsInt();

  if (IsHighSurvivalRate()) {
    // We speed up the incremental marker if it is running so that it
    // does not fall behind the rate of promotion, which would cause a
    // constantly growing old space.
    incremental_marking()->NotifyOfHighPromotionRate();
  }

  if (collector == MARK_COMPACTOR) {
    // Perform mark-sweep with optional compaction.
    MarkCompact(tracer);
    sweep_generation_++;
    bool high_survival_rate_during_scavenges = IsHighSurvivalRate() &&
        IsStableOrIncreasingSurvivalTrend();

    UpdateSurvivalRateTrend(start_new_space_size);

    size_of_old_gen_at_last_old_space_gc_ = PromotedSpaceSizeOfObjects();

    if (high_survival_rate_during_scavenges &&
        IsStableOrIncreasingSurvivalTrend()) {
      // Stable high survival rates of young objects both during partial and
      // full collection indicate that the mutator is either building or
      // modifying a structure with a long lifetime.
      // In this case we aggressively raise old generation memory limits to
      // postpone subsequent mark-sweep collection and thus trade memory
      // space for the mutation speed.
      old_gen_limit_factor_ = 2;
    } else {
      old_gen_limit_factor_ = 1;
    }

    old_gen_promotion_limit_ =
        OldGenPromotionLimit(size_of_old_gen_at_last_old_space_gc_);
    old_gen_allocation_limit_ =
        OldGenAllocationLimit(size_of_old_gen_at_last_old_space_gc_);

    old_gen_exhausted_ = false;
  } else {
    tracer_ = tracer;
    Scavenge();
    tracer_ = NULL;

    UpdateSurvivalRateTrend(start_new_space_size);
  }

  if (!new_space_high_promotion_mode_active_ &&
      new_space_.Capacity() == new_space_.MaximumCapacity() &&
      IsStableOrIncreasingSurvivalTrend() &&
      IsHighSurvivalRate()) {
    // Stable high survival rates even though the young generation is at
    // maximum capacity indicate that most objects will be promoted.
    // To decrease scavenger pauses and final mark-sweep pauses, we
    // have to limit the maximal capacity of the young generation.
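    // A typical trigger (illustrative): a mutator that allocates almost
    // exclusively long-lived objects, so nearly all of a full-capacity new
    // space survives every scavenge.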
    new_space_high_promotion_mode_active_ = true;
    if (FLAG_gc_verbose) {
      PrintF("Limited new space size due to high promotion rate: %d MB\n",
             new_space_.InitialCapacity() / MB);
    }
  } else if (new_space_high_promotion_mode_active_ &&
             IsStableOrDecreasingSurvivalTrend() &&
             IsLowSurvivalRate()) {
    // Decreasing low survival rates might indicate that the above high
    // promotion mode is over and we should allow the young generation
    // to grow again.
    new_space_high_promotion_mode_active_ = false;
    if (FLAG_gc_verbose) {
      PrintF("Unlimited new space size due to low promotion rate: %d MB\n",
             new_space_.MaximumCapacity() / MB);
    }
  }

  if (new_space_high_promotion_mode_active_ &&
      new_space_.Capacity() > new_space_.InitialCapacity()) {
    new_space_.Shrink();
  }

  isolate_->counters()->objs_since_last_young()->Set(0);

  gc_post_processing_depth_++;
  { DisableAssertNoAllocation allow_allocation;
    GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
    next_gc_likely_to_collect_more =
        isolate_->global_handles()->PostGarbageCollectionProcessing(collector);
  }
  gc_post_processing_depth_--;

  // Update relocatables.
  Relocatable::PostGarbageCollectionProcessing();

  if (collector == MARK_COMPACTOR) {
    // Register the amount of external allocated memory.
    amount_of_external_allocated_memory_at_last_global_gc_ =
        amount_of_external_allocated_memory_;
  }

  GCCallbackFlags callback_flags = kNoGCCallbackFlags;
  for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
    if (gc_type & gc_epilogue_callbacks_[i].gc_type) {
      gc_epilogue_callbacks_[i].callback(gc_type, callback_flags);
    }
  }

  if (collector == MARK_COMPACTOR && global_gc_epilogue_callback_) {
    ASSERT(!allocation_allowed_);
    GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
    global_gc_epilogue_callback_();
  }
  if (FLAG_verify_heap) {
    VerifySymbolTable();
  }

  return next_gc_likely_to_collect_more;
}


void Heap::MarkCompact(GCTracer* tracer) {
  gc_state_ = MARK_COMPACT;
  LOG(isolate_, ResourceEvent("markcompact", "begin"));

  mark_compact_collector_.Prepare(tracer);

  ms_count_++;
  tracer->set_full_gc_count(ms_count_);

  MarkCompactPrologue();

  mark_compact_collector_.CollectGarbage();

  LOG(isolate_, ResourceEvent("markcompact", "end"));

  gc_state_ = NOT_IN_GC;

  isolate_->counters()->objs_since_last_full()->Set(0);

  contexts_disposed_ = 0;

  isolate_->set_context_exit_happened(false);
}


void Heap::MarkCompactPrologue() {
  // At any old GC clear the keyed lookup cache to enable collection of unused
  // maps.
  isolate_->keyed_lookup_cache()->Clear();
  isolate_->context_slot_cache()->Clear();
  isolate_->descriptor_lookup_cache()->Clear();
  StringSplitCache::Clear(string_split_cache());

  isolate_->compilation_cache()->MarkCompactPrologue();

  CompletelyClearInstanceofCache();

  FlushNumberStringCache();
  if (FLAG_cleanup_code_caches_at_gc) {
    polymorphic_code_cache()->set_cache(undefined_value());
  }

  ClearNormalizedMapCaches();
}


Object* Heap::FindCodeObject(Address a) {
  return isolate()->inner_pointer_to_code_cache()->
      GcSafeFindCodeForInnerPointer(a);
}


// Helper class for copying HeapObjects
class ScavengeVisitor : public ObjectVisitor {
 public:
  explicit ScavengeVisitor(Heap* heap) : heap_(heap) {}

  void VisitPointer(Object** p) { ScavengePointer(p); }

  void VisitPointers(Object** start, Object** end) {
    // Copy all HeapObject pointers in [start, end)
    for (Object** p = start; p < end; p++) ScavengePointer(p);
  }

 private:
  void ScavengePointer(Object** p) {
    Object* object = *p;
    if (!heap_->InNewSpace(object)) return;
    Heap::ScavengeObject(reinterpret_cast<HeapObject**>(p),
                         reinterpret_cast<HeapObject*>(object));
  }

  Heap* heap_;
};


#ifdef DEBUG
// Visitor class to verify pointers in code or data space do not point into
// new space.
class VerifyNonPointerSpacePointersVisitor : public ObjectVisitor {
 public:
  void VisitPointers(Object** start, Object** end) {
    for (Object** current = start; current < end; current++) {
      if ((*current)->IsHeapObject()) {
        ASSERT(!HEAP->InNewSpace(HeapObject::cast(*current)));
      }
    }
  }
};


static void VerifyNonPointerSpacePointers() {
  // Verify that there are no pointers to new space in spaces where we
  // do not expect them.
  VerifyNonPointerSpacePointersVisitor v;
  HeapObjectIterator code_it(HEAP->code_space());
  for (HeapObject* object = code_it.Next();
       object != NULL; object = code_it.Next())
    object->Iterate(&v);

  // The old data space is normally swept conservatively, in which case the
  // iterator does not work, so we skip the check for it.
  if (!HEAP->old_data_space()->was_swept_conservatively()) {
    HeapObjectIterator data_it(HEAP->old_data_space());
    for (HeapObject* object = data_it.Next();
         object != NULL; object = data_it.Next())
      object->Iterate(&v);
  }
}
#endif  // DEBUG


void Heap::CheckNewSpaceExpansionCriteria() {
  if (new_space_.Capacity() < new_space_.MaximumCapacity() &&
      survived_since_last_expansion_ > new_space_.Capacity() &&
      !new_space_high_promotion_mode_active_) {
    // Grow the size of new space if there is room to grow, enough data
    // has survived scavenge since the last expansion and we are not in
    // high promotion mode.
    new_space_.Grow();
    survived_since_last_expansion_ = 0;
  }
}


static bool IsUnscavengedHeapObject(Heap* heap, Object** p) {
  return heap->InNewSpace(*p) &&
         !HeapObject::cast(*p)->map_word().IsForwardingAddress();
}


void Heap::ScavengeStoreBufferCallback(
    Heap* heap,
    MemoryChunk* page,
    StoreBufferEvent event) {
  heap->store_buffer_rebuilder_.Callback(page, event);
}


void StoreBufferRebuilder::Callback(MemoryChunk* page, StoreBufferEvent event) {
  if (event == kStoreBufferStartScanningPagesEvent) {
    start_of_current_page_ = NULL;
    current_page_ = NULL;
  } else if (event == kStoreBufferScanningPageEvent) {
    if (current_page_ != NULL) {
      // If this page already overflowed the store buffer during this
      // iteration...
      if (current_page_->scan_on_scavenge()) {
        // ...then we should wipe out the entries that have been added for it.
        store_buffer_->SetTop(start_of_current_page_);
      } else if (store_buffer_->Top() - start_of_current_page_ >=
                 (store_buffer_->Limit() - store_buffer_->Top()) >> 2) {
        // Did we find too many pointers in the previous page? The heuristic
        // is that no page can take more than 1/5 of the remaining slots in
        // the store buffer.
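        // Worked example (hypothetical numbers): if 1000 slots remain
        // between Top() and Limit() and the page just scanned contributed
        // 250 entries, then 250 >= (1000 >> 2) holds; the page consumed
        // 250 of the 1250 slots that were free when its scan began, i.e.
        // exactly 1/5, so it is demoted to scan-on-scavenge.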
        current_page_->set_scan_on_scavenge(true);
        store_buffer_->SetTop(start_of_current_page_);
      } else {
        // In this case the page we scanned took a reasonable number of slots
        // in the store buffer. It has now been rehabilitated and is no longer
        // marked scan_on_scavenge.
        ASSERT(!current_page_->scan_on_scavenge());
      }
    }
    start_of_current_page_ = store_buffer_->Top();
    current_page_ = page;
  } else if (event == kStoreBufferFullEvent) {
    // The current page overflowed the store buffer again. Wipe out its entries
    // in the store buffer and mark it scan-on-scavenge again. This may happen
    // several times while scanning.
    if (current_page_ == NULL) {
      // Store Buffer overflowed while scanning promoted objects. These are not
      // in any particular page, though they are likely to be clustered by the
      // allocation routines.
      store_buffer_->EnsureSpace(StoreBuffer::kStoreBufferSize);
    } else {
      // Store Buffer overflowed while scanning a particular old space page for
      // pointers to new space.
      ASSERT(current_page_ == page);
      ASSERT(page != NULL);
      current_page_->set_scan_on_scavenge(true);
      ASSERT(start_of_current_page_ != store_buffer_->Top());
      store_buffer_->SetTop(start_of_current_page_);
    }
  }
}


void PromotionQueue::Initialize() {
  // Assumes that a NewSpacePage exactly fits a number of promotion queue
  // entries (where each is a pair of intptr_t). This allows us to simplify
  // the test for when to switch pages.
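  // Worked example (illustrative numbers): on a 64-bit build an entry is
  // 2 * 8 = 16 bytes, so as long as the usable page body is a multiple of
  // 16 bytes it holds a whole number of entries and the queue head never
  // straddles a page boundary.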
  ASSERT((Page::kPageSize - MemoryChunk::kBodyOffset) % (2 * kPointerSize)
         == 0);
  limit_ = reinterpret_cast<intptr_t*>(heap_->new_space()->ToSpaceStart());
  front_ = rear_ =
      reinterpret_cast<intptr_t*>(heap_->new_space()->ToSpaceEnd());
  emergency_stack_ = NULL;
}


void PromotionQueue::RelocateQueueHead() {
  ASSERT(emergency_stack_ == NULL);

  Page* p = Page::FromAllocationTop(reinterpret_cast<Address>(rear_));
  intptr_t* head_start = rear_;
  intptr_t* head_end =
      Min(front_, reinterpret_cast<intptr_t*>(p->area_end()));

  int entries_count =
      static_cast<int>(head_end - head_start) / kEntrySizeInWords;

  emergency_stack_ = new List<Entry>(2 * entries_count);

  while (head_start != head_end) {
    int size = static_cast<int>(*(head_start++));
    HeapObject* obj = reinterpret_cast<HeapObject*>(*(head_start++));
    emergency_stack_->Add(Entry(obj, size));
  }
  rear_ = head_end;
}


class ScavengeWeakObjectRetainer : public WeakObjectRetainer {
 public:
  explicit ScavengeWeakObjectRetainer(Heap* heap) : heap_(heap) { }

  virtual Object* RetainAs(Object* object) {
    if (!heap_->InFromSpace(object)) {
      return object;
    }

    MapWord map_word = HeapObject::cast(object)->map_word();
    if (map_word.IsForwardingAddress()) {
      return map_word.ToForwardingAddress();
    }
    return NULL;
  }

 private:
  Heap* heap_;
};


void Heap::Scavenge() {
#ifdef DEBUG
  if (FLAG_verify_heap) VerifyNonPointerSpacePointers();
#endif

  gc_state_ = SCAVENGE;

  // Implements Cheney's copying algorithm
  LOG(isolate_, ResourceEvent("scavenge", "begin"));

  // Clear descriptor cache.
  isolate_->descriptor_lookup_cache()->Clear();

  // Used for updating survived_since_last_expansion_ at function end.
  intptr_t survived_watermark = PromotedSpaceSizeOfObjects();

  CheckNewSpaceExpansionCriteria();

  SelectScavengingVisitorsTable();

  incremental_marking()->PrepareForScavenge();

  AdvanceSweepers(static_cast<int>(new_space_.Size()));

  // Flip the semispaces. After flipping, to space is empty, from space has
  // live objects.
  new_space_.Flip();
  new_space_.ResetAllocationInfo();

  // We need to sweep newly copied objects which can be either in the
  // to space or promoted to the old generation. For to-space
  // objects, we treat the bottom of the to space as a queue. Newly
  // copied and unswept objects lie between a 'front' mark and the
  // allocation pointer.
  //
  // Promoted objects can go into various old-generation spaces, and
  // can be allocated internally in the spaces (from the free list).
  // We treat the top of the to space as a queue of addresses of
  // promoted objects. The addresses of newly promoted and unswept
  // objects lie between a 'front' mark and a 'rear' mark that is
  // updated as a side effect of promoting an object.
  //
  // There is guaranteed to be enough room at the top of the to space
  // for the addresses of promoted objects: every object promoted
  // frees up its size in bytes from the top of the new space, and
  // objects are at least one pointer in size.
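  //
  // Sketch (illustrative) of the to space mid-scavenge; addresses grow to
  // the right:
  //
  //   bottom       new_space_front         top            rear_       end
  //     | swept objects | copied, unswept | ...free... | promoted entries |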
  Address new_space_front = new_space_.ToSpaceStart();
  promotion_queue_.Initialize();

#ifdef DEBUG
  store_buffer()->Clean();
#endif

  ScavengeVisitor scavenge_visitor(this);
  // Copy roots.
  IterateRoots(&scavenge_visitor, VISIT_ALL_IN_SCAVENGE);

  // Copy objects reachable from the old generation.
  {
    StoreBufferRebuildScope scope(this,
                                  store_buffer(),
                                  &ScavengeStoreBufferCallback);
    store_buffer()->IteratePointersToNewSpace(&ScavengeObject);
  }

  // Copy objects reachable from cells by scavenging cell values directly.
  HeapObjectIterator cell_iterator(cell_space_);
  for (HeapObject* cell = cell_iterator.Next();
       cell != NULL; cell = cell_iterator.Next()) {
    if (cell->IsJSGlobalPropertyCell()) {
      Address value_address =
          reinterpret_cast<Address>(cell) +
          (JSGlobalPropertyCell::kValueOffset - kHeapObjectTag);
      scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(value_address));
    }
  }

  // Scavenge object reachable from the global contexts list directly.
  scavenge_visitor.VisitPointer(BitCast<Object**>(&global_contexts_list_));

  new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
  isolate_->global_handles()->IdentifyNewSpaceWeakIndependentHandles(
      &IsUnscavengedHeapObject);
  isolate_->global_handles()->IterateNewSpaceWeakIndependentRoots(
      &scavenge_visitor);
  new_space_front = DoScavenge(&scavenge_visitor, new_space_front);

  UpdateNewSpaceReferencesInExternalStringTable(
      &UpdateNewSpaceReferenceInExternalStringTableEntry);

  promotion_queue_.Destroy();

  LiveObjectList::UpdateReferencesForScavengeGC();
  if (!FLAG_watch_ic_patching) {
    isolate()->runtime_profiler()->UpdateSamplesAfterScavenge();
  }
  incremental_marking()->UpdateMarkingDequeAfterScavenge();

  ScavengeWeakObjectRetainer weak_object_retainer(this);
  ProcessWeakReferences(&weak_object_retainer);

  ASSERT(new_space_front == new_space_.top());

  // Set age mark.
  new_space_.set_age_mark(new_space_.top());

  new_space_.LowerInlineAllocationLimit(
      new_space_.inline_allocation_limit_step());

  // Update how much has survived scavenge.
  IncrementYoungSurvivorsCounter(static_cast<int>(
      (PromotedSpaceSizeOfObjects() - survived_watermark) + new_space_.Size()));

  LOG(isolate_, ResourceEvent("scavenge", "end"));

  gc_state_ = NOT_IN_GC;

  scavenges_since_last_idle_round_++;
}


String* Heap::UpdateNewSpaceReferenceInExternalStringTableEntry(Heap* heap,
                                                                Object** p) {
  MapWord first_word = HeapObject::cast(*p)->map_word();

  if (!first_word.IsForwardingAddress()) {
    // An unreachable external string can be finalized.
    heap->FinalizeExternalString(String::cast(*p));
    return NULL;
  }

  // String is still reachable.
  return String::cast(first_word.ToForwardingAddress());
}


void Heap::UpdateNewSpaceReferencesInExternalStringTable(
    ExternalStringTableUpdaterCallback updater_func) {
  if (FLAG_verify_heap) {
    external_string_table_.Verify();
  }

  if (external_string_table_.new_space_strings_.is_empty()) return;

  Object** start = &external_string_table_.new_space_strings_[0];
  Object** end = start + external_string_table_.new_space_strings_.length();
  Object** last = start;

  for (Object** p = start; p < end; ++p) {
    ASSERT(InFromSpace(*p));
    String* target = updater_func(this, p);

    if (target == NULL) continue;

    ASSERT(target->IsExternalString());

    if (InNewSpace(target)) {
      // String is still in new space. Update the table entry.
      *last = target;
      ++last;
    } else {
      // String got promoted. Move it to the old string list.
      external_string_table_.AddOldString(target);
    }
  }

  ASSERT(last <= end);
  external_string_table_.ShrinkNewStrings(static_cast<int>(last - start));
}


void Heap::UpdateReferencesInExternalStringTable(
    ExternalStringTableUpdaterCallback updater_func) {
  // Update old space string references.
  if (external_string_table_.old_space_strings_.length() > 0) {
    Object** start = &external_string_table_.old_space_strings_[0];
    Object** end = start + external_string_table_.old_space_strings_.length();
    for (Object** p = start; p < end; ++p) *p = updater_func(this, p);
  }

  UpdateNewSpaceReferencesInExternalStringTable(updater_func);
}


static Object* ProcessFunctionWeakReferences(Heap* heap,
                                             Object* function,
                                             WeakObjectRetainer* retainer,
                                             bool record_slots) {
  Object* undefined = heap->undefined_value();
  Object* head = undefined;
  JSFunction* tail = NULL;
  Object* candidate = function;
  while (candidate != undefined) {
    // Check whether to keep the candidate in the list.
    JSFunction* candidate_function = reinterpret_cast<JSFunction*>(candidate);
    Object* retain = retainer->RetainAs(candidate);
    if (retain != NULL) {
      if (head == undefined) {
        // First element in the list.
        head = retain;
      } else {
        // Subsequent elements in the list.
        ASSERT(tail != NULL);
        tail->set_next_function_link(retain);
        if (record_slots) {
          Object** next_function =
              HeapObject::RawField(tail, JSFunction::kNextFunctionLinkOffset);
          heap->mark_compact_collector()->RecordSlot(
              next_function, next_function, retain);
        }
      }
      // Retained function is new tail.
      candidate_function = reinterpret_cast<JSFunction*>(retain);
      tail = candidate_function;

      ASSERT(retain->IsUndefined() || retain->IsJSFunction());

      if (retain == undefined) break;
    }

    // Move to next element in the list.
    candidate = candidate_function->next_function_link();
  }

  // Terminate the list if there are one or more elements.
  if (tail != NULL) {
    tail->set_next_function_link(undefined);
  }

  return head;
}


void Heap::ProcessWeakReferences(WeakObjectRetainer* retainer) {
  Object* undefined = undefined_value();
  Object* head = undefined;
  Context* tail = NULL;
  Object* candidate = global_contexts_list_;

  // We don't record weak slots during marking or scavenges.
  // Instead we do it once when we complete mark-compact cycle.
  // Note that the write barrier has no effect if we are already in the middle
  // of a compacting mark-sweep cycle, so we have to record slots manually.
  bool record_slots =
      gc_state() == MARK_COMPACT &&
      mark_compact_collector()->is_compacting();

  while (candidate != undefined) {
    // Check whether to keep the candidate in the list.
    Context* candidate_context = reinterpret_cast<Context*>(candidate);
    Object* retain = retainer->RetainAs(candidate);
    if (retain != NULL) {
      if (head == undefined) {
        // First element in the list.
        head = retain;
      } else {
        // Subsequent elements in the list.
        ASSERT(tail != NULL);
        tail->set_unchecked(this,
                            Context::NEXT_CONTEXT_LINK,
                            retain,
                            UPDATE_WRITE_BARRIER);

        if (record_slots) {
          Object** next_context =
              HeapObject::RawField(
                  tail, FixedArray::SizeFor(Context::NEXT_CONTEXT_LINK));
          mark_compact_collector()->RecordSlot(
              next_context, next_context, retain);
        }
      }
      // Retained context is new tail.
      candidate_context = reinterpret_cast<Context*>(retain);
      tail = candidate_context;

      if (retain == undefined) break;

      // Process the weak list of optimized functions for the context.
      Object* function_list_head =
          ProcessFunctionWeakReferences(
              this,
              candidate_context->get(Context::OPTIMIZED_FUNCTIONS_LIST),
              retainer,
              record_slots);
      candidate_context->set_unchecked(this,
                                       Context::OPTIMIZED_FUNCTIONS_LIST,
                                       function_list_head,
                                       UPDATE_WRITE_BARRIER);
      if (record_slots) {
        Object** optimized_functions =
            HeapObject::RawField(
                tail, FixedArray::SizeFor(Context::OPTIMIZED_FUNCTIONS_LIST));
        mark_compact_collector()->RecordSlot(
            optimized_functions, optimized_functions, function_list_head);
      }
    }

    // Move to next element in the list.
    candidate = candidate_context->get(Context::NEXT_CONTEXT_LINK);
  }

  // Terminate the list if there are one or more elements.
  if (tail != NULL) {
    tail->set_unchecked(this,
                        Context::NEXT_CONTEXT_LINK,
                        Heap::undefined_value(),
                        UPDATE_WRITE_BARRIER);
  }

  // Update the head of the list of contexts.
  global_contexts_list_ = head;
}


void Heap::VisitExternalResources(v8::ExternalResourceVisitor* visitor) {
  AssertNoAllocation no_allocation;

  class VisitorAdapter : public ObjectVisitor {
   public:
    explicit VisitorAdapter(v8::ExternalResourceVisitor* visitor)
        : visitor_(visitor) {}
    virtual void VisitPointers(Object** start, Object** end) {
      for (Object** p = start; p < end; p++) {
        if ((*p)->IsExternalString()) {
          visitor_->VisitExternalString(Utils::ToLocal(
              Handle<String>(String::cast(*p))));
        }
      }
    }
   private:
    v8::ExternalResourceVisitor* visitor_;
  } visitor_adapter(visitor);
  external_string_table_.Iterate(&visitor_adapter);
}


class NewSpaceScavenger : public StaticNewSpaceVisitor<NewSpaceScavenger> {
 public:
  static inline void VisitPointer(Heap* heap, Object** p) {
    Object* object = *p;
    if (!heap->InNewSpace(object)) return;
    Heap::ScavengeObject(reinterpret_cast<HeapObject**>(p),
                         reinterpret_cast<HeapObject*>(object));
  }
};


Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
                         Address new_space_front) {
  do {
    SemiSpace::AssertValidRange(new_space_front, new_space_.top());
    // The addresses new_space_front and new_space_.top() define a
    // queue of unprocessed copied objects. Process them until the
    // queue is empty.
    while (new_space_front != new_space_.top()) {
      if (!NewSpacePage::IsAtEnd(new_space_front)) {
        HeapObject* object = HeapObject::FromAddress(new_space_front);
        new_space_front +=
            NewSpaceScavenger::IterateBody(object->map(), object);
      } else {
        new_space_front =
            NewSpacePage::FromLimit(new_space_front)->next_page()->area_start();
      }
    }

    // Promote and process all the to-be-promoted objects.
    {
      StoreBufferRebuildScope scope(this,
                                    store_buffer(),
                                    &ScavengeStoreBufferCallback);
      while (!promotion_queue()->is_empty()) {
        HeapObject* target;
        int size;
        promotion_queue()->remove(&target, &size);

        // Promoted object might be already partially visited
        // during old space pointer iteration. Thus we search specifically
        // for pointers to from semispace instead of looking for pointers
        // to new space.
        ASSERT(!target->IsMap());
        IterateAndMarkPointersToFromSpace(target->address(),
                                          target->address() + size,
                                          &ScavengeObject);
      }
    }

    // Take another spin if there are now unswept objects in new space
    // (there are currently no more unswept promoted objects).
  } while (new_space_front != new_space_.top());

  return new_space_front;
}


STATIC_ASSERT((FixedDoubleArray::kHeaderSize & kDoubleAlignmentMask) == 0);


INLINE(static HeapObject* EnsureDoubleAligned(Heap* heap,
                                              HeapObject* object,
                                              int size));

static HeapObject* EnsureDoubleAligned(Heap* heap,
                                       HeapObject* object,
                                       int size) {
  if ((OffsetFrom(object->address()) & kDoubleAlignmentMask) != 0) {
    heap->CreateFillerObjectAt(object->address(), kPointerSize);
    return HeapObject::FromAddress(object->address() + kPointerSize);
  } else {
    heap->CreateFillerObjectAt(object->address() + size - kPointerSize,
                               kPointerSize);
    return object;
  }
}
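
// Worked example (assuming a 32-bit build, kPointerSize == 4): an object
// allocated at an address ending in ...4 is misaligned for doubles, so a
// one-word filler is written there and the object really starts one word
// later; an object already at an 8-byte boundary keeps its address and the
// spare word at its end becomes the filler.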


enum LoggingAndProfiling {
  LOGGING_AND_PROFILING_ENABLED,
  LOGGING_AND_PROFILING_DISABLED
};


enum MarksHandling { TRANSFER_MARKS, IGNORE_MARKS };


template<MarksHandling marks_handling,
         LoggingAndProfiling logging_and_profiling_mode>
class ScavengingVisitor : public StaticVisitorBase {
 public:
  static void Initialize() {
    table_.Register(kVisitSeqAsciiString, &EvacuateSeqAsciiString);
    table_.Register(kVisitSeqTwoByteString, &EvacuateSeqTwoByteString);
    table_.Register(kVisitShortcutCandidate, &EvacuateShortcutCandidate);
    table_.Register(kVisitByteArray, &EvacuateByteArray);
    table_.Register(kVisitFixedArray, &EvacuateFixedArray);
    table_.Register(kVisitFixedDoubleArray, &EvacuateFixedDoubleArray);

    table_.Register(kVisitGlobalContext,
                    &ObjectEvacuationStrategy<POINTER_OBJECT>::
                        template VisitSpecialized<Context::kSize>);

    table_.Register(kVisitConsString,
                    &ObjectEvacuationStrategy<POINTER_OBJECT>::
                        template VisitSpecialized<ConsString::kSize>);

    table_.Register(kVisitSlicedString,
                    &ObjectEvacuationStrategy<POINTER_OBJECT>::
                        template VisitSpecialized<SlicedString::kSize>);

    table_.Register(kVisitSharedFunctionInfo,
                    &ObjectEvacuationStrategy<POINTER_OBJECT>::
                        template VisitSpecialized<SharedFunctionInfo::kSize>);

    table_.Register(kVisitJSWeakMap,
                    &ObjectEvacuationStrategy<POINTER_OBJECT>::Visit);

    table_.Register(kVisitJSRegExp,
                    &ObjectEvacuationStrategy<POINTER_OBJECT>::Visit);

    if (marks_handling == IGNORE_MARKS) {
      table_.Register(kVisitJSFunction,
                      &ObjectEvacuationStrategy<POINTER_OBJECT>::
                          template VisitSpecialized<JSFunction::kSize>);
    } else {
      table_.Register(kVisitJSFunction, &EvacuateJSFunction);
    }

    table_.RegisterSpecializations<ObjectEvacuationStrategy<DATA_OBJECT>,
                                   kVisitDataObject,
                                   kVisitDataObjectGeneric>();

    table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>,
                                   kVisitJSObject,
                                   kVisitJSObjectGeneric>();

    table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>,
                                   kVisitStruct,
                                   kVisitStructGeneric>();
  }

  static VisitorDispatchTable<ScavengingCallback>* GetTable() {
    return &table_;
  }

 private:
  enum ObjectContents { DATA_OBJECT, POINTER_OBJECT };
  enum SizeRestriction { SMALL, UNKNOWN_SIZE };

  static void RecordCopiedObject(Heap* heap, HeapObject* obj) {
    bool should_record = false;
#ifdef DEBUG
    should_record = FLAG_heap_stats;
#endif
    should_record = should_record || FLAG_log_gc;
    if (should_record) {
      if (heap->new_space()->Contains(obj)) {
        heap->new_space()->RecordAllocation(obj);
      } else {
        heap->new_space()->RecordPromotion(obj);
      }
    }
  }

  // Helper function used by CopyObject to copy a source object to an
  // allocated target object and update the forwarding pointer in the source
  // object.
  INLINE(static void MigrateObject(Heap* heap,
                                   HeapObject* source,
                                   HeapObject* target,
                                   int size)) {
    // Copy the content of source to target.
    heap->CopyBlock(target->address(), source->address(), size);

    // Set the forwarding address.
    source->set_map_word(MapWord::FromForwardingAddress(target));

    if (logging_and_profiling_mode == LOGGING_AND_PROFILING_ENABLED) {
      // Update NewSpace stats if necessary.
      RecordCopiedObject(heap, target);
      HEAP_PROFILE(heap, ObjectMoveEvent(source->address(), target->address()));
      Isolate* isolate = heap->isolate();
      if (isolate->logger()->is_logging() ||
          CpuProfiler::is_profiling(isolate)) {
        if (target->IsSharedFunctionInfo()) {
          PROFILE(isolate, SharedFunctionInfoMoveEvent(
              source->address(), target->address()));
        }
      }
    }

    if (marks_handling == TRANSFER_MARKS) {
      if (Marking::TransferColor(source, target)) {
        MemoryChunk::IncrementLiveBytesFromGC(target->address(), size);
      }
    }
  }

  template<ObjectContents object_contents,
           SizeRestriction size_restriction,
           int alignment>
  static inline void EvacuateObject(Map* map,
                                    HeapObject** slot,
                                    HeapObject* object,
                                    int object_size) {
    SLOW_ASSERT((size_restriction != SMALL) ||
                (object_size <= Page::kMaxNonCodeHeapObjectSize));
    SLOW_ASSERT(object->Size() == object_size);

    int allocation_size = object_size;
    if (alignment != kObjectAlignment) {
      ASSERT(alignment == kDoubleAlignment);
      allocation_size += kPointerSize;
    }
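    // Example: a FixedDoubleArray evacuated on a 32-bit target requests one
    // extra word here so that EnsureDoubleAligned can either skip the first
    // word or leave the last word as filler.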

    Heap* heap = map->GetHeap();
    if (heap->ShouldBePromoted(object->address(), object_size)) {
      MaybeObject* maybe_result;

      if ((size_restriction != SMALL) &&
          (allocation_size > Page::kMaxNonCodeHeapObjectSize)) {
        maybe_result = heap->lo_space()->AllocateRaw(allocation_size,
                                                     NOT_EXECUTABLE);
      } else {
        if (object_contents == DATA_OBJECT) {
          maybe_result = heap->old_data_space()->AllocateRaw(allocation_size);
        } else {
          maybe_result =
              heap->old_pointer_space()->AllocateRaw(allocation_size);
        }
      }

      Object* result = NULL;  // Initialization to please compiler.
      if (maybe_result->ToObject(&result)) {
        HeapObject* target = HeapObject::cast(result);

        if (alignment != kObjectAlignment) {
          target = EnsureDoubleAligned(heap, target, allocation_size);
        }

        // Order is important: slot might be inside of the target if target
        // was allocated over a dead object and slot comes from the store
        // buffer.
        *slot = target;
        MigrateObject(heap, object, target, object_size);

        if (object_contents == POINTER_OBJECT) {
          if (map->instance_type() == JS_FUNCTION_TYPE) {
            heap->promotion_queue()->insert(
                target, JSFunction::kNonWeakFieldsEndOffset);
          } else {
            heap->promotion_queue()->insert(target, object_size);
          }
        }

        heap->tracer()->increment_promoted_objects_size(object_size);
        return;
      }
    }
    MaybeObject* allocation = heap->new_space()->AllocateRaw(allocation_size);
    heap->promotion_queue()->SetNewLimit(heap->new_space()->top());
    Object* result = allocation->ToObjectUnchecked();
    HeapObject* target = HeapObject::cast(result);

    if (alignment != kObjectAlignment) {
      target = EnsureDoubleAligned(heap, target, allocation_size);
    }

    // Order is important: slot might be inside of the target if target
    // was allocated over a dead object and slot comes from the store
    // buffer.
    *slot = target;
    MigrateObject(heap, object, target, object_size);
    return;
  }

  static inline void EvacuateJSFunction(Map* map,
                                        HeapObject** slot,
                                        HeapObject* object) {
    ObjectEvacuationStrategy<POINTER_OBJECT>::
        template VisitSpecialized<JSFunction::kSize>(map, slot, object);

    HeapObject* target = *slot;
    MarkBit mark_bit = Marking::MarkBitFrom(target);
    if (Marking::IsBlack(mark_bit)) {
      // This object is black and it might not be rescanned by the marker.
      // We should explicitly record the code entry slot for compaction because
      // promotion queue processing (IterateAndMarkPointersToFromSpace) will
      // miss it as it is not HeapObject-tagged.
      Address code_entry_slot =
          target->address() + JSFunction::kCodeEntryOffset;
      Code* code = Code::cast(Code::GetObjectFromEntryAddress(code_entry_slot));
      map->GetHeap()->mark_compact_collector()->
          RecordCodeEntrySlot(code_entry_slot, code);
    }
  }

  static inline void EvacuateFixedArray(Map* map,
                                        HeapObject** slot,
                                        HeapObject* object) {
    int object_size = FixedArray::BodyDescriptor::SizeOf(map, object);
    EvacuateObject<POINTER_OBJECT, UNKNOWN_SIZE, kObjectAlignment>(
        map, slot, object, object_size);
  }

  static inline void EvacuateFixedDoubleArray(Map* map,
                                              HeapObject** slot,
                                              HeapObject* object) {
    int length = reinterpret_cast<FixedDoubleArray*>(object)->length();
    int object_size = FixedDoubleArray::SizeFor(length);
    EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE, kDoubleAlignment>(
        map, slot, object, object_size);
  }

  static inline void EvacuateByteArray(Map* map,
                                       HeapObject** slot,
                                       HeapObject* object) {
    int object_size = reinterpret_cast<ByteArray*>(object)->ByteArraySize();
    EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE, kObjectAlignment>(
        map, slot, object, object_size);
  }

  static inline void EvacuateSeqAsciiString(Map* map,
                                            HeapObject** slot,
                                            HeapObject* object) {
    int object_size = SeqAsciiString::cast(object)->
        SeqAsciiStringSize(map->instance_type());
    EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE, kObjectAlignment>(
        map, slot, object, object_size);
  }

  static inline void EvacuateSeqTwoByteString(Map* map,
                                              HeapObject** slot,
                                              HeapObject* object) {
    int object_size = SeqTwoByteString::cast(object)->
        SeqTwoByteStringSize(map->instance_type());
    EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE, kObjectAlignment>(
        map, slot, object, object_size);
  }

  static inline bool IsShortcutCandidate(int type) {
    return ((type & kShortcutTypeMask) == kShortcutTypeTag);
  }
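
  // A shortcut candidate is a cons string whose instance type carries
  // kShortcutTypeTag; when its second component is the empty string, the
  // scavenger can forward references straight to the first component (see
  // EvacuateShortcutCandidate below).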

  static inline void EvacuateShortcutCandidate(Map* map,
                                               HeapObject** slot,
                                               HeapObject* object) {
    ASSERT(IsShortcutCandidate(map->instance_type()));

    Heap* heap = map->GetHeap();

    if (marks_handling == IGNORE_MARKS &&
        ConsString::cast(object)->unchecked_second() ==
            heap->empty_string()) {
      HeapObject* first =
          HeapObject::cast(ConsString::cast(object)->unchecked_first());

      *slot = first;

      if (!heap->InNewSpace(first)) {
        object->set_map_word(MapWord::FromForwardingAddress(first));
        return;
      }

      MapWord first_word = first->map_word();
      if (first_word.IsForwardingAddress()) {
        HeapObject* target = first_word.ToForwardingAddress();

        *slot = target;
        object->set_map_word(MapWord::FromForwardingAddress(target));
        return;
      }

      heap->DoScavengeObject(first->map(), slot, first);
      object->set_map_word(MapWord::FromForwardingAddress(*slot));
      return;
    }

    int object_size = ConsString::kSize;
    EvacuateObject<POINTER_OBJECT, SMALL, kObjectAlignment>(
        map, slot, object, object_size);
  }

  template<ObjectContents object_contents>
  class ObjectEvacuationStrategy {
   public:
    template<int object_size>
    static inline void VisitSpecialized(Map* map,
                                        HeapObject** slot,
                                        HeapObject* object) {
      EvacuateObject<object_contents, SMALL, kObjectAlignment>(
          map, slot, object, object_size);
    }

    static inline void Visit(Map* map,
                             HeapObject** slot,
                             HeapObject* object) {
      int object_size = map->instance_size();
      EvacuateObject<object_contents, SMALL, kObjectAlignment>(
          map, slot, object, object_size);
    }
  };

  static VisitorDispatchTable<ScavengingCallback> table_;
};
template<MarksHandling marks_handling,
         LoggingAndProfiling logging_and_profiling_mode>
VisitorDispatchTable<ScavengingCallback>
    ScavengingVisitor<marks_handling, logging_and_profiling_mode>::table_;

static void InitializeScavengingVisitorsTables() {
  ScavengingVisitor<TRANSFER_MARKS,
                    LOGGING_AND_PROFILING_DISABLED>::Initialize();
  ScavengingVisitor<IGNORE_MARKS, LOGGING_AND_PROFILING_DISABLED>::Initialize();
  ScavengingVisitor<TRANSFER_MARKS,
                    LOGGING_AND_PROFILING_ENABLED>::Initialize();
  ScavengingVisitor<IGNORE_MARKS, LOGGING_AND_PROFILING_ENABLED>::Initialize();
}

void Heap::SelectScavengingVisitorsTable() {
  bool logging_and_profiling =
      isolate()->logger()->is_logging() ||
      CpuProfiler::is_profiling(isolate()) ||
      (isolate()->heap_profiler() != NULL &&
       isolate()->heap_profiler()->is_profiling());

  if (!incremental_marking()->IsMarking()) {
    if (!logging_and_profiling) {
      scavenging_visitors_table_.CopyFrom(
          ScavengingVisitor<IGNORE_MARKS,
                            LOGGING_AND_PROFILING_DISABLED>::GetTable());
    } else {
      scavenging_visitors_table_.CopyFrom(
          ScavengingVisitor<IGNORE_MARKS,
                            LOGGING_AND_PROFILING_ENABLED>::GetTable());
    }
  } else {
    if (!logging_and_profiling) {
      scavenging_visitors_table_.CopyFrom(
          ScavengingVisitor<TRANSFER_MARKS,
                            LOGGING_AND_PROFILING_DISABLED>::GetTable());
    } else {
      scavenging_visitors_table_.CopyFrom(
          ScavengingVisitor<TRANSFER_MARKS,
                            LOGGING_AND_PROFILING_ENABLED>::GetTable());
    }

    if (incremental_marking()->IsCompacting()) {
      // When compacting, forbid short-circuiting of cons strings.
      // Scavenging code relies on the fact that a new space object
      // cannot be evacuated into an evacuation candidate, but
      // short-circuiting violates this assumption.
      scavenging_visitors_table_.Register(
          StaticVisitorBase::kVisitShortcutCandidate,
          scavenging_visitors_table_.GetVisitorById(
              StaticVisitorBase::kVisitConsString));
    }
  }
}
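
// The four ScavengingVisitor instantiations above form a 2x2 matrix: marks
// handling (IGNORE_MARKS vs. TRANSFER_MARKS, the latter used while
// incremental marking is active) crossed with logging/profiling support.
// SelectScavengingVisitorsTable copies the matching dispatch table up front,
// so the per-object scavenge dispatch itself stays branch-free.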

void Heap::ScavengeObjectSlow(HeapObject** p, HeapObject* object) {
  SLOW_ASSERT(HEAP->InFromSpace(object));
  MapWord first_word = object->map_word();
  SLOW_ASSERT(!first_word.IsForwardingAddress());
  Map* map = first_word.ToMap();
  map->GetHeap()->DoScavengeObject(map, p, object);
}
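
// A sketch of the map-word protocol relied on above (the real fast path
// lives in heap-inl.h): the first visit copies an object and overwrites its
// map word with a forwarding address, so any later visit just follows that
// pointer instead of copying again. Roughly:
//
//   MapWord word = object->map_word();
//   if (word.IsForwardingAddress()) {
//     *p = word.ToForwardingAddress();  // Already copied; update the slot.
//   } else {
//     ScavengeObjectSlow(p, object);    // Copy and install the forwarder.
//   }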

MaybeObject* Heap::AllocatePartialMap(InstanceType instance_type,
                                      int instance_size) {
  Object* result;
  { MaybeObject* maybe_result = AllocateRawMap();
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }

  // Map::cast cannot be used due to uninitialized map field.
  reinterpret_cast<Map*>(result)->set_map(raw_unchecked_meta_map());
  reinterpret_cast<Map*>(result)->set_instance_type(instance_type);
  reinterpret_cast<Map*>(result)->set_instance_size(instance_size);
  reinterpret_cast<Map*>(result)->set_visitor_id(
      StaticVisitorBase::GetVisitorId(instance_type, instance_size));
  reinterpret_cast<Map*>(result)->set_inobject_properties(0);
  reinterpret_cast<Map*>(result)->set_pre_allocated_property_fields(0);
  reinterpret_cast<Map*>(result)->set_unused_property_fields(0);
  reinterpret_cast<Map*>(result)->set_bit_field(0);
  reinterpret_cast<Map*>(result)->set_bit_field2(0);
  return result;
}

MaybeObject* Heap::AllocateMap(InstanceType instance_type,
                               int instance_size,
                               ElementsKind elements_kind) {
  Object* result;
  { MaybeObject* maybe_result = AllocateRawMap();
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }

  Map* map = reinterpret_cast<Map*>(result);
  map->set_map_no_write_barrier(meta_map());
  map->set_instance_type(instance_type);
  map->set_visitor_id(
      StaticVisitorBase::GetVisitorId(instance_type, instance_size));
  map->set_prototype(null_value(), SKIP_WRITE_BARRIER);
  map->set_constructor(null_value(), SKIP_WRITE_BARRIER);
  map->set_instance_size(instance_size);
  map->set_inobject_properties(0);
  map->set_pre_allocated_property_fields(0);
  map->init_instance_descriptors();
  map->set_code_cache(empty_fixed_array(), SKIP_WRITE_BARRIER);
  map->init_prototype_transitions(undefined_value());
  map->set_unused_property_fields(0);
  map->set_bit_field(0);
  map->set_bit_field2(1 << Map::kIsExtensible);
  map->set_elements_kind(elements_kind);

  // If the map object is aligned, fill the padding area with Smi 0 objects.
  if (Map::kPadStart < Map::kSize) {
    memset(reinterpret_cast<byte*>(map) + Map::kPadStart - kHeapObjectTag,
           0,
           Map::kSize - Map::kPadStart);
  }
  return map;
}

MaybeObject* Heap::AllocateCodeCache() {
  CodeCache* code_cache;
  { MaybeObject* maybe_code_cache = AllocateStruct(CODE_CACHE_TYPE);
    if (!maybe_code_cache->To(&code_cache)) return maybe_code_cache;
  }
  code_cache->set_default_cache(empty_fixed_array(), SKIP_WRITE_BARRIER);
  code_cache->set_normal_type_cache(undefined_value(), SKIP_WRITE_BARRIER);
  return code_cache;
}

MaybeObject* Heap::AllocatePolymorphicCodeCache() {
  return AllocateStruct(POLYMORPHIC_CODE_CACHE_TYPE);
}

MaybeObject* Heap::AllocateAccessorPair() {
  AccessorPair* accessors;
  { MaybeObject* maybe_accessors = AllocateStruct(ACCESSOR_PAIR_TYPE);
    if (!maybe_accessors->To(&accessors)) return maybe_accessors;
  }
  accessors->set_getter(the_hole_value(), SKIP_WRITE_BARRIER);
  accessors->set_setter(the_hole_value(), SKIP_WRITE_BARRIER);
  return accessors;
}

MaybeObject* Heap::AllocateTypeFeedbackInfo() {
  TypeFeedbackInfo* info;
  { MaybeObject* maybe_info = AllocateStruct(TYPE_FEEDBACK_INFO_TYPE);
    if (!maybe_info->To(&info)) return maybe_info;
  }
  info->set_ic_total_count(0);
  info->set_ic_with_type_info_count(0);
  info->set_type_feedback_cells(TypeFeedbackCells::cast(empty_fixed_array()),
                                SKIP_WRITE_BARRIER);
  return info;
}

MaybeObject* Heap::AllocateAliasedArgumentsEntry(int aliased_context_slot) {
  AliasedArgumentsEntry* entry;
  { MaybeObject* maybe_entry = AllocateStruct(ALIASED_ARGUMENTS_ENTRY_TYPE);
    if (!maybe_entry->To(&entry)) return maybe_entry;
  }
  entry->set_aliased_context_slot(aliased_context_slot);
  return entry;
}

const Heap::StringTypeTable Heap::string_type_table[] = {
#define STRING_TYPE_ELEMENT(type, size, name, camel_name)                      \
  {type, size, k##camel_name##MapRootIndex},
  STRING_TYPE_LIST(STRING_TYPE_ELEMENT)
#undef STRING_TYPE_ELEMENT
};
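
// For illustration, a hypothetical STRING_TYPE_LIST entry such as
//   (SYMBOL_TYPE, kVariableSizeSentinel, symbol, Symbol)
// expands through STRING_TYPE_ELEMENT into
//   {SYMBOL_TYPE, kVariableSizeSentinel, kSymbolMapRootIndex},
// pairing each string instance type with the root-list slot that holds its
// map, so CreateInitialMaps below can fill all of them in one loop.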

const Heap::ConstantSymbolTable Heap::constant_symbol_table[] = {
#define CONSTANT_SYMBOL_ELEMENT(name, contents)                                \
  {contents, k##name##RootIndex},
  SYMBOL_LIST(CONSTANT_SYMBOL_ELEMENT)
#undef CONSTANT_SYMBOL_ELEMENT
};

const Heap::StructTable Heap::struct_table[] = {
#define STRUCT_TABLE_ELEMENT(NAME, Name, name)                                 \
  { NAME##_TYPE, Name::kSize, k##Name##MapRootIndex },
  STRUCT_LIST(STRUCT_TABLE_ELEMENT)
#undef STRUCT_TABLE_ELEMENT
};

bool Heap::CreateInitialMaps() {
  Object* obj;
  { MaybeObject* maybe_obj = AllocatePartialMap(MAP_TYPE, Map::kSize);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  // Map::cast cannot be used due to uninitialized map field.
  Map* new_meta_map = reinterpret_cast<Map*>(obj);
  set_meta_map(new_meta_map);
  new_meta_map->set_map(new_meta_map);

  { MaybeObject* maybe_obj =
        AllocatePartialMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_fixed_array_map(Map::cast(obj));

  { MaybeObject* maybe_obj = AllocatePartialMap(ODDBALL_TYPE, Oddball::kSize);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_oddball_map(Map::cast(obj));

  // Allocate the empty array.
  { MaybeObject* maybe_obj = AllocateEmptyFixedArray();
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_empty_fixed_array(FixedArray::cast(obj));

  { MaybeObject* maybe_obj = Allocate(oddball_map(), OLD_POINTER_SPACE);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_null_value(Oddball::cast(obj));
  Oddball::cast(obj)->set_kind(Oddball::kNull);

  { MaybeObject* maybe_obj = Allocate(oddball_map(), OLD_POINTER_SPACE);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_undefined_value(Oddball::cast(obj));
  Oddball::cast(obj)->set_kind(Oddball::kUndefined);
  ASSERT(!InNewSpace(undefined_value()));

  // Allocate the empty descriptor array.
  { MaybeObject* maybe_obj = AllocateEmptyFixedArray();
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_empty_descriptor_array(DescriptorArray::cast(obj));

  // Fix the instance_descriptors for the existing maps.
  meta_map()->init_instance_descriptors();
  meta_map()->set_code_cache(empty_fixed_array());
  meta_map()->init_prototype_transitions(undefined_value());

  fixed_array_map()->init_instance_descriptors();
  fixed_array_map()->set_code_cache(empty_fixed_array());
  fixed_array_map()->init_prototype_transitions(undefined_value());

  oddball_map()->init_instance_descriptors();
  oddball_map()->set_code_cache(empty_fixed_array());
  oddball_map()->init_prototype_transitions(undefined_value());

  // Fix prototype object for existing maps.
  meta_map()->set_prototype(null_value());
  meta_map()->set_constructor(null_value());

  fixed_array_map()->set_prototype(null_value());
  fixed_array_map()->set_constructor(null_value());

  oddball_map()->set_prototype(null_value());
  oddball_map()->set_constructor(null_value());

  { MaybeObject* maybe_obj =
        AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_fixed_cow_array_map(Map::cast(obj));
  ASSERT(fixed_array_map() != fixed_cow_array_map());

  { MaybeObject* maybe_obj =
        AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_scope_info_map(Map::cast(obj));

  { MaybeObject* maybe_obj = AllocateMap(HEAP_NUMBER_TYPE, HeapNumber::kSize);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_heap_number_map(Map::cast(obj));

  { MaybeObject* maybe_obj = AllocateMap(FOREIGN_TYPE, Foreign::kSize);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_foreign_map(Map::cast(obj));

  for (unsigned i = 0; i < ARRAY_SIZE(string_type_table); i++) {
    const StringTypeTable& entry = string_type_table[i];
    { MaybeObject* maybe_obj = AllocateMap(entry.type, entry.size);
      if (!maybe_obj->ToObject(&obj)) return false;
    }
    roots_[entry.index] = Map::cast(obj);
  }

  { MaybeObject* maybe_obj = AllocateMap(STRING_TYPE, kVariableSizeSentinel);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_undetectable_string_map(Map::cast(obj));
  Map::cast(obj)->set_is_undetectable();

  { MaybeObject* maybe_obj =
        AllocateMap(ASCII_STRING_TYPE, kVariableSizeSentinel);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_undetectable_ascii_string_map(Map::cast(obj));
  Map::cast(obj)->set_is_undetectable();

  { MaybeObject* maybe_obj =
        AllocateMap(FIXED_DOUBLE_ARRAY_TYPE, kVariableSizeSentinel);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_fixed_double_array_map(Map::cast(obj));

  { MaybeObject* maybe_obj =
        AllocateMap(BYTE_ARRAY_TYPE, kVariableSizeSentinel);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_byte_array_map(Map::cast(obj));

  { MaybeObject* maybe_obj =
        AllocateMap(FREE_SPACE_TYPE, kVariableSizeSentinel);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_free_space_map(Map::cast(obj));

  { MaybeObject* maybe_obj = AllocateByteArray(0, TENURED);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_empty_byte_array(ByteArray::cast(obj));

  { MaybeObject* maybe_obj =
        AllocateMap(EXTERNAL_PIXEL_ARRAY_TYPE, ExternalArray::kAlignedSize);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_external_pixel_array_map(Map::cast(obj));

  { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_BYTE_ARRAY_TYPE,
                                         ExternalArray::kAlignedSize);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_external_byte_array_map(Map::cast(obj));

  { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE,
                                         ExternalArray::kAlignedSize);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_external_unsigned_byte_array_map(Map::cast(obj));

  { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_SHORT_ARRAY_TYPE,
                                         ExternalArray::kAlignedSize);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_external_short_array_map(Map::cast(obj));

  { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE,
                                         ExternalArray::kAlignedSize);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_external_unsigned_short_array_map(Map::cast(obj));

  { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_INT_ARRAY_TYPE,
                                         ExternalArray::kAlignedSize);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_external_int_array_map(Map::cast(obj));

  { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_UNSIGNED_INT_ARRAY_TYPE,
                                         ExternalArray::kAlignedSize);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_external_unsigned_int_array_map(Map::cast(obj));

  { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_FLOAT_ARRAY_TYPE,
                                         ExternalArray::kAlignedSize);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_external_float_array_map(Map::cast(obj));

  { MaybeObject* maybe_obj =
        AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_non_strict_arguments_elements_map(Map::cast(obj));

  { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_DOUBLE_ARRAY_TYPE,
                                         ExternalArray::kAlignedSize);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_external_double_array_map(Map::cast(obj));

  { MaybeObject* maybe_obj = AllocateMap(CODE_TYPE, kVariableSizeSentinel);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_code_map(Map::cast(obj));

  { MaybeObject* maybe_obj = AllocateMap(JS_GLOBAL_PROPERTY_CELL_TYPE,
                                         JSGlobalPropertyCell::kSize);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_global_property_cell_map(Map::cast(obj));

  { MaybeObject* maybe_obj = AllocateMap(FILLER_TYPE, kPointerSize);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_one_pointer_filler_map(Map::cast(obj));

  { MaybeObject* maybe_obj = AllocateMap(FILLER_TYPE, 2 * kPointerSize);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_two_pointer_filler_map(Map::cast(obj));

  for (unsigned i = 0; i < ARRAY_SIZE(struct_table); i++) {
    const StructTable& entry = struct_table[i];
    { MaybeObject* maybe_obj = AllocateMap(entry.type, entry.size);
      if (!maybe_obj->ToObject(&obj)) return false;
    }
    roots_[entry.index] = Map::cast(obj);
  }

  { MaybeObject* maybe_obj =
        AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_hash_table_map(Map::cast(obj));

  { MaybeObject* maybe_obj =
        AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_function_context_map(Map::cast(obj));

  { MaybeObject* maybe_obj =
        AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_catch_context_map(Map::cast(obj));

  { MaybeObject* maybe_obj =
        AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_with_context_map(Map::cast(obj));

  { MaybeObject* maybe_obj =
        AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_block_context_map(Map::cast(obj));

  { MaybeObject* maybe_obj =
        AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_module_context_map(Map::cast(obj));

  { MaybeObject* maybe_obj =
        AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  Map* global_context_map = Map::cast(obj);
  global_context_map->set_visitor_id(StaticVisitorBase::kVisitGlobalContext);
  set_global_context_map(global_context_map);

  { MaybeObject* maybe_obj = AllocateMap(SHARED_FUNCTION_INFO_TYPE,
                                         SharedFunctionInfo::kAlignedSize);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_shared_function_info_map(Map::cast(obj));

  { MaybeObject* maybe_obj = AllocateMap(JS_MESSAGE_OBJECT_TYPE,
                                         JSMessageObject::kSize);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_message_object_map(Map::cast(obj));

  ASSERT(!InNewSpace(empty_fixed_array()));
  return true;
}

MaybeObject* Heap::AllocateHeapNumber(double value, PretenureFlag pretenure) {
  // Statically ensure that it is safe to allocate heap numbers in paged
  // spaces.
  STATIC_ASSERT(HeapNumber::kSize <= Page::kNonCodeObjectAreaSize);
  AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;

  Object* result;
  { MaybeObject* maybe_result =
        AllocateRaw(HeapNumber::kSize, space, OLD_DATA_SPACE);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }

  HeapObject::cast(result)->set_map_no_write_barrier(heap_number_map());
  HeapNumber::cast(result)->set_value(value);
  return result;
}

MaybeObject* Heap::AllocateHeapNumber(double value) {
  // Use the general version if we're forced to always allocate.
  if (always_allocate()) return AllocateHeapNumber(value, TENURED);

  // This version of AllocateHeapNumber is optimized for
  // allocation in new space.
  STATIC_ASSERT(HeapNumber::kSize <= Page::kMaxNonCodeHeapObjectSize);
  ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
  Object* result;
  { MaybeObject* maybe_result = new_space_.AllocateRaw(HeapNumber::kSize);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }
  HeapObject::cast(result)->set_map_no_write_barrier(heap_number_map());
  HeapNumber::cast(result)->set_value(value);
  return result;
}

MaybeObject* Heap::AllocateJSGlobalPropertyCell(Object* value) {
  Object* result;
  { MaybeObject* maybe_result = AllocateRawCell();
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }
  HeapObject::cast(result)->set_map_no_write_barrier(
      global_property_cell_map());
  JSGlobalPropertyCell::cast(result)->set_value(value);
  return result;
}

MaybeObject* Heap::CreateOddball(const char* to_string,
                                 Object* to_number,
                                 byte kind) {
  Object* result;
  { MaybeObject* maybe_result = Allocate(oddball_map(), OLD_POINTER_SPACE);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }
  return Oddball::cast(result)->Initialize(to_string, to_number, kind);
}

bool Heap::CreateApiObjects() {
  Object* obj;

  { MaybeObject* maybe_obj = AllocateMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  // Don't use Smi-only elements optimizations for objects with the neander
  // map. There are too many cases where element values are set directly with
  // a bottleneck to trap the Smi-only -> fast elements transition, and there
  // appears to be no benefit to optimizing this case.
  Map* new_neander_map = Map::cast(obj);
  new_neander_map->set_elements_kind(TERMINAL_FAST_ELEMENTS_KIND);
  set_neander_map(new_neander_map);

  { MaybeObject* maybe_obj = AllocateJSObjectFromMap(neander_map());
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  Object* elements;
  { MaybeObject* maybe_elements = AllocateFixedArray(2);
    if (!maybe_elements->ToObject(&elements)) return false;
  }
  FixedArray::cast(elements)->set(0, Smi::FromInt(0));
  JSObject::cast(obj)->set_elements(FixedArray::cast(elements));
  set_message_listeners(JSObject::cast(obj));

  return true;
}

void Heap::CreateJSEntryStub() {
  JSEntryStub stub;
  set_js_entry_code(*stub.GetCode());
}

void Heap::CreateJSConstructEntryStub() {
  JSConstructEntryStub stub;
  set_js_construct_entry_code(*stub.GetCode());
}

void Heap::CreateFixedStubs() {
  // Here we create roots for fixed stubs. They are needed at GC
  // for cooking and uncooking (check out frames.cc).
  // This eliminates the need for doing dictionary lookup in the
  // stub cache for these stubs.
  // gcc-4.4 has a problem generating correct code for the following snippet:
  // { JSEntryStub stub;
  //   js_entry_code_ = *stub.GetCode();
  // }
  // { JSConstructEntryStub stub;
  //   js_construct_entry_code_ = *stub.GetCode();
  // }
  // To work around the problem, make separate functions without inlining.
  Heap::CreateJSEntryStub();
  Heap::CreateJSConstructEntryStub();

  // Create stubs that should be there, so we don't unexpectedly have to
  // create them if we need them during the creation of another stub.
  // Stub creation mixes raw pointers and handles in an unsafe manner so
  // we cannot create stubs while we are creating stubs.
  CodeStub::GenerateStubsAheadOfTime();
}

bool Heap::CreateInitialObjects() {
  Object* obj;

  // The -0 value must be set before NumberFromDouble works.
  { MaybeObject* maybe_obj = AllocateHeapNumber(-0.0, TENURED);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_minus_zero_value(HeapNumber::cast(obj));
  ASSERT(signbit(minus_zero_value()->Number()) != 0);

  { MaybeObject* maybe_obj = AllocateHeapNumber(OS::nan_value(), TENURED);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_nan_value(HeapNumber::cast(obj));

  { MaybeObject* maybe_obj = AllocateHeapNumber(V8_INFINITY, TENURED);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_infinity_value(HeapNumber::cast(obj));

  // The hole has not been created yet, but we want to put something
  // predictable in the gaps in the symbol table, so let's make that Smi zero.
  set_the_hole_value(reinterpret_cast<Oddball*>(Smi::FromInt(0)));

  // Allocate the initial symbol table.
  { MaybeObject* maybe_obj = SymbolTable::Allocate(kInitialSymbolTableSize);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  // Don't use set_symbol_table() due to asserts.
  roots_[kSymbolTableRootIndex] = obj;

  // Finish initializing oddballs after creating the symbol table.
  { MaybeObject* maybe_obj =
        undefined_value()->Initialize("undefined",
                                      nan_value(),
                                      Oddball::kUndefined);
    if (!maybe_obj->ToObject(&obj)) return false;
  }

  // Initialize the null_value.
  { MaybeObject* maybe_obj =
        null_value()->Initialize("null", Smi::FromInt(0), Oddball::kNull);
    if (!maybe_obj->ToObject(&obj)) return false;
  }

  { MaybeObject* maybe_obj = CreateOddball("true",
                                           Smi::FromInt(1),
                                           Oddball::kTrue);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_true_value(Oddball::cast(obj));

  { MaybeObject* maybe_obj = CreateOddball("false",
                                           Smi::FromInt(0),
                                           Oddball::kFalse);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_false_value(Oddball::cast(obj));

  { MaybeObject* maybe_obj = CreateOddball("hole",
                                           Smi::FromInt(-1),
                                           Oddball::kTheHole);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_the_hole_value(Oddball::cast(obj));

  { MaybeObject* maybe_obj = CreateOddball("arguments_marker",
                                           Smi::FromInt(-4),
                                           Oddball::kArgumentMarker);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_arguments_marker(Oddball::cast(obj));

  { MaybeObject* maybe_obj = CreateOddball("no_interceptor_result_sentinel",
                                           Smi::FromInt(-2),
                                           Oddball::kOther);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_no_interceptor_result_sentinel(obj);

  { MaybeObject* maybe_obj = CreateOddball("termination_exception",
                                           Smi::FromInt(-3),
                                           Oddball::kOther);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_termination_exception(obj);

  // Allocate the empty string.
  { MaybeObject* maybe_obj = AllocateRawAsciiString(0, TENURED);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_empty_string(String::cast(obj));

  for (unsigned i = 0; i < ARRAY_SIZE(constant_symbol_table); i++) {
    { MaybeObject* maybe_obj =
          LookupAsciiSymbol(constant_symbol_table[i].contents);
      if (!maybe_obj->ToObject(&obj)) return false;
    }
    roots_[constant_symbol_table[i].index] = String::cast(obj);
  }

  // Allocate the hidden symbol which is used to identify the hidden properties
  // in JSObjects. The hash code has a special value so that it will not match
  // the empty string when searching for the property. It cannot be part of the
  // loop above because it needs to be allocated manually with the special
  // hash code in place. The hash code for the hidden_symbol is zero to ensure
  // that it will always be at the first entry in property descriptors.
  { MaybeObject* maybe_obj =
        AllocateSymbol(CStrVector(""), 0, String::kZeroHash);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  hidden_symbol_ = String::cast(obj);

  // Allocate the foreign for __proto__.
  { MaybeObject* maybe_obj =
        AllocateForeign((Address) &Accessors::ObjectPrototype);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_prototype_accessors(Foreign::cast(obj));

  // Allocate the code_stubs dictionary. The initial size is set to avoid
  // expanding the dictionary during bootstrapping.
  { MaybeObject* maybe_obj = UnseededNumberDictionary::Allocate(128);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_code_stubs(UnseededNumberDictionary::cast(obj));

  // Allocate the non_monomorphic_cache used in stub-cache.cc. The initial size
  // is set to avoid expanding the dictionary during bootstrapping.
  { MaybeObject* maybe_obj = UnseededNumberDictionary::Allocate(64);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_non_monomorphic_cache(UnseededNumberDictionary::cast(obj));

  { MaybeObject* maybe_obj = AllocatePolymorphicCodeCache();
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_polymorphic_code_cache(PolymorphicCodeCache::cast(obj));

  set_instanceof_cache_function(Smi::FromInt(0));
  set_instanceof_cache_map(Smi::FromInt(0));
  set_instanceof_cache_answer(Smi::FromInt(0));

  CreateFixedStubs();

  // Allocate the dictionary of intrinsic function names.
  { MaybeObject* maybe_obj = StringDictionary::Allocate(Runtime::kNumFunctions);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  { MaybeObject* maybe_obj = Runtime::InitializeIntrinsicFunctionNames(this,
                                                                       obj);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_intrinsic_function_names(StringDictionary::cast(obj));

  { MaybeObject* maybe_obj = AllocateInitialNumberStringCache();
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_number_string_cache(FixedArray::cast(obj));

  // Allocate cache for single character ASCII strings.
  { MaybeObject* maybe_obj =
        AllocateFixedArray(String::kMaxAsciiCharCode + 1, TENURED);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_single_character_string_cache(FixedArray::cast(obj));

  // Allocate cache for string split.
  { MaybeObject* maybe_obj =
        AllocateFixedArray(StringSplitCache::kStringSplitCacheSize, TENURED);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_string_split_cache(FixedArray::cast(obj));

  // Allocate cache for external strings pointing to native source code.
  { MaybeObject* maybe_obj = AllocateFixedArray(Natives::GetBuiltinsCount());
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_natives_source_cache(FixedArray::cast(obj));

  // Handling of script id generation is in FACTORY->NewScript.
  set_last_script_id(undefined_value());

  // Initialize keyed lookup cache.
  isolate_->keyed_lookup_cache()->Clear();

  // Initialize context slot cache.
  isolate_->context_slot_cache()->Clear();

  // Initialize descriptor cache.
  isolate_->descriptor_lookup_cache()->Clear();

  // Initialize compilation cache.
  isolate_->compilation_cache()->Clear();

  return true;
}

Object* StringSplitCache::Lookup(
    FixedArray* cache, String* string, String* pattern) {
  if (!string->IsSymbol() || !pattern->IsSymbol()) return Smi::FromInt(0);
  uint32_t hash = string->Hash();
  uint32_t index = ((hash & (kStringSplitCacheSize - 1)) &
                    ~(kArrayEntriesPerCacheEntry - 1));
  if (cache->get(index + kStringOffset) == string &&
      cache->get(index + kPatternOffset) == pattern) {
    return cache->get(index + kArrayOffset);
  }
  index = ((index + kArrayEntriesPerCacheEntry) & (kStringSplitCacheSize - 1));
  if (cache->get(index + kStringOffset) == string &&
      cache->get(index + kPatternOffset) == pattern) {
    return cache->get(index + kArrayOffset);
  }
  return Smi::FromInt(0);
}
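
// The string split cache is effectively two-way set-associative: a
// (string, pattern) pair hashes to a primary bucket, and on a miss the
// immediately following bucket is probed as well. Enter() below fills
// whichever of the two buckets is free and, when both are occupied, clears
// the secondary bucket and claims the primary one.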

void StringSplitCache::Enter(Heap* heap,
                             FixedArray* cache,
                             String* string,
                             String* pattern,
                             FixedArray* array) {
  if (!string->IsSymbol() || !pattern->IsSymbol()) return;
  uint32_t hash = string->Hash();
  uint32_t index = ((hash & (kStringSplitCacheSize - 1)) &
                    ~(kArrayEntriesPerCacheEntry - 1));
  if (cache->get(index + kStringOffset) == Smi::FromInt(0)) {
    cache->set(index + kStringOffset, string);
    cache->set(index + kPatternOffset, pattern);
    cache->set(index + kArrayOffset, array);
  } else {
    uint32_t index2 =
        ((index + kArrayEntriesPerCacheEntry) & (kStringSplitCacheSize - 1));
    if (cache->get(index2 + kStringOffset) == Smi::FromInt(0)) {
      cache->set(index2 + kStringOffset, string);
      cache->set(index2 + kPatternOffset, pattern);
      cache->set(index2 + kArrayOffset, array);
    } else {
      cache->set(index2 + kStringOffset, Smi::FromInt(0));
      cache->set(index2 + kPatternOffset, Smi::FromInt(0));
      cache->set(index2 + kArrayOffset, Smi::FromInt(0));
      cache->set(index + kStringOffset, string);
      cache->set(index + kPatternOffset, pattern);
      cache->set(index + kArrayOffset, array);
    }
  }
  if (array->length() < 100) {  // Limit how many new symbols we want to make.
    for (int i = 0; i < array->length(); i++) {
      String* str = String::cast(array->get(i));
      Object* symbol;
      MaybeObject* maybe_symbol = heap->LookupSymbol(str);
      if (maybe_symbol->ToObject(&symbol)) {
        array->set(i, symbol);
      }
    }
  }
  array->set_map_no_write_barrier(heap->fixed_cow_array_map());
}

void StringSplitCache::Clear(FixedArray* cache) {
  for (int i = 0; i < kStringSplitCacheSize; i++) {
    cache->set(i, Smi::FromInt(0));
  }
}

MaybeObject* Heap::AllocateInitialNumberStringCache() {
  MaybeObject* maybe_obj =
      AllocateFixedArray(kInitialNumberStringCacheSize * 2, TENURED);
  return maybe_obj;
}

int Heap::FullSizeNumberStringCacheLength() {
  // Compute the size of the number string cache based on the max newspace
  // size. The number string cache has a minimum size based on twice the
  // initial cache size to ensure that it is bigger after being made
  // 'full size'.
  int number_string_cache_size = max_semispace_size_ / 512;
  number_string_cache_size = Max(kInitialNumberStringCacheSize * 2,
                                 Min(0x4000, number_string_cache_size));
  // There is a string and a number per entry so the length is twice the
  // number of entries.
  return number_string_cache_size * 2;
}
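
// Worked example (kInitialNumberStringCacheSize is defined in heap.h; the
// numbers here are purely illustrative): with a 4 MB max semispace,
// 4 MB / 512 = 8192 entries, which is under the 0x4000 cap, so the cache
// holds 8192 number/string pairs and the length returned above is 16384.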

void Heap::AllocateFullSizeNumberStringCache() {
  // The idea is to have a small number string cache in the snapshot to keep
  // boot-time memory usage down. If the cache has already been expanded
  // while the snapshot is being created, that defeats the purpose.
  ASSERT(!Serializer::enabled());
  MaybeObject* maybe_obj =
      AllocateFixedArray(FullSizeNumberStringCacheLength(), TENURED);
  Object* new_cache;
  if (maybe_obj->ToObject(&new_cache)) {
    // We don't bother to repopulate the cache with entries from the old
    // cache. It will be repopulated soon enough with new strings.
    set_number_string_cache(FixedArray::cast(new_cache));
  }
  // If allocation fails then we just return without doing anything. It is
  // only a cache, so best effort is OK here.
}

void Heap::FlushNumberStringCache() {
  // Flush the number to string cache.
  int len = number_string_cache()->length();
  for (int i = 0; i < len; i++) {
    number_string_cache()->set_undefined(this, i);
  }
}

static inline int double_get_hash(double d) {
  DoubleRepresentation rep(d);
  return static_cast<int>(rep.bits) ^ static_cast<int>(rep.bits >> 32);
}
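
// Example: 1.0 has the IEEE-754 bit pattern 0x3FF0000000000000, so the hash
// above is 0x00000000 ^ 0x3FF00000 = 0x3FF00000. XOR-folding mixes the
// sign/exponent half with the low mantissa half, so doubles that differ only
// in their high bits still hash differently after truncation to 32 bits.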

static inline int smi_get_hash(Smi* smi) {
  return smi->value();
}

Object* Heap::GetNumberStringCache(Object* number) {
  int hash;
  int mask = (number_string_cache()->length() >> 1) - 1;
  if (number->IsSmi()) {
    hash = smi_get_hash(Smi::cast(number)) & mask;
  } else {
    hash = double_get_hash(number->Number()) & mask;
  }
  Object* key = number_string_cache()->get(hash * 2);
  if (key == number) {
    return String::cast(number_string_cache()->get(hash * 2 + 1));
  } else if (key->IsHeapNumber() &&
             number->IsHeapNumber() &&
             key->Number() == number->Number()) {
    return String::cast(number_string_cache()->get(hash * 2 + 1));
  }
  return undefined_value();
}

void Heap::SetNumberStringCache(Object* number, String* string) {
  int hash;
  int mask = (number_string_cache()->length() >> 1) - 1;
  if (number->IsSmi()) {
    hash = smi_get_hash(Smi::cast(number)) & mask;
  } else {
    hash = double_get_hash(number->Number()) & mask;
  }
  if (number_string_cache()->get(hash * 2) != undefined_value() &&
      number_string_cache()->length() != FullSizeNumberStringCacheLength()) {
    // The first time we have a hash collision, we move to the full sized
    // number string cache.
    AllocateFullSizeNumberStringCache();
    return;
  }
  number_string_cache()->set(hash * 2, number);
  number_string_cache()->set(hash * 2 + 1, string);
}
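
// Layout note: cache entry i spans two consecutive slots, the key (a Smi or
// HeapNumber) at index 2 * i and the cached string at 2 * i + 1, which is
// why the mask above is derived from length() >> 1.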

MaybeObject* Heap::NumberToString(Object* number,
                                  bool check_number_string_cache) {
  isolate_->counters()->number_to_string_runtime()->Increment();
  if (check_number_string_cache) {
    Object* cached = GetNumberStringCache(number);
    if (cached != undefined_value()) {
      return cached;
    }
  }

  char arr[100];
  Vector<char> buffer(arr, ARRAY_SIZE(arr));
  const char* str;
  if (number->IsSmi()) {
    int num = Smi::cast(number)->value();
    str = IntToCString(num, buffer);
  } else {
    double num = HeapNumber::cast(number)->value();
    str = DoubleToCString(num, buffer);
  }

  Object* js_string;
  MaybeObject* maybe_js_string = AllocateStringFromAscii(CStrVector(str));
  if (maybe_js_string->ToObject(&js_string)) {
    SetNumberStringCache(number, String::cast(js_string));
  }
  return maybe_js_string;
}

MaybeObject* Heap::Uint32ToString(uint32_t value,
                                  bool check_number_string_cache) {
  Object* number;
  MaybeObject* maybe = NumberFromUint32(value);
  if (!maybe->To<Object>(&number)) return maybe;
  return NumberToString(number, check_number_string_cache);
}

Map* Heap::MapForExternalArrayType(ExternalArrayType array_type) {
  return Map::cast(roots_[RootIndexForExternalArrayType(array_type)]);
}

Heap::RootListIndex Heap::RootIndexForExternalArrayType(
    ExternalArrayType array_type) {
  switch (array_type) {
    case kExternalByteArray:
      return kExternalByteArrayMapRootIndex;
    case kExternalUnsignedByteArray:
      return kExternalUnsignedByteArrayMapRootIndex;
    case kExternalShortArray:
      return kExternalShortArrayMapRootIndex;
    case kExternalUnsignedShortArray:
      return kExternalUnsignedShortArrayMapRootIndex;
    case kExternalIntArray:
      return kExternalIntArrayMapRootIndex;
    case kExternalUnsignedIntArray:
      return kExternalUnsignedIntArrayMapRootIndex;
    case kExternalFloatArray:
      return kExternalFloatArrayMapRootIndex;
    case kExternalDoubleArray:
      return kExternalDoubleArrayMapRootIndex;
    case kExternalPixelArray:
      return kExternalPixelArrayMapRootIndex;
    default:
      UNREACHABLE();
      return kUndefinedValueRootIndex;
  }
}

MaybeObject* Heap::NumberFromDouble(double value, PretenureFlag pretenure) {
  // We need to distinguish the minus zero value and this cannot be
  // done after conversion to int. Doing this by comparing bit
  // patterns is faster than using fpclassify() et al.
  static const DoubleRepresentation minus_zero(-0.0);

  DoubleRepresentation rep(value);
  if (rep.bits == minus_zero.bits) {
    return AllocateHeapNumber(-0.0, pretenure);
  }

  int int_value = FastD2I(value);
  if (value == int_value && Smi::IsValid(int_value)) {
    return Smi::FromInt(int_value);
  }

  // Materialize the value in the heap.
  return AllocateHeapNumber(value, pretenure);
}
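
// Illustrative behaviour: NumberFromDouble(3.0) yields Smi::FromInt(3);
// NumberFromDouble(-0.0) takes the explicit minus-zero branch, and
// NumberFromDouble(0.5) fails the integer round-trip through FastD2I, so
// both of the latter materialize HeapNumbers.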

MaybeObject* Heap::AllocateForeign(Address address, PretenureFlag pretenure) {
  // Statically ensure that it is safe to allocate foreigns in paged spaces.
  STATIC_ASSERT(Foreign::kSize <= Page::kMaxNonCodeHeapObjectSize);
  AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
  Foreign* result;
  MaybeObject* maybe_result = Allocate(foreign_map(), space);
  if (!maybe_result->To(&result)) return maybe_result;
  result->set_foreign_address(address);
  return result;
}

MaybeObject* Heap::AllocateSharedFunctionInfo(Object* name) {
  SharedFunctionInfo* share;
  MaybeObject* maybe = Allocate(shared_function_info_map(), OLD_POINTER_SPACE);
  if (!maybe->To<SharedFunctionInfo>(&share)) return maybe;

  // Set pointer fields.
  share->set_name(name);
  Code* illegal = isolate_->builtins()->builtin(Builtins::kIllegal);
  share->set_code(illegal);
  share->set_scope_info(ScopeInfo::Empty());
  Code* construct_stub =
      isolate_->builtins()->builtin(Builtins::kJSConstructStubGeneric);
  share->set_construct_stub(construct_stub);
  share->set_instance_class_name(Object_symbol());
  share->set_function_data(undefined_value(), SKIP_WRITE_BARRIER);
  share->set_script(undefined_value(), SKIP_WRITE_BARRIER);
  share->set_debug_info(undefined_value(), SKIP_WRITE_BARRIER);
  share->set_inferred_name(empty_string(), SKIP_WRITE_BARRIER);
  share->set_initial_map(undefined_value(), SKIP_WRITE_BARRIER);
  share->set_this_property_assignments(undefined_value(), SKIP_WRITE_BARRIER);
  share->set_ast_node_count(0);
  share->set_deopt_counter(FLAG_deopt_every_n_times);
  share->set_ic_age(0);

  // Set integer fields (smi or int, depending on the architecture).
  share->set_length(0);
  share->set_formal_parameter_count(0);
  share->set_expected_nof_properties(0);
  share->set_num_literals(0);
  share->set_start_position_and_type(0);
  share->set_end_position(0);
  share->set_function_token_position(0);
  // All compiler hints default to false or 0.
  share->set_compiler_hints(0);
  share->set_this_property_assignments_count(0);
  share->set_opt_count(0);

  return share;
}

MaybeObject* Heap::AllocateJSMessageObject(String* type,
                                           JSArray* arguments,
                                           int start_position,
                                           int end_position,
                                           Object* script,
                                           Object* stack_trace,
                                           Object* stack_frames) {
  Object* result;
  { MaybeObject* maybe_result = Allocate(message_object_map(), NEW_SPACE);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }
  JSMessageObject* message = JSMessageObject::cast(result);
  message->set_properties(Heap::empty_fixed_array(), SKIP_WRITE_BARRIER);
  message->initialize_elements();
  message->set_elements(Heap::empty_fixed_array(), SKIP_WRITE_BARRIER);
  message->set_type(type);
  message->set_arguments(arguments);
  message->set_start_position(start_position);
  message->set_end_position(end_position);
  message->set_script(script);
  message->set_stack_trace(stack_trace);
  message->set_stack_frames(stack_frames);
  return result;
}

// Returns true for a character in a range. Both limits are inclusive.
static inline bool Between(uint32_t character, uint32_t from, uint32_t to) {
  // This makes use of the unsigned wraparound.
  return character - from <= to - from;
}
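
// Example of the wraparound at work: Between('5', '0', '9') evaluates
// 5 <= 9, which holds. For Between('/', '0', '9'), the unsigned difference
// '/' - '0' wraps around to 0xFFFFFFFF, so the single comparison also
// rejects characters below the lower bound without needing a second branch.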

MUST_USE_RESULT static inline MaybeObject* MakeOrFindTwoCharacterString(
    Heap* heap,
    uint32_t c1,
    uint32_t c2) {
  String* symbol;
  // Numeric strings have a different hash algorithm not known by
  // LookupTwoCharsSymbolIfExists, so we skip this step for such strings.
  if ((!Between(c1, '0', '9') || !Between(c2, '0', '9')) &&
      heap->symbol_table()->LookupTwoCharsSymbolIfExists(c1, c2, &symbol)) {
    return symbol;
  // Now we know the length is 2, we might as well make use of that fact
  // when building the new string.
  } else if ((c1 | c2) <= String::kMaxAsciiCharCodeU) {  // We can do this
    ASSERT(IsPowerOf2(String::kMaxAsciiCharCodeU + 1));  // because of this.
    Object* result;
    { MaybeObject* maybe_result = heap->AllocateRawAsciiString(2);
      if (!maybe_result->ToObject(&result)) return maybe_result;
    }
    char* dest = SeqAsciiString::cast(result)->GetChars();
    dest[0] = c1;
    dest[1] = c2;
    return result;
  } else {
    Object* result;
    { MaybeObject* maybe_result = heap->AllocateRawTwoByteString(2);
      if (!maybe_result->ToObject(&result)) return maybe_result;
    }
    uc16* dest = SeqTwoByteString::cast(result)->GetChars();
    dest[0] = c1;
    dest[1] = c2;
    return result;
  }
}

MaybeObject* Heap::AllocateConsString(String* first, String* second) {
  int first_length = first->length();
  if (first_length == 0) {
    return second;
  }

  int second_length = second->length();
  if (second_length == 0) {
    return first;
  }

  int length = first_length + second_length;

  // Optimization for 2-byte strings often used as keys in a decompression
  // dictionary. Check whether we already have the string in the symbol
  // table to prevent creation of many unnecessary strings.
  if (length == 2) {
    unsigned c1 = first->Get(0);
    unsigned c2 = second->Get(0);
    return MakeOrFindTwoCharacterString(this, c1, c2);
  }

  bool first_is_ascii = first->IsAsciiRepresentation();
  bool second_is_ascii = second->IsAsciiRepresentation();
  bool is_ascii = first_is_ascii && second_is_ascii;

  // Make sure that an out of memory exception is thrown if the length
  // of the new cons string is too large.
  if (length > String::kMaxLength || length < 0) {
    isolate()->context()->mark_out_of_memory();
    return Failure::OutOfMemoryException();
  }

  bool is_ascii_data_in_two_byte_string = false;
  if (!is_ascii) {
    // At least one of the strings uses two-byte representation so we
    // can't use the fast case code for short ASCII strings below, but
    // we can try to save memory if all chars actually fit in ASCII.
    is_ascii_data_in_two_byte_string =
        first->HasOnlyAsciiChars() && second->HasOnlyAsciiChars();
    if (is_ascii_data_in_two_byte_string) {
      isolate_->counters()->string_add_runtime_ext_to_ascii()->Increment();
    }
  }

  // If the resulting string is small make a flat string.
  if (length < ConsString::kMinLength) {
    // Note that neither of the two inputs can be a slice because:
    STATIC_ASSERT(ConsString::kMinLength <= SlicedString::kMinLength);
    ASSERT(first->IsFlat());
    ASSERT(second->IsFlat());
    if (is_ascii) {
      Object* result;
      { MaybeObject* maybe_result = AllocateRawAsciiString(length);
        if (!maybe_result->ToObject(&result)) return maybe_result;
      }
      // Copy the characters into the new object.
      char* dest = SeqAsciiString::cast(result)->GetChars();
      // Copy first part.
      const char* src;
      if (first->IsExternalString()) {
        src = ExternalAsciiString::cast(first)->GetChars();
      } else {
        src = SeqAsciiString::cast(first)->GetChars();
      }
      for (int i = 0; i < first_length; i++) *dest++ = src[i];
      // Copy second part.
      if (second->IsExternalString()) {
        src = ExternalAsciiString::cast(second)->GetChars();
      } else {
        src = SeqAsciiString::cast(second)->GetChars();
      }
      for (int i = 0; i < second_length; i++) *dest++ = src[i];
      return result;
    } else {
      if (is_ascii_data_in_two_byte_string) {
        Object* result;
        { MaybeObject* maybe_result = AllocateRawAsciiString(length);
          if (!maybe_result->ToObject(&result)) return maybe_result;
        }
        // Copy the characters into the new object.
        char* dest = SeqAsciiString::cast(result)->GetChars();
        String::WriteToFlat(first, dest, 0, first_length);
        String::WriteToFlat(second, dest + first_length, 0, second_length);
        isolate_->counters()->string_add_runtime_ext_to_ascii()->Increment();
        return result;
      }

      Object* result;
      { MaybeObject* maybe_result = AllocateRawTwoByteString(length);
        if (!maybe_result->ToObject(&result)) return maybe_result;
      }
      // Copy the characters into the new object.
      uc16* dest = SeqTwoByteString::cast(result)->GetChars();
      String::WriteToFlat(first, dest, 0, first_length);
      String::WriteToFlat(second, dest + first_length, 0, second_length);
      return result;
    }
  }

  Map* map = (is_ascii || is_ascii_data_in_two_byte_string) ?
      cons_ascii_string_map() : cons_string_map();

  Object* result;
  { MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }

  AssertNoAllocation no_gc;
  ConsString* cons_string = ConsString::cast(result);
  WriteBarrierMode mode = cons_string->GetWriteBarrierMode(no_gc);
  cons_string->set_length(length);
  cons_string->set_hash_field(String::kEmptyHashField);
  cons_string->set_first(first, mode);
  cons_string->set_second(second, mode);
  return result;
}
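
// Design note: a ConsString is a two-field rope node, so each concatenation
// above is O(1) and the copying cost is deferred until a caller needs
// sequential access and flattens the rope. Strings shorter than
// ConsString::kMinLength are flattened eagerly instead, because for them the
// rope overhead would exceed the cost of copying the characters.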

MaybeObject* Heap::AllocateSubString(String* buffer,
                                     int start,
                                     int end,
                                     PretenureFlag pretenure) {
  int length = end - start;
  if (length <= 0) {
    return empty_string();
  } else if (length == 1) {
    return LookupSingleCharacterStringFromCode(buffer->Get(start));
  } else if (length == 2) {
    // Optimization for 2-byte strings often used as keys in a decompression
    // dictionary. Check whether we already have the string in the symbol
    // table to prevent creation of many unnecessary strings.
    unsigned c1 = buffer->Get(start);
    unsigned c2 = buffer->Get(start + 1);
    return MakeOrFindTwoCharacterString(this, c1, c2);
  }

  // Make an attempt to flatten the buffer to reduce access time.
  buffer = buffer->TryFlattenGetString();

  if (!FLAG_string_slices ||
      !buffer->IsFlat() ||
      length < SlicedString::kMinLength ||
      pretenure == TENURED) {
    Object* result;
    // WriteToFlat takes care of the case when an indirect string has a
    // different encoding from its underlying string. These encodings may
    // differ because of externalization.
    bool is_ascii = buffer->IsAsciiRepresentation();
    { MaybeObject* maybe_result = is_ascii
                                  ? AllocateRawAsciiString(length, pretenure)
                                  : AllocateRawTwoByteString(length, pretenure);
      if (!maybe_result->ToObject(&result)) return maybe_result;
    }
    String* string_result = String::cast(result);
    // Copy the characters into the new object.
    if (is_ascii) {
      ASSERT(string_result->IsAsciiRepresentation());
      char* dest = SeqAsciiString::cast(string_result)->GetChars();
      String::WriteToFlat(buffer, dest, start, end);
    } else {
      ASSERT(string_result->IsTwoByteRepresentation());
      uc16* dest = SeqTwoByteString::cast(string_result)->GetChars();
      String::WriteToFlat(buffer, dest, start, end);
    }
    return string_result;
  }

  ASSERT(buffer->IsFlat());
#ifdef DEBUG
  if (FLAG_verify_heap) {
    buffer->StringVerify();
  }
#endif

  Object* result;
  // When slicing an indirect string we use its encoding for a newly created
  // slice and don't check the encoding of the underlying string. This is safe
  // even if the encodings are different because of externalization. If an
  // indirect ASCII string is pointing to a two-byte string, the two-byte char
  // codes of the underlying string must still fit into ASCII (because
  // externalization must not change char codes).
  { Map* map = buffer->IsAsciiRepresentation()
               ? sliced_ascii_string_map()
               : sliced_string_map();
    MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }

  AssertNoAllocation no_gc;
  SlicedString* sliced_string = SlicedString::cast(result);
  sliced_string->set_length(length);
  sliced_string->set_hash_field(String::kEmptyHashField);
  if (buffer->IsConsString()) {
    ConsString* cons = ConsString::cast(buffer);
    ASSERT(cons->second()->length() == 0);
    sliced_string->set_parent(cons->first());
    sliced_string->set_offset(start);
  } else if (buffer->IsSlicedString()) {
    // Prevent nesting sliced strings.
    SlicedString* parent_slice = SlicedString::cast(buffer);
    sliced_string->set_parent(parent_slice->parent());
    sliced_string->set_offset(start + parent_slice->offset());
  } else {
    sliced_string->set_parent(buffer);
    sliced_string->set_offset(start);
  }
  ASSERT(sliced_string->parent()->IsSeqString() ||
         sliced_string->parent()->IsExternalString());
  return result;
}
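
// Offset composition example: slicing characters [2, 10) out of a
// SlicedString that already views its parent at offset 5 yields a new slice
// with parent offset 5 + 2 = 7. Slices therefore never chain; every slice
// points directly at a sequential or external string.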

MaybeObject* Heap::AllocateExternalStringFromAscii(
    const ExternalAsciiString::Resource* resource) {
  size_t length = resource->length();
  if (length > static_cast<size_t>(String::kMaxLength)) {
    isolate()->context()->mark_out_of_memory();
    return Failure::OutOfMemoryException();
  }

  ASSERT(String::IsAscii(resource->data(), static_cast<int>(length)));

  Map* map = external_ascii_string_map();
  Object* result;
  { MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }

  ExternalAsciiString* external_string = ExternalAsciiString::cast(result);
  external_string->set_length(static_cast<int>(length));
  external_string->set_hash_field(String::kEmptyHashField);
  external_string->set_resource(resource);

  return result;
}

MaybeObject* Heap::AllocateExternalStringFromTwoByte(
    const ExternalTwoByteString::Resource* resource) {
  size_t length = resource->length();
  if (length > static_cast<size_t>(String::kMaxLength)) {
    isolate()->context()->mark_out_of_memory();
    return Failure::OutOfMemoryException();
  }

  // For small strings we check whether the resource contains only
  // ASCII characters. If yes, we use a different string map.
  static const size_t kAsciiCheckLengthLimit = 32;
  bool is_ascii = length <= kAsciiCheckLengthLimit &&
      String::IsAscii(resource->data(), static_cast<int>(length));
  Map* map = is_ascii ?
      external_string_with_ascii_data_map() : external_string_map();
  Object* result;
  { MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }

  ExternalTwoByteString* external_string = ExternalTwoByteString::cast(result);
  external_string->set_length(static_cast<int>(length));
  external_string->set_hash_field(String::kEmptyHashField);
  external_string->set_resource(resource);

  return result;
}

MaybeObject* Heap::LookupSingleCharacterStringFromCode(uint16_t code) {
  if (code <= String::kMaxAsciiCharCode) {
    Object* value = single_character_string_cache()->get(code);
    if (value != undefined_value()) return value;

    char buffer[1];
    buffer[0] = static_cast<char>(code);
    Object* result;
    MaybeObject* maybe_result = LookupSymbol(Vector<const char>(buffer, 1));

    if (!maybe_result->ToObject(&result)) return maybe_result;
    single_character_string_cache()->set(code, result);
    return result;
  }

  Object* result;
  { MaybeObject* maybe_result = AllocateRawTwoByteString(1);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }
  String* answer = String::cast(result);
  answer->Set(0, code);
  return answer;
}

MaybeObject* Heap::AllocateByteArray(int length, PretenureFlag pretenure) {
  if (length < 0 || length > ByteArray::kMaxLength) {
    return Failure::OutOfMemoryException();
  }
  if (pretenure == NOT_TENURED) {
    return AllocateByteArray(length);
  }
  int size = ByteArray::SizeFor(length);
  Object* result;
  { MaybeObject* maybe_result = (size <= Page::kMaxNonCodeHeapObjectSize)
                                ? old_data_space_->AllocateRaw(size)
                                : lo_space_->AllocateRaw(size, NOT_EXECUTABLE);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }

  reinterpret_cast<ByteArray*>(result)->set_map_no_write_barrier(
      byte_array_map());
  reinterpret_cast<ByteArray*>(result)->set_length(length);
  return result;
}

MaybeObject* Heap::AllocateByteArray(int length) {
  if (length < 0 || length > ByteArray::kMaxLength) {
    return Failure::OutOfMemoryException();
  }
  int size = ByteArray::SizeFor(length);
  AllocationSpace space =
      (size > Page::kMaxNonCodeHeapObjectSize) ? LO_SPACE : NEW_SPACE;
  Object* result;
  { MaybeObject* maybe_result = AllocateRaw(size, space, OLD_DATA_SPACE);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }

  reinterpret_cast<ByteArray*>(result)->set_map_no_write_barrier(
      byte_array_map());
  reinterpret_cast<ByteArray*>(result)->set_length(length);
  return result;
}

void Heap::CreateFillerObjectAt(Address addr, int size) {
  if (size == 0) return;
  HeapObject* filler = HeapObject::FromAddress(addr);
  if (size == kPointerSize) {
    filler->set_map_no_write_barrier(one_pointer_filler_map());
  } else if (size == 2 * kPointerSize) {
    filler->set_map_no_write_barrier(two_pointer_filler_map());
  } else {
    filler->set_map_no_write_barrier(free_space_map());
    FreeSpace::cast(filler)->set_size(size);
  }
}

MaybeObject* Heap::AllocateExternalArray(int length,
                                         ExternalArrayType array_type,
                                         void* external_pointer,
                                         PretenureFlag pretenure) {
  AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
  Object* result;
  { MaybeObject* maybe_result = AllocateRaw(ExternalArray::kAlignedSize,
                                            space,
                                            OLD_DATA_SPACE);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }

  reinterpret_cast<ExternalArray*>(result)->set_map_no_write_barrier(
      MapForExternalArrayType(array_type));
  reinterpret_cast<ExternalArray*>(result)->set_length(length);
  reinterpret_cast<ExternalArray*>(result)->set_external_pointer(
      external_pointer);

  return result;
}

MaybeObject* Heap::CreateCode(const CodeDesc& desc,
                              Code::Flags flags,
                              Handle<Object> self_reference,
                              bool immovable) {
  // Allocate ByteArray before the Code object, so that we do not risk
  // leaving an uninitialized Code object (and breaking the heap).
  ByteArray* reloc_info;
  MaybeObject* maybe_reloc_info = AllocateByteArray(desc.reloc_size, TENURED);
  if (!maybe_reloc_info->To(&reloc_info)) return maybe_reloc_info;

  // Compute size.
  int body_size = RoundUp(desc.instr_size, kObjectAlignment);
  int obj_size = Code::SizeFor(body_size);
  ASSERT(IsAligned(static_cast<intptr_t>(obj_size), kCodeAlignment));
  MaybeObject* maybe_result;
  // Large code objects and code objects which should stay at a fixed address
  // are allocated in large object space.
  if (obj_size > code_space()->AreaSize() || immovable) {
    maybe_result = lo_space_->AllocateRaw(obj_size, EXECUTABLE);
  } else {
    maybe_result = code_space_->AllocateRaw(obj_size);
  }

  Object* result;
  if (!maybe_result->ToObject(&result)) return maybe_result;

  // Initialize the object
  HeapObject::cast(result)->set_map_no_write_barrier(code_map());
  Code* code = Code::cast(result);
  ASSERT(!isolate_->code_range()->exists() ||
         isolate_->code_range()->contains(code->address()));
  code->set_instruction_size(desc.instr_size);
  code->set_relocation_info(reloc_info);
  code->set_flags(flags);
  if (code->is_call_stub() || code->is_keyed_call_stub()) {
    code->set_check_type(RECEIVER_MAP_CHECK);
  }
  code->set_deoptimization_data(empty_fixed_array(), SKIP_WRITE_BARRIER);
  code->set_type_feedback_info(undefined_value(), SKIP_WRITE_BARRIER);
  code->set_handler_table(empty_fixed_array(), SKIP_WRITE_BARRIER);
  code->set_gc_metadata(Smi::FromInt(0));
  code->set_ic_age(global_ic_age_);
  // Allow self references to the created code object by patching the handle
  // to point to the newly allocated Code object.
  if (!self_reference.is_null()) {
    *(self_reference.location()) = code;
  }
  // Migrate generated code.
  // The generated code can contain Object** values (typically from handles)
  // that are dereferenced during the copy to point directly to the actual heap
  // objects. These pointers can include references to the code object itself,
  // through the self_reference parameter.
  code->CopyFrom(desc);

#ifdef DEBUG
  if (FLAG_verify_heap) {
    code->Verify();
  }
#endif
  return code;
}

MaybeObject* Heap::CopyCode(Code* code) {
  // Allocate an object the same size as the code object.
  int obj_size = code->Size();
  MaybeObject* maybe_result;
  if (obj_size > code_space()->AreaSize()) {
    maybe_result = lo_space_->AllocateRaw(obj_size, EXECUTABLE);
  } else {
    maybe_result = code_space_->AllocateRaw(obj_size);
  }

  Object* result;
  if (!maybe_result->ToObject(&result)) return maybe_result;

  // Copy code object.
  Address old_addr = code->address();
  Address new_addr = reinterpret_cast<HeapObject*>(result)->address();
  CopyBlock(new_addr, old_addr, obj_size);
  // Relocate the copy.
  Code* new_code = Code::cast(result);
  ASSERT(!isolate_->code_range()->exists() ||
         isolate_->code_range()->contains(code->address()));
  new_code->Relocate(new_addr - old_addr);
  return new_code;
}

3567 MaybeObject* Heap::CopyCode(Code* code, Vector<byte> reloc_info) {
3568 // Allocate ByteArray before the Code object, so that we do not risk
3569 // leaving uninitialized Code object (and breaking the heap).
3570 Object* reloc_info_array;
3571 { MaybeObject* maybe_reloc_info_array =
3572 AllocateByteArray(reloc_info.length(), TENURED);
3573 if (!maybe_reloc_info_array->ToObject(&reloc_info_array)) {
3574 return maybe_reloc_info_array;
3578 int new_body_size = RoundUp(code->instruction_size(), kObjectAlignment);
3580 int new_obj_size = Code::SizeFor(new_body_size);
3582 Address old_addr = code->address();
3584 size_t relocation_offset =
3585 static_cast<size_t>(code->instruction_end() - old_addr);
3587 MaybeObject* maybe_result;
3588 if (new_obj_size > code_space()->AreaSize()) {
3589 maybe_result = lo_space_->AllocateRaw(new_obj_size, EXECUTABLE);
3591 maybe_result = code_space_->AllocateRaw(new_obj_size);
3595 if (!maybe_result->ToObject(&result)) return maybe_result;
3597 // Copy code object.
3598 Address new_addr = reinterpret_cast<HeapObject*>(result)->address();
3600 // Copy header and instructions.
3601 memcpy(new_addr, old_addr, relocation_offset);
3603 Code* new_code = Code::cast(result);
3604 new_code->set_relocation_info(ByteArray::cast(reloc_info_array));
3606 // Copy patched rinfo.
3607 memcpy(new_code->relocation_start(), reloc_info.start(), reloc_info.length());
3609 // Relocate the copy.
3610 ASSERT(!isolate_->code_range()->exists() ||
3611 isolate_->code_range()->contains(code->address()));
3612 new_code->Relocate(new_addr - old_addr);
3615 if (FLAG_verify_heap) {
3623 MaybeObject* Heap::Allocate(Map* map, AllocationSpace space) {
3624 ASSERT(gc_state_ == NOT_IN_GC);
3625 ASSERT(map->instance_type() != MAP_TYPE);
3626 // If allocation failures are disallowed, we may allocate in a different
3627 // space when new space is full and the object is not a large object.
3628 AllocationSpace retry_space =
3629 (space != NEW_SPACE) ? space : TargetSpaceId(map->instance_type());
3630 Object* result;
3631 { MaybeObject* maybe_result =
3632 AllocateRaw(map->instance_size(), space, retry_space);
3633 if (!maybe_result->ToObject(&result)) return maybe_result;
3634 }
3635 // No need for write barrier since object is white and map is in old space.
3636 HeapObject::cast(result)->set_map_no_write_barrier(map);
3637 return result;
3638 }
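// Editor's note (illustrative sketch, not part of the original file): this is
// the allocation protocol used throughout heap.cc. Every allocator returns a
// MaybeObject*; the caller unwraps it with ToObject()/To<T>() and propagates
// the failure object unchanged so the runtime can retry after a GC. A
// hypothetical caller:
//
//   MaybeObject* AllocateWrapper(Heap* heap, Map* map) {
//     Object* result;
//     { MaybeObject* maybe_result = heap->Allocate(map, NEW_SPACE);
//       if (!maybe_result->ToObject(&result)) return maybe_result;
//     }
//     return result;  // Now known to be a real HeapObject.
//   }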
3641 void Heap::InitializeFunction(JSFunction* function,
3642 SharedFunctionInfo* shared,
3643 Object* prototype) {
3644 ASSERT(!prototype->IsMap());
3645 function->initialize_properties();
3646 function->initialize_elements();
3647 function->set_shared(shared);
3648 function->set_code(shared->code());
3649 function->set_prototype_or_initial_map(prototype);
3650 function->set_context(undefined_value());
3651 function->set_literals_or_bindings(empty_fixed_array());
3652 function->set_next_function_link(undefined_value());
3653 }
3656 MaybeObject* Heap::AllocateFunctionPrototype(JSFunction* function) {
3657 // Allocate the prototype. Make sure to use the object function
3658 // from the function's context, since the function can be from a
3659 // different context.
3660 JSFunction* object_function =
3661 function->context()->global_context()->object_function();
3663 // Each function prototype gets a copy of the object function map.
3664 // This avoids unwanted sharing of maps between prototypes of different
3665 // constructors.
3666 Map* new_map;
3667 ASSERT(object_function->has_initial_map());
3668 { MaybeObject* maybe_map =
3669 object_function->initial_map()->CopyDropTransitions();
3670 if (!maybe_map->To<Map>(&new_map)) return maybe_map;
3671 }
3672 Object* prototype;
3673 { MaybeObject* maybe_prototype = AllocateJSObjectFromMap(new_map);
3674 if (!maybe_prototype->ToObject(&prototype)) return maybe_prototype;
3675 }
3676 // When creating the prototype for the function we must set its
3677 // constructor to the function.
3678 Object* result;
3679 { MaybeObject* maybe_result =
3680 JSObject::cast(prototype)->SetLocalPropertyIgnoreAttributes(
3681 constructor_symbol(), function, DONT_ENUM);
3682 if (!maybe_result->ToObject(&result)) return maybe_result;
3683 }
3684 return prototype;
3685 }
3688 MaybeObject* Heap::AllocateFunction(Map* function_map,
3689 SharedFunctionInfo* shared,
3690 Object* prototype,
3691 PretenureFlag pretenure) {
3692 AllocationSpace space =
3693 (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
3694 Object* result;
3695 { MaybeObject* maybe_result = Allocate(function_map, space);
3696 if (!maybe_result->ToObject(&result)) return maybe_result;
3697 }
3698 InitializeFunction(JSFunction::cast(result), shared, prototype);
3699 return result;
3700 }
3703 MaybeObject* Heap::AllocateArgumentsObject(Object* callee, int length) {
3704 // To get fast allocation and map sharing for arguments objects we
3705 // allocate them based on an arguments boilerplate.
3707 JSObject* boilerplate;
3708 int arguments_object_size;
3709 bool strict_mode_callee = callee->IsJSFunction() &&
3710 !JSFunction::cast(callee)->shared()->is_classic_mode();
3711 if (strict_mode_callee) {
3712 boilerplate =
3713 isolate()->context()->global_context()->
3714 strict_mode_arguments_boilerplate();
3715 arguments_object_size = kArgumentsObjectSizeStrict;
3716 } else {
3717 boilerplate =
3718 isolate()->context()->global_context()->arguments_boilerplate();
3719 arguments_object_size = kArgumentsObjectSize;
3720 }
3722 // This calls Copy directly rather than using Heap::AllocateRaw so we
3723 // duplicate the check here.
3724 ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
3726 // Check that the size of the boilerplate matches our
3727 // expectations. The ArgumentsAccessStub::GenerateNewObject relies
3728 // on the size being a known constant.
3729 ASSERT(arguments_object_size == boilerplate->map()->instance_size());
3731 // Do the allocation.
3732 Object* result;
3733 { MaybeObject* maybe_result =
3734 AllocateRaw(arguments_object_size, NEW_SPACE, OLD_POINTER_SPACE);
3735 if (!maybe_result->ToObject(&result)) return maybe_result;
3736 }
3738 // Copy the content. The arguments boilerplate doesn't have any
3739 // fields that point to new space so it's safe to skip the write
3740 // barrier here.
3741 CopyBlock(HeapObject::cast(result)->address(),
3742 boilerplate->address(),
3743 JSObject::kHeaderSize);
3745 // Set the length property.
3746 JSObject::cast(result)->InObjectPropertyAtPut(kArgumentsLengthIndex,
3747 Smi::FromInt(length),
3748 SKIP_WRITE_BARRIER);
3749 // Set the callee property for non-strict mode arguments object only.
3750 if (!strict_mode_callee) {
3751 JSObject::cast(result)->InObjectPropertyAtPut(kArgumentsCalleeIndex,
3752 callee);
3753 }
3755 // Check the state of the object
3756 ASSERT(JSObject::cast(result)->HasFastProperties());
3757 ASSERT(JSObject::cast(result)->HasFastObjectElements());
3759 return result;
3760 }
3763 static bool HasDuplicates(DescriptorArray* descriptors) {
3764 int count = descriptors->number_of_descriptors();
3765 if (count > 1) {
3766 String* prev_key = descriptors->GetKey(0);
3767 for (int i = 1; i != count; i++) {
3768 String* current_key = descriptors->GetKey(i);
3769 if (prev_key == current_key) return true;
3770 prev_key = current_key;
3771 }
3772 }
3773 return false;
3774 }
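// Editor's note (illustrative): the scan above only compares neighbouring
// keys, which is sufficient exactly because the descriptor array has just
// been sorted; any duplicate symbols must end up adjacent. The same idiom on
// a plain sorted array (hypothetical helper, not in the original file):
//
//   static bool HasAdjacentDuplicates(String** sorted_keys, int count) {
//     for (int i = 1; i < count; i++) {
//       // Symbols are unique per string, so pointer identity suffices.
//       if (sorted_keys[i - 1] == sorted_keys[i]) return true;
//     }
//     return false;
//   }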
3777 MaybeObject* Heap::AllocateInitialMap(JSFunction* fun) {
3778 ASSERT(!fun->has_initial_map());
3780 // First create a new map with the size and number of in-object properties
3781 // suggested by the function.
3782 int instance_size = fun->shared()->CalculateInstanceSize();
3783 int in_object_properties = fun->shared()->CalculateInObjectProperties();
3784 Object* map_obj;
3785 { MaybeObject* maybe_map_obj = AllocateMap(JS_OBJECT_TYPE, instance_size);
3786 if (!maybe_map_obj->ToObject(&map_obj)) return maybe_map_obj;
3787 }
3789 // Fetch or allocate prototype.
3790 Object* prototype;
3791 if (fun->has_instance_prototype()) {
3792 prototype = fun->instance_prototype();
3793 } else {
3794 { MaybeObject* maybe_prototype = AllocateFunctionPrototype(fun);
3795 if (!maybe_prototype->ToObject(&prototype)) return maybe_prototype;
3796 }
3797 }
3798 Map* map = Map::cast(map_obj);
3799 map->set_inobject_properties(in_object_properties);
3800 map->set_unused_property_fields(in_object_properties);
3801 map->set_prototype(prototype);
3802 ASSERT(map->has_fast_object_elements());
3804 // If the function has only simple this property assignments add
3805 // field descriptors for these to the initial map as the object
3806 // cannot be constructed without having these properties. Guard by
3807 // the inline_new flag so we only change the map if we generate a
3808 // specialized construct stub.
3809 ASSERT(in_object_properties <= Map::kMaxPreAllocatedPropertyFields);
3810 if (fun->shared()->CanGenerateInlineConstructor(prototype)) {
3811 int count = fun->shared()->this_property_assignments_count();
3812 if (count > in_object_properties) {
3813 // Inline constructor can only handle inobject properties.
3814 fun->shared()->ForbidInlineConstructor();
3815 } else {
3816 DescriptorArray* descriptors;
3817 { MaybeObject* maybe_descriptors_obj = DescriptorArray::Allocate(count);
3818 if (!maybe_descriptors_obj->To<DescriptorArray>(&descriptors)) {
3819 return maybe_descriptors_obj;
3820 }
3821 }
3822 DescriptorArray::WhitenessWitness witness(descriptors);
3823 for (int i = 0; i < count; i++) {
3824 String* name = fun->shared()->GetThisPropertyAssignmentName(i);
3825 ASSERT(name->IsSymbol());
3826 FieldDescriptor field(name, i, NONE);
3827 field.SetEnumerationIndex(i);
3828 descriptors->Set(i, &field, witness);
3829 }
3830 descriptors->SetNextEnumerationIndex(count);
3831 descriptors->SortUnchecked(witness);
3833 // The descriptors may contain duplicates because the compiler does not
3834 // guarantee the uniqueness of property names (it would have required
3835 // quadratic time). Once the descriptors are sorted we can check for
3836 // duplicates in linear time.
3837 if (HasDuplicates(descriptors)) {
3838 fun->shared()->ForbidInlineConstructor();
3839 } else {
3840 map->set_instance_descriptors(descriptors);
3841 map->set_pre_allocated_property_fields(count);
3842 map->set_unused_property_fields(in_object_properties - count);
3843 }
3844 }
3845 }
3847 fun->shared()->StartInobjectSlackTracking(map);
3849 return map;
3850 }
3853 void Heap::InitializeJSObjectFromMap(JSObject* obj,
3854 FixedArray* properties,
3855 Map* map) {
3856 obj->set_properties(properties);
3857 obj->initialize_elements();
3858 // TODO(1240798): Initialize the object's body using valid initial values
3859 // according to the object's initial map. For example, if the map's
3860 // instance type is JS_ARRAY_TYPE, the length field should be initialized
3861 // to a number (e.g. Smi::FromInt(0)) and the elements initialized to a
3862 // fixed array (e.g. Heap::empty_fixed_array()). Currently, the object
3863 // verification code has to cope with (temporarily) invalid objects. See
3864 // for example, JSArray::JSArrayVerify.
3865 Object* filler;
3866 // We cannot always fill with one_pointer_filler_map because objects
3867 // created from API functions expect their internal fields to be initialized
3868 // with undefined_value.
3869 // Pre-allocated fields need to be initialized with undefined_value as well
3870 // so that object accesses before the constructor completes (e.g. in the
3871 // debugger) will not cause a crash.
3872 if (map->constructor()->IsJSFunction() &&
3873 JSFunction::cast(map->constructor())->shared()->
3874 IsInobjectSlackTrackingInProgress()) {
3875 // We might want to shrink the object later.
3876 ASSERT(obj->GetInternalFieldCount() == 0);
3877 filler = Heap::one_pointer_filler_map();
3878 } else {
3879 filler = Heap::undefined_value();
3880 }
3881 obj->InitializeBody(map, Heap::undefined_value(), filler);
3882 }
3885 MaybeObject* Heap::AllocateJSObjectFromMap(Map* map, PretenureFlag pretenure) {
3886 // JSFunctions should be allocated using AllocateFunction to be
3887 // properly initialized.
3888 ASSERT(map->instance_type() != JS_FUNCTION_TYPE);
3890 // Both types of global objects should be allocated using
3891 // AllocateGlobalObject to be properly initialized.
3892 ASSERT(map->instance_type() != JS_GLOBAL_OBJECT_TYPE);
3893 ASSERT(map->instance_type() != JS_BUILTINS_OBJECT_TYPE);
3895 // Allocate the backing storage for the properties.
3896 int prop_size =
3897 map->pre_allocated_property_fields() +
3898 map->unused_property_fields() -
3899 map->inobject_properties();
3900 ASSERT(prop_size >= 0);
3901 Object* properties;
3902 { MaybeObject* maybe_properties = AllocateFixedArray(prop_size, pretenure);
3903 if (!maybe_properties->ToObject(&properties)) return maybe_properties;
3904 }
3906 // Allocate the JSObject.
3907 AllocationSpace space =
3908 (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
3909 if (map->instance_size() > Page::kMaxNonCodeHeapObjectSize) space = LO_SPACE;
3910 Object* obj;
3911 { MaybeObject* maybe_obj = Allocate(map, space);
3912 if (!maybe_obj->ToObject(&obj)) return maybe_obj;
3913 }
3915 // Initialize the JSObject.
3916 InitializeJSObjectFromMap(JSObject::cast(obj),
3917 FixedArray::cast(properties),
3918 map);
3919 ASSERT(JSObject::cast(obj)->HasFastSmiOrObjectElements());
3920 return obj;
3921 }
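// Editor's worked example (added for clarity; the numbers are hypothetical):
// the properties backing store only needs the pre-allocated fields that do
// not fit in-object. With pre_allocated_property_fields() == 6,
// unused_property_fields() == 2 and inobject_properties() == 4, prop_size is
// 6 + 2 - 4 = 4 out-of-object slots.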
3924 MaybeObject* Heap::AllocateJSObject(JSFunction* constructor,
3925 PretenureFlag pretenure) {
3926 // Allocate the initial map if absent.
3927 if (!constructor->has_initial_map()) {
3928 Object* initial_map;
3929 { MaybeObject* maybe_initial_map = AllocateInitialMap(constructor);
3930 if (!maybe_initial_map->ToObject(&initial_map)) return maybe_initial_map;
3931 }
3932 constructor->set_initial_map(Map::cast(initial_map));
3933 Map::cast(initial_map)->set_constructor(constructor);
3934 }
3935 // Allocate the object based on the constructor's initial map.
3936 MaybeObject* result = AllocateJSObjectFromMap(
3937 constructor->initial_map(), pretenure);
3938 #ifdef DEBUG
3939 // Make sure result is NOT a global object if valid.
3940 Object* non_failure;
3941 ASSERT(!result->ToObject(&non_failure) || !non_failure->IsGlobalObject());
3942 #endif
3943 return result;
3944 }
3947 MaybeObject* Heap::AllocateJSModule() {
3948 // Allocate a fresh map. Modules do not have a prototype.
3949 Map* map;
3950 MaybeObject* maybe_map = AllocateMap(JS_MODULE_TYPE, JSModule::kSize);
3951 if (!maybe_map->To(&map)) return maybe_map;
3952 // Allocate the object based on the map.
3953 return AllocateJSObjectFromMap(map, TENURED);
3954 }
3957 MaybeObject* Heap::AllocateJSArrayAndStorage(
3958 ElementsKind elements_kind,
3959 int length,
3960 int capacity,
3961 ArrayStorageAllocationMode mode,
3962 PretenureFlag pretenure) {
3963 ASSERT(capacity >= length);
3964 if (length != 0 && mode == INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE) {
3965 elements_kind = GetHoleyElementsKind(elements_kind);
3966 }
3967 MaybeObject* maybe_array = AllocateJSArray(elements_kind, pretenure);
3968 JSArray* array;
3969 if (!maybe_array->To(&array)) return maybe_array;
3971 if (capacity == 0) {
3972 array->set_length(Smi::FromInt(0));
3973 array->set_elements(empty_fixed_array());
3974 return array;
3975 }
3977 FixedArrayBase* elms;
3978 MaybeObject* maybe_elms = NULL;
3979 if (elements_kind == FAST_DOUBLE_ELEMENTS) {
3980 if (mode == DONT_INITIALIZE_ARRAY_ELEMENTS) {
3981 maybe_elms = AllocateUninitializedFixedDoubleArray(capacity);
3982 } else {
3983 ASSERT(mode == INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
3984 maybe_elms = AllocateFixedDoubleArrayWithHoles(capacity);
3985 }
3986 } else {
3987 ASSERT(IsFastSmiOrObjectElementsKind(elements_kind));
3988 if (mode == DONT_INITIALIZE_ARRAY_ELEMENTS) {
3989 maybe_elms = AllocateUninitializedFixedArray(capacity);
3990 } else {
3991 ASSERT(mode == INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
3992 maybe_elms = AllocateFixedArrayWithHoles(capacity);
3993 }
3994 }
3995 if (!maybe_elms->To(&elms)) return maybe_elms;
3997 array->set_elements(elms);
3998 array->set_length(Smi::FromInt(length));
3999 return array;
4000 }
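// Illustrative usage sketch (editor addition, assuming a Heap* in scope):
//
//   JSArray* array;
//   { MaybeObject* maybe_array = heap->AllocateJSArrayAndStorage(
//         FAST_HOLEY_ELEMENTS, 0, 16,
//         INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE, NOT_TENURED);
//     if (!maybe_array->To(&array)) return maybe_array;
//   }
// This yields a length-0 array whose 16-element backing store is pre-filled
// with the hole, following the retry protocol used throughout this file.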
4003 MaybeObject* Heap::AllocateJSArrayWithElements(
4004 FixedArrayBase* elements,
4005 ElementsKind elements_kind,
4006 PretenureFlag pretenure) {
4007 MaybeObject* maybe_array = AllocateJSArray(elements_kind, pretenure);
4008 JSArray* array;
4009 if (!maybe_array->To(&array)) return maybe_array;
4011 array->set_elements(elements);
4012 array->set_length(Smi::FromInt(elements->length()));
4013 array->ValidateElements();
4014 return array;
4015 }
4018 MaybeObject* Heap::AllocateJSProxy(Object* handler, Object* prototype) {
4020 // TODO(rossberg): Once we optimize proxies, think about a scheme to share
4021 // maps. Will probably depend on the identity of the handler object, too.
4022 Map* map;
4023 MaybeObject* maybe_map_obj = AllocateMap(JS_PROXY_TYPE, JSProxy::kSize);
4024 if (!maybe_map_obj->To<Map>(&map)) return maybe_map_obj;
4025 map->set_prototype(prototype);
4027 // Allocate the proxy object.
4028 JSProxy* result;
4029 MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
4030 if (!maybe_result->To<JSProxy>(&result)) return maybe_result;
4031 result->InitializeBody(map->instance_size(), Smi::FromInt(0));
4032 result->set_handler(handler);
4033 result->set_hash(undefined_value(), SKIP_WRITE_BARRIER);
4034 return result;
4035 }
4038 MaybeObject* Heap::AllocateJSFunctionProxy(Object* handler,
4039 Object* call_trap,
4040 Object* construct_trap,
4041 Object* prototype) {
4043 // TODO(rossberg): Once we optimize proxies, think about a scheme to share
4044 // maps. Will probably depend on the identity of the handler object, too.
4045 Map* map;
4046 MaybeObject* maybe_map_obj =
4047 AllocateMap(JS_FUNCTION_PROXY_TYPE, JSFunctionProxy::kSize);
4048 if (!maybe_map_obj->To<Map>(&map)) return maybe_map_obj;
4049 map->set_prototype(prototype);
4051 // Allocate the proxy object.
4052 JSFunctionProxy* result;
4053 MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
4054 if (!maybe_result->To<JSFunctionProxy>(&result)) return maybe_result;
4055 result->InitializeBody(map->instance_size(), Smi::FromInt(0));
4056 result->set_handler(handler);
4057 result->set_hash(undefined_value(), SKIP_WRITE_BARRIER);
4058 result->set_call_trap(call_trap);
4059 result->set_construct_trap(construct_trap);
4060 return result;
4061 }
4064 MaybeObject* Heap::AllocateGlobalObject(JSFunction* constructor) {
4065 ASSERT(constructor->has_initial_map());
4066 Map* map = constructor->initial_map();
4068 // Make sure no field properties are described in the initial map.
4069 // This guarantees us that normalizing the properties does not
4070 // require us to change property values to JSGlobalPropertyCells.
4071 ASSERT(map->NextFreePropertyIndex() == 0);
4073 // Make sure we don't have a ton of pre-allocated slots in the
4074 // global objects. They will be unused once we normalize the object.
4075 ASSERT(map->unused_property_fields() == 0);
4076 ASSERT(map->inobject_properties() == 0);
4078 // Initial size of the backing store to avoid resize of the storage during
4079 // bootstrapping. The size differs between the JS global object and the
4080 // builtins object.
4081 int initial_size = map->instance_type() == JS_GLOBAL_OBJECT_TYPE ? 64 : 512;
4083 // Allocate a dictionary object for backing storage.
4084 Object* obj;
4085 { MaybeObject* maybe_obj =
4086 StringDictionary::Allocate(
4087 map->NumberOfDescribedProperties() * 2 + initial_size);
4088 if (!maybe_obj->ToObject(&obj)) return maybe_obj;
4089 }
4090 StringDictionary* dictionary = StringDictionary::cast(obj);
4092 // The global object might be created from an object template with accessors.
4093 // Fill these accessors into the dictionary.
4094 DescriptorArray* descs = map->instance_descriptors();
4095 for (int i = 0; i < descs->number_of_descriptors(); i++) {
4096 PropertyDetails details = descs->GetDetails(i);
4097 ASSERT(details.type() == CALLBACKS); // Only accessors are expected.
4098 PropertyDetails d =
4099 PropertyDetails(details.attributes(), CALLBACKS, details.index());
4100 Object* value = descs->GetCallbacksObject(i);
4101 { MaybeObject* maybe_value = AllocateJSGlobalPropertyCell(value);
4102 if (!maybe_value->ToObject(&value)) return maybe_value;
4103 }
4105 Object* result;
4106 { MaybeObject* maybe_result = dictionary->Add(descs->GetKey(i), value, d);
4107 if (!maybe_result->ToObject(&result)) return maybe_result;
4108 }
4109 dictionary = StringDictionary::cast(result);
4110 }
4112 // Allocate the global object and initialize it with the backing store.
4113 { MaybeObject* maybe_obj = Allocate(map, OLD_POINTER_SPACE);
4114 if (!maybe_obj->ToObject(&obj)) return maybe_obj;
4115 }
4116 JSObject* global = JSObject::cast(obj);
4117 InitializeJSObjectFromMap(global, dictionary, map);
4119 // Create a new map for the global object.
4120 { MaybeObject* maybe_obj = map->CopyDropDescriptors();
4121 if (!maybe_obj->ToObject(&obj)) return maybe_obj;
4122 }
4123 Map* new_map = Map::cast(obj);
4125 // Set up the global object as a normalized object.
4126 global->set_map(new_map);
4127 global->map()->clear_instance_descriptors();
4128 global->set_properties(dictionary);
4130 // Make sure result is a global object with properties in dictionary.
4131 ASSERT(global->IsGlobalObject());
4132 ASSERT(!global->HasFastProperties());
4133 return global;
4134 }
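// Editor's worked example of the sizing above (hypothetical numbers): for a
// JS global object with 10 accessor descriptors, the dictionary is allocated
// with 10 * 2 + 64 = 84 entries, while a builtins object with the same
// descriptors would get 10 * 2 + 512 = 532 entries, large enough that
// bootstrapping is unlikely to force a resize.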
4137 MaybeObject* Heap::CopyJSObject(JSObject* source) {
4138 // Never used to copy functions. If functions need to be copied we
4139 // have to be careful to clear the literals array.
4140 SLOW_ASSERT(!source->IsJSFunction());
4143 Map* map = source->map();
4144 int object_size = map->instance_size();
4145 Object* clone;
4147 WriteBarrierMode wb_mode = UPDATE_WRITE_BARRIER;
4149 // If we're forced to always allocate, we use the general allocation
4150 // functions which may leave us with an object in old space.
4151 if (always_allocate()) {
4152 { MaybeObject* maybe_clone =
4153 AllocateRaw(object_size, NEW_SPACE, OLD_POINTER_SPACE);
4154 if (!maybe_clone->ToObject(&clone)) return maybe_clone;
4155 }
4156 Address clone_address = HeapObject::cast(clone)->address();
4157 CopyBlock(clone_address,
4158 source->address(),
4159 object_size);
4160 // Update write barrier for all fields that lie beyond the header.
4161 RecordWrites(clone_address,
4162 JSObject::kHeaderSize,
4163 (object_size - JSObject::kHeaderSize) / kPointerSize);
4164 } else {
4165 wb_mode = SKIP_WRITE_BARRIER;
4166 { MaybeObject* maybe_clone = new_space_.AllocateRaw(object_size);
4167 if (!maybe_clone->ToObject(&clone)) return maybe_clone;
4168 }
4169 SLOW_ASSERT(InNewSpace(clone));
4170 // Since we know the clone is allocated in new space, we can copy
4171 // the contents without worrying about updating the write barrier.
4172 CopyBlock(HeapObject::cast(clone)->address(),
4173 source->address(),
4174 object_size);
4175 }
4177 SLOW_ASSERT(
4178 JSObject::cast(clone)->GetElementsKind() == source->GetElementsKind());
4179 FixedArrayBase* elements = FixedArrayBase::cast(source->elements());
4180 FixedArray* properties = FixedArray::cast(source->properties());
4181 // Update elements if necessary.
4182 if (elements->length() > 0) {
4183 Object* elem;
4184 { MaybeObject* maybe_elem;
4185 if (elements->map() == fixed_cow_array_map()) {
4186 maybe_elem = FixedArray::cast(elements);
4187 } else if (source->HasFastDoubleElements()) {
4188 maybe_elem = CopyFixedDoubleArray(FixedDoubleArray::cast(elements));
4189 } else {
4190 maybe_elem = CopyFixedArray(FixedArray::cast(elements));
4191 }
4192 if (!maybe_elem->ToObject(&elem)) return maybe_elem;
4193 }
4194 JSObject::cast(clone)->set_elements(FixedArrayBase::cast(elem), wb_mode);
4195 }
4196 // Update properties if necessary.
4197 if (properties->length() > 0) {
4198 Object* prop;
4199 { MaybeObject* maybe_prop = CopyFixedArray(properties);
4200 if (!maybe_prop->ToObject(&prop)) return maybe_prop;
4201 }
4202 JSObject::cast(clone)->set_properties(FixedArray::cast(prop), wb_mode);
4203 }
4204 // Return the new clone.
4205 return clone;
4206 }
4209 MaybeObject* Heap::ReinitializeJSReceiver(
4210 JSReceiver* object, InstanceType type, int size) {
4211 ASSERT(type >= FIRST_JS_OBJECT_TYPE);
4213 // Allocate fresh map.
4214 // TODO(rossberg): Once we optimize proxies, cache these maps.
4215 Map* map;
4216 MaybeObject* maybe = AllocateMap(type, size);
4217 if (!maybe->To<Map>(&map)) return maybe;
4219 // Check that the receiver has at least the size of the fresh object.
4220 int size_difference = object->map()->instance_size() - map->instance_size();
4221 ASSERT(size_difference >= 0);
4223 map->set_prototype(object->map()->prototype());
4225 // Allocate the backing storage for the properties.
4226 int prop_size = map->unused_property_fields() - map->inobject_properties();
4227 Object* properties;
4228 maybe = AllocateFixedArray(prop_size, TENURED);
4229 if (!maybe->ToObject(&properties)) return maybe;
4231 // Functions require some allocation, which might fail here.
4232 SharedFunctionInfo* shared = NULL;
4233 if (type == JS_FUNCTION_TYPE) {
4234 String* name;
4235 maybe = LookupAsciiSymbol("<freezing call trap>");
4236 if (!maybe->To<String>(&name)) return maybe;
4237 maybe = AllocateSharedFunctionInfo(name);
4238 if (!maybe->To<SharedFunctionInfo>(&shared)) return maybe;
4239 }
4241 // Because of possible retries of this function after failure,
4242 // we must NOT fail after this point, where we have changed the type!
4244 // Reset the map for the object.
4245 object->set_map(map);
4246 JSObject* jsobj = JSObject::cast(object);
4248 // Reinitialize the object from the constructor map.
4249 InitializeJSObjectFromMap(jsobj, FixedArray::cast(properties), map);
4251 // Functions require some minimal initialization.
4252 if (type == JS_FUNCTION_TYPE) {
4253 map->set_function_with_prototype(true);
4254 InitializeFunction(JSFunction::cast(object), shared, the_hole_value());
4255 JSFunction::cast(object)->set_context(
4256 isolate()->context()->global_context());
4257 }
4259 // Put in filler if the new object is smaller than the old.
4260 if (size_difference > 0) {
4261 CreateFillerObjectAt(
4262 object->address() + map->instance_size(), size_difference);
4263 }
4265 return object;
4266 }
4269 MaybeObject* Heap::ReinitializeJSGlobalProxy(JSFunction* constructor,
4270 JSGlobalProxy* object) {
4271 ASSERT(constructor->has_initial_map());
4272 Map* map = constructor->initial_map();
4274 // Check that the already allocated object has the same size and type as
4275 // objects allocated using the constructor.
4276 ASSERT(map->instance_size() == object->map()->instance_size());
4277 ASSERT(map->instance_type() == object->map()->instance_type());
4279 // Allocate the backing storage for the properties.
4280 int prop_size = map->unused_property_fields() - map->inobject_properties();
4281 Object* properties;
4282 { MaybeObject* maybe_properties = AllocateFixedArray(prop_size, TENURED);
4283 if (!maybe_properties->ToObject(&properties)) return maybe_properties;
4284 }
4286 // Reset the map for the object.
4287 object->set_map(constructor->initial_map());
4289 // Reinitialize the object from the constructor map.
4290 InitializeJSObjectFromMap(object, FixedArray::cast(properties), map);
4291 return object;
4292 }
4295 MaybeObject* Heap::AllocateStringFromAscii(Vector<const char> string,
4296 PretenureFlag pretenure) {
4297 if (string.length() == 1) {
4298 return Heap::LookupSingleCharacterStringFromCode(string[0]);
4299 }
4300 Object* result;
4301 { MaybeObject* maybe_result =
4302 AllocateRawAsciiString(string.length(), pretenure);
4303 if (!maybe_result->ToObject(&result)) return maybe_result;
4304 }
4306 // Copy the characters into the new object.
4307 SeqAsciiString* string_result = SeqAsciiString::cast(result);
4308 for (int i = 0; i < string.length(); i++) {
4309 string_result->SeqAsciiStringSet(i, string[i]);
4310 }
4311 return result;
4312 }
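// Illustrative usage (editor addition; assumes this runs inside a Heap
// method, as elsewhere in this file):
//
//   Object* str;
//   { MaybeObject* maybe_str =
//         AllocateStringFromAscii(CStrVector("hello"), NOT_TENURED);
//     if (!maybe_str->ToObject(&str)) return maybe_str;
//   }
//   // str is now a sequential ASCII string (or an interned single-character
//   // string for length-1 inputs).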
4315 MaybeObject* Heap::AllocateStringFromUtf8Slow(Vector<const char> string,
4316 PretenureFlag pretenure) {
4317 // Count the number of characters in the UTF-8 string and check if
4318 // it is an ASCII string.
4319 Access<UnicodeCache::Utf8Decoder>
4320 decoder(isolate_->unicode_cache()->utf8_decoder());
4321 decoder->Reset(string.start(), string.length());
4322 int chars = 0;
4323 while (decoder->has_more()) {
4324 uint32_t r = decoder->GetNext();
4325 if (r <= unibrow::Utf16::kMaxNonSurrogateCharCode) {
4326 chars++;
4327 } else {
4328 chars += 2;
4329 }
4330 }
4332 Object* result;
4333 { MaybeObject* maybe_result = AllocateRawTwoByteString(chars, pretenure);
4334 if (!maybe_result->ToObject(&result)) return maybe_result;
4335 }
4337 // Convert and copy the characters into the new object.
4338 String* string_result = String::cast(result);
4339 decoder->Reset(string.start(), string.length());
4340 int i = 0;
4341 while (i < chars) {
4342 uint32_t r = decoder->GetNext();
4343 if (r > unibrow::Utf16::kMaxNonSurrogateCharCode) {
4344 string_result->Set(i++, unibrow::Utf16::LeadSurrogate(r));
4345 string_result->Set(i++, unibrow::Utf16::TrailSurrogate(r));
4346 } else {
4347 string_result->Set(i++, r);
4348 }
4349 }
4350 return result;
4351 }
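// Editor's worked example of the surrogate encoding above: a supplementary
// code point such as U+1F600 is stored as two UTF-16 code units. With
// v = 0x1F600 - 0x10000 = 0xF600, the lead surrogate is
// 0xD800 + (v >> 10) = 0xD83D and the trail surrogate is
// 0xDC00 + (v & 0x3FF) = 0xDE00, which is what LeadSurrogate() and
// TrailSurrogate() compute; that is also why such characters count as two
// in the chars tally.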
4354 MaybeObject* Heap::AllocateStringFromTwoByte(Vector<const uc16> string,
4355 PretenureFlag pretenure) {
4356 // Check if the string is an ASCII string.
4357 MaybeObject* maybe_result;
4358 if (String::IsAscii(string.start(), string.length())) {
4359 maybe_result = AllocateRawAsciiString(string.length(), pretenure);
4360 } else { // It's not an ASCII string.
4361 maybe_result = AllocateRawTwoByteString(string.length(), pretenure);
4362 }
4363 Object* result;
4364 if (!maybe_result->ToObject(&result)) return maybe_result;
4366 // Copy the characters into the new object, which may be either ASCII or
4367 // two-byte.
4368 String* string_result = String::cast(result);
4369 for (int i = 0; i < string.length(); i++) {
4370 string_result->Set(i, string[i]);
4371 }
4372 return result;
4373 }
4376 Map* Heap::SymbolMapForString(String* string) {
4377 // If the string is in new space it cannot be used as a symbol.
4378 if (InNewSpace(string)) return NULL;
4380 // Find the corresponding symbol map for strings.
4381 switch (string->map()->instance_type()) {
4382 case STRING_TYPE: return symbol_map();
4383 case ASCII_STRING_TYPE: return ascii_symbol_map();
4384 case CONS_STRING_TYPE: return cons_symbol_map();
4385 case CONS_ASCII_STRING_TYPE: return cons_ascii_symbol_map();
4386 case EXTERNAL_STRING_TYPE: return external_symbol_map();
4387 case EXTERNAL_ASCII_STRING_TYPE: return external_ascii_symbol_map();
4388 case EXTERNAL_STRING_WITH_ASCII_DATA_TYPE:
4389 return external_symbol_with_ascii_data_map();
4390 case SHORT_EXTERNAL_STRING_TYPE: return short_external_symbol_map();
4391 case SHORT_EXTERNAL_ASCII_STRING_TYPE:
4392 return short_external_ascii_symbol_map();
4393 case SHORT_EXTERNAL_STRING_WITH_ASCII_DATA_TYPE:
4394 return short_external_symbol_with_ascii_data_map();
4395 default: return NULL; // No match found.
4396 }
4397 }
4400 MaybeObject* Heap::AllocateInternalSymbol(unibrow::CharacterStream* buffer,
4401 int chars,
4402 uint32_t hash_field) {
4403 ASSERT(chars >= 0);
4404 // Ensure that chars matches the number of characters in the buffer.
4405 ASSERT(static_cast<unsigned>(chars) == buffer->Utf16Length());
4406 // Determine whether the string is ASCII.
4407 bool is_ascii = true;
4408 while (buffer->has_more()) {
4409 if (buffer->GetNext() > unibrow::Utf8::kMaxOneByteChar) {
4410 is_ascii = false;
4411 break;
4412 }
4413 }
4414 buffer->Rewind();
4416 // Compute map and object size.
4417 int size;
4418 Map* map;
4420 if (is_ascii) {
4421 if (chars > SeqAsciiString::kMaxLength) {
4422 return Failure::OutOfMemoryException();
4423 }
4424 map = ascii_symbol_map();
4425 size = SeqAsciiString::SizeFor(chars);
4426 } else {
4427 if (chars > SeqTwoByteString::kMaxLength) {
4428 return Failure::OutOfMemoryException();
4429 }
4430 map = symbol_map();
4431 size = SeqTwoByteString::SizeFor(chars);
4432 }
4434 // Allocate string.
4435 Object* result;
4436 { MaybeObject* maybe_result = (size > Page::kMaxNonCodeHeapObjectSize)
4437 ? lo_space_->AllocateRaw(size, NOT_EXECUTABLE)
4438 : old_data_space_->AllocateRaw(size);
4439 if (!maybe_result->ToObject(&result)) return maybe_result;
4440 }
4442 reinterpret_cast<HeapObject*>(result)->set_map_no_write_barrier(map);
4443 // Set length and hash fields of the allocated string.
4444 String* answer = String::cast(result);
4445 answer->set_length(chars);
4446 answer->set_hash_field(hash_field);
4448 ASSERT_EQ(size, answer->Size());
4450 // Fill in the characters.
4451 int i = 0;
4452 while (i < chars) {
4453 uint32_t character = buffer->GetNext();
4454 if (character > unibrow::Utf16::kMaxNonSurrogateCharCode) {
4455 answer->Set(i++, unibrow::Utf16::LeadSurrogate(character));
4456 answer->Set(i++, unibrow::Utf16::TrailSurrogate(character));
4457 } else {
4458 answer->Set(i++, character);
4459 }
4460 }
4461 return answer;
4462 }
4465 MaybeObject* Heap::AllocateRawAsciiString(int length, PretenureFlag pretenure) {
4466 if (length < 0 || length > SeqAsciiString::kMaxLength) {
4467 return Failure::OutOfMemoryException();
4468 }
4470 int size = SeqAsciiString::SizeFor(length);
4471 ASSERT(size <= SeqAsciiString::kMaxSize);
4473 AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
4474 AllocationSpace retry_space = OLD_DATA_SPACE;
4476 if (space == NEW_SPACE) {
4477 if (size > kMaxObjectSizeInNewSpace) {
4478 // Allocate in large object space, retry space will be ignored.
4479 space = LO_SPACE;
4480 } else if (size > Page::kMaxNonCodeHeapObjectSize) {
4481 // Allocate in new space, retry in large object space.
4482 retry_space = LO_SPACE;
4483 }
4484 } else if (space == OLD_DATA_SPACE &&
4485 size > Page::kMaxNonCodeHeapObjectSize) {
4486 space = LO_SPACE;
4487 }
4488 Object* result;
4489 { MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
4490 if (!maybe_result->ToObject(&result)) return maybe_result;
4491 }
4493 // Partially initialize the object.
4494 HeapObject::cast(result)->set_map_no_write_barrier(ascii_string_map());
4495 String::cast(result)->set_length(length);
4496 String::cast(result)->set_hash_field(String::kEmptyHashField);
4497 ASSERT_EQ(size, HeapObject::cast(result)->Size());
4499 #ifdef DEBUG
4500 if (FLAG_verify_heap) {
4501 // Initialize string's content to ensure ASCII-ness (character range 0-127)
4502 // as required when verifying the heap.
4503 char* dest = SeqAsciiString::cast(result)->GetChars();
4504 memset(dest, 0x0F, length * kCharSize);
4505 }
4506 #endif
4508 return result;
4509 }
4512 MaybeObject* Heap::AllocateRawTwoByteString(int length,
4513 PretenureFlag pretenure) {
4514 if (length < 0 || length > SeqTwoByteString::kMaxLength) {
4515 return Failure::OutOfMemoryException();
4516 }
4517 int size = SeqTwoByteString::SizeFor(length);
4518 ASSERT(size <= SeqTwoByteString::kMaxSize);
4519 AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
4520 AllocationSpace retry_space = OLD_DATA_SPACE;
4522 if (space == NEW_SPACE) {
4523 if (size > kMaxObjectSizeInNewSpace) {
4524 // Allocate in large object space, retry space will be ignored.
4525 space = LO_SPACE;
4526 } else if (size > Page::kMaxNonCodeHeapObjectSize) {
4527 // Allocate in new space, retry in large object space.
4528 retry_space = LO_SPACE;
4529 }
4530 } else if (space == OLD_DATA_SPACE &&
4531 size > Page::kMaxNonCodeHeapObjectSize) {
4532 space = LO_SPACE;
4533 }
4534 Object* result;
4535 { MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
4536 if (!maybe_result->ToObject(&result)) return maybe_result;
4537 }
4539 // Partially initialize the object.
4540 HeapObject::cast(result)->set_map_no_write_barrier(string_map());
4541 String::cast(result)->set_length(length);
4542 String::cast(result)->set_hash_field(String::kEmptyHashField);
4543 ASSERT_EQ(size, HeapObject::cast(result)->Size());
4544 return result;
4545 }
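// Editor's note (illustrative, rough numbers): the space selection above
// sends a string wherever it fits. A 10 KB string fits in new space with
// OLD_DATA_SPACE as the retry space, a string just over
// Page::kMaxNonCodeHeapObjectSize is still tried in new space but retries in
// LO_SPACE, and a string too big for new space goes directly to LO_SPACE.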
4548 MaybeObject* Heap::AllocateJSArray(
4549 ElementsKind elements_kind,
4550 PretenureFlag pretenure) {
4551 Context* global_context = isolate()->context()->global_context();
4552 JSFunction* array_function = global_context->array_function();
4553 Map* map = array_function->initial_map();
4554 Object* maybe_map_array = global_context->js_array_maps();
4555 if (!maybe_map_array->IsUndefined()) {
4556 Object* maybe_transitioned_map =
4557 FixedArray::cast(maybe_map_array)->get(elements_kind);
4558 if (!maybe_transitioned_map->IsUndefined()) {
4559 map = Map::cast(maybe_transitioned_map);
4560 }
4561 }
4563 return AllocateJSObjectFromMap(map, pretenure);
4564 }
4567 MaybeObject* Heap::AllocateEmptyFixedArray() {
4568 int size = FixedArray::SizeFor(0);
4569 Object* result;
4570 { MaybeObject* maybe_result =
4571 AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE);
4572 if (!maybe_result->ToObject(&result)) return maybe_result;
4573 }
4574 // Initialize the object.
4575 reinterpret_cast<FixedArray*>(result)->set_map_no_write_barrier(
4576 fixed_array_map());
4577 reinterpret_cast<FixedArray*>(result)->set_length(0);
4578 return result;
4579 }
4582 MaybeObject* Heap::AllocateRawFixedArray(int length) {
4583 if (length < 0 || length > FixedArray::kMaxLength) {
4584 return Failure::OutOfMemoryException();
4585 }
4587 // Use the general function if we're forced to always allocate.
4588 if (always_allocate()) return AllocateFixedArray(length, TENURED);
4589 // Allocate the raw data for a fixed array.
4590 int size = FixedArray::SizeFor(length);
4591 return size <= kMaxObjectSizeInNewSpace
4592 ? new_space_.AllocateRaw(size)
4593 : lo_space_->AllocateRaw(size, NOT_EXECUTABLE);
4594 }
4597 MaybeObject* Heap::CopyFixedArrayWithMap(FixedArray* src, Map* map) {
4598 int len = src->length();
4599 Object* obj;
4600 { MaybeObject* maybe_obj = AllocateRawFixedArray(len);
4601 if (!maybe_obj->ToObject(&obj)) return maybe_obj;
4602 }
4603 if (InNewSpace(obj)) {
4604 HeapObject* dst = HeapObject::cast(obj);
4605 dst->set_map_no_write_barrier(map);
4606 CopyBlock(dst->address() + kPointerSize,
4607 src->address() + kPointerSize,
4608 FixedArray::SizeFor(len) - kPointerSize);
4609 return obj;
4610 }
4611 HeapObject::cast(obj)->set_map_no_write_barrier(map);
4612 FixedArray* result = FixedArray::cast(obj);
4613 result->set_length(len);
4616 AssertNoAllocation no_gc;
4617 WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
4618 for (int i = 0; i < len; i++) result->set(i, src->get(i), mode);
4619 return result;
4620 }
4623 MaybeObject* Heap::CopyFixedDoubleArrayWithMap(FixedDoubleArray* src,
4624 Map* map) {
4625 int len = src->length();
4626 Object* obj;
4627 { MaybeObject* maybe_obj = AllocateRawFixedDoubleArray(len, NOT_TENURED);
4628 if (!maybe_obj->ToObject(&obj)) return maybe_obj;
4629 }
4630 HeapObject* dst = HeapObject::cast(obj);
4631 dst->set_map_no_write_barrier(map);
4632 CopyBlock(
4633 dst->address() + FixedDoubleArray::kLengthOffset,
4634 src->address() + FixedDoubleArray::kLengthOffset,
4635 FixedDoubleArray::SizeFor(len) - FixedDoubleArray::kLengthOffset);
4636 return obj;
4637 }
4640 MaybeObject* Heap::AllocateFixedArray(int length) {
4641 ASSERT(length >= 0);
4642 if (length == 0) return empty_fixed_array();
4643 Object* result;
4644 { MaybeObject* maybe_result = AllocateRawFixedArray(length);
4645 if (!maybe_result->ToObject(&result)) return maybe_result;
4646 }
4647 // Initialize header.
4648 FixedArray* array = reinterpret_cast<FixedArray*>(result);
4649 array->set_map_no_write_barrier(fixed_array_map());
4650 array->set_length(length);
4652 ASSERT(!InNewSpace(undefined_value()));
4653 MemsetPointer(array->data_start(), undefined_value(), length);
4654 return array;
4655 }
4658 MaybeObject* Heap::AllocateRawFixedArray(int length, PretenureFlag pretenure) {
4659 if (length < 0 || length > FixedArray::kMaxLength) {
4660 return Failure::OutOfMemoryException();
4661 }
4663 AllocationSpace space =
4664 (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
4665 int size = FixedArray::SizeFor(length);
4666 if (space == NEW_SPACE && size > kMaxObjectSizeInNewSpace) {
4667 // Too big for new space.
4668 space = LO_SPACE;
4669 } else if (space == OLD_POINTER_SPACE &&
4670 size > Page::kMaxNonCodeHeapObjectSize) {
4671 // Too big for old pointer space.
4672 space = LO_SPACE;
4673 }
4675 AllocationSpace retry_space =
4676 (size <= Page::kMaxNonCodeHeapObjectSize) ? OLD_POINTER_SPACE : LO_SPACE;
4678 return AllocateRaw(size, space, retry_space);
4679 }
4682 MUST_USE_RESULT static MaybeObject* AllocateFixedArrayWithFiller(
4683 Heap* heap,
4684 int length,
4685 PretenureFlag pretenure,
4686 Object* filler) {
4687 ASSERT(length >= 0);
4688 ASSERT(heap->empty_fixed_array()->IsFixedArray());
4689 if (length == 0) return heap->empty_fixed_array();
4691 ASSERT(!heap->InNewSpace(filler));
4692 Object* result;
4693 { MaybeObject* maybe_result = heap->AllocateRawFixedArray(length, pretenure);
4694 if (!maybe_result->ToObject(&result)) return maybe_result;
4695 }
4697 HeapObject::cast(result)->set_map_no_write_barrier(heap->fixed_array_map());
4698 FixedArray* array = FixedArray::cast(result);
4699 array->set_length(length);
4700 MemsetPointer(array->data_start(), filler, length);
4701 return array;
4702 }
4705 MaybeObject* Heap::AllocateFixedArray(int length, PretenureFlag pretenure) {
4706 return AllocateFixedArrayWithFiller(this,
4707 length,
4708 pretenure,
4709 undefined_value());
4710 }
4713 MaybeObject* Heap::AllocateFixedArrayWithHoles(int length,
4714 PretenureFlag pretenure) {
4715 return AllocateFixedArrayWithFiller(this,
4716 length,
4717 pretenure,
4718 the_hole_value());
4719 }
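// Editor's note (illustrative): the two wrappers above differ only in the
// filler root written into every slot. E.g. (hypothetical call sites):
//
//   heap->AllocateFixedArray(8, TENURED);           // slots = undefined
//   heap->AllocateFixedArrayWithHoles(8, TENURED);  // slots = the_hole
//
// The hole variant is what backs holey JSArray element stores.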
4722 MaybeObject* Heap::AllocateUninitializedFixedArray(int length) {
4723 if (length == 0) return empty_fixed_array();
4725 Object* obj;
4726 { MaybeObject* maybe_obj = AllocateRawFixedArray(length);
4727 if (!maybe_obj->ToObject(&obj)) return maybe_obj;
4728 }
4730 reinterpret_cast<FixedArray*>(obj)->set_map_no_write_barrier(
4731 fixed_array_map());
4732 FixedArray::cast(obj)->set_length(length);
4733 return obj;
4734 }
4737 MaybeObject* Heap::AllocateEmptyFixedDoubleArray() {
4738 int size = FixedDoubleArray::SizeFor(0);
4739 Object* result;
4740 { MaybeObject* maybe_result =
4741 AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE);
4742 if (!maybe_result->ToObject(&result)) return maybe_result;
4743 }
4744 // Initialize the object.
4745 reinterpret_cast<FixedDoubleArray*>(result)->set_map_no_write_barrier(
4746 fixed_double_array_map());
4747 reinterpret_cast<FixedDoubleArray*>(result)->set_length(0);
4748 return result;
4749 }
4752 MaybeObject* Heap::AllocateUninitializedFixedDoubleArray(
4753 int length,
4754 PretenureFlag pretenure) {
4755 if (length == 0) return empty_fixed_array();
4757 Object* elements_object;
4758 MaybeObject* maybe_obj = AllocateRawFixedDoubleArray(length, pretenure);
4759 if (!maybe_obj->ToObject(&elements_object)) return maybe_obj;
4760 FixedDoubleArray* elements =
4761 reinterpret_cast<FixedDoubleArray*>(elements_object);
4763 elements->set_map_no_write_barrier(fixed_double_array_map());
4764 elements->set_length(length);
4765 return elements;
4766 }
4769 MaybeObject* Heap::AllocateFixedDoubleArrayWithHoles(
4770 int length,
4771 PretenureFlag pretenure) {
4772 if (length == 0) return empty_fixed_array();
4774 Object* elements_object;
4775 MaybeObject* maybe_obj = AllocateRawFixedDoubleArray(length, pretenure);
4776 if (!maybe_obj->ToObject(&elements_object)) return maybe_obj;
4777 FixedDoubleArray* elements =
4778 reinterpret_cast<FixedDoubleArray*>(elements_object);
4780 for (int i = 0; i < length; ++i) {
4781 elements->set_the_hole(i);
4782 }
4784 elements->set_map_no_write_barrier(fixed_double_array_map());
4785 elements->set_length(length);
4786 return elements;
4787 }
4790 MaybeObject* Heap::AllocateRawFixedDoubleArray(int length,
4791 PretenureFlag pretenure) {
4792 if (length < 0 || length > FixedDoubleArray::kMaxLength) {
4793 return Failure::OutOfMemoryException();
4794 }
4796 AllocationSpace space =
4797 (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
4798 int size = FixedDoubleArray::SizeFor(length);
4800 #ifndef V8_HOST_ARCH_64_BIT
4801 size += kPointerSize;
4802 #endif
4804 if (space == NEW_SPACE && size > kMaxObjectSizeInNewSpace) {
4805 // Too big for new space.
4806 space = LO_SPACE;
4807 } else if (space == OLD_DATA_SPACE &&
4808 size > Page::kMaxNonCodeHeapObjectSize) {
4809 // Too big for old data space.
4810 space = LO_SPACE;
4811 }
4813 AllocationSpace retry_space =
4814 (size <= Page::kMaxNonCodeHeapObjectSize) ? OLD_DATA_SPACE : LO_SPACE;
4816 HeapObject* object;
4817 { MaybeObject* maybe_object = AllocateRaw(size, space, retry_space);
4818 if (!maybe_object->To<HeapObject>(&object)) return maybe_object;
4819 }
4821 return EnsureDoubleAligned(this, object, size);
4822 }
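// Editor's note: on 32-bit hosts the allocation above reserves one extra
// pointer-sized slot (see the #ifndef V8_HOST_ARCH_64_BIT branch) so that
// EnsureDoubleAligned can slide the object to an 8-byte boundary and turn
// the leftover word into a filler; on 64-bit hosts every allocation is
// already suitably aligned for doubles.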
4825 MaybeObject* Heap::AllocateHashTable(int length, PretenureFlag pretenure) {
4826 Object* result;
4827 { MaybeObject* maybe_result = AllocateFixedArray(length, pretenure);
4828 if (!maybe_result->ToObject(&result)) return maybe_result;
4829 }
4830 reinterpret_cast<HeapObject*>(result)->set_map_no_write_barrier(
4831 hash_table_map());
4832 ASSERT(result->IsHashTable());
4833 return result;
4834 }
4837 MaybeObject* Heap::AllocateGlobalContext() {
4838 Object* result;
4839 { MaybeObject* maybe_result =
4840 AllocateFixedArray(Context::GLOBAL_CONTEXT_SLOTS);
4841 if (!maybe_result->ToObject(&result)) return maybe_result;
4842 }
4843 Context* context = reinterpret_cast<Context*>(result);
4844 context->set_map_no_write_barrier(global_context_map());
4845 context->set_js_array_maps(undefined_value());
4846 ASSERT(context->IsGlobalContext());
4847 ASSERT(result->IsContext());
4848 return result;
4849 }
4852 MaybeObject* Heap::AllocateModuleContext(Context* previous,
4853 ScopeInfo* scope_info) {
4854 Object* result;
4855 { MaybeObject* maybe_result =
4856 AllocateFixedArrayWithHoles(scope_info->ContextLength(), TENURED);
4857 if (!maybe_result->ToObject(&result)) return maybe_result;
4858 }
4859 Context* context = reinterpret_cast<Context*>(result);
4860 context->set_map_no_write_barrier(module_context_map());
4861 context->set_previous(previous);
4862 context->set_extension(scope_info);
4863 context->set_global(previous->global());
4864 return context;
4865 }
4868 MaybeObject* Heap::AllocateFunctionContext(int length, JSFunction* function) {
4869 ASSERT(length >= Context::MIN_CONTEXT_SLOTS);
4870 Object* result;
4871 { MaybeObject* maybe_result = AllocateFixedArray(length);
4872 if (!maybe_result->ToObject(&result)) return maybe_result;
4873 }
4874 Context* context = reinterpret_cast<Context*>(result);
4875 context->set_map_no_write_barrier(function_context_map());
4876 context->set_closure(function);
4877 context->set_previous(function->context());
4878 context->set_extension(NULL);
4879 context->set_global(function->context()->global());
4880 return context;
4881 }
4884 MaybeObject* Heap::AllocateCatchContext(JSFunction* function,
4885 Context* previous,
4886 String* name,
4887 Object* thrown_object) {
4888 STATIC_ASSERT(Context::MIN_CONTEXT_SLOTS == Context::THROWN_OBJECT_INDEX);
4889 Object* result;
4890 { MaybeObject* maybe_result =
4891 AllocateFixedArray(Context::MIN_CONTEXT_SLOTS + 1);
4892 if (!maybe_result->ToObject(&result)) return maybe_result;
4893 }
4894 Context* context = reinterpret_cast<Context*>(result);
4895 context->set_map_no_write_barrier(catch_context_map());
4896 context->set_closure(function);
4897 context->set_previous(previous);
4898 context->set_extension(name);
4899 context->set_global(previous->global());
4900 context->set(Context::THROWN_OBJECT_INDEX, thrown_object);
4901 return context;
4902 }
4905 MaybeObject* Heap::AllocateWithContext(JSFunction* function,
4906 Context* previous,
4907 JSObject* extension) {
4908 Object* result;
4909 { MaybeObject* maybe_result = AllocateFixedArray(Context::MIN_CONTEXT_SLOTS);
4910 if (!maybe_result->ToObject(&result)) return maybe_result;
4911 }
4912 Context* context = reinterpret_cast<Context*>(result);
4913 context->set_map_no_write_barrier(with_context_map());
4914 context->set_closure(function);
4915 context->set_previous(previous);
4916 context->set_extension(extension);
4917 context->set_global(previous->global());
4918 return context;
4919 }
4922 MaybeObject* Heap::AllocateBlockContext(JSFunction* function,
4923 Context* previous,
4924 ScopeInfo* scope_info) {
4925 Object* result;
4926 { MaybeObject* maybe_result =
4927 AllocateFixedArrayWithHoles(scope_info->ContextLength());
4928 if (!maybe_result->ToObject(&result)) return maybe_result;
4929 }
4930 Context* context = reinterpret_cast<Context*>(result);
4931 context->set_map_no_write_barrier(block_context_map());
4932 context->set_closure(function);
4933 context->set_previous(previous);
4934 context->set_extension(scope_info);
4935 context->set_global(previous->global());
4936 return context;
4937 }
4940 MaybeObject* Heap::AllocateScopeInfo(int length) {
4941 FixedArray* scope_info;
4942 MaybeObject* maybe_scope_info = AllocateFixedArray(length, TENURED);
4943 if (!maybe_scope_info->To(&scope_info)) return maybe_scope_info;
4944 scope_info->set_map_no_write_barrier(scope_info_map());
4945 return scope_info;
4946 }
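// Editor's illustrative sketch (not part of the original file): every context
// allocator above follows the same shape -- allocate a FixedArray, stamp the
// right context map, then fill the standard slots. A minimal hypothetical
// variant:
//
//   MaybeObject* AllocateMinimalContext(Heap* heap, JSFunction* closure,
//                                       Context* previous) {
//     Object* result;
//     { MaybeObject* maybe_result =
//           heap->AllocateFixedArray(Context::MIN_CONTEXT_SLOTS);
//       if (!maybe_result->ToObject(&result)) return maybe_result;
//     }
//     Context* context = reinterpret_cast<Context*>(result);
//     context->set_map_no_write_barrier(heap->function_context_map());
//     context->set_closure(closure);
//     context->set_previous(previous);
//     context->set_extension(NULL);
//     context->set_global(previous->global());
//     return context;
//   }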
4949 MaybeObject* Heap::AllocateStruct(InstanceType type) {
4950 Map* map;
4951 switch (type) {
4952 #define MAKE_CASE(NAME, Name, name) \
4953 case NAME##_TYPE: map = name##_map(); break;
4954 STRUCT_LIST(MAKE_CASE)
4955 #undef MAKE_CASE
4956 default:
4957 UNREACHABLE();
4958 return Failure::InternalError();
4959 }
4960 int size = map->instance_size();
4961 AllocationSpace space =
4962 (size > Page::kMaxNonCodeHeapObjectSize) ? LO_SPACE : OLD_POINTER_SPACE;
4963 Object* result;
4964 { MaybeObject* maybe_result = Allocate(map, space);
4965 if (!maybe_result->ToObject(&result)) return maybe_result;
4966 }
4967 Struct::cast(result)->InitializeBody(size);
4968 return result;
4969 }
4972 bool Heap::IsHeapIterable() {
4973 return (!old_pointer_space()->was_swept_conservatively() &&
4974 !old_data_space()->was_swept_conservatively());
4975 }
4978 void Heap::EnsureHeapIsIterable() {
4979 ASSERT(IsAllocationAllowed());
4980 if (!IsHeapIterable()) {
4981 CollectAllGarbage(kMakeHeapIterableMask, "Heap::EnsureHeapIsIterable");
4982 }
4983 ASSERT(IsHeapIterable());
4984 }
4987 void Heap::AdvanceIdleIncrementalMarking(intptr_t step_size) {
4988 incremental_marking()->Step(step_size,
4989 IncrementalMarking::NO_GC_VIA_STACK_GUARD);
4991 if (incremental_marking()->IsComplete()) {
4992 bool uncommit = false;
4993 if (gc_count_at_last_idle_gc_ == gc_count_) {
4994 // No GC since the last full GC, the mutator is probably not active.
4995 isolate_->compilation_cache()->Clear();
4996 uncommit = true;
4997 }
4998 CollectAllGarbage(kNoGCFlags, "idle notification: finalize incremental");
4999 gc_count_at_last_idle_gc_ = gc_count_;
5000 if (uncommit) {
5001 new_space_.Shrink();
5002 UncommitFromSpace();
5003 }
5004 }
5005 }
5008 bool Heap::IdleNotification(int hint) {
5009 const int kMaxHint = 1000;
5010 intptr_t size_factor = Min(Max(hint, 20), kMaxHint) / 4;
5011 // The size factor is in range [5..250]. The numbers here are chosen from
5012 // experiments. If you change them, make sure to test with
5013 // chrome/performance_ui_tests --gtest_filter="GeneralMixMemoryTest.*
5014 intptr_t step_size = size_factor * IncrementalMarking::kAllocatedThreshold;
5016 if (contexts_disposed_ > 0) {
5017 if (hint >= kMaxHint) {
5018 // The embedder is requesting a lot of GC work after context disposal,
5019 // we age inline caches so that they don't keep objects from
5020 // the old context alive.
5021 AgeInlineCaches();
5022 }
5023 int mark_sweep_time = Min(TimeMarkSweepWouldTakeInMs(), 1000);
5024 if (hint >= mark_sweep_time && !FLAG_expose_gc &&
5025 incremental_marking()->IsStopped()) {
5026 HistogramTimerScope scope(isolate_->counters()->gc_context());
5027 CollectAllGarbage(kReduceMemoryFootprintMask,
5028 "idle notification: contexts disposed");
5029 } else {
5030 AdvanceIdleIncrementalMarking(step_size);
5031 contexts_disposed_ = 0;
5032 }
5033 // Make sure that we have no pending context disposals.
5033 // Make sure that we have no pending context disposals.
5034 // Take into account that we might have decided to delay full collection
5035 // because incremental marking is in progress.
5036 ASSERT((contexts_disposed_ == 0) || !incremental_marking()->IsStopped());
5037 // After context disposal there is likely a lot of garbage remaining, reset
5038 // the idle notification counters in order to trigger more incremental GCs
5039 // on subsequent idle notifications.
5040 StartIdleRound();
5041 return false;
5042 }
5044 if (!FLAG_incremental_marking || FLAG_expose_gc || Serializer::enabled()) {
5045 return IdleGlobalGC();
5046 }
5048 // By doing small chunks of GC work in each IdleNotification,
5049 // perform a round of incremental GCs and after that wait until
5050 // the mutator creates enough garbage to justify a new round.
5051 // An incremental GC progresses as follows:
5052 // 1. many incremental marking steps,
5053 // 2. one old space mark-sweep-compact,
5054 // 3. many lazy sweep steps.
5055 // Use mark-sweep-compact events to count incremental GCs in a round.
5058 if (incremental_marking()->IsStopped()) {
5059 if (!IsSweepingComplete() &&
5060 !AdvanceSweepers(static_cast<int>(step_size))) {
5061 return false;
5062 }
5063 }
5065 if (mark_sweeps_since_idle_round_started_ >= kMaxMarkSweepsInIdleRound) {
5066 if (EnoughGarbageSinceLastIdleRound()) {
5067 StartIdleRound();
5068 } else {
5069 return true;
5070 }
5071 }
5073 int new_mark_sweeps = ms_count_ - ms_count_at_last_idle_notification_;
5074 mark_sweeps_since_idle_round_started_ += new_mark_sweeps;
5075 ms_count_at_last_idle_notification_ = ms_count_;
5077 if (mark_sweeps_since_idle_round_started_ >= kMaxMarkSweepsInIdleRound) {
5078 FinishIdleRound();
5079 return true;
5080 }
5082 if (incremental_marking()->IsStopped()) {
5083 incremental_marking()->Start();
5084 }
5086 AdvanceIdleIncrementalMarking(step_size);
5087 return false;
5088 }
5091 bool Heap::IdleGlobalGC() {
5092 static const int kIdlesBeforeScavenge = 4;
5093 static const int kIdlesBeforeMarkSweep = 7;
5094 static const int kIdlesBeforeMarkCompact = 8;
5095 static const int kMaxIdleCount = kIdlesBeforeMarkCompact + 1;
5096 static const unsigned int kGCsBetweenCleanup = 4;
5098 if (!last_idle_notification_gc_count_init_) {
5099 last_idle_notification_gc_count_ = gc_count_;
5100 last_idle_notification_gc_count_init_ = true;
5103 bool uncommit = true;
5104 bool finished = false;
5106 // Reset the number of idle notifications received when a number of
5107 // GCs have taken place. This allows another round of cleanup based
5108 // on idle notifications if enough work has been carried out to
5109 // provoke a number of garbage collections.
5110 if (gc_count_ - last_idle_notification_gc_count_ < kGCsBetweenCleanup) {
5111 number_idle_notifications_ =
5112 Min(number_idle_notifications_ + 1, kMaxIdleCount);
5113 } else {
5114 number_idle_notifications_ = 0;
5115 last_idle_notification_gc_count_ = gc_count_;
5116 }
5118 if (number_idle_notifications_ == kIdlesBeforeScavenge) {
5119 CollectGarbage(NEW_SPACE, "idle notification");
5120 new_space_.Shrink();
5121 last_idle_notification_gc_count_ = gc_count_;
5122 } else if (number_idle_notifications_ == kIdlesBeforeMarkSweep) {
5123 // Before doing the mark-sweep collections we clear the
5124 // compilation cache to avoid hanging on to source code and
5125 // generated code for cached functions.
5126 isolate_->compilation_cache()->Clear();
5128 CollectAllGarbage(kReduceMemoryFootprintMask, "idle notification");
5129 new_space_.Shrink();
5130 last_idle_notification_gc_count_ = gc_count_;
5132 } else if (number_idle_notifications_ == kIdlesBeforeMarkCompact) {
5133 CollectAllGarbage(kReduceMemoryFootprintMask, "idle notification");
5134 new_space_.Shrink();
5135 last_idle_notification_gc_count_ = gc_count_;
5136 number_idle_notifications_ = 0;
5137 finished = true;
5138 } else if (number_idle_notifications_ > kIdlesBeforeMarkCompact) {
5139 // If we have received more than kIdlesBeforeMarkCompact idle
5140 // notifications we do not perform any cleanup because we don't
5141 // expect to gain much by doing so.
5142 finished = true;
5143 }
5145 if (uncommit) UncommitFromSpace();
5147 return finished;
5148 }
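// Editor's worked example of the ladder above: with kIdlesBeforeScavenge == 4,
// kIdlesBeforeMarkSweep == 7 and kIdlesBeforeMarkCompact == 8, a run of
// consecutive idle notifications (with fewer than kGCsBetweenCleanup GCs in
// between) triggers a new-space scavenge on the 4th call, a full mark-sweep
// on the 7th, and a final collection on the 8th, after which the counter
// resets and the cycle can start over.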
5153 void Heap::Print() {
5154 if (!HasBeenSetUp()) return;
5155 isolate()->PrintStack();
5156 AllSpaces spaces;
5157 for (Space* space = spaces.next(); space != NULL; space = spaces.next())
5158 space->Print();
5159 }
5162 void Heap::ReportCodeStatistics(const char* title) {
5163 PrintF(">>>>>> Code Stats (%s) >>>>>>\n", title);
5164 PagedSpace::ResetCodeStatistics();
5165 // We do not look for code in new space, map space, or old space. If code
5166 // somehow ends up in those spaces, we would miss it here.
5167 code_space_->CollectCodeStatistics();
5168 lo_space_->CollectCodeStatistics();
5169 PagedSpace::ReportCodeStatistics();
5170 }
5173 // This function expects that NewSpace's allocated objects histogram is
5174 // populated (via a call to CollectStatistics or else as a side effect of a
5175 // just-completed scavenge collection).
5176 void Heap::ReportHeapStatistics(const char* title) {
5178 PrintF(">>>>>> =============== %s (%d) =============== >>>>>>\n",
5179 title, gc_count_);
5180 PrintF("old_gen_promotion_limit_ %" V8_PTR_PREFIX "d\n",
5181 old_gen_promotion_limit_);
5182 PrintF("old_gen_allocation_limit_ %" V8_PTR_PREFIX "d\n",
5183 old_gen_allocation_limit_);
5184 PrintF("old_gen_limit_factor_ %d\n", old_gen_limit_factor_);
5187 PrintF("Number of handles : %d\n", HandleScope::NumberOfHandles());
5188 isolate_->global_handles()->PrintStats();
5191 PrintF("Heap statistics : ");
5192 isolate_->memory_allocator()->ReportStatistics();
5193 PrintF("To space : ");
5194 new_space_.ReportStatistics();
5195 PrintF("Old pointer space : ");
5196 old_pointer_space_->ReportStatistics();
5197 PrintF("Old data space : ");
5198 old_data_space_->ReportStatistics();
5199 PrintF("Code space : ");
5200 code_space_->ReportStatistics();
5201 PrintF("Map space : ");
5202 map_space_->ReportStatistics();
5203 PrintF("Cell space : ");
5204 cell_space_->ReportStatistics();
5205 PrintF("Large object space : ");
5206 lo_space_->ReportStatistics();
5207 PrintF(">>>>>> ========================================= >>>>>>\n");
5208 }
5212 bool Heap::Contains(HeapObject* value) {
5213 return Contains(value->address());
5214 }
5217 bool Heap::Contains(Address addr) {
5218 if (OS::IsOutsideAllocatedSpace(addr)) return false;
5219 return HasBeenSetUp() &&
5220 (new_space_.ToSpaceContains(addr) ||
5221 old_pointer_space_->Contains(addr) ||
5222 old_data_space_->Contains(addr) ||
5223 code_space_->Contains(addr) ||
5224 map_space_->Contains(addr) ||
5225 cell_space_->Contains(addr) ||
5226 lo_space_->SlowContains(addr));
5227 }
5230 bool Heap::InSpace(HeapObject* value, AllocationSpace space) {
5231 return InSpace(value->address(), space);
5232 }
5235 bool Heap::InSpace(Address addr, AllocationSpace space) {
5236 if (OS::IsOutsideAllocatedSpace(addr)) return false;
5237 if (!HasBeenSetUp()) return false;
5239 switch (space) {
5240 case NEW_SPACE:
5241 return new_space_.ToSpaceContains(addr);
5242 case OLD_POINTER_SPACE:
5243 return old_pointer_space_->Contains(addr);
5244 case OLD_DATA_SPACE:
5245 return old_data_space_->Contains(addr);
5246 case CODE_SPACE:
5247 return code_space_->Contains(addr);
5248 case MAP_SPACE:
5249 return map_space_->Contains(addr);
5250 case CELL_SPACE:
5251 return cell_space_->Contains(addr);
5252 case LO_SPACE:
5253 return lo_space_->SlowContains(addr);
5254 }
5256 return false;
5257 }
5261 void Heap::Verify() {
5262 ASSERT(HasBeenSetUp());
5264 store_buffer()->Verify();
5266 VerifyPointersVisitor visitor;
5267 IterateRoots(&visitor, VISIT_ONLY_STRONG);
5269 new_space_.Verify();
5271 old_pointer_space_->Verify(&visitor);
5272 map_space_->Verify(&visitor);
5274 VerifyPointersVisitor no_dirty_regions_visitor;
5275 old_data_space_->Verify(&no_dirty_regions_visitor);
5276 code_space_->Verify(&no_dirty_regions_visitor);
5277 cell_space_->Verify(&no_dirty_regions_visitor);
5279 lo_space_->Verify();
5281 VerifyNoAccessorPairSharing();
5282 }
5285 void Heap::VerifyNoAccessorPairSharing() {
5286 // Verification is done in 2 phases: First we mark all AccessorPairs, checking
5287 // that we mark only unmarked pairs, then we clear all marks, restoring the
5288 // initial state. We use the Smi tag of the AccessorPair's getter as the
5289 // marking bit, because we can never see a Smi as the getter.
5290 for (int phase = 0; phase < 2; phase++) {
5291 HeapObjectIterator iter(map_space());
5292 for (HeapObject* obj = iter.Next(); obj != NULL; obj = iter.Next()) {
5293 if (obj->IsMap()) {
5294 DescriptorArray* descs = Map::cast(obj)->instance_descriptors();
5295 for (int i = 0; i < descs->number_of_descriptors(); i++) {
5296 if (descs->GetType(i) == CALLBACKS &&
5297 descs->GetValue(i)->IsAccessorPair()) {
5298 AccessorPair* accessors = AccessorPair::cast(descs->GetValue(i));
5299 uintptr_t before = reinterpret_cast<intptr_t>(accessors->getter());
5300 uintptr_t after = (phase == 0) ?
5301 ((before & ~kSmiTagMask) | kSmiTag) :
5302 ((before & ~kHeapObjectTag) | kHeapObjectTag);
5303 CHECK(before != after);
5304 accessors->set_getter(reinterpret_cast<Object*>(after));
5305 }
5306 }
5307 }
5308 }
5309 }
5314 MaybeObject* Heap::LookupSymbol(Vector<const char> string) {
5315 Object* symbol = NULL;
5316 Object* new_table;
5317 { MaybeObject* maybe_new_table =
5318 symbol_table()->LookupSymbol(string, &symbol);
5319 if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
5320 }
5321 // Can't use set_symbol_table because SymbolTable::cast knows that
5322 // SymbolTable is a singleton and checks for identity.
5323 roots_[kSymbolTableRootIndex] = new_table;
5324 ASSERT(symbol != NULL);
5325 return symbol;
5326 }
5329 MaybeObject* Heap::LookupAsciiSymbol(Vector<const char> string) {
5330 Object* symbol = NULL;
5331 Object* new_table;
5332 { MaybeObject* maybe_new_table =
5333 symbol_table()->LookupAsciiSymbol(string, &symbol);
5334 if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
5335 }
5336 // Can't use set_symbol_table because SymbolTable::cast knows that
5337 // SymbolTable is a singleton and checks for identity.
5338 roots_[kSymbolTableRootIndex] = new_table;
5339 ASSERT(symbol != NULL);
5340 return symbol;
5341 }
5344 MaybeObject* Heap::LookupAsciiSymbol(Handle<SeqAsciiString> string,
5345 int from,
5346 int length) {
5347 Object* symbol = NULL;
5348 Object* new_table;
5349 { MaybeObject* maybe_new_table =
5350 symbol_table()->LookupSubStringAsciiSymbol(string,
5351 from,
5352 length,
5353 &symbol);
5354 if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
5355 }
5356 // Can't use set_symbol_table because SymbolTable::cast knows that
5357 // SymbolTable is a singleton and checks for identity.
5358 roots_[kSymbolTableRootIndex] = new_table;
5359 ASSERT(symbol != NULL);
5360 return symbol;
5361 }
5364 MaybeObject* Heap::LookupTwoByteSymbol(Vector<const uc16> string) {
5365 Object* symbol = NULL;
5366 Object* new_table;
5367 { MaybeObject* maybe_new_table =
5368 symbol_table()->LookupTwoByteSymbol(string, &symbol);
5369 if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
5370 }
5371 // Can't use set_symbol_table because SymbolTable::cast knows that
5372 // SymbolTable is a singleton and checks for identity.
5373 roots_[kSymbolTableRootIndex] = new_table;
5374 ASSERT(symbol != NULL);
5375 return symbol;
5376 }
5379 MaybeObject* Heap::LookupSymbol(String* string) {
5380 if (string->IsSymbol()) return string;
5381 Object* symbol = NULL;
5382 Object* new_table;
5383 { MaybeObject* maybe_new_table =
5384 symbol_table()->LookupString(string, &symbol);
5385 if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
5387 // Can't use set_symbol_table because SymbolTable::cast knows that
5388 // SymbolTable is a singleton and checks for identity.
5389 roots_[kSymbolTableRootIndex] = new_table;
5390 ASSERT(symbol != NULL);
bool Heap::LookupSymbolIfExists(String* string, String** symbol) {
  if (string->IsSymbol()) {
    *symbol = string;
    return true;
  }
  return symbol_table()->LookupSymbolIfExists(string, symbol);
}


void Heap::ZapFromSpace() {
  NewSpacePageIterator it(new_space_.FromSpaceStart(),
                          new_space_.FromSpaceEnd());
  while (it.has_next()) {
    NewSpacePage* page = it.next();
    for (Address cursor = page->area_start(), limit = page->area_end();
         cursor < limit;
         cursor += kPointerSize) {
      Memory::Address_at(cursor) = kFromSpaceZapValue;
    }
  }
}


void Heap::IterateAndMarkPointersToFromSpace(Address start,
                                             Address end,
                                             ObjectSlotCallback callback) {
  Address slot_address = start;

  // We are not collecting slots on new space objects during mutation
  // thus we have to scan for pointers to evacuation candidates when we
  // promote objects. But we should not record any slots in non-black
  // objects. A grey object's slots would be rescanned anyway. A white
  // object might not survive until the end of the collection, so recording
  // its slots would violate the invariant.
  bool record_slots = false;
  if (incremental_marking()->IsCompacting()) {
    MarkBit mark_bit = Marking::MarkBitFrom(HeapObject::FromAddress(start));
    record_slots = Marking::IsBlack(mark_bit);
  }

  while (slot_address < end) {
    Object** slot = reinterpret_cast<Object**>(slot_address);
    Object* object = *slot;
    // If the store buffer becomes overfull we mark pages as being exempt from
    // the store buffer. These pages are scanned to find pointers that point
    // to the new space. In that case we may hit newly promoted objects and
    // fix the pointers before the promotion queue gets to them. Thus the 'if'.
    if (object->IsHeapObject()) {
      if (Heap::InFromSpace(object)) {
        callback(reinterpret_cast<HeapObject**>(slot),
                 HeapObject::cast(object));
        Object* new_object = *slot;
        if (InNewSpace(new_object)) {
          SLOW_ASSERT(Heap::InToSpace(new_object));
          SLOW_ASSERT(new_object->IsHeapObject());
          store_buffer_.EnterDirectlyIntoStoreBuffer(
              reinterpret_cast<Address>(slot));
        }
        SLOW_ASSERT(!MarkCompactCollector::IsOnEvacuationCandidate(new_object));
      } else if (record_slots &&
                 MarkCompactCollector::IsOnEvacuationCandidate(object)) {
        mark_compact_collector()->RecordSlot(slot, slot, object);
      }
    }
    slot_address += kPointerSize;
  }
}


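// Illustrative usage sketch (not part of the original source): callers pass
// the body range of a freshly promoted object together with a slot-updating
// callback; the names below are hypothetical.
//
//   heap->IterateAndMarkPointersToFromSpace(
//       target->address(), target->address() + size, &UpdateSlotCallback);
//
// Every from-space pointer found in [start, end) is fixed up via the
// callback; if the updated value still lies in new space it is re-entered
// into the store buffer, and slots into evacuation candidates are recorded
// for the compactor only when the object at |start| is already marked black.

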
typedef bool (*CheckStoreBufferFilter)(Object** addr);


bool IsAMapPointerAddress(Object** addr) {
  uintptr_t a = reinterpret_cast<uintptr_t>(addr);
  int mod = a % Map::kSize;
  return mod >= Map::kPointerFieldsBeginOffset &&
         mod < Map::kPointerFieldsEndOffset;
}


bool EverythingsAPointer(Object** addr) {
  return true;
}


static void CheckStoreBuffer(Heap* heap,
                             Object** current,
                             Object** limit,
                             Object**** store_buffer_position,
                             Object*** store_buffer_top,
                             CheckStoreBufferFilter filter,
                             Address special_garbage_start,
                             Address special_garbage_end) {
  Map* free_space_map = heap->free_space_map();
  for ( ; current < limit; current++) {
    Object* o = *current;
    Address current_address = reinterpret_cast<Address>(current);
    // Skip free space.
    if (o == free_space_map) {
      FreeSpace* free_space =
          FreeSpace::cast(HeapObject::FromAddress(current_address));
      int skip = free_space->Size();
      ASSERT(current_address + skip <= reinterpret_cast<Address>(limit));
      current_address += skip - kPointerSize;
      current = reinterpret_cast<Object**>(current_address);
      continue;
    }
    // Skip the current linear allocation space between top and limit which is
    // unmarked with the free space map, but can contain junk.
    if (current_address == special_garbage_start &&
        special_garbage_end != special_garbage_start) {
      current_address = special_garbage_end - kPointerSize;
      current = reinterpret_cast<Object**>(current_address);
      continue;
    }
    if (!(*filter)(current)) continue;
    ASSERT(current_address < special_garbage_start ||
           current_address >= special_garbage_end);
    ASSERT(reinterpret_cast<uintptr_t>(o) != kFreeListZapValue);
    // We have to check that the pointer does not point into new space
    // without trying to cast it to a heap object since the hash field of
    // a string can contain values like 1 and 3 which are tagged null
    // pointers.
    if (!heap->InNewSpace(o)) continue;
    while (**store_buffer_position < current &&
           *store_buffer_position < store_buffer_top) {
      (*store_buffer_position)++;
    }
    if (**store_buffer_position != current ||
        *store_buffer_position == store_buffer_top) {
      Object** obj_start = current;
      while (!(*obj_start)->IsMap()) obj_start--;
      UNREACHABLE();
    }
  }
}


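// Illustrative note (not part of the original source): the raw-word scan
// above is why the filter runs before any HeapObject cast. A string's hash
// field can hold a small word such as 1 or 3 that happens to carry the
// heap-object tag bit without being a valid pointer, so the only safe test
// on an arbitrary word is the address-range check heap->InNewSpace(o).
// UNREACHABLE() is reached when a new-space pointer in old space has no
// matching store buffer entry; the obj_start walk-back exists only so the
// holding object is easy to inspect in a debugger.

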
// Check that the store buffer contains all intergenerational pointers by
// scanning a page and ensuring that all pointers to young space are in the
// store buffer.
void Heap::OldPointerSpaceCheckStoreBuffer() {
  OldSpace* space = old_pointer_space();
  PageIterator pages(space);

  store_buffer()->SortUniq();

  while (pages.has_next()) {
    Page* page = pages.next();
    Object** current = reinterpret_cast<Object**>(page->area_start());

    Address end = page->area_end();

    Object*** store_buffer_position = store_buffer()->Start();
    Object*** store_buffer_top = store_buffer()->Top();

    Object** limit = reinterpret_cast<Object**>(end);
    CheckStoreBuffer(this,
                     current,
                     limit,
                     &store_buffer_position,
                     store_buffer_top,
                     &EverythingsAPointer,
                     space->top(),
                     space->limit());
  }
}


void Heap::MapSpaceCheckStoreBuffer() {
  MapSpace* space = map_space();
  PageIterator pages(space);

  store_buffer()->SortUniq();

  while (pages.has_next()) {
    Page* page = pages.next();
    Object** current = reinterpret_cast<Object**>(page->area_start());

    Address end = page->area_end();

    Object*** store_buffer_position = store_buffer()->Start();
    Object*** store_buffer_top = store_buffer()->Top();

    Object** limit = reinterpret_cast<Object**>(end);
    CheckStoreBuffer(this,
                     current,
                     limit,
                     &store_buffer_position,
                     store_buffer_top,
                     &IsAMapPointerAddress,
                     space->top(),
                     space->limit());
  }
}


void Heap::LargeObjectSpaceCheckStoreBuffer() {
  LargeObjectIterator it(lo_space());
  for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
    // We only have code, sequential strings, or fixed arrays in large
    // object space, and only fixed arrays can possibly contain pointers to
    // the young generation.
    if (object->IsFixedArray()) {
      Object*** store_buffer_position = store_buffer()->Start();
      Object*** store_buffer_top = store_buffer()->Top();
      Object** current = reinterpret_cast<Object**>(object->address());
      Object** limit =
          reinterpret_cast<Object**>(object->address() + object->Size());
      CheckStoreBuffer(this,
                       current,
                       limit,
                       &store_buffer_position,
                       store_buffer_top,
                       &EverythingsAPointer,
                       NULL,
                       NULL);
    }
  }
}


void Heap::IterateRoots(ObjectVisitor* v, VisitMode mode) {
  IterateStrongRoots(v, mode);
  IterateWeakRoots(v, mode);
}


void Heap::IterateWeakRoots(ObjectVisitor* v, VisitMode mode) {
  v->VisitPointer(reinterpret_cast<Object**>(&roots_[kSymbolTableRootIndex]));
  v->Synchronize(VisitorSynchronization::kSymbolTable);
  if (mode != VISIT_ALL_IN_SCAVENGE &&
      mode != VISIT_ALL_IN_SWEEP_NEWSPACE) {
    // Scavenge collections have special processing for this.
    external_string_table_.Iterate(v);
  }
  v->Synchronize(VisitorSynchronization::kExternalStringsTable);
}


void Heap::IterateStrongRoots(ObjectVisitor* v, VisitMode mode) {
  v->VisitPointers(&roots_[0], &roots_[kStrongRootListLength]);
  v->Synchronize(VisitorSynchronization::kStrongRootList);

  v->VisitPointer(BitCast<Object**>(&hidden_symbol_));
  v->Synchronize(VisitorSynchronization::kSymbol);

  isolate_->bootstrapper()->Iterate(v);
  v->Synchronize(VisitorSynchronization::kBootstrapper);
  isolate_->Iterate(v);
  v->Synchronize(VisitorSynchronization::kTop);
  Relocatable::Iterate(v);
  v->Synchronize(VisitorSynchronization::kRelocatable);

#ifdef ENABLE_DEBUGGER_SUPPORT
  isolate_->debug()->Iterate(v);
  if (isolate_->deoptimizer_data() != NULL) {
    isolate_->deoptimizer_data()->Iterate(v);
  }
#endif
  v->Synchronize(VisitorSynchronization::kDebug);
  isolate_->compilation_cache()->Iterate(v);
  v->Synchronize(VisitorSynchronization::kCompilationCache);

  // Iterate over local handles in handle scopes.
  isolate_->handle_scope_implementer()->Iterate(v);
  v->Synchronize(VisitorSynchronization::kHandleScope);

  // Iterate over the builtin code objects and code stubs in the
  // heap. Note that it is not necessary to iterate over code objects
  // on scavenge collections.
  if (mode != VISIT_ALL_IN_SCAVENGE) {
    isolate_->builtins()->IterateBuiltins(v);
  }
  v->Synchronize(VisitorSynchronization::kBuiltins);

  // Iterate over global handles.
  switch (mode) {
    case VISIT_ONLY_STRONG:
      isolate_->global_handles()->IterateStrongRoots(v);
      break;
    case VISIT_ALL_IN_SCAVENGE:
      isolate_->global_handles()->IterateNewSpaceStrongAndDependentRoots(v);
      break;
    case VISIT_ALL_IN_SWEEP_NEWSPACE:
    case VISIT_ALL:
      isolate_->global_handles()->IterateAllRoots(v);
      break;
  }
  v->Synchronize(VisitorSynchronization::kGlobalHandles);

  // Iterate over pointers being held by inactive threads.
  isolate_->thread_manager()->Iterate(v);
  v->Synchronize(VisitorSynchronization::kThreadManager);

  // Iterate over the pointers the Serialization/Deserialization code is
  // holding.
  // During garbage collection this keeps the partial snapshot cache alive.
  // During deserialization of the startup snapshot this creates the partial
  // snapshot cache and deserializes the objects it refers to. During
  // serialization this does nothing, since the partial snapshot cache is
  // empty. However the next thing we do is create the partial snapshot,
  // filling up the partial snapshot cache with objects it needs as we go.
  SerializerDeserializer::Iterate(v);
  // We don't do a v->Synchronize call here, because in debug mode that will
  // output a flag to the snapshot. However at this point the serializer and
  // deserializer are deliberately a little unsynchronized (see above) so the
  // checking of the sync flag in the snapshot would fail.
}


// TODO(1236194): Since the heap size is configurable on the command line
// and through the API, we should gracefully handle the case that the heap
// size is not big enough to fit all the initial objects.
bool Heap::ConfigureHeap(int max_semispace_size,
                         intptr_t max_old_gen_size,
                         intptr_t max_executable_size) {
  if (HasBeenSetUp()) return false;

  if (FLAG_stress_compaction) {
    // This will cause more frequent GCs when stressing.
    max_semispace_size_ = Page::kPageSize;
  }

  if (max_semispace_size > 0) {
    if (max_semispace_size < Page::kPageSize) {
      max_semispace_size = Page::kPageSize;
      if (FLAG_trace_gc) {
        PrintF("Max semispace size cannot be less than %dkbytes\n",
               Page::kPageSize >> 10);
      }
    }
    max_semispace_size_ = max_semispace_size;
  }

  if (Snapshot::IsEnabled()) {
    // If we are using a snapshot we always reserve the default amount
    // of memory for each semispace because code in the snapshot has
    // write-barrier code that relies on the size and alignment of new
    // space. We therefore cannot use a larger max semispace size
    // than the default reserved semispace size.
    if (max_semispace_size_ > reserved_semispace_size_) {
      max_semispace_size_ = reserved_semispace_size_;
      if (FLAG_trace_gc) {
        PrintF("Max semispace size cannot be more than %dkbytes\n",
               reserved_semispace_size_ >> 10);
      }
    }
  } else {
    // If we are not using snapshots we reserve space for the actual
    // max semispace size.
    reserved_semispace_size_ = max_semispace_size_;
  }

  if (max_old_gen_size > 0) max_old_generation_size_ = max_old_gen_size;
  if (max_executable_size > 0) {
    max_executable_size_ = RoundUp(max_executable_size, Page::kPageSize);
  }

  // The max executable size must be less than or equal to the max old
  // generation size.
  if (max_executable_size_ > max_old_generation_size_) {
    max_executable_size_ = max_old_generation_size_;
  }

  // The new space size must be a power of two to support single-bit testing
  // for containment.
  max_semispace_size_ = RoundUpToPowerOf2(max_semispace_size_);
  reserved_semispace_size_ = RoundUpToPowerOf2(reserved_semispace_size_);
  initial_semispace_size_ = Min(initial_semispace_size_, max_semispace_size_);
  external_allocation_limit_ = 10 * max_semispace_size_;

  // The old generation is paged and needs at least one page for each space.
  int paged_space_count = LAST_PAGED_SPACE - FIRST_PAGED_SPACE + 1;
  max_old_generation_size_ = Max(static_cast<intptr_t>(paged_space_count *
                                 Page::kPageSize),
                                 RoundUp(max_old_generation_size_,
                                         Page::kPageSize));

  configured_ = true;
  return true;
}


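// Illustrative note (not part of the original source): the power-of-two
// requirement lets new-space containment be tested with a single mask
// instead of two compares. For example, a requested max semispace size of
// 600 KB (0x96000 bytes) is rounded up by RoundUpToPowerOf2 to 1 MB
// (0x100000), after which an address can be tested for containment by
// masking off its low 20 bits and comparing against the space's base.

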
bool Heap::ConfigureHeapDefault() {
  return ConfigureHeap(static_cast<intptr_t>(FLAG_max_new_space_size / 2) * KB,
                       static_cast<intptr_t>(FLAG_max_old_space_size) * MB,
                       static_cast<intptr_t>(FLAG_max_executable_size) * MB);
}


void Heap::RecordStats(HeapStats* stats, bool take_snapshot) {
  *stats->start_marker = HeapStats::kStartMarker;
  *stats->end_marker = HeapStats::kEndMarker;
  *stats->new_space_size = new_space_.SizeAsInt();
  *stats->new_space_capacity = static_cast<int>(new_space_.Capacity());
  *stats->old_pointer_space_size = old_pointer_space_->SizeOfObjects();
  *stats->old_pointer_space_capacity = old_pointer_space_->Capacity();
  *stats->old_data_space_size = old_data_space_->SizeOfObjects();
  *stats->old_data_space_capacity = old_data_space_->Capacity();
  *stats->code_space_size = code_space_->SizeOfObjects();
  *stats->code_space_capacity = code_space_->Capacity();
  *stats->map_space_size = map_space_->SizeOfObjects();
  *stats->map_space_capacity = map_space_->Capacity();
  *stats->cell_space_size = cell_space_->SizeOfObjects();
  *stats->cell_space_capacity = cell_space_->Capacity();
  *stats->lo_space_size = lo_space_->Size();
  isolate_->global_handles()->RecordStats(stats);
  *stats->memory_allocator_size = isolate()->memory_allocator()->Size();
  *stats->memory_allocator_capacity =
      isolate()->memory_allocator()->Size() +
      isolate()->memory_allocator()->Available();
  *stats->os_error = OS::GetLastError();
  if (take_snapshot) {
    HeapIterator iterator;
    for (HeapObject* obj = iterator.next();
         obj != NULL;
         obj = iterator.next()) {
      InstanceType type = obj->map()->instance_type();
      ASSERT(0 <= type && type <= LAST_TYPE);
      stats->objects_per_type[type]++;
      stats->size_per_type[type] += obj->Size();
    }
  }
}


intptr_t Heap::PromotedSpaceSizeOfObjects() {
  return old_pointer_space_->SizeOfObjects()
      + old_data_space_->SizeOfObjects()
      + code_space_->SizeOfObjects()
      + map_space_->SizeOfObjects()
      + cell_space_->SizeOfObjects()
      + lo_space_->SizeOfObjects();
}


intptr_t Heap::PromotedExternalMemorySize() {
  if (amount_of_external_allocated_memory_
      <= amount_of_external_allocated_memory_at_last_global_gc_) return 0;
  return amount_of_external_allocated_memory_
      - amount_of_external_allocated_memory_at_last_global_gc_;
}


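// Illustrative note (not part of the original source): this reports only
// growth since the last full GC. If 4 MB of external memory was registered
// at the last global GC and 10 MB is registered now, the function returns
// 6 MB; if the current amount has shrunk to 3 MB it returns 0 rather than
// a negative size.

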
#ifdef DEBUG

// Tags 0, 1, and 3 are used. Use 2 for marking visited HeapObject.
static const int kMarkTag = 2;


class HeapDebugUtils {
 public:
  explicit HeapDebugUtils(Heap* heap)
      : search_for_any_global_(false),
        search_target_(NULL),
        found_target_(false),
        object_stack_(20),
        heap_(heap) {
  }

  class MarkObjectVisitor : public ObjectVisitor {
   public:
    explicit MarkObjectVisitor(HeapDebugUtils* utils) : utils_(utils) { }

    void VisitPointers(Object** start, Object** end) {
      // Copy all HeapObject pointers in [start, end)
      for (Object** p = start; p < end; p++) {
        if ((*p)->IsHeapObject())
          utils_->MarkObjectRecursively(p);
      }
    }

    HeapDebugUtils* utils_;
  };

  void MarkObjectRecursively(Object** p) {
    if (!(*p)->IsHeapObject()) return;

    HeapObject* obj = HeapObject::cast(*p);

    Object* map = obj->map();

    if (!map->IsHeapObject()) return;  // visited before

    if (found_target_) return;  // stop if target found
    object_stack_.Add(obj);
    if ((search_for_any_global_ && obj->IsJSGlobalObject()) ||
        (!search_for_any_global_ && (obj == search_target_))) {
      found_target_ = true;
      return;
    }

    // not visited yet
    Map* map_p = reinterpret_cast<Map*>(HeapObject::cast(map));

    Address map_addr = map_p->address();

    obj->set_map_no_write_barrier(reinterpret_cast<Map*>(map_addr + kMarkTag));

    MarkObjectRecursively(&map);

    MarkObjectVisitor mark_visitor(this);

    obj->IterateBody(map_p->instance_type(), obj->SizeFromMap(map_p),
                     &mark_visitor);

    if (!found_target_)  // don't pop if found the target
      object_stack_.RemoveLast();
  }

  class UnmarkObjectVisitor : public ObjectVisitor {
   public:
    explicit UnmarkObjectVisitor(HeapDebugUtils* utils) : utils_(utils) { }

    void VisitPointers(Object** start, Object** end) {
      // Copy all HeapObject pointers in [start, end)
      for (Object** p = start; p < end; p++) {
        if ((*p)->IsHeapObject())
          utils_->UnmarkObjectRecursively(p);
      }
    }

    HeapDebugUtils* utils_;
  };

  void UnmarkObjectRecursively(Object** p) {
    if (!(*p)->IsHeapObject()) return;

    HeapObject* obj = HeapObject::cast(*p);

    Object* map = obj->map();

    if (map->IsHeapObject()) return;  // unmarked already

    Address map_addr = reinterpret_cast<Address>(map);

    map_addr -= kMarkTag;

    ASSERT_TAG_ALIGNED(map_addr);

    HeapObject* map_p = HeapObject::FromAddress(map_addr);

    obj->set_map_no_write_barrier(reinterpret_cast<Map*>(map_p));

    UnmarkObjectRecursively(reinterpret_cast<Object**>(&map_p));

    UnmarkObjectVisitor unmark_visitor(this);

    obj->IterateBody(Map::cast(map_p)->instance_type(),
                     obj->SizeFromMap(Map::cast(map_p)),
                     &unmark_visitor);
  }

  void MarkRootObjectRecursively(Object** root) {
    if (search_for_any_global_) {
      ASSERT(search_target_ == NULL);
    } else {
      ASSERT(search_target_->IsHeapObject());
    }
    found_target_ = false;
    object_stack_.Clear();

    MarkObjectRecursively(root);
    UnmarkObjectRecursively(root);

    if (found_target_) {
      PrintF("=====================================\n");
      PrintF("====        Path to object       ====\n");
      PrintF("=====================================\n\n");

      ASSERT(!object_stack_.is_empty());
      for (int i = 0; i < object_stack_.length(); i++) {
        if (i > 0) PrintF("\n     |\n     |\n     V\n\n");
        Object* obj = object_stack_[i];
        obj->Print();
      }
      PrintF("=====================================\n");
    }
  }

  // Helper class for visiting HeapObjects recursively.
  class MarkRootVisitor: public ObjectVisitor {
   public:
    explicit MarkRootVisitor(HeapDebugUtils* utils) : utils_(utils) { }

    void VisitPointers(Object** start, Object** end) {
      // Visit all HeapObject pointers in [start, end)
      for (Object** p = start; p < end; p++) {
        if ((*p)->IsHeapObject())
          utils_->MarkRootObjectRecursively(p);
      }
    }

    HeapDebugUtils* utils_;
  };

  bool search_for_any_global_;
  Object* search_target_;
  bool found_target_;
  List<Object*> object_stack_;
  Heap* heap_;

  friend class Heap;
};

#endif


V8_DECLARE_ONCE(initialize_gc_once);

static void InitializeGCOnce() {
  InitializeScavengingVisitorsTables();
  NewSpaceScavenger::Initialize();
  MarkCompactCollector::Initialize();
}


bool Heap::SetUp(bool create_heap_objects) {
#ifdef DEBUG
  allocation_timeout_ = FLAG_gc_interval;
  debug_utils_ = new HeapDebugUtils(this);
#endif

  // Initialize heap spaces and initial maps and objects. Whenever something
  // goes wrong, just return false. The caller should check the results and
  // call Heap::TearDown() to release allocated memory.

  // If the heap is not yet configured (e.g. through the API), configure it.
  // Configuration is based on the flags new-space-size (really the semispace
  // size) and old-space-size if set or the initial values of semispace_size_
  // and old_generation_size_ otherwise.
  if (!configured_) {
    if (!ConfigureHeapDefault()) return false;
  }

  CallOnce(&initialize_gc_once, &InitializeGCOnce);

  MarkMapPointersAsEncoded(false);

  // Set up memory allocator.
  if (!isolate_->memory_allocator()->SetUp(MaxReserved(), MaxExecutableSize()))
    return false;

  // Set up new space.
  if (!new_space_.SetUp(reserved_semispace_size_, max_semispace_size_)) {
    return false;
  }

  // Initialize old pointer space.
  old_pointer_space_ =
      new OldSpace(this,
                   max_old_generation_size_,
                   OLD_POINTER_SPACE,
                   NOT_EXECUTABLE);
  if (old_pointer_space_ == NULL) return false;
  if (!old_pointer_space_->SetUp()) return false;

  // Initialize old data space.
  old_data_space_ =
      new OldSpace(this,
                   max_old_generation_size_,
                   OLD_DATA_SPACE,
                   NOT_EXECUTABLE);
  if (old_data_space_ == NULL) return false;
  if (!old_data_space_->SetUp()) return false;

  // Initialize the code space, set its maximum capacity to the old
  // generation size. It needs executable memory.
  // On 64-bit platform(s), we put all code objects in a 2 GB range of
  // virtual address space, so that they can call each other with near calls.
  if (code_range_size_ > 0) {
    if (!isolate_->code_range()->SetUp(code_range_size_)) {
      return false;
    }
  }

  code_space_ =
      new OldSpace(this, max_old_generation_size_, CODE_SPACE, EXECUTABLE);
  if (code_space_ == NULL) return false;
  if (!code_space_->SetUp()) return false;

  // Initialize map space.
  map_space_ = new MapSpace(this, max_old_generation_size_, MAP_SPACE);
  if (map_space_ == NULL) return false;
  if (!map_space_->SetUp()) return false;

  // Initialize global property cell space.
  cell_space_ = new CellSpace(this, max_old_generation_size_, CELL_SPACE);
  if (cell_space_ == NULL) return false;
  if (!cell_space_->SetUp()) return false;

  // The large object code space may contain code or data. We set the memory
  // to be non-executable here for safety, but this means we need to enable it
  // explicitly when allocating large code objects.
  lo_space_ = new LargeObjectSpace(this, max_old_generation_size_, LO_SPACE);
  if (lo_space_ == NULL) return false;
  if (!lo_space_->SetUp()) return false;

  // Set up the seed that is used to randomize the string hash function.
  ASSERT(hash_seed() == 0);
  if (FLAG_randomize_hashes) {
    if (FLAG_hash_seed == 0) {
      set_hash_seed(
          Smi::FromInt(V8::RandomPrivate(isolate()) & 0x3fffffff));
    } else {
      set_hash_seed(Smi::FromInt(FLAG_hash_seed));
    }
  }

  if (create_heap_objects) {
    // Create initial maps.
    if (!CreateInitialMaps()) return false;
    if (!CreateApiObjects()) return false;

    // Create initial objects
    if (!CreateInitialObjects()) return false;

    global_contexts_list_ = undefined_value();
  }

  LOG(isolate_, IntPtrTEvent("heap-capacity", Capacity()));
  LOG(isolate_, IntPtrTEvent("heap-available", Available()));

  store_buffer()->SetUp();

  return true;
}


void Heap::SetStackLimits() {
  ASSERT(isolate_ != NULL);
  ASSERT(isolate_ == isolate());
  // On 64 bit machines, pointers are generally out of range of Smis. We write
  // something that looks like an out of range Smi to the GC.

  // Set up the special root array entries containing the stack limits.
  // These are actually addresses, but the tag makes the GC ignore it.
  roots_[kStackLimitRootIndex] =
      reinterpret_cast<Object*>(
          (isolate_->stack_guard()->jslimit() & ~kSmiTagMask) | kSmiTag);
  roots_[kRealStackLimitRootIndex] =
      reinterpret_cast<Object*>(
          (isolate_->stack_guard()->real_jslimit() & ~kSmiTagMask) | kSmiTag);
}


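// Illustrative note (not part of the original source): stack limits are
// word-aligned addresses, so the low bit is already zero and the masking
// above leaves the value unchanged; it merely guarantees the stored word
// carries the Smi tag (low bit == kSmiTag == 0), so root visitors treat it
// as a Smi and never try to dereference it as a heap pointer. For example a
// jslimit of 0x7fff1230 is stored as-is and simply ignored by the GC.

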
void Heap::TearDown() {
#ifdef DEBUG
  if (FLAG_verify_heap) {
    Verify();
  }
#endif

  if (FLAG_print_cumulative_gc_stat) {
    PrintF("\n\n");
    PrintF("gc_count=%d ", gc_count_);
    PrintF("mark_sweep_count=%d ", ms_count_);
    PrintF("max_gc_pause=%d ", get_max_gc_pause());
    PrintF("min_in_mutator=%d ", get_min_in_mutator());
    PrintF("max_alive_after_gc=%" V8_PTR_PREFIX "d ",
           get_max_alive_after_gc());
    PrintF("\n\n");
  }

  isolate_->global_handles()->TearDown();

  external_string_table_.TearDown();

  new_space_.TearDown();

  if (old_pointer_space_ != NULL) {
    old_pointer_space_->TearDown();
    delete old_pointer_space_;
    old_pointer_space_ = NULL;
  }

  if (old_data_space_ != NULL) {
    old_data_space_->TearDown();
    delete old_data_space_;
    old_data_space_ = NULL;
  }

  if (code_space_ != NULL) {
    code_space_->TearDown();
    delete code_space_;
    code_space_ = NULL;
  }

  if (map_space_ != NULL) {
    map_space_->TearDown();
    delete map_space_;
    map_space_ = NULL;
  }

  if (cell_space_ != NULL) {
    cell_space_->TearDown();
    delete cell_space_;
    cell_space_ = NULL;
  }

  if (lo_space_ != NULL) {
    lo_space_->TearDown();
    delete lo_space_;
    lo_space_ = NULL;
  }

  store_buffer()->TearDown();
  incremental_marking()->TearDown();

  isolate_->memory_allocator()->TearDown();

#ifdef DEBUG
  delete debug_utils_;
  debug_utils_ = NULL;
#endif
}


void Heap::Shrink() {
  // Try to shrink all paged spaces.
  PagedSpaces spaces;
  for (PagedSpace* space = spaces.next();
       space != NULL;
       space = spaces.next()) {
    space->ReleaseAllUnusedPages();
  }
}


void Heap::AddGCPrologueCallback(GCPrologueCallback callback, GCType gc_type) {
  ASSERT(callback != NULL);
  GCPrologueCallbackPair pair(callback, gc_type);
  ASSERT(!gc_prologue_callbacks_.Contains(pair));
  return gc_prologue_callbacks_.Add(pair);
}


void Heap::RemoveGCPrologueCallback(GCPrologueCallback callback) {
  ASSERT(callback != NULL);
  for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
    if (gc_prologue_callbacks_[i].callback == callback) {
      gc_prologue_callbacks_.Remove(i);
      return;
    }
  }
  UNREACHABLE();
}


void Heap::AddGCEpilogueCallback(GCEpilogueCallback callback, GCType gc_type) {
  ASSERT(callback != NULL);
  GCEpilogueCallbackPair pair(callback, gc_type);
  ASSERT(!gc_epilogue_callbacks_.Contains(pair));
  return gc_epilogue_callbacks_.Add(pair);
}


void Heap::RemoveGCEpilogueCallback(GCEpilogueCallback callback) {
  ASSERT(callback != NULL);
  for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
    if (gc_epilogue_callbacks_[i].callback == callback) {
      gc_epilogue_callbacks_.Remove(i);
      return;
    }
  }
  UNREACHABLE();
}


#ifdef DEBUG

class PrintHandleVisitor: public ObjectVisitor {
 public:
  void VisitPointers(Object** start, Object** end) {
    for (Object** p = start; p < end; p++)
      PrintF("  handle %p to %p\n",
             reinterpret_cast<void*>(p),
             reinterpret_cast<void*>(*p));
  }
};

void Heap::PrintHandles() {
  PrintF("Handles:\n");
  PrintHandleVisitor v;
  isolate_->handle_scope_implementer()->Iterate(&v);
}

#endif


Space* AllSpaces::next() {
  switch (counter_++) {
    case NEW_SPACE:
      return HEAP->new_space();
    case OLD_POINTER_SPACE:
      return HEAP->old_pointer_space();
    case OLD_DATA_SPACE:
      return HEAP->old_data_space();
    case CODE_SPACE:
      return HEAP->code_space();
    case MAP_SPACE:
      return HEAP->map_space();
    case CELL_SPACE:
      return HEAP->cell_space();
    case LO_SPACE:
      return HEAP->lo_space();
    default:
      return NULL;
  }
}


PagedSpace* PagedSpaces::next() {
  switch (counter_++) {
    case OLD_POINTER_SPACE:
      return HEAP->old_pointer_space();
    case OLD_DATA_SPACE:
      return HEAP->old_data_space();
    case CODE_SPACE:
      return HEAP->code_space();
    case MAP_SPACE:
      return HEAP->map_space();
    case CELL_SPACE:
      return HEAP->cell_space();
    default:
      return NULL;
  }
}


OldSpace* OldSpaces::next() {
  switch (counter_++) {
    case OLD_POINTER_SPACE:
      return HEAP->old_pointer_space();
    case OLD_DATA_SPACE:
      return HEAP->old_data_space();
    case CODE_SPACE:
      return HEAP->code_space();
    default:
      return NULL;
  }
}


SpaceIterator::SpaceIterator()
    : current_space_(FIRST_SPACE),
      iterator_(NULL),
      size_func_(NULL) {
}


SpaceIterator::SpaceIterator(HeapObjectCallback size_func)
    : current_space_(FIRST_SPACE),
      iterator_(NULL),
      size_func_(size_func) {
}


SpaceIterator::~SpaceIterator() {
  // Delete active iterator if any.
  delete iterator_;
}


bool SpaceIterator::has_next() {
  // Iterate until no more spaces.
  return current_space_ != LAST_SPACE;
}


ObjectIterator* SpaceIterator::next() {
  if (iterator_ != NULL) {
    delete iterator_;
    iterator_ = NULL;
    // Move to the next space
    current_space_++;
    if (current_space_ > LAST_SPACE) {
      return NULL;
    }
  }

  // Return iterator for the new current space.
  return CreateIterator();
}


// Create an iterator for the space to iterate.
ObjectIterator* SpaceIterator::CreateIterator() {
  ASSERT(iterator_ == NULL);

  switch (current_space_) {
    case NEW_SPACE:
      iterator_ = new SemiSpaceIterator(HEAP->new_space(), size_func_);
      break;
    case OLD_POINTER_SPACE:
      iterator_ = new HeapObjectIterator(HEAP->old_pointer_space(), size_func_);
      break;
    case OLD_DATA_SPACE:
      iterator_ = new HeapObjectIterator(HEAP->old_data_space(), size_func_);
      break;
    case CODE_SPACE:
      iterator_ = new HeapObjectIterator(HEAP->code_space(), size_func_);
      break;
    case MAP_SPACE:
      iterator_ = new HeapObjectIterator(HEAP->map_space(), size_func_);
      break;
    case CELL_SPACE:
      iterator_ = new HeapObjectIterator(HEAP->cell_space(), size_func_);
      break;
    case LO_SPACE:
      iterator_ = new LargeObjectIterator(HEAP->lo_space(), size_func_);
      break;
  }

  // Return the newly allocated iterator;
  ASSERT(iterator_ != NULL);
  return iterator_;
}


class HeapObjectsFilter {
 public:
  virtual ~HeapObjectsFilter() {}
  virtual bool SkipObject(HeapObject* object) = 0;
};


class UnreachableObjectsFilter : public HeapObjectsFilter {
 public:
  UnreachableObjectsFilter() {
    MarkReachableObjects();
  }

  ~UnreachableObjectsFilter() {
    Isolate::Current()->heap()->mark_compact_collector()->ClearMarkbits();
  }

  bool SkipObject(HeapObject* object) {
    MarkBit mark_bit = Marking::MarkBitFrom(object);
    return !mark_bit.Get();
  }

 private:
  class MarkingVisitor : public ObjectVisitor {
   public:
    MarkingVisitor() : marking_stack_(10) {}

    void VisitPointers(Object** start, Object** end) {
      for (Object** p = start; p < end; p++) {
        if (!(*p)->IsHeapObject()) continue;
        HeapObject* obj = HeapObject::cast(*p);
        MarkBit mark_bit = Marking::MarkBitFrom(obj);
        if (!mark_bit.Get()) {
          mark_bit.Set();
          marking_stack_.Add(obj);
        }
      }
    }

    void TransitiveClosure() {
      while (!marking_stack_.is_empty()) {
        HeapObject* obj = marking_stack_.RemoveLast();
        obj->Iterate(this);
      }
    }

   private:
    List<HeapObject*> marking_stack_;
  };

  void MarkReachableObjects() {
    Heap* heap = Isolate::Current()->heap();
    MarkingVisitor visitor;
    heap->IterateRoots(&visitor, VISIT_ALL);
    visitor.TransitiveClosure();
  }

  AssertNoAllocation no_alloc;
};


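// Illustrative usage sketch (not part of the original source): the filter is
// driven through HeapIterator's filtering mode, e.g. to visit only objects
// still reachable from the roots:
//
//   HeapIterator iterator(HeapIterator::kFilterUnreachable);
//   for (HeapObject* obj = iterator.next();
//        obj != NULL;
//        obj = iterator.next()) {
//     // obj is reachable; unreachable objects are skipped.
//   }
//
// The constructor marks the transitive closure of the roots using the mark
// bits, next() consults SkipObject(), and the destructor clears all mark
// bits so the heap is left in its initial state.

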
HeapIterator::HeapIterator()
    : filtering_(HeapIterator::kNoFiltering),
      filter_(NULL) {
  Init();
}


HeapIterator::HeapIterator(HeapIterator::HeapObjectsFiltering filtering)
    : filtering_(filtering),
      filter_(NULL) {
  Init();
}


HeapIterator::~HeapIterator() {
  Shutdown();
}


void HeapIterator::Init() {
  // Start the iteration.
  space_iterator_ = new SpaceIterator;
  switch (filtering_) {
    case kFilterUnreachable:
      filter_ = new UnreachableObjectsFilter;
      break;
    default:
      break;
  }
  object_iterator_ = space_iterator_->next();
}


void HeapIterator::Shutdown() {
#ifdef DEBUG
  // Assert that in filtering mode we have iterated through all
  // objects. Otherwise, heap will be left in an inconsistent state.
  if (filtering_ != kNoFiltering) {
    ASSERT(object_iterator_ == NULL);
  }
#endif
  // Make sure the last iterator is deallocated.
  delete space_iterator_;
  space_iterator_ = NULL;
  object_iterator_ = NULL;
  delete filter_;
  filter_ = NULL;
}


HeapObject* HeapIterator::next() {
  if (filter_ == NULL) return NextObject();

  HeapObject* obj = NextObject();
  while (obj != NULL && filter_->SkipObject(obj)) obj = NextObject();
  return obj;
}


HeapObject* HeapIterator::NextObject() {
  // No iterator means we are done.
  if (object_iterator_ == NULL) return NULL;

  if (HeapObject* obj = object_iterator_->next_object()) {
    // If the current iterator has more objects we are fine.
    return obj;
  } else {
    // Go through the spaces looking for one that has objects.
    while (space_iterator_->has_next()) {
      object_iterator_ = space_iterator_->next();
      if (HeapObject* obj = object_iterator_->next_object()) {
        return obj;
      }
    }
  }
  // Done with the last space.
  object_iterator_ = NULL;
  return NULL;
}


void HeapIterator::reset() {
  // Restart the iterator.
  Shutdown();
  Init();
}


#if defined(DEBUG) || defined(LIVE_OBJECT_LIST)

Object* const PathTracer::kAnyGlobalObject = reinterpret_cast<Object*>(NULL);


class PathTracer::MarkVisitor: public ObjectVisitor {
 public:
  explicit MarkVisitor(PathTracer* tracer) : tracer_(tracer) {}
  void VisitPointers(Object** start, Object** end) {
    // Scan all HeapObject pointers in [start, end)
    for (Object** p = start; !tracer_->found() && (p < end); p++) {
      if ((*p)->IsHeapObject())
        tracer_->MarkRecursively(p, this);
    }
  }

 private:
  PathTracer* tracer_;
};


class PathTracer::UnmarkVisitor: public ObjectVisitor {
 public:
  explicit UnmarkVisitor(PathTracer* tracer) : tracer_(tracer) {}
  void VisitPointers(Object** start, Object** end) {
    // Scan all HeapObject pointers in [start, end)
    for (Object** p = start; p < end; p++) {
      if ((*p)->IsHeapObject())
        tracer_->UnmarkRecursively(p, this);
    }
  }

 private:
  PathTracer* tracer_;
};


void PathTracer::VisitPointers(Object** start, Object** end) {
  bool done = ((what_to_find_ == FIND_FIRST) && found_target_);
  // Visit all HeapObject pointers in [start, end)
  for (Object** p = start; !done && (p < end); p++) {
    if ((*p)->IsHeapObject()) {
      TracePathFrom(p);
      done = ((what_to_find_ == FIND_FIRST) && found_target_);
    }
  }
}


void PathTracer::Reset() {
  found_target_ = false;
  object_stack_.Clear();
}


void PathTracer::TracePathFrom(Object** root) {
  ASSERT((search_target_ == kAnyGlobalObject) ||
         search_target_->IsHeapObject());
  found_target_in_trace_ = false;
  object_stack_.Clear();

  MarkVisitor mark_visitor(this);
  MarkRecursively(root, &mark_visitor);

  UnmarkVisitor unmark_visitor(this);
  UnmarkRecursively(root, &unmark_visitor);

  ProcessResults();
}


static bool SafeIsGlobalContext(HeapObject* obj) {
  return obj->map() == obj->GetHeap()->raw_unchecked_global_context_map();
}


void PathTracer::MarkRecursively(Object** p, MarkVisitor* mark_visitor) {
  if (!(*p)->IsHeapObject()) return;

  HeapObject* obj = HeapObject::cast(*p);

  Object* map = obj->map();

  if (!map->IsHeapObject()) return;  // visited before

  if (found_target_in_trace_) return;  // stop if target found
  object_stack_.Add(obj);
  if (((search_target_ == kAnyGlobalObject) && obj->IsJSGlobalObject()) ||
      (obj == search_target_)) {
    found_target_in_trace_ = true;
    found_target_ = true;
    return;
  }

  bool is_global_context = SafeIsGlobalContext(obj);

  // not visited yet
  Map* map_p = reinterpret_cast<Map*>(HeapObject::cast(map));

  Address map_addr = map_p->address();

  obj->set_map_no_write_barrier(reinterpret_cast<Map*>(map_addr + kMarkTag));

  // Scan the object body.
  if (is_global_context && (visit_mode_ == VISIT_ONLY_STRONG)) {
    // This is specialized to scan Context's properly.
    Object** start = reinterpret_cast<Object**>(obj->address() +
                                                Context::kHeaderSize);
    Object** end = reinterpret_cast<Object**>(obj->address() +
        Context::kHeaderSize + Context::FIRST_WEAK_SLOT * kPointerSize);
    mark_visitor->VisitPointers(start, end);
  } else {
    obj->IterateBody(map_p->instance_type(),
                     obj->SizeFromMap(map_p),
                     mark_visitor);
  }

  // Scan the map after the body because the body is a lot more interesting
  // when doing leak detection.
  MarkRecursively(&map, mark_visitor);

  if (!found_target_in_trace_)  // don't pop if found the target
    object_stack_.RemoveLast();
}


void PathTracer::UnmarkRecursively(Object** p, UnmarkVisitor* unmark_visitor) {
  if (!(*p)->IsHeapObject()) return;

  HeapObject* obj = HeapObject::cast(*p);

  Object* map = obj->map();

  if (map->IsHeapObject()) return;  // unmarked already

  Address map_addr = reinterpret_cast<Address>(map);

  map_addr -= kMarkTag;

  ASSERT_TAG_ALIGNED(map_addr);

  HeapObject* map_p = HeapObject::FromAddress(map_addr);

  obj->set_map_no_write_barrier(reinterpret_cast<Map*>(map_p));

  UnmarkRecursively(reinterpret_cast<Object**>(&map_p), unmark_visitor);

  obj->IterateBody(Map::cast(map_p)->instance_type(),
                   obj->SizeFromMap(Map::cast(map_p)),
                   unmark_visitor);
}


void PathTracer::ProcessResults() {
  if (found_target_) {
    PrintF("=====================================\n");
    PrintF("====        Path to object       ====\n");
    PrintF("=====================================\n\n");

    ASSERT(!object_stack_.is_empty());
    for (int i = 0; i < object_stack_.length(); i++) {
      if (i > 0) PrintF("\n     |\n     |\n     V\n\n");
      Object* obj = object_stack_[i];
      obj->Print();
    }
    PrintF("=====================================\n");
  }
}
#endif  // DEBUG || LIVE_OBJECT_LIST


#ifdef DEBUG
// Triggers a depth-first traversal of reachable objects from roots
// and finds a path to a specific heap object and prints it.
void Heap::TracePathToObject(Object* target) {
  PathTracer tracer(target, PathTracer::FIND_ALL, VISIT_ALL);
  IterateRoots(&tracer, VISIT_ONLY_STRONG);
}


// Triggers a depth-first traversal of reachable objects from roots
// and finds a path to any global object and prints it. Useful for
// determining the source for leaks of global objects.
void Heap::TracePathToGlobal() {
  PathTracer tracer(PathTracer::kAnyGlobalObject,
                    PathTracer::FIND_ALL,
                    VISIT_ALL);
  IterateRoots(&tracer, VISIT_ONLY_STRONG);
}
#endif


static intptr_t CountTotalHolesSize() {
  intptr_t holes_size = 0;
  OldSpaces spaces;
  for (OldSpace* space = spaces.next();
       space != NULL;
       space = spaces.next()) {
    holes_size += space->Waste() + space->Available();
  }
  return holes_size;
}


GCTracer::GCTracer(Heap* heap,
                   const char* gc_reason,
                   const char* collector_reason)
    : start_time_(0.0),
      start_object_size_(0),
      start_memory_size_(0),
      gc_count_(0),
      full_gc_count_(0),
      allocated_since_last_gc_(0),
      spent_in_mutator_(0),
      promoted_objects_size_(0),
      heap_(heap),
      gc_reason_(gc_reason),
      collector_reason_(collector_reason) {
  if (!FLAG_trace_gc && !FLAG_print_cumulative_gc_stat) return;
  start_time_ = OS::TimeCurrentMillis();
  start_object_size_ = heap_->SizeOfObjects();
  start_memory_size_ = heap_->isolate()->memory_allocator()->Size();

  for (int i = 0; i < Scope::kNumberOfScopes; i++) {
    scopes_[i] = 0;
  }

  in_free_list_or_wasted_before_gc_ = CountTotalHolesSize();

  allocated_since_last_gc_ =
      heap_->SizeOfObjects() - heap_->alive_after_last_gc_;

  if (heap_->last_gc_end_timestamp_ > 0) {
    spent_in_mutator_ = Max(start_time_ - heap_->last_gc_end_timestamp_, 0.0);
  }

  steps_count_ = heap_->incremental_marking()->steps_count();
  steps_took_ = heap_->incremental_marking()->steps_took();
  longest_step_ = heap_->incremental_marking()->longest_step();
  steps_count_since_last_gc_ =
      heap_->incremental_marking()->steps_count_since_last_gc();
  steps_took_since_last_gc_ =
      heap_->incremental_marking()->steps_took_since_last_gc();
}


GCTracer::~GCTracer() {
  // Printf ONE line iff flag is set.
  if (!FLAG_trace_gc && !FLAG_print_cumulative_gc_stat) return;

  bool first_gc = (heap_->last_gc_end_timestamp_ == 0);

  heap_->alive_after_last_gc_ = heap_->SizeOfObjects();
  heap_->last_gc_end_timestamp_ = OS::TimeCurrentMillis();

  int time = static_cast<int>(heap_->last_gc_end_timestamp_ - start_time_);

  // Update cumulative GC statistics if required.
  if (FLAG_print_cumulative_gc_stat) {
    heap_->max_gc_pause_ = Max(heap_->max_gc_pause_, time);
    heap_->max_alive_after_gc_ = Max(heap_->max_alive_after_gc_,
                                     heap_->alive_after_last_gc_);
    if (!first_gc) {
      heap_->min_in_mutator_ = Min(heap_->min_in_mutator_,
                                   static_cast<int>(spent_in_mutator_));
    }
  }
6844 PrintF("%8.0f ms: ", heap_->isolate()->time_millis_since_init());
6846 if (!FLAG_trace_gc_nvp) {
6847 int external_time = static_cast<int>(scopes_[Scope::EXTERNAL]);
6849 double end_memory_size_mb =
6850 static_cast<double>(heap_->isolate()->memory_allocator()->Size()) / MB;
6852 PrintF("%s %.1f (%.1f) -> %.1f (%.1f) MB, ",
6854 static_cast<double>(start_object_size_) / MB,
6855 static_cast<double>(start_memory_size_) / MB,
6856 SizeOfHeapObjects(),
6857 end_memory_size_mb);
6859 if (external_time > 0) PrintF("%d / ", external_time);
6860 PrintF("%d ms", time);
6861 if (steps_count_ > 0) {
6862 if (collector_ == SCAVENGER) {
6863 PrintF(" (+ %d ms in %d steps since last GC)",
6864 static_cast<int>(steps_took_since_last_gc_),
6865 steps_count_since_last_gc_);
6867 PrintF(" (+ %d ms in %d steps since start of marking, "
6868 "biggest step %f ms)",
6869 static_cast<int>(steps_took_),
6875 if (gc_reason_ != NULL) {
6876 PrintF(" [%s]", gc_reason_);
6879 if (collector_reason_ != NULL) {
6880 PrintF(" [%s]", collector_reason_);
6885 PrintF("pause=%d ", time);
6886 PrintF("mutator=%d ",
6887 static_cast<int>(spent_in_mutator_));
6890 switch (collector_) {
6894 case MARK_COMPACTOR:
6902 PrintF("external=%d ", static_cast<int>(scopes_[Scope::EXTERNAL]));
6903 PrintF("mark=%d ", static_cast<int>(scopes_[Scope::MC_MARK]));
6904 PrintF("sweep=%d ", static_cast<int>(scopes_[Scope::MC_SWEEP]));
6905 PrintF("sweepns=%d ", static_cast<int>(scopes_[Scope::MC_SWEEP_NEWSPACE]));
6906 PrintF("evacuate=%d ", static_cast<int>(scopes_[Scope::MC_EVACUATE_PAGES]));
6907 PrintF("new_new=%d ",
6908 static_cast<int>(scopes_[Scope::MC_UPDATE_NEW_TO_NEW_POINTERS]));
6909 PrintF("root_new=%d ",
6910 static_cast<int>(scopes_[Scope::MC_UPDATE_ROOT_TO_NEW_POINTERS]));
6911 PrintF("old_new=%d ",
6912 static_cast<int>(scopes_[Scope::MC_UPDATE_OLD_TO_NEW_POINTERS]));
6913 PrintF("compaction_ptrs=%d ",
6914 static_cast<int>(scopes_[Scope::MC_UPDATE_POINTERS_TO_EVACUATED]));
6915 PrintF("intracompaction_ptrs=%d ", static_cast<int>(scopes_[
6916 Scope::MC_UPDATE_POINTERS_BETWEEN_EVACUATED]));
6917 PrintF("misc_compaction=%d ",
6918 static_cast<int>(scopes_[Scope::MC_UPDATE_MISC_POINTERS]));
6920 PrintF("total_size_before=%" V8_PTR_PREFIX "d ", start_object_size_);
6921 PrintF("total_size_after=%" V8_PTR_PREFIX "d ", heap_->SizeOfObjects());
6922 PrintF("holes_size_before=%" V8_PTR_PREFIX "d ",
6923 in_free_list_or_wasted_before_gc_);
6924 PrintF("holes_size_after=%" V8_PTR_PREFIX "d ", CountTotalHolesSize());
6926 PrintF("allocated=%" V8_PTR_PREFIX "d ", allocated_since_last_gc_);
6927 PrintF("promoted=%" V8_PTR_PREFIX "d ", promoted_objects_size_);
6929 if (collector_ == SCAVENGER) {
6930 PrintF("stepscount=%d ", steps_count_since_last_gc_);
6931 PrintF("stepstook=%d ", static_cast<int>(steps_took_since_last_gc_));
6933 PrintF("stepscount=%d ", steps_count_);
6934 PrintF("stepstook=%d ", static_cast<int>(steps_took_));
6940 heap_->PrintShortHeapStatistics();
const char* GCTracer::CollectorString() {
  switch (collector_) {
    case SCAVENGER:
      return "Scavenge";
    case MARK_COMPACTOR:
      return "Mark-sweep";
  }
  return "Unknown GC";
}


int KeyedLookupCache::Hash(Map* map, String* name) {
  // Uses only lower 32 bits if pointers are larger.
  uintptr_t addr_hash =
      static_cast<uint32_t>(reinterpret_cast<uintptr_t>(map)) >> kMapHashShift;
  return static_cast<uint32_t>((addr_hash ^ name->Hash()) & kCapacityMask);
}


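// Illustrative note (not part of the original source): maps are allocated at
// aligned addresses, so the low bits of the pointer carry little entropy and
// are shifted away before mixing. With hypothetical values map == 0x2a63a0c0
// and a name hash of 0x1f3c, the bucket index would be
// ((0x2a63a0c0 >> kMapHashShift) ^ 0x1f3c) & kCapacityMask. Lookup then
// checks equality of both map and name, so a hash collision merely causes a
// cache miss, never a wrong field offset.

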
int KeyedLookupCache::Lookup(Map* map, String* name) {
  int index = (Hash(map, name) & kHashMask);
  for (int i = 0; i < kEntriesPerBucket; i++) {
    Key& key = keys_[index + i];
    if ((key.map == map) && key.name->Equals(name)) {
      return field_offsets_[index + i];
    }
  }
  return kNotFound;
}


void KeyedLookupCache::Update(Map* map, String* name, int field_offset) {
  String* symbol;
  if (HEAP->LookupSymbolIfExists(name, &symbol)) {
    int index = (Hash(map, symbol) & kHashMask);
    // After a GC there will be free slots, so we use them in order (this may
    // help to get the most frequently used one in position 0).
    for (int i = 0; i < kEntriesPerBucket; i++) {
      Key& key = keys_[index + i];
      Object* free_entry_indicator = NULL;
      if (key.map == free_entry_indicator) {
        key.map = map;
        key.name = symbol;
        field_offsets_[index + i] = field_offset;
        return;
      }
    }
    // No free entry found in this bucket, so we move them all down one and
    // put the new entry at position zero.
    for (int i = kEntriesPerBucket - 1; i > 0; i--) {
      Key& key = keys_[index + i];
      Key& key2 = keys_[index + i - 1];
      key = key2;
      field_offsets_[index + i] = field_offsets_[index + i - 1];
    }

    // Write the new first entry.
    Key& key = keys_[index];
    key.map = map;
    key.name = symbol;
    field_offsets_[index] = field_offset;
  }
}


void KeyedLookupCache::Clear() {
  for (int index = 0; index < kLength; index++) keys_[index].map = NULL;
}


void DescriptorLookupCache::Clear() {
  for (int index = 0; index < kLength; index++) keys_[index].array = NULL;
}


#ifdef DEBUG
void Heap::GarbageCollectionGreedyCheck() {
  ASSERT(FLAG_gc_greedy);
  if (isolate_->bootstrapper()->IsActive()) return;
  if (disallow_allocation_failure()) return;
  CollectGarbage(NEW_SPACE);
}
#endif


TranscendentalCache::SubCache::SubCache(Type t)
    : type_(t),
      isolate_(Isolate::Current()) {
  uint32_t in0 = 0xffffffffu;  // Bit-pattern for a NaN that isn't
  uint32_t in1 = 0xffffffffu;  // generated by the FPU.
  for (int i = 0; i < kCacheSize; i++) {
    elements_[i].in[0] = in0;
    elements_[i].in[1] = in1;
    elements_[i].output = NULL;
  }
}


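// Illustrative note (not part of the original source): cache lookups compare
// the two 32-bit halves of the double argument against in[0] and in[1]. The
// word pair 0xffffffff:0xffffffff decodes as a NaN with a payload the FPU
// never produces, so a freshly initialized entry can never spuriously match
// a real input; the first lookup of every slot is guaranteed to miss rather
// than return a stale or NULL output.

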
void TranscendentalCache::Clear() {
  for (int i = 0; i < kNumberOfCaches; i++) {
    if (caches_[i] != NULL) {
      delete caches_[i];
      caches_[i] = NULL;
    }
  }
}


void ExternalStringTable::CleanUp() {
  int last = 0;
  for (int i = 0; i < new_space_strings_.length(); ++i) {
    if (new_space_strings_[i] == heap_->raw_unchecked_the_hole_value()) {
      continue;
    }
    if (heap_->InNewSpace(new_space_strings_[i])) {
      new_space_strings_[last++] = new_space_strings_[i];
    } else {
      old_space_strings_.Add(new_space_strings_[i]);
    }
  }
  new_space_strings_.Rewind(last);
  last = 0;
  for (int i = 0; i < old_space_strings_.length(); ++i) {
    if (old_space_strings_[i] == heap_->raw_unchecked_the_hole_value()) {
      continue;
    }
    ASSERT(!heap_->InNewSpace(old_space_strings_[i]));
    old_space_strings_[last++] = old_space_strings_[i];
  }
  old_space_strings_.Rewind(last);
  if (FLAG_verify_heap) {
    Verify();
  }
}


void ExternalStringTable::TearDown() {
  new_space_strings_.Free();
  old_space_strings_.Free();
}


void Heap::QueueMemoryChunkForFree(MemoryChunk* chunk) {
  chunk->set_next_chunk(chunks_queued_for_free_);
  chunks_queued_for_free_ = chunk;
}


void Heap::FreeQueuedChunks() {
  if (chunks_queued_for_free_ == NULL) return;
  MemoryChunk* next;
  MemoryChunk* chunk;
  for (chunk = chunks_queued_for_free_; chunk != NULL; chunk = next) {
    next = chunk->next_chunk();
    chunk->SetFlag(MemoryChunk::ABOUT_TO_BE_FREED);

    if (chunk->owner()->identity() == LO_SPACE) {
      // StoreBuffer::Filter relies on MemoryChunk::FromAnyPointerAddress.
      // If FromAnyPointerAddress encounters a slot that belongs to a large
      // chunk queued for deletion it will fail to find the chunk because
      // it tries to perform a search in the list of pages owned by the large
      // object space, and queued chunks were detached from that list.
      // To work around this we split large chunks into normal kPageSize
      // aligned pieces and initialize the size, owner and flags field of
      // every piece. If FromAnyPointerAddress encounters a slot that belongs
      // to one of these smaller pieces it will treat it as a slot on a
      // normal Page.
      Address chunk_end = chunk->address() + chunk->size();
      MemoryChunk* inner = MemoryChunk::FromAddress(
          chunk->address() + Page::kPageSize);
      MemoryChunk* inner_last = MemoryChunk::FromAddress(chunk_end - 1);
      while (inner <= inner_last) {
        // Size of a large chunk is always a multiple of
        // OS::AllocateAlignment() so there is always
        // enough space for a fake MemoryChunk header.
        Address area_end = Min(inner->address() + Page::kPageSize, chunk_end);
        // Guard against overflow.
        if (area_end < inner->address()) area_end = chunk_end;
        inner->SetArea(inner->address(), area_end);
        inner->set_size(Page::kPageSize);
        inner->set_owner(lo_space());
        inner->SetFlag(MemoryChunk::ABOUT_TO_BE_FREED);
        inner = MemoryChunk::FromAddress(
            inner->address() + Page::kPageSize);
      }
    }
  }
  isolate_->heap()->store_buffer()->Compact();
  isolate_->heap()->store_buffer()->Filter(MemoryChunk::ABOUT_TO_BE_FREED);
  for (chunk = chunks_queued_for_free_; chunk != NULL; chunk = next) {
    next = chunk->next_chunk();
    isolate_->memory_allocator()->Free(chunk);
  }
  chunks_queued_for_free_ = NULL;
}


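// Illustrative note (not part of the original source): assuming 1 MB pages,
// a queued large chunk at address A of size 2.5 MB gets fake MemoryChunk
// headers written at A + 1 MB and A + 2 MB. A store buffer slot pointing
// anywhere inside the chunk then resolves, via FromAnyPointerAddress's
// page-size alignment, to one of these headers, sees ABOUT_TO_BE_FREED, and
// is filtered out before the chunk's memory is actually released.

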
void Heap::RememberUnmappedPage(Address page, bool compacted) {
  uintptr_t p = reinterpret_cast<uintptr_t>(page);
  // Tag the page pointer to make it findable in the dump file.
  if (compacted) {
    p ^= 0xc1ead & (Page::kPageSize - 1);  // Cleared.
  } else {
    p ^= 0x1d1ed & (Page::kPageSize - 1);  // I died.
  }
  remembered_unmapped_pages_[remembered_unmapped_pages_index_] =
      reinterpret_cast<Address>(p);
  remembered_unmapped_pages_index_++;
  remembered_unmapped_pages_index_ %= kRememberedUnmappedPages;
}


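// Illustrative note (not part of the original source): pages are aligned to
// Page::kPageSize, so the low offset bits of |page| are zero and XOR-ing in
// a marker smaller than the page size keeps the page base recoverable.
// Assuming 1 MB pages, a compacted page at 0x12300000 is remembered as
// 0x123c1ead: the "c1ead"/"1d1ed" digits spell out *why* the page went away
// when the array is read out of a crash dump.

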
} }  // namespace v8::internal