void v8::V8::LowMemoryNotification() {
i::Isolate* isolate = i::Isolate::Current();
if (isolate == NULL || !isolate->IsInitialized()) return;
- isolate->heap()->CollectAllAvailableGarbage();
+ isolate->heap()->CollectAllAvailableGarbage("low memory notification");
}
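+// Usage sketch (illustrative, embedder side): an embedder reacting to OS
+// memory pressure typically calls v8::V8::LowMemoryNotification(), which now
+// reaches CollectAllAvailableGarbage with the reason string above.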
{
// We are going to iterate heap to find all functions without
// debug break slots.
- isolate_->heap()->CollectAllGarbage(Heap::kMakeHeapIterableMask);
+ isolate_->heap()->CollectAllGarbage(Heap::kMakeHeapIterableMask,
+ "preparing for breakpoints");
// Ensure no GC in this scope as we are going to use gc_metadata
// field in the Code object to mark active functions.
// rid of all the cached script wrappers and the second gets rid of the
// scripts which are no longer referenced. The second also sweeps precisely,
// which saves us doing yet another GC to make the heap iterable.
- heap->CollectAllGarbage(Heap::kNoGCFlags);
- heap->CollectAllGarbage(Heap::kMakeHeapIterableMask);
+ heap->CollectAllGarbage(Heap::kNoGCFlags, "Debug::CreateScriptCache");
+ heap->CollectAllGarbage(Heap::kMakeHeapIterableMask,
+ "Debug::CreateScriptCache");
ASSERT(script_cache_ == NULL);
script_cache_ = new ScriptCache();
// Perform GC to get unreferenced scripts evicted from the cache before
// returning the content.
- isolate_->heap()->CollectAllGarbage(Heap::kNoGCFlags);
+ isolate_->heap()->CollectAllGarbage(Heap::kNoGCFlags,
+ "Debug::GetLoadedScripts");
// Get the scripts from the cache.
return script_cache_->GetScripts();
StackGuard* stack_guard = isolate->stack_guard();
if (stack_guard->IsGCRequest()) {
- isolate->heap()->CollectAllGarbage(false);
+ isolate->heap()->CollectAllGarbage(false, "StackGuard GC request");
stack_guard->Continue(GC_REQUEST);
}
v8::Handle<v8::Value> GCExtension::GC(const v8::Arguments& args) {
- HEAP->CollectAllGarbage(Heap::kNoGCFlags);
+ HEAP->CollectAllGarbage(Heap::kNoGCFlags, "gc extension");
return v8::Undefined();
}
}
-bool Heap::CollectGarbage(AllocationSpace space) {
- return CollectGarbage(space, SelectGarbageCollector(space));
+bool Heap::CollectGarbage(AllocationSpace space, const char* gc_reason) {
+ const char* collector_reason = NULL;
+ GarbageCollector collector = SelectGarbageCollector(space, &collector_reason);
+ return CollectGarbage(space, collector, gc_reason, collector_reason);
}
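+// Illustrative call (taken from the ReserveSpace changes further down): a
+// caller such as
+//   CollectGarbage(OLD_DATA_SPACE, "failed to reserve space in the old data space");
+// gets its reason recorded alongside the collector-selection reason that
+// SelectGarbageCollector writes into collector_reason above.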
amount_of_external_allocated_memory_ -
amount_of_external_allocated_memory_at_last_global_gc_;
if (amount_since_last_global_gc > external_allocation_limit_) {
- CollectAllGarbage(kNoGCFlags);
+ CollectAllGarbage(kNoGCFlags, "external memory allocation limit reached");
}
} else {
// Avoid underflow.
} \
if (!__maybe_object__->IsRetryAfterGC()) RETURN_EMPTY; \
ISOLATE->heap()->CollectGarbage(Failure::cast(__maybe_object__)-> \
- allocation_space()); \
+ allocation_space(), \
+ "allocation failure"); \
__maybe_object__ = FUNCTION_CALL; \
if (__maybe_object__->ToObject(&__object__)) RETURN_VALUE; \
if (__maybe_object__->IsOutOfMemory()) { \
} \
if (!__maybe_object__->IsRetryAfterGC()) RETURN_EMPTY; \
ISOLATE->counters()->gc_last_resort_from_handles()->Increment(); \
- ISOLATE->heap()->CollectAllAvailableGarbage(); \
+ ISOLATE->heap()->CollectAllAvailableGarbage("last resort gc"); \
{ \
AlwaysAllocateScope __scope__; \
__maybe_object__ = FUNCTION_CALL; \
}
-GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space) {
+GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space,
+ const char** reason) {
// Is global GC requested?
if (space != NEW_SPACE || FLAG_gc_global) {
isolate_->counters()->gc_compactor_caused_by_request()->Increment();
+ *reason = "GC in old space requested";
return MARK_COMPACTOR;
}
// Is enough data promoted to justify a global GC?
if (OldGenerationPromotionLimitReached()) {
isolate_->counters()->gc_compactor_caused_by_promoted_data()->Increment();
+ *reason = "promotion limit reached";
return MARK_COMPACTOR;
}
if (old_gen_exhausted_) {
isolate_->counters()->
gc_compactor_caused_by_oldspace_exhaustion()->Increment();
+ *reason = "old generations exhausted";
return MARK_COMPACTOR;
}
if (isolate_->memory_allocator()->MaxAvailable() <= new_space_.Size()) {
isolate_->counters()->
gc_compactor_caused_by_oldspace_exhaustion()->Increment();
+ *reason = "scavenge might not succeed";
return MARK_COMPACTOR;
}
// Default
+ *reason = NULL;
return SCAVENGER;
}
}
-void Heap::CollectAllGarbage(int flags) {
+void Heap::CollectAllGarbage(int flags, const char* gc_reason) {
// Since we are ignoring the return value, the exact choice of space does
// not matter, so long as we do not specify NEW_SPACE, which would not
// cause a full GC.
mark_compact_collector_.SetFlags(flags);
- CollectGarbage(OLD_POINTER_SPACE);
+ CollectGarbage(OLD_POINTER_SPACE, gc_reason);
mark_compact_collector_.SetFlags(kNoGCFlags);
}
-void Heap::CollectAllAvailableGarbage() {
+void Heap::CollectAllAvailableGarbage(const char* gc_reason) {
// Since we are ignoring the return value, the exact choice of space does
// not matter, so long as we do not specify NEW_SPACE, which would not
// cause a full GC.
// Note: as weak callbacks can execute arbitrary code, we cannot
// hope that eventually there will be no weak callbacks invocations.
// Therefore stop recollecting after several attempts.
- mark_compact_collector()->SetFlags(kMakeHeapIterableMask);
+ mark_compact_collector()->SetFlags(kMakeHeapIterableMask |
+ kReduceMemoryFootprintMask);
isolate_->compilation_cache()->Clear();
const int kMaxNumberOfAttempts = 7;
for (int attempt = 0; attempt < kMaxNumberOfAttempts; attempt++) {
- if (!CollectGarbage(OLD_POINTER_SPACE, MARK_COMPACTOR)) {
+ if (!CollectGarbage(OLD_POINTER_SPACE, MARK_COMPACTOR, gc_reason, NULL)) {
break;
}
}
}
-bool Heap::CollectGarbage(AllocationSpace space, GarbageCollector collector) {
+bool Heap::CollectGarbage(AllocationSpace space,
+ GarbageCollector collector,
+ const char* gc_reason,
+ const char* collector_reason) {
// The VM is in the GC state until exiting this function.
VMState state(isolate_, GC);
PrintF("[IncrementalMarking] Delaying MarkSweep.\n");
}
collector = SCAVENGER;
+ collector_reason = "incremental marking delaying mark-sweep";
}
bool next_gc_likely_to_collect_more = false;
- { GCTracer tracer(this);
+ { GCTracer tracer(this, gc_reason, collector_reason);
GarbageCollectionPrologue();
// The GC count was incremented in the prologue. Tell the tracer about
// it.
void Heap::PerformScavenge() {
- GCTracer tracer(this);
+ GCTracer tracer(this, NULL, NULL);
if (incremental_marking()->IsStopped()) {
PerformGarbageCollection(SCAVENGER, &tracer);
} else {
while (gc_performed && counter++ < kThreshold) {
gc_performed = false;
if (!new_space->ReserveSpace(new_space_size)) {
- Heap::CollectGarbage(NEW_SPACE);
+ Heap::CollectGarbage(NEW_SPACE,
+ "failed to reserve space in the new space");
gc_performed = true;
}
if (!old_pointer_space->ReserveSpace(pointer_space_size)) {
- Heap::CollectGarbage(OLD_POINTER_SPACE);
+ Heap::CollectGarbage(OLD_POINTER_SPACE,
+ "failed to reserve space in the old pointer space");
gc_performed = true;
}
if (!(old_data_space->ReserveSpace(data_space_size))) {
- Heap::CollectGarbage(OLD_DATA_SPACE);
+ Heap::CollectGarbage(OLD_DATA_SPACE,
+ "failed to reserve space in the old data space");
gc_performed = true;
}
if (!(code_space->ReserveSpace(code_space_size))) {
- Heap::CollectGarbage(CODE_SPACE);
+ Heap::CollectGarbage(CODE_SPACE,
+ "failed to reserve space in the code space");
gc_performed = true;
}
if (!(map_space->ReserveSpace(map_space_size))) {
- Heap::CollectGarbage(MAP_SPACE);
+ Heap::CollectGarbage(MAP_SPACE,
+ "failed to reserve space in the map space");
gc_performed = true;
}
if (!(cell_space->ReserveSpace(cell_space_size))) {
- Heap::CollectGarbage(CELL_SPACE);
+ Heap::CollectGarbage(CELL_SPACE,
+ "failed to reserve space in the cell space");
gc_performed = true;
}
// We add a slack-factor of 2 in order to have space for a series of
large_object_size += cell_space_size + map_space_size + code_space_size +
data_space_size + pointer_space_size;
if (!(lo_space->ReserveSpace(large_object_size))) {
- Heap::CollectGarbage(LO_SPACE);
+ Heap::CollectGarbage(LO_SPACE,
+ "failed to reserve space in the large object space");
gc_performed = true;
}
}
void Heap::EnsureHeapIsIterable() {
ASSERT(IsAllocationAllowed());
if (!IsHeapIterable()) {
- CollectAllGarbage(kMakeHeapIterableMask);
+ CollectAllGarbage(kMakeHeapIterableMask, "Heap::EnsureHeapIsIterable");
}
ASSERT(IsHeapIterable());
}
isolate_->compilation_cache()->Clear();
uncommit = true;
}
- CollectAllGarbage(kNoGCFlags);
+ CollectAllGarbage(kNoGCFlags, "idle notification: finalize incremental");
gc_count_at_last_idle_gc_ = gc_count_;
if (uncommit) {
new_space_.Shrink();
if (number_idle_notifications_ == kIdlesBeforeScavenge) {
if (contexts_disposed_ > 0) {
HistogramTimerScope scope(isolate_->counters()->gc_context());
- CollectAllGarbage(kNoGCFlags);
+ CollectAllGarbage(kReduceMemoryFootprintMask,
+ "idle notification: contexts disposed");
} else {
- CollectGarbage(NEW_SPACE);
+ CollectGarbage(NEW_SPACE, "idle notification");
}
new_space_.Shrink();
last_idle_notification_gc_count_ = gc_count_;
// generated code for cached functions.
isolate_->compilation_cache()->Clear();
- CollectAllGarbage(kNoGCFlags);
+ CollectAllGarbage(kReduceMemoryFootprintMask, "idle notification");
new_space_.Shrink();
last_idle_notification_gc_count_ = gc_count_;
} else if (number_idle_notifications_ == kIdlesBeforeMarkCompact) {
- CollectAllGarbage(kNoGCFlags);
+ CollectAllGarbage(kReduceMemoryFootprintMask, "idle notification");
new_space_.Shrink();
last_idle_notification_gc_count_ = gc_count_;
number_idle_notifications_ = 0;
contexts_disposed_ = 0;
} else {
HistogramTimerScope scope(isolate_->counters()->gc_context());
- CollectAllGarbage(kNoGCFlags);
+ CollectAllGarbage(kReduceMemoryFootprintMask,
+ "idle notification: contexts disposed");
last_idle_notification_gc_count_ = gc_count_;
}
// If this is the first idle notification, we reset the
}
-GCTracer::GCTracer(Heap* heap)
+GCTracer::GCTracer(Heap* heap,
+ const char* gc_reason,
+ const char* collector_reason)
: start_time_(0.0),
start_object_size_(0),
start_memory_size_(0),
allocated_since_last_gc_(0),
spent_in_mutator_(0),
promoted_objects_size_(0),
- heap_(heap) {
+ heap_(heap),
+ gc_reason_(gc_reason),
+ collector_reason_(collector_reason) {
if (!FLAG_trace_gc && !FLAG_print_cumulative_gc_stat) return;
start_time_ = OS::TimeCurrentMillis();
start_object_size_ = heap_->SizeOfObjects();
longest_step_);
}
}
+
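+ // With --trace-gc the reason strings show up as bracketed suffixes just
+ // before the final period of the trace line, e.g. a line ending in
+ // " [external memory allocation limit reached] [promotion limit reached]."
+ // (an illustrative combination of two reason strings used in this change).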
+ if (gc_reason_ != NULL) {
+ PrintF(" [%s]", gc_reason_);
+ }
+
+ if (collector_reason_ != NULL) {
+ PrintF(" [%s]", collector_reason_);
+ }
+
PrintF(".\n");
} else {
PrintF("pause=%d ", time);
// Performs garbage collection operation.
// Returns whether there is a chance that another major GC could
// collect more garbage.
- bool CollectGarbage(AllocationSpace space, GarbageCollector collector);
+ bool CollectGarbage(AllocationSpace space,
+ GarbageCollector collector,
+ const char* gc_reason,
+ const char* collector_reason);
// Performs garbage collection operation.
// Returns whether there is a chance that another major GC could
// collect more garbage.
- inline bool CollectGarbage(AllocationSpace space);
+ inline bool CollectGarbage(AllocationSpace space,
+ const char* gc_reason = NULL);
static const int kNoGCFlags = 0;
static const int kMakeHeapIterableMask = 1;
+ static const int kReduceMemoryFootprintMask = 2;
// Performs a full garbage collection. If (flags & kMakeHeapIterableMask) is
// non-zero, then the slower precise sweeper is used, which leaves the heap
// in a state where we can iterate over the heap visiting all objects.
- void CollectAllGarbage(int flags);
+ void CollectAllGarbage(int flags, const char* gc_reason = NULL);
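+ // Usage sketch (reason string illustrative): callers can combine flags, e.g.
+ //   CollectAllGarbage(kMakeHeapIterableMask | kReduceMemoryFootprintMask,
+ //                     "some descriptive reason");
+ // the same flag combination that CollectAllAvailableGarbage sets on the
+ // mark-compact collector.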
// Last hope GC, should try to squeeze as much as possible.
- void CollectAllAvailableGarbage();
+ void CollectAllAvailableGarbage(const char* gc_reason = NULL);
// Check whether the heap is currently iterable.
bool IsHeapIterable();
}
// Checks whether a global GC is necessary
- GarbageCollector SelectGarbageCollector(AllocationSpace space);
+ GarbageCollector SelectGarbageCollector(AllocationSpace space,
+ const char** reason);
// Performs garbage collection
// Returns whether there is a chance another major GC could
double start_time_;
};
- explicit GCTracer(Heap* heap);
+ explicit GCTracer(Heap* heap,
+ const char* gc_reason,
+ const char* collector_reason);
~GCTracer();
// Sets the collector.
double steps_took_since_last_gc_;
Heap* heap_;
+
+ const char* gc_reason_;
+ const char* collector_reason_;
};
void Logger::LogCodeObjects() {
- HEAP->CollectAllGarbage(Heap::kMakeHeapIterableMask);
+ HEAP->CollectAllGarbage(Heap::kMakeHeapIterableMask,
+ "Logger::LogCodeObjects");
HeapIterator iterator;
AssertNoAllocation no_alloc;
for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
void Logger::LogCompiledFunctions() {
- HEAP->CollectAllGarbage(Heap::kMakeHeapIterableMask);
+ HEAP->CollectAllGarbage(Heap::kMakeHeapIterableMask,
+ "Logger::LogCompiledFunctions");
HandleScope scope;
const int compiled_funcs_count = EnumerateCompiledFunctions(NULL, NULL);
ScopedVector< Handle<SharedFunctionInfo> > sfis(compiled_funcs_count);
void Logger::LogAccessorCallbacks() {
- HEAP->CollectAllGarbage(Heap::kMakeHeapIterableMask);
+ HEAP->CollectAllGarbage(Heap::kMakeHeapIterableMask,
+ "Logger::LogAccessorCallbacks");
HeapIterator iterator;
AssertNoAllocation no_alloc;
for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
void MarkCompactCollector::SetFlags(int flags) {
sweep_precisely_ = ((flags & Heap::kMakeHeapIterableMask) != 0);
+ reduce_memory_footprint_ = ((flags & Heap::kReduceMemoryFootprintMask) != 0);
}
}
+static void TraceFragmentation(PagedSpace* space) {
+ int number_of_pages = space->CountTotalPages();
+ intptr_t reserved = (number_of_pages * Page::kObjectAreaSize);
+ intptr_t free = reserved - space->SizeOfObjects();
+ PrintF("[%s]: %d pages, %d (%.1f%%) free\n",
+ AllocationSpaceName(space->identity()),
+ number_of_pages,
+ static_cast<int>(free),
+ static_cast<double>(free) * 100 / reserved);
+}
+
+
bool MarkCompactCollector::StartCompaction() {
if (!compacting_) {
ASSERT(evacuation_candidates_.length() == 0);
if (FLAG_compact_code_space) {
CollectEvacuationCandidates(heap()->code_space());
+ } else if (FLAG_trace_fragmentation) {
+ TraceFragmentation(heap()->code_space());
+ }
+
+ if (FLAG_trace_fragmentation) {
+ TraceFragmentation(heap()->map_space());
+ TraceFragmentation(heap()->cell_space());
}
heap()->old_pointer_space()->EvictEvacuationCandidatesFromFreeLists();
}
+// Returns zero for pages that have so little fragmentation that it is not
+// worth defragmenting them. Otherwise a positive integer that gives an
+// estimate of fragmentation on an arbitrary scale.
+static int FreeListFragmentation(PagedSpace* space, Page* p) {
+ // If page was not swept then there are no free list items on it.
+ if (!p->WasSwept()) {
+ if (FLAG_trace_fragmentation) {
+ PrintF("%p [%s]: %d bytes live (unswept)\n",
+ reinterpret_cast<void*>(p),
+ AllocationSpaceName(space->identity()),
+ p->LiveBytes());
+ }
+ return 0;
+ }
+
+ FreeList::SizeStats sizes;
+ space->CountFreeListItems(p, &sizes);
+
+ intptr_t ratio;
+ intptr_t ratio_threshold;
+ if (space->identity() == CODE_SPACE) {
+ ratio = (sizes.medium_size_ * 10 + sizes.large_size_ * 2) * 100 /
+ Page::kObjectAreaSize;
+ ratio_threshold = 10;
+ } else {
+ ratio = (sizes.small_size_ * 5 + sizes.medium_size_) * 100 /
+ Page::kObjectAreaSize;
+ ratio_threshold = 15;
+ }
+
+ if (FLAG_trace_fragmentation) {
+ PrintF("%p [%s]: %d (%.2f%%) %d (%.2f%%) %d (%.2f%%) %d (%.2f%%) %s\n",
+ reinterpret_cast<void*>(p),
+ AllocationSpaceName(space->identity()),
+ static_cast<int>(sizes.small_size_),
+ static_cast<double>(sizes.small_size_ * 100) /
+ Page::kObjectAreaSize,
+ static_cast<int>(sizes.medium_size_),
+ static_cast<double>(sizes.medium_size_ * 100) /
+ Page::kObjectAreaSize,
+ static_cast<int>(sizes.large_size_),
+ static_cast<double>(sizes.large_size_ * 100) /
+ Page::kObjectAreaSize,
+ static_cast<int>(sizes.huge_size_),
+ static_cast<double>(sizes.huge_size_ * 100) /
+ Page::kObjectAreaSize,
+ (ratio > ratio_threshold) ? "[fragmented]" : "");
+ }
+
+ if (FLAG_always_compact && sizes.Total() != Page::kObjectAreaSize) {
+ return 1;
+ }
+
+ if (ratio <= ratio_threshold) return 0; // Not fragmented.
+
+ return static_cast<int>(ratio - ratio_threshold);
+}
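+// Worked example (free-list sizes assumed; Page::kObjectAreaSize taken as
+// 1 MB for round numbers, the real value is slightly smaller): for an old
+// space page with 20 KB of small and 100 KB of medium free-list items,
+// ratio = (20480 * 5 + 102400) * 100 / 1048576 == 19, which exceeds the
+// threshold of 15, so the page is reported as fragmented with an estimate
+// of 19 - 15 = 4.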
+
+
void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
ASSERT(space->identity() == OLD_POINTER_SPACE ||
space->identity() == OLD_DATA_SPACE ||
int number_of_pages = space->CountTotalPages();
- PageIterator it(space);
const int kMaxMaxEvacuationCandidates = 1000;
int max_evacuation_candidates = Min(
kMaxMaxEvacuationCandidates,
Page* page_;
};
+ enum CompactionMode {
+ COMPACT_FREE_LISTS,
+ REDUCE_MEMORY_FOOTPRINT
+ };
+
+ CompactionMode mode = COMPACT_FREE_LISTS;
+
+ intptr_t reserved = number_of_pages * Page::kObjectAreaSize;
+ intptr_t over_reserved = reserved - space->SizeOfObjects();
+ static const intptr_t kFreenessThreshold = 50;
+
+ if (over_reserved >= 2 * Page::kObjectAreaSize &&
+ reduce_memory_footprint_) {
+ mode = REDUCE_MEMORY_FOOTPRINT;
+
+ // We expect that empty pages are easier to compact so slightly bump the
+ // limit.
+ max_evacuation_candidates += 2;
+
+ if (FLAG_trace_fragmentation) {
+ PrintF("Estimated over reserved memory: %.1f MB (setting threshold %d)\n",
+ static_cast<double>(over_reserved) / MB,
+ static_cast<int>(kFreenessThreshold));
+ }
+ }
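+ // Worked example (sizes assumed for round numbers; the real object area is
+ // slightly under 1 MB): with 20 pages reserved and ~12 MB of live objects,
+ // over_reserved is ~8 MB, well above 2 * Page::kObjectAreaSize, so the
+ // collector switches to REDUCE_MEMORY_FOOTPRINT. In the loop below a page
+ // that is 60% free meets the 50% kFreenessThreshold and becomes a candidate
+ // with fragmentation = 60, and pages stop being considered for release once
+ // estimated_release reaches three quarters of over_reserved.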
+
+ intptr_t estimated_release = 0;
+
Candidate candidates[kMaxMaxEvacuationCandidates];
int count = 0;
- if (it.has_next()) it.next(); // Never compact the first page.
int fragmentation = 0;
Candidate* least = NULL;
+
+ PageIterator it(space);
+ if (it.has_next()) it.next(); // Never compact the first page.
+
while (it.has_next()) {
Page* p = it.next();
p->ClearEvacuationCandidate();
+
if (FLAG_stress_compaction) {
int counter = space->heap()->ms_count();
uintptr_t page_number = reinterpret_cast<uintptr_t>(p) >> kPageSizeBits;
if ((counter & 1) == (page_number & 1)) fragmentation = 1;
+ } else if (mode == REDUCE_MEMORY_FOOTPRINT) {
+ // Don't try to release too many pages.
+ if (estimated_release >= ((over_reserved * 3) / 4)) {
+ continue;
+ }
+
+ intptr_t free_bytes = 0;
+
+ if (!p->WasSwept()) {
+ free_bytes = (Page::kObjectAreaSize - p->LiveBytes());
+ } else {
+ FreeList::SizeStats sizes;
+ space->CountFreeListItems(p, &sizes);
+ free_bytes = sizes.Total();
+ }
+
+ int free_pct = static_cast<int>(free_bytes * 100 / Page::kObjectAreaSize);
+
+ if (free_pct >= kFreenessThreshold) {
+ estimated_release += Page::kObjectAreaSize +
+ (Page::kObjectAreaSize - free_bytes);
+ fragmentation = free_pct;
+ } else {
+ fragmentation = 0;
+ }
+
+ if (FLAG_trace_fragmentation) {
+ PrintF("%p [%s]: %d (%.2f%%) free %s\n",
+ reinterpret_cast<void*>(p),
+ AllocationSpaceName(space->identity()),
+ static_cast<int>(free_bytes),
+ static_cast<double>(free_bytes * 100) / Page::kObjectAreaSize,
+ (fragmentation > 0) ? "[fragmented]" : "");
+ }
} else {
- fragmentation = space->Fragmentation(p);
+ fragmentation = FreeListFragmentation(space, p);
}
+
if (fragmentation != 0) {
if (count < max_evacuation_candidates) {
candidates[count++] = Candidate(fragmentation, p);
}
}
}
+
for (int i = 0; i < count; i++) {
AddEvacuationCandidate(candidates[i].page());
}
p->set_scan_on_scavenge(false);
slots_buffer_allocator_.DeallocateChain(p->slots_buffer_address());
p->ClearEvacuationCandidate();
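+ // At this point the candidate page has been fully evacuated, so its live
+ // byte count is cleared and the page is handed back to its space
+ // (assumption: ReleasePage ultimately makes the page reusable or unmaps it),
+ // which is where the reduce-memory-footprint mode actually returns memory.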
+ p->ResetLiveBytes();
+ space->ReleasePage(p);
}
evacuation_candidates_.Rewind(0);
compacting_ = false;
static const int kNumberOfElements = 1021;
private:
- static const int kChainLengthThreshold = 6;
+ static const int kChainLengthThreshold = 15;
intptr_t idx_;
intptr_t chain_length_;
// heap.
bool sweep_precisely_;
+ bool reduce_memory_footprint_;
+
// True if we are collecting slots to perform evacuation from evacuation
// candidates.
bool compacting_;
}
// If we don't do this then we end up with a stray root pointing at the
// context even after we have disposed of the context.
- HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
+ HEAP->CollectAllGarbage(i::Heap::kNoGCFlags, "mksnapshot");
i::Object* raw_context = *(v8::Utils::OpenHandle(*context));
context.Dispose();
CppByteSink sink(argv[1]);
Handle<HeapObject> HeapSnapshotsCollection::FindHeapObjectById(uint64_t id) {
// First perform a full GC in order to avoid dead objects.
- HEAP->CollectAllGarbage(Heap::kMakeHeapIterableMask);
+ HEAP->CollectAllGarbage(Heap::kMakeHeapIterableMask,
+ "HeapSnapshotsCollection::FindHeapObjectById");
AssertNoAllocation no_allocation;
HeapObject* object = NULL;
HeapIterator iterator(HeapIterator::kFilterUnreachable);
// full GC is reachable from the root when computing dominators.
// This is not true for weakly reachable objects.
// As a temporary solution we call GC twice.
- Isolate::Current()->heap()->CollectAllGarbage(Heap::kMakeHeapIterableMask);
- Isolate::Current()->heap()->CollectAllGarbage(Heap::kMakeHeapIterableMask);
+ Isolate::Current()->heap()->CollectAllGarbage(
+ Heap::kMakeHeapIterableMask,
+ "HeapSnapshotGenerator::GenerateSnapshot");
+ Isolate::Current()->heap()->CollectAllGarbage(
+ Heap::kMakeHeapIterableMask,
+ "HeapSnapshotGenerator::GenerateSnapshot");
#ifdef DEBUG
Heap* debug_heap = Isolate::Current()->heap();
ASSERT(args.length() == 3);
// First perform a full GC in order to avoid references from dead objects.
- isolate->heap()->CollectAllGarbage(Heap::kMakeHeapIterableMask);
+ isolate->heap()->CollectAllGarbage(Heap::kMakeHeapIterableMask,
+ "%DebugReferencedBy");
// The heap iterator reserves the right to do a GC to make the heap iterable.
// Due to the GC above we know it won't need to do that, but it seems cleaner
// to get the heap iterator constructed before we start having unprotected
ASSERT(args.length() == 2);
// First perform a full GC in order to avoid dead objects.
- isolate->heap()->CollectAllGarbage(Heap::kMakeHeapIterableMask);
+ isolate->heap()->CollectAllGarbage(Heap::kMakeHeapIterableMask,
+ "%DebugConstructedBy");
// Check parameters.
CONVERT_CHECKED(JSFunction, constructor, args[0]);
// Performs a GC.
// Presently, it only does a full GC.
RUNTIME_FUNCTION(MaybeObject*, Runtime_CollectGarbage) {
- isolate->heap()->CollectAllGarbage(true);
+ isolate->heap()->CollectAllGarbage(true, "%CollectGarbage");
return isolate->heap()->undefined_value();
}
}
// Try to do a garbage collection; ignore it if it fails. The C
// entry stub will throw an out-of-memory exception in that case.
- isolate->heap()->CollectGarbage(failure->allocation_space());
+ isolate->heap()->CollectGarbage(failure->allocation_space(),
+ "Runtime::PerformGC");
} else {
// Handle last resort GC and make sure to allow future allocations
// to grow the heap without causing GCs (if possible).
isolate->counters()->gc_last_resort_from_js()->Increment();
- isolate->heap()->CollectAllGarbage(Heap::kNoGCFlags);
+ isolate->heap()->CollectAllGarbage(Heap::kNoGCFlags,
+ "Runtime::PerformGC");
}
}
Page* FirstPage() { return anchor_.next_page(); }
Page* LastPage() { return anchor_.prev_page(); }
- // Returns zero for pages that have so little fragmentation that it is not
- // worth defragmenting them. Otherwise a positive integer that gives an
- // estimate of fragmentation on an arbitrary scale.
- int Fragmentation(Page* p) {
- FreeList::SizeStats sizes;
- free_list_.CountFreeListItems(p, &sizes);
-
- intptr_t ratio;
- intptr_t ratio_threshold;
- if (identity() == CODE_SPACE) {
- ratio = (sizes.medium_size_ * 10 + sizes.large_size_ * 2) * 100 /
- Page::kObjectAreaSize;
- ratio_threshold = 10;
- } else {
- ratio = (sizes.small_size_ * 5 + sizes.medium_size_) * 100 /
- Page::kObjectAreaSize;
- ratio_threshold = 15;
- }
-
- if (FLAG_trace_fragmentation) {
- PrintF("%p [%d]: %d (%.2f%%) %d (%.2f%%) %d (%.2f%%) %d (%.2f%%) %s\n",
- reinterpret_cast<void*>(p),
- identity(),
- static_cast<int>(sizes.small_size_),
- static_cast<double>(sizes.small_size_ * 100) /
- Page::kObjectAreaSize,
- static_cast<int>(sizes.medium_size_),
- static_cast<double>(sizes.medium_size_ * 100) /
- Page::kObjectAreaSize,
- static_cast<int>(sizes.large_size_),
- static_cast<double>(sizes.large_size_ * 100) /
- Page::kObjectAreaSize,
- static_cast<int>(sizes.huge_size_),
- static_cast<double>(sizes.huge_size_ * 100) /
- Page::kObjectAreaSize,
- (ratio > ratio_threshold) ? "[fragmented]" : "");
- }
-
- if (FLAG_always_compact && sizes.Total() != Page::kObjectAreaSize) {
- return 1;
- }
- if (ratio <= ratio_threshold) return 0; // Not fragmented.
-
- return static_cast<int>(ratio - ratio_threshold);
+ void CountFreeListItems(Page* p, FreeList::SizeStats* sizes) {
+ free_list_.CountFreeListItems(p, sizes);
}
void EvictEvacuationCandidatesFromFreeLists();