int post_gc_processing_count = 0;
-void GlobalHandles::PostGarbageCollectionProcessing() {
+bool GlobalHandles::PostGarbageCollectionProcessing() {
// Process weak global handle callbacks. This must be done after the
// GC is completely done, because the callbacks may invoke arbitrary
// API functions.
// At the same time deallocate all DESTROYED nodes.
ASSERT(Heap::gc_state() == Heap::NOT_IN_GC);
const int initial_post_gc_processing_count = ++post_gc_processing_count;
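+  // Set below when a destroyed node is deallocated; the objects that such a
+  // handle kept alive may then be collectible by a further major GC.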
+ bool next_gc_could_collect_more = false;
Node** p = &head_;
while (*p != NULL) {
if ((*p)->PostGarbageCollectionProcessing()) {
}
node->set_next_free(first_deallocated());
set_first_deallocated(node);
+ next_gc_could_collect_more = true;
} else {
p = (*p)->next_addr();
}
if (first_deallocated()) {
first_deallocated()->set_next(head());
}
+
+ return next_gc_could_collect_more;
}
static bool IsWeak(Object** location);
// Process pending weak handles.
- static void PostGarbageCollectionProcessing();
+  // Returns true if the next major GC is likely to collect more garbage.
+ static bool PostGarbageCollectionProcessing();
// Iterates over all strong handles.
static void IterateStrongRoots(ObjectVisitor* v);
}
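+// Convenience overload: callers that only specify an allocation space keep
+// working as before, with the collector chosen by SelectGarbageCollector.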
+bool Heap::CollectGarbage(AllocationSpace space) {
+ return CollectGarbage(space, SelectGarbageCollector(space));
+}
+
+
MaybeObject* Heap::PrepareForCompare(String* str) {
  // Always flatten small strings, and force flattening of long strings
  // after we have accumulated a certain amount that we failed to flatten.
} \
if (!__maybe_object__->IsRetryAfterGC()) RETURN_EMPTY; \
Counters::gc_last_resort_from_handles.Increment(); \
- Heap::CollectAllGarbage(false); \
+ Heap::CollectAllAvailableGarbage(); \
{ \
AlwaysAllocateScope __scope__; \
__maybe_object__ = FUNCTION_CALL; \
    } \
-void Heap::CollectGarbage(AllocationSpace space) {
+void Heap::CollectAllAvailableGarbage() {
+  // The exact choice of space passed to CollectGarbage below does not
+  // matter, so long as we do not specify NEW_SPACE, which would not cause
+  // a full GC.
+ MarkCompactCollector::SetForceCompaction(true);
+
+  // A major GC invokes weak handle callbacks for weakly reachable handles,
+  // but the objects those handles kept alive are not collected until the
+  // next major GC. Therefore, if we are collecting aggressively and a weak
+  // handle callback has been invoked, we rerun the major GC to release the
+  // objects that have since become garbage.
+  // Note: as weak callbacks can execute arbitrary code, we cannot hope
+  // that there will eventually be no weak callback invocations.
+  // Therefore we stop re-collecting after several attempts.
+ const int kMaxNumberOfAttempts = 7;
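+  // CollectGarbage forwards the result of weak handle processing, so the
+  // loop stops as soon as a pass reports that another major GC is unlikely
+  // to collect more garbage.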
+ for (int attempt = 0; attempt < kMaxNumberOfAttempts; attempt++) {
+ if (!CollectGarbage(OLD_POINTER_SPACE, MARK_COMPACTOR)) {
+ break;
+ }
+ }
+ MarkCompactCollector::SetForceCompaction(false);
+}
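+// Usage note: a low-memory notification from the embedder is a typical
+// caller; it invokes CollectAllAvailableGarbage() once and relies on the
+// retry loop above to decide how many collection passes are worthwhile.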
+
+
+bool Heap::CollectGarbage(AllocationSpace space, GarbageCollector collector) {
// The VM is in the GC state until exiting this function.
VMState state(GC);
allocation_timeout_ = Max(6, FLAG_gc_interval);
#endif
+ bool next_gc_likely_to_collect_more = false;
+
{ GCTracer tracer;
GarbageCollectionPrologue();
// The GC count was incremented in the prologue. Tell the tracer about
// it.
tracer.set_gc_count(gc_count_);
- GarbageCollector collector = SelectGarbageCollector(space);
// Tell the tracer which collector we've selected.
tracer.set_collector(collector);
? &Counters::gc_scavenger
: &Counters::gc_compactor;
rate->Start();
- PerformGarbageCollection(collector, &tracer);
+ next_gc_likely_to_collect_more =
+ PerformGarbageCollection(collector, &tracer);
rate->Stop();
GarbageCollectionEpilogue();
if (FLAG_log_gc) HeapProfiler::WriteSample();
if (CpuProfiler::is_profiling()) CpuProfiler::ProcessMovedFunctions();
#endif
+
+ return next_gc_likely_to_collect_more;
}
survival_rate_ = survival_rate;
}
-void Heap::PerformGarbageCollection(GarbageCollector collector,
+bool Heap::PerformGarbageCollection(GarbageCollector collector,
GCTracer* tracer) {
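+  // Becomes true when weak global handle processing (run below after a
+  // mark-compact) releases handles; the value is returned to CollectGarbage.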
+ bool next_gc_likely_to_collect_more = false;
+
if (collector != SCAVENGER) {
PROFILE(CodeMovingGCEvent());
}
if (collector == MARK_COMPACTOR) {
DisableAssertNoAllocation allow_allocation;
GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
- GlobalHandles::PostGarbageCollectionProcessing();
+ next_gc_likely_to_collect_more =
+ GlobalHandles::PostGarbageCollectionProcessing();
}
// Update relocatables.
global_gc_epilogue_callback_();
}
VerifySymbolTable();
+
+ return next_gc_likely_to_collect_more;
}
static void GarbageCollectionEpilogue();
  // Performs a garbage collection operation.
- // Returns whether required_space bytes are available after the collection.
- static void CollectGarbage(AllocationSpace space);
+ // Returns whether there is a chance that another major GC could
+ // collect more garbage.
+ static bool CollectGarbage(AllocationSpace space, GarbageCollector collector);
+
+  // Performs a garbage collection operation.
+ // Returns whether there is a chance that another major GC could
+ // collect more garbage.
+ inline static bool CollectGarbage(AllocationSpace space);
// Performs a full garbage collection. Force compaction if the
// parameter is true.
static void CollectAllGarbage(bool force_compaction);
+  // Last-resort GC; tries to reclaim as much memory as possible.
+ static void CollectAllAvailableGarbage();
+
// Notify the heap that a context has been disposed.
static int NotifyContextDisposed() { return ++contexts_disposed_; }
static GarbageCollector SelectGarbageCollector(AllocationSpace space);
// Performs garbage collection
- static void PerformGarbageCollection(GarbageCollector collector,
+  // Returns whether there is a chance that another major GC could
+  // collect more garbage.
+ static bool PerformGarbageCollection(GarbageCollector collector,
GCTracer* tracer);
// Allocate an uninitialized object in map space. The behavior is identical