GCIdleTimeAction GCIdleTimeHandler::Compute(int idle_time_in_ms,
- int contexts_disposed,
- size_t size_of_objects,
- bool incremental_marking_stopped,
+ HeapState heap_state,
GCTracer* gc_tracer) {
if (IsIdleRoundFinished()) {
- if (EnoughGarbageSinceLastIdleRound() || contexts_disposed > 0) {
+ if (EnoughGarbageSinceLastIdleRound() || heap_state.contexts_disposed > 0) {
StartIdleRound();
} else {
return GCIdleTimeAction::Nothing();
}
}
- if (incremental_marking_stopped) {
+ if (heap_state.incremental_marking_stopped) {
size_t speed =
static_cast<size_t>(gc_tracer->MarkCompactSpeedInBytesPerMillisecond());
- if (idle_time_in_ms >=
- static_cast<int>(EstimateMarkCompactTime(size_of_objects, speed))) {
+ if (idle_time_in_ms >= static_cast<int>(EstimateMarkCompactTime(
+ heap_state.size_of_objects, speed))) {
// If there are no more than two GCs left in this idle round and we are
// allowed to do a full GC, then make those GCs full in order to compact
// the code space.
// can get rid of this special case and always start incremental marking.
int remaining_mark_sweeps =
kMaxMarkCompactsInIdleRound - mark_compacts_since_idle_round_started_;
- if (contexts_disposed > 0 || remaining_mark_sweeps <= 2) {
+ if (heap_state.contexts_disposed > 0 || remaining_mark_sweeps <= 2 ||
+ !heap_state.can_start_incremental_marking) {
return GCIdleTimeAction::FullGC();
}
}
+ if (!heap_state.can_start_incremental_marking) {
+ return GCIdleTimeAction::Nothing();
+ }
}
intptr_t speed = gc_tracer->IncrementalMarkingSpeedInBytesPerMillisecond();
size_t step_size =
intptr_t parameter;
};
+
class GCTracer;
// The idle time handler makes decisions about which garbage collection
// Maximum mark-compact time returned by EstimateMarkCompactTime.
static const size_t kMaxMarkCompactTimeInMs;
+  // Snapshot of the heap statistics that Compute() consults when deciding
+  // which idle-time GC action to take. Filled in by Heap::IdleNotification()
+  // immediately before each Compute() call.
+  struct HeapState {
+    // Contexts disposed since the last idle notification (the caller resets
+    // its counter to 0 after Compute() runs).
+    int contexts_disposed;
+    // Total size of live objects on the heap, in bytes (from SizeOfObjects()).
+    size_t size_of_objects;
+    // True when incremental marking is currently stopped
+    // (incremental_marking()->IsStopped()).
+    bool incremental_marking_stopped;
+    // True when a new incremental-marking cycle may be started now
+    // (Heap::WorthActivatingIncrementalMarking()).
+    bool can_start_incremental_marking;
+  };
+
GCIdleTimeHandler()
: mark_compacts_since_idle_round_started_(0),
scavenges_since_last_idle_round_(0) {}
- GCIdleTimeAction Compute(int idle_time_in_ms, int contexts_disposed,
- size_t size_of_objects,
- bool incremental_marking_stopped,
+ GCIdleTimeAction Compute(int idle_time_in_ms, HeapState heap_state,
GCTracer* gc_tracer);
void NotifyIdleMarkCompact() {
// Start incremental marking for the next cycle. The heap snapshot
// generator needs incremental marking to stay off after it aborted.
if (!mark_compact_collector()->abort_incremental_marking() &&
- incremental_marking()->IsStopped() &&
- incremental_marking()->WorthActivating() && NextGCIsLikelyToBeFull()) {
+ WorthActivatingIncrementalMarking()) {
incremental_marking()->Start();
}
}
+// Returns true when a new incremental-marking cycle can and should begin:
+// marking must currently be stopped, the incremental marker must consider
+// activation worthwhile, and the next GC is expected to be a full collection.
+// Factored out so both the post-mark-compact restart path and the idle-time
+// handler's HeapState use the same predicate.
+bool Heap::WorthActivatingIncrementalMarking() {
+  return incremental_marking()->IsStopped() &&
+         incremental_marking()->WorthActivating() && NextGCIsLikelyToBeFull();
+}
+
+
bool Heap::IdleNotification(int idle_time_in_ms) {
// If incremental marking is off, we do not perform idle notification.
if (!FLAG_incremental_marking) return true;
HistogramTimerScope idle_notification_scope(
isolate_->counters()->gc_idle_notification());
- GCIdleTimeAction action = gc_idle_time_handler_.Compute(
- idle_time_in_ms, contexts_disposed_, static_cast<size_t>(SizeOfObjects()),
- incremental_marking()->IsStopped(), tracer());
+ GCIdleTimeHandler::HeapState heap_state;
+ heap_state.contexts_disposed = contexts_disposed_;
+ heap_state.size_of_objects = static_cast<size_t>(SizeOfObjects());
+ heap_state.incremental_marking_stopped = incremental_marking()->IsStopped();
+ heap_state.can_start_incremental_marking =
+ WorthActivatingIncrementalMarking();
+
+ GCIdleTimeAction action =
+ gc_idle_time_handler_.Compute(idle_time_in_ms, heap_state, tracer());
+
contexts_disposed_ = 0;
bool result = false;
switch (action.type) {
void AdvanceIdleIncrementalMarking(intptr_t step_size);
+ bool WorthActivatingIncrementalMarking();
+
void ClearObjectStats(bool clear_last_time_stats = false);
void set_weak_object_to_code_table(Object* value) {