const double GCIdleTimeHandler::kConservativeTimeRatio = 0.9;
const size_t GCIdleTimeHandler::kMaxMarkCompactTimeInMs = 1000;
+const size_t GCIdleTimeHandler::kMaxFinalIncrementalMarkCompactTimeInMs = 1000;
const size_t GCIdleTimeHandler::kMinTimeForFinalizeSweeping = 100;
const int GCIdleTimeHandler::kMaxMarkCompactsInIdleRound = 7;
const int GCIdleTimeHandler::kIdleScavengeThreshold = 5;
size_t GCIdleTimeHandler::EstimateMarkCompactTime(
size_t size_of_objects, size_t mark_compact_speed_in_bytes_per_ms) {
// TODO(hpayer): Be more precise about the type of mark-compact event. It
- // makes a huge difference if it is incremental or non-incremental and if
- // compaction is happening.
+ // makes a huge difference if compaction is happening.
if (mark_compact_speed_in_bytes_per_ms == 0) {
mark_compact_speed_in_bytes_per_ms = kInitialConservativeMarkCompactSpeed;
}
}
+// Estimates how long (in ms) it will take to finalize incremental
+// mark-compact for |size_of_objects| bytes at the measured speed.
+size_t GCIdleTimeHandler::EstimateFinalIncrementalMarkCompactTime(
+    size_t size_of_objects,
+    size_t final_incremental_mark_compact_speed_in_bytes_per_ms) {
+  // No events recorded yet: fall back to a conservative lower-bound speed,
+  // which yields a deliberately high (safe) time estimate.
+  if (final_incremental_mark_compact_speed_in_bytes_per_ms == 0) {
+    final_incremental_mark_compact_speed_in_bytes_per_ms =
+        kInitialConservativeFinalIncrementalMarkCompactSpeed;
+  }
+  size_t result =
+      size_of_objects / final_incremental_mark_compact_speed_in_bytes_per_ms;
+  // Never report more than kMaxFinalIncrementalMarkCompactTimeInMs.
+  return Min(result, kMaxFinalIncrementalMarkCompactTimeInMs);
+}
+
+
bool GCIdleTimeHandler::ShouldDoScavenge(
size_t idle_time_in_ms, size_t new_space_size, size_t used_new_space_size,
size_t scavenge_speed_in_bytes_per_ms,
}
+// Returns true if the estimated final incremental mark-compact pause fits
+// into the available idle time.
+bool GCIdleTimeHandler::ShouldDoFinalIncrementalMarkCompact(
+    size_t idle_time_in_ms, size_t size_of_objects,
+    size_t final_incremental_mark_compact_speed_in_bytes_per_ms) {
+  return idle_time_in_ms >=
+         EstimateFinalIncrementalMarkCompactTime(
+             size_of_objects,
+             final_incremental_mark_compact_speed_in_bytes_per_ms);
+}
+
+
// The following logic is implemented by the controller:
// (1) If we don't have any idle time, do nothing, unless a context was
// disposed, incremental marking is stopped, and the heap is small. Then do
// conservative lower bound for the mark-compact speed.
static const size_t kInitialConservativeMarkCompactSpeed = 2 * MB;
+  // If we haven't recorded any final incremental mark-compact events yet, we
+  // use a conservative lower bound for the final incremental mark-compact
+  // speed.
+ static const size_t kInitialConservativeFinalIncrementalMarkCompactSpeed =
+ 2 * MB;
+
// Maximum mark-compact time returned by EstimateMarkCompactTime.
static const size_t kMaxMarkCompactTimeInMs;
+ // Maximum final incremental mark-compact time returned by
+ // EstimateFinalIncrementalMarkCompactTime.
+ static const size_t kMaxFinalIncrementalMarkCompactTimeInMs;
+
// Minimum time to finalize sweeping phase. The main thread may wait for
// sweeper threads.
static const size_t kMinTimeForFinalizeSweeping;
bool sweeping_in_progress;
size_t mark_compact_speed_in_bytes_per_ms;
size_t incremental_marking_speed_in_bytes_per_ms;
+ size_t final_incremental_mark_compact_speed_in_bytes_per_ms;
size_t scavenge_speed_in_bytes_per_ms;
size_t used_new_space_size;
size_t new_space_capacity;
static size_t EstimateMarkCompactTime(
size_t size_of_objects, size_t mark_compact_speed_in_bytes_per_ms);
+  // Estimates the time (in ms) needed to finalize incremental mark-compact
+  // for the given object size and measured speed.
+  static size_t EstimateFinalIncrementalMarkCompactTime(
+      size_t size_of_objects,
+      size_t final_incremental_mark_compact_speed_in_bytes_per_ms);
+
static bool ShouldDoMarkCompact(size_t idle_time_in_ms,
size_t size_of_objects,
size_t mark_compact_speed_in_bytes_per_ms);
static bool ShouldDoContextDisposalMarkCompact(bool context_disposed,
double contexts_disposal_rate);
+ static bool ShouldDoFinalIncrementalMarkCompact(
+ size_t idle_time_in_ms, size_t size_of_objects,
+ size_t final_incremental_mark_compact_speed_in_bytes_per_ms);
+
static bool ShouldDoScavenge(
size_t idle_time_in_ms, size_t new_space_size, size_t used_new_space_size,
size_t scavenger_speed_in_bytes_per_ms,
return "Scavenge";
}
case MARK_COMPACTOR:
+ case INCREMENTAL_MARK_COMPACTOR:
if (short_name) {
return "ms";
} else {
start_counter_(0) {
current_ = Event(Event::START, NULL, NULL);
current_.end_time = base::OS::TimeCurrentMillis();
- previous_ = previous_mark_compactor_event_ = current_;
+ previous_ = previous_incremental_mark_compactor_event_ = current_;
}
reinterpret_cast<intptr_t>((heap_->new_space()->top()) -
new_space_top_after_gc_));
}
- if (current_.type == Event::MARK_COMPACTOR)
- previous_mark_compactor_event_ = current_;
+ if (current_.type == Event::INCREMENTAL_MARK_COMPACTOR)
+ previous_incremental_mark_compactor_event_ = current_;
if (collector == SCAVENGER) {
current_ = Event(Event::SCAVENGER, gc_reason, collector_reason);
- } else {
- current_ = Event(Event::MARK_COMPACTOR, gc_reason, collector_reason);
+ } else if (collector == MARK_COMPACTOR) {
+ if (heap_->incremental_marking()->IsMarking()) {
+ current_ =
+ Event(Event::INCREMENTAL_MARK_COMPACTOR, gc_reason, collector_reason);
+ } else {
+ current_ = Event(Event::MARK_COMPACTOR, gc_reason, collector_reason);
+ }
}
current_.start_time = start_time;
}
DCHECK(start_counter_ >= 0);
- DCHECK(
- (collector == SCAVENGER && current_.type == Event::SCAVENGER) ||
- (collector == MARK_COMPACTOR && current_.type == Event::MARK_COMPACTOR));
+ DCHECK((collector == SCAVENGER && current_.type == Event::SCAVENGER) ||
+ (collector == MARK_COMPACTOR &&
+ (current_.type == Event::MARK_COMPACTOR ||
+ current_.type == Event::INCREMENTAL_MARK_COMPACTOR)));
current_.end_time = base::OS::TimeCurrentMillis();
current_.end_object_size = heap_->SizeOfObjects();
current_.cumulative_pure_incremental_marking_duration -
previous_.cumulative_pure_incremental_marking_duration;
scavenger_events_.push_front(current_);
- } else {
+ } else if (current_.type == Event::INCREMENTAL_MARK_COMPACTOR) {
current_.incremental_marking_steps =
current_.cumulative_incremental_marking_steps -
- previous_mark_compactor_event_.cumulative_incremental_marking_steps;
+ previous_incremental_mark_compactor_event_
+ .cumulative_incremental_marking_steps;
current_.incremental_marking_bytes =
current_.cumulative_incremental_marking_bytes -
- previous_mark_compactor_event_.cumulative_incremental_marking_bytes;
+ previous_incremental_mark_compactor_event_
+ .cumulative_incremental_marking_bytes;
current_.incremental_marking_duration =
current_.cumulative_incremental_marking_duration -
- previous_mark_compactor_event_.cumulative_incremental_marking_duration;
+ previous_incremental_mark_compactor_event_
+ .cumulative_incremental_marking_duration;
current_.pure_incremental_marking_duration =
current_.cumulative_pure_incremental_marking_duration -
- previous_mark_compactor_event_
+ previous_incremental_mark_compactor_event_
.cumulative_pure_incremental_marking_duration;
longest_incremental_marking_step_ = 0.0;
+ incremental_mark_compactor_events_.push_front(current_);
+ } else {
+ DCHECK(current_.incremental_marking_bytes == 0);
+ DCHECK(current_.incremental_marking_duration == 0);
+ DCHECK(current_.pure_incremental_marking_duration == 0);
+ DCHECK(longest_incremental_marking_step_ == 0.0);
mark_compactor_events_.push_front(current_);
}
// We haven't completed an entire round of incremental marking, yet.
// Use data from GCTracer instead of data from event buffers.
- if (mark_compactor_events_.empty()) {
+ if (incremental_mark_compactor_events_.empty()) {
return cumulative_incremental_marking_duration_ /
cumulative_incremental_marking_steps_;
}
int steps = 0;
double durations = 0.0;
- EventBuffer::const_iterator iter = mark_compactor_events_.begin();
- while (iter != mark_compactor_events_.end()) {
+ EventBuffer::const_iterator iter = incremental_mark_compactor_events_.begin();
+ while (iter != incremental_mark_compactor_events_.end()) {
steps += iter->incremental_marking_steps;
durations += iter->incremental_marking_duration;
++iter;
double GCTracer::MaxIncrementalMarkingDuration() const {
// We haven't completed an entire round of incremental marking, yet.
// Use data from GCTracer instead of data from event buffers.
- if (mark_compactor_events_.empty()) return longest_incremental_marking_step_;
+ if (incremental_mark_compactor_events_.empty())
+ return longest_incremental_marking_step_;
double max_duration = 0.0;
- EventBuffer::const_iterator iter = mark_compactor_events_.begin();
- while (iter != mark_compactor_events_.end())
+ EventBuffer::const_iterator iter = incremental_mark_compactor_events_.begin();
+ while (iter != incremental_mark_compactor_events_.end())
max_duration = Max(iter->longest_incremental_marking_step, max_duration);
return max_duration;
// We haven't completed an entire round of incremental marking, yet.
// Use data from GCTracer instead of data from event buffers.
- if (mark_compactor_events_.empty()) {
+ if (incremental_mark_compactor_events_.empty()) {
return static_cast<intptr_t>(cumulative_incremental_marking_bytes_ /
cumulative_pure_incremental_marking_duration_);
}
intptr_t bytes = 0;
double durations = 0.0;
- EventBuffer::const_iterator iter = mark_compactor_events_.begin();
- while (iter != mark_compactor_events_.end()) {
+ EventBuffer::const_iterator iter = incremental_mark_compactor_events_.begin();
+ while (iter != incremental_mark_compactor_events_.end()) {
bytes += iter->incremental_marking_bytes;
durations += iter->pure_incremental_marking_duration;
++iter;
EventBuffer::const_iterator iter = mark_compactor_events_.begin();
while (iter != mark_compactor_events_.end()) {
bytes += iter->start_object_size;
- durations += iter->end_time - iter->start_time +
- iter->pure_incremental_marking_duration;
+ durations += iter->end_time - iter->start_time;
+ ++iter;
+ }
+
+ if (durations == 0.0) return 0;
+
+ return static_cast<intptr_t>(bytes / durations);
+}
+
+
+intptr_t GCTracer::FinalIncrementalMarkCompactSpeedInBytesPerMillisecond()
+ const {
+ intptr_t bytes = 0;
+ double durations = 0.0;
+ EventBuffer::const_iterator iter = incremental_mark_compactor_events_.begin();
+ while (iter != incremental_mark_compactor_events_.end()) {
+ bytes += iter->start_object_size;
+ durations += iter->end_time - iter->start_time;
++iter;
}
class Event {
public:
- enum Type { SCAVENGER = 0, MARK_COMPACTOR = 1, START = 2 };
+ enum Type {
+ SCAVENGER = 0,
+ MARK_COMPACTOR = 1,
+ INCREMENTAL_MARK_COMPACTOR = 2,
+ START = 3
+ };
// Default constructor leaves the event uninitialized.
Event() {}
// Incremental marking steps since
// - last event for SCAVENGER events
- // - last MARK_COMPACTOR event for MARK_COMPACTOR events
+ // - last INCREMENTAL_MARK_COMPACTOR event for INCREMENTAL_MARK_COMPACTOR
+ // events
int incremental_marking_steps;
// Bytes marked since creation of tracer (value at start of event).
// Bytes marked since
// - last event for SCAVENGER events
- // - last MARK_COMPACTOR event for MARK_COMPACTOR events
+ // - last INCREMENTAL_MARK_COMPACTOR event for INCREMENTAL_MARK_COMPACTOR
+ // events
intptr_t incremental_marking_bytes;
// Cumulative duration of incremental marking steps since creation of
// Duration of incremental marking steps since
// - last event for SCAVENGER events
- // - last MARK_COMPACTOR event for MARK_COMPACTOR events
+ // - last INCREMENTAL_MARK_COMPACTOR event for INCREMENTAL_MARK_COMPACTOR
+ // events
double incremental_marking_duration;
// Cumulative pure duration of incremental marking steps since creation of
// Duration of pure incremental marking steps since
// - last event for SCAVENGER events
- // - last MARK_COMPACTOR event for MARK_COMPACTOR events
+ // - last INCREMENTAL_MARK_COMPACTOR event for INCREMENTAL_MARK_COMPACTOR
+ // events
double pure_incremental_marking_duration;
// Longest incremental marking step since start of marking.
return MaxDuration(mark_compactor_events_);
}
+  // Computes the mean duration of the recorded INCREMENTAL_MARK_COMPACTOR
+  // events. Returns 0 if no such events have been recorded.
+  double MeanIncrementalMarkCompactorDuration() const {
+    return MeanDuration(incremental_mark_compactor_events_);
+  }
+
+
// Compute the mean step duration of the last incremental marking round.
// Returns 0 if no incremental marking round has been completed.
double MeanIncrementalMarkingDuration() const;
// Returns 0 if no events have been recorded.
intptr_t ScavengeSpeedInBytesPerMillisecond() const;
- // Compute the max mark-sweep speed in bytes/millisecond.
+ // Compute the average mark-sweep speed in bytes/millisecond.
// Returns 0 if no events have been recorded.
intptr_t MarkCompactSpeedInBytesPerMillisecond() const;
+  // Compute the average speed of the final incremental mark-compact events
+  // in bytes/millisecond.
+  // Returns 0 if no events have been recorded.
+ intptr_t FinalIncrementalMarkCompactSpeedInBytesPerMillisecond() const;
+
// Allocation throughput in the new space in bytes/millisecond.
// Returns 0 if no events have been recorded.
intptr_t NewSpaceAllocationThroughputInBytesPerMillisecond() const;
// Compute the max duration of the events in the given ring buffer.
double MaxDuration(const EventBuffer& events) const;
+  // Resets the cumulative counters tracking incremental-marking, marking, and
+  // sweeping work, and the longest recorded incremental marking step.
+  void ClearMarkCompactStatistics() {
+    cumulative_incremental_marking_steps_ = 0;
+    cumulative_incremental_marking_bytes_ = 0;
+    cumulative_incremental_marking_duration_ = 0;
+    cumulative_pure_incremental_marking_duration_ = 0;
+    longest_incremental_marking_step_ = 0;
+    cumulative_marking_duration_ = 0;
+    cumulative_sweeping_duration_ = 0;
+  }
+
// Pointer to the heap that owns this tracer.
Heap* heap_;
// Previous tracer event.
Event previous_;
- // Previous MARK_COMPACTOR event.
- Event previous_mark_compactor_event_;
+ // Previous INCREMENTAL_MARK_COMPACTOR event.
+ Event previous_incremental_mark_compactor_event_;
// RingBuffers for SCAVENGER events.
EventBuffer scavenger_events_;
// RingBuffers for MARK_COMPACTOR events.
EventBuffer mark_compactor_events_;
+ // RingBuffers for INCREMENTAL_MARK_COMPACTOR events.
+ EventBuffer incremental_mark_compactor_events_;
+
// RingBuffer for allocation events.
AllocationEventBuffer allocation_events_;
void Heap::TryFinalizeIdleIncrementalMarking(
size_t idle_time_in_ms, size_t size_of_objects,
- size_t mark_compact_speed_in_bytes_per_ms) {
+ size_t final_incremental_mark_compact_speed_in_bytes_per_ms) {
if (incremental_marking()->IsComplete() ||
(mark_compact_collector()->IsMarkingDequeEmpty() &&
- gc_idle_time_handler_.ShouldDoMarkCompact(
+ gc_idle_time_handler_.ShouldDoFinalIncrementalMarkCompact(
idle_time_in_ms, size_of_objects,
- mark_compact_speed_in_bytes_per_ms))) {
+ final_incremental_mark_compact_speed_in_bytes_per_ms))) {
CollectAllGarbage(kNoGCFlags, "idle notification: finalize incremental");
}
}
static_cast<size_t>(tracer()->MarkCompactSpeedInBytesPerMillisecond());
heap_state.incremental_marking_speed_in_bytes_per_ms = static_cast<size_t>(
tracer()->IncrementalMarkingSpeedInBytesPerMillisecond());
+ heap_state.final_incremental_mark_compact_speed_in_bytes_per_ms =
+ static_cast<size_t>(
+ tracer()->FinalIncrementalMarkCompactSpeedInBytesPerMillisecond());
heap_state.scavenge_speed_in_bytes_per_ms =
static_cast<size_t>(tracer()->ScavengeSpeedInBytesPerMillisecond());
heap_state.used_new_space_size = new_space_.Size();
if (remaining_idle_time_in_ms > 0) {
TryFinalizeIdleIncrementalMarking(
remaining_idle_time_in_ms, heap_state.size_of_objects,
- heap_state.mark_compact_speed_in_bytes_per_ms);
+ heap_state.final_incremental_mark_compact_speed_in_bytes_per_ms);
}
break;
}
}
+// With no recorded speed and zero objects, the estimated finalization time
+// is 0ms, so 16ms of idle time is always sufficient.
+TEST_F(GCIdleTimeHandlerTest, ShouldDoFinalIncrementalMarkCompact) {
+  size_t idle_time_in_ms = 16;
+  EXPECT_TRUE(GCIdleTimeHandler::ShouldDoFinalIncrementalMarkCompact(
+      idle_time_in_ms, 0, 0));
+}
+
+
+// NOTE(review): assumes kSizeOfObjects / kMarkingSpeed exceeds 1ms so the
+// estimate cannot fit into the idle time -- confirm fixture constants.
+TEST_F(GCIdleTimeHandlerTest, DontDoFinalIncrementalMarkCompact) {
+  size_t idle_time_in_ms = 1;
+  EXPECT_FALSE(GCIdleTimeHandler::ShouldDoFinalIncrementalMarkCompact(
+      idle_time_in_ms, kSizeOfObjects, kMarkingSpeed));
+}
+
+
TEST_F(GCIdleTimeHandlerTest, ContextDisposeLowRate) {
GCIdleTimeHandler::HeapState heap_state = DefaultHeapState();
heap_state.contexts_disposed = 1;