PrintF("incremental_marking_stopped=%d ", incremental_marking_stopped);
PrintF("can_start_incremental_marking=%d ", can_start_incremental_marking);
PrintF("sweeping_in_progress=%d ", sweeping_in_progress);
+ PrintF("has_low_allocation_rate=%d", has_low_allocation_rate);
PrintF("mark_compact_speed=%" V8_PTR_PREFIX "d ",
mark_compact_speed_in_bytes_per_ms);
PrintF("incremental_marking_speed=%" V8_PTR_PREFIX "d ",
PrintF("new_space_capacity=%" V8_PTR_PREFIX "d ", new_space_capacity);
PrintF("new_space_allocation_throughput=%" V8_PTR_PREFIX "d ",
new_space_allocation_throughput_in_bytes_per_ms);
- PrintF("current_allocation_throughput=%" V8_PTR_PREFIX "d",
- current_allocation_throughput_in_bytes_per_ms);
}
bool can_start_incremental_marking;
bool sweeping_in_progress;
bool sweeping_completed;
+ bool has_low_allocation_rate;
size_t mark_compact_speed_in_bytes_per_ms;
size_t incremental_marking_speed_in_bytes_per_ms;
size_t final_incremental_mark_compact_speed_in_bytes_per_ms;
size_t used_new_space_size;
size_t new_space_capacity;
size_t new_space_allocation_throughput_in_bytes_per_ms;
- size_t current_allocation_throughput_in_bytes_per_ms;
};
GCTracer::GCTracer(Heap* heap)
allocation_duration_since_gc_(0.0),
new_space_allocation_in_bytes_since_gc_(0),
old_generation_allocation_in_bytes_since_gc_(0),
+ combined_mark_compact_speed_cache_(0.0),
start_counter_(0) {
current_ = Event(Event::START, NULL, NULL);
current_.end_time = base::OS::TimeCurrentMillis();
.cumulative_pure_incremental_marking_duration;
longest_incremental_marking_step_ = 0.0;
incremental_mark_compactor_events_.push_front(current_);
+ combined_mark_compact_speed_cache_ = 0.0;
} else {
DCHECK(current_.incremental_marking_bytes == 0);
DCHECK(current_.incremental_marking_duration == 0);
DCHECK(current_.pure_incremental_marking_duration == 0);
longest_incremental_marking_step_ = 0.0;
mark_compactor_events_.push_front(current_);
+ combined_mark_compact_speed_cache_ = 0.0;
}
// TODO(ernstm): move the code below out of GCTracer.
allocation_time_ms_ = current_ms;
new_space_allocation_events_.push_front(AllocationEvent(
allocation_duration_since_gc_, new_space_allocation_in_bytes_since_gc_));
- allocation_events_.push_front(
+ old_generation_allocation_events_.push_front(
AllocationEvent(allocation_duration_since_gc_,
- new_space_allocation_in_bytes_since_gc_ +
- old_generation_allocation_in_bytes_since_gc_));
+ old_generation_allocation_in_bytes_since_gc_));
allocation_duration_since_gc_ = 0;
new_space_allocation_in_bytes_since_gc_ = 0;
old_generation_allocation_in_bytes_since_gc_ = 0;
}
if (durations == 0.0) return 0;
-
- return static_cast<intptr_t>(bytes / durations);
+ // Make sure the result is at least 1.
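+ // (A result of 0 is reserved to mean that no events have been recorded.)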
+ return Max<size_t>(static_cast<size_t>(bytes / durations + 0.5), 1);
}
}
if (durations == 0.0) return 0;
-
- return static_cast<intptr_t>(bytes / durations);
+ // Make sure the result is at least 1.
+ return Max<size_t>(static_cast<size_t>(bytes / durations + 0.5), 1);
}
}
if (durations == 0.0) return 0;
-
- return static_cast<intptr_t>(bytes / durations);
+ // Make sure the result is at least 1.
+ return Max<size_t>(static_cast<size_t>(bytes / durations + 0.5), 1);
}
}
if (durations == 0.0) return 0;
+ // Make sure the result is at least 1.
+ return Max<size_t>(static_cast<size_t>(bytes / durations + 0.5), 1);
+}
- return static_cast<intptr_t>(bytes / durations);
+
+double GCTracer::CombinedMarkCompactSpeedInBytesPerMillisecond() {
+ if (combined_mark_compact_speed_cache_ > 0)
+ return combined_mark_compact_speed_cache_;
+ const double kEpsilon = 1;
+ double speed1 =
+ static_cast<double>(IncrementalMarkingSpeedInBytesPerMillisecond());
+ double speed2 = static_cast<double>(
+ FinalIncrementalMarkCompactSpeedInBytesPerMillisecond());
+ if (speed1 + speed2 < kEpsilon) {
+ // No data for the incremental marking speed.
+ // Return the non-incremental mark-compact speed.
+ combined_mark_compact_speed_cache_ =
+ static_cast<double>(MarkCompactSpeedInBytesPerMillisecond());
+ } else {
+ // Combine the speed of incremental step and the speed of the final step.
+ // 1 / (1 / speed1 + 1 / speed2) = speed1 * speed2 / (speed1 + speed2).
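+ // For example, speed1 = 200 and speed2 = 50 (bytes/ms) combine to 40 bytes/ms.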
+ combined_mark_compact_speed_cache_ = speed1 * speed2 / (speed1 + speed2);
+ }
+ return combined_mark_compact_speed_cache_;
}
-size_t GCTracer::NewSpaceAllocationThroughputInBytesPerMillisecond() const {
+size_t GCTracer::NewSpaceAllocationThroughputInBytesPerMillisecond(
+ double time_ms) const {
size_t bytes = new_space_allocation_in_bytes_since_gc_;
double durations = allocation_duration_since_gc_;
AllocationEventBuffer::const_iterator iter =
new_space_allocation_events_.begin();
const size_t max_bytes = static_cast<size_t>(-1);
while (iter != new_space_allocation_events_.end() &&
- bytes < max_bytes - bytes) {
+ bytes < max_bytes - bytes && (time_ms == 0 || durations < time_ms)) {
bytes += iter->allocation_in_bytes_;
durations += iter->duration_;
++iter;
}
if (durations == 0.0) return 0;
-
- return static_cast<size_t>(bytes / durations + 0.5);
+ // Make sure the result is at least 1.
+ return Max<size_t>(static_cast<size_t>(bytes / durations + 0.5), 1);
}
-size_t GCTracer::AllocationThroughputInBytesPerMillisecond(
+size_t GCTracer::OldGenerationAllocationThroughputInBytesPerMillisecond(
double time_ms) const {
- size_t bytes = new_space_allocation_in_bytes_since_gc_ +
- old_generation_allocation_in_bytes_since_gc_;
+ size_t bytes = old_generation_allocation_in_bytes_since_gc_;
double durations = allocation_duration_since_gc_;
- AllocationEventBuffer::const_iterator iter = allocation_events_.begin();
+ AllocationEventBuffer::const_iterator iter =
+ old_generation_allocation_events_.begin();
const size_t max_bytes = static_cast<size_t>(-1);
- while (iter != allocation_events_.end() && bytes < max_bytes - bytes &&
- durations < time_ms) {
+ while (iter != old_generation_allocation_events_.end() &&
+ bytes < max_bytes - bytes && (time_ms == 0 || durations < time_ms)) {
bytes += iter->allocation_in_bytes_;
durations += iter->duration_;
++iter;
}
if (durations == 0.0) return 0;
+ // Make sure the result is at least 1.
+ return Max<size_t>(static_cast<size_t>(bytes / durations + 0.5), 1);
+}
- return static_cast<size_t>(bytes / durations + 0.5);
+
+size_t GCTracer::AllocationThroughputInBytesPerMillisecond(
+ double time_ms) const {
+ return NewSpaceAllocationThroughputInBytesPerMillisecond(time_ms) +
+ OldGenerationAllocationThroughputInBytesPerMillisecond(time_ms);
}
// Returns 0 if no events have been recorded.
intptr_t FinalIncrementalMarkCompactSpeedInBytesPerMillisecond() const;
+ // Compute the overall mark compact speed including incremental steps
+ // and the final mark-compact step.
+ double CombinedMarkCompactSpeedInBytesPerMillisecond();
+
// Allocation throughput in the new space in bytes/millisecond.
// Returns 0 if no allocation events have been recorded.
- size_t NewSpaceAllocationThroughputInBytesPerMillisecond() const;
+ size_t NewSpaceAllocationThroughputInBytesPerMillisecond(
+ double time_ms = 0) const;
+
+ // Allocation throughput in the old generation in bytes/millisecond in the
+ // last time_ms milliseconds.
+ // Returns 0 if no allocation events have been recorded.
+ size_t OldGenerationAllocationThroughputInBytesPerMillisecond(
+ double time_ms = 0) const;
// Allocation throughput in heap in bytes/millisecond in the last time_ms
// milliseconds.
// RingBuffer for allocation events.
AllocationEventBuffer new_space_allocation_events_;
- AllocationEventBuffer allocation_events_;
+ AllocationEventBuffer old_generation_allocation_events_;
// RingBuffer for context disposal events.
ContextDisposalEventBuffer context_disposal_events_;
size_t new_space_allocation_in_bytes_since_gc_;
size_t old_generation_allocation_in_bytes_since_gc_;
+ double combined_mark_compact_speed_cache_;
+
// Counts how many tracers were started without stopping.
int start_counter_;
}
CheckNewSpaceExpansionCriteria();
UpdateNewSpaceAllocationCounter();
- UpdateOldGenerationAllocationCounter();
}
new_space_top_after_last_gc_ = new_space()->top();
last_gc_time_ = MonotonicallyIncreasingTimeInMs();
- ReduceNewSpaceSize(
- tracer()->CurrentAllocationThroughputInBytesPerMillisecond());
+ ReduceNewSpaceSize();
}
}
if (collector == MARK_COMPACTOR) {
+ UpdateOldGenerationAllocationCounter();
// Perform mark-sweep with optional compaction.
MarkCompact();
sweep_generation_++;
old_gen_exhausted_ = false;
old_generation_size_configured_ = true;
+ // This should be updated before PostGarbageCollectionProcessing, which can
+ // cause another GC.
+ old_generation_size_at_last_gc_ = PromotedSpaceSizeOfObjects();
} else {
Scavenge();
}
- // This should be updated before PostGarbageCollectionProcessing, which can
- // cause another GC.
- old_generation_size_at_last_gc_ = PromotedSpaceSizeOfObjects();
UpdateSurvivalStatistics(start_new_space_size);
ConfigureInitialOldGenerationSize();
}
-bool Heap::HasLowAllocationRate(size_t allocation_rate) {
- static const size_t kLowAllocationRate = 1000;
- if (allocation_rate == 0) return false;
- return allocation_rate < kLowAllocationRate;
+bool Heap::HasLowYoungGenerationAllocationRate() {
+ const double high_mutator_utilization = 0.995;
+ double mutator_speed = static_cast<double>(
+ tracer()->NewSpaceAllocationThroughputInBytesPerMillisecond());
+ double gc_speed =
+ static_cast<double>(tracer()->ScavengeSpeedInBytesPerMillisecond());
+ if (mutator_speed == 0 || gc_speed == 0) return false;
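+ // Processing B bytes costs B / mutator_speed ms of mutator time plus
+ // B / gc_speed ms of GC time, so the mutator's share of the total is
+ // (B / mutator_speed) / (B / mutator_speed + B / gc_speed)
+ //     = gc_speed / (mutator_speed + gc_speed).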
+ double mutator_utilization = gc_speed / (mutator_speed + gc_speed);
+ return mutator_utilization > high_mutator_utilization;
}
-void Heap::ReduceNewSpaceSize(size_t allocation_rate) {
- if (!FLAG_predictable && HasLowAllocationRate(allocation_rate)) {
+bool Heap::HasLowOldGenerationAllocationRate() {
+ const double high_mutator_utilization = 0.995;
+ double mutator_speed = static_cast<double>(
+ tracer()->OldGenerationAllocationThroughputInBytesPerMillisecond());
+ double gc_speed = static_cast<double>(
+ tracer()->CombinedMarkCompactSpeedInBytesPerMillisecond());
+ if (mutator_speed == 0 || gc_speed == 0) return false;
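+ // Same mutator-utilization criterion as for the young generation, but based
+ // on old generation allocation speed and the combined mark-compact speed.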
+ double mutator_utilization = gc_speed / (mutator_speed + gc_speed);
+ return mutator_utilization > high_mutator_utilization;
+}
+
+
+bool Heap::HasLowAllocationRate() {
+ return HasLowYoungGenerationAllocationRate() &&
+ HasLowOldGenerationAllocationRate();
+}
+
+
+void Heap::ReduceNewSpaceSize() {
+ if (!FLAG_predictable && HasLowAllocationRate()) {
new_space_.Shrink();
UncommitFromSpace();
}
heap_state.new_space_capacity = new_space_.Capacity();
heap_state.new_space_allocation_throughput_in_bytes_per_ms =
tracer()->NewSpaceAllocationThroughputInBytesPerMillisecond();
- heap_state.current_allocation_throughput_in_bytes_per_ms =
- tracer()->CurrentAllocationThroughputInBytesPerMillisecond();
+ heap_state.has_low_allocation_rate = HasLowAllocationRate();
intptr_t limit = old_generation_allocation_limit_;
- if (HasLowAllocationRate(
- heap_state.current_allocation_throughput_in_bytes_per_ms)) {
+ if (heap_state.has_low_allocation_rate) {
limit = idle_old_generation_allocation_limit_;
}
heap_state.can_start_incremental_marking =
void SelectScavengingVisitorsTable();
- bool HasLowAllocationRate(size_t allocaion_rate);
+ bool HasLowYoungGenerationAllocationRate();
+ bool HasLowOldGenerationAllocationRate();
+ bool HasLowAllocationRate();
- void ReduceNewSpaceSize(size_t allocaion_rate);
+ void ReduceNewSpaceSize();
bool TryFinalizeIdleIncrementalMarking(
double idle_time_in_ms, size_t size_of_objects,
int total_live_bytes = 0;
bool reduce_memory =
- reduce_memory_footprint_ ||
- heap()->HasLowAllocationRate(
- heap()->tracer()->CurrentAllocationThroughputInBytesPerMillisecond());
+ reduce_memory_footprint_ || heap()->HasLowAllocationRate();
if (FLAG_manual_evacuation_candidates_selection) {
for (size_t i = 0; i < pages.size(); i++) {
Page* p = pages[i].second;
Heap* heap = isolate->heap();
size_t counter1 = heap->OldGenerationAllocationCounter();
heap->CollectGarbage(NEW_SPACE);
+ heap->CollectGarbage(NEW_SPACE);
const size_t kSize = 1024;
AllocateInSpace(isolate, kSize, OLD_SPACE);
size_t counter2 = heap->OldGenerationAllocationCounter();
int time2 = 200;
size_t counter2 = 2000;
tracer->SampleAllocation(time2, counter2, 0);
- size_t throughput = tracer->AllocationThroughputInBytesPerMillisecond(100);
+ size_t throughput =
+ tracer->NewSpaceAllocationThroughputInBytesPerMillisecond(100);
CHECK_EQ((counter2 - counter1) / (time2 - time1), throughput);
int time3 = 1000;
size_t counter3 = 30000;
tracer->SampleAllocation(time3, counter3, 0);
- throughput = tracer->AllocationThroughputInBytesPerMillisecond(100);
+ throughput = tracer->NewSpaceAllocationThroughputInBytesPerMillisecond(100);
CHECK_EQ((counter3 - counter1) / (time3 - time1), throughput);
}
int time2 = 200;
size_t counter2 = 2000;
tracer->SampleAllocation(time2, 0, counter2);
- size_t throughput = tracer->AllocationThroughputInBytesPerMillisecond(100);
+ size_t throughput =
+ tracer->OldGenerationAllocationThroughputInBytesPerMillisecond(100);
CHECK_EQ((counter2 - counter1) / (time2 - time1), throughput);
int time3 = 1000;
size_t counter3 = 30000;
tracer->SampleAllocation(time3, 0, counter3);
- throughput = tracer->AllocationThroughputInBytesPerMillisecond(100);
+ throughput =
+ tracer->OldGenerationAllocationThroughputInBytesPerMillisecond(100);
CHECK_EQ((counter3 - counter1) / (time3 - time1), throughput);
}