size_t scavenge_speed_in_bytes_per_ms,
size_t new_space_allocation_throughput_in_bytes_per_ms) {
size_t new_space_allocation_limit =
- kMaxFrameRenderingIdleTime * scavenge_speed_in_bytes_per_ms;
+ kMaxScheduledIdleTime * scavenge_speed_in_bytes_per_ms;
// If the limit is larger than the new space size, then scavenging used to be
// really fast. We can take advantage of the whole new space.
} else {
// We have to trigger scavenge before we reach the end of new space.
new_space_allocation_limit -=
- new_space_allocation_throughput_in_bytes_per_ms *
- kMaxFrameRenderingIdleTime;
+ new_space_allocation_throughput_in_bytes_per_ms * kMaxScheduledIdleTime;
}
if (scavenge_speed_in_bytes_per_ms == 0) {
// can get rid of this special case and always start incremental marking.
int remaining_mark_sweeps =
kMaxMarkCompactsInIdleRound - mark_compacts_since_idle_round_started_;
- if (static_cast<size_t>(idle_time_in_ms) > kMaxFrameRenderingIdleTime &&
+ if (static_cast<size_t>(idle_time_in_ms) > kMaxScheduledIdleTime &&
(remaining_mark_sweeps <= 2 ||
!heap_state.can_start_incremental_marking)) {
return GCIdleTimeAction::FullGC();
// Number of scavenges that will trigger start of new idle round.
static const int kIdleScavengeThreshold;
- // That is the maximum idle time we will have during frame rendering.
- static const size_t kMaxFrameRenderingIdleTime = 16;
+ // This is the maximum scheduled idle time. Note that it can be more than
+ // 16 ms when there is currently no rendering going on.
+ static const size_t kMaxScheduledIdleTime = 50;
// If we haven't recorded any scavenger events yet, we use a conservative
// lower bound for the scavenger speed.
// NOTE(review): the '-'/'+' prefixed lines below are unified-diff markers
// from the surrounding patch (rename kMaxFrameRenderingIdleTime ->
// kMaxScheduledIdleTime), not C++ code.
//
// Returns true if the most recent idle notification arrived less than
// GCIdleTimeHandler::kMaxScheduledIdleTime ms ago (50 ms after this patch,
// per the header change above; previously the 16 ms frame-rendering budget).
// I.e. the embedder is still feeding us idle time.
bool Heap::RecentIdleNotificationHappened() {
return (last_idle_notification_time_ +
- GCIdleTimeHandler::kMaxFrameRenderingIdleTime) >
+ GCIdleTimeHandler::kMaxScheduledIdleTime) >
MonotonicallyIncreasingTimeInMs();
}