1 // Copyright 2015 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
5 #include "src/heap/memory-reducer.h"
8 #include "src/heap/gc-tracer.h"
9 #include "src/heap/heap-inl.h"
10 #include "src/utils.h"
// Delay (ms) until the next memory-reducer GC attempt while waiting.
const int MemoryReducer::kLongDelayMs = 5000;
// Shorter re-check delay used after a GC that is likely to free more memory
// (see Step, kRun -> kWait transition).
const int MemoryReducer::kShortDelayMs = 500;
// If no GC happened for this long, WatchdogGC() forces one on the next timer
// tick even when the allocation rate is not low.
const int MemoryReducer::kWatchdogDelayMs = 100000;
// Upper bound on the number of GCs started within one memory-reducing cycle.
const int MemoryReducer::kMaxNumberOfGCs = 3;
21 MemoryReducer::TimerTask::TimerTask(MemoryReducer* memory_reducer)
22 : CancelableTask(memory_reducer->heap()->isolate()),
23 memory_reducer_(memory_reducer) {}
26 void MemoryReducer::TimerTask::RunInternal() {
27 Heap* heap = memory_reducer_->heap();
29 double time_ms = heap->MonotonicallyIncreasingTimeInMs();
30 heap->tracer()->SampleAllocation(time_ms, heap->NewSpaceAllocationCounter(),
31 heap->OldGenerationAllocationCounter());
33 event.time_ms = time_ms;
34 event.low_allocation_rate = heap->HasLowAllocationRate();
35 event.can_start_incremental_gc =
36 heap->incremental_marking()->IsStopped() &&
37 heap->incremental_marking()->CanBeActivated();
38 memory_reducer_->NotifyTimer(event);
42 void MemoryReducer::NotifyTimer(const Event& event) {
43 DCHECK_EQ(kTimer, event.type);
44 DCHECK_EQ(kWait, state_.action);
45 state_ = Step(state_, event);
46 if (state_.action == kRun) {
47 DCHECK(heap()->incremental_marking()->IsStopped());
48 DCHECK(FLAG_incremental_marking);
49 if (FLAG_trace_gc_verbose) {
50 PrintIsolate(heap()->isolate(), "Memory reducer: started GC #%d\n",
53 if (heap()->ShouldOptimizeForMemoryUsage()) {
54 // Do full GC if memory usage has higher priority than latency. This is
55 // important for background tabs that do not send idle notifications.
56 heap()->CollectAllGarbage(Heap::kReduceMemoryFootprintMask,
59 heap()->StartIdleIncrementalMarking();
61 } else if (state_.action == kWait) {
62 if (!heap()->incremental_marking()->IsStopped() &&
63 heap()->ShouldOptimizeForMemoryUsage()) {
64 // Make progress with pending incremental marking if memory usage has
65 // higher priority than latency. This is important for background tabs
66 // that do not send idle notifications.
67 const int kIncrementalMarkingDelayMs = 500;
68 double deadline = heap()->MonotonicallyIncreasingTimeInMs() +
69 kIncrementalMarkingDelayMs;
70 heap()->incremental_marking()->AdvanceIncrementalMarking(
71 0, deadline, i::IncrementalMarking::StepActions(
72 i::IncrementalMarking::NO_GC_VIA_STACK_GUARD,
73 i::IncrementalMarking::FORCE_MARKING,
74 i::IncrementalMarking::FORCE_COMPLETION));
75 heap()->FinalizeIncrementalMarkingIfComplete(
76 "Memory reducer: finalize incremental marking");
78 // Re-schedule the timer.
79 ScheduleTimer(state_.next_gc_start_ms - event.time_ms);
80 if (FLAG_trace_gc_verbose) {
81 PrintIsolate(heap()->isolate(), "Memory reducer: waiting for %.f ms\n",
82 state_.next_gc_start_ms - event.time_ms);
88 void MemoryReducer::NotifyMarkCompact(const Event& event) {
89 DCHECK_EQ(kMarkCompact, event.type);
90 Action old_action = state_.action;
91 state_ = Step(state_, event);
92 if (old_action != kWait && state_.action == kWait) {
93 // If we are transitioning to the WAIT state, start the timer.
94 ScheduleTimer(state_.next_gc_start_ms - event.time_ms);
96 if (old_action == kRun) {
97 if (FLAG_trace_gc_verbose) {
98 PrintIsolate(heap()->isolate(), "Memory reducer: finished GC #%d (%s)\n",
100 state_.action == kWait ? "will do more" : "done");
106 void MemoryReducer::NotifyContextDisposed(const Event& event) {
107 DCHECK_EQ(kContextDisposed, event.type);
108 Action old_action = state_.action;
109 state_ = Step(state_, event);
110 if (old_action != kWait && state_.action == kWait) {
111 // If we are transitioning to the WAIT state, start the timer.
112 ScheduleTimer(state_.next_gc_start_ms - event.time_ms);
117 void MemoryReducer::NotifyBackgroundIdleNotification(const Event& event) {
118 DCHECK_EQ(kBackgroundIdleNotification, event.type);
119 Action old_action = state_.action;
120 int old_started_gcs = state_.started_gcs;
121 state_ = Step(state_, event);
122 if (old_action == kWait && state_.action == kWait &&
123 old_started_gcs + 1 == state_.started_gcs) {
124 DCHECK(heap()->incremental_marking()->IsStopped());
125 // TODO(ulan): Replace it with incremental marking GC once
126 // chromium:490559 is fixed.
127 if (event.time_ms > state_.last_gc_time_ms + kLongDelayMs) {
128 heap()->CollectAllGarbage(Heap::kReduceMemoryFootprintMask,
129 "memory reducer background GC");
131 DCHECK(FLAG_incremental_marking);
132 heap()->StartIdleIncrementalMarking();
133 if (FLAG_trace_gc_verbose) {
134 PrintIsolate(heap()->isolate(),
135 "Memory reducer: started GC #%d"
136 " (background idle)\n",
144 bool MemoryReducer::WatchdogGC(const State& state, const Event& event) {
145 return state.last_gc_time_ms != 0 &&
146 event.time_ms > state.last_gc_time_ms + kWatchdogDelayMs;
150 // For specification of this function see the comment for MemoryReducer class.
151 MemoryReducer::State MemoryReducer::Step(const State& state,
152 const Event& event) {
153 if (!FLAG_incremental_marking) {
154 return State(kDone, 0, 0, state.last_gc_time_ms);
156 switch (state.action) {
158 if (event.type == kTimer || event.type == kBackgroundIdleNotification) {
161 DCHECK(event.type == kContextDisposed || event.type == kMarkCompact);
163 kWait, 0, event.time_ms + kLongDelayMs,
164 event.type == kMarkCompact ? event.time_ms : state.last_gc_time_ms);
167 switch (event.type) {
168 case kContextDisposed:
171 if (state.started_gcs >= kMaxNumberOfGCs) {
172 return State(kDone, kMaxNumberOfGCs, 0.0, state.last_gc_time_ms);
173 } else if (event.can_start_incremental_gc &&
174 (event.low_allocation_rate || WatchdogGC(state, event))) {
175 if (state.next_gc_start_ms <= event.time_ms) {
176 return State(kRun, state.started_gcs + 1, 0.0,
177 state.last_gc_time_ms);
182 return State(kWait, state.started_gcs, event.time_ms + kLongDelayMs,
183 state.last_gc_time_ms);
185 case kBackgroundIdleNotification:
186 if (event.can_start_incremental_gc &&
187 state.started_gcs < kMaxNumberOfGCs) {
188 return State(kWait, state.started_gcs + 1,
189 event.time_ms + kLongDelayMs, state.last_gc_time_ms);
194 return State(kWait, state.started_gcs, event.time_ms + kLongDelayMs,
198 if (event.type != kMarkCompact) {
201 if (state.started_gcs < kMaxNumberOfGCs &&
202 (event.next_gc_likely_to_collect_more || state.started_gcs == 1)) {
203 return State(kWait, state.started_gcs, event.time_ms + kShortDelayMs,
206 return State(kDone, kMaxNumberOfGCs, 0.0, event.time_ms);
211 return State(kDone, 0, 0, 0.0); // Make the compiler happy.
215 void MemoryReducer::ScheduleTimer(double delay_ms) {
216 DCHECK(delay_ms > 0);
217 // Leave some room for precision error in task scheduler.
218 const double kSlackMs = 100;
219 v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(heap()->isolate());
220 auto timer_task = new MemoryReducer::TimerTask(this);
221 V8::GetCurrentPlatform()->CallDelayedOnForegroundThread(
222 isolate, timer_task, (delay_ms + kSlackMs) / 1000.0);
226 void MemoryReducer::TearDown() { state_ = State(kDone, 0, 0, 0.0); }
228 } // namespace internal