[presubmit] Enable readability/namespace linter checking.
[platform/upstream/v8.git] / src / heap / memory-reducer.cc
1 // Copyright 2015 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "src/heap/memory-reducer.h"
6
7 #include "src/flags.h"
8 #include "src/heap/gc-tracer.h"
9 #include "src/heap/heap-inl.h"
10 #include "src/utils.h"
11 #include "src/v8.h"
12
13 namespace v8 {
14 namespace internal {
15
// Delay (in ms) used when scheduling the next potential GC from the kDone or
// kWait states (see Step below).
const int MemoryReducer::kLongDelayMs = 5000;
// Shorter delay used between consecutive GCs within one reduction cycle
// (applied after a mark-compact finishes in the kRun state).
const int MemoryReducer::kShortDelayMs = 500;
// If more than this many ms passed since the last GC, WatchdogGC allows a
// forced GC even without a low allocation rate.
const int MemoryReducer::kWatchdogDelayMs = 100000;
// Upper bound on started_gcs within one reduction cycle; reaching it moves
// the state machine to kDone.
const int MemoryReducer::kMaxNumberOfGCs = 3;
20
// Constructs the timer task for the given memory reducer. The task is
// registered as a CancelableTask of the reducer's isolate; the reducer
// pointer is kept to deliver the kTimer event in RunInternal.
MemoryReducer::TimerTask::TimerTask(MemoryReducer* memory_reducer)
    : CancelableTask(memory_reducer->heap()->isolate()),
      memory_reducer_(memory_reducer) {}
24
25
26 void MemoryReducer::TimerTask::RunInternal() {
27   Heap* heap = memory_reducer_->heap();
28   Event event;
29   double time_ms = heap->MonotonicallyIncreasingTimeInMs();
30   heap->tracer()->SampleAllocation(time_ms, heap->NewSpaceAllocationCounter(),
31                                    heap->OldGenerationAllocationCounter());
32   event.type = kTimer;
33   event.time_ms = time_ms;
34   event.low_allocation_rate = heap->HasLowAllocationRate();
35   event.can_start_incremental_gc =
36       heap->incremental_marking()->IsStopped() &&
37       heap->incremental_marking()->CanBeActivated();
38   memory_reducer_->NotifyTimer(event);
39 }
40
41
// Handles a kTimer event from TimerTask::RunInternal: advances the state
// machine, then either starts a GC (kRun) or, while still waiting (kWait),
// optionally advances pending incremental marking and re-arms the timer.
void MemoryReducer::NotifyTimer(const Event& event) {
  DCHECK_EQ(kTimer, event.type);
  // Timer events can only arrive while the reducer is in the WAIT state,
  // since the timer is scheduled exactly on transitions into kWait.
  DCHECK_EQ(kWait, state_.action);
  state_ = Step(state_, event);
  if (state_.action == kRun) {
    // Step transitions kWait -> kRun only when can_start_incremental_gc was
    // set, i.e. incremental marking is stopped (see TimerTask::RunInternal).
    DCHECK(heap()->incremental_marking()->IsStopped());
    DCHECK(FLAG_incremental_marking);
    if (FLAG_trace_gc_verbose) {
      PrintIsolate(heap()->isolate(), "Memory reducer: started GC #%d\n",
                   state_.started_gcs);
    }
    if (heap()->ShouldOptimizeForMemoryUsage()) {
      // Do full GC if memory usage has higher priority than latency. This is
      // important for background tabs that do not send idle notifications.
      heap()->CollectAllGarbage(Heap::kReduceMemoryFootprintMask,
                                "memory reducer");
    } else {
      heap()->StartIdleIncrementalMarking();
    }
  } else if (state_.action == kWait) {
    if (!heap()->incremental_marking()->IsStopped() &&
        heap()->ShouldOptimizeForMemoryUsage()) {
      // Make progress with pending incremental marking if memory usage has
      // higher priority than latency. This is important for background tabs
      // that do not send idle notifications.
      const int kIncrementalMarkingDelayMs = 500;
      double deadline = heap()->MonotonicallyIncreasingTimeInMs() +
                        kIncrementalMarkingDelayMs;
      heap()->incremental_marking()->AdvanceIncrementalMarking(
          0, deadline, i::IncrementalMarking::StepActions(
                           i::IncrementalMarking::NO_GC_VIA_STACK_GUARD,
                           i::IncrementalMarking::FORCE_MARKING,
                           i::IncrementalMarking::FORCE_COMPLETION));
      heap()->FinalizeIncrementalMarkingIfComplete(
          "Memory reducer: finalize incremental marking");
    }
    // Re-schedule the timer.
    ScheduleTimer(state_.next_gc_start_ms - event.time_ms);
    if (FLAG_trace_gc_verbose) {
      PrintIsolate(heap()->isolate(), "Memory reducer: waiting for %.f ms\n",
                   state_.next_gc_start_ms - event.time_ms);
    }
  }
}
86
87
88 void MemoryReducer::NotifyMarkCompact(const Event& event) {
89   DCHECK_EQ(kMarkCompact, event.type);
90   Action old_action = state_.action;
91   state_ = Step(state_, event);
92   if (old_action != kWait && state_.action == kWait) {
93     // If we are transitioning to the WAIT state, start the timer.
94     ScheduleTimer(state_.next_gc_start_ms - event.time_ms);
95   }
96   if (old_action == kRun) {
97     if (FLAG_trace_gc_verbose) {
98       PrintIsolate(heap()->isolate(), "Memory reducer: finished GC #%d (%s)\n",
99                    state_.started_gcs,
100                    state_.action == kWait ? "will do more" : "done");
101     }
102   }
103 }
104
105
106 void MemoryReducer::NotifyContextDisposed(const Event& event) {
107   DCHECK_EQ(kContextDisposed, event.type);
108   Action old_action = state_.action;
109   state_ = Step(state_, event);
110   if (old_action != kWait && state_.action == kWait) {
111     // If we are transitioning to the WAIT state, start the timer.
112     ScheduleTimer(state_.next_gc_start_ms - event.time_ms);
113   }
114 }
115
116
// Handles a background-idle notification. If the state machine consumed the
// event by incrementing started_gcs while staying in kWait, a GC is started
// here: a full GC when the last GC is more than kLongDelayMs in the past,
// otherwise idle incremental marking.
void MemoryReducer::NotifyBackgroundIdleNotification(const Event& event) {
  DCHECK_EQ(kBackgroundIdleNotification, event.type);
  Action old_action = state_.action;
  int old_started_gcs = state_.started_gcs;
  state_ = Step(state_, event);
  if (old_action == kWait && state_.action == kWait &&
      old_started_gcs + 1 == state_.started_gcs) {
    // Step increments started_gcs for this event only when
    // can_start_incremental_gc was set, which implies marking is stopped.
    DCHECK(heap()->incremental_marking()->IsStopped());
    // TODO(ulan): Replace it with incremental marking GC once
    // chromium:490559 is fixed.
    if (event.time_ms > state_.last_gc_time_ms + kLongDelayMs) {
      heap()->CollectAllGarbage(Heap::kReduceMemoryFootprintMask,
                                "memory reducer background GC");
    } else {
      DCHECK(FLAG_incremental_marking);
      heap()->StartIdleIncrementalMarking();
      if (FLAG_trace_gc_verbose) {
        PrintIsolate(heap()->isolate(),
                     "Memory reducer: started GC #%d"
                     " (background idle)\n",
                     state_.started_gcs);
      }
    }
  }
}
142
143
144 bool MemoryReducer::WatchdogGC(const State& state, const Event& event) {
145   return state.last_gc_time_ms != 0 &&
146          event.time_ms > state.last_gc_time_ms + kWatchdogDelayMs;
147 }
148
149
// For specification of this function see the comment for MemoryReducer class.
// Pure transition function of the state machine: given the current state and
// an incoming event, returns the next state. It performs no side effects;
// the Notify* callers act on the returned state (starting GCs, timers).
MemoryReducer::State MemoryReducer::Step(const State& state,
                                         const Event& event) {
  if (!FLAG_incremental_marking) {
    // Without incremental marking the reducer stays inactive (kDone).
    return State(kDone, 0, 0, state.last_gc_time_ms);
  }
  switch (state.action) {
    case kDone:
      if (event.type == kTimer || event.type == kBackgroundIdleNotification) {
        return state;
      } else {
        DCHECK(event.type == kContextDisposed || event.type == kMarkCompact);
        // A context disposal or an external GC wakes the reducer: wait
        // kLongDelayMs before considering the first memory-reducing GC.
        return State(
            kWait, 0, event.time_ms + kLongDelayMs,
            event.type == kMarkCompact ? event.time_ms : state.last_gc_time_ms);
      }
    case kWait:
      switch (event.type) {
        case kContextDisposed:
          return state;
        case kTimer:
          if (state.started_gcs >= kMaxNumberOfGCs) {
            // GC budget for this cycle is exhausted; go back to kDone.
            return State(kDone, kMaxNumberOfGCs, 0.0, state.last_gc_time_ms);
          } else if (event.can_start_incremental_gc &&
                     (event.low_allocation_rate || WatchdogGC(state, event))) {
            // Start a GC only once the scheduled start time has passed.
            if (state.next_gc_start_ms <= event.time_ms) {
              return State(kRun, state.started_gcs + 1, 0.0,
                           state.last_gc_time_ms);
            } else {
              return state;
            }
          } else {
            // Conditions not met; push the next GC out by kLongDelayMs.
            return State(kWait, state.started_gcs, event.time_ms + kLongDelayMs,
                         state.last_gc_time_ms);
          }
        case kBackgroundIdleNotification:
          // Incrementing started_gcs here signals the caller
          // (NotifyBackgroundIdleNotification) to start a GC.
          if (event.can_start_incremental_gc &&
              state.started_gcs < kMaxNumberOfGCs) {
            return State(kWait, state.started_gcs + 1,
                         event.time_ms + kLongDelayMs, state.last_gc_time_ms);
          } else {
            return state;
          }
        case kMarkCompact:
          // A GC happened while waiting; record its time and keep waiting.
          return State(kWait, state.started_gcs, event.time_ms + kLongDelayMs,
                       event.time_ms);
      }
    case kRun:
      if (event.type != kMarkCompact) {
        return state;
      } else {
        // The GC we started has finished: schedule another round shortly if
        // more memory is likely to be freed and the budget allows, else done.
        if (state.started_gcs < kMaxNumberOfGCs &&
            (event.next_gc_likely_to_collect_more || state.started_gcs == 1)) {
          return State(kWait, state.started_gcs, event.time_ms + kShortDelayMs,
                       event.time_ms);
        } else {
          return State(kDone, kMaxNumberOfGCs, 0.0, event.time_ms);
        }
      }
  }
  UNREACHABLE();
  return State(kDone, 0, 0, 0.0);  // Make the compiler happy.
}
213
214
215 void MemoryReducer::ScheduleTimer(double delay_ms) {
216   DCHECK(delay_ms > 0);
217   // Leave some room for precision error in task scheduler.
218   const double kSlackMs = 100;
219   v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(heap()->isolate());
220   auto timer_task = new MemoryReducer::TimerTask(this);
221   V8::GetCurrentPlatform()->CallDelayedOnForegroundThread(
222       isolate, timer_task, (delay_ms + kSlackMs) / 1000.0);
223 }
224
225
226 void MemoryReducer::TearDown() { state_ = State(kDone, 0, 0, 0.0); }
227
228 }  // namespace internal
229 }  // namespace v8