1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
5 #ifndef V8_CPU_PROFILER_H_
6 #define V8_CPU_PROFILER_H_
8 #include "src/allocation.h"
9 #include "src/base/atomicops.h"
10 #include "src/base/platform/time.h"
11 #include "src/circular-queue.h"
12 #include "src/sampler.h"
13 #include "src/unbound-queue.h"
18 // Forward declarations.
21 class CompilationInfo;
23 class CpuProfilesCollection;
24 class ProfileGenerator;
// X-macro listing every code event record kind as V(enum constant, record
// class).  Expanded below to declare the CodeEventRecord type enumerators
// (via DECLARE_TYPE) and the per-type members of CodeEventsContainer
// (via DECLARE_CLASS).
#define CODE_EVENTS_TYPE_LIST(V) \
  V(CODE_CREATION, CodeCreateEventRecord) \
  V(CODE_MOVE, CodeMoveEventRecord) \
  V(CODE_DISABLE_OPT, CodeDisableOptEventRecord) \
  V(CODE_DEOPT, CodeDeoptEventRecord) \
  V(SHARED_FUNC_MOVE, SharedFunctionInfoMoveEventRecord) \
  V(REPORT_BUILTIN, ReportBuiltinEventRecord)
// Base class for records sent from the VM to the profiler events processor.
// DECLARE_TYPE turns each CODE_EVENTS_TYPE_LIST entry into a type
// enumerator.  NOTE(review): the surrounding enum declaration, access
// specifiers, and the closing brace are elided in this chunk.
class CodeEventRecord {
#define DECLARE_TYPE(type, ignore) type,
  CODE_EVENTS_TYPE_LIST(DECLARE_TYPE)

  // Sequence number used to order code events relative to tick samples;
  // mutable so it can be stamped on an otherwise-const record.
  mutable unsigned order;
// Record describing creation of a code object.  Applied to the profiler's
// CodeMap on the processor thread via UpdateCodeMap.  NOTE(review): data
// members and the closing brace are elided in this chunk.
class CodeCreateEventRecord : public CodeEventRecord {
  INLINE(void UpdateCodeMap(CodeMap* code_map));
// Record describing relocation of a code object; UpdateCodeMap applies the
// move to the profiler's CodeMap.  NOTE(review): from/to members are elided
// in this chunk.
class CodeMoveEventRecord : public CodeEventRecord {
  INLINE(void UpdateCodeMap(CodeMap* code_map));
// Record emitted when optimization is disabled for a function; carries the
// bailout reason string to attach to the code entry.
class CodeDisableOptEventRecord : public CodeEventRecord {
  // Static string describing why optimization was disabled; not owned.
  const char* bailout_reason;

  INLINE(void UpdateCodeMap(CodeMap* code_map));
// Record emitted on deoptimization; carries the deopt reason string to
// attach to the code entry.  NOTE(review): further members are elided in
// this chunk.
class CodeDeoptEventRecord : public CodeEventRecord {
  // Static string describing the deoptimization reason; not owned.
  const char* deopt_reason;

  INLINE(void UpdateCodeMap(CodeMap* code_map));
// Record describing relocation of a SharedFunctionInfo object; applied to
// the CodeMap on the processor thread.  NOTE(review): from/to members are
// elided in this chunk.
class SharedFunctionInfoMoveEventRecord : public CodeEventRecord {
  INLINE(void UpdateCodeMap(CodeMap* code_map));
// Record reporting a builtin's identity so ticks inside it can be
// attributed to the named builtin.
class ReportBuiltinEventRecord : public CodeEventRecord {
  // Which builtin this record describes.
  Builtins::Name builtin_id;

  INLINE(void UpdateCodeMap(CodeMap* code_map));
// A stack sample captured by the profiler sampler, tagged with the order
// number of the last code event preceding it.  NOTE(review): data members
// and the closing brace are elided in this chunk.
class TickSampleEventRecord {
  // The parameterless constructor is used when we dequeue data from
  // the ticks buffer (NOTE(review): original sentence continues on a line
  // elided from this chunk).
  TickSampleEventRecord() { }
  explicit TickSampleEventRecord(unsigned order) : order(order) { }
// Tagged-union container holding any one of the code event record types;
// the constructor's type argument selects the active member.
// NOTE(review): the constructor body, union wrapper (if any), and closing
// brace are elided in this chunk.
class CodeEventsContainer {
  explicit CodeEventsContainer(
      CodeEventRecord::Type type = CodeEventRecord::NONE) {
  // Generic view of the stored record, exposing the base-class fields.
  CodeEventRecord generic;
#define DECLARE_CLASS(ignore, type) type type##_;
  CODE_EVENTS_TYPE_LIST(DECLARE_CLASS)
// This class implements both the profile events processor thread and
// methods called by event producers: VM and stack sampler threads.
class ProfilerEventsProcessor : public base::Thread {
  // |generator| converts raw records into profile data; |period| is the
  // sampling period used by the processing loop.
  ProfilerEventsProcessor(ProfileGenerator* generator,
                          base::TimeDelta period);
  virtual ~ProfilerEventsProcessor() {}

  // Blocks the caller until the processing thread has stopped.
  void StopSynchronously();
  // Lock-free read of the running flag; safe to call from any thread.
  INLINE(bool running()) { return !!base::NoBarrier_Load(&running_); }
  // Posts a code event from the VM thread onto events_buffer_.
  void Enqueue(const CodeEventsContainer& event);

  // Puts current stack into tick sample events buffer.
  void AddCurrentStack(Isolate* isolate);
  // Like AddCurrentStack, but for a stack captured at deoptimization
  // (|from| / |fp_to_sp_delta| locate the deopted frame).
  void AddDeoptStack(Isolate* isolate, Address from, int fp_to_sp_delta);

  // Tick sample events are filled directly in the buffer of the circular
  // queue (because the structure is of fixed width, but usually not all
  // stack frame entries are filled.) This method returns a pointer to the
  // next record of the buffer.
  inline TickSample* StartTickSample();
  inline void FinishTickSample();

  // SamplingCircularQueue has stricter alignment requirements than a normal new
  // can fulfil, so we need to provide our own new/delete here.
  void* operator new(size_t size);
  void operator delete(void* ptr);

  // Called from events processing thread (Run() method.)
  bool ProcessCodeEvent();

  // NOTE(review): remaining enumerators and the enum's closing brace are
  // elided in this chunk.
  enum SampleProcessingResult {
    FoundSampleForNextCodeEvent,
  SampleProcessingResult ProcessOneSample();

  ProfileGenerator* generator_;
  // Accessed with atomic ops (see running()); controls the processing loop.
  base::Atomic32 running_;
  // Sampling period in microseconds.
  const base::TimeDelta period_;
  // Code events enqueued by the VM thread (Enqueue).
  UnboundQueue<CodeEventsContainer> events_buffer_;
  static const size_t kTickSampleBufferSize = 1 * MB;
  static const size_t kTickSampleQueueLength =
      kTickSampleBufferSize / sizeof(TickSampleEventRecord);
  // Fixed-capacity queue filled in place by the sampler
  // (StartTickSample/FinishTickSample).
  SamplingCircularQueue<TickSampleEventRecord,
                        kTickSampleQueueLength> ticks_buffer_;
  // Samples pushed from the VM thread (AddCurrentStack / AddDeoptStack).
  UnboundQueue<TickSampleEventRecord> ticks_from_vm_buffer_;
  // Used to pair tick samples with the code events they follow.
  unsigned last_code_event_id_;
  unsigned last_processed_code_event_id_;
// Forwards a logger call to the CPU profiler when either code-event logging
// or profiling is active; evaluates IsolateGetter exactly once.
// NOTE(review): the macro body is truncated in this chunk — the lines
// invoking Call and closing the block are elided.
#define PROFILE(IsolateGetter, Call) \
    Isolate* cpu_profiler_isolate = (IsolateGetter); \
    v8::internal::Logger* logger = cpu_profiler_isolate->logger(); \
    CpuProfiler* cpu_profiler = cpu_profiler_isolate->cpu_profiler(); \
    if (logger->is_logging_code_events() || cpu_profiler->is_profiling()) { \
// Per-isolate CPU profiler facade: manages profile collection lifecycle,
// owns the events processor, and receives code events as a
// CodeEventListener.  NOTE(review): access specifiers and some member
// lines are elided in this chunk.
class CpuProfiler : public CodeEventListener {
  explicit CpuProfiler(Isolate* isolate);

  // Constructor used by tests to inject the collaborators instead of
  // creating them internally.
  CpuProfiler(Isolate* isolate,
              CpuProfilesCollection* test_collection,
              ProfileGenerator* test_generator,
              ProfilerEventsProcessor* test_processor);

  virtual ~CpuProfiler();

  void set_sampling_interval(base::TimeDelta value);
  // Starts collecting a profile under |title|; |record_samples| is passed
  // through to the profiles collection (semantics not visible here).
  void StartProfiling(const char* title, bool record_samples = false);
  void StartProfiling(String* title, bool record_samples);
  // Stops collection and returns the finished profile for |title|.
  CpuProfile* StopProfiling(const char* title);
  CpuProfile* StopProfiling(String* title);
  int GetProfilesCount();
  CpuProfile* GetProfile(int index);
  void DeleteAllProfiles();
  void DeleteProfile(CpuProfile* profile);

  // Invoked from stack sampler (thread or signal handler.)
  inline TickSample* StartTickSample();
  inline void FinishTickSample();

  // Must be called via PROFILE macro, otherwise will crash when
  // profiling is not enabled.
  virtual void CallbackEvent(Name* name, Address entry_point);
  virtual void CodeCreateEvent(Logger::LogEventsAndTags tag,
                               Code* code, const char* comment);
  virtual void CodeCreateEvent(Logger::LogEventsAndTags tag,
                               Code* code, Name* name);
  virtual void CodeCreateEvent(Logger::LogEventsAndTags tag, Code* code,
                               SharedFunctionInfo* shared,
                               CompilationInfo* info, Name* script_name);
  virtual void CodeCreateEvent(Logger::LogEventsAndTags tag, Code* code,
                               SharedFunctionInfo* shared,
                               CompilationInfo* info, Name* script_name,
                               int line, int column);
  virtual void CodeCreateEvent(Logger::LogEventsAndTags tag,
                               Code* code, int args_count);
  virtual void CodeMovingGCEvent() {}
  virtual void CodeMoveEvent(Address from, Address to);
  virtual void CodeDisableOptEvent(Code* code, SharedFunctionInfo* shared);
  // NOTE(review): declaration truncated — trailing parameter(s) elided.
  virtual void CodeDeoptEvent(Code* code, int bailout_id, Address pc,
  virtual void CodeDeleteEvent(Address from);
  virtual void GetterCallbackEvent(Name* name, Address entry_point);
  virtual void RegExpCodeCreateEvent(Code* code, String* source);
  virtual void SetterCallbackEvent(Name* name, Address entry_point);
  virtual void SharedFunctionInfoMoveEvent(Address from, Address to);

  INLINE(bool is_profiling() const) { return is_profiling_; }
  // Address of the flag, used by generated/sampling code to poll it.
  // NOTE(review): closing brace of this accessor is elided in this chunk.
  bool* is_profiling_address() {
    return &is_profiling_;

  ProfileGenerator* generator() const { return generator_; }
  ProfilerEventsProcessor* processor() const { return processor_; }
  Isolate* isolate() const { return isolate_; }

  // Internal lifecycle helpers for the events processor thread.
  void StartProcessorIfNotStarted();
  void StopProcessorIfLastProfile(const char* title);
  void StopProcessor();
  void ResetProfiles();

  base::TimeDelta sampling_interval_;
  CpuProfilesCollection* profiles_;
  ProfileGenerator* generator_;
  ProfilerEventsProcessor* processor_;
  // Logger state saved while profiling is active (restored on stop).
  bool saved_is_logging_;

  DISALLOW_COPY_AND_ASSIGN(CpuProfiler);
283 } } // namespace v8::internal
286 #endif // V8_CPU_PROFILER_H_