// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 #ifndef V8_CPU_PROFILER_H_
29 #define V8_CPU_PROFILER_H_
31 #include "allocation.h"
32 #include "atomicops.h"
33 #include "circular-queue.h"
34 #include "unbound-queue.h"
39 // Forward declarations.
// NOTE(review): the leading integers on each line of this excerpt are
// extraction artifacts (the original file's line numbers), and gaps in
// that numbering mean several original lines are missing throughout.
43 class CpuProfilesCollection;
44 class ProfileGenerator;
45 class TokenEnumerator;
// X-macro: invokes V(enum_tag, record_class) once per code-event kind.
// Expanded below with DECLARE_TYPE (event enum in CodeEventRecord) and
// DECLARE_CLASS (record union in ProfilerEventsProcessor).
47 #define CODE_EVENTS_TYPE_LIST(V) \
48 V(CODE_CREATION, CodeCreateEventRecord) \
49 V(CODE_MOVE, CodeMoveEventRecord) \
50 V(SHARED_FUNC_MOVE, SharedFunctionInfoMoveEventRecord)
53 class CodeEventRecord {
// Base record for code events. DECLARE_TYPE turns each
// CODE_EVENTS_TYPE_LIST entry into an enum constant named after its
// first argument; the enum wrapper lines (and the rest of the class
// body) are missing from this excerpt.
55 #define DECLARE_TYPE(type, ignore) type,
58 CODE_EVENTS_TYPE_LIST(DECLARE_TYPE)
68 class CodeCreateEventRecord : public CodeEventRecord {
// Applies this "code created" event to the given CodeMap — presumably
// registering a new code entry; fields and method body are not visible
// in this excerpt.
75 INLINE(void UpdateCodeMap(CodeMap* code_map));
79 class CodeMoveEventRecord : public CodeEventRecord {
// Applies this "code moved" event to the given CodeMap (declaration
// only; the record's from/to fields are missing from this excerpt).
84 INLINE(void UpdateCodeMap(CodeMap* code_map));
88 class SharedFunctionInfoMoveEventRecord : public CodeEventRecord {
// Applies this SharedFunctionInfo relocation to the given CodeMap
// (declaration only; fields are missing from this excerpt).
93 INLINE(void UpdateCodeMap(CodeMap* code_map));
97 class TickSampleEventRecord {
99 // The parameterless constructor is used when we dequeue data from
// (continuation of the comment above is missing from this excerpt —
// presumably "...the circular buffer"; the record is then read in
// place without re-initialization.)
101 TickSampleEventRecord() { }
// Tags the record with a sequence number; the member-initializer list
// (which sets order and the filler word) is not visible here.
102 explicit TickSampleEventRecord(unsigned order)
// Guards the invariant documented below: the filler word must never
// collide with the queue's kClear sentinel.
105 ASSERT(filler != SamplingCircularQueue::kClear);
108 // The first machine word of a TickSampleEventRecord must not ever
109 // become equal to SamplingCircularQueue::kClear. As both order and
110 // TickSample's first field are not reliable in this sense (order
111 // can overflow, TickSample can have all fields reset), we are
112 // forced to use an artificial filler field.
// Reinterprets a raw circular-queue slot pointer as a record pointer.
117 static TickSampleEventRecord* cast(void* value) {
118 return reinterpret_cast<TickSampleEventRecord*>(value);
123 // This class implements both the profile events processor thread and
124 // methods called by event producers: VM and stack sampler threads.
125 class ProfilerEventsProcessor : public Thread {
127 explicit ProfilerEventsProcessor(ProfileGenerator* generator);
128 virtual ~ProfilerEventsProcessor() {}
// Thread control: Stop() clears the flag that running() exposes;
// presumably the processing loop polls running() to terminate.
132 inline void Stop() { running_ = false; }
133 INLINE(bool running()) { return running_; }
135 // Events adding methods. Called by VM threads.
136 void CallbackCreateEvent(Logger::LogEventsAndTags tag,
137 const char* prefix, String* name,
139 void CodeCreateEvent(Logger::LogEventsAndTags tag,
141 String* resource_name, int line_number,
142 Address start, unsigned size,
144 void CodeCreateEvent(Logger::LogEventsAndTags tag,
146 Address start, unsigned size);
147 void CodeCreateEvent(Logger::LogEventsAndTags tag,
149 Address start, unsigned size);
150 void CodeMoveEvent(Address from, Address to);
151 void CodeDeleteEvent(Address from);
152 void SharedFunctionInfoMoveEvent(Address from, Address to);
153 void RegExpCodeCreateEvent(Logger::LogEventsAndTags tag,
154 const char* prefix, String* name,
155 Address start, unsigned size);
156 // Puts current stack into tick sample events buffer.
157 void AddCurrentStack();
159 // Tick sample events are filled directly in the buffer of the circular
160 // queue (because the structure is of fixed width, but usually not all
161 // stack frame entries are filled.) This method returns a pointer to the
162 // next record of the buffer.
163 INLINE(TickSample* TickSampleEvent());
// Union sized to hold any one of the code-event record types from
// CODE_EVENTS_TYPE_LIST; `generic` gives access to the common base.
166 union CodeEventsContainer {
167 CodeEventRecord generic;
168 #define DECLARE_CLASS(ignore, type) type type##_;
169 CODE_EVENTS_TYPE_LIST(DECLARE_CLASS)
173 // Called from events processing thread (Run() method.)
174 bool ProcessCodeEvent(unsigned* dequeue_order);
175 bool ProcessTicks(unsigned dequeue_order);
177 INLINE(static bool FilterOutCodeCreateEvent(Logger::LogEventsAndTags tag));
// Data members: the generator consumes events; code events and VM-side
// ticks go through unbounded queues, sampler ticks through the
// fixed-size circular queue; enqueue_order_ sequences records.
179 ProfileGenerator* generator_;
181 UnboundQueue<CodeEventsContainer> events_buffer_;
182 SamplingCircularQueue ticks_buffer_;
183 UnboundQueue<TickSampleEventRecord> ticks_from_vm_buffer_;
184 unsigned enqueue_order_;
187 } } // namespace v8::internal
// Forwards a logger call and, when profiling is active for the isolate,
// mirrors the same Call onto CpuProfiler. The macro's closing lines
// (apparently a do/while wrapper's tail) are missing from this excerpt;
// no comments can be interleaved below because every visible line ends
// in a continuation backslash.
190 #define PROFILE(isolate, Call) \
191 LOG(isolate, Call); \
193 if (v8::internal::CpuProfiler::is_profiling(isolate)) { \
194 v8::internal::CpuProfiler::Call; \
203 // TODO(isolates): isolatify this class.
// NOTE(review): the `class CpuProfiler {` line itself (and some member
// lines) are missing from this excerpt; what follows is the class body.
// --- Static lifecycle / profile-collection API ---
207 static void TearDown();
209 static void StartProfiling(const char* title);
210 static void StartProfiling(String* title);
211 static CpuProfile* StopProfiling(const char* title);
212 static CpuProfile* StopProfiling(Object* security_token, String* title);
213 static int GetProfilesCount();
214 static CpuProfile* GetProfile(Object* security_token, int index);
215 static CpuProfile* FindProfile(Object* security_token, unsigned uid);
216 static void DeleteAllProfiles();
217 static void DeleteProfile(CpuProfile* profile);
218 static bool HasDetachedProfiles();
220 // Invoked from stack sampler (thread or signal handler.)
221 static TickSample* TickSampleEvent(Isolate* isolate);
223 // Must be called via PROFILE macro, otherwise will crash when
224 // profiling is not enabled.
225 static void CallbackEvent(String* name, Address entry_point);
226 static void CodeCreateEvent(Logger::LogEventsAndTags tag,
227 Code* code, const char* comment);
228 static void CodeCreateEvent(Logger::LogEventsAndTags tag,
229 Code* code, String* name);
230 static void CodeCreateEvent(Logger::LogEventsAndTags tag,
232 SharedFunctionInfo* shared,
234 static void CodeCreateEvent(Logger::LogEventsAndTags tag,
236 SharedFunctionInfo* shared,
237 String* source, int line);
238 static void CodeCreateEvent(Logger::LogEventsAndTags tag,
239 Code* code, int args_count);
240 static void CodeMovingGCEvent() {}
241 static void CodeMoveEvent(Address from, Address to);
242 static void CodeDeleteEvent(Address from);
243 static void GetterCallbackEvent(String* name, Address entry_point);
244 static void RegExpCodeCreateEvent(Code* code, String* source);
245 static void SetterCallbackEvent(String* name, Address entry_point);
246 static void SharedFunctionInfoMoveEvent(Address from, Address to);
248 // TODO(isolates): this doesn't have to use atomics anymore.
// Cheap "is profiling enabled" check: NULL-safe, reads the flag with a
// no-barrier atomic load so producers can call it without locking.
250 static INLINE(bool is_profiling(Isolate* isolate)) {
251 CpuProfiler* profiler = isolate->cpu_profiler();
252 return profiler != NULL && NoBarrier_Load(&profiler->is_profiling_);
// --- Instance-level implementation backing the static API above ---
// (the closing brace of is_profiling and the private: specifier are
// missing from this excerpt.)
258 void StartCollectingProfile(const char* title);
259 void StartCollectingProfile(String* title);
260 void StartProcessorIfNotStarted();
261 CpuProfile* StopCollectingProfile(const char* title);
262 CpuProfile* StopCollectingProfile(Object* security_token, String* title);
263 void StopProcessorIfLastProfile(const char* title);
264 void StopProcessor();
265 void ResetProfiles();
// --- Data members ---
267 CpuProfilesCollection* profiles_;
268 unsigned next_profile_uid_;
269 TokenEnumerator* token_enumerator_;
270 ProfileGenerator* generator_;
271 ProfilerEventsProcessor* processor_;
272 int saved_logging_nesting_;
273 bool need_to_stop_sampler_;
274 Atomic32 is_profiling_;
277 DISALLOW_COPY_AND_ASSIGN(CpuProfiler);
280 } } // namespace v8::internal
283 #endif // V8_CPU_PROFILER_H_