[V8] Introduce a QML compilation mode
[profile/ivi/qtjsbackend.git] src/3rdparty/v8/src/cpu-profiler.cc
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#include "cpu-profiler-inl.h"

#include "frames-inl.h"
#include "hashmap.h"
#include "log-inl.h"
#include "vm-state-inl.h"

#include "../include/v8-profiler.h"

namespace v8 {
namespace internal {

static const int kEventsBufferSize = 256 * KB;
static const int kTickSamplesBufferChunkSize = 64 * KB;
static const int kTickSamplesBufferChunksCount = 16;
static const int kProfilerStackSize = 64 * KB;

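// ProfilerEventsProcessor is the worker thread that turns raw profiling
// events into a CPU profile. Code creation and move events arrive through
// events_buffer_, tick samples written by the sampler arrive through
// ticks_buffer_, and stacks captured synchronously on the VM thread arrive
// through ticks_from_vm_buffer_; the ProfileGenerator owns the resulting
// code map and profile tree.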
ProfilerEventsProcessor::ProfilerEventsProcessor(ProfileGenerator* generator)
    : Thread(Thread::Options("v8:ProfEvntProc", kProfilerStackSize)),
      generator_(generator),
      running_(true),
      ticks_buffer_(sizeof(TickSampleEventRecord),
                    kTickSamplesBufferChunkSize,
                    kTickSamplesBufferChunksCount),
      enqueue_order_(0) {
}


void ProfilerEventsProcessor::CallbackCreateEvent(Logger::LogEventsAndTags tag,
                                                  const char* prefix,
                                                  String* name,
                                                  Address start) {
  if (FilterOutCodeCreateEvent(tag)) return;
  CodeEventsContainer evt_rec;
  CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
  rec->type = CodeEventRecord::CODE_CREATION;
  rec->order = ++enqueue_order_;
  rec->start = start;
  rec->entry = generator_->NewCodeEntry(tag, prefix, name);
  rec->size = 1;
  rec->shared = NULL;
  events_buffer_.Enqueue(evt_rec);
}


void ProfilerEventsProcessor::CodeCreateEvent(Logger::LogEventsAndTags tag,
                                              String* name,
                                              String* resource_name,
                                              int line_number,
                                              Address start,
                                              unsigned size,
                                              Address shared) {
  if (FilterOutCodeCreateEvent(tag)) return;
  CodeEventsContainer evt_rec;
  CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
  rec->type = CodeEventRecord::CODE_CREATION;
  rec->order = ++enqueue_order_;
  rec->start = start;
  rec->entry = generator_->NewCodeEntry(tag, name, resource_name, line_number);
  rec->size = size;
  rec->shared = shared;
  events_buffer_.Enqueue(evt_rec);
}


void ProfilerEventsProcessor::CodeCreateEvent(Logger::LogEventsAndTags tag,
                                              const char* name,
                                              Address start,
                                              unsigned size) {
  if (FilterOutCodeCreateEvent(tag)) return;
  CodeEventsContainer evt_rec;
  CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
  rec->type = CodeEventRecord::CODE_CREATION;
  rec->order = ++enqueue_order_;
  rec->start = start;
  rec->entry = generator_->NewCodeEntry(tag, name);
  rec->size = size;
  rec->shared = NULL;
  events_buffer_.Enqueue(evt_rec);
}


void ProfilerEventsProcessor::CodeCreateEvent(Logger::LogEventsAndTags tag,
                                              int args_count,
                                              Address start,
                                              unsigned size) {
  if (FilterOutCodeCreateEvent(tag)) return;
  CodeEventsContainer evt_rec;
  CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
  rec->type = CodeEventRecord::CODE_CREATION;
  rec->order = ++enqueue_order_;
  rec->start = start;
  rec->entry = generator_->NewCodeEntry(tag, args_count);
  rec->size = size;
  rec->shared = NULL;
  events_buffer_.Enqueue(evt_rec);
}


void ProfilerEventsProcessor::CodeMoveEvent(Address from, Address to) {
  CodeEventsContainer evt_rec;
  CodeMoveEventRecord* rec = &evt_rec.CodeMoveEventRecord_;
  rec->type = CodeEventRecord::CODE_MOVE;
  rec->order = ++enqueue_order_;
  rec->from = from;
  rec->to = to;
  events_buffer_.Enqueue(evt_rec);
}


void ProfilerEventsProcessor::SharedFunctionInfoMoveEvent(Address from,
                                                          Address to) {
  CodeEventsContainer evt_rec;
  SharedFunctionInfoMoveEventRecord* rec =
      &evt_rec.SharedFunctionInfoMoveEventRecord_;
  rec->type = CodeEventRecord::SHARED_FUNC_MOVE;
  rec->order = ++enqueue_order_;
  rec->from = from;
  rec->to = to;
  events_buffer_.Enqueue(evt_rec);
}


void ProfilerEventsProcessor::RegExpCodeCreateEvent(
    Logger::LogEventsAndTags tag,
    const char* prefix,
    String* name,
    Address start,
    unsigned size) {
  if (FilterOutCodeCreateEvent(tag)) return;
  CodeEventsContainer evt_rec;
  CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
  rec->type = CodeEventRecord::CODE_CREATION;
  rec->order = ++enqueue_order_;
  rec->start = start;
  rec->entry = generator_->NewCodeEntry(tag, prefix, name);
  rec->size = size;
  events_buffer_.Enqueue(evt_rec);
}

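// Captures the current JavaScript stack synchronously on the VM thread and
// queues it as a tick sample, so that a profile started while code is
// already running attributes the initial sample to the correct frames.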
void ProfilerEventsProcessor::AddCurrentStack() {
  TickSampleEventRecord record(enqueue_order_);
  TickSample* sample = &record.sample;
  Isolate* isolate = Isolate::Current();
  sample->state = isolate->current_vm_state();
  sample->pc = reinterpret_cast<Address>(sample);  // Not NULL.
  for (StackTraceFrameIterator it(isolate);
       !it.done() && sample->frames_count < TickSample::kMaxFramesCount;
       it.Advance()) {
    sample->stack[sample->frames_count++] = it.frame()->pc();
  }
  ticks_from_vm_buffer_.Enqueue(record);
}

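// Dequeues one code event, applies it to the generator's code map, and
// advances *dequeue_order to that event's enqueue order. Returns false only
// when the code events queue is empty.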
bool ProfilerEventsProcessor::ProcessCodeEvent(unsigned* dequeue_order) {
  if (!events_buffer_.IsEmpty()) {
    CodeEventsContainer record;
    events_buffer_.Dequeue(&record);
    switch (record.generic.type) {
#define PROFILER_TYPE_CASE(type, clss)                          \
      case CodeEventRecord::type:                               \
        record.clss##_.UpdateCodeMap(generator_->code_map());   \
        break;

      CODE_EVENTS_TYPE_LIST(PROFILER_TYPE_CASE)

#undef PROFILER_TYPE_CASE
      default: return true;  // Skip record.
    }
    *dequeue_order = record.generic.order;
    return true;
  }
  return false;
}

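// Processes the tick samples whose enqueue order matches dequeue_order,
// i.e. samples taken before the next pending code event. Returning true
// tells the caller that everything up to dequeue_order has been consumed
// and the next code event can be applied; false means both tick queues
// have been drained.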
bool ProfilerEventsProcessor::ProcessTicks(unsigned dequeue_order) {
  while (true) {
    if (!ticks_from_vm_buffer_.IsEmpty()
        && ticks_from_vm_buffer_.Peek()->order == dequeue_order) {
      TickSampleEventRecord record;
      ticks_from_vm_buffer_.Dequeue(&record);
      generator_->RecordTickSample(record.sample);
    }

    const TickSampleEventRecord* rec =
        TickSampleEventRecord::cast(ticks_buffer_.StartDequeue());
    if (rec == NULL) return !ticks_from_vm_buffer_.IsEmpty();
    // Make a local copy of the tick sample record to ensure that it won't
    // be modified as we are processing it. This is possible because the
    // sampler writes w/o any sync to the queue, so if the processor
    // gets far behind, a record may be modified right under its feet.
    TickSampleEventRecord record = *rec;
    if (record.order == dequeue_order) {
      // A paranoid check to make sure that we don't get a memory overrun
      // in case of frames_count having a wild value.
      if (record.sample.frames_count < 0
          || record.sample.frames_count > TickSample::kMaxFramesCount)
        record.sample.frames_count = 0;
      generator_->RecordTickSample(record.sample);
      ticks_buffer_.FinishDequeue();
    } else {
      return true;
    }
  }
}

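// Main loop of the processor thread: interleaves tick samples and code
// events in enqueue order, so that each sample is resolved against the
// code map as it looked when the sample was taken.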
void ProfilerEventsProcessor::Run() {
  unsigned dequeue_order = 0;

  while (running_) {
    // Process the tick samples available for the current dequeue order.
    if (ProcessTicks(dequeue_order)) {
      // All ticks of the current dequeue_order are processed,
      // proceed to the next code event.
      ProcessCodeEvent(&dequeue_order);
    }
    YieldCPU();
  }

  // Process remaining tick events.
  ticks_buffer_.FlushResidualRecords();
  // Keep processing while there are tick events left, advancing through
  // the remaining code events.
  while (ProcessTicks(dequeue_order) && ProcessCodeEvent(&dequeue_order)) { }
}

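// The static methods below are the internal entry points behind the public
// profiler API declared in include/v8-profiler.h. A minimal embedder-side
// sketch of collecting a profile, assuming this era's public v8::CpuProfiler
// interface (RunScriptUnderTest is a hypothetical stand-in for the code
// being measured):
//
//   v8::HandleScope scope;
//   v8::Handle<v8::String> title = v8::String::New("startup");
//   v8::CpuProfiler::StartProfiling(title);
//   RunScriptUnderTest();
//   const v8::CpuProfile* profile = v8::CpuProfiler::StopProfiling(title);
//   // Walk profile->GetTopDownRoot() to inspect where the time was spent.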
void CpuProfiler::StartProfiling(const char* title) {
  ASSERT(Isolate::Current()->cpu_profiler() != NULL);
  Isolate::Current()->cpu_profiler()->StartCollectingProfile(title);
}


void CpuProfiler::StartProfiling(String* title) {
  ASSERT(Isolate::Current()->cpu_profiler() != NULL);
  Isolate::Current()->cpu_profiler()->StartCollectingProfile(title);
}


CpuProfile* CpuProfiler::StopProfiling(const char* title) {
  Isolate* isolate = Isolate::Current();
  return is_profiling(isolate) ?
      isolate->cpu_profiler()->StopCollectingProfile(title) : NULL;
}


CpuProfile* CpuProfiler::StopProfiling(Object* security_token, String* title) {
  Isolate* isolate = Isolate::Current();
  return is_profiling(isolate) ?
      isolate->cpu_profiler()->StopCollectingProfile(
          security_token, title) : NULL;
}


int CpuProfiler::GetProfilesCount() {
  ASSERT(Isolate::Current()->cpu_profiler() != NULL);
  // The count of profiles doesn't depend on a security token.
  return Isolate::Current()->cpu_profiler()->profiles_->Profiles(
      TokenEnumerator::kNoSecurityToken)->length();
}


CpuProfile* CpuProfiler::GetProfile(Object* security_token, int index) {
  ASSERT(Isolate::Current()->cpu_profiler() != NULL);
  CpuProfiler* profiler = Isolate::Current()->cpu_profiler();
  const int token = profiler->token_enumerator_->GetTokenId(security_token);
  return profiler->profiles_->Profiles(token)->at(index);
}


CpuProfile* CpuProfiler::FindProfile(Object* security_token, unsigned uid) {
  ASSERT(Isolate::Current()->cpu_profiler() != NULL);
  CpuProfiler* profiler = Isolate::Current()->cpu_profiler();
  const int token = profiler->token_enumerator_->GetTokenId(security_token);
  return profiler->profiles_->GetProfile(token, uid);
}

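// Called by the sampler to obtain the slot in the tick sample buffer that
// the next sample will be written into; returns NULL when profiling is off
// so the sampler can skip the work entirely.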
TickSample* CpuProfiler::TickSampleEvent(Isolate* isolate) {
  if (CpuProfiler::is_profiling(isolate)) {
    return isolate->cpu_profiler()->processor_->TickSampleEvent();
  } else {
    return NULL;
  }
}


void CpuProfiler::DeleteAllProfiles() {
  Isolate* isolate = Isolate::Current();
  ASSERT(isolate->cpu_profiler() != NULL);
  if (is_profiling(isolate)) {
    isolate->cpu_profiler()->StopProcessor();
  }
  isolate->cpu_profiler()->ResetProfiles();
}


void CpuProfiler::DeleteProfile(CpuProfile* profile) {
  ASSERT(Isolate::Current()->cpu_profiler() != NULL);
  Isolate::Current()->cpu_profiler()->profiles_->RemoveProfile(profile);
  delete profile;
}


bool CpuProfiler::HasDetachedProfiles() {
  ASSERT(Isolate::Current()->cpu_profiler() != NULL);
  return Isolate::Current()->cpu_profiler()->profiles_->HasDetachedProfiles();
}


void CpuProfiler::CallbackEvent(String* name, Address entry_point) {
  Isolate::Current()->cpu_profiler()->processor_->CallbackCreateEvent(
      Logger::CALLBACK_TAG, CodeEntry::kEmptyNamePrefix, name, entry_point);
}


void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
                                  Code* code, const char* comment) {
  Isolate::Current()->cpu_profiler()->processor_->CodeCreateEvent(
      tag, comment, code->address(), code->ExecutableSize());
}


void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
                                  Code* code, String* name) {
  Isolate* isolate = Isolate::Current();
  isolate->cpu_profiler()->processor_->CodeCreateEvent(
      tag,
      name,
      isolate->heap()->empty_string(),
      v8::CpuProfileNode::kNoLineNumberInfo,
      code->address(),
      code->ExecutableSize(),
      NULL);
}


void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
                                  Code* code,
                                  SharedFunctionInfo* shared,
                                  String* name) {
  Isolate* isolate = Isolate::Current();
  isolate->cpu_profiler()->processor_->CodeCreateEvent(
      tag,
      name,
      isolate->heap()->empty_string(),
      v8::CpuProfileNode::kNoLineNumberInfo,
      code->address(),
      code->ExecutableSize(),
      shared->address());
}


void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
                                  Code* code,
                                  SharedFunctionInfo* shared,
                                  String* source, int line) {
  Isolate::Current()->cpu_profiler()->processor_->CodeCreateEvent(
      tag,
      shared->DebugName(),
      source,
      line,
      code->address(),
      code->ExecutableSize(),
      shared->address());
}


void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
                                  Code* code, int args_count) {
  Isolate::Current()->cpu_profiler()->processor_->CodeCreateEvent(
      tag,
      args_count,
      code->address(),
      code->ExecutableSize());
}


void CpuProfiler::CodeMoveEvent(Address from, Address to) {
  Isolate::Current()->cpu_profiler()->processor_->CodeMoveEvent(from, to);
}


void CpuProfiler::CodeDeleteEvent(Address from) {
}


void CpuProfiler::SharedFunctionInfoMoveEvent(Address from, Address to) {
  CpuProfiler* profiler = Isolate::Current()->cpu_profiler();
  profiler->processor_->SharedFunctionInfoMoveEvent(from, to);
}


void CpuProfiler::GetterCallbackEvent(String* name, Address entry_point) {
  Isolate::Current()->cpu_profiler()->processor_->CallbackCreateEvent(
      Logger::CALLBACK_TAG, "get ", name, entry_point);
}


void CpuProfiler::RegExpCodeCreateEvent(Code* code, String* source) {
  Isolate::Current()->cpu_profiler()->processor_->RegExpCodeCreateEvent(
      Logger::REG_EXP_TAG,
      "RegExp: ",
      source,
      code->address(),
      code->ExecutableSize());
}


void CpuProfiler::SetterCallbackEvent(String* name, Address entry_point) {
  Isolate::Current()->cpu_profiler()->processor_->CallbackCreateEvent(
      Logger::CALLBACK_TAG, "set ", name, entry_point);
}


CpuProfiler::CpuProfiler()
    : profiles_(new CpuProfilesCollection()),
      next_profile_uid_(1),
      token_enumerator_(new TokenEnumerator()),
      generator_(NULL),
      processor_(NULL),
      need_to_stop_sampler_(false),
      is_profiling_(false) {
}


CpuProfiler::~CpuProfiler() {
  delete token_enumerator_;
  delete profiles_;
}


void CpuProfiler::ResetProfiles() {
  delete profiles_;
  profiles_ = new CpuProfilesCollection();
}


void CpuProfiler::StartCollectingProfile(const char* title) {
  if (profiles_->StartProfiling(title, next_profile_uid_++)) {
    StartProcessorIfNotStarted();
  }
  processor_->AddCurrentStack();
}


void CpuProfiler::StartCollectingProfile(String* title) {
  StartCollectingProfile(profiles_->GetName(title));
}

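// Lazily creates the generator and the processor thread when the first
// profile becomes active, logs the code objects that already exist on the
// heap so their addresses can be symbolized, and enables the stack sampler.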
void CpuProfiler::StartProcessorIfNotStarted() {
  if (processor_ == NULL) {
    Isolate* isolate = Isolate::Current();

    // Disable logging when using the new implementation.
    saved_logging_nesting_ = isolate->logger()->logging_nesting_;
    isolate->logger()->logging_nesting_ = 0;
    generator_ = new ProfileGenerator(profiles_);
    processor_ = new ProfilerEventsProcessor(generator_);
    NoBarrier_Store(&is_profiling_, true);
    processor_->Start();
    // Enumerate the code objects that already exist in the heap.
    if (isolate->heap()->HasBeenSetUp()) {
      if (!FLAG_prof_browser_mode) {
        bool saved_log_code_flag = FLAG_log_code;
        FLAG_log_code = true;
        isolate->logger()->LogCodeObjects();
        FLAG_log_code = saved_log_code_flag;
      }
      isolate->logger()->LogCompiledFunctions();
      isolate->logger()->LogAccessorCallbacks();
    }
    // Enable stack sampling.
    Sampler* sampler = reinterpret_cast<Sampler*>(isolate->logger()->ticker_);
    if (!sampler->IsActive()) {
      sampler->Start();
      need_to_stop_sampler_ = true;
    }
    sampler->IncreaseProfilingDepth();
  }
}


CpuProfile* CpuProfiler::StopCollectingProfile(const char* title) {
  const double actual_sampling_rate = generator_->actual_sampling_rate();
  StopProcessorIfLastProfile(title);
  CpuProfile* result =
      profiles_->StopProfiling(TokenEnumerator::kNoSecurityToken,
                               title,
                               actual_sampling_rate);
  if (result != NULL) {
    result->Print();
  }
  return result;
}


CpuProfile* CpuProfiler::StopCollectingProfile(Object* security_token,
                                               String* title) {
  const double actual_sampling_rate = generator_->actual_sampling_rate();
  const char* profile_title = profiles_->GetName(title);
  StopProcessorIfLastProfile(profile_title);
  int token = token_enumerator_->GetTokenId(security_token);
  return profiles_->StopProfiling(token, profile_title, actual_sampling_rate);
}


void CpuProfiler::StopProcessorIfLastProfile(const char* title) {
  if (profiles_->IsLastProfile(title)) StopProcessor();
}

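// Tears profiling down in the reverse order of StartProcessorIfNotStarted:
// the sampler is stopped first, then the processor thread is joined before
// the generator it feeds is deleted, and finally logging is restored.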
void CpuProfiler::StopProcessor() {
  Logger* logger = Isolate::Current()->logger();
  Sampler* sampler = reinterpret_cast<Sampler*>(logger->ticker_);
  sampler->DecreaseProfilingDepth();
  if (need_to_stop_sampler_) {
    sampler->Stop();
    need_to_stop_sampler_ = false;
  }
  NoBarrier_Store(&is_profiling_, false);
  processor_->Stop();
  processor_->Join();
  delete processor_;
  delete generator_;
  processor_ = NULL;
  generator_ = NULL;
  logger->logging_nesting_ = saved_logging_nesting_;
}

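// SetUp() installs the per-isolate CpuProfiler instance if it does not exist
// yet; TearDown() deletes it and clears the isolate's pointer.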
void CpuProfiler::SetUp() {
  Isolate* isolate = Isolate::Current();
  if (isolate->cpu_profiler() == NULL) {
    isolate->set_cpu_profiler(new CpuProfiler());
  }
}


void CpuProfiler::TearDown() {
  Isolate* isolate = Isolate::Current();
  if (isolate->cpu_profiler() != NULL) {
    delete isolate->cpu_profiler();
  }
  isolate->set_cpu_profiler(NULL);
}

} }  // namespace v8::internal